code: string (38 to 801k characters) | repo_path: string (6 to 263 characters)
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import pandas as pd
import numpy as np

# Two 1-D arrays used for the stacking and indexing experiments below
A = np.array([1, 2, 3])
B = np.array([4, 5, 6])

# Stack the two arrays row-wise into a 2x3 array
np.vstack([A, B])

# Slicing keeps the dimension (an array of one element), plain indexing returns a scalar
A[0:1]
A[0]
A[1]
nwb_conversion_tools/Untitled.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # <font size = "5"> **Chapter 2: [Diffraction](CH2_00-Diffraction.ipynb)** </font> # # # <hr style="height:1px;border-top:4px solid #FF8200" /> # # # # # Basic Crystallography # [Download](https://raw.githubusercontent.com/gduscher/MSE672-Introduction-to-TEM//main/Diffraction/CH2_03-Basic_Crystallography.ipynb) # # [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)]( # https://colab.research.google.com/github/gduscher/MSE672-Introduction-to-TEM/blob/main/Diffraction/CH2_03-Basic_Crystallography.ipynb) # # # # part of # # <font size = "5"> **[MSE672: Introduction to Transmission Electron Microscopy](../_MSE672_Intro_TEM.ipynb)**</font> # # by <NAME>, Spring 2021 # # Microscopy Facilities<br> # Joint Institute of Advanced Materials<br> # Materials Science & Engineering<br> # The University of Tennessee, Knoxville # # Background and methods to analysis and quantification of data acquired with transmission electron microscopes. # # # + [markdown] slideshow={"slide_type": "slide"} # ## Load relevant python packages # ### Check Installed Packages # + import sys from pkg_resources import get_distribution, DistributionNotFound def test_package(package_name): """Test if package exists and returns version or -1""" try: version = get_distribution(package_name).version except (DistributionNotFound, ImportError) as err: version = '-1' return version # Colab setup ------------------ if 'google.colab' in sys.modules: # !pip install pyTEMlib -q # pyTEMlib setup ------------------ else: if test_package('pyTEMlib') < '0.2021.1.9': print('installing pyTEMlib') # !{sys.executable} -m pip install --upgrade pyTEMlib -q # ------------------------------ print('done') # + [markdown] slideshow={"slide_type": "slide"} # ### Load the plotting and figure packages # Import the python packages that we will use: # # Beside the basic numerical (numpy) and plotting (pylab of matplotlib) libraries, # * three dimensional plotting # and some libraries from the book # * kinematic scattering library. # + slideshow={"slide_type": "-"} import sys if 'google.colab' in sys.modules: # %pylab --no-import-all inline else: # %pylab --no-import-all notebook # 3D plotting package from mpl_toolkits.mplot3d import Axes3D # 3D plotting # Import libraries from the book import pyTEMlib import pyTEMlib.KinsCat as ks # Kinematic sCattering Library # with Atomic form factors from Kirklands book # it is a good idea to show the version numbers at this point for archiving reasons. print('pyTEM version: ',pyTEMlib.__version__) # + [markdown] slideshow={"slide_type": "slide"} # # ## Define Crystal # # A crystal is well defined by its unit cell and the atom positions within, the so called base. # # The unit cell fills the volume completely when translated in all three directions. Placing the unit cell in a global carthesian coordination system, we need the length of the sides and their angles for a complete description. This is depicted in the graph below. # ![unitcell_angles.jpg](attachment:unitcell_angles.jpg) # # Figure taken from the wikipedia page on lattice constants. 
# # Mathematically it is more advantageous to describe the unit cell as matrix, the # ### Structure Matrix # # This matrix consists of rows of vectors that span the unit cell: # $\begin{bmatrix} # a_1 & a_2 & a_3 \\ # b_1 & b_2 & b_3 \\ # c_1 & c_2 & c_3 \\ # \end{bmatrix} =\left[\vec{a},\vec{b},\vec{c}\right]$. # # This structure matrix is also used to describe the super cells in materials simulations for example density functional theory. # # The representation of unit cells as structure matrices allows also for easy conversions as we will see in the following. # # + slideshow={"slide_type": "-"} #Initialize the dictionary for all the input tags = {} # Create graphite unit cell (or structure matrix) a = b = 0.246 #nm c = 0.671 gamma = 60 alpha = beta = 90 ## Create the structure matrix for a hexagonal system explicitly: tags['unit_cell'] = np.array([[a,0.,0.], ## also called the structure matrix [np.cos(np.radians(gamma))*a,np.sin(np.radians(gamma))*a,0. ], [0.,0.,c] ]) print('structure matrix \n',np.round(tags['unit_cell'],3)) tags['elements'] = ['C']*4 tags['base'] = [[0,0,0], [0,0,1/2], [1/3,1/3,0], [2/3,2/3, 1/2]] print('elements \n',(tags['elements'])) print('base \n',np.round(tags['base'],3)) # + #alternatively with the function "structure_by_name" of the KinsCat library tags = ks.structure_by_name('Graphite') print('structure matrix [nm]\n',np.round(tags['unit_cell'],3)) print('elements \n',tags['elements']) print('base \n',np.round(tags['base'],3)) # - # ### Volume of Unit Cell # We will need the volume of the unit cell for unit conversions later. # # Volume of the parallelepiped (https://en.wikipedia.org/wiki/Triple_product) : # $\vec{a} \cdot \vec{b} \times \vec{c} = \det \begin{bmatrix} # a_1 & a_2 & a_3 \\ # b_1 & b_2 & b_3 \\ # c_1 & c_2 & c_3 \\ # \end{bmatrix} ={\rm det}\left(\vec{a},\vec{b},\vec{c}\right)$ # # We see that the structure matrix comes in handy for that calculation. tags['volume'] = v = np.linalg.det(tags['unit_cell']) print(f"volume of unit cell: {tags['volume']:.4f} nm\u00b3") # ### Vector Algebra in Unit Cell # We will use the linear algebra package of numpy (np.linalg) for our vector calculations. # # The length of a vector is called its norm. 
# # And the angle between two vectors is calculated by the dot product: $\vec{a} \cdot \vec{b} = \left\| \vec{a} \right\| \left\| \vec{b} \right\| \cos (\theta) $ # + length_b = np.linalg.norm(tags['unit_cell'][1]) print(f'length of second unit cell vector is {length_b:.3f} nm' ) gamma = np.arccos(np.dot(tags['unit_cell'][0]/length_b, tags['unit_cell'][1]/length_b)) print(f'angle between a and b is {np.degrees(gamma):.1f}\u00ba') # + [markdown] slideshow={"slide_type": "slide"} # ### Plot the unit cell # + slideshow={"slide_type": "-"} fig = plt.figure();ax = fig.add_subplot(111, projection='3d') # draw unit_cell a = tags['unit_cell'][0] b = tags['unit_cell'][1] c = tags['unit_cell'][2] corners = [[0,0,0], a, a+b, b, c, c+a, c+a+b, c+b] trace = [[0,1],[1,2],[2,3],[3,0], [0,4], [4,5], [5,6], [6,7], [7,4], [1,5], [2,6], [3,7]] for s, e in trace: ax.plot3D(*zip(corners[s], corners[e]), color="blue") # draw atoms baseA = list(np.dot(tags['base'], tags['unit_cell']) ) # convert to a list of carthesian coordinates #initialize lists and a dictionary base = [] elements = [] colors = ['red', 'blue', 'green'] coloring = {} # Add atoms that are 'on the other side' of the unit cell j = 0 for i, atom in enumerate(baseA): base.append(atom) if tags['elements'][i] not in elements: coloring[tags['elements'][i]] = colors[j] j += 1 elements.append(tags['elements'][i]) for corner in corners: if np.dot(atom,corner) < 1e-15: base.append(atom+corner) elements.append(tags['elements'][i]) print(coloring) base = np.array(base) for i, atom in enumerate(base): ax.scatter(atom[0], atom[1], atom[2], c=coloring[elements[i]], alpha = 0.75, s=2000) ## Matplotlib does not longer support ax.set_aspect('equal') for 3D !!! maximum_position = base.max()*1.05 ax.set_zlim( 0,maximum_position) ax.set_ylim( 0,maximum_position) ax.set_xlim( 0,maximum_position) # - ## alternatively we can use a function in KinsCat. tags['max_bond_length'] = 0.22 ks.plot_unitcell(tags) # ### May be with a few more atoms # # # + corners,balls, Z, bonds = ks.ball_and_stick(tags,extend=[3,3,1], max_bond_length = 0.22) fig = plt.figure();ax = fig.add_subplot(111, projection='3d') maximum_position = balls.max()*1.05 maximum_x = balls[:,0].max() maximum_y = balls[:,1].max() maximum_z = balls[:,2].max() balls = balls - [maximum_x/2,maximum_y/2,maximum_z/2] # draw unit_cell for x, y, z in corners: ax.plot3D( x-maximum_x/2,y-maximum_y/2,z-maximum_z/2, color="blue") # draw bonds for x, y, z in bonds: ax.plot3D( x-maximum_x/2,y-maximum_y/2,z-maximum_z/2, color="black", linewidth = 4)#, tube_radius=0.02) # draw atoms for i,atom in enumerate(balls): ax.scatter(atom[0],atom[1],atom[2], color=tuple(ks.jmol_colors [Z[i]]), alpha = 1.0, s=50) maximum_position = balls.max()*1.05 ax.set_proj_type('ortho') ax.set_zlim( -maximum_position/2,maximum_position/2) ax.set_ylim( -maximum_position/2,maximum_position/2) ax.set_xlim( -maximum_position/2,maximum_position/2) # - # Or with the KinsCat library tags['extend'] = [3,3,1] ks.plot_unitcell(tags) # ### Okay, the above plot is not very beautiful. # # If you use the **ase** or **mayavi** package, you can make nicer plots. 
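# As a hedged illustration of the **ase** route mentioned above (this cell is an addition, not part of the
# original notebook): the graphite structure held in `tags` can be wrapped in an `ase.Atoms` object and drawn
# with `ase.visualize.plot.plot_atoms`. It assumes `ase` is installed and that `tags` still holds the graphite
# cell from the cells above (structure matrix in nm, hence the factor of 10 to reach Angstrom).

# +
from ase import Atoms
from ase.visualize.plot import plot_atoms
import matplotlib.pyplot as plt
import numpy as np

graphite = Atoms(symbols=tags['elements'],                # ['C', 'C', 'C', 'C']
                 scaled_positions=tags['base'],           # fractional coordinates of the base
                 cell=np.array(tags['unit_cell']) * 10.,  # ase expects Angstrom, the tags are in nm
                 pbc=True)

fig, ax = plt.subplots()
plot_atoms(graphite.repeat((3, 3, 1)), ax, radii=0.3, rotation='10x,10y,0z')
ax.set_axis_off()
# -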
# # Please see: # [Plot Unit Cell with Other Packages](Plot_UnitCell.ipynb) # # ## Reciprocal Lattice # The unit cell in reciprocal space # + reciprocal_lattice = np.linalg.inv(tags['unit_cell']).T # transposed of inverted unit_cell tags['reciprocal_lattice'] = reciprocal_lattice print('reciprocal lattice [1/nm]:') print(np.round(reciprocal_lattice,4)) # - # ### Reciprocal Lattice Vectors # From your crystallography book and lecture you are probably used to the following expression for the reciprocal lattice vectors ($\vec{a}^*, \vec{c}^*, \vec{c}^*$) # # $ \begin{align} # \vec{a}^* &= \frac{\vec{b} \times \vec{c}}{\vec{a} \cdot \left(\vec{b} \times \vec{c}\right)} \\ # \vec{b}^* &= \frac{\vec{c} \times \vec{a}}{\vec{b} \cdot \left(\vec{c} \times \vec{a}\right)} \\ # \vec{c}^* &= \frac{\vec{a} \times \vec{b}}{\vec{c} \cdot \left(\vec{a} \times \vec{b}\right)} # \end{align}$\ # # Where we see that the denominators of the above vector equations are the volume of the unit cell. # # In physics book, you will see an additional factor of 2$\pi$, which is generally omitted in materials science and microscopy. # + ## Now let's test whether this is really equivalent to the matrix expression above. a_recip = np.cross(b,c)/np.dot(a,np.cross(b,c)) print (np.round(a_recip,3)) b_recip = np.cross(c,a)/np.dot(a,np.cross(b,c)) print (np.round(b_recip,3)) c_recip = np.cross(a,b)/np.dot(a,np.cross(b,c)) print (np.round(c_recip,3)) print('Compare to:') print(np.round(reciprocal_lattice,3)) # - # ## Conclusion # # With these definitions we have everything to define a crystal and to analyse diffraction and imaging data of crystalline specimens. # # Crystallography deals with the application of symmetry and group theory of symmetry to crystal structures. # If you want to play around with symmetry and space groups, you can install the [spglib](http://atztogo.github.io/spglib/python-spglib.html#python-spglib). The spglib is especially helpfull for determination of reduced unit cells (the smallest possible ones, instead of the ones with the full symmetry). # # A number of common crystal structures are defined in the KinsCat libary under the function ''structure_by_name''. Try them out in this notebook. 
# As ususal the help function will show you the usage of a function: help(ks.structure_by_name) print(ks.crystal_data_base.keys()) # Now use one name of above structures and redo this notebook # ## Navigation # # - <font size = "3"> **Back Chapter 1: [Atomic Form Factor](CH2_02-Atomic_Form_Factor.ipynb)** </font> # - <font size = "3"> **Next: [Structure Factors](CH2_04-Structure_Factors.ipynb)** </font> # - <font size = "3"> **Chapter 2: [Diffraction](CH2_00-_Diffraction.ipynb)** </font> # - <font size = "3"> **List of Content: [Front](../_MSE672_Intro_TEM.ipynb)** </font> # ## Appendix: Read POSCAR # # Load and draw a crystal structure from a POSCAR file # # + #tags = ks.read_poscar() #tags['max_bond_length'] = 0.27 #tags['extend'] = [1,1,1] #tags[name ] #ks.plot_unitcell(tags) # - # ### The function # + from ase.io import read, write import pyTEMlib.file_tools as ft import os def Read_POSCAR(): # open file dialog to select poscar file file_name = ft.openfile_dialog('POSCAR (POSCAR*.txt);;All files (*)') #use ase package to read file base=os.path.basename(file_name) base_name = os.path.splitext(base)[0] crystal = read(file_name,format='vasp', parallel=False) ## make dictionary and plot structure (not essential for further notebook) tags = {} tags['unit_cell'] = crystal.cell*1e-1 tags['elements'] = crystal.get_chemical_symbols() tags['base'] = crystal.get_scaled_positions() tags['max_bond_length'] = 0.23 tags['name'] = base_name return tags # + #tags = ks.Read_POSCAR() #tags['extend'] = [2,2,1] #ks.plot_unitcell(tags) #plt.title(tags['name']) # -
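# A short added check (not part of the original notebook) of how the reciprocal lattice defined earlier is
# typically used: the spacing of a lattice plane (hkl) is d = 1/|g_hkl| with g_hkl = h a* + k b* + l c*.
# It assumes `tags['reciprocal_lattice']` from the graphite cells above, given in 1/nm.

# +
hkl = np.array([0, 0, 2])                          # basal plane of graphite, written as (002) here
g_hkl = np.dot(hkl, tags['reciprocal_lattice'])    # rows of the reciprocal lattice are a*, b*, c*
d_hkl = 1 / np.linalg.norm(g_hkl)
print(f'd(002) of graphite: {d_hkl:.4f} nm')       # equals c/2, about 0.335 nm
# -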
docs/doctrees/nbsphinx/Diffraction/CH2_03-Basic_Crystallography.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # %matplotlib inline import seaborn import numpy, scipy, matplotlib.pyplot as plt, sklearn, librosa, mir_eval, urllib, IPython.display, stanford_mir plt.rcParams['figure.figsize'] = (14,5) # [&larr; Back to Index](index.html) # # Exercise: Instrument Classification using K-NN # This exercise is loosely based upon "Lab 1" from previous MIR workshops ([2010](https://ccrma.stanford.edu/workshops/mir2010/Lab1_2010.pdf)). # For more on K-NN, see the [notebook on K-NN](knn.ipynb). # For help from a similar exercise, [follow the steps in the feature sonification exercise](feature_sonification.ipynb#Step-1:-Retrieve-Audio) first. # ## Goals # 1. Extract spectral features from an audio signal. # 2. Train a K-Nearest Neighbor classifier. # 3. Use the classifier to classify beats in a drum loop. # ## Step 1: Retrieve Audio, Detect Onsets, and Segment # Download the file `simple_loop.wav` onto your local machine. # + filename = 'simple_loop.wav' # urllib.urlretrieve? # - # Load the audio file: # + # librosa.load? # - # Play the audio file: # + # IPython.display.Audio? # - # Detect onsets: # + # librosa.onset.onset_detect? # - # Convert onsets from frames to seconds (and samples): # + # librosa.frames_to_time? # + # librosa.frames_to_samples? # - # Listen to a click track, with clicks located at each onset, plus the original audio: # + # mir_eval.sonify.clicks? # + # IPython.display.Audio? # - # ## Step 2: Extract Features # For each segment, compute the zero crossing rate and spectral centroid. # + # librosa.zero_crossings? # + # librosa.feature.spectral_centroid? # - # Scale the features to be in the range [-1, 1]: # + # sklearn.preprocessing.MinMaxScaler? # + # sklearn.preprocessing.MinMaxScaler.fit_transform? # - # ## Step 3: Train K-NN Classifier # Use `stanford_mir.download_drum_samples` to download ten kick drum samples and ten snare drum samples. Each audio file contains a single drum hit at the beginning of the file. # + # stanford_mir.download_drum_samples? # - # For each audio file, extract one feature vector. Concatenate all of these feature vectors into one feature table. # + # numpy.concatenate? # - # ## Step 4: Run the Classifier # Create a K-NN classifer model object: # + # sklearn.neighbors.KNeighborsClassifier? # - # Train the classifier: # + # sklearn.neighbors.KNeighborsClassifier.fit? # - # Finally, run the classifier on the test input audio file: # + # sklearn.neighbors.KNeighborsClassifier.predict? # - # ## Step 5: Sonify the Classifier Output # Play a "beep" for each detected kick drum. Repeat for the snare drum. # + # mir_eval.sonify.clicks? # - # ## For Further Exploration # In addition to the features used above, extract the following features (see librosa docs on [feature extraction](http://librosa.github.io/librosa/feature.html)): # # - spectral centroid # - spectral spread # - spectral skewness # - spectral kurtosis # - spectral rolloff # - MFCCs # Re-train the classifier, and re-run the classifier over the test audio signal. Do the results change? # Repeat the steps above for more audio files. # [&larr; Back to Index](index.html)
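# The exercise above only hints at each call through `librosa...?` placeholders. The cell below is one
# possible, hedged sketch of the core feature-extraction and classification loop, added for illustration.
# It assumes `simple_loop.wav` exists locally and uses current librosa / scikit-learn keyword arguments;
# `train_features` and `train_labels` stand for the labelled kick/snare data that Step 3 asks you to build.

# +
import numpy as np
import librosa
import sklearn.preprocessing
import sklearn.neighbors

x, sr = librosa.load('simple_loop.wav')

# Step 1: segment at detected onsets
onset_frames = librosa.onset.onset_detect(y=x, sr=sr)
onset_samples = librosa.frames_to_samples(onset_frames)

# Step 2: one feature vector (zero-crossing count, mean spectral centroid) per 100 ms segment
def extract_features(segment, sr):
    zcr = librosa.zero_crossings(segment).sum()
    centroid = librosa.feature.spectral_centroid(y=segment, sr=sr).mean()
    return [zcr, centroid]

features = np.array([extract_features(x[start:start + int(0.100 * sr)], sr)
                     for start in onset_samples])

# Scale the features to [-1, 1]
scaler = sklearn.preprocessing.MinMaxScaler(feature_range=(-1, 1))
features_scaled = scaler.fit_transform(features)

# Steps 3-4: fit K-NN on the drum-sample features, then classify each detected beat
# model = sklearn.neighbors.KNeighborsClassifier(n_neighbors=1)
# model.fit(train_features, train_labels)
# predicted = model.predict(features_scaled)
# -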
knn_instrument_classification.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # + [markdown] slideshow={"slide_type": "slide"} # # Deep Learning Bootcamp November 2017, GPU Computing for Data Scientists # # <img src="../images/bcamp.png" align="center"> # # ## 18 PyTorch NUMER.AI Binary Classification using BCELoss # # Web: https://www.meetup.com/Tel-Aviv-Deep-Learning-Bootcamp/events/241762893/ # # Notebooks: <a href="https://github.com/QuantScientist/Data-Science-PyCUDA-GPU"> On GitHub</a> # # *<NAME>* # # <img src="../images/pt.jpg" width="35%" align="center"> # # # ### Data # - Download from https://numer.ai/leaderboard # # <img src="../images/Numerai.png" width="35%" align="center"> # + [markdown] slideshow={"slide_type": "slide"} # # PyTorch Imports # # + slideshow={"slide_type": "-"} # # !pip install pycuda # %reset -f # # %%timeit import torch from torch.autograd import Variable import numpy as np import pandas import numpy as np import pandas as pd from sklearn import cross_validation from sklearn import metrics from sklearn.metrics import roc_auc_score, log_loss, roc_auc_score, roc_curve, auc import matplotlib.pyplot as plt from sklearn import cross_validation from sklearn import metrics from sklearn.metrics import roc_auc_score, log_loss, roc_auc_score, roc_curve, auc from sklearn.cross_validation import StratifiedKFold, ShuffleSplit, cross_val_score, train_test_split import logging import numpy import numpy as np from __future__ import print_function from __future__ import division import math import numpy as np import matplotlib.pyplot as plt # %matplotlib inline import pandas as pd import os import torch from torch.utils.data.dataset import Dataset from torch.utils.data import DataLoader from torchvision import transforms from torch import nn import torch.nn.functional as F import torch.optim as optim from torch.autograd import Variable from sklearn.preprocessing import MultiLabelBinarizer import time from sklearn.preprocessing import PolynomialFeatures import pandas as pd import numpy as np import scipy # %matplotlib inline from pylab import rcParams rcParams['figure.figsize'] = (6, 6) # setting default size of plots import tensorflow as tf print("tensorflow:" + tf.__version__) # !set "KERAS_BACKEND=tensorflow" import torch import sys print('__Python VERSION:', sys.version) print('__pyTorch VERSION:', torch.__version__) print('__CUDA VERSION') from subprocess import call print('__CUDNN VERSION:', torch.backends.cudnn.version()) print('__Number CUDA Devices:', torch.cuda.device_count()) print('__Devices') # # !pip install http://download.pytorch.org/whl/cu75/torch-0.2.0.post1-cp27-cp27mu-manylinux1_x86_64.whl # # !pip install torchvision # # ! pip install cv2 # import cv2 print("OS: ", sys.platform) print("Python: ", sys.version) print("PyTorch: ", torch.__version__) print("Numpy: ", np.__version__) handler=logging.basicConfig(level=logging.INFO) lgr = logging.getLogger(__name__) # %matplotlib inline # # !pip install psutil import psutil def cpuStats(): print(sys.version) print(psutil.cpu_percent()) print(psutil.virtual_memory()) # physical memory usage pid = os.getpid() py = psutil.Process(pid) memoryUse = py.memory_info()[0] / 2. 
** 30 # memory use in GB...I think print('memory GB:', memoryUse) cpuStats() # + [markdown] slideshow={"slide_type": "slide"} # # CUDA # + # # %%timeit use_cuda = torch.cuda.is_available() # use_cuda = False FloatTensor = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor LongTensor = torch.cuda.LongTensor if use_cuda else torch.LongTensor Tensor = FloatTensor lgr.info("USE CUDA=" + str (use_cuda)) # # ! watch -n 0.1 'ps f -o user,pgrp,pid,pcpu,pmem,start,time,command -p `lsof -n -w -t /dev/nvidia*`' # sudo apt-get install dstat #install dstat # sudo pip install nvidia-ml-py #install Python NVIDIA Management Library # wget https://raw.githubusercontent.com/datumbox/dstat/master/plugins/dstat_nvidia_gpu.py # sudo mv dstat_nvidia_gpu.py /usr/share/dstat/ #move file to the plugins directory of dstat # + [markdown] slideshow={"slide_type": "slide"} # # Global params # + # NN params DROPOUT_PROB = 0.75 N_EPOCHS = 50 BATCH_SIZE = 4 LR = 0.005 TEST_RATIO = .11 MOMENTUM= 0.9 PIN_MEMORY=use_cuda # True IF CUDA # Data params TARGET_VAR= 'target' TOURNAMENT_DATA_CSV = 'numerai_tournament_data.csv' TRAINING_DATA_CSV = 'numerai_training_data.csv' BASE_FOLDER = 'numerai/' # fix seed seed=17*19 np.random.seed(seed) torch.manual_seed(seed) if use_cuda: torch.cuda.manual_seed(seed) # + [markdown] slideshow={"slide_type": "slide"} # # Load a CSV file for Binary classification (numpy) # - # # %%timeit df_train = pd.read_csv(BASE_FOLDER + TRAINING_DATA_CSV) df_train.head(5) # + [markdown] slideshow={"slide_type": "slide"} # # Feature enrichement # - This would be usually not required when using NN # + def genBasicFeatures(inDF): print('Generating basic features ...') df_copy=inDF.copy(deep=True) magicNumber=21 feature_cols = list(inDF.columns) # feature_cols = list(inDF.columns[:-1]) # feature_cols=xgb_cols # target_col = inDF.columns[-1] inDF['x_mean'] = np.mean(df_copy.ix[:, 0:magicNumber], axis=1) inDF['x_median'] = np.median(df_copy.ix[:, 0:magicNumber], axis=1) inDF['x_std'] = np.std(df_copy.ix[:, 0:magicNumber], axis=1) inDF['x_skew'] = scipy.stats.skew(df_copy.ix[:, 0:magicNumber], axis=1) inDF['x_kurt'] = scipy.stats.kurtosis(df_copy.ix[:, 0:magicNumber], axis=1) inDF['x_var'] = np.var(df_copy.ix[:, 0:magicNumber], axis=1) inDF['x_max'] = np.max(df_copy.ix[:, 0:magicNumber], axis=1) inDF['x_min'] = np.min(df_copy.ix[:, 0:magicNumber], axis=1) # http://stackoverflow.com/questions/16236684/apply-pandas-function-to-column-to-create-multiple-new-columns # inDF=inDF.merge(df_copy.ix[:, 0:magicNumber].apply(lambda row: NumerCommonML.enrichFeatures(row), axis=1), # left_index=True, right_index=True) print (inDF.head(1)) return inDF def addPolyFeatures(inDF, deg=2): print('Generating poly features ...') df_copy=inDF.copy(deep=True) poly=PolynomialFeatures(degree=deg) p_testX = poly.fit(df_copy) # AttributeError: 'PolynomialFeatures' object has no attribute 'get_feature_names' target_feature_names = ['x'.join(['{}^{}'.format(pair[0],pair[1]) for pair in tuple if pair[1]!=0]) for tuple in [zip(df_copy.columns,p) for p in poly.powers_]] df_copy = pd.DataFrame(p_testX.transform(df_copy),columns=target_feature_names) return df_copy # + [markdown] slideshow={"slide_type": "slide"} # # Train / Validation / Test Split # - Numerai provides a data set that is allready split into train, validation and test sets. 
# - # Train, Validation, Test Split def loadDataSplit(): df_train = pd.read_csv(BASE_FOLDER + TRAINING_DATA_CSV) # TOURNAMENT_DATA_CSV has both validation and test data provided by NumerAI df_test_valid = pd.read_csv(BASE_FOLDER + TOURNAMENT_DATA_CSV) answers_1_SINGLE = df_train[TARGET_VAR] df_train.drop(TARGET_VAR, axis=1,inplace=True) df_train.drop('id', axis=1,inplace=True) df_train.drop('era', axis=1,inplace=True) df_train.drop('data_type', axis=1,inplace=True) # Add polynomial features df_train=genBasicFeatures(df_train) # df_train = addPolyFeatures(df_train) df_train.to_csv(BASE_FOLDER + TRAINING_DATA_CSV + 'clean.csv', header=False, index = False) df_train= pd.read_csv(BASE_FOLDER + TRAINING_DATA_CSV + 'clean.csv', header=None, dtype=np.float32) df_train = pd.concat([df_train, answers_1_SINGLE], axis=1) feature_cols = list(df_train.columns[:-1]) # print (feature_cols) target_col = df_train.columns[-1] trainX, trainY = df_train[feature_cols], df_train[target_col] # TOURNAMENT_DATA_CSV has both validation and test data provided by NumerAI # Validation set df_validation_set=df_test_valid.loc[df_test_valid['data_type'] == 'validation'] df_validation_set=df_validation_set.copy(deep=True) answers_1_SINGLE_validation = df_validation_set[TARGET_VAR] df_validation_set.drop(TARGET_VAR, axis=1,inplace=True) df_validation_set.drop('id', axis=1,inplace=True) df_validation_set.drop('era', axis=1,inplace=True) df_validation_set.drop('data_type', axis=1,inplace=True) # Add polynomial features df_validation_set=genBasicFeatures(df_validation_set) # df_validation_set = addPolyFeatures(df_validation_set) df_validation_set.to_csv(BASE_FOLDER + TRAINING_DATA_CSV + '-validation-clean.csv', header=False, index = False) df_validation_set= pd.read_csv(BASE_FOLDER + TRAINING_DATA_CSV + '-validation-clean.csv', header=None, dtype=np.float32) df_validation_set = pd.concat([df_validation_set, answers_1_SINGLE_validation], axis=1) feature_cols = list(df_validation_set.columns[:-1]) target_col = df_validation_set.columns[-1] valX, valY = df_validation_set[feature_cols], df_validation_set[target_col] # Test set for submission (not labeled) df_test_set = pd.read_csv(BASE_FOLDER + TOURNAMENT_DATA_CSV) # df_test_set=df_test_set.loc[df_test_valid['data_type'] == 'live'] df_test_set=df_test_set.copy(deep=True) df_test_set.drop(TARGET_VAR, axis=1,inplace=True) tid_1_SINGLE = df_test_set['id'] df_test_set.drop('id', axis=1,inplace=True) df_test_set.drop('era', axis=1,inplace=True) df_test_set.drop('data_type', axis=1,inplace=True) # Add polynomial features df_test_set=genBasicFeatures(df_test_set) # df_test_set = addPolyFeatures(df_test_set) feature_cols = list(df_test_set.columns) # must be run here, we dont want the ID # print (feature_cols) df_test_set = pd.concat([tid_1_SINGLE, df_test_set], axis=1) testX = df_test_set[feature_cols].values return trainX, trainY, valX, valY, testX, df_test_set # + # # %%timeit trainX, trainY, valX, valY, testX, df_test_set = loadDataSplit() # X, y = loadDataSplit(999) # # Number of features for the input layer N_FEATURES=trainX.shape[1] # print (trainX.head(3)) # print (df_test_set.head(3)) print (trainX.shape) print (trainY.shape) print (valX.shape) print (valY.shape) print (testX.shape) print (df_test_set.shape) # + [markdown] slideshow={"slide_type": "slide"} # # Create PyTorch GPU tensors from numpy arrays # # - Note how we transfrom the np arrays # + # Convert the np arrays into the correct dimention and type # Note that BCEloss requires Float in X as well as in y def 
XnumpyToTensor(x_data_np): x_data_np = np.array(x_data_np.values, dtype=np.float32) print(x_data_np.shape) print(type(x_data_np)) if use_cuda: lgr.info ("Using the GPU") X_tensor = Variable(torch.from_numpy(x_data_np).cuda()) # Note the conversion for pytorch else: lgr.info ("Using the CPU") X_tensor = Variable(torch.from_numpy(x_data_np)) # Note the conversion for pytorch print(type(X_tensor.data)) # should be 'torch.cuda.FloatTensor' print(x_data_np.shape) print(type(x_data_np)) return X_tensor # Convert the np arrays into the correct dimention and type # Note that BCEloss requires Float in X as well as in y def YnumpyToTensor(y_data_np): y_data_np=y_data_np.reshape((y_data_np.shape[0],1)) # Must be reshaped for PyTorch! print(y_data_np.shape) print(type(y_data_np)) if use_cuda: lgr.info ("Using the GPU") # Y = Variable(torch.from_numpy(y_data_np).type(torch.LongTensor).cuda()) Y_tensor = Variable(torch.from_numpy(y_data_np)).type(torch.FloatTensor).cuda() # BCEloss requires Float else: lgr.info ("Using the CPU") # Y = Variable(torch.squeeze (torch.from_numpy(y_data_np).type(torch.LongTensor))) # Y_tensor = Variable(torch.from_numpy(y_data_np)).type(torch.FloatTensor) # BCEloss requires Float print(type(Y_tensor.data)) # should be 'torch.cuda.FloatTensor' print(y_data_np.shape) print(type(y_data_np)) return Y_tensor # + [markdown] slideshow={"slide_type": "slide"} # # The NN model # # ### MLP model # - A multilayer perceptron is a logistic regressor where instead of feeding the input to the logistic regression you insert a intermediate layer, called the hidden layer, that has a nonlinear activation function (usually tanh or sigmoid) . One can use many such hidden layers making the architecture deep. # # - Here we define a simple MLP structure. We map the input feature vector to a higher space (256), then later gradually decrease the dimension, and in the end into a 16-dimension space. Because we are calculating the probability of each genre independently, after the final affine layer we need to implement a sigmoid layer. # # ### Initial weights selection # # - There are many ways to select the initial weights to a neural network architecture. A common initialization scheme is random initialization, which sets the biases and weights of all the nodes in each hidden layer randomly. # # - Before starting the training process, an initial value is assigned to each variable. This is done by pure randomness, using for example a uniform or Gaussian distribution. But if we start with weights that are too small, the signal could decrease so much that it is too small to be useful. On the other side, when the parameters are initialized with high values, the signal can end up to explode while propagating through the network. # # - In consequence, a good initialization can have a radical effect on how fast the network will learn useful patterns.For this purpose, some best practices have been developed. One famous example used is **Xavier initialization**. Its formulation is based on the number of input and output neurons and uses sampling from a uniform distribution with zero mean and all biases set to zero. # # - In effect (according to theory) initializing the weights of the network to values that would be closer to the optimal, and therefore require less epochs to train. # # ### References: # * **`nninit.xavier_uniform(tensor, gain=1)`** - Fills `tensor` with values according to the method described in ["Understanding the difficulty of training deep feedforward neural networks" - Glorot, X. 
and <NAME>.](http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf), using a uniform distribution. # * **`nninit.xavier_normal(tensor, gain=1)`** - Fills `tensor` with values according to the method described in ["Understanding the difficulty of training deep feedforward neural networks" - <NAME>. and <NAME>.](http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf), using a normal distribution. # * **`nninit.kaiming_uniform(tensor, gain=1)`** - Fills `tensor` with values according to the method described in ["Delving deep into rectifiers: Surpassing human-level performance on ImageNet classification" - He, K. et al.](https://arxiv.org/abs/1502.01852) using a uniform distribution. # * **`nninit.kaiming_normal(tensor, gain=1)`** - Fills `tensor` with values according to the method described in ["Delving deep into rectifiers: Surpassing human-level performance on ImageNet classification" - He, K. et al.] # # + # p is the probability of being dropped in PyTorch # At each layer, DECREASE dropout dropout = torch.nn.Dropout(p=1 - (DROPOUT_PROB +0.20)) # class Net(torch.nn.Module): # def __init__(self, n_feature, n_hidden, n_output,initKernel='uniform'): # super(Net, self).__init__() # self.hidden = torch.nn.Linear(n_feature, n_hidden) # hidden layer # self.out = torch.nn.Linear(n_hidden, n_output) # output layer # # xavier initializer # if initKernel == 'uniform': # nn.init.xavier_uniform(self.hidden.weight, gain=np.sqrt(2.0)) # else: # nn.init.kaiming_normal(self.hidden.weight) # def forward(self, x): # x = F.relu(self.hidden(x)) # activation function for hidden layer # x = self.out(x) # return F.sigmoid(x) class Net2(nn.Module): def __init__(self, n_feature, n_hidden, n_output,initKernel='uniform'): super(Net2, self).__init__() self.dis = nn.Sequential( nn.Linear(n_feature, n_hidden), dropout, nn.LeakyReLU(0.1), nn.Linear(n_hidden, n_hidden), dropout, nn.LeakyReLU(0.1), nn.Linear(n_hidden, 1), dropout, nn.Sigmoid() ) def forward(self, x): x = self.dis(x) return x hiddenLayer1Size=1024 hiddenLayer2Size=int(hiddenLayer1Size/8) hiddenLayer3Size=int(hiddenLayer1Size/16) hiddenLayer4Size=int(hiddenLayer1Size/32) hiddenLayer5Size=int(hiddenLayer1Size/64) # # Hypothesis using sigmoid linear1=torch.nn.Linear(N_FEATURES, hiddenLayer1Size, bias=True) torch.nn.init.xavier_uniform(linear1.weight) linear2=torch.nn.Linear(hiddenLayer1Size, hiddenLayer2Size) torch.nn.init.xavier_uniform(linear2.weight) linear3=torch.nn.Linear(hiddenLayer2Size, hiddenLayer3Size) torch.nn.init.xavier_uniform(linear3.weight) linear4=torch.nn.Linear(hiddenLayer3Size, hiddenLayer4Size) torch.nn.init.xavier_uniform(linear4.weight) linear5=torch.nn.Linear(hiddenLayer4Size, hiddenLayer5Size) torch.nn.init.xavier_uniform(linear5.weight) linear6=torch.nn.Linear(hiddenLayer5Size, 1) torch.nn.init.xavier_uniform(linear6.weight) sigmoid = torch.nn.Sigmoid() tanh=torch.nn.Tanh() relu=torch.nn.LeakyReLU() net = torch.nn.Sequential(linear1,dropout,tanh,nn.BatchNorm1d(hiddenLayer1Size), linear2,dropout,tanh, linear3,dropout,relu, linear4,dropout,tanh, linear5,dropout,relu, linear6,sigmoid ) # net = Net(n_feature=N_FEATURES, n_hidden=1024, n_output=1) # define the network # net = Net2(n_feature=N_FEATURES, n_hidden=2048, n_output=1) # define the network lgr.info(net) # net architecture # + [markdown] slideshow={"slide_type": "slide"} # ## Print the full net architecture # + # See https://stackoverflow.com/questions/42480111/model-summary-in-pytorch/42616812 from torch.nn.modules.module import _addindent import torch import numpy 
as np def torch_summarize(model, show_weights=True, show_parameters=True): """Summarizes torch model by showing trainable parameters and weights.""" tmpstr = model.__class__.__name__ + ' (\n' for key, module in model._modules.items(): # if it contains layers let call it recursively to get params and weights if type(module) in [ torch.nn.modules.container.Container, torch.nn.modules.container.Sequential ]: modstr = torch_summarize(module) else: modstr = module.__repr__() modstr = _addindent(modstr, 2) params = sum([np.prod(p.size()) for p in module.parameters()]) weights = tuple([tuple(p.size()) for p in module.parameters()]) tmpstr += ' (' + key + '): ' + modstr if show_weights: tmpstr += ', weights={}'.format(weights) if show_parameters: tmpstr += ', parameters={}'.format(params) tmpstr += '\n' tmpstr = tmpstr + ')' return tmpstr lgr.info(torch_summarize(net)) # + [markdown] slideshow={"slide_type": "slide"} # # Loss and Optimizer # # ### BCELoss # - In addition, we will calculate the binary cross entropy loss (BCELoss). Luckily we have one loss function already present. For details please checkout http://pytorch.org/docs/master/nn.html. # # - ** NOTE this BCELoss may not be numerical stable, although it's fine during my training process.** # # ### Optimization # # - if return F.log_softmax(x) then loss = F.nll_loss(output, target) (MNIST) # - print(nn.BCEWithLogitsLoss()(o, t)) is equivalent to print(nn.BCELoss()(sigmoid(o), t)) # + # # %%timeit # optimizer = torch.optim.SGD(net.parameters(), lr=0.02) # optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9) # optimizer = optim.SGD(net.parameters(), lr=LR, momentum=MOMENTUM, weight_decay=5e-4) #L2 regularization can easily be added to the entire model via the optimizer # optimizer = torch.optim.Adam(net.parameters(), lr=LR,weight_decay=5e-4) # L2 regularization optimizer = torch.optim.Adagrad(net.parameters(), lr=1e-6, weight_decay=5e-4) # loss_func = torch.nn.CrossEntropyLoss() # the target label is NOT an one-hotted # loss_func = torch.nn.NLLLoss() loss_func=torch.nn.BCELoss() # Binary cross entropy: http://pytorch.org/docs/nn.html#bceloss # http://andersonjo.github.io/artificial-intelligence/2017/01/07/Cost-Functions/ # use_cuda=True if use_cuda: lgr.info ("Using the GPU") net.cuda() loss_func.cuda() # cudnn.benchmark = True #net = torch.nn.DataParallel(net, device_ids=range(torch.cuda.device_count())) lgr.info (optimizer) lgr.info (loss_func) # + [markdown] slideshow={"slide_type": "slide"} # # Training in batches + Measuring the performance of the deep learning model # + import time start_time = time.time() epochs=1500 all_losses = [] X_tensor_train= XnumpyToTensor(trainX) Y_tensor_train= YnumpyToTensor(trainY) print(type(X_tensor_train.data), type(Y_tensor_train.data)) # should be 'torch.cuda.FloatTensor' # From here onwards, we must only use PyTorch Tensors for step in range(epochs): # net.train() # output = F.sigmoid(net(input)) # loss = crit(output, target) out = net(X_tensor_train) # input x and predict based on x cost = loss_func(out, Y_tensor_train) # must be (1. nn output, 2. target), the target label is NOT one-hotted optimizer.zero_grad() # clear gradients for next train cost.backward() # backpropagation, compute gradients optimizer.step() # apply gradients if step % 150 == 0: loss = cost.data[0] all_losses.append(loss) print(step, cost.data.cpu().numpy()) # RuntimeError: can't convert CUDA tensor to numpy (it doesn't support GPU arrays). # Use .cpu() to move the tensor to host memory first. 
# prediction = torch.max(F.softmax(out), 1)[1] # _, prediction = torch.max(out, 1) prediction = (net(X_tensor_train).data).float() # probabilities # prediction = (net(X_tensor).data > 0.5).float() # zero or one # print ("Pred:" + str (prediction)) # Pred:Variable containing: 0 or 1 # pred_y = prediction.data.numpy().squeeze() # RuntimeError: can't convert CUDA tensor to numpy (it doesn't support GPU arrays). pred_y = prediction.cpu().numpy().squeeze() target_y = Y_tensor_train.cpu().data.numpy() tu = ((pred_y == target_y).mean(),log_loss(target_y, pred_y),roc_auc_score(target_y,pred_y )) print ('ACC={}, LOG_LOSS={}, ROC_AUC={} '.format(*tu)) end_time = time.time() print ('{} {:6.3f} seconds'.format('GPU:', end_time-start_time)) # %matplotlib inline import matplotlib.pyplot as plt plt.plot(all_losses) plt.show() false_positive_rate, true_positive_rate, thresholds = roc_curve(target_y,pred_y) roc_auc = auc(false_positive_rate, true_positive_rate) plt.title('LOG_LOSS=' + str(log_loss(target_y, pred_y))) plt.plot(false_positive_rate, true_positive_rate, 'b', label='AUC = %0.6f' % roc_auc) plt.legend(loc='lower right') plt.plot([0, 1], [0, 1], 'r--') plt.xlim([-0.1, 1.2]) plt.ylim([-0.1, 1.2]) plt.ylabel('True Positive Rate') plt.xlabel('False Positive Rate') plt.show() # + [markdown] slideshow={"slide_type": "slide"} # # Performance of the deep learning model on the Validation set # + net.eval() # Validation data print (valX.shape) print (valY.shape) X_tensor_val= XnumpyToTensor(valX) Y_tensor_val= YnumpyToTensor(valY) print(type(X_tensor_val.data), type(Y_tensor_val.data)) # should be 'torch.cuda.FloatTensor' predicted_val = (net(X_tensor_val).data).float() # probabilities # predicted_val = (net(X_tensor_val).data > 0.5).float() # zero or one pred_y = predicted_val.cpu().numpy() target_y = Y_tensor_val.cpu().data.numpy() print (type(pred_y)) print (type(target_y)) tu = (str ((pred_y == target_y).mean()),log_loss(target_y, pred_y),roc_auc_score(target_y,pred_y )) print ('\n') print ('acc={} log_loss={} roc_auc={} '.format(*tu)) false_positive_rate, true_positive_rate, thresholds = roc_curve(target_y,pred_y) roc_auc = auc(false_positive_rate, true_positive_rate) plt.title('LOG_LOSS=' + str(log_loss(target_y, pred_y))) plt.plot(false_positive_rate, true_positive_rate, 'b', label='AUC = %0.6f' % roc_auc) plt.legend(loc='lower right') plt.plot([0, 1], [0, 1], 'r--') plt.xlim([-0.1, 1.2]) plt.ylim([-0.1, 1.2]) plt.ylabel('True Positive Rate') plt.xlabel('False Positive Rate') plt.show() # print (pred_y) # + [markdown] slideshow={"slide_type": "slide"} # # Submission on Test set # + # testX, df_test_set # df[df.columns.difference(['b'])] # trainX, trainY, valX, valY, testX, df_test_set = loadDataSplit() print (df_test_set.shape) columns = ['id', 'probability'] df_pred=pd.DataFrame(data=np.zeros((0,len(columns))), columns=columns) df_pred.id.astype(int) for index, row in df_test_set.iterrows(): rwo_no_id=row.drop('id') # print (rwo_no_id.values) x_data_np = np.array(rwo_no_id.values, dtype=np.float32) if use_cuda: X_tensor_test = Variable(torch.from_numpy(x_data_np).cuda()) # Note the conversion for pytorch else: X_tensor_test = Variable(torch.from_numpy(x_data_np)) # Note the conversion for pytorch X_tensor_test=X_tensor_test.view(1, trainX.shape[1]) # does not work with 1d tensors predicted_val = (net(X_tensor_test).data).float() # probabilities p_test = predicted_val.cpu().numpy().item() # otherwise we get an array, we need a single float df_pred = df_pred.append({'id':row['id'].astype(int), 
'probability':p_test},ignore_index=True) # p_test = pd.DataFrame (p_test, columns=['probability']) # # df_pred = df_test_set.append(p_test, ignore_index=True) # df_pred = pd.concat([p_test, df_test_set], axis=0, ignore_index=True) # # # df_pred = pd.DataFrame({ # # # 'id': df_test_set['id'], # # # 'probability': p_test[:,1] # # # }) df_pred.head(5) # df_test_set = pd.concat([tid_1_SINGLE, df_test_set], axis=1) # + df_pred.id=df_pred.id.astype(int) def savePred(df_pred, loss): # csv_path = 'pred/p_{}_{}_{}.csv'.format(loss, name, (str(time.time()))) csv_path = 'pred/pred_{}_{}.csv'.format(loss, (str(time.time()))) df_pred.to_csv(csv_path, columns=('id', 'probability'), index=None) print (csv_path) savePred (df_pred, log_loss(target_y, pred_y)) # -
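# The loss discussion above states that BCELoss on sigmoid outputs can be numerically unstable and that
# nn.BCEWithLogitsLoss()(o, t) equals nn.BCELoss()(sigmoid(o), t). A tiny self-contained check of that claim,
# written against the current PyTorch API (so no `Variable` wrapper); the tensors here are made up:

# +
import torch
import torch.nn as nn

torch.manual_seed(0)
logits = torch.randn(8, 1)                        # raw model outputs ("o")
targets = torch.randint(0, 2, (8, 1)).float()     # binary labels ("t")

loss_a = nn.BCEWithLogitsLoss()(logits, targets)        # sigmoid folded into the loss (more stable)
loss_b = nn.BCELoss()(torch.sigmoid(logits), targets)   # explicit sigmoid, as in the notebook's net

print(loss_a.item(), loss_b.item())               # the two values agree up to floating-point error
# -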
day 02 PyTORCH and PyCUDA/PyTorch/18-PyTorch-NUMER.AI-Binary-Classification-BCELoss.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import pandas as pd from sklearn.cross_validation import * from sklearn.linear_model import * import numpy as np titanic = pd.read_csv(r"C:\Users\John\Desktop\ML_coding\train_titanic.csv") # Replace all the missing ages in the data with the median age titanic["Age"] = titanic["Age"].fillna(titanic["Age"].median()) print (titanic.describe()) # Replace all male and female genders with '0's and '1's respectively titanic.loc[titanic["Sex"] == "male", "Sex"] = 0 titanic.loc[titanic["Sex"] == "female", "Sex"] = 1 print (titanic.head()) print (titanic["Embarked"].unique()) # Replace all the empty port calls with S titanic["Embarked"] = titanic["Embarked"].fillna("S") print (titanic["Embarked"].unique()) # Replace the respective port calls with 0,1 and 2 titanic.loc[titanic["Embarked"] == "S", "Embarked"] = 0 titanic.loc[titanic["Embarked"] == "C", "Embarked"] = 1 titanic.loc[titanic["Embarked"] == "Q", "Embarked"] = 2 # Using Scikit Learn to make linear-regression predictions on the target # Using these features for prediction predictors = ["Pclass", "Sex", "Age", "SibSp", "Parch", "Fare", "Embarked"] # Initialize the algorithm alg = LinearRegression() kf = KFold(titanic.shape[0], n_folds=3, random_state=1) predictions = [] for train, test in kf: train_predictors = (titanic[predictors].iloc[train, :]) # the features for training (x1, x2...xn) train_target = titanic["Survived"].iloc[train] # the predictive target (y) alg.fit(train_predictors, train_target) # finding the best fit for the target (using what? Gradient Descent?) test_predictions = alg.predict(titanic[predictors].iloc[test, :]) # predict based on the best fit produced by alg.fit predictions.append(test_predictions) # Evaluating error - i.e. 
checking against the actual list of survived/died predictions = np.concatenate(predictions) print (predictions) predictions[predictions > 0.5] = 1 predictions[predictions < 0.5] = 0 # Dividing the number of right predictions by the total count accuracy = np.count_nonzero(titanic["Survived"] == predictions)/titanic["Survived"].count() print (accuracy) # Using logistic regression to make predictions alg_logReg = LogisticRegression(random_state=1) # Compute the accuracy score for all the cross validation folds: returns an array of the scores from the 3 folds logReg_scores = cross_val_score(alg_logReg, titanic[predictors], titanic["Survived"], cv=3) print("Scores from logistic regression: " + str(logReg_scores.mean())) # Submitting the assignment with test.csv titanic_test = pd.read_csv(r"C:\Users\John\Desktop\ML_coding\test_titanic.csv") print(titanic_test.describe()) # fill in the blank entries in the age with the median age titanic_test["Age"] = titanic_test["Age"].fillna(titanic["Age"].median()) # replace genders with numbers titanic_test.loc[titanic_test["Sex"] == "male", "Sex"] = 0 titanic_test.loc[titanic_test["Sex"] == "female", "Sex"] = 1 # fill in missing values in embarked with 'S' titanic_test["Embarked"] = titanic_test["Embarked"].fillna("S") print(titanic_test["Embarked"].unique()) # replace Embarked initials with letters titanic_test.loc[titanic_test["Embarked"] == "S", "Embarked"] = 0 titanic_test.loc[titanic_test["Embarked"] == "C", "Embarked"] = 1 titanic_test.loc[titanic_test["Embarked"] == "Q", "Embarked"] = 2 # replace missing value in the Fare column titanic_test["Fare"] = titanic_test["Fare"].fillna(titanic_test["Fare"].median()) # use the training data to fit the logistic regression algo # use the algo to apply on the test set alg_logReg.fit(titanic[predictors], titanic["Survived"]) predictions = alg_logReg.predict(titanic_test[predictors]) # create a submission to kaggle submission = pd.DataFrame({ "PassengerId": titanic_test["PassengerId"], "Survived": predictions }) submission.to_csv("kaggle.csv", index=False) # -
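# The notebook above imports from `sklearn.cross_validation`, which has long been removed. A hedged sketch of
# the same cross-validated logistic-regression score with the current `sklearn.model_selection` API; it assumes
# the `titanic` DataFrame and `predictors` list prepared above:

# +
from sklearn.model_selection import KFold, cross_val_score
from sklearn.linear_model import LogisticRegression

alg_logReg = LogisticRegression(random_state=1, max_iter=1000)
kf = KFold(n_splits=3, shuffle=True, random_state=1)   # modern KFold takes n_splits instead of the row count

logReg_scores = cross_val_score(alg_logReg, titanic[predictors], titanic["Survived"], cv=kf)
print("Scores from logistic regression: " + str(logReg_scores.mean()))
# -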
Linear_and_Logistic_Regression_Titanic_Data.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import itertools import matplotlib.pyplot as plt import numpy as np import pandas as pd import pyrvt import pysra # %matplotlib inline # - # Increased figure sizes plt.rcParams["figure.dpi"] = 120 osc_freqs = np.logspace(-1, 2, num=91) # + peak_calcs = [ pyrvt.peak_calculators.CartwrightLonguetHiggins1956(), pyrvt.peak_calculators.Vanmarcke1975(), ] mags = [5, 6, 7] # Create CLH and V peak factor motions motions = [ pysra.motion.SourceTheoryRvtMotion( mag, 20, "wna", depth=8, stress_drop=100, peak_calculator=pc ) for pc, mag in itertools.product(peak_calcs, mags) ] # Create WR peak factor motions. motions += [ pysra.motion.SourceTheoryRvtMotion( mag, 20, "wna", depth=8, stress_drop=100, peak_calculator=pyrvt.peak_calculators.WangRathje2018("wna", mag, 20), ) for mag in mags ] # - for m in motions: m.calc_fourier_amps(freqs=np.logspace(-2, 2, 1024)) # + fig, axes = plt.subplots( ncols=2, sharex=True, sharey=False, subplot_kw={"xscale": "log"}, figsize=(10, 6), ) for m in motions: osc_resps = m.calc_osc_accels(osc_freqs) for ax in axes: ax.plot( osc_freqs, osc_resps, label=f"M{m.magnitude}, {m.peak_calculator.ABBREV}" ) axes[0].set(ylabel="5%-Damped, Spectral Accel. (g)", yscale="linear") axes[1].set(yscale="log") plt.setp(axes, xlabel="Frequency (Hz)") ax.legend() fig.tight_layout() # -
examples/example-10.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Drawing several plots in a single figure

# +
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt

fig = plt.figure(figsize=(20, 10))
ax1 = fig.add_subplot(2, 1, 1)
ax2 = fig.add_subplot(2, 1, 2)

ax1.plot(np.random.randn(100))
ax2.plot(np.random.randn(200).cumsum())
# -

# Setting the value ranges of the x and y axes
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(np.random.randn(50), 'b*--');
ax.set_xlabel('data')
ax.set_ylabel('value')
ax.set_title('My Sample')
ax.set_xlim([0, 30])
ax.set_ylim([-5, 2])

# ## Setting plot properties
#
# - title() : plot title
# - xlabel() : x-axis label
# - ylabel() : y-axis label
# - grid() : grid lines
# - text() : print text on the plot (handy as a simple tooltip-style annotation)

plt.plot(np.random.randn(10), 'g*:')

# ## x-axis tick spacing
# A figure is like a frame around the whole plot, and the frame size can be adjusted.
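# The section above names x-axis tick spacing and the figure "frame" size but ends before any code. A minimal
# sketch of both, assuming the numpy/matplotlib imports from the first cell (the tick step of 10 and the
# 8x3-inch figure size are arbitrary illustration values):

# +
from matplotlib.ticker import MultipleLocator

fig, ax = plt.subplots(figsize=(8, 3))           # figure size = the "frame" around the whole plot
ax.plot(np.random.randn(100).cumsum())

ax.xaxis.set_major_locator(MultipleLocator(10))  # a major tick every 10 x-units
ax.grid(True)                                    # grid lines at the tick positions
# -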
jupyter/dAnalysis/d_matplotlib_class/Ex04_subplot.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ### Jupyter Data Science Workflow
#
# #### From exploratory analysis to reproducible science
#
# ##### <NAME> : https://github.com/jakevdp/JupyterWorkflow

# #### Getting the data
#
# 1. "Fremont Bridge Hourly Bicycle Counts by Month October 2012 to present": hourly bicycle traffic over the Fremont Bridge
# 2. Copy the URL of that page and register it below

URL = 'https://data.seattle.gov/api/views/65db-xm6k/rows.csv?accessType=DOWNLOAD'

# Import the module needed to check that the data really is at that address, and download it
from urllib.request import urlretrieve
urlretrieve(URL, 'Fremont.csv')

# Read the file into a 'data' table for analysis and inspect its contents
import pandas as pd
data = pd.read_csv('Fremont.csv')
# data.shape  # confirm rows (48864) and cols (3)
data.head()  # list the first 5 rows (0-4); shows how the data is stored and what the columns are called

data = pd.read_csv('Fremont.csv', index_col = 'Date')  # replace the automatically assigned index with the Date column
data.head()

# Change how the dates are represented
data = pd.read_csv('Fremont.csv', index_col = 'Date', parse_dates = True)
data.head()

# Import the plotting support and make the plots display inside the notebook
# %matplotlib inline

data.plot()   # plot the full (hourly) range of the data
data.plot();  # the ';' suppresses the printed return value

# Resample the data to weekly ('W') intervals and plot; 'M' == monthly, 'Y' == yearly
data.resample('W').sum().plot();
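# A natural follow-up to the weekly resampling above is to smooth the daily totals. The sketch below is an
# added illustration and assumes the `data` table loaded from 'Fremont.csv' above; the 30-day window is an
# arbitrary choice.

# +
daily = data.resample('D').sum()                  # hourly counts -> daily totals
daily.rolling(30, center=True).mean().plot();     # 30-day centered rolling mean smooths day-to-day noise
# -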
jupyterworkflowEx1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/srirampattabiraman/Extensive-NLP-and-Deep-Learning/blob/main/session_5/Standford_Data_Augumentation.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + colab={"base_uri": "https://localhost:8080/"} id="GhTcA3TU-Sqx" outputId="6c927d5e-fc9d-4aa8-ba70-2234a650afaf" #We are mounting drive from google.colab import drive drive.mount('/content/drive') # + colab={"base_uri": "https://localhost:8080/"} id="Hyupqp5m-b3c" outputId="fe036f25-2df8-42d3-c450-ff1df39e5dff" ##Installing Necessary Packages # !pip install googletrans==3.1.0a0 # import googletrans.Translator # + id="oAFIctHk9xyX" import numpy as np import pandas as pd import torch import torch.nn as nn import torch.optim as optim import torchtext from torchtext.legacy import data from torchtext.legacy.datasets import IMDB from torchtext.legacy.data import Field, LabelField, BucketIterator import random import googletrans from googletrans import Translator from tqdm import tqdm import sys import os # + id="lTMPFB8t-yJw" def group_labels(label): if label in ["very negative", "negative"]: return "negative" elif label in ["positive", "very positive"]: return "positive" else: return "neutral" # + id="x6HP2SG196Hy" dictionary = pd.read_csv("drive/MyDrive/END_DATA/session4/stanfordSentimentTreebank/dictionary.txt", sep="|") dictionary.columns = ["phrase", "id"] dictionary = dictionary.set_index("id") sentiment_labels = pd.read_csv("drive/MyDrive/END_DATA/session4/stanfordSentimentTreebank/sentiment_labels.txt", sep="|") sentiment_labels.columns = ["id", "sentiment"] sentiment_labels = sentiment_labels.set_index("id") phrase_sentiments = dictionary.join(sentiment_labels) phrase_sentiments["fine"] = pd.cut(phrase_sentiments.sentiment, [0, 0.2, 0.4, 0.6, 0.8, 1.0], include_lowest=True, labels=["very negative", "negative", "neutral", "positive", "very positive"]) phrase_sentiments["coarse"] = phrase_sentiments.fine.apply(group_labels) # + id="P2d00w1NALfQ" phrase_sentiments = phrase_sentiments.reset_index(level=0) # + id="9ugD3VmCAN3v" sentences = pd.read_csv("drive/MyDrive/END_DATA/session4/stanfordSentimentTreebank/datasetSentences.txt", index_col="sentence_index", sep="\t") splits = pd.read_csv("drive/MyDrive/END_DATA/session4/stanfordSentimentTreebank/datasetSplit.txt", index_col="sentence_index") sentence_partitions = sentences.join(splits) # + id="glqaoBmGAzQn" data = sentence_partitions.join(phrase_sentiments.set_index("phrase"), on="sentence") data["splitset_label"] = data["splitset_label"].fillna(1).astype(int) split_data = data.groupby("splitset_label") # + id="AS1axIzTBmev" def back_translate(sequence,lab, PROB = 1): languages = ['en', 'fr', 'th', 'tr', 'ur', 'ru', 'bg', 'de', 'ar', 'zh-cn', 'hi', 'sw', 'vi', 'es', 'el'] #instantiate translator translator = Translator() #store original language so we can convert back org_lang = translator.detect(sequence).lang #randomly choose language to translate sequence to random_lang = np.random.choice([lang for lang in languages if lang is not org_lang]) #print(random_lang) if org_lang in languages: #translate to new language and back to original translated = translator.translate(sequence, dest = random_lang).text #translate back 
to original language translated_back = translator.translate(translated, dest = org_lang).text #print(translated,translated_back) #apply with certain probability if np.random.uniform(0, 1) <= PROB: output_sequence = translated_back else: output_sequence = sequence #if detected language not in our list of languages, do nothing else: output_sequence = sequence return output_sequence,lab # + id="bIaQbISZBqln" def random_deletion(words,lab, p=0.5): if len(words) == 1: # return if single word return words remaining = list(filter(lambda x: random.uniform(0,1) > p,words)) if len(remaining) == 0: # if not left, sample a random word return [random.choice(words)] ,lab else: return remaining,lab # + id="qcZOFntYBsd_" def random_swap(sentence,lab, n=5): length = range(len(sentence)) for _ in range(n): idx1, idx2 = random.sample(length, 2) sentence[idx1], sentence[idx2] = sentence[idx2], sentence[idx1] return sentence,lab # + id="-R7dHSkPBuKv" for splitset, partition in split_data: split_name = {1: "train", 2: "test", 3: "dev"}[splitset] filename = split_name+'.csv' del partition["splitset_label"] partition.to_csv("drive/MyDrive/END_DATA/session4/stanfordSentimentTreebank/"+filename, index=False) # + id="AnqIZphRCwWJ" def discretize_label(label): if label <= 0.05*100: return 'Class1' if label <= 0.1*100: return 'Class2' if label <= 0.15*100: return 'Class3' if label <= 0.2*100: return 'Class4' return 'Class5' # + id="-yRfyMXgCzoW" train_data = pd.read_csv('drive/MyDrive/END_DATA/session4/stanfordSentimentTreebank/train.csv') test_data = pd.read_csv('drive/MyDrive/END_DATA/session4/stanfordSentimentTreebank/test.csv') dev_data = pd.read_csv('drive/MyDrive/END_DATA/session4/stanfordSentimentTreebank/dev.csv') # + colab={"base_uri": "https://localhost:8080/"} id="MtstHzOEDe6H" outputId="b353ab46-4208-4bd8-973a-d0e853cd3977" Train_df = pd.concat([train_data,test_data]) Train_df.reset_index(inplace = True,drop = True) Train_df.shape # + id="qYi61g0oDhaG" def random_pick(df): for num in range (df.shape[0]): return df.sentence[num],df.sentiment[num] # + colab={"base_uri": "https://localhost:8080/", "height": 458} id="2Xsqaz2lDr--" outputId="d160a1e1-9f42-4e35-bf0c-2d0c574d77b7" df = Train_df.copy() pbar = tqdm(range(5492+1057,df.shape[0])) count = len(df) print (f'Before the shape was :{len(df)}' ) aug_data = [] aug_label = [] for i in pbar: word,val = random_pick(df) word1,val1 = back_translate(word,val) word,val = random_pick(df) word = word.split() word2,val2 = random_deletion(word,val) word2 = ' '.join(i for i in word2) word,val = random_pick(df) word = word.split() word3,val3 = random_swap(word,val) word3 = ' '.join(i for i in word3) ins = {'sentence':[word1,word2,word3],'label':[val1,val2,val3]} df2 = pd.DataFrame(ins) df2.to_csv('transforms_1.csv') pbar.set_description(desc = f'Loop:{i}') # + id="DJ_rHdYnDt5h"
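# A small usage sketch of the three augmenters defined above, applied to a made-up sentence and label
# (added for illustration; `back_translate` needs the googletrans setup and network access from the install
# cell, and `random_deletion` only returns a (words, label) pair for multi-word input, as used here).

# +
sample_sentence = "the movie was surprisingly good"
sample_label = 0.8

bt_text, bt_label = back_translate(sample_sentence, sample_label)                  # round trip through a random language
rd_words, rd_label = random_deletion(sample_sentence.split(), sample_label, p=0.3)
rs_words, rs_label = random_swap(sample_sentence.split(), sample_label, n=2)

print(bt_text)
print(" ".join(rd_words))
print(" ".join(rs_words))
# -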
session_5/Standford_Data_Augumentation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Writing Functions # This lecture discusses the mechanics of writing functions and how to encapsulate scripts as functions. # ## Running example # Write a script that determines if a number is prime. # Determine if `num` is prime import numpy as np # Determine if num is prime # This code has a bug. What is it? # Also, the efficiency of the code can be improved. How? num = 8 upper = int(np.sqrt(num)) + 1: if num % integer == 0: print("Not prime!") is_prime = False import pdb; pdb.set_trace() if is_prime: print("Is prime!") is_prime = True for integer in range(2, upper) # Extending the code to test if a list of numbers is prime a_list = [3, 6, 11] # How do we test all elements of the list? # ## Mechanics of Writing a Function # - Function definition line - How python knows that this is a function # - Function body - code that does the computation of the function # - Formal arguments - arguments passed to the function # - Return values - value returned to the caller # - Name scopes # Transforming the prime number calculation script into a function def check_prime(num): upper = int(np.sqrt(num)) + 1 for integer in range(2, upper): if num % integer == 0: return False return True check_prime(-3) # ## Exercise - Write a Function # - Write a function that finds two factors of a provided number if it is not prime. # - What did you name the function? # - What are the formal arguments? What did you name them? # - What value(s) are returned? # ## Crafting a Function # Crafting refers to what you want a function to do. # - What are the arguments? # - Should it refer to names outside the function definition? # - Should it return a value? # Problem: Write a function that finds all of the prime numbers less than a given value. # + # Function header: name, arguments # Function logic # return somethin # - # ## Exercise - Craft a Function # Create a function (or set of functions) that finds the prime factors of a number. # - What did you name the function? # - What are the arguments? # - What does the function return?
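# One possible sketch for the prime-factor exercise above (added for illustration; it is not the only
# reasonable design). It reuses the idea behind `check_prime`: repeatedly divide out the smallest remaining factor.

# +
def prime_factors(num):
    """Return the prime factors of num with repetition, e.g. 12 -> [2, 2, 3]."""
    factors = []
    divisor = 2
    while num > 1:
        while num % divisor == 0:
            factors.append(divisor)
            num //= divisor
        divisor += 1
    return factors

prime_factors(60)   # [2, 2, 3, 5]
# -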
PreFall2018/Python Functions.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import glob import os fileNames = glob.glob("*.png") print(fileNames) fileNames_sorted = sorted(fileNames,key=lambda x: int(os.path.splitext(x)[0])) print(fileNames_sorted) # -
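# +
# A slightly more tolerant sort key (an editorial sketch, assuming file names such as
# "frame10.png" could also appear): pull the first run of digits out of the stem and
# fall back to 0 when there is none.
import re

def numeric_key(name):
    digits = re.search(r"\d+", os.path.splitext(name)[0])
    return int(digits.group()) if digits else 0

print(sorted(fileNames, key=numeric_key))
# -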
TestAnimeImport/.ipynb_checkpoints/Untitled-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda root] # language: python # name: conda-root-py # --- # # Project 2 – First Draft # ## 0. Imports & Versions # + import numpy as np import pandas as pd import requests from bs4 import BeautifulSoup import pickle from datetime import datetime, timedelta import time import re import sys import matplotlib.pyplot as plt % matplotlib inline from selenium import webdriver from selenium.webdriver.common.keys import Keys import os chromedriver = "/Applications/chromedriver" os.environ["webdriver.chrome.driver"] = chromedriver # + list_of_imports = [('Numpy', np), ('Pandas', pd), # ('Beautiful Soup', BeautifulSoup), ('Selenium', webdriver) ] for mod in list_of_imports: print(f"{mod[0]}: {mod[1].__version__}") print("Python:", sys.version) # - # ## 1. Scraping def make_flight_url(date_str, flight_num, route_num): """ Takes date string, flight number, and route number, returns URL to search for a particular flight/route/day. --- IN: date string YYYYMMDD (str), flight_num (str), route_num (str) OUT: search URL (str) """ # base URL constructors base_url1 = 'http://flightaware.com/live/flight/' base_url2 = '/history/' base_url3 = '/KLGA/KORD' # merge vars with URL bases search_url = (base_url1 + flight_num + base_url2 + date_str + '/' + route_num + base_url3) return search_url def scrape_flight_soup(soup, flight_num, search_date): """ Scrapes pertinient information off single flight page, returns record for that flight (one record), returns None if no record for that day. --- IN: BS4 object of webpage, search date (datetime obj) OUT: four flight arrival times (list) """ date_str = datetime.strftime(search_date, "%Y%m%d") # is there a flight that day? names = [] for meta in soup.find_all('meta'): names.append(meta.get('name')) if not 'airline' in names: return 'No Flight' # was the flight canceled? # modify to look at scheduled arrival times, fill actual with 'C' # but for now... if soup.text.count('cancelled') == 32: return 'Canceled' # if flight arrived else: try: details = soup.find(class_="flightPageDetails") details_sub = details.find(attrs={"data-template": "live/flight/details"}) spans = list(details_sub.find_all('span')) arrival_times = [] fptd_divs = details_sub.find_all(class_="flightPageTimeData") # pulls from the four relevant indices of fptd_divs for i in [9,11,12,14]: time_str = fptd_divs[i].text.strip().split(' ')[0] arrival_times.append(time_str) arr_conv = map(lambda x: datetime.strptime(x, "%I:%M%p").time(), arrival_times) arrival_times = list(map(lambda x: datetime.combine(search_date, x), arr_conv)) return arrival_times except Exception as e: print(f"*** {flight_num}, {date_str}: ERROR: {e}") return None def scrape_fn(days, flight_num, route_num, df=None): """ Goes through a series of steps to gather data for a given flight number and route over a given length of time. Appends each record to a dataframe (provided or generated). 
--- IN: days, number of days to scrape, starting yesterday (int) flight_num, flight number as searched on FlightAware (str) route_num, route number as searched on FlightAware (str) df, pandas dataframe OUT: pandas dataframe """ # makes df if none passed if df is None: df = pd.DataFrame(columns=['airline', 'f_num', 'origin', 'dest', 'date', 'land_act', 'arr_act', 'land_sch', 'arr_sch']) # starts Selenium driver = webdriver.Chrome(chromedriver) today = datetime.now().date() no_flight_count = 0 # loop to search each date for d in range(days): time.sleep(np.random.uniform(1.0,2.0)) search_date = today - timedelta(days=d+1) date_str = datetime.strftime(search_date, "%Y%m%d") record_a = ['American', flight_num, 'LGA', 'ORD', search_date] flight_url = make_flight_url(date_str, flight_num, route_num) driver.get(flight_url) flight_soup = BeautifulSoup(driver.page_source, 'html.parser') record_b = scrape_flight_soup(flight_soup, flight_num, search_date) if record_b == None: continue elif record_b == 'Canceled': no_flight_count = 0 print(f"{flight_num}, {date_str}: canceled") continue elif record_b == 'No Flight': no_flight_count += 1 print(f"{flight_num}, {date_str}: no flight") if no_flight_count == 7: print(f"{flight_num}: 7 consecutive days of no flights as of {date_str}!") break else: no_flight_count = 0 record = record_a + record_b print(f"{flight_num}, {date_str}: flight data recorded") df.loc[len(df)] = record driver.close() return df def multiple_flights(days, flight_list): """ Finds all flights in a list of flight number/route number tuples over however many days provided and returns data in a concatenated dataframe. --- IN: number of days to search (int) list of flight number/route numbers (string tuples in list) OUT: dataframe with all flight info (pandas df) """ flight_df = pd.DataFrame(columns=['airline', 'f_num', 'origin', 'dest', 'date', 'land_act', 'arr_act', 'land_sch', 'arr_sch']) for fn, rn in flight_list: flight_df = scrape_fn(days, fn, rn, df=flight_df) return flight_df # + # All the LGA-ORD flights: flight_urls = [ "http://flightaware.com/live/flight/AAL321/history/20171003/0130Z/KLGA/KORD", "http://flightaware.com/live/flight/AAL371/history/20171003/0030Z/KLGA/KORD", "http://flightaware.com/live/flight/AAL359/history/20171002/2330Z/KLGA/KORD", "http://flightaware.com/live/flight/AAL361/history/20171002/2230Z/KLGA/KORD", "http://flightaware.com/live/flight/AAL383/history/20171002/2200Z/KLGA/KORD", "http://flightaware.com/live/flight/AAL345/history/20171002/2130Z/KLGA/KORD", "http://flightaware.com/live/flight/AAL343/history/20171002/2030Z/KLGA/KORD", "http://flightaware.com/live/flight/AAL358/history/20171002/1930Z/KLGA/KORD", "http://flightaware.com/live/flight/AAL187/history/20171002/1830Z/KLGA/KORD", "http://flightaware.com/live/flight/AAL332/history/20171002/1730Z/KLGA/KORD", "http://flightaware.com/live/flight/AAL390/history/20171002/1630Z/KLGA/KORD", "http://flightaware.com/live/flight/AAL337/history/20171002/1530Z/KLGA/KORD", "http://flightaware.com/live/flight/AAL328/history/20171002/1430Z/KLGA/KORD", "http://flightaware.com/live/flight/AAL373/history/20171002/1330Z/KLGA/KORD", "http://flightaware.com/live/flight/AAL1619/history/20171002/1230Z/KLGA/KORD", "http://flightaware.com/live/flight/AAL289/history/20171002/1130Z/KLGA/KORD", "http://flightaware.com/live/flight/AAL304/history/20171002/1030Z/KLGA/KORD" ] # + # if I have time, I'll write a function to get this by scraping. # but for now, the 'manual' way... 
flights = [] for url in flight_urls: fn_p = re.compile(r'AAL\d{3,4}') rn_p = re.compile(r'\d{4}Z') fn = fn_p.search(url).group() rn = rn_p.search(url).group() flights.append((fn,rn)) # - AAL321_df = scrape_fn(20, 'AAL321', '0130Z') AAL371_df = scrape_fn(365, 'AAL371', '0030Z') AAL358_df = scrape_fn(365, 'AAL358', '1930Z') AAL_df = multiple_flights(365, flights) # ## 2. MVP Exercise # #### Retrieving Pickles with open('../pickles/AAL321.pkl', 'rb') as picklefile: AAL321_df = pickle.load(picklefile) AAL321_df AAL321_df['lateness_land'] = AAL321_df['land_act'] - AAL321_df['land_sch'] AAL321_df['lateness_arr'] = AAL321_df['arr_act'] - AAL321_df['arr_sch'] AAL321_df AAL321_df.loc[6, 'lateness_land'].seconds jr = AAL321_df jr.head() jr['land_act_hour'] = jr['land_act'].apply(lambda x: x.hour) jr['land_sch_hour'] = jr['land_sch'].apply(lambda x: x.hour) jr['land_hr_diff'] = jr['land_act_hour'] - jr['land_sch_hour'] jr['land_hr_diff'] # + # AAL321_df['lateness_land'] = AAL321_df.apply(lambda x: x['lateness_land'].seconds, type=1) # - AAL321_df['land_act_t'] = AAL321_df.apply(lambda x: x['land_act'].time(), axis=1) AAL321_df['arr_act_t'] = AAL321_df.apply(lambda x: x['arr_act'].time(), axis=1) AAL321_df['land_sch_t'] = AAL321_df.apply(lambda x: x['land_sch'].time(), axis=1) AAL321_df['arr_sch_t'] = AAL321_df.apply(lambda x: x['arr_sch'].time(), axis=1) AAL321_df['weekday'] = AAL321_df.apply(lambda x: x['land_sch'].weekday(), axis=1) AAL321_df.drop(AAL321_df.index[12], inplace=True) # + x = AAL321_df.date y1 = AAL321_df.arr_act_t y2 = AAL321_df.arr_sch_t plt.figure(figsize=(12,8)) plt.plot(x,y1, label='Actual arrival') plt.plot(x,y2, label='Scheduled arrival') plt.legend() plt.title("Actual v. Scheduled Arrival (gate)"); # - # The only data currently available — AAL321 arrivals since late September. AAL321_sorted = AAL321_df.sort_values(by=['weekday', 'date']) # + x = AAL321_sorted.weekday y1 = AAL321_sorted.arr_act_t y2 = AAL321_sorted.arr_sch_t plt.figure(figsize=(12,8)) plt.plot(x,y1, "ro", label='Actual arrival') plt.plot(x,y2, "bo", label='Scheduled arrival') plt.legend() plt.title("Actual v. Scheduled Arrivals (gate), Days of Week"); # - # At first glance, some possible correlation between day of week and ETA, but not enough data to even begin to explore.
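# +
# A small follow-up sketch (editorial addition): express gate-arrival lateness in signed
# minutes (early arrivals come out negative) and average it by day of week, using only the
# columns already built above.
AAL321_df['lateness_arr_min'] = (
    pd.to_datetime(AAL321_df['arr_act']) - pd.to_datetime(AAL321_df['arr_sch'])
).dt.total_seconds() / 60

AAL321_df.groupby('weekday')['lateness_arr_min'].agg(['mean', 'count'])
# -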
2-delays-expected/code/Luther-Draft-MVP-Exercise.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
# ---

# Semiconductors
#
# Condensed Matter Physics
#
# University of Sydney
# April 2020
#
# ## Lecture 15
# Semiconductors and magnetism in solids
Lecture 15/Lecture 15.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ### Embedding figures inline in the notebook

# +
# %matplotlib inline
# %config InlineBackend.figure_format='retina'
import matplotlib.pyplot as plt
import numpy as np

x = np.linspace(0, 1, 300)
plt.figure(1)
for w in range(2, 6, 2):
    # sine envelope modulated by a higher-frequency component that depends on w
    plt.plot(x, np.sin(np.pi * x) * np.sin(np.pi * 2 * w * x))

x = np.linspace(0, 1, 1000) ** 1.5
plt.figure(2)
plt.hist(x)
# -

# #### Weight of the m-th AdaBoost classifier Gm(x) as a function of its weighted error rate
#
# The higher the error rate, the lower the classifier's weight; equivalently, more accurate classifiers receive larger weights.

x = np.linspace(0, 1, 300)
plt.figure(1)
y = 0.5 * np.log((1 - x) / x)
plt.plot(x, y)

# #### exp(-x)
#
# Using exp(-x) to map the classifier output onto a probability-like scale
#
#
z = np.exp(-y)
plt.plot(y, z)

# +
# view environment variables
# #%env

# +
# load xx.py loads the code into the notebook;
# run xx.py executes xx.py
# -

# # %cmd
# %pwd

# view help on the available magics
# %magic

# ### Left-aligning an image needs HTML; Markdown syntax alone cannot do it
# <img src='http://ozt5ysx10.bkt.clouddn.com/2018-04-24-14-24-43.png' align='left'/>

# ### Embedding a PDF
from IPython.display import IFrame
# IFrame("../SVM.pdf", width=1080, height=800)
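# Returning to the AdaBoost weight formula plotted earlier: a small numeric check
# (an editorial sketch; the error rates below are made up):
for err in [0.1, 0.3, 0.49]:
    alpha = 0.5 * np.log((1 - err) / err)
    print(f"error rate {err:.2f} -> classifier weight {alpha:.3f}")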
jupyter_ex.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: lammps # language: python # name: lammps # --- # # Calculation of Steinhardt parameters using pyscal # # Import statements import pyscal.core as pc import pyscal.crystal_structures as pcs from ase.build import bulk import numpy as np # Function to calculate and print q values. Structures will be created using both ase and pyscal crystal structures module. def calc_q_ase(element=None, reps=None): ob = bulk(element).repeat(reps) sys = pc.System() sys.read_inputfile(ob, format="ase") sys.find_neighbors(method="cutoff", cutoff=0) sys.calculate_q([4,6,8,10,12]) q = sys.get_qvals([4, 6, 8, 10, 12]) print("q4: %.2f, q6: %.2f, q8: %.2f, q10: %.2f, q12: %.2f"%(np.mean(q[0]), np.mean(q[1]), np.mean(q[2]), np.mean(q[3]), np.mean(q[4]))) def calc_q_pcs(structure=None, alat=None, reps=None, ca_ratio=1.633): atoms, box = pcs.make_crystal(structure=structure, lattice_constant=alat, repetitions=reps, ca_ratio=ca_ratio) sys = pc.System() sys.box = box sys.atoms = atoms sys.find_neighbors(method="cutoff", cutoff=0) sys.calculate_q([4,6,8,10,12]) q = sys.get_qvals([4, 6, 8, 10, 12]) print("q4: %.2f, q6: %.2f, q8: %.2f, q10: %.2f, q12: %.2f"%(np.mean(q[0]), np.mean(q[1]), np.mean(q[2]), np.mean(q[3]), np.mean(q[4]))) # ## BCC calc_q_ase(element="Nb", reps=(1,1,1)) calc_q_ase(element="Nb", reps=(2,2,2)) calc_q_ase(element="Nb", reps=(3,3,3)) calc_q_ase(element="Nb", reps=(3,3,2)) calc_q_pcs(structure="bcc", alat=4.00, reps=(1,1,1)) calc_q_pcs(structure="bcc", alat=4.00, reps=(2,2,2)) calc_q_pcs(structure="bcc", alat=4.00, reps=(3,3,3)) calc_q_pcs(structure="bcc", alat=4.00, reps=(3,3,2)) # ## FCC calc_q_ase(element="Al", reps=(1,1,1)) calc_q_ase(element="Al", reps=(2,2,2)) calc_q_ase(element="Al", reps=(3,3,3)) calc_q_ase(element="Al", reps=(3,3,2)) calc_q_pcs(structure="fcc", alat=4.00, reps=(1,1,1)) calc_q_pcs(structure="fcc", alat=4.00, reps=(2,2,2)) calc_q_pcs(structure="fcc", alat=4.00, reps=(3,3,3)) calc_q_pcs(structure="fcc", alat=4.00, reps=(3,3,2)) # ## HCP calc_q_ase(element="Mg", reps=(1,1,1)) calc_q_ase(element="Mg", reps=(2,2,2)) calc_q_ase(element="Mg", reps=(3,3,3)) calc_q_ase(element="Mg", reps=(3,3,2)) calc_q_pcs(structure="hcp", alat=4.00, reps=(1,1,1), ca_ratio=1.624) calc_q_pcs(structure="hcp", alat=4.00, reps=(2,2,2), ca_ratio=1.624) calc_q_pcs(structure="hcp", alat=4.00, reps=(3,3,3), ca_ratio=1.624) calc_q_pcs(structure="hcp", alat=4.00, reps=(3,3,2), ca_ratio=1.624) # ## Diamond calc_q_ase(element="Si", reps=(1,1,1)) calc_q_ase(element="Si", reps=(2,2,2)) calc_q_ase(element="Si", reps=(3,3,3)) calc_q_ase(element="Si", reps=(3,3,2)) calc_q_pcs(structure="diamond", alat=4.00, reps=(1,1,1)) calc_q_pcs(structure="diamond", alat=4.00, reps=(2,2,2)) calc_q_pcs(structure="diamond", alat=4.00, reps=(3,3,3)) calc_q_pcs(structure="diamond", alat=4.00, reps=(3,3,2))
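# ## Averaged Steinhardt parameters (sketch)
# An editorial extension: recent pyscal releases also expose neighbor-averaged q values
# (Lechner-Dellago style) through an `averaged` keyword on calculate_q/get_qvals. This
# assumes that keyword is available in the installed version; check before relying on it.
def calc_avg_q_pcs(structure=None, alat=None, reps=None):
    atoms, box = pcs.make_crystal(structure=structure, lattice_constant=alat, repetitions=reps)
    sys = pc.System()
    sys.box = box
    sys.atoms = atoms
    sys.find_neighbors(method="cutoff", cutoff=0)
    sys.calculate_q([4, 6], averaged=True)
    q4, q6 = sys.get_qvals([4, 6], averaged=True)
    print("avg q4: %.2f, avg q6: %.2f" % (np.mean(q4), np.mean(q6)))

calc_avg_q_pcs(structure="fcc", alat=4.00, reps=(3, 3, 3))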
notebooks/test.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] _cell_guid="02034a55-aa87-44cc-b3d3-b37fa7d1dc33" _uuid="01613113e137f697d47ca7c4dcf4b02b39ad155c" # ## Exploratory Data Analysis: Mental Health in Tech Survey (2014) # # "This dataset is from a 2014 survey that measures attitudes towards mental health and frequency of mental health disorders in the tech workplace." The data set and a description of the data are made available at: https://www.kaggle.com/osmi/mental-health-in-tech-survey. The data set was downloaded on February 4, 2018. # # #### Introduction # I'm conducting this analysis to practice my analysis and Python skills on a structured data set including loading, inspecting, wrangling, exploring, and drawing conclusions from data. I'll note my observations with each step in order to explain how I think through and approach the data set. I'll also note questions that arise at each step, although I won't explore all of them in this analysis. # # (I'm going to let myself do a bit more explaining throughout this analysis than I might normally do in other settings, because I hope that by erring on the side of too much detail, it will be helpful to others who are learning, too!) # # Kaggle suggested two questions to explore, which I will also address in my analysis. Kaggle's questions are: # # - How does the frequency of mental health illness and attitudes towards mental health vary by geographic location? # - What are the strongest predictors of mental health illness or certain attitudes towards mental health in the workplace? # # This analysis is a living document and I am continuing to deepen the exploration with each iteration. I will update this occasionally with the latest version. Thank you for reading! I welcome your questions or comments. -Liz # # # ### Table of Contents # - <a href="#step1">Step 1: Understand where the data came from</a> # - <a href="#step2">Step 2: Load the data and packages</a> # - <a href="#step3">Step 3: Inspect the data to begin understanding it/ data profiling</a> # - <a href="#step4">Step 4: Clean up the data as identified in Step 3</a> # - <a href="#step5">Step 5: Identify patterns in the data</a> # - <a href="#step6">Step 6: Answer Kaggle's questions about the data</a> # # <a id="step1"></a> # ### Step 1: Understand where the data came from # # a. How was it collected? # # - Name: "Annual Mental Health in Tech Survey" # - Sponsoring Organization: Open Sourcing Mental Illness (OSMI) # - Year: 2014 # - Description: "With over 1200 responses, we believe the 2014 Mental Health in Tech Survey was the largest survey done on mental health in the tech industry." Since then, OSMI has conducted two more surveys, 2016 and 2017. # - The original dataset is from Open Sourcing Mental Illness (OSMI) and can be downloaded here. (https://osmihelp.org/research/) # - We do not have access to information about their data collection methods. # # b/c. Is it a sample, and was it properly sampled? # - Yes, it is a sample. We don't have official information about the data collection method, but it appears *not* to be a random sample (the survey link is available on their website), so we can assume that it is not representative. # # d. Was the dataset transformed in any way? # - Unknown. # # e. Are there some known problems on the dataset? 
# - Based on the previous analyses of the data (posted on Kaggle), <strong>Gender</strong> and <strong>Age</strong> will need to be cleaned up. # - It does <em>not</em> appear that the survey was administered to a representative sample and so, we won't be able to generalize the findings to a broader population. # # <a id="step2"></a> # ### Step 2: Load the data and packages # + _kg_hide-input=false _cell_guid="87c98308-2f60-47b0-8007-777feadea29e" _uuid="7358d222da406e9bb090d83c4ef0b1f01bcab83d" _kg_hide-output=false # Import packages import sys import pandas as pd import seaborn as sns import numpy as np import matplotlib.pyplot as plt from matplotlib.pyplot import pie, axis, show # Display plots inline in the notebook # %matplotlib inline # Ignore warning related to pandas_profiling (https://github.com/pandas-profiling/pandas-profiling/issues/68) import warnings warnings.filterwarnings('ignore') # Display all dataframe columns in outputs (it has 27 columns, which is wider than the notebook) # This sets it up to dispaly with a horizontal scroll instead of hiding the middle columns pd.set_option('display.max_columns', 100) # Load in the dataset as mh mh = pd.read_csv("../input/survey.csv") # + [markdown] _cell_guid="bcf14f2c-a2d0-46c7-ac21-0616cac0624d" _uuid="1364017f0b2e4c92840482382b4d7fbdfb5d51f1" # <a id="step3"></a> # ### Step 3: Inspect the data to begin to understand it / Data Profiling # I am adapting the process outlined here: https://www.datacamp.com/community/tutorials/python-data-profiling # # Review the data types and sample data. What variables am I dealing with? Which variables need to be transformed in some way before they can be analyzed? After the code, I'll note some observations. # + _cell_guid="2cbcb1ea-b2af-45c9-8fce-4b00c213dab1" _uuid="9b429eb8b0d07d1bbd0bb8a1d135a430bed53491" # Display the data type for each variable mh.dtypes # + _cell_guid="15b014b4-7bce-4c7a-9a1a-2d917e94d27f" _uuid="4a9d3b2900adbd63975153afd6169e9451487530" # Display the first five rows of the data set mh.head() # + _cell_guid="4c9ac9d4-de2c-42d0-957b-71e495ab20ad" _uuid="2360d33dfda9393bba6a01bf9eacc74662ebcd66" # Display a random 10 rows from the data set mh.sample(10) # + _cell_guid="699bc657-2973-4a0e-bb9b-e64f0b87a844" _uuid="f873c590452c8bc6d846cfcae6f202b0f2af749d" # Get a quick overview of all of the variables using pandas_profiling import pandas_profiling pandas_profiling.ProfileReport(mh) # + [markdown] _cell_guid="97bc7f0f-e6e0-43d0-afd3-552c384d85b8" _uuid="2b973e81559b4cd877756318c68811137e0ac6c2" # #### Initial observations after running the code above: # # - Summary of data types in this dataset: # - <em>Numeric</em>: <strong>Age</strong> (integer) # - <em>Object</em>: Everything else # - <strong>Gender</strong> has 49 distinct responses. I'll group the responses into fewer categories. # - <strong>Age</strong> has some values that are too low or too high to be real ages. I'll need to decide what to do about them. # - The following variables have null values that I may need to address. I will write out my initial thoughts on them: # - <strong>state</strong>: This should be null only when <strong>country</strong> is something other than the US # - <strong>self_employed</strong>: There are 18 missing values. I'll need to investigate why. (Was it an optional question, perhaps?) # - <strong>work-interfere</strong>: There are 264 missing values. The question begins with "If you have a mental health condition,..." 
so I will assume that those who did not respond, did so because they do not have a mental health condition currently. This is an imperfect assumption since we can't know that for sure; and because some people may have felt the need to respond even if they don't have a mental health condition, if they did not feel comfortable leaving a question blank. # - <strong>comments</strong>: There are 1095 missing values. It was an optional text box so it's reasonable to expect that many (most) respondents would leave it blank. We may be able to learn something by doing a text analysis on the submitted comments: are there themes that come up in the comments that would inform the next version of the survey - for example, a popular theme might justify creating its own multiple-choice question next time. # # + _cell_guid="a19eb500-db4c-4918-a269-46a656624fa9" _uuid="4a28fefa960f73389540bbe02ffaee1028fbda67" # Explore the numeric variable Age to confirm whether all values are within a reasonable range and if any are NaN. print("'Age'") print("Minimum value: ", mh["Age"].min()) print("Maximum value: ", mh["Age"].max()) print("How many values are NaN?: ", pd.isnull(mh['Age']).sum()) # + _cell_guid="82f59e8e-6339-47f5-a0da-ebe29136b2b7" _uuid="923f820cd342acc19fe31c2aa0761891aa127322" # Learn more about the variable Gender, which appears not to be standardized with 49 distinct responses. # Count the number of distinct responses and list them: print("Count of distinct responses for Gender:", len(set(mh['Gender']))) print("Distinct responses for Gender:", set(mh['Gender'])) # + [markdown] _cell_guid="091757f1-8a59-4cdf-a6d0-89c478ea1246" _uuid="75f8057a72671892f1feabfaa34b7981425401b4" # #### Further observations on this data set # # - Variables to transform prior to analysis: # - Standardize all column headers to lower case (to prevent typos!) # - <strong>Timestamp</strong>: convert to datetime # - <strong>Age</strong>: remove outliers # - <strong>Gender</strong>: group into standardized categories # # #### Questions that have come up: # # - It will be important to decide how to handle NaN values. # - It would be helpful to understand the author's data collection methods and the extent to which they introduced bias. Without that information I will continue to assume that the results are not generalizable to the entire tech industry. All findings from this data set will be assumed to describe this sample, only. # - How does the employer's attitude toward mental health issues relate to employees' behavior, such as seeking treatment? # + [markdown] _cell_guid="aba98060-ff79-406b-a647-a8ddd6edd103" _uuid="66a131e7e2c6d02635d90af1a7860a8e069e2081" # <a id="step4"></a> # ### Step 4: Clean up the data as identified in Step 3 # # - Step 4A: Standardize all column headers to lower case (to prevent typos!) # - Step 4B: <strong>Timestamp</strong>: convert to datetime # - Step 4C: <strong>Age</strong>: remove outliers # - Step 4D: <strong>Gender</strong>: group into standardized categories # - Step 4E: Decide how to handle NaN data in the categorical columns. 
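# + [markdown]
# Before working through the steps above, a compact snapshot of the missing values per
# column (an editorial convenience; pandas_profiling reports the same information).

# +
mh.isnull().sum().sort_values(ascending=False).head(10)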
# # # #### 4A: Standardize all column headers to lower case # + _cell_guid="0229f0d5-4eac-46a8-b1ff-647591a2484c" _uuid="984c7ecac94372f69ff32da1010f509ec6c7367a" mh.columns = map(str.lower, mh.columns) # Confirm that all variable names are now lower case mh.dtypes # + [markdown] _cell_guid="99df77ab-3e99-4499-878d-61e663de7ea5" _uuid="5e00748653fdb8906fa9dd3d80b24e96aefbe854" # #### 4B: Convert "timestamp" to datetime # + _cell_guid="6d99e3db-9918-4b25-a9cf-31d0dc2b7f09" _uuid="eeae1c609a9507228ddd703f1b1dea25ac8064ff" # Convert "timestamp" data type from object to datetime mh['timestamp'] = pd.to_datetime(mh['timestamp']) # Confirm that it worked mh.dtypes # + [markdown] _cell_guid="0d28fd88-b287-4bbb-96eb-4d039c8bd946" _uuid="c065a63052f6ed981ffaa11e097ea89394d2ffab" # #### 4C: Remove Age outliers # # For the purpose of this analysis, I will simply replace any out-of-range age values with "NaN", rather than delete the rows from the data set. In a more detailed analysis it could be worth looking more closely at the rows with out-of-range ages and deciding whether it makes more sense to remove them. # # I adapted the process from Kaggler shivamnijhawan96: https://www.kaggle.com/shivamnijhawan96/mental-health-survey-exploration/. (Thanks!) # + _cell_guid="b09ef7e5-6687-457f-a05c-e2e5094e9977" _uuid="deea27ed1214745c55a4952a5b0661d3b66aa265" # Create a new column "age_clean" that replaces out-of-range ages with "NaN" # The oldest living person on record lived to be 122 years, 164 days (Jeanne Calment, 1875-1997) def clean_age(age): if age>=0 and age<=123: return age else: return np.nan mh['age_clean'] = mh['age'].apply(clean_age) # Check out the new column and make sure it looks right print("'Age'") print("Minimum value: ", mh["age_clean"].min()) print("Maximum value: ", mh["age_clean"].max()) print("How many values are NaN?: ", pd.isnull(mh['age_clean']).sum()) print("Frequency table for age_clean:") mh["age_clean"].value_counts().sort_index(0) # + [markdown] _cell_guid="bc3355ad-ee17-4998-a39c-eb44e0a10212" _uuid="afad35eadfc46f289a34e827a7c329ffbc881404" # #### View the age distribution of the sample # # # + _cell_guid="00f89c92-45a4-481f-81f5-e9b495d70f09" _uuid="29a04e6dc5b8628d1c722f4d945e9acd064f20d8" # Plot a histogram of the respondents' ages (remove any NaN values) sns.set(color_codes=True) sns.set_palette(sns.color_palette("muted")) sns.distplot(mh["age_clean"].dropna()); # + [markdown] _cell_guid="def36876-ccba-42cc-a151-52346e831196" _uuid="10f0eb59685a4ca189833bbffb53ff7e0b3b1263" # #### Observations # - Peak between mid-20s to about mid-40s.  # # #### Questions that come up: # - How does <strong>age</strong> relate to various behaviors and/or their awareness of their employer's attitude toward mental health? # + [markdown] _cell_guid="88cf8b77-86e5-4234-9e2f-a807dfd43668" _uuid="1934cdc20c051802b8cb74eea3b52915e93dd810" # #### 4D: Standardize gender into categories # # I have decided to consolidate the gender variable into the following categories, given the nature and quantities of the open-ended responses. # # - Male (cis) # - Male (trans) # - Female (cis) # - Female (trans) # - Other # # Once again, I adapted the process used by Kaggler shivamnijhawan96: https://www.kaggle.com/shivamnijhawan96/mental-health-survey-exploration. (Thanks!) # # ##### Suggestion for the future: # Future surveys might consider restructuring the sex/gender questions to reduce the frequency of "other" while remaining transgender-inclusive. 
One potential method is described in this study from May 2017: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5444783/) # + _cell_guid="6066975f-ed70-41cf-887f-877e362a9560" _uuid="6e554909b44f99cd1bb65376421891d5ea13802c" # Recategorize each response into these categories: Male (cis), Male (trans), Female (cis), Female (trans), Other # Standardize the capitalization in the responses by making all characters lower case mh['gender'] = mh['gender'].str.lower() # Make a copy of the column to preserve the original data. I will work with the new column going forward. mh['gender_new'] = mh['gender'] # Assign each type of response to one of the five categories male = ["male", "m", "male-ish", "maile", "mal", "male (cis)", "make", "male ", "man", "msle", "mail", "malr","cis man", "cis male"] trans_male = [None] trans_female = ["trans-female", "trans woman", "female (trans)"] female = ["cis female", "f", "female", "woman", "femake", "female ","cis-female/femme", "female (cis)", "femail"] other = ["non-binary", "nah", "all", "enby", "fluid", "genderqueer", "androgyne", "agender", "male leaning androgynous", "guy (-ish) ^_^", "neuter", "queer", "ostensibly male, unsure what that really means", "queer/she/they", "something kinda male?"] mh['gender_new'] = mh['gender_new'].apply(lambda x:"Male (Cis)" if x in male else x) mh['gender_new'] = mh['gender_new'].apply(lambda x:"Female (Cis)" if x in female else x) mh['gender_new'] = mh['gender_new'].apply(lambda x:"Female (Trans)" if x in trans_female else x) mh['gender_new'] = mh['gender_new'].apply(lambda x:"Male (Trans)" if x in trans_male else x) mh['gender_new'] = mh['gender_new'].apply(lambda x:"Other" if x in other else x) mh.drop(mh[mh.gender_new == 'p'].index, inplace=True) mh.drop(mh[mh.gender_new == 'a little about you'].index, inplace=True) # Make a crosstab to view the count for each of the new categories print("Gender:") print("How many values are NaN?: ", pd.isnull(mh['gender_new']).sum()) print("") print("Frequency table for gender_new:\n", mh["gender_new"].value_counts().sort_index(0)) print("") # Confirm that no entries were missed in the sorting above: display the size of the old and new variables, and of the entire dataset print("If we didn't miss any entries, these numbers will be the same:") print("gender =", len(mh['gender']), "values") print("gender_new =", len(mh['gender_new']), "values") print("Dataset Entries =", len(mh), "values") # + [markdown] _cell_guid="a45eb2cb-952f-485a-9717-4bcabf8bb8c1" _uuid="b208ba0755882246d535fc59bde6e10f9da8d0db" # #### View the gender distribution of the sample # + _cell_guid="03b36eeb-5de7-4254-9378-5d8801e086c8" _uuid="152b9cb4e1bfb72f569249971dd15268de7e075f" # Create a bar chart comparing gender mh['gender_new'].value_counts().plot(kind='bar') # + [markdown] _cell_guid="41e9f51b-525c-4fde-a134-18102b51d858" _uuid="045d0053fa2167d50553d877b0cfc1a6757bf767" # #### Observations # - The large majority are Male (Cis). This isn't a surprise since it's focused on the tech industry. # - There are no Male (Trans) entries, which I do find a little surprising. # - Almost certainly, at least some of the respondents that I grouped into "Other" would have chosen one of the other four options if they'd been given the option to self-sort. So this is an imperfect solution, but I still consider it a reasonable one given what we have available to us. 
# # #### Questions that come up: # - How does <strong>gender</strong> relate to various behaviors and/or their awareness of their employer's attitude toward mental health? # # #### 4E: Decide how to handle NaN data in the categorical columns # - There are three variables with many missing values. None of them points to a need to "fill in" the missing responses or delete the rows. More detail: # - <strong>state</strong>: the missing values are likely to occur in the instances where the country is not "United States". This can be confirmed below. # - <strong>work_intefere</strong>: the missing values indicate that the respondent skipped the question because it does not apply to them, which is important information. We can infer that missing responses means the respondent is not experiencing a mental illness at the moment. # - <strong>comments</strong>: this missing values indicate that the respondent didn't leave a comment, which was optional. # # ##### Questions that come up: # - How many values for <strong>state</strong> are blank when the <strong>country</strong> is "United States"? In other words, when <strong>state</strong> shouldn't be blank? # # <a id="step5"></a> # ### Step 5: Identify patterns in the data # # #### 5A: treatment v. work_interfere # # ##### Questions that come up: # - Is it possible to run an accurate correlation calculation with two categorical variables? Below I run it three ways. # + _cell_guid="18b869e6-9c95-46b5-bca1-30e6af9d0b9b" _uuid="3e541cc26d15d5f3a76d631d2ff7f2323d2506c1" # Create a new dataframe with the two columns and assign numbers in place of their categories df = pd.DataFrame({'treatment': mh['treatment'], 'work_interfere': mh['work_interfere']}, dtype='category') df_num = df.apply(lambda x: x.cat.codes) # Run a correlation calculation print("Pearson:", df_num.corr()) print("") print("Spearman:", df_num.corr('spearman')) print("") print("Kendall:", df_num.corr('kendall')) # + [markdown] _cell_guid="46323ae3-2ffb-4270-bf4d-7348a8fbcd7f" _uuid="29ed88e1c70ad9b8574285ba7b47b65e88fc8d68" # #### 5B: Chart: mental health benefits v. seeking treatment # # Whether an employee is seeking treatment for a mental health condition appears to be correlated with their knowledge of their employer's mental health benefits. # # ##### Questions that come up: # - How to determine if this is a statistically significant correlation? # + _cell_guid="1f0f6862-a4f9-4893-9f42-3615bf4c4438" _uuid="7ca81ba4b4ffd5202f9b07d6d3cb7bc87efc8241" plt.figure(figsize=(10,7)) sns.countplot(x="benefits", hue="treatment", hue_order = ["Yes", "No"], data=mh) plt.title("Does your employer provide mental health benefits?",fontsize=16) plt.suptitle("Seeking Treatment v. Work Benefits", fontsize=20) plt.xlabel("") plt.show() # + _cell_guid="11457a05-74c8-48a5-8e03-3a41c78e0b85" _uuid="6d7fc1ca91a2981d65ed6e20644fe831dc81d5fc" # Generate a chart comparing mental health benefits and treatment plt.figure(figsize=(10,7)) sns.countplot(x="treatment", hue="benefits", hue_order = ["Yes", "No", "Don't know"], data=mh) plt.suptitle("Seeking Treatment v. Work Benefits (Inverted)", fontsize=20) plt.title("Have you sought treatment for a mental health condition?",fontsize=16) plt.xlabel("") plt.ylabel("") plt.show() # + [markdown] _cell_guid="069f4df2-a323-40a8-9407-9f4dfe1225cf" _uuid="fa616aa9c6cd281e74ebe7749b445a9f503121cf" # #### 5C: Chart: family history v. seeking treatment # # There appears to be a pretty clear correlation between these two variables, too. 
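# + [markdown]
# An aside on the 5A/5B questions above (an editorial addition, not part of the original
# analysis): a common single-number summary of the association between two categorical
# variables is Cramér's V, derived from the chi-squared statistic.

# +
import scipy.stats as stats

def cramers_v(x, y):
    """Cramér's V and chi-squared p-value for two categorical pandas Series."""
    table = pd.crosstab(x, y)
    chi2, p, dof, expected = stats.chi2_contingency(table)
    n = table.values.sum()
    r, k = table.shape
    return np.sqrt((chi2 / n) / (min(r, k) - 1)), p

v, p = cramers_v(mh['treatment'], mh['work_interfere'].fillna('No response'))
print(f"Cramér's V: {v:.3f}, chi-squared p-value: {p:.4f}")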
# + _cell_guid="943d53f0-35f4-4aa2-aa57-e2d0ab22a022" _uuid="b4d9ba8677e6151502d9903fc6c46de230dfd794" plt.figure(figsize=(10,7)) sns.countplot(x="family_history", hue="treatment", hue_order = ["Yes", "No"], data=mh) plt.suptitle("Family History v. Seeking Treatment", fontsize=20) plt.title("Do you have a family history of mental illness?", fontsize=16) plt.xlabel("") plt.ylabel("") plt.show() # + [markdown] _cell_guid="aa5f4629-f9d6-460a-8cf9-837c6c619869" _uuid="93a29df0919dda75281f78b1df918e675bfc6902" # #### 5C.1 Chi-Squared Test of Independence # I would like to calculate whether these variables are statistically independent of each other. First, I will try a chi-squared test of independence. (Adapting the process from: http://hamelg.blogspot.com/2015/11/python-for-data-analysis-part-25-chi.html) # + _cell_guid="91424c1c-e403-41a7-a07e-d3ac053539fc" _uuid="6cd3cf8c479a77decde4169d8d3395ac0533db46" # Generate crosstabs of "family history" and "treatment" (the "observed" values) import scipy.stats as stats tab_famhist_treatment = pd.crosstab(mh["family_history"], mh["treatment"], margins = True) tab_famhist_treatment.columns = ["Treatment Yes","Treatment No","row_totals"] tab_famhist_treatment.index = ["Fam Hist Yes","Fam Hist No","col_totals"] observed = tab_famhist_treatment.iloc[0:2,0:2] # Get table without totals for later use tab_famhist_treatment # + _cell_guid="c3771022-e19f-4b8d-a658-ab2f0ade89ec" _uuid="414178eada039bd0e599a9f5aa8df827f9e939eb" # Generate the "expected" values to compare against our "observed" values expected = np.outer(tab_famhist_treatment["row_totals"][0:2], tab_famhist_treatment.loc["col_totals"][0:2]) / 1257 expected = pd.DataFrame(expected) expected.columns = ["Treatment Yes","Treatment No"] expected.index = ["Fam Hist Yes","Fam Hist No"] expected # + _cell_guid="82fde8ff-2f05-45a9-90f8-09362860211d" _uuid="34252998294f2c04acd1e33af1a5079469aa727f" # Run the Chi-Squared test chi_squared_stat = (((observed-expected)**2)/expected).sum().sum() print(chi_squared_stat) # Note: We call .sum() twice: once to get the column sums and a second time to # add the column sums together, returning the sum of the entire 2D table. # + _cell_guid="76999191-3674-46c7-b027-7b67663a8493" _uuid="524c2df92775f775827179754954e8798b93d072" crit = stats.chi2.ppf(q = 0.95, # Find the critical value for 95% confidence* df = 1) # * # *Note: The degrees of freedom for a test of independence equals the product of # the number of categories in each variable minus 1. In this case we have a 2x2 table # so df = 1x1 = 1. print("Critical value") print(crit) p_value = 1 - stats.chi2.cdf(x=chi_squared_stat, # Find the p-value df=1) print("P value") print(p_value) # + [markdown] _cell_guid="56ae8096-533f-43a7-b337-5b5e4d5d195a" _uuid="071bfc50fbf30317fe677f8b56b3199426c79447" # ##### Observation: # A P value of 0.0 does not seem right. I will go back to further investigate whether the chi-squared is appropriate and if I made mistakes in the code above. # + [markdown] _cell_guid="f2d9d3d5-7bad-4ffc-84c8-147c99d95dcb" _uuid="531653c8483bd24e1e4766ee76a0de1cc17e3a45" # <a id="step6"></a> # ### Step 6: Answer Kaggle's questions about the data # + [markdown] _cell_guid="a6fe0c20-3bb1-4613-b712-26f2b967b12c" _uuid="8c61f08e7c32c55336629e21514c99191dd59b8c" # #### 6A: How does the frequency of mental health illness vary by geographic location? # # I will explore the <strong>state</strong> data and its relationship to the frequency of mental health illness. 
A response to <strong>work_interfere</strong> will be inferred to mean that the respondent experienced a mental health condition at the time of their response. From a non-response I will infer the non-existence of a mental health condition. # # I will group the states into four regions according to the <a href="https://commons.wikimedia.org/w/index.php?curid=10687031" target="_blank">US Census Bureau</a> to make it a more manageable comparison. # + _cell_guid="7de43074-7912-4e2f-96f5-260ef562bc33" _uuid="bf2bb147c0e1df59a259a45d0633dbca4ced0e82" # Display the distinct countries represented in the data (quantity and names) print("Country Count =", len(set(mh['country']))) print("Country Names =", set(mh['country'])) # + _cell_guid="207e8ad4-bdd7-41ca-90c2-4d03c5c91b22" _uuid="75e13e3805e7e77324c8f3e0c7b8f64dfb6a1f91" # Display quantity and names of distinct countries represented in the data (quantity and names) print("State Count =", len(set(mh['state']))) print("State Names =", set(mh['state'])) print(mh['state'].describe()) # + _cell_guid="8594f72f-bd4d-4f7e-ac5e-be6498980fe7" _uuid="60d200c3a99598761500ecd6321df9c1e6012abd" # Create a frequency chart for "country" plt.figure(figsize=(10, 7)) sns.countplot(y='country', order = mh['country'].value_counts().index, data=mh) plt.title('Survey Responses by Country', fontsize=20) plt.xlabel('') plt.ylabel('') plt.show() # + _cell_guid="b3b8a149-388b-4312-9778-3ea7f53a99b2" _uuid="42db61ccae9d775a65f329002f9e8c833d219e95" #### Survey Responses by state total = float(len(mh)) plt.figure(figsize=(20, 7)) ax = sns.countplot(x='state', order = mh['state'].value_counts().index, data=mh) for p in ax.patches: height = p.get_height() ax.text(p.get_x()+p.get_width()/2., height + 3, '{:1.2f}'.format(height/total), ha="center") plt.title('Responses by State', fontsize=20) plt.xlabel('') plt.ylabel('') plt.show() # + _cell_guid="db0a5601-1c92-40f0-ae74-3b3fad36646c" _uuid="3abbe44ab48516769e9788203491b9614e02a0fa" # Define how to recategorize each state into one of the US Census Bureau regions: West, Midwest, South, Northeast # Mke a copy of the column to preserve the original data. Work with the new column going forward. 
mh['region'] = mh['state'] # Then, in the new column, assign each type of response to one of the new categories west = ["WA", "OR", "CA", "NV", "ID", "MT", "WY", "UT", "AZ", "NM", "CO"] midwest = ["ND", "SD", "NE", "KS", "MN", "IA", "MO", "WI", "IL", "IN", "OH", "MI"] northeast = ["ME", "NH", "VT", "MA", "CT", "RI", "NY", "PA", "NJ"] south = ["MD", "DE", "DC", "WV", "VA", "NC","SC", "GA", "FL", "KY", "TN", "AL", "MS", "AR", "LA", "OK", "TX"] mh['region'] = mh['region'].apply(lambda x:"West" if x in west else x) mh['region'] = mh['region'].apply(lambda x:"Midwest" if x in midwest else x) mh['region'] = mh['region'].apply(lambda x:"Northeast" if x in northeast else x) mh['region'] = mh['region'].apply(lambda x:"South" if x in south else x) # Make a crosstab to view the count for each of the new categories region_tab = pd.crosstab(index=mh["region"], columns="count") print(region_tab) # Confirm that we didn't miss any entries print("If we didn't miss anything, this will equal 1257:", len(mh['region'])) region_tab.plot(kind="bar", figsize=(20,7), stacked=True) # + _cell_guid="5d076bcf-e146-4582-9cf0-2f8d3fd6a258" _uuid="0be285bb8f8cb7c19703c90facd6c6e15934cb34" #### Survey Responses by region total = float(len(mh)) plt.figure(figsize=(20, 7)) ax = sns.countplot(x='region', order = mh['region'].value_counts().index, data=mh) for p in ax.patches: height = p.get_height() ax.text(p.get_x()+p.get_width()/2., height + 3, '{:1.2f}'.format(height/total), ha="center") plt.title('Responses by Region', fontsize=20) plt.xlabel('') plt.ylabel('') plt.show() # + _cell_guid="2e38b1f9-bacf-41d9-a3f4-373f78684dbd" _uuid="48e03e7d69edbecf88cba7ed688011df4d189622" plt.figure(figsize=(10,7)) sns.countplot(x="region", hue="work_interfere", hue_order = ["Never", "Rarely", "Sometimes", "Often"], data=mh) plt.suptitle("Work Interfere v. Region (U.S.)", fontsize=20) plt.title("Frequency of mental health conditions in the U.S. by region", fontsize=16) plt.xlabel("") plt.ylabel("") plt.show() # + _cell_guid="d9902d25-7920-4a59-be11-b0d385a0d2a9" _uuid="2205033248e87218d73b306fb60b0c5343d1891c" mh['work_interfere'] # + _cell_guid="1196d188-059d-48a1-8761-19e1fb9ff376" _uuid="567b56f56bf37ad1ea2654a83dd6ec5e884341a9" # Convert the work_interfere responses into a new variable, # Make a copy of the column to preserve the original data. Work with the new column going forward. mh['ill'] = mh['work_interfere'] # Transform all NaN to "No" (which means, not currently experiencing a mental health condition) mh['ill'] = mh['ill'].replace(np.nan, 'No', regex=True) # Assign each type of response to one of two categories notill = ["No"] ill = ["Never", "Rarely", "Sometimes", "Often"] mh['ill'] = mh['ill'].apply(lambda x:"Yes" if x in ill else x) mh['ill'] = mh['ill'].apply(lambda x:"No" if x in notill else x) # Make a crosstab to view the count for each of the new categories ill_tab = pd.crosstab(index=mh["ill"], columns="count") print(ill_tab) # Confirm that we didn't miss any entries print("If we didn't miss anything, this will equal 1257:", len(mh['ill'])) ill_tab.plot(kind="bar", figsize=(20,7), stacked=True) # + _cell_guid="064e94c8-e787-4f29-b330-7babed2efaf1" _uuid="bb0ffa056f09ee8947f6fd94bfbff4c6ecb31e61" # Display the relationship between "ill" and "region" plt.figure(figsize=(10,7)) sns.countplot(x="region", hue="ill", hue_order = ["Yes", "No"], data=mh) plt.suptitle("Mental Health Conditions v. Region (U.S.)", fontsize=20) plt.title("Frequency of mental health conditions in the U.S. 
by region", fontsize=16) plt.xlabel("") plt.ylabel("") plt.show() # + [markdown] _cell_guid="50aee321-ab87-4851-92c0-db8222ebd65f" _uuid="8717ccde510b04b9e51f305e8706ba3eed14b072" # #### 6B: How do attitudes towards mental health vary by geographic location? # # I will explore the <strong>region</strong> data and its relationship to attitudes towards mental health. A "No" response to <strong>mental_health_consequence</strong> ("Do you think that discussing a mental health issue with your employer would have negative consequences?") will be inferred to represent positive attitudes toward mental health, while a "Yes" response will represent negative attitudes. # + _cell_guid="9cc9a99c-76a4-417b-afa0-837f6ad6db5d" _uuid="198cd3b06fe4d41f9ee13452cbf1f9b1027c286f" # Convert the mental_health_consequence responses into a new variable, # Make a copy of the column to preserve the original data. Work with the new column going forward. mh['attitudes'] = mh['mental_health_consequence'] # Assign each type of response to one of two categories positive = ["No"] negative = ["Yes"] moderate = ['Maybe'] mh['attitudes'] = mh['attitudes'].apply(lambda x:"Positive" if x in positive else x) mh['attitudes'] = mh['attitudes'].apply(lambda x:"Negative" if x in negative else x) mh['attitudes'] = mh['attitudes'].apply(lambda x:"Moderate" if x in moderate else x) # Make a crosstab to view the count for each of the new categories attitudes_tab = pd.crosstab(index=mh["attitudes"], columns="count") print(attitudes_tab) # Confirm that we didn't miss any entries print("If we didn't miss anything, this will equal 1257:", len(mh['attitudes'])) print(attitudes_tab.plot(kind="bar", figsize=(20,7), stacked=True)) # Display the relationship between "mental_health_consequence" and "region" plt.figure(figsize=(10,7)) sns.countplot(x="region", hue="attitudes", hue_order = ["Positive", "Moderate", "Negative"], data=mh) plt.suptitle("Mental Health Attitudes v. Region (U.S.)", fontsize=20) plt.title("Attitudes towards mental health in the U.S. by region", fontsize=16) plt.xlabel("") plt.ylabel("") print(plt.show()) # + [markdown] _cell_guid="22bfff41-6cdc-491a-8109-b819d0eb6961" _uuid="f9f380c0f74dcd03a3a2a7706e063a1c588ecf0c" # ## Next up: 6C- What are the strongest predictors of mental health illness or certain attitudes towards mental health in the workplace? # # + [markdown] _cell_guid="7f450152-b355-4c27-9818-c6d0d4d8b63f" _uuid="507332cf885bedf53f157e592599eae19b0422c8" # ## Next up: all of these charts are in raw numbers. How to show proportions, instead? # # This analysis is a living document and I am continuing to explore new, deeper questions with each iteration. I will update this occasionally with the latest version. # # Thank you for reading! Please contact me with any questions or thoughts. -Liz
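# + [markdown]
# One possible answer to the proportions question above (an editorial sketch): normalize a
# crosstab by row and plot it as a stacked bar chart, so each region sums to 1.

# +
prop_tab = pd.crosstab(mh['region'], mh['ill'], normalize='index')
prop_tab.plot(kind='bar', stacked=True, figsize=(10, 6))
plt.title('Share of respondents reporting a mental health condition, by region')
plt.ylabel('Proportion of respondents')
plt.show()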
Mental Health In Tech Survey 2018_03_06.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/JSJeong-me/KOSA-Big-Data_Vision/blob/main/python/data_type_conversion.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="6RLTOlDXKY-5" score = [[15, 16, 17], [25, 26, 27], [35, 36, 37]] # + colab={"base_uri": "https://localhost:8080/"} id="1PJUF0M2LR3W" outputId="3ad9e9d9-5388-45ce-dea1-6fbb51266d9b" type(score) # + id="XoLWSaNgLW8F" import numpy as np # + id="xGC0IUuoLbxf" np_score = np.array(score) # + colab={"base_uri": "https://localhost:8080/"} id="dqhDISScMN8G" outputId="ce3b0fad-54b5-4ced-b1ff-08b464231ca3" np_score # + colab={"base_uri": "https://localhost:8080/"} id="oXvaHjktM3Ye" outputId="aa7c9e05-9004-4108-d337-b5d918a4466b" np_score.strides # + colab={"base_uri": "https://localhost:8080/"} id="bPtrz4HlLlpd" outputId="1dca77d1-ad33-4975-c1cc-54b403230d06" type(np_score) # + id="JG45jj3mLtpd" import pandas as pd # + id="rBBCIt_cLxEn" df_score = pd.DataFrame(np_score, index=['A_class', 'B_class', 'C_class'], columns=['kor', 'eng', 'math']) # + colab={"base_uri": "https://localhost:8080/", "height": 142} id="G1za533eMJYH" outputId="bb65d03a-eefa-401c-d72b-dbc33ef17078" df_score # + id="QL0HdYOBMUJO" avr_kor = df_score['kor'].mean() # + colab={"base_uri": "https://localhost:8080/"} id="QUp1m37AMdzm" outputId="b90b3003-5faa-4b42-a9d1-05beff67afc7" avr_kor # + id="MhwcxxqDMiVV" max_kor = df_score['kor'].max() # + colab={"base_uri": "https://localhost:8080/"} id="-aH-vfUBMoDG" outputId="e3ac7794-feec-425d-eaac-c216a0b82d2e" max_kor # + id="Ls_mLd3bNkSa" new_np_array = df_score.values # + colab={"base_uri": "https://localhost:8080/"} id="snZBkdftOnbK" outputId="d0b23940-bdb0-4e9f-f714-75044cdcf8a7" df_score.index # + colab={"base_uri": "https://localhost:8080/"} id="WWy0BOwCOrwp" outputId="616a8939-082f-444a-c7af-da8f7ed4672e" df_score.columns # + colab={"base_uri": "https://localhost:8080/"} id="GTcxF5X4Nvi2" outputId="ecc24e82-c1a8-4692-c1ae-f3c5ee3cbfaa" new_np_array # + colab={"base_uri": "https://localhost:8080/"} id="3ZH7tev8NyVW" outputId="dad3a048-1731-436b-e8a0-a4f5b9f0e35b" type(new_np_array) # + id="YTi-TFdCN3qv" new_list_score = new_np_array.tolist() # + colab={"base_uri": "https://localhost:8080/"} id="4zoJ7ZfvOBG-" outputId="6f3edc82-199d-4f77-99dc-e2238ecf8839" type(new_list_score)
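# + [markdown]
# A small round-trip check (editorial sketch): DataFrame.to_numpy() is the newer spelling of
# .values, and keeping the index and columns alongside the array is enough to rebuild the frame.

# +
round_trip = pd.DataFrame(df_score.to_numpy(), index=df_score.index, columns=df_score.columns)
round_trip.equals(df_score)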
python/data_type_conversion.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import gpt_2_simple as gpt2 import os import requests # !pip install nltk import nltk nltk.download('averaged_perceptron_tagger') from nltk.tag import pos_tag # !nvidia-smi # + # gpt2.download_gpt2(model_name="117M") # - sess = gpt2.start_tf_sess() gpt2.load_gpt2(sess, run_name='run3') gpt2.generate(sess, run_name='run3') file_name = "y_u_no.txt" import tensorflow as tf # + tf.compat.v1.reset_default_graph() sess1 = gpt2.start_tf_sess() gpt2.finetune(sess1, dataset=file_name, model_name='117M', steps=200, restore_from='latest', run_name='y_u_no', print_every=5, sample_every=200, save_every=300 ) # - all_captions_string=list(gpt2.generate(sess, temperature=2, batch_size=1, run_name='run3', return_as_list=True )) all_captions_string all_captions_string='\n'.join(all_captions_string[i] for i in range(len(all_captions_string))) print(type(all_captions_string)) k = all_captions_string.split("\n") print(len(k)) type(k) prof = ["fuck","shit","fart","ass","cunt","fag","faggot","retard","dick","sex","gay","lesbian","queer","porn","slut","whore","bitch","breast","penis","masturbate","masturbates","pregnant", "boob","rape","sleeps with"] for i in k: i1 = i.lower() for j in prof: if j in i1: # print(k[k.index(i)]) print(i1) k.remove(i) break all_captions_string='\n'.join(all_captions_string[i] for i in range(len(all_captions_string))) len(k) klist = k[33].split() klist f = pos_tag(klist) f sentence_list = klist sentence_list=list(filter(lambda a: a!='', sentence_list)) break_point=0 for i in range(1, len(f)): word=sentence_list[i] tag=f[i][1] if(word[0].isupper()): if(tag!='NNP' and tag!='NN' and tag!='NNS' and tag!='NNPS'): # print(word) break_point=i break # print(break_point) if(break_point!=0): first_part=' '.join(sentence_list[ite] for ite in range(break_point)) second_part=' '.join(sentence_list[ite] for ite in range(break_point, len(sentence_list))) print(first_part,"\n",second_part) else: break_point=len(sentence_list)//2 first_part=' '.join(sentence_list[ite] for ite in range(break_point)) second_part=' '.join(sentence_list[ite] for ite in range(break_point, len(sentence_list))) print(first_part,"\n", second_part) from PIL import Image from PIL import ImageFont from PIL import ImageDraw # + import textwrap toptext = first_part bottomtext = second_part toptext = toptext.upper() bottomtext = bottomtext.upper() shadowcolor = 'black' fillcolor = 'white' W,H=(1280,720) img = Image.open("sad_keanu.jpg") draw = ImageDraw.Draw(img) font = ImageFont.truetype("impact.ttf", 50) lines = textwrap.wrap(toptext, width=25) y_text =0 for line in lines: width, height = font.getsize(line) x=(W - width) / 2 y=y_text # draw.text((((W - width) / 2)+3, y_text+3), line, font=font, fill=(0,0,0)) draw.text((x-1, y-1), line, font=font, fill=shadowcolor) draw.text((x+1, y-1), line, font=font, fill=shadowcolor) draw.text((x-1, y-1), line, font=font, fill=shadowcolor) draw.text((x+1, y+1), line, font=font, fill=shadowcolor) draw.text((x, y), line, font=font, fill=fillcolor) y_text += height w2, h2 = font.getsize(bottomtext) lines2 = textwrap.wrap(bottomtext, width=25) width2,height2 = font.getsize(lines[0]) y_text2 = (len(lines2)*height2)+10 for line in lines2: width, height = font.getsize(line) x = (W - width) / 2 y = (H-y_text2) # draw.text((((W - width) / 2), (H-y_text2)), line, font=font, 
fill=(0,0,0)) draw.text((x-1, y-1), line, font=font, fill=shadowcolor) draw.text((x+1, y-1), line, font=font, fill=shadowcolor) draw.text((x-1, y-1), line, font=font, fill=shadowcolor) draw.text((x+1, y+1), line, font=font, fill=shadowcolor) draw.text((x, y), line, font=font, fill=fillcolor) y_text2 -= height2 img.save('output_memes_2/sad_keanu_output_2.jpg') # - def generate_meme(i2): klist = k[i2].split() f = pos_tag(klist) sentence_list = klist sentence_list=list(filter(lambda a: a!='', sentence_list)) break_point=0 for i in range(1, len(f)): word=sentence_list[i] tag=f[i][1] if(word[0].isupper()): if(tag!='NNP' and tag!='NN' and tag!='NNS' and tag!='NNPS'): # print(word) break_point=i break # print(break_point) if(break_point!=0): first_part=' '.join(sentence_list[ite] for ite in range(break_point)) second_part=' '.join(sentence_list[ite] for ite in range(break_point, len(sentence_list))) # print(first_part,"\n",second_part) else: break_point=len(sentence_list)//2 first_part=' '.join(sentence_list[ite] for ite in range(break_point)) second_part=' '.join(sentence_list[ite] for ite in range(break_point, len(sentence_list))) # print(first_part,"\n", second_part) toptext = first_part bottomtext = second_part toptext = toptext.upper() bottomtext = bottomtext.upper() shadowcolor = 'black' fillcolor = 'white' W,H=(1280,720) img = Image.open("sad_keanu.jpg") draw = ImageDraw.Draw(img) font = ImageFont.truetype("impact.ttf", 50) lines = textwrap.wrap(toptext, width=25) y_text =0 for line in lines: width, height = font.getsize(line) x=(W - width) / 2 y=y_text # draw.text((((W - width) / 2)+3, y_text+3), line, font=font, fill=(0,0,0)) draw.text((x-1, y-1), line, font=font, fill=shadowcolor) draw.text((x+1, y-1), line, font=font, fill=shadowcolor) draw.text((x-1, y-1), line, font=font, fill=shadowcolor) draw.text((x+1, y+1), line, font=font, fill=shadowcolor) draw.text((x, y), line, font=font, fill=fillcolor) y_text += height w2, h2 = font.getsize(bottomtext) lines2 = textwrap.wrap(bottomtext, width=25) width2,height2 = font.getsize(lines[0]) y_text2 = (len(lines2)*height2)+10 for line in lines2: width, height = font.getsize(line) x = (W - width) / 2 y = (H-y_text2) # draw.text((((W - width) / 2), (H-y_text2)), line, font=font, fill=(0,0,0)) draw.text((x-1, y-1), line, font=font, fill=shadowcolor) draw.text((x+1, y-1), line, font=font, fill=shadowcolor) draw.text((x-1, y-1), line, font=font, fill=shadowcolor) draw.text((x+1, y+1), line, font=font, fill=shadowcolor) draw.text((x, y), line, font=font, fill=fillcolor) y_text2 -= height2 img.save('output_memes_2/sad_keanu_output_'+str(i2)+'.jpg') generate_meme(7) for i in range(100): generate_meme(i)
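# +
# A small refactoring sketch (editorial addition): the caption-drawing cells above repeat the
# (x-1, y-1) offset and never draw (x-1, y+1), so the outline is uneven; looping over all four
# diagonal offsets keeps it symmetric and avoids the duplicated calls.
def draw_outlined_text(draw, x, y, line, font, fillcolor='white', shadowcolor='black'):
    for dx in (-1, 1):
        for dy in (-1, 1):
            draw.text((x + dx, y + dy), line, font=font, fill=shadowcolor)
    draw.text((x, y), line, font=font, fill=fillcolor)
# -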
GPT.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/PacktPublishing/Modern-Computer-Vision-with-PyTorch/blob/master/Chapter04/Image_augmentation.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="CIR7VZ1XRBCY" # %%capture # !pip install -U imgaug # + id="27DMXq3WRtIX" colab={"base_uri": "https://localhost:8080/"} outputId="37eec11b-047d-440d-b8e8-81afc6aac42d" import imgaug print(imgaug.__version__) # + id="8dqBSoIUfmoN" import imgaug.augmenters as iaa # + id="0Lrs6ufwfsRh" colab={"base_uri": "https://localhost:8080/"} outputId="f19653eb-ba27-461b-ab61-c1bb3d7c7e32" from torchvision import datasets import torch data_folder = '/content/' # This can be any directory you want to download FMNIST to fmnist = datasets.FashionMNIST(data_folder, download=True, train=True) # + id="uNpLxbc6gGIl" tr_images = fmnist.data tr_targets = fmnist.targets # + id="kuGaMbCGgHsd" import matplotlib.pyplot as plt # %matplotlib inline import numpy as np from torch.utils.data import Dataset, DataLoader import torch import torch.nn as nn device = 'cuda' if torch.cuda.is_available() else 'cpu' def to_numpy(tensor): return tensor.cpu().detach().numpy() # + id="o2w1Ac2fgMEY" colab={"base_uri": "https://localhost:8080/", "height": 299} outputId="932f07c9-332d-4fba-bef1-f66fee9b2df0" plt.imshow(tr_images[0], cmap='gray') plt.title('Original image') # + id="x846czVPt6zf" colab={"base_uri": "https://localhost:8080/", "height": 299} outputId="fdf1544e-1f9a-46db-b1ab-3d16e4707bda" aug = iaa.Affine(scale=2) plt.imshow(aug.augment_image(to_numpy(tr_images[0]))) plt.title('Scaled image') # + id="px6aanrNt62B" colab={"base_uri": "https://localhost:8080/", "height": 299} outputId="51217b39-edab-4b15-d951-17c0100e6aea" aug = iaa.Affine(translate_px=10) plt.imshow(aug.augment_image(to_numpy(tr_images[0])), cmap='gray') plt.title('Translated image by 10 pixels (right and bottom)') # + id="svm9TAnJvbn6" colab={"base_uri": "https://localhost:8080/", "height": 315} outputId="3b602b41-4897-4b98-8afd-ec46f2848ad1" aug = iaa.Affine(translate_px={'x':10,'y':2}) plt.imshow(aug.augment_image(to_numpy(tr_images[0])), cmap='gray') plt.title('Translation of 10 pixels \nacross columns and 2 pixels over rows') # + id="iCOcbS7sxpVg" colab={"base_uri": "https://localhost:8080/", "height": 299} outputId="bf525b86-ba68-4e0d-858a-2609298fbb4e" aug = iaa.Affine(rotate=30) plt.imshow(aug.augment_image(to_numpy(tr_images[0])), cmap='gray') plt.title('Rotation of image by 30 degrees') # + id="_JRiQsamxpXx" colab={"base_uri": "https://localhost:8080/", "height": 299} outputId="cf6bde8b-a3cf-4539-ba26-843c89524eb2" aug = iaa.Affine(rotate=-30) plt.imshow(aug.augment_image(to_numpy(tr_images[0])), cmap='gray') plt.title('Rotation of image by -30 degrees') # + id="WmxcpYbvx2ct" colab={"base_uri": "https://localhost:8080/", "height": 299} outputId="28d20583-0283-41d2-c76d-5a1d0531f029" aug = iaa.Affine(shear=30) plt.imshow(aug.augment_image(to_numpy(tr_images[0])), cmap='gray') plt.title('Shear of image by 30 degrees') # + id="LQO41jTTx2fW" colab={"base_uri": "https://localhost:8080/", "height": 299} outputId="6bb64817-8fa7-440e-e998-8287a300a028" aug = iaa.Affine(shear=-30) plt.imshow(aug.augment_image(to_numpy(tr_images[0])), 
cmap='gray') plt.title('Shear of image by -30 degrees') # + id="4RD-1tV0zuSm" colab={"base_uri": "https://localhost:8080/", "height": 256} outputId="0b704e83-3c6e-49c1-e967-5fad2337f591" plt.figure(figsize=(20,20)) plt.subplot(161) plt.imshow(tr_images[0], cmap='gray') plt.title('Original image') plt.subplot(162) aug = iaa.Affine(scale=2, fit_output=True) plt.imshow(aug.augment_image(to_numpy(tr_images[0])), cmap='gray') plt.title('Scaled image') plt.subplot(163) aug = iaa.Affine(translate_px={'x':10,'y':2}, fit_output=True) plt.imshow(aug.augment_image(to_numpy(tr_images[0])), cmap='gray') plt.title('Translation of 10 pixels across \ncolumns and 2 pixels over rows') plt.subplot(164) aug = iaa.Affine(rotate=30, fit_output=True) plt.imshow(aug.augment_image(to_numpy(tr_images[0])), cmap='gray') plt.title('Rotation of image \nby 30 degrees') plt.subplot(165) aug = iaa.Affine(shear=30, fit_output=True) plt.imshow(aug.augment_image(to_numpy(tr_images[0])), cmap='gray') plt.title('Shear of image \nby 30 degrees') # + id="I4sSQxqPzuXa" colab={"base_uri": "https://localhost:8080/", "height": 256} outputId="c024fe3c-1dff-4ac1-a019-a3e54bb52701" plt.figure(figsize=(20,20)) plt.subplot(161) plt.imshow(tr_images[0], cmap='gray') plt.title('Original image') plt.subplot(162) aug = iaa.Affine(scale=2, fit_output=True) plt.imshow(aug.augment_image(to_numpy(tr_images[0])), cmap='gray') plt.title('Scaled image') plt.subplot(163) aug = iaa.Affine(translate_px={'x':10,'y':2}, fit_output=True, cval = 255) plt.imshow(aug.augment_image(to_numpy(tr_images[0])), cmap='gray') plt.title('Translation of 10 pixels across \ncolumns and 2 pixels over rows') plt.subplot(164) aug = iaa.Affine(rotate=30, fit_output=True) plt.imshow(aug.augment_image(to_numpy(tr_images[0])), cmap='gray') plt.title('Rotation of image \nby 30 degrees') plt.subplot(165) aug = iaa.Affine(shear=30, fit_output=True) plt.imshow(aug.augment_image(to_numpy(tr_images[0])), cmap='gray') plt.title('Shear of image \nby 30 degrees') # + id="qXCHS8M0zucP" colab={"base_uri": "https://localhost:8080/", "height": 299} outputId="fe1dc67d-57a1-47b1-dc52-1e3b5866986d" aug = iaa.Affine(rotate=30, fit_output=True, cval=255) plt.imshow(aug.augment_image(to_numpy(tr_images[0])), cmap='gray') plt.title('Rotation of image by 30 degrees') # + id="9JUsjQ8f4eZy" colab={"base_uri": "https://localhost:8080/", "height": 256} outputId="ce473612-8fa4-4416-d1d2-ee540dbabf9a" plt.figure(figsize=(20,20)) plt.subplot(161) aug = iaa.Affine(rotate=30, fit_output=True, cval=0, mode='constant') plt.imshow(aug.augment_image(to_numpy(tr_images[0])), cmap='gray') plt.title('Rotation of image by \n30 degrees with constant mode') plt.subplot(162) aug = iaa.Affine(rotate=30, fit_output=True, cval=0, mode='edge') plt.imshow(aug.augment_image(to_numpy(tr_images[0])), cmap='gray') plt.title('Rotation of image by 30 degrees \n with edge mode') plt.subplot(163) aug = iaa.Affine(rotate=30, fit_output=True, cval=0, mode='symmetric') plt.imshow(aug.augment_image(to_numpy(tr_images[0])), cmap='gray') plt.title('Rotation of image by \n30 degrees with symmetric mode') plt.subplot(164) aug = iaa.Affine(rotate=30, fit_output=True, cval=0, mode='reflect') plt.imshow(aug.augment_image(to_numpy(tr_images[0])), cmap='gray') plt.title('Rotation of image by 30 degrees \n with reflect mode') plt.subplot(165) aug = iaa.Affine(rotate=30, fit_output=True, cval=0, mode='wrap') plt.imshow(aug.augment_image(to_numpy(tr_images[0])), cmap='gray') plt.title('Rotation of image by \n30 degrees with wrap mode') # + 
id="xguA31pNMfs4" colab={"base_uri": "https://localhost:8080/", "height": 260} outputId="d88ac13e-fc7d-4e4d-c0b9-2148629baad6" plt.figure(figsize=(20,20)) plt.subplot(151) aug = iaa.Affine(rotate=(-45,45), fit_output=True, cval=0, mode='constant') plt.imshow(aug.augment_image(to_numpy(tr_images[0])), cmap='gray') plt.subplot(152) aug = iaa.Affine(rotate=(-45,45), fit_output=True, cval=0, mode='constant') plt.imshow(aug.augment_image(to_numpy(tr_images[0])), cmap='gray') plt.subplot(153) aug = iaa.Affine(rotate=(-45,45), fit_output=True, cval=0, mode='constant') plt.imshow(aug.augment_image(to_numpy(tr_images[0])), cmap='gray') plt.subplot(154) aug = iaa.Affine(rotate=(-45,45), fit_output=True, cval=0, mode='constant') plt.imshow(aug.augment_image(to_numpy(tr_images[0])), cmap='gray') # + id="e2fD-_dIPv7j" # + id="9cFKjesAPv-y" colab={"base_uri": "https://localhost:8080/", "height": 283} outputId="9cae39d7-f1ba-4827-c6cf-df3e398dff14" aug = iaa.Multiply(1) plt.imshow(aug.augment_image(to_numpy(tr_images[0])), cmap='gray') # + id="Jfyc8e13Pv5Q" colab={"base_uri": "https://localhost:8080/", "height": 299} outputId="2b071110-8eb4-4518-add8-98910da3b33b" aug = iaa.Multiply(0.5) plt.imshow(aug.augment_image(to_numpy(tr_images[0])), cmap='gray',vmin = 0, vmax = 255) plt.title('Pixels multiplied by 0.5') # + id="wfBqBp6uMfqj" colab={"base_uri": "https://localhost:8080/", "height": 299} outputId="0031d938-aa06-43b9-9dea-f750de4704bb" aug = iaa.LinearContrast(0.5) plt.imshow(aug.augment_image(to_numpy(tr_images[0])), cmap='gray',vmin = 0, vmax = 255) plt.title('Pixel contrast by 0.5') # + id="zqBbV3Zyk0Eb" colab={"base_uri": "https://localhost:8080/", "height": 299} outputId="1350d802-1a41-4e4e-f070-65b7e19ebca3" aug = iaa.Dropout(p=0.2) plt.imshow(aug.augment_image(to_numpy(tr_images[0])), cmap='gray',vmin = 0, vmax = 255) plt.title('Random 20% pixel dropout') # + id="E4y_n9Zjk0Cg" colab={"base_uri": "https://localhost:8080/", "height": 299} outputId="b16edd07-887f-43a3-df01-4ef89d8c6c0f" aug = iaa.SaltAndPepper(0.2) plt.imshow(aug.augment_image(to_numpy(tr_images[0])), cmap='gray',vmin = 0, vmax = 255) plt.title('Random 20% salt and pepper noise') # + id="m-y03oXDngcW" colab={"base_uri": "https://localhost:8080/", "height": 336} outputId="7ca03c6a-478b-4909-b254-17fcc0d6c479" plt.figure(figsize=(10,10)) plt.subplot(121) aug = iaa.Dropout(p=0.2,) plt.imshow(aug.augment_image(to_numpy(tr_images[0])), cmap='gray',vmin = 0, vmax = 255) plt.title('Random 20% pixel dropout') plt.subplot(122) aug = iaa.SaltAndPepper(0.2,) plt.imshow(aug.augment_image(to_numpy(tr_images[0])), cmap='gray',vmin = 0, vmax = 255) plt.title('Random 20% salt and pepper noise') # + id="khA0JNibph3U" seq = iaa.Sequential([ iaa.Dropout(p=0.2,), iaa.Affine(rotate=(-30,30))], random_order= True) # + id="OzAZsENWph8X" colab={"base_uri": "https://localhost:8080/", "height": 315} outputId="00c516b0-7053-443a-eadc-9e1298537224" plt.imshow(seq.augment_image(to_numpy(tr_images[0])), cmap='gray',vmin = 0, vmax = 255) plt.title('Image augmented using a \nrandom orderof the two augmentations') # + id="2Jsk4U1clQFh" colab={"base_uri": "https://localhost:8080/", "height": 315} outputId="7944eda8-3280-441f-c7b5-48d3622bb0a9" aug = iaa.GaussianBlur(sigma=1) plt.imshow(aug.augment_image(to_numpy(tr_images[0])), cmap='gray',vmin = 0, vmax = 255) plt.title('Gaussian blurring of image\n with a sigma of 1') # + id="Z1iOo9DPTPrU"
Chapter04/Image_augmentation.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import os

# Directory containing the measurement files to concatenate
data_dir = "/Users/<NAME>ang/Desktop/research/2019_04_04_P3HTPCBM_150C_15min_Sub6"

# Collect the full paths of all files whose name contains 'liv1'
filenames = []
for i in os.listdir(data_dir):
    if 'liv1' in i:
        filenames.append(os.path.join(data_dir, i))

# Append the contents of every matching file into a single output file
with open('/Users/<NAME>ang/Desktop/output', 'w') as outfile:
    for fname in filenames:
        with open(fname) as infile:
            for line in infile:
                outfile.write(line)
# -
file concatenator.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# Set the CSI 300 index as the benchmark
set_benchmark('000300.XSHG')
# Enable dynamic adjustment mode (use real prices)
set_option('use_real_price', True)
# Guard against look-ahead (future) data
set_option("avoid_future_data", True)
# Write output to the log with log.info()
log.info('The initialize function has started running; it runs only once globally')
# Filter out logs below the error level produced by the order-series APIs
log.set_level('order', 'error')

### Stock-related settings ###
# Commission per stock trade: 0.03% when buying, 0.03% plus a 0.1% stamp tax when selling,
# with a minimum commission of 5 yuan per trade
set_order_cost(OrderCost(close_tax=0.001, open_commission=0.0003, close_commission=0.0003,
                         min_commission=5), type='stock')

# Run before the market opens
run_daily(before_market_open, time='before_open', reference_security='000300.XSHG')
# Run at market open
run_daily(market_open, time='open', reference_security='000300.XSHG')
# Run after the market closes
run_daily(after_market_close, time='after_close', reference_security='000300.XSHG')


# +
## Function that runs before the market opens
def before_market_open(context):
    # pre-open logic goes here
    pass


## Function that runs at market open
def market_open(context):
    # trading logic goes here
    pass


## Function that runs after the market closes
def after_market_close(context):
    # post-close logic goes here
    pass
聚宽/.ipynb_checkpoints/initialize-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Warping Images # # In Menpo, we provide an easy way to warp images that utilizes our set of Transform classes. If you want an overview of what Transforms we have in Menpo, go take a look at the Transforms notebooks. # # Warping in Menpo follows a simple set of rules. The idea is that, given an image, we wish to warp it into some sort of reference frame. This is most obviously illustrated by the example of warping a sample face in to the reference space of a deformable model. However, we need some way of approximating the placement of pixels from the input image to the reference space. This is where Transforms are used. The Transform defines how we map from input space to reference space. # # To make everything more clear, lets take a look at some examples. # Import the IO module import menpo.io as mio # Import Matplotlib so we can plot subplots import matplotlib.pyplot as plt # Import a couple of interesting images that are landmarked! takeo = mio.import_builtin_asset('takeo.ppm') takeo = takeo.as_masked() lenna = mio.import_builtin_asset('lenna.png') lenna = lenna.as_masked() # Now, given a landmarked image, it is simple to create a reference template by constraining the images mask to lie within the boundary of the landmarks. For example: # + # %matplotlib inline takeo = takeo.crop_to_landmarks() takeo = takeo.constrain_mask_to_landmarks() plt.subplot(121) takeo.view_landmarks(); plt.subplot(122) takeo.mask.view(); # - # Different landmark sets will obviously produce different shaped masks! # + # %matplotlib inline lenna = lenna.crop_to_landmarks() lenna = lenna.constrain_mask_to_landmarks() plt.subplot(121) lenna.view_landmarks(); plt.subplot(122) lenna.mask.view(); # - # ## Commonly used parametric warps: Piecewise Affine and Thin Plate Splines # Commonly, in deformable modelling and parametric image alignment, we want to be able to approximate non-rigid movement within our objects. In faces, this non-rigid movement can come from changes in pose and expression. Two commonly used transforms that enable this kind of movement are Piecewise Affine (PWA) and Thin Plate Splines (TPS). Please go check over the Notebooks for these Transforms if you haven't already! # # Both PWA and TPS are defined between a set of source and target landmarks. Luckily, it is easy to define these classes given the landmarks on our images. Let's see what this looks like: # + from menpo.transform import ThinPlateSplines, PiecewiseAffine tps_lenna_to_takeo = ThinPlateSplines(lenna.landmarks['LJSON'].lms, takeo.landmarks['PTS'].lms) pwa_lenna_to_takeo = PiecewiseAffine(lenna.landmarks['LJSON'].lms, takeo.landmarks['PTS'].lms) tps_takeo_to_lenna = ThinPlateSplines(takeo.landmarks['PTS'].lms, lenna.landmarks['LJSON'].lms) pwa_takeo_to_lenna = PiecewiseAffine(takeo.landmarks['PTS'].lms, lenna.landmarks['LJSON'].lms) # - # We can then see what it would look like if we warped Takeo's face into the space of Lenna's! Notice that the output image has the same shape as the mask of Lenna. This is because Lenna is defining the reference frame. Also notice that you achieve different results depending on what Transform was used! PWA is a local discrete approximation, whilst TPS is global. Therefore, you are likely to get quit different results in extreme cases! 
#
# ### NOTE: The results might look pretty odd in case the two shapes are in totally different poses. Warping frontal images produces much more visually appealing results.

warped_takeo_to_lenna_pwa = takeo.as_unmasked(copy=False).warp_to_mask(lenna.mask, pwa_lenna_to_takeo)
warped_takeo_to_lenna_tps = takeo.as_unmasked(copy=False).warp_to_mask(lenna.mask, tps_lenna_to_takeo)

# The parameters to the warp function are very simple:
#
# - The `BooleanImage` mask that represents the output frame
# - The `Transform` to apply that will dictate how pixel positions in the original coordinate system are mapped into the reference frame.

# %matplotlib inline
# Takeo to Lenna with PWA
warped_takeo_to_lenna_pwa.view();

import numpy as np
np.nanmax(warped_takeo_to_lenna_pwa.pixels) + 1

warped_takeo_to_lenna_pwa.pixels[0,1,1]

# %matplotlib inline
# Takeo to Lenna with TPS
warped_takeo_to_lenna_tps.view();

# Naturally, we can also perform the warp in the opposite direction!

warped_lenna_to_takeo_pwa = lenna.as_unmasked(copy=False).warp_to_mask(takeo.mask, pwa_takeo_to_lenna)
warped_lenna_to_takeo_tps = lenna.as_unmasked(copy=False).warp_to_mask(takeo.mask, tps_takeo_to_lenna)

# %matplotlib inline
# Lenna to Takeo with PWA
warped_lenna_to_takeo_pwa.view();

# %matplotlib inline
# Lenna to Takeo with TPS
warped_lenna_to_takeo_tps.view();
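
# For comparison, a purely global transform can also drive the warp. The cell below is
# a small sketch added for illustration (not part of the original walkthrough); it
# assumes that `AlignmentAffine` is available in this version of menpo and reuses the
# images and masks defined above.

# +
from menpo.transform import AlignmentAffine

# As with the PWA/TPS warps above, the transform maps from the reference frame (Lenna)
# back into the input image (Takeo).
affine_lenna_to_takeo = AlignmentAffine(lenna.landmarks['LJSON'].lms,
                                        takeo.landmarks['PTS'].lms)

warped_takeo_to_lenna_affine = takeo.as_unmasked(copy=False).warp_to_mask(
    lenna.mask, affine_lenna_to_takeo)
warped_takeo_to_lenna_affine.view();
# -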
menpo/Images/Warping.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Classification and regression

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns

flores = sns.load_dataset('iris')

flores.head(5)

flores.info()

flores['species'].unique()

sns.pairplot(flores, hue='species')

from sklearn.model_selection import train_test_split

x = flores.drop('species', axis=1)
y = flores['species']

x_train, x_test, y_train, y_tests = train_test_split(x, y, test_size=0.3)

from sklearn.svm import SVC

modelo = SVC(gamma='auto')

modelo.fit(x_train, y_train)

predicciones = modelo.predict(x_test)

predicciones

from sklearn.metrics import classification_report, confusion_matrix

print(classification_report(y_tests, predicciones))

print(confusion_matrix(y_tests, predicciones))
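
# A natural next step is to tune the SVM hyperparameters instead of using the defaults.
# The cell below is a small illustrative sketch (not part of the original analysis);
# the parameter grid chosen here is just an example.

# +
from sklearn.model_selection import GridSearchCV

param_grid = {'C': [0.1, 1, 10, 100],
              'gamma': [1, 0.1, 0.01, 'scale'],
              'kernel': ['rbf', 'linear']}

grid = GridSearchCV(SVC(), param_grid, cv=5)
grid.fit(x_train, y_train)

print(grid.best_params_)
print(classification_report(y_tests, grid.predict(x_test)))
# -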
Maquina de vector de soporte.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Geometric Sum # # 1 + 1/2 + 1/4 + 1/8 + ... + 1/(2^k) # + def geoSum(k): if k < 0: return 0 return geoSum(k - 1) + 1/(2**k) print(format(geoSum(3), '.5f')) # - # # Check Palindrome (recursive) # + def check(s, si, ei): if si > ei: return True if s[si] == s[ei]: return check(s, si+1, ei-1) else: return False def palinCheck(s): if len(s) == 0 or len(s) == 1: return True if check(s, 0, len(s)-1): return 'true' else: return 'false' print(palinCheck('racecar')) # - # # Sum of digits (recursive) # + def sumDigits(n): if n >= 0 and n <= 9: return n return (n%10) + sumDigits(n//10) print(sumDigits(1234)) # - # # Multiplication (Recursive) # # Given two integers M & N, calculate and return their multiplication using recursion. You can only use subtraction and addition for your calculation. No other operators are allowed. # + def multiply(m, n): if m == 0 or n == 0: return 0 if n > 0: return m + multiply(m, n-1) print(multiply(3, 2)) # + def product( x , y ): if x < y: return product(y, x) elif y != 0: return (x + product(x, y - 1)) else: return 0 m = int(input()) n = int(input()) x = product(m, n) print(x) # - # # Count Zeros # + def countZeros(n): if n == 0: return 0 if n%10 == 0: return 1 + countZeros(n//10) else: return countZeros(n//10) n = int(input()) if n == 0: print('0') else: print(countZeros(n)) # - # # String to Integer # + def strToInt(str): if (len(str) == 1): return ord(str[0]) - ord('0'); y = strToInt(str[1:]); x = ord(str[0]) - ord('0'); x = x * (10**(len(str) - 1)) + y; return int(x); s = input() print(strToInt(s)) # - # # Pair star # # Given a string S, compute recursively a new string where identical chars that are adjacent in the original string are separated from each other by a "*". # + def pairStar(s): if len(s) == 0: return '' if len(s) == 1: return s[0] if s[0] == s[1]: return s[0] + '*' + pairStar(s[1:]) else: return s[0] + pairStar(s[1:]) s = input() print(pairStar(s)) # - # # Check AB # # 1. The string begins with an 'a' # 2. Each 'a' is followed by nothing or an 'a' or "bb" # 3. 
Each "bb" is followed by nothing or an 'a' # + def checkAB(s): if len(s) == 0: return 1 if s[0] == 'a': if len(s[1:]) > 1 and s[1:3] == 'bb': return checkAB(s[3:]) else: return checkAB(s[1:]) else: return 0 s = input() if checkAB(s): print('true') else: print('false') # + def checkAB(s): if len(s) == 0: return 1 if s[0] == 'a': if s[1] == 'a' or s[1] == 'b': return checkAB(s[2:]) if s[1] == None: return 1 else: return 0 elif s[0] == 'b': if s[1] == 'b' and s[2] == None: return checkAB(s[2:]) elif s[1] == 'b' and s[2] == 'a': return 1 else: return 0 s = input() if s[0] == 'a': if checkAB(s[1:]): print('true') else: print('false') else: print('false') # + int checkstring(char *t) { if(*t==NULL) { return 1; } char *c=t; if(*t==’a’) { if(*(c+1)==’a’) { return checkstring(t+1); } else if(*(c+1)==’b’) { return checkstring(t+1); } if(*(c+1)==NULL) { return 1; } else { return 0; } } else if(*t==’b’) { if(*(c+1)==’b’&&*(c+2)==’a’) { return checkstring(t+2); } else if(*(c+1)==’b’&&*(c+2)==NULL) { return 1; } else { return 0; } } } main() { char s[100]; cin>>s; char *t; t=s; if(*t==’a’) { t=t++; if(checkstring(t)) { cout<<"valid substring"; } else { cout<<"Not a valid substring"; } } else { cout<<"Not a valid substring"; } } # + def state2(lst): if lst==[]: return True poped = lst.pop(0) if poped == "a": return state2(lst) else: if lst == [] or lst[0] == 'a': return False if lst[0]=="b": lst.pop(0) # removed "BB" return state1(lst) def state1(lst): if lst==[]: return True if lst[0]=="a": lst.pop(0) return state2(lst) return False # + def checkString(inp): # print(list(inp)) return state1(list(inp)) testcases = ["abb","a","ab","abba", "","abbabbb","abbabbaaabbaaabbabb"] for i in testcases: print(checkString(i)) # - # # Staircase # # A child is running up a staircase with N steps, and can hop either 1 step, 2 steps or 3 steps at a time. Implement a method to count how many possible ways the child can run up to the stairs. You need to return number of possible ways W. # + def staircase(n): if n == 0 or n == 1: return 1 elif n == 2: return 2 else: return staircase(n-1) + staircase(n-2) + staircase(n-3) n = int(input()) print(staircase(n))
Recursions/recursions_practice_3.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="NXalien8M7Ll" colab_type="code" colab={} import tensorflow as tf import numpy as np import random # + [markdown] id="6jEuNOgfNjC0" colab_type="text" # Loading dataset... # + id="91aOiFV4NkcM" colab_type="code" outputId="cfdb1b4f-16a9-402a-839d-07d33e5d3f1a" colab={"base_uri": "https://localhost:8080/", "height": 119} (x_train, y_train), (x_test, y_test) = tf.keras.datasets.fashion_mnist.load_data() label_name = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot'] print("Brief on training dataset:") print(" Num of figures: {:d}".format(y_train.shape[0])) print(" Size of figure: {:d}x{:d}".format(x_train.shape[1], x_train.shape[2])) print("Brief on testing dataset:") print(" Num of figures: {:d}".format(y_test.shape[0])) print(" Size of figure: {:d}x{:d}".format(x_test.shape[1], x_test.shape[2])) # + [markdown] id="hAScgl3UNpSF" colab_type="text" # Building model... # + id="NMGw-ns-NqU1" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 255} outputId="aae9eeb5-7b97-4920-fc21-f52226f1f60e" model = tf.keras.models.Sequential() model.add(tf.keras.layers.Flatten(input_shape=(x_train.shape[1], x_train.shape[2]))) model.add(tf.keras.layers.Dense(128, activation='relu')) model.add(tf.keras.layers.Dense(10, activation='softmax')) model.summary() # + [markdown] id="PAcn8lmSj-Zb" colab_type="text" # Compiling model... # + id="L1UCualpj8-n" colab_type="code" colab={} loc_optimizer = tf.keras.optimizers.Adam(lr=1e-3, ) loc_loss = tf.keras.losses.sparse_categorical_crossentropy loc_metrics = ['sparse_categorical_accuracy'] model.compile(optimizer=loc_optimizer,loss=loc_loss,metrics=loc_metrics) # + [markdown] id="HRs_d_mAkIny" colab_type="text" # Fitting model... # + id="Yg3SLJQmkOOd" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 578} outputId="a983c1d8-92da-4898-83ba-813bd067a6ed" model.fit(x_train.astype(np.float32), y_train.astype(np.float32), verbose = 1, batch_size = 128, epochs=16) # + [markdown] id="2OiOitr1nDvG" colab_type="text" # Evaluating model... # + id="AKEulJpCnFYw" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="dae7b0ab-7307-4f5e-be02-<KEY>" score = model.evaluate(x_test.astype(np.float32), y_test.astype(np.float32), verbose = 1) print("Loss: ", score[0]) print("Accuracy: ", score[1]) # + [markdown] id="n_oHxuB2x5zV" colab_type="text" # Predicting... # + id="Z7uMTqrGx7Fu" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 221} outputId="a778018a-211c-4c51-abef-46445812c014" print("Choosing 10 figures randomly from testing dataset: ") idx = [] cnt = 0 while cnt < 10: loc_idx = random.randint(0, 10000) if loc_idx not in idx: idx.append(loc_idx) cnt = cnt + 1 pred = model.predict_classes(np.array([x_test[e] for e in idx])) print("{:>8s}{:>18s}{:>18s}{:>12s}".format("Index", "Prediction", "Actual", "Judgement")) for i in range(10): loc_idx = idx[i] loc_pred = label_name[pred[i]] loc_ans = label_name[y_test[idx[i]]] loc_judge = pred[i]== y_test[idx[i]] print("{:>8d}{:>18s}{:>18s}{:>12b}".format(loc_idx, loc_pred, loc_ans, loc_judge))
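
# + [markdown]
# Note: `model.predict_classes` is deprecated and removed in newer versions of tf.keras.
# The cell below is an optional sketch (not part of the original homework) showing an
# equivalent prediction for the same 10 randomly chosen figures using `model.predict`
# plus `np.argmax`; it assumes `idx` from the cell above is still in scope.

# +
probs = model.predict(np.array([x_test[e] for e in idx]).astype(np.float32))
pred_argmax = np.argmax(probs, axis=1)   # class with the highest softmax probability

print("{:>8s}{:>18s}{:>18s}".format("Index", "Prediction", "Actual"))
for i, loc_idx in enumerate(idx):
    print("{:>8d}{:>18s}{:>18s}".format(loc_idx,
                                        label_name[pred_argmax[i]],
                                        label_name[y_test[loc_idx]]))
# -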
VE581/HW1/Fashion_DNN.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Understanding word vectors # # reference: https://gist.github.com/aparrish/2f562e3737544cf29aaf1af30362f469 # # In this tutorial, I'm going to show you how word vectors work. This tutorial assumes a good amount of Python knowledge, but even if you're not a Python expert, you should be able to follow along and make small changes to the examples without too much trouble. # # This is a "Jupyter Notebook," which consists of text and "cells" of code. After you've loaded the notebook, you can execute the code in a cell by highlighting it and hitting Ctrl+Enter. In general, you need to execute the cells from top to bottom, but you can usually run a cell more than once without messing anything up. Experiment! # # If things start acting strange, you can interrupt the Python process by selecting "Kernel > Interrupt"—this tells Python to stop doing whatever it was doing. Select "Kernel > Restart" to clear all of your variables and start from scratch. # ## why word vector # # Poetry is, at its core, the art of identifying and manipulating linguistic similarity. I have discovered a truly marvelous proof of this, which this notebook is too narrow to contain. (By which I mean: I will elaborate on this some other time) # # Animal similarity and simple linear algebra # ## Animal similarity and simple linear algebra # # We'll begin by considering a small subset of English: words for animals. Our task is to be able to write computer programs to find similarities among these words and the creatures they designate. To do this, we might start by making a spreadsheet of some animals and their characteristics. For example: # ![image.png](attachment:image.png) # This spreadsheet associates a handful of animals with two numbers: their cuteness and their size, both in a range from zero to one hundred. (The values themselves are simply based on my own judgment. Your taste in cuteness and evaluation of size may differ significantly from mine. As with all data, these data are simply a mirror reflection of the person who collected them.) import math def distance2d(x1, y1, x2, y2): return math.sqrt((x1 - x2)**2 + (y1 - y2)**2) # ![image.png](attachment:image.png) # So, the distance between "capybara" (70, 30) and "panda" (74, 40): distance2d(70, 30, 75, 40) # Modeling animals in this way has a few other interesting properties. For example, you can pick an arbitrary point in "animal space" and then find the animal closest to that point. If you imagine an animal of size 25 and cuteness 30, you can easily look at the space to find the animal that most closely fits that description: the chicken. # # Reasoning visually, you can also answer questions like: what's halfway between a chicken and an elephant? Simply draw a line from "elephant" to "chicken," mark off the midpoint and find the closest animal. (According to our chart, halfway between an elephant and a chicken is a horse.) # # You can also ask: what's the difference between a hamster and a tarantula? According to our plot, it's about seventy five units of cute (and a few units of size). # # The relationship of "difference" is an interesting one, because it allows us to reason about analogous relationships. 
In the chart below, I've drawn an arrow from "tarantula" to "hamster" (in blue): # # ![image.png](attachment:image.png) # ## Language with vectors: colors # So far, so good. We have a system in place—albeit highly subjective—for talking about animals and the words used to name them. I want to talk about another vector space that has to do with language: the vector space of colors. # # Colors are often represented in computers as vectors with three dimensions: red, green, and blue. Just as with the animals in the previous section, we can use these vectors to answer questions like: which colors are similar? What's the most likely color name for an arbitrarily chosen set of values for red, green and blue? Given the names of two colors, what's the name of those colors' "average"? # # We'll be working with this color data from the xkcd color survey. The data relates a color name to the RGB value associated with that color. Here's a page that shows what the colors look like. Download the color data and put it in the same directory as this notebook. # # A few notes before we proceed: # # 1. The linear algebra functions implemented below (addv, meanv, etc.) are slow, potentially inaccurate, and shouldn't be used for "real" code—I wrote them so beginner programmers can understand how these kinds of functions work behind the scenes. Use numpy for fast and accurate math in Python. # # 2. If you're interested in perceptually accurate color math in Python, consider using the colormath library. # # Now, import the json library and load the color data: # ![image.png](attachment:image.png) # ## vector math # # Before we keep going, we'll need some functions for performing basic vector "arithmetic." These functions will work with vectors in spaces of any number of dimensions. import math def distance(coord1, coord2): # note, this is VERY SLOW, don't use for actual code return math.sqrt(sum([(i - j)**2 for i, j in zip(coord1, coord2)])) distance([10, 1], [5, 2]) # The subtractv function subtracts one vector from another: def subtractv(coord1, coord2): return [c1 - c2 for c1, c2 in zip(coord1, coord2)] subtractv([10, 1], [5, 2]) # + # The addv vector adds two vectors together: def addv(coord1, coord2): return [c1 + c2 for c1, c2 in zip(coord1, coord2)] addv([10, 1], [5, 2]) # - # And the meanv function takes a list of vectors and finds their mean or average: def meanv(coords): # assumes every item in coords has same length as item 0 sumv = [0] * len(coords[0]) for item in coords: for i in range(len(item)): sumv[i] += item[i] mean = [0] * len(sumv) for i in range(len(sumv)): mean[i] = float(sumv[i]) / len(coords) return mean meanv([[0, 1], [2, 2], [4, 3]]) # ## find the closest item # # Just as we wanted to find the animal that most closely matched an arbitrary point in cuteness/size space, we'll want to find the closest color name to an arbitrary point in RGB space. The easiest way to find the closest item to an arbitrary vector is simply to find the distance between the target vector and each item in the space, in turn, then sort the list from closest to farthest. The closest() function below does just that. By default, it returns a list of the ten closest items to the given vector. # # Note: Calculating "closest neighbors" like this is fine for the examples in this notebook, but unmanageably slow for vector spaces of any appreciable size. As your vector space grows, you'll want to move to a faster solution, like SciPy's kdtree or Annoy. 
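
# As a tiny illustration of that faster route (an aside added here, not used in the
# rest of this notebook), SciPy's KDTree can answer nearest-neighbour queries without
# scanning every vector; the toy colour vectors below are just made-up examples.

# +
from scipy.spatial import KDTree

toy_names = ["red", "green", "blue", "purple"]
toy_vectors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (128, 0, 128)]

tree = KDTree(toy_vectors)
dists, idxs = tree.query((200, 0, 50), k=2)   # two nearest neighbours of an arbitrary point
print([toy_names[i] for i in idxs])
# -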
# # 正如我们想要找到最接近于可爱度/大小空间中任意点的动物一样,我们也希望找到与RGB空间中任意点最接近的颜色名称。 找到与任意向量最接近的项的最简单方法是,依次找到目标向量与空间中每个项之间的距离,然后从最接近到最远对列表进行排序。 下面最接近的()函数就是这样做的。 默认情况下,它将返回与给定向量最接近的十个项目的列表。 import json color_data = json.loads(open("xkcd.json").read()) # + # The following function converts colors from hex format (#1a2b3c) to a tuple of integers: def hex_to_int(s): s = s.lstrip("#") return int(s[:2], 16), int(s[2:4], 16), int(s[4:6], 16) # And the following cell creates a dictionary and populates it with mappings from color names to RGB vectors for each color in the data: colors = dict() for item in color_data['colors']: colors[item["color"]] = hex_to_int(item["hex"]) # - colors['olive'] def closest(space, coord, n=10): closest = [] for key in sorted(space.keys(), key=lambda x: distance(coord, space[x]))[:n]: closest.append(key) return closest closest(colors, colors['red']) # .. or the ten colors closest to (150, 60, 150): closest(colors, [150, 60, 150]) # 将单词表示为向量的神奇之处在于,我们前面定义的向量运算似乎在语言上运算的方式与在数字上运算的方式相同。 例如,如果我们发现最接近向量的词是从“紫色”中减去“红色”而得到的,则会得到一系列“蓝色”颜色: # # The magical part of representing words as vectors is that the vector operations we defined earlier appear to operate on language the same way they operate on numbers. For example, if we find the word closest to the vector resulting from subtracting "red" from "purple," we get a series of "blue" colors: closest(colors, subtractv(colors['purple'], colors['red'])) closest(colors, addv(colors['blue'], colors['green'])) # the average of black and white: medium grey closest(colors, meanv([colors['black'], colors['white']])) # an analogy: pink is to red as X is to blue pink_to_red = subtractv(colors['pink'], colors['red']) closest(colors, addv(pink_to_red, colors['blue'])) # another example: navy_to_blue = subtractv(colors['navy'], colors['blue']) closest(colors, addv(navy_to_blue, colors['green'])) # The examples above are fairly simple from a mathematical perspective but nevertheless feel magical: they're demonstrating that it's possible to use math to reason about how people use language. # ## Doing bad digital humanities with color vectors # # With the tools above in hand, we can start using our vectorized knowledge of language toward academic ends. In the following example, I'm going to calculate the average color of <NAME> Dracula. # # (Before you proceed, make sure to download the text file from Project Gutenberg and place it in the same directory as this notebook.) # # First, we'll load spaCy: import spacy nlp = spacy.load('en_core_web_sm') # # To calculate the average color, we'll follow these steps: # # 1. Parse the text into words # 2. Check every word to see if it names a color in our vector space. If it does, add it to a list of vectors. # 3. Find the average of that list of vectors. # 4. Find the color(s) closest to that average vector. # # The following cell performs steps 1-3: doc = nlp(open("pg345.txt").read()) # use word.lower_ to normalize case drac_colors = [colors[word.lower_] for word in doc if word.lower_ in colors] avg_color = meanv(drac_colors) print(avg_color) # Now, we'll pass the averaged color vector to the closest() function, yielding... well, it's just a brown mush, which is kinda what you'd expect from adding a bunch of colors together willy-nilly. 
closest(colors, avg_color) doc = nlp(open("pg1952.txt").read()) wallpaper_colors = [colors[word.lower_] for word in doc if word.lower_ in colors] avg_color = meanv(wallpaper_colors) closest(colors, avg_color) for cname in closest(colors, colors['mauve']): print(cname + " trousers") # That's all well and good for color words, which intuitively seem to exist in a multidimensional continuum of perception, and for our animal space, where we've written out the vectors ahead of time. But what about... arbitrary words? Is it possible to create a vector space for all English words that has this same "closer in space is closer in meaning" property? # # To answer that, we have to back up a bit and ask the question: what does meaning mean? No one really knows, but one theory popular among computational linguists, computer scientists and other people who make search engines is the Distributional Hypothesis, which states that: # # Linguistic items with similar distributions have similar meanings. # # What's meant by "similar distributions" is similar contexts. Take for example the following sentences: # # It was really cold yesterday. # It will be really warm today, though. # It'll be really hot tomorrow! # Will it be really cool Tuesday? # # According to the Distributional Hypothesis, the words cold, warm, hot and cool must be related in some way (i.e., be close in meaning) because they occur in a similar context, i.e., between the word "really" and a word indicating a particular day. (Likewise, the words yesterday, today, tomorrow and Tuesday must be related, since they occur in the context of a word indicating a temperature.) # # In other words, according to the Distributional Hypothesis, a word's meaning is just a big list of all the contexts it occurs in. Two words are closer in meaning if they share contexts. # # 根据分布假说,冷,暖,热和凉这两个词必须以某种方式关联(即,含义相近),因为它们出现在相似的上下文中,即在“真正”一词和表示“真实”的词之间 特定的一天。 (同样,昨天,今天,明天和星期二这两个词必须相关,因为它们出现在表示温度的词的上下文中。) # # 换句话说,根据分布假说,一个单词的含义只是其出现的所有上下文的一个大列表。如果两个单词共享上下文,则两个单词的含义更接近。 # ## Word vectors by counting contexts # # So how do we turn this insight from the Distributional Hypothesis into a system for creating general-purpose vectors that capture the meaning of words? Maybe you can see where I'm going with this. What if we made a really big spreadsheet that had one column for every context for every word in a given source text. Let's use a small source text to begin with, such as this excerpt from Dickens: # # It was the best of times, it was the worst of times. # # Such a spreadsheet might look something like this: # # ![Screen%20Shot%202020-07-28%20at%2014.54.44.png](attachment:Screen%20Shot%202020-07-28%20at%2014.54.44.png) # # The spreadsheet has one column for every possible context, and one row for every word. The values in each cell correspond with how many times the word occurs in the given context. The numbers in the columns constitute that word's vector, i.e., the vector for the word of is # # [0, 0, 0, 0, 1, 0, 0, 0, 1, 0] # # Because there are ten possible contexts, this is a ten dimensional space! It might be strange to think of it, but you can do vector arithmetic on vectors with ten dimensions just as easily as you can on vectors with two or three dimensions, and you could use the same distance formula that we defined earlier to get useful information about which vectors in this space are similar to each other. 
In particular, the vectors for best and worst are actually the same (a distance of zero), since they occur only in the same context (the ___ of): # # [0, 0, 0, 1, 0, 0, 0, 0, 0, 0] # # Of course, the conventional way of thinking about "best" and "worst" is that they're antonyms, not synonyms. But they're also clearly two words of the same kind, with related meanings (through opposition), a fact that is captured by this distributional model. # ## Contexts and dimensionality # # Of course, in a corpus of any reasonable size, there will be many thousands if not many millions of possible contexts. It's difficult enough working with a vector space of ten dimensions, let alone a vector space of a million dimensions! It turns out, though, that many of the dimensions end up being superfluous and can either be eliminated or combined with other dimensions without significantly affecting the predictive power of the resulting vectors. The process of getting rid of superfluous dimensions in a vector space is called dimensionality reduction, and most implementations of count-based word vectors make use of dimensionality reduction so that the resulting vector space has a reasonable number of dimensions (say, 100—300, depending on the corpus and application). # # The question of how to identify a "context" is itself very difficult to answer. In the toy example above, we've said that a "context" is just the word that precedes and the word that follows. Depending on your implementation of this procedure, though, you might want a context with a bigger "window" (e.g., two words before and after), or a non-contiguous window (skip a word before and after the given word). You might exclude certain "function" words like "the" and "of" when determining a word's context, or you might lemmatize the words before you begin your analysis, so two occurrences with different "forms" of the same word count as the same context. These are all questions open to research and debate, and different implementations of procedures for creating count-based word vectors make different decisions on this issue. # # # ## Word vectors in spaCy # # Okay, let's have some fun with real word vectors. We're going to use the GloVe vectors that come with spaCy to creatively analyze and manipulate the text of <NAME> Dracula. First, make sure you've got spacy imported: from __future__ import unicode_literals import spacy # + # the following cell loads the language model and parses the input text nlp = spacy.load('en_core_web_sm') doc = nlp(open("pg345.txt").read()) # + # And the cell below creates a list of unique words (or tokens) in the text, as a list of strings. # All of the words in the text file tokens = list(set([w.text for w in doc if w.is_alpha])) tokens # - def vec(s): return nlp.vocab[s].vector import numpy as np from numpy import dot from numpy.linalg import norm # cosine similarity def cosine(v1, v2): if norm(v1) > 0 and norm(v2) > 0: return dot(v1, v2) / (norm(v1) * norm(v2)) else: return 0.0 cosine(vec('dog'), vec('puppy')) > cosine(vec('trousers'), vec('octopus'))
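
# With `cosine` and `vec` in hand, we can look up the tokens from the novel whose
# vectors are closest to any given vector. The helper below is a simple sketch added
# for illustration: it just sorts the token list by cosine similarity, which is slow
# but fine for a demonstration. (The small 'en_core_web_sm' model ships without true
# pretrained word vectors, so a model such as 'en_core_web_md' gives more meaningful
# neighbours.)

# +
def spacy_closest(token_list, target_vec, n=10):
    """Return the n tokens whose vectors are most cosine-similar to target_vec."""
    return sorted(token_list,
                  key=lambda t: cosine(vec(t), target_vec),
                  reverse=True)[:n]

# e.g. the words in the text closest to "basketball"
spacy_closest(tokens, vec("basketball"), n=10)
# -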
NLP_exercise/09-Word2Vec.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <img src="images/dask_horizontal.svg" align="right" width="30%"> # # Lazy execution # Here we discuss some of the concepts behind dask, and lazy execution of code. You do not need to go through this material if you are eager to get on with the tutorial, but it may help understand the concepts underlying dask, how these things fit in with techniques you might already be using, and how to understand things that can go wrong. # ## Prelude # As Python programmers, you probably already perform certain *tricks* to enable computation of larger-than-memory datasets, parallel execution or delayed/background execution. Perhaps with this phrasing, it is not clear what we mean, but a few examples should make things clearer. The point of Dask is to make simple things easy and complex things possible! # # Aside from the [detailed introduction](http://dask.pydata.org/en/latest/), we can summarize the basics of Dask as follows: # # - process data that doesn't fit into memory by breaking it into blocks and specifying task chains # - parallelize execution of tasks across cores and even nodes of a cluster # - move computation to the data rather than the other way around, to minimize communication overhead # # All of this allows you to get the most out of your computation resources, but program in a way that is very familiar: for-loops to build basic tasks, Python iterators, and the NumPy (array) and Pandas (dataframe) functions for multi-dimensional or tabular data, respectively. # # The remainder of this notebook will take you through the first of these programming paradigms. This is more detail than some users will want, who can skip ahead to the iterator, array and dataframe sections; but there will be some data processing tasks that don't easily fit into those abstractions and need to fall back to the methods here. # # We include a few examples at the end of the notebooks showing that the ideas behind how Dask is built are not actually that novel, and experienced programmers will have met parts of the design in other situations before. Those examples are left for the interested. # ## Dask is a graph execution engine # Dask allows you to construct a prescription for the calculation you want to carry out. That may sound strange, but a simple example will demonstrate that you can achieve this while programming with perfectly ordinary Python functions and for-loops. We saw this in Chapter 02. # + from dask import delayed @delayed def inc(x): return x + 1 @delayed def add(x, y): return x + y # - # Here we have used the delayed annotation to show that we want these functions to operate lazily — to save the set of inputs and execute only on demand. `dask.delayed` is also a function which can do this, without the annotation, leaving the original function unchanged, e.g., # ```python # delayed_inc = delayed(inc) # ``` # this looks like ordinary code x = inc(15) y = inc(30) total = add(x, y) # incx, incy and total are all delayed objects. # They contain a prescription of how to execute # Calling a delayed function created a delayed object (`incx, incy, total`) - examine these interactively. Making these objects is somewhat equivalent to constructs like the `lambda` or function wrappers (see below). 
Each holds a simple dictionary describing the task graph, a full specification of how to carry out the computation. # # We can visualize the chain of calculations that the object `total` corresponds to as follows; the circles are functions, rectangles are data/results. total.visualize() # But so far, no functions have actually been executed. This demonstrated the division between the graph-creation part of Dask (`delayed()`, in this example) and the graph execution part of Dask. # # To run the "graph" in the visualization, and actually get a result, do: # execute all tasks total.compute() # **Why should you care about this?** # # By building a specification of the calculation we want to carry out before executing anything, we can pass the specification to an *execution engine* for evaluation. In the case of Dask, this execution engine could be running on many nodes of a cluster, so you have access to the full number of CPU cores and memory across all the machines. Dask will intelligently execute your calculation with care for minimizing the amount of data held in memory, while parallelizing over the tasks that make up a graph. Notice that in the animated diagram below, where four workers are processing the (simple) graph, execution progresses vertically up the branches first, so that intermediate results can be expunged before moving onto a new branch. # # With `delayed` and normal pythonic looped code, very complex graphs can be built up and passed on to Dask for execution. See a nice example of [simulated complex ETL](https://blog.dask.org/2017/01/24/dask-custom) work flow. # # ![this](images/grid_search_schedule.gif) # ### Exercise # We will apply `delayed` to a real data processing task, albeit a simple one. # # Consider reading three CSV files with `pd.read_csv` and then measuring their total length. We will consider how you would do this with ordinary Python code, then build a graph for this process using delayed, and finally execute this graph using Dask, for a handy speed-up factor of more than two (there are only three inputs to parallelize over). # %run prep.py -d accounts import pandas as pd import os filenames = [os.path.join('data', 'accounts.%d.csv' % i) for i in [0, 1, 2]] filenames # + # %%time # normal, sequential code a = pd.read_csv(filenames[0]) b = pd.read_csv(filenames[1]) c = pd.read_csv(filenames[2]) na = len(a) nb = len(b) nc = len(c) total = sum([na, nb, nc]) print(total) # - # Your task is to recreate this graph again using the delayed function on the original Python code. The three functions you want to delay are `pd.read_csv`, `len` and `sum`.. # ```python # delayed_read_csv = delayed(pd.read_csv) # a = delayed_read_csv(filenames[0]) # ... # # total = ... # # # execute # # %time total.compute() # ``` # + # your verbose code here # - # Next, repeat this using loops, rather than writing out all the variables. 
# + # your concise code here # + ## verbose version delayed_read_csv = delayed(pd.read_csv) a = delayed_read_csv(filenames[0]) b = delayed_read_csv(filenames[1]) c = delayed_read_csv(filenames[2]) delayed_len = delayed(len) na = delayed_len(a) nb = delayed_len(b) nc = delayed_len(c) delayed_sum = delayed(sum) total = delayed_sum([na, nb, nc]) # %time print(total.compute()) ## concise version csvs = [delayed(pd.read_csv)(fn) for fn in filenames] lens = [delayed(len)(csv) for csv in csvs] total = delayed(sum)(lens) # %time print(total.compute()) # - # **Notes** # # Delayed objects support various operations: # ```python # x2 = x + 1 # ``` # if `x` was a delayed result (like `total`, above), then so is `x2`. Supported operations include arithmetic operators, item or slice selection, attribute access and method calls - essentially anything that could be phrased as a `lambda` expression. # # Operations which are *not* supported include mutation, setter methods, iteration (for) and bool (predicate). # ## Appendix: Further detail and examples # The following examples show that the kinds of things Dask does are not so far removed from normal Python programming when dealing with big data. These examples are **only meant for experts**, typical users can continue with the next notebook in the tutorial. # ### Example 1: simple word count # This directory contains a file called `README.md`. How would you count the number of words in that file? # # The simplest approach would be to load all the data into memory, split on whitespace and count the number of results. Here we use a regular expression to split words. import re splitter = re.compile('\w+') with open('README.md', 'r') as f: data = f.read() result = len(splitter.findall(data)) result # The trouble with this approach is that it does not scale - if the file is very large, it, and the generated list of words, might fill up memory. We can easily avoid that, because we only need a simple sum, and each line is totally independent of the others. Now we evaluate each piece of data and immediately free up the space again, so we could perform this on arbitrarily-large files. Note that there is often a trade-off between time-efficiency and memory footprint: the following uses very little memory, but may be slower for files that do not fill a large faction of memory. In general, one would like chunks small enough not to stress memory, but big enough for efficient use of the CPU. result = 0 with open('README.md', 'r') as f: for line in f: result += len(splitter.findall(line)) result # ### Example 2: background execution # There are many tasks that take a while to complete, but don't actually require much of the CPU, for example anything that requires communication over a network, or input from a user. In typical sequential programming, execution would need to halt while the process completes, and then continue execution. That would be dreadful for a user experience (imagine the slow progress bar that locks up the application and cannot be canceled), and wasteful of time (the CPU could have been doing useful work in the meantime. # # For example, we can launch processes and get their output as follows: # ```python # import subprocess # p = subprocess.Popen(command, stdout=subprocess.PIPE) # p.returncode # ``` # The task is run in a separate process, and the return-code will remain `None` until it completes, when it will change to `0`. To get the result back, we need `out = p.communicate()[0]` (which would block if the process was not complete). 
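
# A concrete, runnable version of that pattern (added here as a small illustration,
# using the current Python interpreter as the child process):

# +
import subprocess
import sys

p = subprocess.Popen([sys.executable, '-c', 'print(6 * 7)'], stdout=subprocess.PIPE)
print(p.returncode)        # None: returncode stays None until the child is waited on
out, _ = p.communicate()   # blocks until the child process is done
print(out.decode().strip(), p.returncode)
# -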
# Similarly, we can launch Python processes and threads in the background. Some methods allow mapping over multiple inputs and gathering the results, more on that later. The thread starts and the cell completes immediately, but the data associated with the download only appears in the queue object some time later.

# +
import threading
import queue
import urllib.request

def get_webdata(url, q):
    u = urllib.request.urlopen(url)
    # raise ValueError
    q.put(u.read())

q = queue.Queue()
t = threading.Thread(target=get_webdata, args=('http://www.google.com', q))
t.start()
# -

# fetch result back into this thread. If the worker thread is not done, this would wait.
q.get()

# Consider: what would you see if there had been an exception within the `get_webdata` function? You could uncomment the `raise` line, above, and re-execute the two cells. What happens? Is there any way to debug the execution to find the root cause of the error?

# ### Example 3: delayed execution
# There are many ways in Python to specify the computation you want to execute, but only run it *later*.

# +
def add(x, y):
    return x + y

# Sometimes we defer computations with strings
x = 15
y = 30
z = "add(x, y)"
eval(z)
# -

# we can use lambda or other "closure"
x = 15
y = 30
z = lambda: add(x, y)
z()

# +
# A very similar thing happens in functools.partial
import functools
z = functools.partial(add, x, y)
z()

# +
# Python generators are delayed execution by default
# Many Python functions expect such iterable objects

def gen():
    res = x
    yield res
    res += y
    yield y

g = gen()
# -

# run once: we get one value and execution halts within the generator
# run again and the execution completes
next(g)

# ### Dask graphs
# Any Dask object, such as `total`, above, has an attribute which describes the calculations necessary to produce that result. Indeed, this is exactly the graph that we have been talking about, which can be visualized. We see that it is a simple dictionary, the keys are unique task identifiers, and the values are the functions and inputs for calculation.
#
# `delayed` is a handy mechanism for creating the Dask graph, but the adventurous may wish to play with the full flexibility afforded by building the graph dictionaries directly. Detailed information can be found [here](http://dask.pydata.org/en/latest/graphs.html).

total.dask

dict(total.dask)
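
# As a tiny illustration of that lower-level interface (an optional aside added here),
# a Dask graph is just a dict mapping keys to values or task tuples, and a scheduler's
# `get` function executes it. `operator.add` is imported under another name so it does
# not shadow the delayed `add` defined earlier in this notebook.

# +
from operator import add as op_add
from dask.threaded import get

dsk = {'x': 1,
       'y': 2,
       'z': (op_add, 'x', 'y'),       # task: add the results of keys 'x' and 'y'
       'w': (sum, ['x', 'y', 'z'])}   # task: sum a list of keys

get(dsk, 'w')   # execute the graph with the threaded scheduler -> 6
# -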
Day_1_Scientific_Python/dask/01x_lazy.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: dev # language: python # name: dev # --- # # Financial Planning with APIs and Simulations # # In this Challenge, you’ll create two financial analysis tools by using a single Jupyter notebook: # # Part 1: A financial planner for emergencies. The members will be able to use this tool to visualize their current savings. The members can then determine if they have enough reserves for an emergency fund. # # Part 2: A financial planner for retirement. This tool will forecast the performance of their retirement portfolio in 30 years. To do this, the tool will make an Alpaca API call via the Alpaca SDK to get historical price data for use in Monte Carlo simulations. # # You’ll use the information from the Monte Carlo simulation to answer questions about the portfolio in your Jupyter notebook. # # # + # Import the required libraries and dependencies import os import requests import json import pandas as pd from dotenv import load_dotenv import alpaca_trade_api as tradeapi from MCForecastTools import MCSimulation # %matplotlib inline # - # Load the environment variables from the .env file #by calling the load_dotenv function load_dotenv() # ## Part 1: Create a Financial Planner for Emergencies # ### Evaluate the Cryptocurrency Wallet by Using the Requests Library # # In this section, you’ll determine the current value of a member’s cryptocurrency wallet. You’ll collect the current prices for the Bitcoin and Ethereum cryptocurrencies by using the Python Requests library. For the prototype, you’ll assume that the member holds the 1.2 Bitcoins (BTC) and 5.3 Ethereum coins (ETH). To do all this, complete the following steps: # # 1. Create a variable named `monthly_income`, and set its value to `12000`. # # 2. Use the Requests library to get the current price (in US dollars) of Bitcoin (BTC) and Ethereum (ETH) by using the API endpoints that the starter code supplies. # # 3. Navigate the JSON response object to access the current price of each coin, and store each in a variable. # # > **Hint** Note the specific identifier for each cryptocurrency in the API JSON response. The Bitcoin identifier is `1`, and the Ethereum identifier is `1027`. # # 4. Calculate the value, in US dollars, of the current amount of each cryptocurrency and of the entire cryptocurrency wallet. # # # The current number of coins for each cryptocurrency asset held in the portfolio. btc_coins = 1.2 eth_coins = 5.3 # #### Step 1: Create a variable named `monthly_income`, and set its value to `12000`. # The monthly amount for the member's household income # YOUR CODE HERE monthly_income = 12000 # #### Review the endpoint URLs for the API calls to Free Crypto API in order to get the current pricing information for both BTC and ETH. # The Free Crypto API Call endpoint URLs for the held cryptocurrency assets btc_url = "https://api.alternative.me/v2/ticker/Bitcoin/?convert=USD" eth_url = "https://api.alternative.me/v2/ticker/Ethereum/?convert=USD" # #### Step 2. Use the Requests library to get the current price (in US dollars) of Bitcoin (BTC) and Ethereum (ETH) by using the API endpoints that the starter code supplied. 
# + # Using the Python requests library, make an API call to access the current price of BTC btc_response = requests.get(btc_url).json() # Use the json.dumps function to review the response data from the API call # Use the indent and sort_keys parameters to make the response object readable # YOUR CODE HERE print(json.dumps(btc_response, indent= 4, sort_keys=True)) # + # Using the Python requests library, make an API call to access the current price ETH eth_response = requests.get(eth_url).json() # Use the json.dumps function to review the response data from the API call # Use the indent and sort_keys parameters to make the response object readable # YOUR CODE HERE print(json.dumps(eth_response, indent= 4, sort_keys=True)) # - # #### Step 3: Navigate the JSON response object to access the current price of each coin, and store each in a variable. # + # Navigate the BTC response object to access the current price of BTC btc_price = btc_response["data"]["1"]["quotes"]["USD"]["price"] # Print the current price of BTC print(f"The current price of Bitcoin is ${btc_price}") # + # Navigate the BTC response object to access the current price of ETH eth_price = eth_response["data"]["1027"]["quotes"]["USD"]["price"] # Print the current price of ETH print(f" The current price of Etherium is ${eth_price}") # - # ### Step 4: Calculate the value, in US dollars, of the current amount of each cryptocurrency and of the entire cryptocurrency wallet. # + # Compute the current value of the BTC holding btc_value = btc_price * btc_coins # Print current value of your holding in BTC print(f"The current value of your BTC is ${btc_value:.2f}") # + # Compute the current value of the ETH holding eth_value = eth_price * eth_coins # Print current value of your holding in ETH print(f"The current value of your ETH is ${eth_value:.2f}") # + # Compute the total value of the cryptocurrency wallet # Add the value of the BTC holding to the value of the ETH holding total_crypto_wallet = btc_value + eth_value # Print current cryptocurrency wallet balance print(f"Your current cryptocurrency wallet balance is ${total_crypto_wallet:.2f}") # - # ### Evaluate the Stock and Bond Holdings by Using the Alpaca SDK # # In this section, you’ll determine the current value of a member’s stock and bond holdings. You’ll make an API call to Alpaca via the Alpaca SDK to get the current closing prices of the SPDR S&P 500 ETF Trust (ticker: SPY) and of the iShares Core US Aggregate Bond ETF (ticker: AGG). For the prototype, assume that the member holds 110 shares of SPY, which represents the stock portion of their portfolio, and 200 shares of AGG, which represents the bond portion. To do all this, complete the following steps: # # 1. In the `Starter_Code` folder, create an environment file (`.env`) to store the values of your Alpaca API key and Alpaca secret key. # # 2. Set the variables for the Alpaca API and secret keys. Using the Alpaca SDK, create the Alpaca `tradeapi.REST` object. In this object, include the parameters for the Alpaca API key, the secret key, and the version number. # # 3. Set the following parameters for the Alpaca API call: # # - `tickers`: Use the tickers for the member’s stock and bond holdings. # # - `timeframe`: Use a time frame of one day. # # - `start_date` and `end_date`: Use the same date for these parameters, and format them with the date of the previous weekday (or `2020-08-07`). This is because you want the one closing price for the most-recent trading day. # # 4. 
Get the current closing prices for `SPY` and `AGG` by using the Alpaca `get_barset` function. Format the response as a Pandas DataFrame by including the `df` property at the end of the `get_barset` function. # # 5. Navigating the Alpaca response DataFrame, select the `SPY` and `AGG` closing prices, and store them as variables. # # 6. Calculate the value, in US dollars, of the current amount of shares in each of the stock and bond portions of the portfolio, and print the results. # # #### Review the total number of shares held in both (SPY) and (AGG). # Current amount of shares held in both the stock (SPY) and bond (AGG) portion of the portfolio. spy_shares = 110 agg_shares = 200 # #### Step 1: In the `Starter_Code` folder, create an environment file (`.env`) to store the values of your Alpaca API key and Alpaca secret key. # #### Step 2: Set the variables for the Alpaca API and secret keys. Using the Alpaca SDK, create the Alpaca `tradeapi.REST` object. In this object, include the parameters for the Alpaca API key, the secret key, and the version number. # + # Set the variables for the Alpaca API and secret keys alpaca_api_key = os.getenv("ALPACA_API_KEY") alpaca_secret_key = os.getenv("ALPACA_SECRET_KEY") # Create the Alpaca tradeapi.REST object alpaca = tradeapi.REST( alpaca_api_key, alpaca_secret_key, api_version="V2") # - # #### Step 3: Set the following parameters for the Alpaca API call: # # - `tickers`: Use the tickers for the member’s stock and bond holdings. # # - `timeframe`: Use a time frame of one day. # # - `start_date` and `end_date`: Use the same date for these parameters, and format them with the date of the previous weekday (or `2020-08-07`). This is because you want the one closing price for the most-recent trading day. # # + # Set the tickers for both the bond and stock portion of the portfolio tickers= ["AGG", "SPY"] # Set timeframe to 1D timeframe= "1D" # Format current date as ISO format # Set both the start and end date at the date of your prior weekday # This will give you the closing price of the previous trading day # Alternatively you can use a start and end date of 2020-08-07 start_date = pd.Timestamp("2020-08-07", tz="America/New_York").isoformat() end_date = pd.Timestamp("2020-08-07", tz="America/New_York").isoformat() # - # #### Step 4: Get the current closing prices for `SPY` and `AGG` by using the Alpaca `get_barset` function. Format the response as a Pandas DataFrame by including the `df` property at the end of the `get_barset` function. # + # Use the Alpaca get_barset function to get current closing prices the portfolio # Be sure to set the `df` property after the function to format the response object as a DataFrame prices_df = alpaca.get_barset( tickers, timeframe, start= start_date, end= end_date, ).df # Review the first 5 rows of the Alpaca DataFrame prices_df.head() # - # #### Step 5: Navigating the Alpaca response DataFrame, select the `SPY` and `AGG` closing prices, and store them as variables. 
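# The cells below wrap the `close` column in `float(...)`, which works because the one-day
# window returns a single row per ticker. As a hedged aside (not part of the assignment), if
# the call ever returned more than one row, taking the most recent close avoids the cast failing:
# +
# Sketch: most recent close per ticker, assuming the (ticker, field) MultiIndex layout
# that get_barset returns
agg_latest_close = prices_df["AGG"]["close"].iloc[-1]
spy_latest_close = prices_df["SPY"]["close"].iloc[-1]
print(agg_latest_close, spy_latest_close)
# -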
# + # Access the closing price for AGG from the Alpaca DataFrame # Converting the value to a floating point number agg_close_price = float(prices_df["AGG"]["close"]) # Print the AGG closing price print(agg_close_price) # + # Access the closing price for SPY from the Alpaca DataFrame # Converting the value to a floating point number spy_close_price = float(prices_df["SPY"]["close"]) # Print the SPY closing price print(spy_close_price) # - # #### Step 6: Calculate the value, in US dollars, of the current amount of shares in each of the stock and bond portions of the portfolio, and print the results. # + # Calculate the current value of the bond portion of the portfolio agg_value = agg_close_price * agg_shares # Print the current value of the bond portfolio print(f"The current value of the bond portfolio is ${agg_value}") # + # Calculate the current value of the stock portion of the portfolio spy_value = spy_close_price * spy_shares # Print the current value of the stock portfolio print(f"The current value of the stock portfolio is ${spy_value}") # + # Calculate the total value of the stock and bond portion of the portfolio total_stocks_bonds = spy_value + agg_value # Print the current balance of the stock and bond portion of the portfolio print(f"The current balance of the stock and bond portfolio is ${total_stocks_bonds}") # + # Calculate the total value of the member's entire savings portfolio # Add the value of the cryptocurrency wallet to the value of the total stocks and bonds total_portfolio = total_crypto_wallet + total_stocks_bonds # Print total portfolio value print(f"The current total balance of your crypto wallet, stock and bonds portfolio is ${total_portfolio:.2f}") # - # ### Evaluate the Emergency Fund # # In this section, you’ll use the valuations for the cryptocurrency wallet and for the stock and bond portions of the portfolio to determine if the credit union member has enough savings to build an emergency fund into their financial plan. To do this, complete the following steps: # # 1. Create a Python list named `savings_data` that has two elements. The first element contains the total value of the cryptocurrency wallet. The second element contains the total value of the stock and bond portions of the portfolio. # # 2. Use the `savings_data` list to create a Pandas DataFrame named `savings_df`, and then display this DataFrame. The function to create the DataFrame should take the following three parameters: # # - `savings_data`: Use the list that you just created. # # - `columns`: Set this parameter equal to a Python list with a single value called `amount`. # # - `index`: Set this parameter equal to a Python list with the values of `crypto` and `stock/bond`. # # 3. Use the `savings_df` DataFrame to plot a pie chart that visualizes the composition of the member’s portfolio. The y-axis of the pie chart uses `amount`. Be sure to add a title. # # 4. Using Python, determine if the current portfolio has enough to create an emergency fund as part of the member’s financial plan. Ideally, an emergency fund should equal three times the member’s monthly income. To do this, implement the following steps: # # 1. Create a variable named `emergency_fund_value`, and set it equal to three times the value of the member’s `monthly_income` of $12000. (You set this earlier in Part 1). # # 2. Create a series of three if statements to determine if the member’s total portfolio is large enough to fund the emergency portfolio: # # 1. 
If the total portfolio value is greater than the emergency fund value, display a message congratulating the member for having enough money in this fund. # # 2. Else if the total portfolio value is equal to the emergency fund value, display a message congratulating the member on reaching this important financial goal. # # 3. Else the total portfolio is less than the emergency fund value, so display a message showing how many dollars away the member is from reaching the goal. (Subtract the total portfolio value from the emergency fund value.) # # #### Step 1: Create a Python list named `savings_data` that has two elements. The first element contains the total value of the cryptocurrency wallet. The second element contains the total value of the stock and bond portions of the portfolio. # + # Consolidate financial assets data into a Python list savings_data = [total_crypto_wallet, total_stocks_bonds] # Review the Python list savings_data savings_data # - # #### Step 2: Use the `savings_data` list to create a Pandas DataFrame named `savings_df`, and then display this DataFrame. The function to create the DataFrame should take the following three parameters: # # - `savings_data`: Use the list that you just created. # # - `columns`: Set this parameter equal to a Python list with a single value called `amount`. # # - `index`: Set this parameter equal to a Python list with the values of `crypto` and `stock/bond`. # # + # Create a Pandas DataFrame called savings_df savings_df = pd.DataFrame(data = savings_data, columns= ["amount"], index= ["crypto", "stock/bond"]) # Display the savings_df DataFrame # YOUR CODE HERE display(savings_df) # - # #### Step 3: Use the `savings_df` DataFrame to plot a pie chart that visualizes the composition of the member’s portfolio. The y-axis of the pie chart uses `amount`. Be sure to add a title. # Plot the total value of the member's portfolio (crypto and stock/bond) in a pie chart # YOUR CODE HERE savings_df.plot.pie(y="amount", title= "Portfolio Buildup") # #### Step 4: Using Python, determine if the current portfolio has enough to create an emergency fund as part of the member’s financial plan. Ideally, an emergency fund should equal to three times the member’s monthly income. To do this, implement the following steps: # # Step 1. Create a variable named `emergency_fund_value`, and set it equal to three times the value of the member’s `monthly_income` of 12000. (You set this earlier in Part 1). # # Step 2. Create a series of three if statements to determine if the member’s total portfolio is large enough to fund the emergency portfolio: # # * If the total portfolio value is greater than the emergency fund value, display a message congratulating the member for having enough money in this fund. # # * Else if the total portfolio value is equal to the emergency fund value, display a message congratulating the member on reaching this important financial goal. # # * Else the total portfolio is less than the emergency fund value, so display a message showing how many dollars away the member is from reaching the goal. (Subtract the total portfolio value from the emergency fund value.) # # ##### Step 4-1: Create a variable named `emergency_fund_value`, and set it equal to three times the value of the member’s `monthly_income` of 12000. (You set this earlier in Part 1). 
# Create a variable named emergency_fund_value emergency_fund_value = monthly_income * 3 # ##### Step 4-2: Create a series of three if statements to determine if the member’s total portfolio is large enough to fund the emergency portfolio: # # * If the total portfolio value is greater than the emergency fund value, display a message congratulating the member for having enough money in this fund. # # * Else if the total portfolio value is equal to the emergency fund value, display a message congratulating the member on reaching this important financial goal. # # * Else the total portfolio is less than the emergency fund value, so display a message showing how many dollars away the member is from reaching the goal. (Subtract the total portfolio value from the emergency fund value.) # Evaluate the possibility of creating an emergency fund with 3 conditions: if total_portfolio > emergency_fund_value: print("Congrats! You have enough money in this fund!") elif total_portfolio == emergency_fund_value: print("Congrats on reaching this important financial goal!") else: print(f"You are ${emergency_fund_value - total_portfolio:.2f} short of your emergency fund goal.") # ## Part 2: Create a Financial Planner for Retirement # ### Create the Monte Carlo Simulation # # In this section, you’ll use the MCForecastTools library to create a Monte Carlo simulation for the member’s savings portfolio. To do this, complete the following steps: # # 1. Make an API call via the Alpaca SDK to get 3 years of historical closing prices for a traditional 60/40 portfolio split: 60% stocks (SPY) and 40% bonds (AGG). # # 2. Run a Monte Carlo simulation of 500 samples and 30 years for the 60/40 portfolio, and then plot the results. The following image shows the overlay line plot resulting from a simulation with these characteristics. However, because a random number generator is used to run each live Monte Carlo simulation, your image will differ slightly from this exact image: # # ![A screenshot depicts the resulting plot.](Images/5-4-monte-carlo-line-plot.png) # # 3. Plot the probability distribution of the Monte Carlo simulation. The following image shows the histogram plot resulting from a simulation with these characteristics. However, because a random number generator is used to run each live Monte Carlo simulation, your image will differ slightly from this exact image: # # ![A screenshot depicts the histogram plot.](Images/5-4-monte-carlo-histogram.png) # # 4. Generate the summary statistics for the Monte Carlo simulation. # # # #### Step 1: Make an API call via the Alpaca SDK to get 3 years of historical closing prices for a traditional 60/40 portfolio split: 60% stocks (SPY) and 40% bonds (AGG). 
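# The next cell hard-codes the three-year window (2017-08-07 to 2020-08-07). As an aside,
# here is a small sketch of deriving the same window dynamically, assuming you want it
# anchored on today's date rather than on the fixed dates used below:
# +
# Hedged sketch: a rolling 3-year window ending today
today = pd.Timestamp.now(tz="America/New_York")
dynamic_start = (today - pd.DateOffset(years=3)).isoformat()
dynamic_end = today.isoformat()
print(dynamic_start, dynamic_end)
# -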
# + # Set start and end dates of 3 years back from your current date # Alternatively, you can use an end date of 2020-08-07 and work 3 years back from that date start_date = "2017-08-07" end_date = "2020-08-07" # Set number of rows to 1000 to retrieve the maximum amount of rows limit_rows = 1000 # + # Use the Alpaca get_barset function to make the API call to get the 3 years' worth of pricing data # The tickers and timeframe parameters should have been set in Part 1 of this activity # The start and end dates should be updated with the information set above # Remember to add the df property to the end of the call so the response is returned as a DataFrame three_year_df = alpaca.get_barset( tickers, timeframe, start= start_date, end= end_date, limit= limit_rows ).df # Display both the first and last five rows of the DataFrame display(three_year_df.head()) display(three_year_df.tail()) # - # #### Step 2: Run a Monte Carlo simulation of 500 samples and 30 years for the 60/40 portfolio, and then plot the results. # + # Configure the Monte Carlo simulation to forecast 30 years cumulative returns # The weights should be split 40% to AGG and 60% to SPY, in the same order as the tickers list ["AGG", "SPY"]. # Run 500 samples. MC_60_40_portfolio = MCSimulation( portfolio_data = three_year_df, weights = [.40,.60], num_simulation = 500, num_trading_days = 252*30, ) # Review the simulation input data MC_60_40_portfolio.portfolio_data # - # Run the Monte Carlo simulation to forecast 30 years cumulative returns MC_60_40_portfolio.calc_cumulative_return() # Visualize the 30-year Monte Carlo simulation by creating an # overlay line plot MC_60_40_portfolio.plot_simulation() # #### Step 3: Plot the probability distribution of the Monte Carlo simulation. # Visualize the probability distribution of the 30-year Monte Carlo simulation # by plotting a histogram MC_60_40_portfolio.plot_distribution() # #### Step 4: Generate the summary statistics for the Monte Carlo simulation. # + # Generate summary statistics from the 30-year Monte Carlo simulation results # Save the results as a variable MC_60_40_summary_table = MC_60_40_portfolio.summarize_cumulative_return() # Review the 30-year Monte Carlo summary statistics MC_60_40_summary_table # - # ### Analyze the Retirement Portfolio Forecasts # # Using the current value of only the stock and bond portion of the member's portfolio and the summary statistics that you generated from the Monte Carlo simulation, answer the following question in your Jupyter notebook: # # - What are the lower and upper bounds for the expected value of the portfolio with a 95% confidence interval? 
# Lower: $151,672.71 (2.499159 * 60689.5) # Upper: $1,060,906.11 (17.480884 * 60689.5) # # Print the current balance of the stock and bond portion of the member's portfolio print(f"The current value of the stock portfolio is ${spy_value}") print(f"The current value of the bond portfolio is ${agg_value}") print(f"The current balance of the stock and bond portfolio is ${total_stocks_bonds}") # + # Use the lower and upper `95%` confidence intervals to calculate the range of the possible outcomes for the current stock/bond portfolio ci_lower_thirty_cumulative_return = round(MC_60_40_summary_table[8]*total_stocks_bonds,2) ci_upper_thirty_cumulative_return = round(MC_60_40_summary_table[9]*total_stocks_bonds,2) # Print the result of your calculations print(f"There is a 95% chance that an initial investment of ${total_stocks_bonds} in the portfolio" f" with a 60% weight in the S&P 500 and 40% in AGG over the following 30 years will land within the range of" f" ${ci_lower_thirty_cumulative_return} and ${ci_upper_thirty_cumulative_return}.") # - # ### Forecast Cumulative Returns in 10 Years # # The CTO of the credit union is impressed with your work on these planning tools but wonders if 30 years is a long time to wait until retirement. So, your next task is to adjust the retirement portfolio and run a new Monte Carlo simulation to find out if the changes will allow members to retire earlier. # # For this new Monte Carlo simulation, do the following: # # - Forecast the cumulative returns for 10 years from now. Because of the shortened investment horizon (30 years to 10 years), the portfolio needs to invest more heavily in the riskier asset&mdash;that is, stock&mdash;to help accumulate wealth for retirement. # # - Adjust the weights of the retirement portfolio so that the composition for the Monte Carlo simulation consists of 20% bonds and 80% stocks. # # - Run the simulation over 500 samples, and use the same data that the API call to Alpaca generated. # # - Based on the new Monte Carlo simulation, answer the following questions in your Jupyter notebook: # # - Using the current value of only the stock and bond portion of the member's portfolio and the summary statistics that you generated from the new Monte Carlo simulation, what are the lower and upper bounds for the expected value of the portfolio (with the new weights) with a 95% confidence interval? # # - Will weighting the portfolio more heavily toward stocks allow the credit union members to retire after only 10 years? # # + # Configure a Monte Carlo simulation to forecast 10 years cumulative returns # The weights should be split 20% to AGG and 80% to SPY, in the same order as the tickers list ["AGG", "SPY"]. # Run 500 samples. 
MC_80_20_portfolio = MCSimulation( portfolio_data = three_year_df, weights = [.20,.80], num_simulation = 500, num_trading_days = 252*10, ) # Review the simulation input data MC_80_20_portfolio.portfolio_data # - # Run the Monte Carlo simulation to forecast 10 years cumulative returns MC_80_20_portfolio.calc_cumulative_return() # Visualize the 10-year Monte Carlo simulation by creating an # overlay line plot MC_80_20_portfolio.plot_simulation() # Visualize the probability distribution of the 10-year Monte Carlo simulation # by plotting a histogram MC_80_20_portfolio.plot_distribution() # + # Generate summary statistics from the 10-year Monte Carlo simulation results # Save the results as a variable MC_80_20_summary_table = MC_80_20_portfolio.summarize_cumulative_return() # Review the 10-year Monte Carlo summary statistics MC_80_20_summary_table # - # ### Answer the following questions: # #### Question: Using the current value of only the stock and bond portion of the member's portfolio and the summary statistics that you generated from the new Monte Carlo simulation, what are the lower and upper bounds for the expected value of the portfolio (with the new weights) with a 95% confidence interval? # # Lower: $58,451.39 (0.963122 * 60689.5) # Upper: $123,664.08 (2.037652 * 60689.5) # Print the current balance of the stock and bond portion of the member's portfolio print(f"The current value of the stock portfolio is ${spy_value}") print(f"The current value of the bond portfolio is ${agg_value}") print(f"The current balance of the stock and bond portfolio is ${total_stocks_bonds}") # + # Use the lower and upper `95%` confidence intervals to calculate the range of the possible outcomes for the current stock/bond portfolio ci_lower_ten_cumulative_return = round(MC_80_20_summary_table[8]*total_stocks_bonds,2) ci_upper_ten_cumulative_return = round(MC_80_20_summary_table[9]*total_stocks_bonds,2) # Print the result of your calculations # YOUR CODE HERE print(f"There is a 95% chance that an initial investment of ${total_stocks_bonds} in the portfolio" f" with an 80% weight in the S&P 500 and 20% in AGG over the next 10 years will end within the range of" f" ${ci_lower_ten_cumulative_return} and ${ci_upper_ten_cumulative_return}.") # - # #### Question: Will weighting the portfolio more heavily toward stocks allow the credit union members to retire after only 10 years? # + active="" # **Answer** # No, there is still not enough yield to retire after only 10 years, despite the more aggressive strategy, given the cost of living in California.
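# A closing note on the two confidence-interval cells above: they read the summary Series by
# position (`[8]` and `[9]`). If MCForecastTools labels those rows "95% CI Lower" and
# "95% CI Upper" (an assumption about this helper library; check the printed summary table),
# label-based access is less brittle:
# +
# Hedged sketch: label-based lookup of the confidence-interval rows
ci_lower_by_label = round(MC_80_20_summary_table.loc["95% CI Lower"] * total_stocks_bonds, 2)
ci_upper_by_label = round(MC_80_20_summary_table.loc["95% CI Upper"] * total_stocks_bonds, 2)
print(ci_lower_by_label, ci_upper_by_label)
# -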
financial_planning_tools.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # 23. K-Means Clustering # # [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/rhennig/EMA6938/blob/main/Notebooks/23.K-MeansClustering-MP.ipynb) # # (Based on https://medium.com/@arifromadhan19/step-by-step-to-understanding-k-means-clustering-and-implementation-with-sklearn-b55803f519d6) # # In this notebook, we will apply an unsupervised machine learning method for clustering, called K-Means Clustering. # ### Eight Steps of K-Means Clustering # # <img src="https://github.com/rhennig/EMA6938/blob/main/Notebooks/Figures/K-MeansClustering.png?raw=1" alt="K-Means Clustering Algorithm" align="center" style="width:500px; float:center"/> # ### Create a Dataset # # Before we apply the clustering technique, we create a dataset of materials with experimental bandgaps. # + import warnings warnings.filterwarnings("ignore") import pandas as pd import numpy as np from sklearn import decomposition from matminer.datasets import load_dataset from matminer.featurizers.base import MultipleFeaturizer from matminer.featurizers import composition as cf from matminer.featurizers.conversions import StrToComposition import matplotlib.pyplot as plt plt.rc('xtick', labelsize=15) plt.rc('ytick', labelsize=15) data = load_dataset("expt_gap") data.describe() # - data.head() # How many unique compositions do we have? data["formula"].unique().shape[0] # + # Sort by size of bandgap data = data.sort_values('gap expt') # Remove duplicate compositions data = data.drop_duplicates('formula') data.describe() # - # ### Obtain a Feature Vector for Each Material # # The first step in building a machine learning model is to convert the raw materials data, here the composition, into the required input for an ML model: a finite list of quantitative attributes. Here we use the Magpie descriptors from Ward et al. 
# + feature_calculators = MultipleFeaturizer([cf.Stoichiometry(), cf.ElementProperty.from_preset("magpie"), cf.ValenceOrbital(props=['avg']), cf.IonProperty(fast=True)]) # Get the feature names feature_labels = feature_calculators.feature_labels() # Compute the features for all materials entries data = StrToComposition(target_col_id='composition_obj').featurize_dataframe(data, 'formula') data = feature_calculators.featurize_dataframe(data, col_id='composition_obj', ignore_errors=True); # - print('Generated %d features'%len(feature_labels)) print('Training set size:', 'x'.join([str(x) for x in data[feature_labels].shape])) print('Feature labels', feature_labels) # Retain only numerical values data_num = data.select_dtypes([np.number]) data_num.head(10) data_num.describe() # + # Drop the columns that include incomplete data data_num = data_num.dropna(axis=0) data_num.describe() # + # Standardizing the features from sklearn.preprocessing import StandardScaler data_std = StandardScaler().fit_transform(data_num) # Principal component analysis to project onto first two principal components from sklearn.decomposition import PCA # Plot explained variance pca = PCA() pca.fit(data_std) plt.plot(np.cumsum(pca.explained_variance_ratio_)) plt.xlabel('number of components') plt.ylabel('cumulative explained variance'); # - # Project onto two principal components pca = PCA(n_components=2) pca.fit(data_std) principalComponents = pca.fit_transform(data_std) data_pca = pd.DataFrame(data = principalComponents , columns = ['principal component 1', 'principal component 2']) # + fig = plt.figure(figsize = (8,8)) ax = fig.add_subplot(1,1,1) ax.set_xlabel('Principal Component 1', fontsize = 15) ax.set_ylabel('Principal Component 2', fontsize = 15) ax.set_title('2 component PCA', fontsize = 20) ax.scatter(data_pca['principal component 1'], data_pca['principal component 2'] , c = 'blue' , s = 50) ax.grid() # + # implementing k-means clustering from sklearn import cluster kmeans_data = cluster.KMeans(n_clusters=4, max_iter=300, init='k-means++',n_init=10).fit_predict(data_num) # Plot clusters fig = plt.figure(figsize = (8,8)) ax = fig.add_subplot(1,1,1) ax.set_xlabel('Principal Component 1', fontsize = 15) ax.set_ylabel('Principal Component 2', fontsize = 15) ax.set_title('2 component PCA', fontsize = 20) ax.scatter(data_pca['principal component 1'], data_pca['principal component 2'], c = kmeans_data, s = 10) plt.show() # -
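# The clustering above fixes `n_clusters=4` without justification. As an aside, a short
# elbow-method sketch (it reuses `data_num`, the same features passed to KMeans above, and
# sklearn's `inertia_` attribute) can help choose a reasonable number of clusters:
# +
# Hedged sketch: plot the within-cluster sum of squares (inertia) against k
inertias = []
k_values = range(2, 11)
for k in k_values:
    km = cluster.KMeans(n_clusters=k, init='k-means++', n_init=10, max_iter=300)
    km.fit(data_num)
    inertias.append(km.inertia_)

plt.figure(figsize=(6, 4))
plt.plot(list(k_values), inertias, marker='o')
plt.xlabel('number of clusters k')
plt.ylabel('inertia')
plt.title('Elbow plot for choosing k')
plt.show()
# -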
Notebooks/23.K-MeansClustering-MP.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/StanleyLiangYork/crawlers/blob/master/crawler.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="7LJiFyLhoUhv" colab_type="text" # ## demonstrate how to crawl a single webpage to extract useful information and statistics # + id="_tvCzrXOtEso" colab_type="code" colab={} import requests # + id="SCVKIL9EtOzY" colab_type="code" colab={} response = requests.get('https://www.herbshealth.ca/about/') # + id="ou1rKAi_tuDg" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="96422fc6-daf9-4d27-dffb-99b36fa18cc7" response.status_code # + id="NNhYRlhhtyzN" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 124} outputId="8c3a29a5-7073-421d-cd54-46bf321722b5" response.text # + id="PbuVGTVt0Rsp" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 159} outputId="012b1d54-37e8-480e-8cf8-870923c6cff4" from bs4 import BeautifulSoup import re regex = re.compile('figcaption') soup = BeautifulSoup(response.text, 'lxml') pattern = soup.find_all(regex) for item in pattern: print(item.text) # + [markdown] id="1yM5Xojp1V0i" colab_type="text" # Get all the titles from a new page (with AJAX script) # + id="yhK5lfpp3kWJ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 941} outputId="6b8fd353-b8d2-4560-e7f7-57310380b856" import requests from bs4 import BeautifulSoup import re headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36'} r = requests.get('https://feed.mix.sina.com.cn/api/roll/get?pageid=153&lid=2509&k=&num=50&page=1&r=0.8915410555425298&callback=jQuery111206207858513920999_1596683842058&_=1596683842060', headers = headers) # encode the non-English characters into unicode text = r.text.encode('utf-8').decode('unicode-escape') # get all the titles of the news # use https://regex101.com/ to verify and group your regular expression regex = re.compile('"title":"(.*?)"') p = re.findall(regex,text) for item in p: print(item) # + [markdown] id="hboONgF31jk8" colab_type="text" # Get all the comments on a book review and compute the average rating # + id="eWXlrdXfQVzO" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 535} outputId="791b86ca-ee36-4d38-c339-7b070153af91" # book page: https://book.douban.com/subject/34990157/?icn=index-latestbook-subject # find the book comment page # specify the browser in the header to decode the response headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36'} r = requests.get('https://book.douban.com/subject/34990157/comments/', headers=headers) soup = BeautifulSoup(r.text, 'lxml') title = soup.find_all('title') title = title[0].string.split()[0] pattern = soup.find_all('span','short') print(f'Book title: {title}\n') # the pattern is a set of bs4 Tag element, use .string to get the text for item in pattern: print(item.string) pattern_s = re.compile('<span class="user-stars allstar(.*?) 
rating"') p = re.findall(pattern_s, r.text) s = 0 for star in p: s += int(star) print(f'Total rating score {s}') print('Average rating score {:.2f}'.format(float(s/len(p)))) # + [markdown] id="jP4JlOdDNarB" colab_type="text" # Get the DOW JONES INDU AVERAGE NDX information: <p> # company code, company name, price <p> # list them in a table # + id="q9Zf8LyJNmWS" colab_type="code" colab={} headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36'} response = requests.get('https://money.cnn.com/data/markets/dow/', headers=headers) #use non-greedy algorithm (.*?) to restrict the search in one table regex = re.compile('<h3>Gainers</h3>(<table\s.*?<th>.*?</th>.*?</table>)') # first get the whole table - it can be more than one table on the page table_text = re.findall(regex, response.text) table_text = table_text[0] # list to string # + id="vXTFVmWisn7r" colab_type="code" colab={} regex_h = re.compile('<th.*?>(.*?)</th>') p_header = re.findall(regex_h, table_text) # + id="D4PIR3f74OBJ" colab_type="code" colab={} regex_data = re.compile('<tr><td.*?>.*?>(.*?)<.*?<span.*?>(.*?)</span>.*?<span.*?>(\d{1,4}\.\d{2})</span>.*?<span.*?>([+-]\d{1,2}\.\d{2})</span>.*?<span.*?>([+-]\d{1,2}\.\d{2}%)') data = re.findall(regex_data, table_text) # + id="stecfGlWI4Y3" colab_type="code" colab={} p_header.insert(0, 'Acronym') # + id="VsD7AgvqJnfo" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="a96dcb03-e4f5-4dd0-eec2-41e1bd765590" (p_header) # + id="5xmkHjOxI_7C" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 159} outputId="794ef3aa-5181-4555-97b5-288c77e79ee1" data # + id="H50qFdYiJ9RF" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 291} outputId="50d3f920-0bf5-42c0-de94-fb05129c4ee7" import pandas as pd data_table = pd.DataFrame(data=data, columns=p_header) data_table # + [markdown] id="Yeuc1kKJ1il8" colab_type="text" # Get the statistics data from the volleyball league website: https://www.volleyball.world/en/vnl/women/statistics<p> # Store in a pandas DataFrame and export into a CSV file # + id="jnuL6URs1uRP" colab_type="code" colab={} import pandas as pd headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36'} response = requests.get('https://www.volleyball.world/en/women/statistics', headers=headers) # + id="EUusatns2qik" colab_type="code" colab={} # get the table header # the header can be different in different tables text = response.text.strip().replace("\n"," ") # + id="UgLU-hcp4HHh" colab_type="code" colab={} regex = re.compile('<a\s.*>Best Receivers.*?<th>(.*?)</th><th>(.*?)</th>') p = re.findall(regex, text) # + id="Uj6AvWXB4wQC" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="4b205123-7d98-455f-dcaa-4c8a7c58f4c4" p # + [markdown] id="HS-0_8-55p5b" colab_type="text" # Compute all types of score for a movie review # + id="vM7eg8KDoTgi" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 124} outputId="460c280a-48d5-4286-8db1-cd76cdd047b6" import json import re import numpy as np headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36'} response = requests.get('https://www.rottentomatoes.com/m/an_american_pickle', headers=headers) soup = BeautifulSoup(response.text, 'lxml') result = 
soup.find_all('title') print(f'Movie title: {result[0].string}') # use Regex NOT the Soup to find the score # given pattern: "avgScore":5.84,"freshCount":15,"numberOfReviews":20,"rottenCount":5,"score":75, pattern_s = re.compile('\"([A-z]*Score)\":(\d+\.*\d{1,2})') # group-0 for score name, group-1 for value p = re.findall(pattern_s, response.text) # scores = np.empty([1,len(p)]) scores_dict = dict() names = [] for item in p: names.append(item[0]) score_type = set(names) for item in score_type: scores_dict[item] = list() for item in p: scores_dict[item[0]].append(float(item[1])) for name, scores in scores_dict.items(): s = np.array(scores) avg = np.average(s, axis=-1) print("{:20s}: {:.3f}".format(name, avg)) # + [markdown] id="J_viga1I8Uw9" colab_type="text" # Get the first 10 comments on a particular book, compute the average score and frequency # + id="vggGXAsa8jh-" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 552} outputId="a30d844f-5048-4d83-f03f-448e18db0710" headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36'} r = requests.get('https://book.douban.com/subject/10594787/comments', headers=headers) soup = BeautifulSoup(r.text, 'lxml') title = soup.find_all('title')[0].string pattern = soup.find_all('span','short') print(f'Book title: {title}\n') for item in pattern: print(item.string) regex_p = re.compile('<span class="user-stars allstar(.*?) rating" title="(.*?)">') result = re.findall(regex_p, r.text) star = 0 comment = dict() rating = [] for i in range(10): star += (int(result[i][0])) rating.append(result[i][1]) print('Average score: {:.2f}'.format(star/10)) rating = set(rating) for item in rating: comment[item] = 0 for i in range(10): comment[result[i][1]] +=1 print() for k, v in comment.items(): print(f'{k}:{v}', end=' ') # + id="IfPRPSrRBpaV" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 464} outputId="299cc834-9ecc-4638-c59d-6505b8215cc9" for item in pattern: print(item.string) regex_p = re.compile('<span class="user-stars allstar(.*?) rating" title="(.*?)">') result = re.findall(regex_p, r.text) star = 0 comment = dict() rating = [] for i in range(10): star += (int(result[i][0])) rating.append(result[i][1]) print('Average score: {:.2f}'.format(star/10)) rating = set(rating) for item in rating: comment[item] = 0 for i in range(10): comment[result[i][1]] +=1 for k, v in comment.items(): print(f'{k}:{v}', end=' ') # + [markdown] id="ndWQkFNBwFOV" colab_type="text" # get and save an image # + id="gkujzaUkt9bt" colab_type="code" colab={} r = requests.get('https://github.githubassets.com/images/modules/logos_page/Octocat.png') with open('logo.png', 'wb') as fp: fp.write(r.content) # + id="4YmY7RUvupSg" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 287} outputId="80dbc024-9336-4143-8285-a66cd8ea5875" import matplotlib.pyplot as plt from cv2 import imread, imshow img = imread('logo.png') plt.imshow(img)
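# Going back to the volleyball statistics section above: it stops after extracting the two
# header cells and never builds the promised DataFrame or CSV file. A hedged sketch of that
# missing step follows; the row regex is illustrative (the real table markup may differ), so
# treat it as a starting point rather than a working scraper.
# +
# Hypothetical finishing step for the volleyball "Best Receivers" table
row_regex = re.compile(r'<td.*?>(.*?)</td>\s*<td.*?>(.*?)</td>')
rows = re.findall(row_regex, text)  # `text` still holds the volleyball page source
columns = list(p[0]) if p else ['Shirt Name', 'Team']  # header cells found earlier (assumed)
stats_table = pd.DataFrame(rows, columns=columns)
stats_table.to_csv('best_receivers.csv', index=False)
stats_table.head()
# -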
crawler.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <div style="background-color: #FF3CAC; background-image: linear-gradient(225deg, #FF3CAC 0%, #784BA0 50%, #2B86C5 100%); padding: 20px; border-radius: 10px; display:flex; flex-direction: row;"> # <img style="width: auto; height: 80px; margin: 0; object-fit: contain;" src="https://www.freepnglogos.com/uploads/google-play-png-logo/google-changes-play-store-png-logo-0.png"/> # <div style="flex: 1"> # <h1 style="color:white;">Android Play Store Dataset Cleaning</h1> # <strong><a style="color:white" href="https://www.kaggle.com/lava18/google-play-store-apps">Dataset link</a></strong> # </div> # </div> # #### Overview and Motivation # # This EDA project aims to discover patterns that lead to a successful application on the Google Play Store. This will be done by analyzing the historical data collected from the Google Play Store as of 03 Feb 2019, according to the dataset source. We hypothesize that there are patterns within the data that lead to a successful app. We are trying to find the successful patterns in order to develop an app that may be in the high ranks one day, and to help ad providers know which apps to post their ads on. This will be done by initializing a machine learning model after the data is cleaned, that, when given features about an arbitrary app, gives an estimated rating for that app. # #### Sources we used as aid: # # - Course Material: https://github.com/mervatkheir/CSEN1095-Data-Engineering # - How to Calculate Correlation Between Variables in Python: https://machinelearningmastery.com/how-to-use-correlation-to-understand-the-relationship-between-variables/ # - A Beginners Guide to Random Forest Regression: https://medium.com/datadriveninvestor/random-forest-regression-9871bc9a25eb # - seaborn cheat sheet: https://s3.amazonaws.com/assets.datacamp.com/blog_assets/Python_Seaborn_Cheat_Sheet.pdf # #### Dataset columns description # # - **App:** Application name # - **Category:** Category the app belongs to # - **Rating:** Overall user rating of the app (as when scraped) # - **Reviews:** Number of user reviews for the app (as when scraped) # - **Size:** Size of the app (as when scraped) # - **Installs:** Number of user downloads/installs for the app (as when scraped) # - **Type:** Paid or Free # - **Price:** Price of the app (as when scraped) # - **Content Rating:** Age group the app is targeted at - Children / Mature 21+ / Adult # - **Genres:** An app can belong to multiple genres (apart from its main category). For example, a musical family game will belong to Music, Game, Family genres. # - **Last Updated:** Date when the app was last updated on Play Store (as when scraped) # - **Current Ver:** Current version of the app available on Play Store (as when scraped) # - **Android Ver:** Min required Android version (as when scraped) import numpy as np import pandas as pd import matplotlib.pyplot as plt from scipy.stats import spearmanr import seaborn as sns from sklearn.ensemble import RandomForestRegressor from sklearn.preprocessing import MinMaxScaler from sklearn.model_selection import train_test_split from sklearn.metrics import mean_squared_error # <div style="background-color: #FF3CAC; background-image: linear-gradient(225deg, #FF3CAC 0%, #784BA0 50%, #2B86C5 100%); padding: 15px; border-radius: 10px"> # <h2 style="color:white;">1. 
Dataset Reading</h2> # </div> raw_data = pd.read_csv("data/googleplaystore.csv") data = raw_data.copy() data.head() # <div style="background-color: #FF3CAC; background-image: linear-gradient(225deg, #FF3CAC 0%, #784BA0 50%, #2B86C5 100%); padding: 15px; border-radius: 10px"> # <h2 style="color:white;">2. Dataset Discovery</h2> # <strong style="color:white">Discovering the dataset to get a notion of what the attributes describe.</strong> # </div> data.count() data.describe() data.dtypes # <div style="background-color: #FF3CAC; background-image: linear-gradient(225deg, #FF3CAC 0%, #784BA0 50%, #2B86C5 100%); padding: 15px; border-radius: 10px"> # <h2 style="color:white;">3. Dataset Pre-processing</h2> # <strong style="color:white">Resolving issues that may interfere with the ML model accuracy or the analysis process.</strong> # </div> # <div style="background-color: #FF3CAC; background-image: linear-gradient(225deg, #FF3CAC 0%, #784BA0 50%, #2B86C5 100%); padding: 15px; border-radius: 10px"> # <h2 style="color:white;">3.1 Checking for Missing values</h2> # </div> data.isnull().sum(0) data[data["Rating"].isnull()].head() data.dropna(axis=0, subset=["Current Ver", "Android Ver", "Type", "Content Rating"], inplace=True) data = data.copy() # <div style="background-image: linear-gradient(to right, #f78ca0 0%, #f9748f 19%, #fd868c 60%, #fe9a8b 100%); padding: 20px; border-radius: 15px"> # <p style="color:white">There are no missing reviews. However, there are missing rating values. The Google Play Store forces you to rate an app if you're writing a review.<strong> Something must have gone wrong during data scraping for the rating values to be missing.</strong> We decided to impute the values using regression.</p> # </div> # <div style="background-color: #FF3CAC; background-image: linear-gradient(225deg, #FF3CAC 0%, #784BA0 50%, #2B86C5 100%); padding: 15px; border-radius: 10px"> # <h2 style="color:white;">3.2 Checking for duplicate values</h2> # </div> data.duplicated().value_counts() data.drop_duplicates(inplace=True) data["App"].duplicated().value_counts() duplicated_app = data[data["App"].duplicated()].sort_values("App") # <div style="background-image: linear-gradient(to right, #f78ca0 0%, #f9748f 19%, #fd868c 60%, #fe9a8b 100%); padding: 20px; border-radius: 15px"> # <p style="color:white"><strong>We found that there are still some duplicate values based on the app name. After further investigation we found that the difference is the number of reviews. 
So, we dropped the duplicate rows and chose the row with the highest number of reviews to keep.</strong></p> # <p style="color:white"><strong>We first had to transform Reviews into a numeric column type and fix string values.</strong></p> # </div> replacement = dict(M=10**6) data["Reviews"] = data["Reviews"].replace(replacement, regex=True).astype(int) data = data.copy() ordered_slice = data.sort_values(['App', 'Reviews'], ascending=[False, False]) data = ordered_slice.drop_duplicates(["App"]) data["App"].duplicated().value_counts() # <div style="background-color: #FF3CAC; background-image: linear-gradient(225deg, #FF3CAC 0%, #784BA0 50%, #2B86C5 100%); padding: 15px; border-radius: 10px"> # <h2 style="color:white;">3.3 Data Cleaning</h2> # <ol style="color: white; font-weight: bold;"> # <li>Change all <em>size</em> values to MB with a numeric type</li> # <li>Remove all + signs from installs and change the column type into a numeric type</li> # <li>Remove Dollar values from price column</li> # <li>Change formatting and type of column date</li> # </ol> # </div> # <div style="background-color: #FF3CAC; background-image: linear-gradient(225deg, #FF3CAC 0%, #784BA0 50%, #2B86C5 100%); padding: 15px; border-radius: 10px"> # <h3 style="color:white;">3.3.1 Change all <em>size</em> values to MB with a numeric type</h3> # </div> data["Size"].str.contains("[Mk+]").value_counts() # <div style="background-image: linear-gradient(to right, #f78ca0 0%, #f9748f 19%, #fd868c 60%, #fe9a8b 100%); padding: 20px; border-radius: 15px"> # <p style="color:white">Upon further investigation to row 10472 we found it to have a number of values that don't make sense. <strong>Thus, we decided to drop it.</strong></p> # </div> data[data["Size"] == "Varies with device"].shape # <div style="background-image: linear-gradient(to right, #f78ca0 0%, #f9748f 19%, #fd868c 60%, #fe9a8b 100%); padding: 20px; border-radius: 15px"> # <p style="color:white">Values of size are either in KB or in MB. There are however string values such as <em>varies with device</em>. 
There are apps that have sizes varying per device as they follow the AAB format not the APK one.<strong> We'll change all KB values to MB and <em>varies with device</em> to zero.</strong></p> # </div> data.Size.replace("Varies with device", "0", inplace=True) data = data.copy() replacement = dict(M=1, k=0.001) data["Size"] = data["Size"].replace("[Mk]", "", regex=True).astype(float) * data["Size"].replace(replacement, regex=True).fillna(1).astype(float) # <div style="background-color: #FF3CAC; background-image: linear-gradient(225deg, #FF3CAC 0%, #784BA0 50%, #2B86C5 100%); padding: 15px; border-radius: 10px"> # <h3 style="color:white;">3.3.2 Remove all + signs from <em>installs</em> and change the column type into a numeric type</h3> # </div> data["Installs"] = data["Installs"].str.replace("+", "") data["Installs"] = data["Installs"].str.replace(",", "").astype(int) # <div style="background-color: #FF3CAC; background-image: linear-gradient(225deg, #FF3CAC 0%, #784BA0 50%, #2B86C5 100%); padding: 15px; border-radius: 10px"> # <h3 style="color:white;">3.3.3 Remove Dollar values from price column</h3> # </div> data["Price"].unique() data["Price"] = data["Price"].str.replace("$", "").astype(float) # <div style="background-color: #FF3CAC; background-image: linear-gradient(225deg, #FF3CAC 0%, #784BA0 50%, #2B86C5 100%); padding: 15px; border-radius: 10px"> # <h3 style="color:white;">3.3.4 Change formatting and type of column date</h3> # </div> date_expression = "(January|February|March|April|May|June|July|August|September|October|November|December)\s+\d{1,2},\s+\d{4}" data["Last Updated"].str.contains(date_expression, regex=False).value_counts() data["Last Updated"] = pd.to_datetime(data['Last Updated'], format='%B %d, %Y') # <div style="background-color: #FF3CAC; background-image: linear-gradient(225deg, #FF3CAC 0%, #784BA0 50%, #2B86C5 100%); padding: 15px; border-radius: 10px"> # <h2 style="color:white;">3.4 Data Transformation</h2> # <p style="color: white;">Adding or transforming features to the dataset</p> # <ol style="color: white; font-weight: bold;"> # <li>Perform numeric transformation for Category feature</li> # <li>Perform numeric transformation for Content Rating feature</li> # <li>Perform binary transformation for Type feature</li> # <li>Perform numeric transformation for Genres feature</li> # <li>Normalize Size feature</li> # <li>Normalize Installs feature</li> # <li>Normalize Reviews feature</li> # </ol> # </div> # <div style="background-color: #FF3CAC; background-image: linear-gradient(225deg, #FF3CAC 0%, #784BA0 50%, #2B86C5 100%); padding: 15px; border-radius: 10px"> # <h3 style="color:white;">3.4.1 Perform numeric transformation for Category feature</h3> # </div> data["Category"] = data["Category"].astype('category') category_ids = dict(enumerate(data['Category'].cat.categories)) category_ids data["category_numeric"] = data["Category"].cat.codes # <div style="background-color: #FF3CAC; background-image: linear-gradient(225deg, #FF3CAC 0%, #784BA0 50%, #2B86C5 100%); padding: 15px; border-radius: 10px"> # <h3 style="color:white;">3.4.2 Perform numeric transformation for Content Rating feature</h3> # </div> data["Content Rating"].unique() data["Content Rating"] = data["Content Rating"].astype("category") content_rating_ids = dict(enumerate(data["Content Rating"].cat.categories)) content_rating_ids data["content_rating_numeric"] = data["Content Rating"].cat.codes # <div style="background-color: #FF3CAC; background-image: linear-gradient(225deg, #FF3CAC 0%, #784BA0 50%, #2B86C5 
100%); padding: 15px; border-radius: 10px"> # <h3 style="color:white;">3.4.3 Perform binary transformation for Type feature</h3> # </div> data["Type"].unique() data["Type"] = data["Type"].astype("category") type_ids = dict(enumerate(data["Type"].cat.categories)) type_ids data["type_numeric"] = data["Type"].cat.codes # <div style="background-color: #FF3CAC; background-image: linear-gradient(225deg, #FF3CAC 0%, #784BA0 50%, #2B86C5 100%); padding: 15px; border-radius: 10px"> # <h3 style="color:white;">3.4.4 Perform binary transformation for Genres feature</h3> # </div> data["Genres"] = data["Genres"].astype("category") genre_ids = dict(enumerate(data["Genres"].cat.categories)) genre_ids data["genres_numeric"] = data["Genres"].cat.codes # <div style="background-color: #FF3CAC; background-image: linear-gradient(225deg, #FF3CAC 0%, #784BA0 50%, #2B86C5 100%); padding: 15px; border-radius: 10px"> # <h3 style="color:white;">3.4.5 Normalize Size feature</h3> # </div> scaler = MinMaxScaler() data["normalized_size"] = scaler.fit_transform(data["Size"].values.reshape(-1, 1)) # <div style="background-color: #FF3CAC; background-image: linear-gradient(225deg, #FF3CAC 0%, #784BA0 50%, #2B86C5 100%); padding: 15px; border-radius: 10px"> # <h3 style="color:white;">3.4.6 Normalize Installs feature</h3> # </div> scaler = MinMaxScaler() data["normalized_installs"] = scaler.fit_transform(data[["Installs"]]) # <div style="background-color: #FF3CAC; background-image: linear-gradient(225deg, #FF3CAC 0%, #784BA0 50%, #2B86C5 100%); padding: 15px; border-radius: 10px"> # <h3 style="color:white;">3.4.7 Normalize Reviews feature</h3> # </div> scaler = MinMaxScaler() data["normalized_reviews"] = scaler.fit_transform(data["Reviews"].values.reshape(-1, 1)) # <div style="background-color: #FF3CAC; background-image: linear-gradient(225deg, #FF3CAC 0%, #784BA0 50%, #2B86C5 100%); padding: 15px; border-radius: 10px"> # <h2 style="color:white;">4. Data Imputation</h2> # <strong style="color:white">Imputing missing Rating values</strong> # </div> # <div style="background-color: #FF3CAC; background-image: linear-gradient(225deg, #FF3CAC 0%, #784BA0 50%, #2B86C5 100%); padding: 15px; border-radius: 10px"> # <h2 style="color:white;">4.1 Rating Prediction ML Model</h2> # </div> # <div style="background-image: linear-gradient(to right, #f78ca0 0%, #f9748f 19%, #fd868c 60%, #fe9a8b 100%); padding: 20px; border-radius: 15px"> # <p style="color:white">Features used in the machine learning model are all normalized or scaled. 
This avoids misleading the ML model with features on very different ranges.</p> # </div> # + rating_not_missing = data[data["Rating"].notnull()] include = ["category_numeric", "genres_numeric", "normalized_reviews", "normalized_size", "normalized_installs", "type_numeric"] random_forest = RandomForestRegressor(n_estimators=5) x = rating_not_missing[include] y = rating_not_missing["Rating"] x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.30, random_state=20) random_forest.fit(x_train, y_train) predicted = random_forest.predict(x_test) mean_squared_error(y_test, predicted) # - # <div style="background-image: linear-gradient(to right, #f78ca0 0%, #f9748f 19%, #fd868c 60%, #fe9a8b 100%); padding: 20px; border-radius: 15px"> # <p style="color:white">The mean squared error is about 0.3, meaning predictions are off by roughly ±0.55 rating points on average (the square root of the MSE).</p> # </div> plt.figure(figsize=(12,7)) sns.regplot(predicted, y_test, color='#43a6cc', marker = 'x', logx=True) plt.title('RandomForestRegressor - Predicting App Rating') plt.xlabel('Predicted') plt.ylabel('Actual') plt.show() # <div style="background-color: #FF3CAC; background-image: linear-gradient(225deg, #FF3CAC 0%, #784BA0 50%, #2B86C5 100%); padding: 15px; border-radius: 10px"> # <h2 style="color:white;">4.2 Imputing missing rating values</h2> # </div> rating_missing = data[data["Rating"].isnull()] imputed_ratings = random_forest.predict(rating_missing[include]) data.loc[data["Rating"].isnull(), "Rating"] = imputed_ratings # <div style="background-color: #FF3CAC; background-image: linear-gradient(225deg, #FF3CAC 0%, #784BA0 50%, #2B86C5 100%); padding: 15px; border-radius: 10px"> # <h2 style="color:white;">5. Dataset Visualization</h2> # <strong style="color:white">Asking questions and answering them.</strong> # <ol style="color: white; font-weight: bold"> # <li>Scatter Matrix</li> # <li>What is the percentage of paid to free apps?</li> # <li>What is the most dominant category?</li> # <li>What size should my app have?</li> # <li>What price should my paid app have?</li> # <li>What are the number of installs for apps priced more than $100?</li> # <li>What is the most successful category?</li> # <li>Distribution of app sizes</li> # <li>Rating per categories and type</li> # <li>Distribution of content rating per categories</li> # </ol> # </div> plt.rcParams['figure.figsize']=(10,20) # <div style="background-color: #FF3CAC; background-image: linear-gradient(225deg, #FF3CAC 0%, #784BA0 50%, #2B86C5 100%); padding: 15px; border-radius: 10px"> # <h2 style="color:white;">5.1 Scatter Matrix</h2> # </div> sns.pairplot(data[["Size", "Reviews", "Installs", "Rating", "Type"]], hue="Type", palette="GnBu") # <div style="background-image: linear-gradient(to right, #f78ca0 0%, #f9748f 19%, #fd868c 60%, #fe9a8b 100%); padding: 20px; border-radius: 15px"> # <p style="color:white">This matches our assumption that size greatly affects other features. <strong>The Scatter Matrix shows that when the app size is relatively small, other features prosper. This shows that consumer behaviour on the Play Store is greatly affected by the app download size.</strong></p> # <p style="color:white"><strong>Takeaway: Developers should optimize their app download size as much as possible. 
</strong></p> # </div> # <div style="background-color: #FF3CAC; background-image: linear-gradient(225deg, #FF3CAC 0%, #784BA0 50%, #2B86C5 100%); padding: 15px; border-radius: 10px"> # <h2 style="color:white;">5.2 What is the percentage of paid to free apps?</h2> # </div> # + plt.rcParams['figure.figsize']=(5,5) size = data["Type"].value_counts() labels = data["Type"].unique() colors=["#43a6cc", "#bae4bf"] plt.pie(size, labels=labels, colors=colors, autopct='%1.1f%%') plt.show() # - # <div style="background-color: #FF3CAC; background-image: linear-gradient(225deg, #FF3CAC 0%, #784BA0 50%, #2B86C5 100%); padding: 15px; border-radius: 10px"> # <h2 style="color:white;">5.3 What size should my app have?</h2> # </div> plt.rcParams['figure.figsize']=(20,20) medians = data.groupby(['Category'])['Size'].median().values median_labels = [str(np.round(s, 2)) for s in medians] ax = sns.boxplot(x="Size", y="Category", data=data, palette="GnBu") data["Size"].mean() # <div style="background-image: linear-gradient(to right, #f78ca0 0%, #f9748f 19%, #fd868c 60%, #fe9a8b 100%); padding: 20px; border-radius: 15px"> # <p style="color:white">The trend is that games have a much larger download size than any other category.<strong> If you're developing anything other than a game, you need to optimize your download size. On average, an app in the dataset has a size of about 17.8 MB.</strong></p> # </div> # <div style="background-color: #FF3CAC; background-image: linear-gradient(225deg, #FF3CAC 0%, #784BA0 50%, #2B86C5 100%); padding: 15px; border-radius: 10px"> # <h2 style="color:white;">5.4 What is the most dominant category?</h2> # </div> categories = data["Category"].value_counts() sns.barplot(x=categories.values, y=categories.index, palette="GnBu") # <div style="background-image: linear-gradient(to right, #f78ca0 0%, #f9748f 19%, #fd868c 60%, #fe9a8b 100%); padding: 20px; border-radius: 15px"> # <p style="color:white"><strong>This is an interesting finding; our first assumption for the most dominant category was games.</strong></p> # </div> # <div style="background-color: #FF3CAC; background-image: linear-gradient(225deg, #FF3CAC 0%, #784BA0 50%, #2B86C5 100%); padding: 15px; border-radius: 10px"> # <h2 style="color:white;">5.5 What price should my paid app have?</h2> # </div> paid_apps = data[data["Type"] == "Paid"] sns.boxplot(x="Price", y="Category", data=paid_apps, palette="GnBu") data["Price"].mean() # <div style="background-image: linear-gradient(to right, #f78ca0 0%, #f9748f 19%, #fd868c 60%, #fe9a8b 100%); padding: 20px; border-radius: 15px"> # <p style="color:white">Finance and lifestyle are valued the most, but it's interesting to find that there are apps with values of more than \$100.<strong> On average, an app's price is about $1.</strong></p> # </div> # <div style="background-color: #FF3CAC; background-image: linear-gradient(225deg, #FF3CAC 0%, #784BA0 50%, #2B86C5 100%); padding: 15px; border-radius: 10px"> # <h2 style="color:white;">5.6 What are the number of installs for apps priced more than $100?</h2> # </div> plt.rcParams['figure.figsize']=(10,5) expensive_apps = data[data["Price"] > 100] sns.lineplot(x="Price", y="Installs", data=expensive_apps, palette="GnBu") # <div style="background-image: linear-gradient(to right, #f78ca0 0%, #f9748f 19%, #fd868c 60%, #fe9a8b 100%); padding: 20px; border-radius: 15px"> # <p style="color:white"><strong>There are only 20 apps valued at more than $100. 
However, the number of installs can get as high as 15k Downloads.</strong> </p> # </div> # <div style="background-color: #FF3CAC; background-image: linear-gradient(225deg, #FF3CAC 0%, #784BA0 50%, #2B86C5 100%); padding: 15px; border-radius: 10px"> # <h2 style="color:white;">5.7 What is the most successful category?</h2> # </div> plt.rcParams['figure.figsize']=(20,20) plot = sns.barplot(x="Installs", y="Category", data=data, palette="GnBu") # <div style="background-image: linear-gradient(to right, #f78ca0 0%, #f9748f 19%, #fd868c 60%, #fe9a8b 100%); padding: 20px; border-radius: 15px"> # <p style="color:white"><strong>Most successful category based on the number of installs is the communication category.</strong></p> # </div> # <div style="background-color: #FF3CAC; background-image: linear-gradient(225deg, #FF3CAC 0%, #784BA0 50%, #2B86C5 100%); padding: 15px; border-radius: 10px"> # <h2 style="color:white;">5.8 Distribution of app sizes</h2> # </div> plt.rcParams['figure.figsize']=(10,5) sns.distplot(data["Size"], color="#43a6cc") # <div style="background-color: #FF3CAC; background-image: linear-gradient(225deg, #FF3CAC 0%, #784BA0 50%, #2B86C5 100%); padding: 15px; border-radius: 10px"> # <h2 style="color:white;">5.9 Rating per categories and type</h2> # </div> plt.rcParams['figure.figsize']=(10,40) sns.violinplot(x="Rating", y="Category", hue="Type", data=data, split=True, palette="GnBu") # <div style="background-color: #FF3CAC; background-image: linear-gradient(225deg, #FF3CAC 0%, #784BA0 50%, #2B86C5 100%); padding: 15px; border-radius: 10px"> # <h2 style="color:white;">5.10 Distribution of content rating per categories</h2> # </div> plt.rcParams['figure.figsize']=(20,40) content_category = data[["Content Rating", "Category"]].groupby(["Content Rating", "Category"]).size().reset_index(name='counts') sns.barplot(x="counts" , y="Category", hue="Content Rating",data=content_category, palette="GnBu")
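# To close the loop on the motivation stated at the top (estimating a rating for an arbitrary
# app), here is a hedged sketch that queries the already-fitted `random_forest` for a single
# hypothetical app. Every feature value below is illustrative: the category and genre codes
# must be looked up in `category_ids` / `genre_ids`, and the normalized features must be on
# the same 0-1 scales produced by the MinMaxScaler steps above.
# +
# Hypothetical app described with the same feature columns used for training
new_app = pd.DataFrame([{
    "category_numeric": 0,        # illustrative code; look up the real one in category_ids
    "genres_numeric": 0,          # illustrative code; look up the real one in genre_ids
    "normalized_reviews": 0.01,
    "normalized_size": 0.30,
    "normalized_installs": 0.05,
    "type_numeric": 0,            # 0 corresponds to Free in type_ids
}])[include]
estimated_rating = random_forest.predict(new_app)[0]
print(f"Estimated rating for the hypothetical app: {estimated_rating:.2f}")
# -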
playstore-eda.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.9.7 64-bit (windows store) # name: python3 # --- # # Hypersolvers vs purely data driven and analytical model # # We want to compare the three approaches with dynamical systems whose vector field can be fully expressed by an analytical formulation here as following: # # 1. ODE integrators # 2. Hypersolvers # 3. Purely data-driven approach # # Note: # These results are just to illustrate the differences and are not included in the final paper submission import sys; sys.path.append(2*'../') # go n dirs back from src import * # + import time import torch import torch.nn as nn import matplotlib.pyplot as plt from torchdyn.core import NeuralODE from torchdyn.datasets import * from torchdyn.numerics import odeint, Euler, HyperEuler # - # ## Model: damped controlled pendulum with torsional spring class ControlledPendulum(nn.Module): """ Inverted pendulum with torsional spring """ def __init__(self, u, m=1., k=.5, l=1., qr=0., β=1., g=9.81): super().__init__() self.u = u # controller (nn.Module) self.nfe = 0 # number of function evaluations self.cur_f = None # current function evaluation self.cur_u = None # current controller evaluation self.m, self.k, self.l, self.qr, self.β, self.g = m, k, l, qr, β, g # physics def forward(self, t, x): self.nfe += 1 q, p = x[..., :1], x[..., 1:] self.cur_u = self.u(t, x) dq = p/self.m dp = -self.k*(q - self.qr) - self.m*self.g*self.l*torch.sin(q) \ -self.β*p/self.m + self.cur_u self.cur_f = torch.cat([dq, dp], -1) return self.cur_f # Change device according to your configuration # device = torch.device('cuda:1') if torch.cuda.is_available() else torch.device('cpu') device = torch.device('cpu') # + from math import pi as π # Time span dt = 0.1 # this is the integration step. It is a bit large to more easily highlight the differences t0, tf = 0, 2 # initial and final time for controlling the system steps = int((tf - t0)/dt) + 1 # so we have a time step of 0.2s t_span = torch.linspace(t0, tf, steps).to(device) # + # The controller is a simple MLP with one hidden layer with bounded output class NeuralController(nn.Module): def __init__(self, model, u_min=-20, u_max=20): super().__init__() self.model = model self.u_min, self.u_max = u_min, u_max def forward(self, t, x): x = self.model(x) return torch.clamp(x, self.u_min, self.u_max) model = nn.Sequential(nn.Linear(2, 32), nn.Tanh(), nn.Linear(32, 1)).to(device) u = NeuralController(model) for p in u.model[-1].parameters(): torch.nn.init.zeros_(p) # Controlled system sys = ControlledPendulum(u).to(device) # - # ## Hypersolvers for Controlled Systems [Semi data-driven] # # By training the hypersolver on residuals between the ground-truth solution and the base solver (we will use Euler in this case) we can render the training more efficient while retaining solution accuracy. # # The residuals can be defined by: # \begin{equation} # \begin{aligned} # R \left(t_k, x(t_k), x(t_{k+1}) \right) = \Phi(x(t_k), t_k, t_{k+1}) - x(t_k) - \epsilon f(x_k, u_k) # \label{eq:residual_hypereuler} # \end{aligned} # \end{equation} # # where $\Phi$ is an accurate solver and $\epsilon$ is the time stepping. # # The hypernet $g_w$ should have as input data important features for predicting future trajectories: we can use the current state $x$, its derivative $f$ and the control input $u$. 
The training will be carried out by minimizing a loss function like: # # \begin{equation} # \ell = \frac{1}{K} \sum_{k=0}^{K-1} \left|\left|{ R \left( t_k, x(t_k), x(t_{k+1}) \right) - g_\omega\left(x(t_k), f(t_k), u(t_{k}) \right) }\right|\right|_2 # \label{eq:loss_hypersolver} # \end{equation} # + # We consider the controller fixed during each solver step class RandConstController(nn.Module): def __init__(self): super().__init__() self.u0 = torch.Tensor(1024, 1).uniform_(-10,10).to(device) def forward(self, t, x): return self.u0 # Save previously learned controller u_no_hypersolver = sys.u sys.u = RandConstController() # modify controller for training # + class VanillaHyperNet(nn.Module): """Simple hypernetwork for controlled systems Input: current x, f and u from the controlled system Output: p-th order residuals""" def __init__(self, net): super().__init__() self.net = net def forward(self, t, x): xfu = torch.cat([x, sys.cur_f, sys.cur_u], -1) return self.net(xfu) net = nn.Sequential(nn.Linear(5, 32), nn.Softplus(), nn.Linear(32, 32), nn.Tanh(), nn.Linear(32, 2)) hypersolver = HyperEuler(VanillaHyperNet(net)) # model = nn.DataParallel(hypersolver, device_ids=[1]) # feel free to change here according to your setup and GPU available. # model = model.to(device) # - # ## Training the Hypernetwork # # Training the hypernetwork $g_w$ requires us to know which set its input values are contained in. # If we want the pendulum with initial states in $\pm \pi$, we can sample the input states for the hypernetwork from a distribution which includes possible states which the system can visit (i.e., the hypernetwork should be able to predict outside of the initial boundaries in a certain range). For example, we can draw the states $x$ from a Uniform distribution: # $$x = \begin{bmatrix} q\\ p\\ \end{bmatrix} \sim Uniform(-2\pi, 2\pi) \times Uniform(-2\pi, 2\pi)$$ # Similarly, the controller values can be sampled from a distribution including all the possible controller values, such as # $$u \sim Uniform(-20, 20)$$ # + # Initial distribution from math import pi as π x0 = 2*π # limit of the state distribution (in rads and rads/second) init_dist = torch.distributions.Uniform(torch.Tensor([-x0, -x0]), torch.Tensor([x0, x0])) base_solver = Euler() # Time span t0, tf = 0, 2 # initial and final time for controlling the system steps = int((tf - t0)/dt) + 1 # so we have a time step of 0.2s t_span = torch.linspace(t0, tf, steps).to(device) dt = (t_span[1] - t_span[0]).detach().cpu().item() # + opt = torch.optim.Adam(hypersolver.parameters(), lr=3e-4) loss_func = nn.MSELoss() epochs = 50000 bs = 2048 hypernet = hypersolver.hypernet span = torch.linspace(0, dt, 2) losses = [] for i in range(epochs): # Sample random intial states and controllers x0 = init_dist.sample((bs,)).to(device) sys.u.u0 = torch.Tensor(bs, 1).uniform_(-5, 5).to(device) # Compute residuals _, sol_gt = odeint(sys, x0, span, solver='tsit5', atol=1e-5, rtol=1e-5)[-1] sol = x0 + sys(0., x0)*dt residuals = (sol_gt - sol) / dt**2 residuals_hypersolver = hypernet(0., x0) loss = loss_func(residuals, residuals_hypersolver) # Optimization step loss.backward(); opt.step(); opt.zero_grad() print(f'Step: {i}, Residual loss: {loss:.3f}', end='\r') losses.append(loss.detach().cpu().item()) # - fig, ax = plt.subplots(1, 1) ax.plot(losses) ax.set_yscale('log') torch.save(hypersolver, 'saved_models/hs_torchdyn.pt') # + # hypersolver = torch.load('saved_models/hs_torchdyn.pt').to(device) # - # ## Purely data-driven approach # + class 
ControlledDataDrivenModel(nn.Module): """ Data driven model of a controlled data-driven model """ def __init__(self, net, u): super().__init__() self.net = net # data-driven model including the controller self.u = u # controller (nn.Module) self.nfe = 0 # number of function evaluations self.cur_u = None # current controller evaluation def forward(self, t, x): self.nfe += 1 self.cur_u = self.u(t, x) dx = self.net(t, x, self.cur_u) return dx class ModelNet(nn.Module): '''Simple MLP''' def __init__(self): super().__init__() self.net = nn.Sequential(nn.Linear(3, 64), nn.Softplus(), nn.Linear(64, 64), nn.Softplus(), nn.Linear(64, 64), nn.Tanh(), nn.Linear(64, 2)) def forward(self, t, x, u): return self.net(torch.cat([x, u], -1)) # - model_net = ModelNet() data_driven_sys = ControlledDataDrivenModel(model_net, RandConstController()) # ### Training the data-driven model # + opt = torch.optim.Adam(data_driven_sys.net.parameters(), lr=3e-4) loss_func = nn.MSELoss() epochs = 50000 bs = 2048 span = torch.linspace(0, dt, 2) losses = [] for i in range(epochs): # Sample random intial states and controllers x0 = init_dist.sample((bs,)).to(device) control_input = torch.Tensor(bs, 1).uniform_(-5, 5).to(device) sys.u.u0 = control_input; data_driven_sys.u.u0 = control_input # Compute one step trajectory _, sol_gt = odeint(sys, x0, span, solver='tsit5', atol=1e-5, rtol=1e-5) _, sol_net = odeint(data_driven_sys, x0, span, solver='tsit5', atol=1e-5, rtol=1e-5) loss = loss_func(sol_gt, sol_net) # Optimization step loss.backward(); opt.step(); opt.zero_grad() print(f'Step: {i}, Loss: {loss:.6f}', end='\r') losses.append(loss.detach().cpu().item()) # - fig, ax = plt.subplots(1, 1) ax.plot(losses) ax.set_yscale('log') torch.save(data_driven_sys.net, 'saved_models/pure_data_driven.pt') # ### Uncontrolled trajectories # + # Sample and plot some trajectories x0 = π # limit of the state distribution (in rads and rads/second) init_dist = torch.distributions.Uniform(torch.Tensor([-x0, -x0]), torch.Tensor([x0, x0])) x0 = init_dist.sample((10000,)).to(device) sys = ControlledPendulum(RandConstController()).to(device) control_input = torch.Tensor(10000, 1).uniform_(-3,3).to(device) # Uncontrolled system # control_input = torch.zeros(10000, 1) sys.u.u0 = control_input; data_driven_sys.u.u0 = control_input tf = 10 t_span = torch.linspace(0, tf, int(tf/dt)+1).to(device) # Purely data-driven _, traj_dd = odeint(data_driven_sys, x0, t_span, solver='euler') # we use rk4 to show that no matter how many times the VF is sampled, pure data-driven cannot compete with the hypersolver approach # Hypersolver (analytical + data-driven) _, traj_hyper = odeint(sys, x0, t_span, solver=hypersolver) # Analytical methods _, traj_euler = odeint(sys, x0, t_span, solver='euler') _, traj_mp = odeint(sys, x0, t_span, solver='midpoint') _, traj_rk4 = odeint(sys, x0, t_span, solver='rk4') _, traj_gt = odeint(sys, x0, t_span, solver='tsit5', atol=1e-5, rtol=1e-5) traj_euler = traj_euler.detach().cpu(); traj_hyper = traj_hyper.detach().cpu(); traj_gt = traj_gt.detach().cpu() traj_mp = traj_mp.detach().cpu(); traj_rk4 = traj_rk4.detach().cpu() traj_dd = traj_dd.detach().cpu() t_span = t_span.cpu() fig, axs = plt.subplots(2, 5, figsize=(20,6)) for i in range(5): for j in range(2): axs[j, i].plot(t_span, traj_euler[:,i,j], 'r') axs[j, i].plot(t_span, traj_hyper[:,i,j], 'orange') axs[j, i].plot(t_span, traj_mp[:,i,j], 'green') axs[j, i].plot(t_span, traj_rk4[:,i,j], 'purple') axs[j, i].plot(t_span, traj_dd[:,i,j], 'b-.') axs[j, i].plot(t_span, 
traj_gt[:,i,j], 'k:') axs[j, i].legend(['Euler', 'HyperEuler', 'Midpoint', 'RK4', 'Pure data-driven', 'Ground Truth']) axs[j, i].label_outer() axs[0, i].set_ylabel(r'Positions $q$'); axs[1, i].set_ylabel(r'Momenta $p$') fig.suptitle('Uncontrolled Trajectories') # + # Error analysis def smape(yhat, y): return torch.abs(yhat - y) / (torch.abs(yhat) + torch.abs(y)) / 2 err_dd = smape(traj_gt, traj_dd).detach().cpu() err_euler = smape(traj_gt, traj_euler).detach().cpu(); err_hyper = smape(traj_gt, traj_hyper).detach().cpu() err_mp, err_rk4 = smape(traj_gt, traj_mp).detach().cpu(), smape(traj_gt, traj_rk4).detach().cpu() mean_dd, std_dd = err_dd.mean(1), err_dd.std(1) mean_euler, std_euler = err_euler.mean(1), err_euler.std(1) mean_hyper, std_hyper = err_hyper.mean(1), err_hyper.std(1) mean_mp, std_mp = err_mp.mean(1), err_mp.std(1) mean_rk4, std_rk4 = err_rk4.mean(1), err_rk4.std(1) fig = plt.figure(figsize=(10,10)) alpha = .1 def plot_errors(ax, var=0, title='Positions', scale=None): ax.plot(t_span, mean_hyper[:,var], 'orange', label='HyperEuler (ours)') ax.plot(t_span, mean_dd[:,var], 'b-.', label='Purely Data-Driven') ax.plot(t_span, mean_euler[:,var], 'r', label='Euler') ax.plot(t_span, mean_mp[:,var], 'green', label='Midpoint') ax.plot(t_span, mean_rk4[:,var], 'purple', label='RK4') ax.fill_between(t_span, mean_euler[:,var]-std_euler[:,var], mean_euler[:,var]+std_euler[:,var], alpha=alpha, color='r') ax.fill_between(t_span, mean_hyper[:,var]-std_hyper[:,var], mean_hyper[:,var]+std_hyper[:,var], alpha=alpha, color='orange') ax.fill_between(t_span, mean_mp[:,var]-std_mp[:,var], mean_mp[:,var]+std_mp[:,var], alpha=alpha, color='green') ax.fill_between(t_span, mean_rk4[:,var]-std_rk4[:,var], mean_rk4[:,var]+std_rk4[:,var], alpha=alpha, color='purple') ax.fill_between(t_span, mean_dd[:,var]-std_dd[:,var], mean_dd[:,var]+std_dd[:,var], alpha=alpha, color='blue') ax.legend() if scale is not None: ax.set_yscale(scale) ax.set_title(title) ax.set_xlabel('Time [s]') ax.label_outer() ax = fig.add_subplot(2,2,1) plot_errors(ax, 0, 'Position error') ax = fig.add_subplot(2,2,2) plot_errors(ax, 1, 'Momenta error') ax = fig.add_subplot(2,2,3) plot_errors(ax, 0, 'Position error log', 'log') ax = fig.add_subplot(2,2,4) plot_errors(ax, 1, 'Momenta error log', 'log') fig.suptitle('SMAPE Propagation') # - # ## Comment # We can see that even if the data-driven vector field can learn the dynamics, even by integrating it for more times (in this case, with `RK4`), it still does not reach the `hypersolver` performance.\ # Even if it did, we notice that it usually requires a larger network and more NFE. Most importantly, there is no error bound for this. # Here is a summary: # # <center> # # | | Classical | Hypersolvers | Purely data-driven* | # |-------------------|----------------|--------------------------|--------------------| # | Vector field | Analytical | Analytical + data-driven | Data-driven | # | ODE solver | Classical | Classical | Classical | # | Network size | n/a | Smaller | Bigger | # | Error bounds | ☑️ | ☑️ | ✘** | # | Pareto optimality | ✘ | ☑️ | ✘ | # #
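# To make the comparison concrete, the cell below writes out explicitly what a single HyperEuler step computes, following the residual definition used during training: a plain Euler step plus a second-order correction predicted by the hypernetwork. This is a minimal sketch reusing `sys`, `hypersolver`, `init_dist`, `dt` and `device` from the cells above; the vector field must be evaluated first, because `VanillaHyperNet` reads the cached `sys.cur_f` and `sys.cur_u`.

# +
x_k = init_dist.sample((4,)).to(device)                    # a small batch of states
sys.u.u0 = torch.Tensor(4, 1).uniform_(-5, 5).to(device)   # constant control over this step
f_k = sys(0., x_k)                                         # analytical vector field (also caches cur_f, cur_u)
g_k = hypersolver.hypernet(0., x_k)                        # learned residual term
x_next = x_k + dt * f_k + dt**2 * g_k                      # HyperEuler update: Euler step + higher-order correction
print(x_next.shape)
# -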
hypersolvers-control/experiments/pendulum/00_data_driven_only_vs_hypersolver.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/felipemoreia/Awari/blob/master/Exercicios_unidade_4_Manipulacao_de_Dados_Parte_05.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="uC-G8j5oEYrN" colab_type="text" # # Awari - Data Science # # ## Exercícios Unidade 4 - Parte 5 # + [markdown] id="oiP0VpraEYrP" colab_type="text" # Neste Jupyter notebook você irá resolver uma exercícios utilizando a linguagem Python e a biblioteca Pandas. # # Todos os datasets utilizados nos exercícios estão salvos na pasta **datasets**. # # Todo o seu código deve ser executado neste Jupyter Notebook. Por fim, se desejar, revise as respostas com o seu mentor. # + [markdown] id="hukOWsuCEYrQ" colab_type="text" # ### Tarefa 1. Importe o dataset e salve os dados em um dataframe # # Os dados estão salvos no arquivo ***datasets/iris.csv***. # # Os dados consistem de 50 unidades amostrais de três espécies (setosa, # virginica, versicolor) de íris (uma espécie de planta), ou seja, temos um total de 150 observações. De cada uma delas mediu-se quatro variáveis morfológicas: # comprimento e largura da sépala e comprimento e largura da pétala. # # Salve os dados em um DataFrame de nome *iris*. # + id="kxP3t3PDEYrR" colab_type="code" colab={} import pandas as pd # + id="QNoJQpdjEYrX" colab_type="code" colab={} iris = pd.read_csv('iris.csv', names=['Comp-Sepal', 'Larg-Sepal', 'Comp-petal', 'Larg-petal', 'Especie']) # + id="XM-U3R5yEYrc" colab_type="code" colab={} outputId="ec8232cc-907d-4e00-ac6c-fefac959da8d" iris.head() # + id="R92i6UL5EYrg" colab_type="code" colab={} outputId="74fd0985-0bc1-422a-d855-c40016c0fd33" iris.tail() # + [markdown] id="mSPAcXvgEYrj" colab_type="text" # ### Tarefa 2. Mostre as 10 primeira linhas do dataset. # + id="MvpgWMsbEYrk" colab_type="code" colab={} outputId="f2238686-1460-48a1-94a3-fa7f9d971053" iris.head(10) # + [markdown] id="OBy62AOGEYrn" colab_type="text" # ### Tarefa 3. Os dados estão sem identificação de colunas, assim, adicione colunas ao dataset. # # Nomes das colunas: # * 1 - sepal_length (in cm) # * 2 - sepal_width (in cm) # * 3 - petal_length (in cm) # * 4 - petal_width (in cm) # * 5 - class # # # *Dica: utilize o atributo columns do DataFrame* # + id="-DudD7EvEYrn" colab_type="code" colab={} iris.columns = ['sepal_length (in cm)', 'sepal_width (in cm)', 'petal_length (in cm)', 'petal_width (in cm)', 'class'] # + [markdown] id="N9ArEysEEYrq" colab_type="text" # ### Tarefa 4. Verifique se existem valores missing (ausentes) no DataFrame? # # *Dica: use o método isnull() do Pandas em conjunto com o método sum()* # + id="xawptNpPEYrr" colab_type="code" colab={} outputId="e9ef79ca-50ad-4385-fd62-c4f68309d7f1" iris.info() # + id="oqz92naZEYru" colab_type="code" colab={} outputId="1669eec3-c11c-424c-d841-8819c8d3e71c" iris.isnull().sum() # + [markdown] id="4lGEcHOmEYrx" colab_type="text" # ### Tarefa 5. Atribua o valor NaN as linhas 10 até 19 da coluna 'petal_length'. 
# + id="i4j28HLIEYrx" colab_type="code" colab={} import numpy as np iris.loc[10:20,'petal_length (in cm)'] = np.nan # + id="cm7I3doCEYrz" colab_type="code" colab={} outputId="2e3f9929-51aa-4257-a9e7-70e1148fafff" iris[:25]['petal_length (in cm)'] # + [markdown] id="tAtybOErEYr2" colab_type="text" # ### Tarefa 6. Agora substitua os valores NaN por 1.0 # # *Dica: use o método fillna() do DataFrame* # + id="ukCS3ctMEYr2" colab_type="code" colab={} iris.fillna(1.0, inplace=True) # + [markdown] id="LGDYgucGEYr7" colab_type="text" # ### Tarefa 7. Remova a coluna 'class' # + id="7tOVd2pQEYr7" colab_type="code" colab={} iris.drop('class', axis=1, inplace=True) # + [markdown] id="FzL-X4NdEYr-" colab_type="text" # ### Tarefa 8. Atribua NaN a todas as colunas das 3 primeiras linhas. # + id="wTX6AZO7EYr-" colab_type="code" colab={} outputId="9226bc8d-2ab8-40bb-e802-5fd8b9dafd6b" # Seu código iris.loc[0:3] = np.nan iris.head() # + [markdown] id="2ceE4v4bEYsA" colab_type="text" # ### Tarefa 9. Remova as linhas que possuem NaN. # # *Dica: utilize o método dropna() do DataFrame* # + id="JFnMwNCUEYsA" colab_type="code" colab={} # Seu código iris.dropna(axis=0, inplace=True) # + [markdown] id="L3BMcHHzEYsC" colab_type="text" # ### Tarefa 10. Reinicie o index do DataFrame para que ele inicie a partir do zero novamente. # # *Dica: utilize o método reset_index() do DataFrame* # + id="mh8dykgQEYsD" colab_type="code" colab={} iris.reset_index() iris.drop(['index'], axis=1, inplace=True) # + id="tLQ5erjdEYsF" colab_type="code" colab={} outputId="c4ac054b-f677-4d95-d834-947857886570" iris # + [markdown] id="x-0VjqcLEYsH" colab_type="text" # ### Awari - <a href="https://awari.com.br/"> awari.com.br</a>
Exercicios_unidade_4_Manipulacao_de_Dados_Parte_05.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + [markdown] id="Va6a1JliHzHO" # # Shift Scheduling # # An article entitled ["Modeling and optimization of a weekly workforce with Python and Pyomo"](https://towardsdatascience.com/modeling-and-optimization-of-a-weekly-workforce-with-python-and-pyomo-29484ba065bb) by [<NAME>](https://medium.com/@ccarballolozano) posted on the [Towards Data Science](https://towardsdatascience.com/) blog showed how to build a Pyomo model to schedule weekly shifts for a small campus food store. The article was primarily intended as a tutorial introduction to Pyomo (see the [github](https://github.com/ccarballolozano/blog-post-codes/blob/master/Modeling-and-optimization-of-a-weekly-workforce-with-Python-and-Pyomo/Modeling%20and%20optimization%20of%20a%20weekly%20workforce%20with%20Python%20and%20Pyomo.ipynb) repository for the code). # # Here we revisit the example with a new model demonstrating use of Pyomo decorators and of Pyomo sets, and how to use the model solution to create useful visualizations and reports for workers and managers. # + colab={"base_uri": "https://localhost:8080/", "height": 213} executionInfo={"elapsed": 9216, "status": "ok", "timestamp": 1647782901780, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gg_n8V7bVINy02QRuRgOoMo11Ri7NKU3OUKdC1bkQ=s64", "userId": "09038942003589296665"}, "user_tz": 240} id="iTmwYjZcHx3X" outputId="0c6ab3ce-1d86-4dca-932d-a8767e436cca" # install Pyomo and solvers for Google Colab import sys if "google.colab" in sys.modules: # !wget -N -q https://raw.githubusercontent.com/jckantor/MO-book/main/tools/install_on_colab.py # %run install_on_colab.py # + [markdown] id="e1gXyjsjuJjg" # ## Problem Statement # # From the original article: # # > A new food store has been opened at the University Campus which will be open 24 hours a day, 7 days a week. Each day, there are three eight-hour shifts. Morning shift is from 6:00 to 14:00, evening shift is from 14:00 to 22:00 and night shift is from 22:00 to 6:00 of the next day. # # > During the night there is only one worker while during the day there are two, except on Sunday that there is only one for each shift. Each worker will not exceed a maximum of 40 hours per week and have to rest for 12 hours between two shifts. # # > As for the weekly rest days, an employee who rests one Sunday will also prefer to do the same that Saturday. # In principle, there are available ten employees, which is clearly over-sized. The less the workers are needed, the more the resources for other stores. # + [markdown] id="h2Z4Ll__vSyF" tags=[] # ## Analysis # + [markdown] id="h2Z4Ll__vSyF" tags=[] # ### Model sets # # This problem requires assignment of an unspecified number of workers to a predetermined set of shifts. There are three shifts per day, seven days per week. These observations suggest the need for three ordered sets: # # * `WORKERS` with $N$ elements representing workers. $N$ is as input to a function creating an instance of the model. # # * `DAYS` with labeling the days of the week. # # * `SHIFTS` labeling the shifts each day. # # The problem describes additional considerations that suggest the utility of several additional sets. # # * `SLOTS` is an ordered set of (day, shift) pairs describing all of the available shifts during the week. 
# # * `BLOCKS` is an order set of all overlapping 24 hour periods in the week. An element of the set contains the (day, shift) period in the corresponding period. This set will be used to limit worker assignments to no more than one for each 24 hour period. # # * `WEEKENDS` is a the set of all (day, shift) pairs on a weekend. This set will be used to implement worker preferences on weekend scheduling. # # These additional sets improve the readability of the model. # # $$ # \begin{align*} # \text{WORKERS} & = \{w_1, w_2, \ldots, w_1\} \text{ set of all workers} \\ # \text{DAYS} & = \{\text{Mon}, \text{Tues}, \ldots, \text{Sun}\} \text{ days of the week} \\ # \text{SHIFTS} & = \{\text{morning}, \text{evening}, \text{night}\} \text{ 8 hour daily shifts} \\ # \text{SLOTS} & = \text{DAYS} \times \text{SHIFTS} \text{ ordered set of all (day, shift) pairs}\\ # \text{BLOCKS} & \subset \text{SLOTS} \times \text{SLOTS} \times \text{SLOTS} \text{ all 24 blocks of consecutive slots} \\ # \text{WEEKENDS} & \subset \text{SLOTS} \text{ subset of slots corresponding to weekends} \\ # \end{align*} # $$ # + [markdown] id="h2Z4Ll__vSyF" tags=[] # ### Model parameters # # $$ # \begin{align*} # N & = \text{ number of workers} \\ # \text{WorkersRequired}_{d, s} & = \text{ number of workers required for each day, shift pair } (d, s) \\ # \end{align*} # $$ # + [markdown] id="h2Z4Ll__vSyF" tags=[] # ### Model decision variables # # $$ # \begin{align*} # \text{assign}_{w, d, s} & = \begin{cases}1\quad\text{if worker } w \text{ is assigned to day, shift pair } (d,s)\in \text{SLOTS} \\ 0\quad \text{otherwise} \end{cases} \\ # \text{weekend}_{w} & = \begin{cases}1\quad\text{if worker } w \text{ is assigned to a weekend day, shift pair } (d,s)\in\text{WEEKENDS} \\ 0\quad \text{otherwise} \end{cases} \\ # \text{needed}_{w} & = \begin{cases}1\quad\text{if worker } w \text{ is needed during the week} \\ 0\quad \text{otherwise} \end{cases} \\ # \end{align*} # $$ # + [markdown] id="h2Z4Ll__vSyF" tags=[] # ### Model constraints # # Assign workers to each shift to meet staffing requirement. # # $$\begin{align*} # \\ # \sum_{w\in\text{ WORKERS}} \text{assign}_{w, d, s} & \geq \text{WorkersRequired}_{d, s} & \forall (d, s) \in \text{SLOTS} \\ # \end{align*}$$ # # Assign no more than 40 hours per week to each worker. # # $$\begin{align*} # \\ # 8\sum_{d,s\in\text{ SLOTS}} \text{assign}_{w, d, s} & \leq 40 & \forall w \in \text{WORKERS} \\ # \\ # \end{align*}$$ # # Assign no more than one shift in each 24 hour period. # # $$\begin{align*} # \\ # \text{assign}_{w, d_1,s_1} + \text{assign}_{w, d_2, s_2} + \text{assign}_{w, d_3, s_3} & \leq 1 & \forall w \in \text{WORKERS} \\ & & \forall ((d_1, s_1), (d_2, s_2), (d_3, s_3))\in \text{BLOCKS} \\ # \\ # \end{align*}$$ # # Indicator if worker has been assigned any shift. # # $$\begin{align*} # \\ # \sum_{d,s\in\text{ SLOTS}} \text{assign}_{w,d,s} & \leq M_{\text{SLOTS}}\cdot\text{needed}_w & \forall w\in \text{WORKERS} \\ # \\ # \end{align*}$$ # # Indicator if worker has been assigned a weekend shift. # # $$\begin{align*} # \\ # \sum_{d,s\in\text{ WEEKENDS}} \text{assign}_{w,d,s} & \leq M_{\text{WEEKENDS}}\cdot\text{weekend}_w & \forall w\in \text{WORKERS} \\ # \\ # \end{align*}$$ # + [markdown] id="h2Z4Ll__vSyF" tags=[] # ### Model objective # # The model objective is to minimize the overall number of workers needed to fill the shift and work requirements while also attempting to meet worker preferences regarding weekend shift assignments. 
This is formulated here as an objective for minimizing a weighted sum of the number of workers needed to meet all shift requirements and the number of workers assigned to weekend shifts. The positive weight $\gamma$ determines the relative importance of these two measures of a desirable shift schedule. # # $$ # \begin{align*} # \min \left(\sum_{w\in\text{ WORKERS}} \text{needed}_w + \gamma\sum_{w\in\text{ WORKERS}} \text{weekend}_w) # \right)\end{align*} # $$ # + [markdown] id="JbhSiHcox4Ef" # ## Pyomo Modeling # + executionInfo={"elapsed": 470, "status": "ok", "timestamp": 1647782911667, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gg_n8V7bVINy02QRuRgOoMo11Ri7NKU3OUKdC1bkQ=s64", "userId": "09038942003589296665"}, "user_tz": 240} id="4Vvt0azzH0J7" import pyomo.environ as pyo def shift_schedule(N=10, hours=40): """return a solved model assigning N workers to shifts""" m = pyo.ConcreteModel('workforce') # ordered set of avaiable workers m.WORKERS = pyo.Set(initialize=[f"W{i:02d}" for i in range(1, N+1)]) # ordered sets of days and shifts m.DAYS = pyo.Set(initialize=['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']) m.SHIFTS = pyo.Set(initialize=['morning', 'evening', 'night']) # ordered set of day, shift time slots m.SLOTS = pyo.Set(initialize = m.DAYS * m.SHIFTS) # ordered set of 24 hour time blocks m.BLOCKS = pyo.Set(initialize = [[m.SLOTS.at(i), m.SLOTS.at(i+1), m.SLOTS.at(i+2)] for i in range(1, len(m.SLOTS)-1)]) # ordered set of weekend shifts m.WEEKENDS = pyo.Set(initialize = m.SLOTS, filter = lambda m, day, shift: day in ['Sat', 'Sun']) # parameter of worker requirements @m.Param(m.SLOTS) def WorkersRequired(m, day, shift): if shift in ['night'] or day in ['Sun']: return 1 return 2 # max hours per week per worker m.Hours = pyo.Param(mutable=True, default=hours) # decision variable: assign[worker, day, shift] = 1 assigns worker to a time slot m.assign = pyo.Var(m.WORKERS, m.SLOTS, domain=pyo.Binary) # decision variables: weekend[worker] = 1 worker is assigned weekend shift m.weekend = pyo.Var(m.WORKERS, domain=pyo.Binary) # decision variable: needed[worker] = 1 m.needed = pyo.Var(m.WORKERS, domain=pyo.Binary) # assign a sufficient number of workers for each time slot @m.Constraint(m.SLOTS) def required_workers(m, day, shift): return m.WorkersRequired[day, shift] == sum(m.assign[worker, day, shift] for worker in m.WORKERS) # workers limited to forty hours per week assuming 8 hours per shift @m.Constraint(m.WORKERS) def forty_hour_limit(m, worker): return 8*sum(m.assign[worker, day, shift] for day, shift in m.SLOTS) <= m.Hours # workers are assigned no more than one time slot per 24 time block @m.Constraint(m.WORKERS, m.BLOCKS) def required_rest(m, worker, d1, s1, d2, s2, d3, s3): return m.assign[worker, d1, s1] + m.assign[worker, d2, s2] + m.assign[worker, d3, s3] <= 1 # determine if a worker is assigned to any shift @m.Constraint(m.WORKERS) def is_needed(m, worker): return sum(m.assign[worker, day, shift] for day, shift in m.SLOTS) <= len(m.SLOTS)*m.needed[worker] # determine if a worker is assigned to a weekend shift @m.Constraint(m.WORKERS) def is__weekend(m, worker): return 6*m.weekend[worker] >= sum(m.assign[worker, day, shift] for day, shift in m.WEEKENDS) # minimize a blended objective of needed workers and needed weekend workers @m.Objective(sense=pyo.minimize) def minimize_workers(m): return sum(i*m.needed[worker] + 0.1*i*m.weekend[worker] for i, worker in enumerate(m.WORKERS)) solver = pyo.SolverFactory('cbc') solver.solve(m) return 
m m = shift_schedule(10, 40) # + [markdown] id="s9A7zkSAhC8-" # ## Visualizing the Solution # # Scheduling applications generate a considerable amount of data to be used by the participants. The following cells demonstrate the preparation of charts and reports that can be used to communicate scheduling information to the store management and shift workers. # + colab={"base_uri": "https://localhost:8080/", "height": 295} executionInfo={"elapsed": 739, "status": "ok", "timestamp": 1647784587745, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gg_n8V7bVINy02QRuRgOoMo11Ri7NKU3OUKdC1bkQ=s64", "userId": "09038942003589296665"}, "user_tz": 240} id="Gk8SAHcPPbXo" outputId="2f249296-4170-4aaf-eae8-b03d5c13d8c8" import matplotlib.pyplot as plt from matplotlib.patches import Rectangle def visualize(m): bw = 1.0 workers = [worker for worker in m.WORKERS] fig, ax = plt.subplots(1, 1, figsize=(12, 1 + 0.3*len(m.WORKERS))) ax.set_title('Shift Schedule') # x axis styling ax.set_xlim(0, len(m.SLOTS)) colors = ['teal', 'gold', 'magenta'] for i in range(len(m.SLOTS) + 1): ax.axvline(i, lw=0.3) ax.fill_between([i, i+1], [0]*2, [len(m.WORKERS)]*2, alpha=0.1, color=colors[i%3]) for i in range(len(m.DAYS) + 1): ax.axvline(3*i, lw=1) ax.set_xticks([3*i + 1.5 for i in range(len(m.DAYS))]) ax.set_xticklabels(m.DAYS) ax.set_xlabel('Shift') # y axis styling ax.set_ylim(0, len(m.WORKERS)) for j in range(len(m.WORKERS) + 1): ax.axhline(j, lw=0.3) ax.set_yticks([j + 0.5 for j in range(len(m.WORKERS))]) ax.set_yticklabels(workers) ax.set_ylabel('Worker') # show shift assignments for i, slot in enumerate(m.SLOTS): day, shift = slot for j, worker in enumerate(m.WORKERS): if round(m.assign[worker, day, shift]()): ax.add_patch(Rectangle((i, j + (1-bw)/2), 1, bw, edgecolor='b')) ax.text(i + 1/2, j + 1/2, worker, ha='center', va='center', color='w') # display needed and weekend data for j, worker in enumerate(m.WORKERS): if not m.needed[worker](): ax.fill_between([0, len(m.SLOTS)], [j, j], [j+1, j+1], color='k', alpha=0.3) if m.needed[worker]() and not m.weekend[worker](): ax.fill_between([15, len(m.SLOTS)], [j, j], [j+1, j+1], color='k', alpha=0.3) visualize(m) # - # ## Implementing the Schedule with Reports # # Optimal planning models can generate large amounts of data that need to be summarized and communicated to individuals for implementation. # # ### Creating a master schedule with categorical data # # The following cell creates a pandas DataFrame comprising all active assignments from the solved model. The data consists of all (worker, day, shift) tuples for which the Boolean decision variable m.assign equals one. # # The data is categorical consisting of a unique id for each worker, a day of the week, or the name of a shift. Each of the categories has a natural ordering that should be used in creating reports. This is implemented using the `CategoricalDtype` class. 
# + executionInfo={"elapsed": 470, "status": "ok", "timestamp": 1647782911667, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gg_n8V7bVINy02QRuRgOoMo11Ri7NKU3OUKdC1bkQ=s64", "userId": "09038942003589296665"}, "user_tz": 240} id="4Vvt0azzH0J7" import pandas as pd schedule = pd.DataFrame([[w, d, s] for w in m.WORKERS for d, s in m.SLOTS if m.assign[w, d, s]()], columns=["worker", "day", "shift"]) # create and assign a worker category type worker_type = pd.CategoricalDtype(categories=m.WORKERS, ordered=True) schedule["worker"] = schedule["worker"].astype(worker_type) # create and assign a day category type day_type = pd.CategoricalDtype(categories=m.DAYS, ordered=True) schedule["day"] = schedule["day"].astype(day_type) # create and assign a shift category type shift_type = pd.CategoricalDtype(categories=m.SHIFTS, ordered=True) schedule["shift"] = schedule["shift"].astype(shift_type) # demonstrate sorting and display of the master schedule schedule.sort_values(by=["day", "shift", "worker"]) # - # ### Reports for workers # # Each worker should receive a report detailing their shift assignments. The reports are created by sorting the master schedule by worker, day, and shift, then grouping by worker. # + executionInfo={"elapsed": 470, "status": "ok", "timestamp": 1647782911667, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gg_n8V7bVINy02QRuRgOoMo11Ri7NKU3OUKdC1bkQ=s64", "userId": "09038942003589296665"}, "user_tz": 240} id="4Vvt0azzH0J7" # sort schedule by worker schedule = schedule.sort_values(by=["worker", "day", "shift"]) # print worker schedules for worker, worker_schedule in schedule.groupby('worker'): print(f"\n Work schedule for {worker}") if len(worker_schedule) > 0: for s in worker_schedule.to_string(index=False).split('\n'): print(s) else: print(" no assigned shifts") # - # ### Reports for store managers # # The store managers need reports listing workers by assigned day and shift. # + executionInfo={"elapsed": 470, "status": "ok", "timestamp": 1647782911667, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gg_n8V7bVINy02QRuRgOoMo11Ri7NKU3OUKdC1bkQ=s64", "userId": "09038942003589296665"}, "user_tz": 240} id="4Vvt0azzH0J7" # sort by day, shift, worker schedule = schedule.sort_values(by=["day", "shift", "worker"]) for day, day_schedule in schedule.groupby(["day"]): print(f"\nShift schedule for {day}") for shift, shift_schedule in day_schedule.groupby(["shift"]): print(f" {shift} shift: ", end="") print(', '.join([worker for worker in shift_schedule["worker"].values])) # + [markdown] id="s4nX2GyHnyX6" # ## Suggested Exercises # # 1. How many workers will be required to operate the food store if all workers are limited to four shifts per week, i.e., 32 hours? How about 3 shifts or 24 hours per week? # # 2. Add a second class of workers called "manager". There needs to be one manager on duty for every shift, that morning shifts require 3 staff on duty, evening shifts 2, # and night shifts 1. # # 3. Add a third class of workers called "part_time". Part time workers are limited to no more than 30 hours per week. # # 4. Modify the problem formulation and objective to spread the shifts out amongst all workers, attempting to equalize the total number of assigned shifts, and similar numbers of day, evening, night, and weekend shifts. # # 5. 
Find the minimum cost staffing plan assuming managers cost 30 euros per hour plus 100 euros per week in fixed benefits, regular workers cost 20 euros per hour plus 80 euros per week in fixed benefits, and part-time workers cost 15 euros per week with no benefits. # + id="hpk1I82wprmx"
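# A quick, optional sketch for exercise 1: rebuild the model with tighter weekly hour limits and count how many workers actually receive shifts. `N` is kept generous here so the model stays feasible at the tighter limits; this assumes the cbc solver used above is available.

# +
for hours in (40, 32, 24):
    m_ex = shift_schedule(N=14, hours=hours)
    staffed = sum(
        1 for w in m_ex.WORKERS
        if any(round(m_ex.assign[w, d, s]()) for d, s in m_ex.SLOTS)
    )
    print(f"{hours} hours/week limit -> {staffed} workers receive shifts")
# -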
_build/html/_sources/notebooks/03/Shift-Scheduling.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ### Homework
# #### Write an algorithm to sort the data from lowest to highest

a = [1, 5, 3, 5, 7, 98, 6, 45, 4, 3, 5, 6, 7, 443, 213]

# I will provide a naive solution here, but you can be creative!
# If you cannot figure out a new method, just try to improve on my code.
# You get one mark as long as you have tried.
# Don't use the Python built-in implementation, otherwise it will be boring.

sorted(a)


def is_sorted(l):
    '''This function checks whether the given list is already sorted'''
    prev = None  # set the previous element to None before entering the for loop
    for element in l:
        if prev:
            if element < prev:
                return  # return None if the list is not sorted completely, so sorting continues
            prev = element  # store the element in prev
        else:
            prev = element  # store the element in prev
    return True


def sort(l):
    # this performs a pairwise sort, looping until the list is fully sorted
    while not is_sorted(l):  # keep sorting until fully sorted
        prev = None  # set the previous element to None before entering the for loop
        b = []
        for element in l:
            if prev:
                if element < prev:
                    b[-1] = element
                    b.append(prev)
                    # prev = prev  # normally we would re-assign, but this step is redundant here
                else:
                    b.append(element)
                    prev = element
            else:
                b.append(element)
                prev = element
        l = b  # assign the result back to l for the next round of sorting
    return l


sort(a)
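# One possible improvement over the pairwise-swap approach above, sketched as an optional example: an insertion sort keeps the growing prefix of the list sorted, so it never has to rescan the whole list from scratch on every pass.

# +
def insertion_sort(l):
    # sort a copy so the input list is left untouched
    result = list(l)
    for i in range(1, len(result)):
        current = result[i]
        j = i - 1
        # shift larger elements one slot to the right
        while j >= 0 and result[j] > current:
            result[j + 1] = result[j]
            j -= 1
        result[j + 1] = current
    return result


insertion_sort(a)
# -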
week1/Homework.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from gs_quant.session import Environment, GsSession from gs_quant.common import PayReceive, Currency from gs_quant.instrument import IRSwaption from gs_quant.risk import CurveScenario, MarketDataPattern import matplotlib.pyplot as plt import pandas as pd # external users should substitute their client id and secret; please skip this step if using internal jupyterhub GsSession.use(Environment.PROD, client_id=None, client_secret=None, scopes=('run_analytics',)) swaption = IRSwaption(PayReceive.Receive, '5y', Currency.USD, expiration_date='13m', strike='atm') swaption.resolve() original_price = swaption.price() # retrieve the market data our instrument is sensitive to. market_data = swaption.market().market_data_dict print('Base price: {:,.2f}'.format(original_price)) # + # Pivot point is the tenor at which curve shift =0 and influences the type and shape of curve shift # Price the swaption under a bear flattener scenario of -10bp bear_flattener_scenario = CurveScenario(market_data_pattern=MarketDataPattern('IR', 'USD'), curve_shift=-10, tenor_start=15, tenor_end=25, pivot_point=25) with bear_flattener_scenario: swaption_bear_flattener = swaption.price() market_data_bear_flattener = swaption.market().market_data_dict print('Price under bear flattener curve shift: {:,.2f}'.format(swaption_bear_flattener)) # - # Compare swap rate market data coordinates before and after curve scenario shock market_data_df = pd.DataFrame([{mkt_data.coordinate: mkt_data.value * 1e4 for mkt_data in market_data if (mkt_data.coordinate.mkt_type=="IR" and mkt_data.coordinate.mkt_class=="SWAP")}, {mkt_data.coordinate: mkt_data.value * 1e4 for mkt_data in market_data_bear_flattener if (mkt_data.coordinate.mkt_type=="IR" and mkt_data.coordinate.mkt_class=="SWAP")}], index=['Values', 'Shocked values']).transpose() market_data_df # + # Plotting swap rate market data before and after curve scenario shock swap_curve = pd.DataFrame.from_dict({int(''.join(list(filter(str.isdigit, str(v))))): market_data_df.loc[v] for v in market_data_df.index}, orient='index') swap_curve['Shock'] = swap_curve['Shocked values'] - swap_curve['Values'] swap_curve.plot(figsize=(12, 8), title='USD Swap Curve Before and After {}bp Bear flattening Shock'.format(bear_flattener_scenario.curve_shift)) plt.xlabel('Tenor (years)') plt.ylabel('bp')
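# As an optional follow-up sketch, the same pattern can be reused to price the swaption under the opposite-sign shift over the same tenor range, which gives a quick check of how symmetric the price response is. Parameter semantics follow the `CurveScenario` call above.

# +
opposite_scenario = CurveScenario(market_data_pattern=MarketDataPattern('IR', 'USD'),
                                  curve_shift=10, tenor_start=15, tenor_end=25, pivot_point=25)

with opposite_scenario:
    swaption_opposite = swaption.price()

print('Price under +10bp shift (same tenor range): {:,.2f}'.format(swaption_opposite))
print('Price under -10bp shift (bear flattener above): {:,.2f}'.format(swaption_bear_flattener))
# -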
gs_quant/documentation/02_pricing_and_risk/01_scenarios_and_contexts/examples/04_curve_shock/010404_bear_flattener_curve_shock.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: place-de-marche # language: python # name: place-de-marche # --- # Place de marché # ============== # # ![logo](../reports/figures/logo.png) # # # ### Votre mission # Votre mission est de **réaliser une première étude de faisabilité d'un moteur de classification** d'articles basé sur une image et une description pour l'automatisation de l'attribution de la catégorie de l'article. # # Pour ce faire, vous allez **évaluer la possibilité d'extraire des données depuis l'API Amazon** en **prenant connaissance de la documentation** et en **écrivant la requête** qui vous permettrait d'extraire des données supplémentaires. Vous vous assurerez ainsi que vous pourrez bien disposer de plus de données et diversifier les sources de données pour éviter les biais pour votre moteur de classification. # # Ensuite, vous **analyserez le jeu de données** déjà constitué en **réalisant un prétraitement** des images et des descriptions des produits, une **réduction de dimension**, puis un **clustering**. Les résultats du clustering seront présentés sous la forme d’une représentation en deux dimensions à déterminer, qui ’illustrera le fait que les caractéristiques extraites permettent de regrouper des produits de même catégorie. # # La représentation graphique vous aidera à convaincre Linda que cette approche de modélisation permettra bien de regrouper des produits de même catégorie. # # ### Contraintes # # Linda vous a communiqué les contraintes suivantes : # # * Limiter le nombre d’articles pris par l’API (par exemple : 1000 lignes) et filtrer sur un unique type d’article (par exemple un type d’article peu présent dans votre échantillon de données actuelles). # * Afin d’extraire les features, mettre en œuvre a minima un algorithme de type SIFT / ORB / SURF. # * Un algorithme de type CNN Transfer Learning peut éventuellement être utilisé en complément, s’il peut apporter un éclairage supplémentaire à la démonstration. # + import os import random import numpy as np import cv2 from PIL import Image, ImageOps, ImageFilter import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from sklearn.preprocessing import MinMaxScaler from sklearn.manifold import TSNE from sklearn.decomposition import PCA from sklearn.cluster import DBSCAN, KMeans, MiniBatchKMeans from sklearn import metrics import plotly.express as px sns.set(font_scale=1.6) Image.MAX_IMAGE_PIXELS = 93680329 # - def histogram(image, figsize=(12, 8), kde=False): image = np.array(image) if len(image.shape) > 2: # RGB mode fig, axes = plt.subplots(3, 1, figsize=figsize) for channel, color, ax in zip(range(3), ['r', 'g', 'b'], axes): sns.distplot(image[:, :, channel].flatten(), kde=kde, color=color, ax=ax) else: # Gray sns.distplot(image.flatten(), kde=kde) # ### Chargement des descriptions for dirname, _, filenames in os.walk('../data/raw/'): if len(filenames) == 1: df = pd.read_csv(os.path.join(dirname, filenames[0])) df['path'] = df['image'].apply(lambda x: os.path.join('../data/raw/Images/', x)) # + col_to_drop = [ 'uniq_id', 'crawl_timestamp', 'product_url', 'pid', 'discounted_price', 'is_FK_Advantage_product', 'product_rating', 'overall_rating', 'product_specifications', 'brand', ] df.drop(columns=col_to_drop, inplace=True) # - def extract_level(tree_str, level=-1, strict=False): """return a specific level from product_category_tree. 
tips: specify a negative index to access latest part of the tree. """ tree_str = eval(tree_str)[0] levels = tree_str.split('>>') levels = list(map(lambda x: x.strip(), levels)) if strict: return levels[level] else: try: return levels[level] except IndexError: return None # On récupère le premier niveau de l'arbre des catégories comme label df['label'] = df['product_category_tree'].apply(extract_level, level=1) df['label'] # ## ORB def scale_down(image, factor=5): width, height = image.size target_width, target_height = width // factor, height // factor return image.resize((target_width, target_height)) images = [scale_down(Image.open(x), factor=5) for x in df['path']] # + # size = 10 # f, axes = plt.subplots(size, size, figsize=(12, 12)) # for ax, im in zip(axes.flatten(), random.sample(images, size ** 2)): # ax.imshow(im, cmap='gray', aspect='auto') # ax.set_xticks([]) # ax.set_yticks([]) # to hide tick values on X and Y axis # + extractor = cv2.ORB_create() def features(image, extractor): assert type(image) == np.ndarray keypoints, descriptors = extractor.detectAndCompute(image, None) return keypoints, descriptors # - images[1] histogram(images[1], kde=True) kp, desc = features(np.array(images[1]), extractor) Image.fromarray(cv2.drawKeypoints(np.array(images[1]), kp, None)) ImageOps.equalize(images[1]) histogram(ImageOps.equalize(images[1]), kde=True) kp, desc = features(np.array(ImageOps.equalize(images[1])), extractor) Image.fromarray(cv2.drawKeypoints(np.array(images[1]), kp, None)) kp, desc = features(np.array(images[1].filter(ImageFilter.BoxBlur(1))), extractor) Image.fromarray(cv2.drawKeypoints(np.array(images[1]), kp, None)) def preprocess(image): image = ImageOps.equalize(image) image = image.filter(ImageFilter.BoxBlur(1)) return image preprocess(images[30]) kp, desc = features(np.array(preprocess(images[1])), extractor) Image.fromarray(cv2.drawKeypoints(np.array(preprocess(images[1])), kp, None)) # + index = 5 kp1, desc1 = features(np.array(images[index]), extractor) Image.fromarray(cv2.drawKeypoints(np.array(images[index]), kp1, None)) # + index = 11 kp2, desc2 = features(np.array(preprocess(images[index])), extractor) Image.fromarray(cv2.drawKeypoints(np.array(preprocess(images[index])), kp2, None)) # + bf = cv2.BFMatcher_create(cv2.NORM_HAMMING, crossCheck=True) matches = bf.match(desc1, desc2) marches = sorted(matches, key=lambda x: x.distance) Image.fromarray(cv2.drawMatches(np.array(images[5]), kp1, np.array(images[11]), kp2, matches[:10], flags=2, outImg=None)) # - # ### Premier essai : peut-on séparer les montres des tasses à café? 
coffee_mugs = df[df['label'] == 'Coffee Mugs'] coffee_mugs = [scale_down(Image.open(x)) for x in coffee_mugs['path'].to_list()] watches = df[df['label'] == 'Wrist Watches'] watches = [scale_down(Image.open(x)) for x in watches['path'].to_list()] coffee_mugs[0] watches[0] kp1, desc1 = features(np.array(watches[0]), extractor) Image.fromarray(cv2.drawKeypoints(np.array(watches[0]), kp1, None)) descriptor_list = list() for im in watches + coffee_mugs: im = im.convert('L') kp, desc = features(np.array(im), extractor) if (desc is not None): descriptor_list.append(desc) len(descriptor_list) len(watches) descriptor_list = np.concatenate(descriptor_list) descriptor_list.shape descriptor_list kmeans = MiniBatchKMeans(n_clusters=800, init_size=3000) kmeans.fit(descriptor_list) # + pca = PCA(n_components=3) pca_res = pca.fit_transform(descriptor_list) pca_res = pd.DataFrame(pca_res) pca_res['kmeans'] = kmeans.labels_ fig, ax = plt.subplots(1, figsize=(12, 8)) # px.scatter_3d(data_frame=pca_res, x=0, y=1, z=2, color='kmeans') sns.scatterplot(data=pca_res, x=0, y=1, hue='kmeans', ax=ax) ax.legend_.remove() plt.show() # - hist, bin_edges = np.histogram(descriptor_list, bins=800) len(hist) plt.bar(bin_edges[:-1], hist) # + from collections import Counter def build_histogram(descriptor, kmeans): labels = kmeans.predict(descriptor) # centers = kmeans.cluster_centers_ return Counter(labels) # - preprocessed_images = [] for image in watches + coffee_mugs: image = image.convert('L') key, desc = features(np.array(image), extractor) if (desc is not None): histogram = build_histogram(desc, kmeans) preprocessed_images.append(histogram) bofvw = pd.DataFrame.from_records(preprocessed_images) bofvw.fillna(0, inplace=True) bofvw pca = PCA(n_components=20) pca_50 = pca.fit_transform(bofvw) pca_50.shape tsne = TSNE(n_components=2) tsne_res = tsne.fit_transform(bofvw) # + tsne_res = pd.DataFrame(tsne_res) tsne_res['label']= 'watch' tsne_res.loc[149:, 'label'] = 'Mugs' fig, ax = plt.subplots(1, figsize=(12, 8)) # px.scatter_3d(data_frame=pca_res, x=0, y=1, z=2, color='kmeans') sns.scatterplot(data=tsne_res, x=0, y=1, hue='label', ax=ax) # ax.legend_.remove() plt.show() # - # **warning** A ce stade rien ne va plus... # # Les features extraites sont communes aux deux catégories.... # D'où vient l'erreur? 
# # * Algorithme très bon mais pas adapté à ce genre de tâches: # * Bon pour créer des photos panoramiques # * Bon pour détecter le même objet dans des conditions différentes items = dict() for dirname, _, filenames in os.walk('../data/external/example_lafayette/'): if filenames: key = dirname.split('/')[-1] key = key.replace('\\', '-') items[key] = [Image.open(os.path.join(dirname, x)) for x in filenames] items[key] = [scale_down(im, factor=2) for im in items[key]] items['watches-1'][1] kp1, desc1 = features(np.array(items['watches-1'][0]), extractor) kp2, desc2 = features(np.array(items['watches-1'][1]), extractor) Image.fromarray(cv2.drawKeypoints(np.array(items['watches-1'][0]), kp1, None)) Image.fromarray(cv2.drawKeypoints(np.array(items['watches-1'][1]), kp2, None)) # + bf = cv2.BFMatcher_create(cv2.NORM_HAMMING, crossCheck=True) matches = bf.match(desc1, desc2) marches = sorted(matches, key=lambda x: x.distance) Image.fromarray(cv2.drawMatches(np.array(items['watches-1'][0]), kp1, np.array(items['watches-1'][1]), kp2, matches[:30], flags=2, outImg=None)) # + # FLANN parameters FLANN_INDEX_KDTREE = 0 index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5) search_params = dict(checks=50) # or pass empty dictionary flann = cv2.FlannBasedMatcher(index_params, search_params) matches = flann.knnMatch(np.float32(desc1), np.float32(desc2), k=3) # Need to draw only good matches, so create a mask matchesMask = [[0,0] for i in range(len(matches))] # ratio test as per Lowe's paper for i, x in enumerate(matches): m, n, o = x if m.distance < 0.7*n.distance: matchesMask[i]=[1,0] draw_params = dict(matchColor = (0,255,0), singlePointColor = (255,0,0), matchesMask = matchesMask, flags=0) Image.fromarray(cv2.drawMatchesKnn(np.array(items['watches-1'][0]), kp1, np.array(items['watches-1'][1]), kp2, matches, None, **draw_params)) # - kp1, desc1 = features(np.array(watches[0]), extractor) Image.fromarray(cv2.drawKeypoints(np.array(watches[0]), kp1, None)) kp2, desc2 = features(np.array(watches[1]), extractor) Image.fromarray(cv2.drawKeypoints(np.array(watches[1]), kp2, None)) # + # FLANN parameters FLANN_INDEX_KDTREE = 0 index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5) search_params = dict(checks=50) # or pass empty dictionary flann = cv2.FlannBasedMatcher(index_params, search_params) matches = flann.knnMatch(np.float32(desc1), np.float32(desc2), k=3) # Need to draw only good matches, so create a mask matchesMask = [[0,0] for i in range(len(matches))] # ratio test as per Lowe's paper for i, x in enumerate(matches): m, n, o = x if m.distance < 0.7*n.distance: matchesMask[i]=[1,0] draw_params = dict(matchColor = (0,255,0), singlePointColor = (255,0,0), matchesMask = matchesMask, flags=0) Image.fromarray(cv2.drawMatchesKnn(np.array(watches[0]), kp1, np.array(watches[1]), kp2, matches, None, **draw_params)) # - kp1, desc1 = features(np.array(watches[0]), extractor) Image.fromarray(cv2.drawKeypoints(np.array(watches[0]), kp1, None)) kp2, desc2 = features(np.array(coffee_mugs[0]), extractor) Image.fromarray(cv2.drawKeypoints(np.array(coffee_mugs[0]), kp2, None)) # + # FLANN parameters FLANN_INDEX_KDTREE = 0 index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5) search_params = dict(checks=50) # or pass empty dictionary flann = cv2.FlannBasedMatcher(index_params, search_params) matches = flann.knnMatch(np.float32(desc1), np.float32(desc2), k=3) # Need to draw only good matches, so create a mask matchesMask = [[0,0] for i in range(len(matches))] # ratio test as per Lowe's paper for i, x in 
enumerate(matches): m, n, o = x if m.distance < 0.7*n.distance: matchesMask[i]=[1,0] draw_params = dict(matchColor = (0,255,0), singlePointColor = (255,0,0), matchesMask = matchesMask, flags=0) Image.fromarray(cv2.drawMatchesKnn(np.array(watches[0]), kp1, np.array(coffee_mugs[0]), kp2, matches, None, **draw_params)) # - kp1, desc1 = features(np.array(coffee_mugs[0]), extractor) Image.fromarray(cv2.drawKeypoints(np.array(coffee_mugs[0]), kp1, None)) kp2, desc2 = features(np.array(coffee_mugs[1]), extractor) Image.fromarray(cv2.drawKeypoints(np.array(coffee_mugs[1]), kp2, None)) # + # FLANN parameters FLANN_INDEX_KDTREE = 0 index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5) search_params = dict(checks=50) # or pass empty dictionary flann = cv2.FlannBasedMatcher(index_params, search_params) matches = flann.knnMatch(np.float32(desc1), np.float32(desc2), k=3) # Need to draw only good matches, so create a mask matchesMask = [[0,0] for i in range(len(matches))] # ratio test as per Lowe's paper for i, x in enumerate(matches): m, n, o = x if m.distance < 0.7*n.distance: matchesMask[i]=[1,0] draw_params = dict(matchColor = (0,255,0), singlePointColor = (255,0,0), matchesMask = matchesMask, flags=0) Image.fromarray(cv2.drawMatchesKnn(np.array(coffee_mugs[0]), kp1, np.array(coffee_mugs[1]), kp2, matches, None, **draw_params)) # - Image.fromarray(np.array(coffee_mugs[1]) * 3) Image.fromarray(np.array(coffee_mugs[0]) * 3) np.array(watches[0]).mean() np.array(coffee_mugs[0]).mean() np.array(images[1]).mean() (np.array(images[1]) * 3).mean() Image.fromarray(np.array(images[1]) * 3)
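# The mean-intensity checks above suggest that part of the problem comes from global brightness and contrast differences between product photos. As an optional sketch, a local contrast normalisation such as CLAHE (available in OpenCV) could be applied before ORB instead of the global `ImageOps.equalize` used earlier; whether it actually improves the bag-of-visual-words clustering would still have to be verified on this dataset.

# +
def preprocess_clahe(image, clip_limit=2.0, tile_grid_size=(8, 8)):
    """Equalise contrast locally with CLAHE before extracting ORB features."""
    gray = np.array(image.convert('L'))
    clahe = cv2.createCLAHE(clipLimit=clip_limit, tileGridSize=tile_grid_size)
    return clahe.apply(gray)


clahe_img = preprocess_clahe(coffee_mugs[0])
kp_clahe, desc_clahe = features(clahe_img, extractor)
Image.fromarray(cv2.drawKeypoints(clahe_img, kp_clahe, None))
# -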
notebooks/2.1-tg-image-processing-orb.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + code_folding=[23] from sklearn.svm import LinearSVR from sklearn.linear_model import Lasso, HuberRegressor, BayesianRidge, Ridge from sklearn.model_selection import GroupKFold from sklearn.metrics import mean_absolute_error from logging import getLogger, INFO, StreamHandler, FileHandler, Formatter from IPython import display from glob import glob from fastprogress import progress_bar import lightgbm as lgb import matplotlib.pyplot as plt import numpy as np import os import pandas as pd import random import seaborn as sns import time import warnings warnings.filterwarnings("ignore") def seed_everything(seed=7777): random.seed(seed) os.environ['PYTHONHASHSEED'] = str(seed) np.random.seed(seed) # torch.manual_seed(seed) # torch.cuda.manual_seed(seed) #torch.backends.cudnn.deterministic = True # tf.random.set_seed(seed) SEED = 7777 seed_everything(SEED) # + def get_logger(filename='log'): logger = getLogger(__name__) logger.setLevel(INFO) handler1 = StreamHandler() handler1.setFormatter(Formatter("%(message)s")) handler2 = FileHandler(filename=f"{filename}.log") handler2.setFormatter(Formatter("%(message)s")) logger.addHandler(handler1) logger.addHandler(handler2) return logger logger = get_logger('after_competition') # - base_dir = './input/osic-pulmonary-fibrosis-progression/' # # preprocess # ## construct train df # + code_folding=[5] train_df = pd.read_csv(base_dir + 'train.csv') output = pd.DataFrame() gb = train_df.groupby('Patient') tqdm = progress_bar(gb, total=len(gb)) for ID, usr_df in tqdm: usr_output = pd.DataFrame() for week, tmp_df in usr_df.groupby('Weeks'): rename_cols = { 'Weeks': 'base_Week', 'FVC': 'base_FVC', 'Percent': 'base_Percent', } tmp_df = tmp_df.rename(columns=rename_cols) drop_cols = ['Age', 'Sex', 'SmokingStatus', 'Percent'] _usr_op = usr_df.drop(columns=drop_cols) _usr_op = _usr_op.rename(columns={'Weeks': 'target_week'}) _usr_op = _usr_op.merge(tmp_df, on='Patient') _usr_op['weeks_passed'] = _usr_op['target_week'] - _usr_op['base_Week'] usr_output = pd.concat([usr_output, _usr_op]) output = pd.concat([output, usr_output]) #train = output[output['Week_passed'] != 0].reset_index(drop=True) train_df = output.reset_index(drop=True) train_df['phase'] = 'train' print(train_df.shape) #train_df = train_df[train_df.weeks_passed > 0] train_df # - # ## construct test input df # + sub_df = pd.read_csv(base_dir + 'sample_submission.csv') sub_df['Patient'] = sub_df.Patient_Week.apply(lambda x: x.split('_')[0]) sub_df['target_week'] = sub_df.Patient_Week.apply(lambda x: x.split('_')[1]).astype(int) test_df = pd.read_csv(base_dir + 'test.csv') test_df = test_df.rename(columns=rename_cols) test_df = sub_df.drop(columns=['FVC', 'Confidence']).merge(test_df, on='Patient') test_df['weeks_passed'] = test_df['target_week'] - test_df['base_Week'] test_df['phase'] = 'test' print(test_df.shape) test_df.sample(5) sub_df = pd.read_csv(base_dir + 'sample_submission.csv') sub_df.sample(10) # - all_df = pd.concat([train_df, test_df], axis=0) all_df # ## feature # + all_df['percent_reciprocal'] = 1 / all_df.base_Percent all_df['percent_ratio'] = all_df.base_FVC / all_df.base_Percent def calculate_height(row): height = 0 if row['Sex'] == 'Male' or 'Female': height = (((row['base_FVC'] / 933.33) + 0.026 * row['Age'] + 2.89) / 0.0443) return int(height) all_df['Height'] = 
all_df.apply(calculate_height, axis=1) def FEV1(row): FEV = 0 if row['Sex'] == 'Male': FEV = (0.84 * row['base_FVC'] - 0.23) else: FEV = (0.84 * row['base_FVC'] - 0.36) return FEV all_df['FEV'] = all_df.apply(FEV1, axis=1) all_df['FEV_ratio'] = all_df.FEV / all_df.base_FVC # onehot all_df = pd.concat([ all_df, pd.get_dummies(all_df.Sex), pd.get_dummies(all_df.SmokingStatus) ], axis=1) all_df # - # ## numerical # + from sklearn.preprocessing import RobustScaler, StandardScaler numerical_col = [ 'target_week', 'base_Week', 'base_FVC', 'base_Percent', 'Age', 'weeks_passed', 'Height', 'FEV', 'percent_reciprocal', 'FEV_ratio', 'percent_ratio', ] t = RobustScaler().fit(all_df[all_df.phase == 'train'][numerical_col]) #t = StandardScaler().fit(all_df[all_df.phase == 'train'][numerical_col]) all_df[numerical_col] = t.transform(all_df[numerical_col]) all_df # + feature_columns = [ 'target_week', 'base_Week', 'base_FVC', 'base_Percent', 'Age', 'weeks_passed', 'Height', 'FEV', 'percent_reciprocal', 'Female', 'Male', 'Currently smokes', 'Ex-smoker', 'Never smoked', #'Sex', #'SmokingStatus', 'FEV_ratio', 'percent_ratio', ] train_df = all_df.loc[all_df.phase == 'train'].reset_index(drop=True) test_df = all_df.loc[all_df.phase == 'test'] train_df.reset_index(inplace=True) train_df[feature_columns] # - # # LGB # + code_folding=[] def get_scored_index(df): scored_index = [] gb = df.groupby(['Patient', 'base_Week']) for g in gb: scored_index.append(g[1].target_week.index.values[-3:]) return np.array(scored_index).flatten() def laplace_log_likelihood(preds, ys, confidence, mean=True, index=None): std_clip = np.maximum(confidence, 70) delta = np.minimum(np.abs(preds - ys), 1000) metric = -2**0.5 * delta / std_clip - np.log(2 ** 0.5 * std_clip) if np.any(index): metric = metric[index] if mean: return metric.mean() return metric # + code_folding=[3, 87] scored_index = get_scored_index(train_df) def run_single(models, train_df, test_df, fold_idx, feature_columns, target='FVC', fold_num=0, submit=False): # index trn_idx = fold_idx[0] val_idx = fold_idx[1] # shuffle #trn_idx = np.random.permutation(trn_idx) # data x_trn = train_df[feature_columns].iloc[trn_idx].values y_trn = train_df['FVC'].iloc[trn_idx].values x_val = train_df[feature_columns].iloc[val_idx].values y_val = train_df['FVC'].iloc[val_idx].values # fit for m in models: m.fit(x_trn, y_trn) # test test_preds = [m.predict(test_df[feature_columns]) for m in models] predict = test_preds[0] confidence = test_preds[1] - test_preds[2] #submit if submit: return predict, confidence # predict trn_preds = [m.predict(x_trn) for m in models] trn_conf = trn_preds[1] - trn_preds[2] score = laplace_log_likelihood(trn_preds[0], y_trn, trn_conf, index=np.where( np.isin(trn_idx, scored_index))[0]) val_preds = [m.predict(x_val) for m in models] val_conf = val_preds[1] - val_preds[2] val_score = laplace_log_likelihood(val_preds[0], y_val, val_conf, index=np.where( np.isin(val_idx, scored_index))[0]) # oof oof = np.zeros(len(train_df)) oof_conf = np.zeros(len(train_df)) oof[val_idx] = val_preds[0] oof_conf[val_idx] = val_conf # feature importance try: feature_importance = models[0].feature_importances_ except AttributeError: try: feature_importance = models[0].coef_ except AttributeError: feature_importance = 0 #show_feature_importance(feature_importance, feature_columns) return oof, oof_conf, predict, confidence, feature_importance, val_score def show_feature_importance(feature_importance, feature_columns, name=None, save=True): plt.figure(figsize=(6, 4)) 
sns.barplot(x=feature_importance, y=feature_columns) plt.suptitle(f'{name}') if save: name = time.ctime().replace(' ', '_').replace(':', '-') plt.savefig(f'feature_importance_{name}.png') plt.show() def run_kfolds(models, train_df, test_df, feature_columns, target='FVC', folds=5, submit=False): oof = np.zeros(len(train_df)) oof_conf = np.zeros(len(train_df)) preds = np.zeros(len(test_df)) preds_conf = np.zeros(len(test_df)) feature_importance = 0 cv_scores = [] gkf = GroupKFold(folds) for n, fold_idx in enumerate(gkf.split(train_df, groups=train_df.Patient)): oof_, oof_conf_, pred_, conf_, feat_import_, score = run_single( models, train_df, test_df, fold_idx, feature_columns, target=target, fold_num=n, submit=submit) oof += oof_ oof_conf += oof_conf_ preds += pred_ / folds preds_conf += conf_ / folds feature_importance += feat_import_ / folds cv_scores.append(score) feature_importance = np.array(feature_importance) / folds show_feature_importance(feature_importance, feature_columns, models[0].__class__) # log logger.info( f'==================={models[0].__class__} fold========================' ) logger.info(f'CV mae: {mean_absolute_error(train_df[target], oof): .6f}') cv_score = laplace_log_likelihood(oof, train_df[target], oof_conf, index=scored_index) logger.info( f'CV score: {np.mean(cv_score): .6f} ± {np.std(cv_scores): .6f}') logger.info(f"\n") return oof, oof_conf, preds, preds_conf # - # ## state 1 # + code_folding=[0] # state 1 param = { 'num_leaves': 5, 'bagging_fraction': 0.9, 'bagging_freq': 4, 'feature_fraction': 0.8, 'max_depth': 4, 'metric': 'rmse', 'min_child_samples': 31, 'min_child_weight': 0.001, 'n_estimators': 70, 'reg_alpha': 2.1, 'reg_lambda': 1.4, 'force_col_wise':True } alpha = 0.75 model_list = [ BayesianRidge(), # -6.707331 HuberRegressor(max_iter=200), # -6.696623 Lasso(), # -6.693470 lgb.LGBMRegressor(objective='regression_l1', # -6.737370 alpha=0.5, **param), LinearSVR(C=50), # -6.697384 Ridge(alpha=0.5), # -6.707246 ] oof_df = pd.DataFrame() pred_df = pd.DataFrame() for idx, m in enumerate(model_list): models = [ m, lgb.LGBMRegressor(objective='quantile', alpha=alpha, **param), lgb.LGBMRegressor(objective='quantile', alpha=1 - alpha, **param) ] oof, oof_conf, predict, confidence = run_kfolds( models, train_df, test_df, feature_columns, target='FVC', folds=10, submit=False, ) oof_df[idx] = oof pred_df[idx] = predict # - score = laplace_log_likelihood(oof_df.mean(axis=1), train_df.FVC, oof_conf, index=scored_index) mae = mean_absolute_error(oof_df[-3:].mean(axis=1), train_df.FVC[-3:]) logger.info(f'state 1: cv score: {score: .6f}, cv mae: {mae:.6f}\n') # ## stack 2 # + # state 2 feature # normalize oof_df['mean'] = oof_df.mean(axis=1) pred_df['mean'] = pred_df.mean(axis=1) t2 = RobustScaler().fit(oof_df) oof_df = pd.DataFrame(t2.transform(oof_df)) pred_df = pd.DataFrame(t2.transform(pred_df)) # create state dataframe train_df2 = train_df.copy() test_df2 = test_df.copy() model_name = [f'model{i}' for i in range(len(model_list))] model_name.append('mean') train_df2[model_name] = oof_df test_df2[model_name] = pred_df feature_columns2 = feature_columns.copy() for i in range(len(model_list)): feature_columns2.append(f'model{i}') feature_columns2.append('mean') # + code_folding=[] # state 2 param = { 'num_leaves': 5, 'bagging_fraction': 0.9, 'bagging_freq': 4, 'feature_fraction': 0.8, 'max_depth': 4, 'metric': 'rmse', 'min_child_samples': 31, 'min_child_weight': 0.001, 'n_estimators': 70, 'reg_alpha': 2.1, 'reg_lambda': 1.4, } alpha = 0.75 model_list = [ 
#HuberRegressor(max_iter=200), # -6.684595 #Lasso(), # -6.707719 LinearSVR(C=50), # -6.665948 #Ridge(alpha=0.5), # -6.709253 ] oof_df = pd.DataFrame() pred_df = pd.DataFrame() for idx, m in enumerate(model_list): models = [ m, lgb.LGBMRegressor(objective='quantile', alpha=alpha, **param), lgb.LGBMRegressor(objective='quantile', alpha=1 - alpha, **param) ] oof, oof_conf, predict, confidence = run_kfolds( models, train_df2, test_df2, #model_name, feature_columns2, target='FVC', folds=10, submit=False, ) oof_df[idx] = oof pred_df[idx] = predict # - score = laplace_log_likelihood(oof_df.mean(axis=1), train_df.FVC, oof_conf, index=scored_index) mae = mean_absolute_error(oof_df[-3:].mean(axis=1), train_df.FVC[-3:]) logger.info(f'state 2: cv score: {score: .6f}, cv mae: {mae:.6f}') # # result oof_preds = oof_df.mean(axis=1) pred_df = pred_df.mean(axis=1) # + plt.figure(figsize=(5, 5)) plt.scatter(train_df['FVC'], oof_preds) plt.plot(range(800, 6300), range(800, 6300), color='C1') plt.ylabel('predictions') plt.xlabel('FVC (labels)') plt.show() # - delta = oof_preds - train_df['FVC'] plt.hist(delta, bins=50) plt.show() score = laplace_log_likelihood(oof_df.mean(axis=1), train_df.FVC, oof_conf, mean=False) train_df['score'] = score train_df['predict'] = oof_df.mean(axis=1) train_df['confidence'] = oof_conf bad_df = train_df.iloc[scored_index][score < -6.9].copy() bad_df # + bad_df[numerical_col] = t.inverse_transform(bad_df[numerical_col]) bad_df['preds_diff'] = bad_df['predict'] - bad_df['FVC'] bad_df[[ 'Patient', 'base_Week', 'weeks_passed', 'base_FVC', 'FVC', 'predict', 'preds_diff', 'confidence', 'score', 'Age', 'Sex', 'SmokingStatus' ]].sort_values('score', ascending=True).head(50) # - # + submission = pd.DataFrame() submission['Patient_Week'] = test_df.Patient_Week submission['FVC'] = pred_df submission['Confidence'] = confidence submission # - submission.to_csv('submission.csv', index=False)
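# ## Added sketch: quantile-based confidence and the clipped Laplace metric
#
# The pipeline above pairs a point regressor with two LightGBM quantile models (alpha 0.75 and 0.25) and scores the resulting spread with the clipped Laplace log likelihood. The cell below is an added, self-contained sketch of that idea on synthetic data so the mechanics can be checked without the OSIC tables; the clipping constants (sigma >= 70, |error| <= 1000) follow the metric defined above, while the data and hyperparameters are illustrative only.

# +
import numpy as np
import lightgbm as lgb

rng = np.random.default_rng(0)
X_demo = rng.normal(size=(500, 3))
y_demo = 2500 + 400 * X_demo[:, 0] + 150 * rng.normal(size=500)

# One model per quantile: the median as the point prediction,
# the 0.25/0.75 quantiles to form the uncertainty band.
quantile_models = {
    q: lgb.LGBMRegressor(objective='quantile', alpha=q, n_estimators=100)
    for q in (0.25, 0.50, 0.75)
}
for q, m in quantile_models.items():
    m.fit(X_demo, y_demo)

pred_demo = quantile_models[0.50].predict(X_demo)
conf_demo = quantile_models[0.75].predict(X_demo) - quantile_models[0.25].predict(X_demo)


def laplace_log_likelihood_demo(y_true, y_pred, sigma):
    """Clipped Laplace log likelihood (higher is better), matching the metric above."""
    sigma_clipped = np.maximum(sigma, 70)
    delta = np.minimum(np.abs(y_true - y_pred), 1000)
    return np.mean(-np.sqrt(2) * delta / sigma_clipped - np.log(np.sqrt(2) * sigma_clipped))


print('demo metric:', laplace_log_likelihood_demo(y_demo, pred_demo, conf_demo))
# -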
6_after_competition.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: conda_python3 # language: python # name: conda_python3 # --- # # Word2Vec v2: "Mistake Not" # ### Connect to Database # + colab={"base_uri": "https://localhost:8080/", "height": 36} colab_type="code" executionInfo={"elapsed": 3001, "status": "ok", "timestamp": 1578954025043, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mCBd8BZPrGsSITPUox_UKbSmoT6f0h8PucNwTr60w=s64", "userId": "15095415326628117346"}, "user_tz": 300} id="iQWJUpFjdlyj" outputId="f44ae211-ffec-4beb-930f-24240491e14d" # ! pip3 install psycopg2-binary --user import pandas as pd import psycopg2 import numpy as np from getpass import getpass # + colab={"base_uri": "https://localhost:8080/", "height": 36} colab_type="code" executionInfo={"elapsed": 5789, "status": "ok", "timestamp": 1578954032020, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mCBd8BZPrGsSITPUox_UKbSmoT6f0h8PucNwTr60w=s64", "userId": "15095415326628117346"}, "user_tz": 300} id="at19RYvtdlyw" outputId="0be27d51-9199-483a-f79d-635dcb902f35" # connect to database connection = psycopg2.connect( database = "postgres", user = "postgres", password = <PASSWORD>(), host = "movie-rec-scrape.cvslmiksgnix.us-east-1.rds.amazonaws.com", port = '5432' ) # Enter database password below and press Enter. # + colab={"base_uri": "https://localhost:8080/", "height": 36} colab_type="code" executionInfo={"elapsed": 2765, "status": "ok", "timestamp": 1578955267613, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mCBd8BZPrGsSITPUox_UKbSmoT6f0h8PucNwTr60w=s64", "userId": "15095415326628117346"}, "user_tz": 300} id="d417YjdVdly9" outputId="bc2f5955-c858-40ca-8e71-f0e3d2410afd" # create cursor that is used throughout try: c = connection.cursor() print("Connected!") except: print("Connection problem chief!") # - # ### Prepare data and train. # 1. Get the list of reviewers whose reviews we want (about 17k) # 2. Get the dataframe of reviewers, movie IDs with positive reviews # 3. Inner join the above two dataframes. # 4. Run the list constructor on the join table to construct the training data. # - Training data is of this format: [['movieid1', 'movieid2', ...], ...] # 5. Train Word2Vec on the list of watch histories (which are themselves lists of movie IDs). # 6. Save the model. 
# + colab={} colab_type="code" id="tfmu_UdndlzG" # Get reviewers with at least 10 positive reviews (rating 7-10 inclusive) c.execute(""" select username from reviews where user_rating between 7 and 10 group by username having count(username) >= 10 order by count(username) desc """) reviewers = c.fetchall() # + colab={} colab_type="code" id="8A_os2zUdlzQ" # Get positive reviews from database c.execute("SELECT movie_id, username FROM reviews WHERE user_rating > 6") result = c.fetchall() # create reviews dataframe df = pd.DataFrame(result, columns = ['movieid', 'userid']) df.head() # + colab={"base_uri": "https://localhost:8080/", "height": 206} colab_type="code" executionInfo={"elapsed": 6880, "status": "ok", "timestamp": 1578955272419, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mCBd8BZPrGsSITPUox_UKbSmoT6f0h8PucNwTr60w=s64", "userId": "15095415326628117346"}, "user_tz": 300} id="_47oo4s1dlzY" outputId="9f3542e2-2648-445d-a601-18820c9d9723" # create reviewers dataframe df_reviewers = pd.DataFrame(reviewers, columns = ['userid']) # - # merge to get only the IDs relevant to training df = df.merge(df_reviewers, how='inner', on='userid') df.shape # + colab={"base_uri": "https://localhost:8080/", "height": 73} colab_type="code" executionInfo={"elapsed": 3472, "status": "ok", "timestamp": 1578955409541, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mCBd8BZPrGsSITPUox_UKbSmoT6f0h8PucNwTr60w=s64", "userId": "15095415326628117346"}, "user_tz": 300} id="z2OBA-xqdl0m" outputId="faa65cc8-cd3d-4f08-fb57-5099851a509c" # # ! sudo su # # ! yum update -y # # ! yum -y install python-pip # # ! python -V # + colab={"base_uri": "https://localhost:8080/", "height": 36} colab_type="code" executionInfo={"elapsed": 3298, "status": "ok", "timestamp": 1578955411283, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mCBd8BZPrGsSITPUox_UKbSmoT6f0h8PucNwTr60w=s64", "userId": "15095415326628117346"}, "user_tz": 300} id="a8DITE9wdl0q" outputId="79c68333-c722-48a2-ee72-49aa5f1c7a15" # # ! which pip # - # ! python -m pip install tqdm # # ! python -c 'import tqdm' # ! 
python -m pip install gensim # + colab={"base_uri": "https://localhost:8080/", "height": 646} colab_type="code" executionInfo={"elapsed": 13545, "status": "ok", "timestamp": 1578955421790, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mCBd8BZPrGsSITPUox_UKbSmoT6f0h8PucNwTr60w=s64", "userId": "15095415326628117346"}, "user_tz": 300} id="ZvEmewUhdl05" outputId="3e8068d2-0604-4d06-9dc0-47989994f120" import random from tqdm import tqdm from gensim.models import Word2Vec import matplotlib.pyplot as plt # %matplotlib inline import warnings; warnings.filterwarnings('ignore') # + colab={} colab_type="code" id="smscgWxjdl0_" # list to capture watch history of the users watched_train = [] # populate the list with the movie codes for i in tqdm(reviewers): temp = df[df["userid"] == i[0]]["movieid"].tolist() watched_train.append(temp) len(watched_train) # - # save the model for later import pickle pickle.dump(watched_train, open('watched_train.sav', 'wb')) # + # #save the model in protocol 2 so it can be opened in python 2.7 # import pickle # temp = pickle.load(open('watched_train.sav', 'rb')) # pickle.dump(temp, open('watched_train.sav', 'wb'), protocol=2) # - # ### Train the Model # # **Important:** The previous model was trained on movie IDs that were inside lists of length 1, with watch histories being lists of lists. # # This model eschews the inner lists. Each watch history is simply a list of strings. # + colab={} colab_type="code" id="gdGxB2_gdl1S" outputId="b367f0ff-8e54-4c9b-dba5-fd3287d4e413" # train word2vec model model = Word2Vec(window = 10, sg = 1, hs = 0, negative = 10, # for negative sampling alpha=0.03, min_alpha=0.0007, seed = 14) model.build_vocab(watched_train, progress_per=200) model.train(watched_train, total_examples = model.corpus_count, epochs=10, report_delay=1) # + colab={} colab_type="code" id="-sWPKYgTdl1W" # save word2vec model model.save("w2v_mistakenot.model") # - # ### Test the model # + colab={} colab_type="code" id="vSeGqA97dl1Z" # load model import gensim model = gensim.models.Word2Vec.load("w2v_mistakenot.model") # + colab={} colab_type="code" id="tFH5khHrdl1d" # prunes the model, making it faster but unable to train any more. model.init_sims(replace=True) # + colab={} colab_type="code" id="jsHJnjXtdl1g" print(model) # + colab={} colab_type="code" id="izJD5tDVdl1k" outputId="e628d91e-1b25-46d6-923b-e4004a0050e3" # extract all vectors X = model[model.wv.vocab] X.shape # - # IDs are words in the model, and callable as such. model['0110912'] # + def get_title(id): """Takes an id string and returns the movie title.""" try: c.execute(f""" select primary_title, start_year from movies where movie_id = '{id}'""") except: return f"Movie title unknown. ID:{id}" title = c.fetchone() return title def predict(model, input, num_recs=6): """For the input, do the predictions and return them. Args: model: the word2vec model object. input: a list of movie IDs. num_recs: the number of recommendations to return. """ def _aggregate_vectors(movies): # get the vector average of the movies in the input. # discard unrecognized IDs. movie_vec = [] for i in movies: try: movie_vec.append(model[i]) except KeyError: continue return np.mean(movie_vec, axis=0) def _similar_movies(v, n): # extract most similar movies for the input vector return model.similar_by_vector(v, topn= n+1)[1:] # aggregate input and find similar vectors. 
recs = _similar_movies(_aggregate_vectors(input), num_recs) # get titles recs = [get_title(y[0]) for y in recs] return recs # + # test cases # A list of some Coen Bros movies. coen_bros = ['116282', '2042568', '1019452', '1403865', '190590', '138524', '335245', '477348', '887883', '101410'] # Data scientist's recent watches. cooper_recent = ['0053285', '0038650', '0046022', '4520988', '1605783', '6751668', '0083791', '0115685', '0051459', '8772262', '0061184', '0041959', '7775622'] # dirkh public letterboxd recent watches. dirkh = ['7975244', '8106534', '1489887', '1302006', '7286456', '6751668', '8364368', '2283362', '6146586', '2194499', '7131622', '6857112'] # - predict(model=model, input=dirkh, num_recs=20)
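# ### Added sketch: averaged-vector recommendation on toy data
#
# The `predict` helper above averages the Word2Vec vectors of a user's positively rated movies and looks up the nearest movie vectors. The cell below repeats that pattern end to end on fabricated watch histories so it runs without the database; the IDs (`m1` ... `m6`) and hyperparameters are made up for illustration and are not the tuned values used above.

# +
from gensim.models import Word2Vec
import numpy as np

# Each "sentence" is one user's watch history: a plain list of movie-ID strings.
toy_histories = [
    ['m1', 'm2', 'm3', 'm4'],
    ['m2', 'm3', 'm5'],
    ['m1', 'm4', 'm5', 'm6'],
    ['m3', 'm4', 'm6'],
] * 50  # repeated so the toy corpus is large enough to train on

toy_model = Word2Vec(toy_histories, window=5, min_count=1, sg=1, seed=14)


def recommend(model, history, topn=3):
    """Average the vectors of a watch history and return the nearest unseen movie IDs."""
    vectors = [model.wv[m] for m in history if m in model.wv]
    profile = np.mean(vectors, axis=0)
    # Ask for extra neighbours so items already in the history can be dropped.
    similar = model.wv.similar_by_vector(profile, topn=topn + len(history))
    return [(movie_id, score) for movie_id, score in similar
            if movie_id not in history][:topn]


print(recommend(toy_model, ['m1', 'm2']))
# -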
SageMaker/Word2Vec_v2_train.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # Lambda School Data Science # # *Unit 2, Sprint 2, Module 4* # # --- # + # %%capture import sys # If you're on Colab: if 'google.colab' in sys.modules: DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Linear-Models/master/data/' # !pip install category_encoders==2.* # !pip install pandas-profiling==2.* # If you're working locally: else: DATA_PATH = '../data/' # + [markdown] colab_type="text" id="nCc3XZEyG3XV" # # Module Project: Classification Metrics # # This sprint, the module projects will focus on creating and improving a model for the Tanazania Water Pump dataset. Your goal is to create a model to predict whether a water pump is functional, non-functional, or needs repair. # # Dataset source: [DrivenData.org](https://www.drivendata.org/competitions/7/pump-it-up-data-mining-the-water-table/). # # ## Directions # # The tasks for this project are as follows: # # - **Task 1:** Use `wrangle` function to import training and test data. # - **Task 2:** Split training data into feature matrix `X` and target vector `y`. # - **Task 3:** Split training data into training and validation sets. # - **Task 4:** Establish the baseline accuracy score for your dataset. # - **Task 5:** Build `model`. # - **Task 6:** Calculate the training and validation accuracy score for your model. # - **Task 7:** Plot the confusion matrix for your model. # - **Task 8:** Print the classification report for your model. # - **Task 9:** Identify likely `'non-functional'` pumps in the test set. # - **Task 10:** Find likely `'non-functional'` pumps serving biggest populations. # - **Task 11 (`stretch goal`):** Plot pump locations from Task 10. # # You should limit yourself to the following libraries for this project: # # - `category_encoders` # - `matplotlib` # - `pandas` # - `pandas-profiling` # - `plotly` # - `sklearn` # # # # I. Wrangle Data # - def wrangle(fm_path, tv_path=None): if tv_path: df = pd.merge(pd.read_csv(fm_path, na_values=[0, -2.000000e-08]), pd.read_csv(tv_path)).set_index('id') else: df = pd.read_csv(fm_path, na_values=[0, -2.000000e-08], index_col='id') # Drop constant columns df.drop(columns=['recorded_by'], inplace=True) # Drop HCCCs cutoff = 100 drop_cols = [col for col in df.select_dtypes('object').columns if df[col].nunique() > cutoff] df.drop(columns=drop_cols, inplace=True) # Drop duplicate columns dupe_cols = [col for col in df.head(15).T.duplicated().index if df.head(15).T.duplicated()[col]] df.drop(columns=dupe_cols, inplace=True) return df # **Task 1:** Using the above `wrangle` function to read `train_features.csv` and `train_labels.csv` into the DataFrame `df`, and `test_features.csv` into the DataFrame `X_test`. df = ... X_test = ... # # II. Split Data # # **Task 2:** Split your DataFrame `df` into a feature matrix `X` and the target vector `y`. You want to predict `'status_group'`. # # **Note:** You won't need to do a train-test split because you'll use cross-validation instead. X = ... y = ... # **Task 3:** Using a randomized split, divide `X` and `y` into a training set (`X_train`, `y_train`) and a validation set (`X_val`, `y_val`). X_train, X_val, y_train, y_val = ..., ..., ..., ... # # III. Establish Baseline # # **Task 4:** Since this is a **classification** problem, you should establish a baseline accuracy score. 
Figure out what is the majority class in `y_train` and what percentage of your training observations it represents. baseline_acc = ... print('Baseline Accuracy Score:', baseline_acc) # # IV. Build Models # # **Task 5:** Build and train your `model`. Include the transformers and predictor that you think are most appropriate for this problem. model = ... # # V. Check Metrics # # **Task 6:** Calculate the training and validation accuracy scores for `model`. # + training_acc = ... val_acc = ... print('Training Accuracy Score:', training_acc) print('Validation Accuracy Score:', val_acc) # - # **Task 7:** Plot the confusion matrix for your model, using your validation data. # # **Note:** Since there are three classes in your target vector, the dimensions of your matrix will be 3x3. # + # Plot 3x3 confusion matrix # - # Calculating precision and recall for a multiclass problem is a bit of a mess. Fortunately, we can use `sklearn`'s classification report. # # **Task 8:** Print the classification report for your `model`, using your validation data. # + # Print classification report # - # # VI. Tune Model # # Usually, we use this part of the ML workflow to adjust the hyperparameters of the our model to increase performance based on metrics like accuracy. Today, we'll use it to help maximize the impact of our water pump repairs when resources are scarce. What if we only had funds to repair 100 water pumps? # # (This activity is based on a [post](https://towardsdatascience.com/maximizing-scarce-maintenance-resources-with-data-8f3491133050) by <NAME> <NAME>.) # # **Task 9:** Using your model's `predict_proba` method, identify the observations in your **test set** where the model is more than 95% certain that a pump is `'non-functional'`. Put these observations in the DataFrame `X_test_nf`. X_test_nf = ... # **Task 10:** Limit `X_test_nf` to the 100 pumps with the largest associated populations. X_test_nf = ... # # VII. Communicate Results # # **Task 11 (`stretch goal`):** Create a scatter plot with the location of the 100 pumps in `X_test_nf`. # # **Note:** If you want to make this a **`super stretch goal`**, create a Mapbox scatter plot using [Plotly](https://plotly.github.io/plotly.py-docs/generated/plotly.express.scatter_mapbox.html).
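# # Added Illustration: Classification Metrics on Synthetic Data
#
# The cell below is an added, self-contained illustration of the four metric steps described above: a majority-class baseline, a confusion matrix, a classification report, and `predict_proba` filtering. It runs on a small synthetic three-class problem rather than the Tanzania pump data, and it is a sketch, not a solution key for the tasks.

# +
import pandas as pd
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report

X_demo, y_demo = make_classification(n_samples=600, n_classes=3, n_informative=5,
                                     random_state=42)
X_tr, X_va, y_tr, y_va = train_test_split(X_demo, y_demo, random_state=42)

# Baseline: always predict the majority class of the training target.
baseline_demo = pd.Series(y_tr).value_counts(normalize=True).max()
print('baseline accuracy:', round(baseline_demo, 3))

clf = DecisionTreeClassifier(max_depth=5, random_state=42).fit(X_tr, y_tr)
y_pred_demo = clf.predict(X_va)
print('validation accuracy:', round(accuracy_score(y_va, y_pred_demo), 3))

# 3x3 confusion matrix and per-class precision/recall, as in Tasks 7 and 8.
print(confusion_matrix(y_va, y_pred_demo))
print(classification_report(y_va, y_pred_demo))

# Probability-based filtering, as in Task 9: keep rows where the model is at
# least 95% sure of one particular class (class 0 stands in for 'non-functional').
proba = clf.predict_proba(X_va)
print('rows predicted class 0 with >95% certainty:', int((proba[:, 0] > 0.95).sum()))
# -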
module4-classification-metrics/LS_DS_224_assignment.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="5rmpybwysXGV" # ##### Copyright 2020 The TensorFlow Authors. # + cellView="form" id="m8y3rGtQsYP2" #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # + [markdown] id="hrXv0rU9sIma" # # Basic training loops # + [markdown] id="7S0BwJ_8sLu7" # <table class="tfo-notebook-buttons" align="left"> # <td> # <a target="_blank" href="https://www.tensorflow.org/guide/basic_training_loops"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> # </td> # <td> # <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/guide/basic_training_loops.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> # </td> # <td> # <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/guide/basic_training_loops.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> # </td> # <td> # <a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/guide/basic_training_loops.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> # </td> # </table> # + [markdown] id="k2o3TTG4TFpt" # In the previous guides, you have learned about [tensors](./tensor.ipynb), [variables](./variable.ipynb), [gradient tape](autodiff.ipynb), and [modules](./intro_to_modules.ipynb). In this guide, you will fit these all together to train models. # # TensorFlow also includes the [tf.Keras API](keras/overview.ipynb), a high-level neural network API that provides useful abstractions to reduce boilerplate. However, in this guide, you will use basic classes. # + [markdown] id="3LXMVuV0VhDr" # ## Setup # + id="NiolgWMPgpwI" import tensorflow as tf # + [markdown] id="iKD__8kFCKNt" # ## Solving machine learning problems # # Solving a machine learning problem usually consists of the following steps: # # - Obtain training data. # - Define the model. # - Define a loss function. # - Run through the training data, calculating loss from the ideal value # - Calculate gradients for that loss and use an *optimizer* to adjust the variables to fit the data. # - Evaluate your results. # # For illustration purposes, in this guide you'll develop a simple linear model, $f(x) = x * W + b$, which has two variables: $W$ (weights) and $b$ (bias). # # This is the most basic of machine learning problems: Given $x$ and $y$, try to find the slope and offset of a line via [simple linear regression](https://en.wikipedia.org/wiki/Linear_regression#Simple_and_multiple_linear_regression). # + [markdown] id="qutT_fkl_CBc" # ## Data # # Supervised learning uses *inputs* (usually denoted as *x*) and *outputs* (denoted *y*, often called *labels*). 
The goal is to learn from paired inputs and outputs so that you can predict the value of an output from an input. # # Each input of your data, in TensorFlow, is almost always represented by a tensor, and is often a vector. In supervised training, the output (or value you'd like to predict) is also a tensor. # # Here is some data synthesized by adding Gaussian (Normal) noise to points along a line. # + id="NzivK2ATByOz" # The actual line TRUE_W = 3.0 TRUE_B = 2.0 NUM_EXAMPLES = 1000 # A vector of random x values x = tf.random.normal(shape=[NUM_EXAMPLES]) # Generate some noise noise = tf.random.normal(shape=[NUM_EXAMPLES]) # Calculate y y = x * TRUE_W + TRUE_B + noise # + id="IlFd_HVBFGIF" # Plot all the data import matplotlib.pyplot as plt plt.scatter(x, y, c="b") plt.show() # + [markdown] id="UH95XUzhL99d" # Tensors are usually gathered together in *batches*, or groups of inputs and outputs stacked together. Batching can confer some training benefits and works well with accelerators and vectorized computation. Given how small this dataset is, you can treat the entire dataset as a single batch. # + [markdown] id="gFzH64Jn9PIm" # ## Define the model # # Use `tf.Variable` to represent all weights in a model. A `tf.Variable` stores a value and provides this in tensor form as needed. See the [variable guide](./variable.ipynb) for more details. # # Use `tf.Module` to encapsulate the variables and the computation. You could use any Python object, but this way it can be easily saved. # # Here, you define both *w* and *b* as variables. # + id="_WRu7Pze7wk8" class MyModel(tf.Module): def __init__(self, **kwargs): super().__init__(**kwargs) # Initialize the weights to `5.0` and the bias to `0.0` # In practice, these should be randomly initialized self.w = tf.Variable(5.0) self.b = tf.Variable(0.0) def __call__(self, x): return self.w * x + self.b model = MyModel() # List the variables tf.modules's built-in variable aggregation. print("Variables:", model.variables) # Verify the model works assert model(3.0).numpy() == 15.0 # + [markdown] id="rdpN_3ssG9D5" # The initial variables are set here in a fixed way, but Keras comes with any of a number of [initalizers](https://www.tensorflow.org/api_docs/python/tf/keras/initializers) you could use, with or without the rest of Keras. # + [markdown] id="xa6j_yXa-j79" # ### Define a loss function # # A loss function measures how well the output of a model for a given input matches the target output. The goal is to minimize this difference during training. 
Define the standard L2 loss, also known as the "mean squared" error: # + id="Y0ysUFGY924U" # This computes a single loss value for an entire batch def loss(target_y, predicted_y): return tf.reduce_mean(tf.square(target_y - predicted_y)) # + [markdown] id="-50nq-wPBsAW" # Before training the model, you can visualize the loss value by plotting the model's predictions in red and the training data in blue: # + id="_eb83LtrB4nt" plt.scatter(x, y, c="b") plt.scatter(x, model(x), c="r") plt.show() print("Current loss: %1.6f" % loss(y, model(x)).numpy()) # + [markdown] id="sSDP-yeq_4jE" # ### Define a training loop # # The training loop consists of repeatedly doing three tasks in order: # # * Sending a batch of inputs through the model to generate outputs # * Calculating the loss by comparing the outputs to the output (or label) # * Using gradient tape to find the gradients # * Optimizing the variables with those gradients # # For this example, you can train the model using [gradient descent](https://en.wikipedia.org/wiki/Gradient_descent). # # There are many variants of the gradient descent scheme that are captured in `tf.keras.optimizers`. But in the spirit of building from first principles, here you will implement the basic math yourself with the help of `tf.GradientTape` for automatic differentiation and `tf.assign_sub` for decrementing a value (which combines `tf.assign` and `tf.sub`): # + id="MBIACgdnA55X" # Given a callable model, inputs, outputs, and a learning rate... def train(model, x, y, learning_rate): with tf.GradientTape() as t: # Trainable variables are automatically tracked by GradientTape current_loss = loss(y, model(x)) # Use GradientTape to calculate the gradients with respect to W and b dw, db = t.gradient(current_loss, [model.w, model.b]) # Subtract the gradient scaled by the learning rate model.w.assign_sub(learning_rate * dw) model.b.assign_sub(learning_rate * db) # + [markdown] id="RwWPaJryD2aN" # For a look at training, you can send the same batch of *x* and *y* through the training loop, and see how `W` and `b` evolve. # + id="XdfkR223D9dW" model = MyModel() # Collect the history of W-values and b-values to plot later Ws, bs = [], [] epochs = range(10) # Define a training loop def training_loop(model, x, y): for epoch in epochs: # Update the model with the single giant batch train(model, x, y, learning_rate=0.1) # Track this before I update Ws.append(model.w.numpy()) bs.append(model.b.numpy()) current_loss = loss(y, model(x)) print("Epoch %2d: W=%1.2f b=%1.2f, loss=%2.5f" % (epoch, Ws[-1], bs[-1], current_loss)) # + id="iRuNUghs1lHY" print("Starting: W=%1.2f b=%1.2f, loss=%2.5f" % (model.w, model.b, loss(y, model(x)))) # Do the training training_loop(model, x, y) # Plot it plt.plot(epochs, Ws, "r", epochs, bs, "b") plt.plot([TRUE_W] * len(epochs), "r--", [TRUE_B] * len(epochs), "b--") plt.legend(["W", "b", "True W", "True b"]) plt.show() # + id="tpTEjWWex568" # Visualize how the trained model performs plt.scatter(x, y, c="b") plt.scatter(x, model(x), c="r") plt.show() print("Current loss: %1.6f" % loss(model(x), y).numpy()) # + [markdown] id="DODMMmfLIiOC" # ## The same solution, but with Keras # # It's useful to contrast the code above with the equivalent in Keras. # # Defining the model looks exactly the same if you subclass `tf.keras.Model`. Remember that Keras models inherit ultimately from module. 
# + id="Z86hCI0x1YX3" class MyModelKeras(tf.keras.Model): def __init__(self, **kwargs): super().__init__(**kwargs) # Initialize the weights to `5.0` and the bias to `0.0` # In practice, these should be randomly initialized self.w = tf.Variable(5.0) self.b = tf.Variable(0.0) def call(self, x): return self.w * x + self.b keras_model = MyModelKeras() # Reuse the training loop with a Keras model training_loop(keras_model, x, y) # You can also save a checkpoint using Keras's built-in support keras_model.save_weights("my_checkpoint") # + [markdown] id="6kw5P4jt2Az8" # Rather than write new training loops each time you create a model, you can use the built-in features of Keras as a shortcut. This can be useful when you do not want to write or debug Python training loops. # # If you do, you will need to use `model.compile()` to set the parameters, and `model.fit()` to train. It can be less code to use Keras implementations of L2 loss and gradient descent, again as a shortcut. Keras losses and optimizers can be used outside of these convenience functions, too, and the previous example could have used them. # + id="-nbLLfPE2pEl" keras_model = MyModelKeras() # compile sets the training parameters keras_model.compile( # By default, fit() uses tf.function(). You can # turn that off for debugging, but it is on now. run_eagerly=False, # Using a built-in optimizer, configuring as an object optimizer=tf.keras.optimizers.SGD(learning_rate=0.1), # Keras comes with built-in MSE error # However, you could use the loss function # defined above loss=tf.keras.losses.mean_squared_error, ) # + [markdown] id="lrlHODiZccu2" # Keras `fit` expects batched data or a complete dataset as a NumPy array. NumPy arrays are chopped into batches and default to a batch size of 32. # # In this case, to match the behavior of the hand-written loop, you should pass `x` in as a single batch of size 1000. # + id="zfAYqtu136PO" print(x.shape[0]) keras_model.fit(x, y, epochs=10, batch_size=1000) # + [markdown] id="8zKZIO9P5s1G" # Note that Keras prints out the loss after training, not before, so the first loss appears lower, but otherwise this shows essentially the same training performance. # + [markdown] id="vPnIVuaSJwWz" # ## Next steps # # In this guide, you have seen how to use the core classes of tensors, variables, modules, and gradient tape to build and train a model, and further how those ideas map to Keras. # # This is, however, an extremely simple problem. For a more practical introduction, see [Custom training walkthrough](../tutorials/customization/custom_training_walkthrough.ipynb). # # For more on using built-in Keras training loops, see [this guide](keras/train_and_evaluate.ipynb). For more on training loops and Keras, see [this guide](keras/writing_a_training_loop_from_scratch.ipynb). For writing custom distributed training loops, see [this guide](distributed_training.ipynb#using_tfdistributestrategy_with_basic_training_loops_loops).
src/basics/basic_training_loops.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + colab={"base_uri": "https://localhost:8080/"} id="UJTM99_e0-TX" outputId="72d9bed9-0476-416f-cd78-d51be911ba02" #Rode isso e reinicie o ambiente de execução antes de executar a célula referente a base de dados # !pip install dnspython # !pip install category_encoders # + id="eMs-c8eKYAFJ" colab={"base_uri": "https://localhost:8080/"} outputId="39fa9415-86d0-4c1f-8d9d-6252688bbfb2" import category_encoders as ce import seaborn as sns import numpy as np import statistics import math import matplotlib.pyplot as plt from scipy import stats import numpy from collections import Counter import pandas as pd from sklearn.preprocessing import StandardScaler from sklearn.preprocessing import OrdinalEncoder #Classificadores from sklearn.tree import DecisionTreeClassifier from sklearn.cluster import KMeans from sklearn.manifold import TSNE from sklearn.metrics import confusion_matrix,accuracy_score,precision_score, recall_score,make_scorer, silhouette_score, davies_bouldin_score from sklearn.model_selection import GridSearchCV from sklearn.ensemble import IsolationForest import plotly.express as px import pymongo from pymongo import MongoClient # + [markdown] id="RjWtecmKaBeW" # #Descrição dos Dados # + [markdown] id="831hGcBYxn_9" # ##Recuperação dos dados na base de dados # + id="SgSt44rnxDPW" cluster = MongoClient("mongodb+srv://admin:Q0jPEiUucrXovgwg@<EMAIL>.mongodb.net/PGE?retryWrites=true&w=majority") db = cluster['PGE'] collection = db['processo'] result = collection.find({}) data = list(result) # + id="Pdaa4p0WnRkH" ##Conversão do dicionário obtido da base de dados para pandas.DataFrame #Os dados foram divididos em três Dataframes, de processos, de movimentações e de manifestações processos_list = [] movimentacoes_list = [] manifestacoes_list = [] for processo in data: for movimentacao in processo['MOVIMENTACOES']: for manifestacao in movimentacao['MANIFESTACOES']: manifestacao['CDPROCESSO'] = processo['CDPROCESSO'] manifestacoes_list.append(manifestacao) movimentacao.pop('MANIFESTACOES') movimentacao['CDPROCESSO'] = processo['CDPROCESSO'] movimentacoes_list.append(movimentacao) processo.pop('MOVIMENTACOES') processos_list.append(processo) proc_df = pd.DataFrame(processos_list) mov_df = pd.DataFrame(movimentacoes_list) man_df = pd.DataFrame(manifestacoes_list) # + [markdown] id="uZykw_YIbjOT" # ## Informações sobre dataframes criados # + colab={"base_uri": "https://localhost:8080/", "height": 513} id="nJhbq3geueUN" outputId="9a09e67c-d29e-41bb-9a8d-82a55637d3ca" proc_df.head() # + colab={"base_uri": "https://localhost:8080/", "height": 238} id="EHsjy_87u-pP" outputId="209185de-0722-418c-cdae-5be337b4a70c" mov_df.head() # + colab={"base_uri": "https://localhost:8080/", "height": 204} id="tdGWnKv_uu-h" outputId="28189728-3e89-4033-df3d-7c36aa07ee48" man_df.head() # + [markdown] id="CfapdAaIagYV" # #Análise dos Dados # + [markdown] id="fMokJ9RoDWsD" # ##Frequência Absoluta, Relativa, Limite Inferior, Superior e Ponto Médio # + colab={"base_uri": "https://localhost:8080/", "height": 204} id="EdWCsvwovKoJ" outputId="af537c2b-4e9b-4d03-de4c-0f04702ddfa8" def frequencia_classes(proc_df): freq_abs = Counter(proc_df.COMPLEXIDADEPROC); freq_a = pd.DataFrame.from_dict(freq_abs, orient='index').reset_index() total = len(proc_df.index) freq_r = freq_a[0]/total freq_a[1] = freq_r freq_a.columns = 
['Complexidade Processo','Frequência absoluta','Frequência relativa'] return freq_a frequencia_classes(proc_df) # + id="gtrPYLwPau6h" df_test = mov_df.sample(5) # + colab={"base_uri": "https://localhost:8080/", "height": 272} id="p8F92LbPa1M7" outputId="a630a03e-5898-437c-f13b-7aa28ba3a718" df_test.head() # + colab={"base_uri": "https://localhost:8080/"} id="bTu0ONiaD9yy" outputId="8122b20e-05b0-49b8-f577-045ffc04d1de" print('Dados sobre o atributo QTDPAGINASPROCESSOTOTAL:\n') print(proc_df['QTDPAGINASPROCESSOTOTAL'].describe()) print('\nDados sobre o atributo COMPLEXIDADEPROC:\n') print(proc_df['COMPLEXIDADEPROC'].describe()) # + colab={"base_uri": "https://localhost:8080/"} id="zZUKcHagg_k7" outputId="2dce70ec-5667-4f38-9420-5cc0de9eea25" valores = proc_df.QTDPAGINASPROCESSOTOTAL; Q1 = valores.quantile(.25) Q3 = valores.quantile(.75) IIQ = (Q3 - Q1) limite_inferior = (Q1 - 1.5 * IIQ) limite_superior = (Q3 + 1.5 * IIQ) print('QTDPAGINASPROCESSOTOTAL') print("Limite inferior: ", limite_inferior) print("Limite superior: ", limite_superior) # + [markdown] id="RJ0_UCqIEvA_" # ## Histograma e Gráfico de Dispersão # </p> O histograma, também conhecido como distribuição de frequências, é a representação gráfica em colunas ou em barras de um conjunto de dados previamente tabulado e dividido em classes uniformes ou não uniformes. A base de cada retângulo representa uma classe.</p> # # </p> Os diagramas de dispersão ou gráficos de dispersão são representações de dados de duas ou mais variáveis que são organizadas em um gráfico. Ele pode auxiliar no estudo dos dados e detecção de outliers e clusters.</p> # # + colab={"base_uri": "https://localhost:8080/", "height": 312} id="foQNXo225zVf" outputId="4292094d-9eb8-4b9b-942c-5d9709647914" sns.set_style('darkgrid') sns.countplot(x = 'COMPLEXIDADEPROC', data=proc_df, order=['Muito Baixa','Baixa','Média','Alta','Muito Alta']).set_title('Qnt. de processos por complexidade') #['Muito Baixa','Baixa','Média','Alta','Muito Alta'] # + colab={"base_uri": "https://localhost:8080/", "height": 312} id="TuTuLf-Z6X8h" outputId="74352860-4df4-46ac-faf7-8c1369f21b2d" sns.countplot(x = 'COMPLEXIDADEMOV', data=mov_df, order=['Muito Baixa','Baixa','Média','Alta','Muito Alta']).set_title('Qnt. de movimentações por complexidade') # + colab={"base_uri": "https://localhost:8080/", "height": 312} id="dQTSOPRe63Hx" outputId="152528c8-2ee8-4296-da77-1d7a4df77fca" sns.countplot(x = 'COMPLEXIDADEMANIF', data=man_df, order=['Muito Baixa','Baixa','Média','Alta','Muito Alta']).set_title('Qnt. de manifestações por complexidade') # + colab={"base_uri": "https://localhost:8080/", "height": 312} id="PK2sZl1aL394" outputId="7de0b180-dc52-44a8-cd86-aa6ab2897d5d" sns.histplot(x = "QTDPAGINASPROCESSOTOTAL", data=proc_df,bins=20).set_title('Qnt. de páginas dos processos') # + colab={"base_uri": "https://localhost:8080/", "height": 312} id="puIDv9PKNInE" outputId="4111bec7-ed6b-4f16-d008-df6d1a28e528" proc_df_aux = proc_df[proc_df['QTDPAGINASPROCESSOTOTAL'] < limite_superior] sns.histplot(x = "QTDPAGINASPROCESSOTOTAL", data=proc_df_aux,bins=10).set_title('Qnt. 
de páginas dos processos desconsiderando os outliers') # + [markdown] id="X22pJ94iNAx7" # # + colab={"base_uri": "https://localhost:8080/", "height": 312} id="6QGTJQzGpCYr" outputId="d113fe32-3149-4287-e785-0c35cd3e40e0" sns.scatterplot(x="COMPLEXIDADEPROC", y="QTDPAGINASPROCESSOTOTAL", data=proc_df).set_title('Dispersão dos processos') # + id="iYSbJzweN3nl" # proc_df_aux = proc_df[proc_df['QTDPAGINASPROCESSOTOTAL'] < limite_superior] # sns.scatterplot(x="COMPLEXIDADEPROC", y="QTDPAGINASPROCESSOTOTAL", data=proc_df_aux).set_title('Dispersão dos processos desconsiderando os outliers') # + colab={"base_uri": "https://localhost:8080/", "height": 296} id="IY-onPzAGXsw" outputId="7977bc62-b5f9-43c4-a1cf-ed46ec26c77c" sns.scatterplot(x='COMPLEXIDADEPROC',y='VLACAO',data=proc_df) # + [markdown] id="Vqnndd0n-9po" # ## Box plot # O boxplot nos fornece uma análise visual da posição, dispersão, simetria, caudas e valores discrepantes (outliers) do conjunto de dados. # No gráfico a seguir, temos uma dificuldade para analisar os dados, pois há alguns outliers muito distantes do limite superior dos Box plots. # + colab={"base_uri": "https://localhost:8080/", "height": 279} id="Xi85ds8HOPyT" outputId="09158d07-9e74-486b-aa2b-0c189c2e1d08" sns.boxplot(x="COMPLEXIDADEPROC", y="QTDPAGINASPROCESSOTOTAL", data=proc_df,order=['Muito Baixa','Baixa','Média','Alta','Muito Alta']); # + [markdown] id="HMLY3J5Z_3MN" # Para obter uma melhor visualização das informações fornecidas pelo Boxplot, removemos os dados que estão acima do limite superior do atributo QTDPAGINASPROCESSOTOTAL # + colab={"base_uri": "https://localhost:8080/", "height": 282} id="_0ODWuWf9_VE" outputId="8637f8c8-b6ef-4025-e57e-84e60b601aa6" proc_df_aux = proc_df[proc_df['QTDPAGINASPROCESSOTOTAL'] < limite_superior] sns.boxplot(x="COMPLEXIDADEPROC", y="QTDPAGINASPROCESSOTOTAL", data=proc_df_aux,order=['Muito Baixa','Baixa','Média','Alta','Muito Alta']); # + [markdown] id="H<KEY>" # # Pré-processamentos dos Dados # Nessa etapa iremos realizar a preparação, limpeza e organização de dados para que possamos utiliza-los em modelos de aprendizagem e análise dos dados. # + [markdown] id="fBPcqUseb0nT" # ## Substituição de valores nulos # + [markdown] id="dXBPwWq5YdfV" # Podemos identificar que o atributo VLACAO possui varios valores representados por '-'. Por esse motivo decidimos substituir esses valores pela mediana dessa coluna. 
# + id="DtnWzp5gGZHT" proc_df_orig = proc_df.copy() # + colab={"base_uri": "https://localhost:8080/"} id="4Jn7WHvL0uZa" outputId="735e1f65-dad1-4148-8b29-98c9b8a2f9d5" proc_df['VLACAO'] = proc_df['VLACAO'].replace('-', np.nan) proc_df['VLACAO'] = pd.to_numeric(proc_df['VLACAO']) print(proc_df.info()) # + colab={"base_uri": "https://localhost:8080/"} id="pLBDeIdXHCs6" outputId="fa695e07-799e-4251-ab22-6629926c60db" proc_df['VLACAO'].fillna(proc_df['VLACAO'].median(),inplace=True) proc_df.info() # + [markdown] id="yFcq46yDb8Vr" # ## Correção de dados inconsistentes # + id="4EwylHN7b_dC" # + [markdown] id="iOp5pBa7mI11" # ##Remoção de linhas não necessárias # + id="LuBx3yH2mM__" proc_df = proc_df[proc_df.COMPLEXIDADEPROC != "Muito Alta" ] proc_df = proc_df[proc_df.COMPLEXIDADEPROC != "Muito Baixa" ] # + [markdown] id="-JlUqx3Nhwc4" # ## Remoção de Outliers # + id="WF-BVkwiHvB_" def calculate_sup_inf_thr(df,column,hue_column,comp): valores = df[df[hue_column]==comp][column]; Q1 = valores.quantile(.25) Q3 = valores.quantile(.75) IIQ = (Q3 - Q1) limite_inferior = (Q1 - 1.5 * IIQ) limite_superior = (Q3 + 1.5 * IIQ) return limite_superior, limite_inferior #Os outliers são detectados separadamente em cada nivel de complexidade def remove_outliers_by_complexity(df,column,hue_column,complexities = []): df = df.copy() for comp in complexities: limite_superior, limite_inferior = calculate_sup_inf_thr(df,column,hue_column,comp) print('Qtd outliers: ',df[df[column] > limite_superior][df[hue_column]==comp][column].count(), ' complexity: ',comp) df[df[hue_column]==comp] = df[df[column] < limite_superior][df[hue_column]==comp] return df # + [markdown] id="eLO8-vhEB_BC" # Remoção de outliers referentes a quantidade de paginas do processo # + colab={"base_uri": "https://localhost:8080/", "height": 976} id="dGQsZR6LhymP" outputId="8f74ef78-b9c6-42ea-8641-11b7af21870c" sns.boxplot(x="COMPLEXIDADEPROC", y="QTDPAGINASPROCESSOTOTAL", data=proc_df,order=['Muito Baixa','Baixa','Média','Alta','Muito Alta']); proc_df = remove_outliers_by_complexity(proc_df,'QTDPAGINASPROCESSOTOTAL','COMPLEXIDADEPROC',['Muito Baixa','Baixa','Média','Alta','Muito Alta']) # + [markdown] id="P-AaLDSqP_DP" # Resuldado da remoção dos outliers # + colab={"base_uri": "https://localhost:8080/", "height": 279} id="0IcD1EolOnVC" outputId="38a27ca4-95f1-4318-9339-0f1526a3ddf5" sns.boxplot(x="COMPLEXIDADEPROC", y="QTDPAGINASPROCESSOTOTAL", data=proc_df,order=['Muito Baixa','Baixa','Média','Alta','Muito Alta']); # + [markdown] id="Y394pfklDBjA" # Remoção de outliers referentes ao valor da ação do processo # + colab={"base_uri": "https://localhost:8080/", "height": 987} id="YB34Zd2gC_kF" outputId="06cf0444-e730-479c-e920-53f959b50af7" sns.boxplot(x="COMPLEXIDADEPROC", y="VLACAO", data=proc_df,order=['Muito Baixa','Baixa','Média','Alta','Muito Alta']); proc_df = remove_outliers_by_complexity(proc_df,'VLACAO','COMPLEXIDADEPROC',['Muito Baixa','Baixa','Média','Alta','Muito Alta']) # + [markdown] id="0HIsAkigGMi2" # # + [markdown] id="fbJftXZ9TCez" # Resuldado da remoção dos outliers # + colab={"base_uri": "https://localhost:8080/", "height": 283} id="1z7oV3LaiA5F" outputId="2d95c198-b465-43ae-81ea-2ea059b72ea6" sns.boxplot(x="COMPLEXIDADEPROC", y="VLACAO", data=proc_df,order=['Muito Baixa','Baixa','Média','Alta','Muito Alta']); # + id="xuhqPrGm_Vw1" proc_df = proc_df.dropna() # + [markdown] id="qlnAVNoUh9Qz" # ## Criação de novas colunas # + [markdown] id="6GlTZdvbAQKR" # <p>A coluna ASSUNTO é composta por classificação de subclassificações, seguindo o 
seguinte modelo : <classificação> - <subclassificação> - <subclassificação> ...<p/> # # <p> Então a seguir vamos separar em duas colunas: CLASSIFICACAO E SUBCLASSIFICAO # # + id="S6iZL5T0_ssO" proc_df_without_outliers = proc_df.copy() # + id="0g5rSrAi3jQU" def trim_all_columns(df): """ Trim whitespace from ends of each value across all series in dataframe """ trim_strings = lambda x: x.strip() if isinstance(x, str) else x return df.applymap(trim_strings) # + colab={"base_uri": "https://localhost:8080/"} id="sYNDR3lH0SO-" outputId="65dab9bd-b39b-4c75-a93f-cf4db0097c6d" #df = proc_df.copy() proc_df[['CLASSIFICACAO', 'SUBCLASSIFICACAO']] = proc_df['ASSUNTO'].str.split('-', 1, expand=True) proc_df = trim_all_columns(proc_df) proc_df[['SUBCLASSIFICACAO', 'CLASSIFICACAORESTANTE']] = proc_df['SUBCLASSIFICACAO'].str.split('-', 1, expand=True) proc_df.info() # + [markdown] id="ab42moG4BgBG" # Mesma coisa irá ser realizada na tabela de movimentações e na coluna MOVIMENTACAO # + colab={"base_uri": "https://localhost:8080/"} id="GU_Ue2dbBlOk" outputId="7a0a75ee-b244-48e3-f89b-26e36c0e2683" mov_df[['CLASSIFICACAOMOV', 'SUBCLASSIFICACAOMOV']] = mov_df['MOVIMENTACAO'].str.split('-', 1, expand=True) mov_df = trim_all_columns(mov_df) mov_df[['SUBCLASSIFICACAOMOV', 'CLASSIFICACAORESTANTE']] = mov_df['SUBCLASSIFICACAOMOV'].str.split('-', 1, expand=True) mov_df = trim_all_columns(mov_df) mov_df.info() # + colab={"base_uri": "https://localhost:8080/"} id="ScD7QqiVbmMO" outputId="40b5544c-5ad3-4b8a-e94f-a28a308670c6" print(proc_df['CLASSIFICACAO'].unique()) # + colab={"base_uri": "https://localhost:8080/"} id="Off6aZwZb0yM" outputId="e210a5a1-6b64-4b13-d447-be459abe6947" print(proc_df['SUBCLASSIFICACAO'].unique()) # + id="WTHvfql0I3Eo" # + [markdown] id="h8bMEbmIH4Ja" # ##Codificação de atributos nominais # + colab={"base_uri": "https://localhost:8080/", "height": 610} id="5yBAlS12cH93" outputId="8ee77551-0016-4e49-96e3-d887ccd8034f" ce_one_hot = ce.OneHotEncoder(cols = ['CLASSIFICACAO','SUBCLASSIFICACAO']) proc_class_df = ce_one_hot.fit_transform(proc_df) proc_class_df.head() # + id="kvwn6dmzxLaO" ce_ordinal = OrdinalEncoder(categories={0:['Muito Baixa' ,'Baixa', 'Média','Alta', 'Muito Alta' ]}) ce_ordinal.fit(proc_class_df['COMPLEXIDADEPROC'].values.reshape(-1, 1)) proc_class_df['COMPLEXIDADEPROC']=ce_ordinal.transform(proc_class_df['COMPLEXIDADEPROC'].values.reshape(-1, 1)) # + [markdown] id="F_DH3jnXb8Qq" # ##Normalização de atributos numéricos # + colab={"base_uri": "https://localhost:8080/"} id="5aeodn5nIC1Z" outputId="d962350f-775d-46b6-829e-c182c05b6f29" sc = StandardScaler() proc_class_df['VLACAO_NORM'] = sc.fit_transform(proc_class_df['VLACAO'].values.reshape(-1, 1)) print(proc_class_df['VLACAO'].head()) # + id="EbsrGa7acZ1a" proc_df = proc_class_df # + [markdown] id="9capaY9SHKAt" # #Classificação # Nesta etapa iremos realizar a aplicação de algoritmos de aprendizagem supervisionada para realizar a classificação de processos em relação a sua complexidade. Serão utilizados com entradas a classificação e subclassificação dos processos e o valor da ação. 
# # + [markdown] id="9BXmKiP-pPYZ" # Primeiramente será realizada a divisão do dataset em treinamento e teste # + id="FbcfM5PYMlg0" proc_class_df_orig = proc_class_df.copy() # + id="VIchDWorNxlG" proc_class_df = proc_class_df_orig # + colab={"base_uri": "https://localhost:8080/", "height": 142} id="cJpH79aMMx4W" outputId="2d525ce4-0035-485a-8b43-6daa00264e07" frequencia_classes(proc_class_df) # + colab={"base_uri": "https://localhost:8080/", "height": 142} id="tWOytuxcKUVU" outputId="7a04bd66-e674-4ff5-f7a8-4cafed921506" def balance_dataset(focus_class, df,amount): class_df = df[df.COMPLEXIDADEPROC == focus_class].sample(amount) df = df[df.COMPLEXIDADEPROC != focus_class] df = pd.concat([class_df,df]) return df proc_class_df = balance_dataset(2,proc_class_df,2000) frequencia_classes(proc_class_df) # + id="lku2HWATpGPk" from sklearn.model_selection import train_test_split X = proc_class_df.drop(['_id' ,'CDPROCESSO','CLASSIFICACAORESTANTE', 'DATAPRIMEIRADISTRIB','TIPOACAO', 'ASSUNTO','TRIBUNAL', 'ORGAOJUD','JUIZO', 'SITUACAOPROCESSO','TIPOPROCESSO', 'COMPLEXIDADEPROC','QTDPAGINASPROCESSOTOTAL'],axis=1) y =proc_class_df.COMPLEXIDADEPROC X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0) # + [markdown] id="PzKREeaKpR9E" # Utilização da árvore de decisão # + colab={"base_uri": "https://localhost:8080/"} id="EtOR_AF7MAYv" outputId="64997588-cb1f-4873-c4c9-3725adf6dc70" classifier = DecisionTreeClassifier() # classifier.fit(X,y) param_dist = { 'criterion':['gini','entropy'], 'max_depth':[6,7,8,9,10,11,12,13,None] } scoring = {'Accuracy': make_scorer(accuracy_score), 'Precision':make_scorer(precision_score,average='macro'), 'Recall':make_scorer(recall_score,average='macro')} grid = GridSearchCV(classifier, param_grid = param_dist, cv=5, n_jobs=-1, scoring = scoring, refit='Accuracy') grid.fit(X,y) # + colab={"base_uri": "https://localhost:8080/"} id="13aHjTEIYaMY" outputId="b2d999d7-d891-43b0-a410-5bae681bad22" dt_classifier = grid.best_estimator_ dt_classifier # + colab={"base_uri": "https://localhost:8080/", "height": 627} id="XkjOrQmYZx_B" outputId="4e7d9a9e-ec13-4cee-fb6a-b8151a89a06c" cv_results_df = pd.DataFrame(grid.cv_results_) cv_results_df cv_results_df[['param_criterion','param_max_depth','mean_test_Accuracy','mean_test_Precision','mean_test_Recall','rank_test_Accuracy','rank_test_Precision','rank_test_Recall']] # + colab={"base_uri": "https://localhost:8080/", "height": 445} id="7M1GsfkhyFKU" outputId="79b1ccf7-892d-47d6-df6a-25196de88d37" y_pred = dt_classifier.predict(X_test) cm = confusion_matrix(y_test, y_pred) cm= cm / np.expand_dims(cm.sum(axis=1),axis=1) # cm = cm/cm.sum() plt.figure(figsize = (10,7)) sns.heatmap(cm, annot=True, cmap ='inferno_r' ) # + id="FfHmpiOuchxC" mov_orig_df = mov_df.copy() proc_orig_df = proc_df.copy() # + [markdown] id="R6q298h5qwpG" # #Agrupamento # + id="WQ_k0yr_kL2H" mov_df = mov_orig_df.copy() proc_df = proc_orig_df.copy() # + id="Z6_fqpPzq01p" ce_ordinal = OrdinalEncoder(categories={0:['Muito Baixa' ,'Baixa', 'Média','Alta', 'Muito Alta' ]}) ce_ordinal.fit(mov_df['COMPLEXIDADEMOV'].values.reshape(-1, 1)) mov_df['COMPLEXIDADEMOV_ENC']=ce_ordinal.transform(mov_df['COMPLEXIDADEMOV'].values.reshape(-1, 1)) # + colab={"base_uri": "https://localhost:8080/", "height": 542} id="utANIW4eb2pj" outputId="3596ae8e-06ec-46fe-a9b0-d83cf1a03a60" df_groups_mov = mov_df.groupby(by=["CDPROCESSO"]) proc_list = [] for k, row in proc_df.iterrows(): proc = row['CDPROCESSO'] group = df_groups_mov.get_group(proc) media_comp = 
group.COMPLEXIDADEMOV_ENC.mean() mediana_comp = group.COMPLEXIDADEMOV_ENC.median() tempo_total = group.TEMPOMOV_DIAS.sum() # freq = {'Muito Baixa':0,'Baixa':0,'Média':0,'Alta':0,'Muito Alta':0} # freq_abs = Counter(mov_df.COMPLEXIDADEMOV); # freq_abs = {k: freq_abs.get(k, 0) + freq.get(k, 0) for k in set(freq)} qtd_mov = len(group) processo = {'MEDIA_COMPLEXIDADE': media_comp, 'MEDIANA_COMPLEXIDADE': mediana_comp, 'QUANTIDADE_MOV': qtd_mov, 'TEMPO_TOTAL':tempo_total} proc_list.append(processo) aux_df = pd.DataFrame(proc_list) proc_df = pd.concat([proc_df.reset_index(),aux_df], axis=1) proc_df.head() # + colab={"base_uri": "https://localhost:8080/", "height": 297} id="bUYulcuSdJke" outputId="bcd079df-ecff-4a8f-91bd-8d278f8bccbe" sns.scatterplot(data=proc_df, x="QUANTIDADE_MOV", y="MEDIANA_COMPLEXIDADE",hue="COMPLEXIDADEPROC") # + colab={"base_uri": "https://localhost:8080/", "height": 297} id="ulcnVKq8k8W1" outputId="b689099a-ee2c-483a-b7cd-0763e85c3753" sns.scatterplot(data=proc_df, x="QUANTIDADE_MOV", y="MEDIA_COMPLEXIDADE",hue="COMPLEXIDADEPROC") # + colab={"base_uri": "https://localhost:8080/", "height": 542} id="4gqxTV0cX_gk" outputId="a2052add-b2b0-4cb2-e734-2c316ce75f66" proc_df.head() # + colab={"base_uri": "https://localhost:8080/", "height": 296} id="bhroRI9RlE-f" outputId="eda2fd50-07db-4a95-8d86-2dde9546916c" test_df = proc_df.copy() test_df['par_aux'] = test_df["QUANTIDADE_MOV"]*test_df["MEDIA_COMPLEXIDADE"] test_df['zeros'] = 0 test_df.head() sns.scatterplot(data=test_df, x="par_aux", y="zeros",hue="COMPLEXIDADEPROC") # + colab={"base_uri": "https://localhost:8080/", "height": 296} id="htYylkkam3T1" outputId="f5cb8682-26e8-4c12-cdc5-88747aae9eab" sns.scatterplot(data=test_df, x="par_aux",hue="COMPLEXIDADEPROC") # + id="WeIC_nmrnV3V" #ax = sns.heatmap() # + colab={"base_uri": "https://localhost:8080/"} id="fmV-4nI2WQEI" outputId="b826529d-c9bb-4147-ca2b-be3085e0c7e5" test_df[["QUANTIDADE_MOV","MEDIA_COMPLEXIDADE"]].to_numpy() # + colab={"base_uri": "https://localhost:8080/"} id="wPWwgEB4Wcun" outputId="528622d1-8e3a-4d6d-ee0f-5d64c844a930" label_df_proc = proc_df.drop(['index','_id' ,'CDPROCESSO','CLASSIFICACAORESTANTE', 'DATAPRIMEIRADISTRIB','TIPOACAO', 'ASSUNTO','TRIBUNAL', 'ORGAOJUD','JUIZO', 'SITUACAOPROCESSO','TIPOPROCESSO', 'COMPLEXIDADEPROC'],axis=1) print(label_df_proc) # + id="1fGVUuRDpEbg" #X_embedded = TSNE(n_components=2).fit_transform(label_df_proc) # + colab={"base_uri": "https://localhost:8080/"} id="JbYF4PCf5-4-" outputId="f8b16014-7d96-435d-eebd-ba1a8b42c21d" label_df_proc_values = label_df_proc.values X = label_df_proc_values print(X) # + id="OWlcNQozUwqD" colab={"base_uri": "https://localhost:8080/"} outputId="72f58598-34ff-4207-9e57-79f965062e94" from sklearn.preprocessing import MaxAbsScaler # Instancia o MaxAbsScaler p=MaxAbsScaler() # Analisa os dados e prepara o padronizador p.fit(X) print(p.transform(X)) # + id="7RZfPpQ-6CX7" colab={"base_uri": "https://localhost:8080/"} outputId="786cdfb4-5537-4eaf-da83-c4a3891b12f6" #KMeans kmeans = KMeans(n_clusters=3, init= 'k-means++', algorithm='auto') kmeans.fit(X) kmeans2 = KMeans(n_clusters=3, init= 'k-means++', algorithm='full') kmeans2.fit(X) # + id="pDVFSGqp7QGY" s = silhouette_score(X, kmeans.labels_) s2 = silhouette_score(X, kmeans2.labels_) # + id="Dq64oECcpvwx" b = davies_bouldin_score(X, kmeans.labels_) b2 = davies_bouldin_score(X, kmeans2.labels_) # + id="TMwSK6YmFwm7" colab={"base_uri": "https://localhost:8080/"} outputId="c2e78e1e-8819-417b-f137-14bad2eddfcd" data = np.array([[kmeans.n_clusters, 
kmeans.algorithm, kmeans.init, s, b], [kmeans2.n_clusters, kmeans2.algorithm, kmeans2.init, s2, b2]]) f = pd.DataFrame(data,columns=['n_clusters', 'param_algorithm', 'param_init', 'silhouette_score', 'davies_bouldin_score']) print(f) # + id="O4prK1zV6MFZ" colab={"base_uri": "https://localhost:8080/"} outputId="4eeb4e92-9a1e-4b96-ba19-7502cda2212b" #Centróides print(kmeans.cluster_centers_) label_df_proc['label_goups'] = kmeans.labels_ #Rotulos print(list(set(kmeans.labels_))) # + [markdown] id="Dv_Le2_ZfpcE" # # + id="WBwRwLWqHnMJ" colab={"base_uri": "https://localhost:8080/", "height": 297} outputId="0d94b3a3-46de-44fb-ddb8-bb4fa2af93b5" #Distribuição do kmeans com os centróides - 3 clusters plt.scatter(X[:,0], X[:,1], c=kmeans5.labels_, cmap='rainbow') plt.scatter(kmeans5.cluster_centers_[:,0] ,kmeans5.cluster_centers_[:,1], color='black', label = 'Centroids') plt.xlabel('Valor_acao') plt.ylabel('Qnt_paginas') plt.legend() # + id="3kwAy8qZoW71" #TSNE X_embedded = TSNE(n_components=2).fit_transform(X) # + colab={"base_uri": "https://localhost:8080/"} id="xO3XrucSogUc" outputId="8e90b8c9-bf65-4e1a-dbb3-4132433a1f1d" #KMeans + TSNE kmeans3 = KMeans(n_clusters=3, init= 'k-means++', algorithm='auto') kmeans3.fit(X_embedded) kmeans4 = KMeans(n_clusters=3, init= 'k-means++', algorithm='full') kmeans4.fit(X_embedded) # + id="0pCnSR_rol9C" s3 = silhouette_score(X_embedded, kmeans3.labels_) s4 = silhouette_score(X_embedded, kmeans4.labels_) # + id="YKhLMavQopJR" b3 = davies_bouldin_score(X_embedded, kmeans3.labels_) b4 = davies_bouldin_score(X_embedded, kmeans4.labels_) # + colab={"base_uri": "https://localhost:8080/"} id="t2VncFsrotbT" outputId="1510be24-e025-4748-a30e-f26bd8bf9611" data = np.array([[kmeans3.n_clusters, kmeans3.algorithm, kmeans3.init, s3, b3], [kmeans4.n_clusters, kmeans4.algorithm, kmeans4.init, s4, b4]]) f = pd.DataFrame(data,columns=['n_clusters', 'param_algorithm', 'param_init', 'silhouette_score', 'davies_bouldin_score']) print(f) # + colab={"base_uri": "https://localhost:8080/", "height": 296} id="heD02lN9o0WL" outputId="<PASSWORD>" #Distribuição do kmeans com os centróides - 3 clusters plt.scatter(X_embedded[:,0], X_embedded[:,1], c=kmeans3.labels_, cmap='rainbow') plt.scatter(kmeans3.cluster_centers_[:,0] ,kmeans3.cluster_centers_[:,1], color='black', label = 'Centroids') plt.xlabel('TSNE1') plt.ylabel('TSNE2') plt.legend()
2020.1/Grupo 7/code/PGE_datamining_final.ipynb
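# The centroid plot near the end of the notebook above references `kmeans5`, which is never defined (only `kmeans` through `kmeans4` are fitted). A minimal, self-contained sketch of the intended plot is given below; the synthetic data and axis labels are placeholders, not the notebook's real features.
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans

rng = np.random.default_rng(0)
X_demo = rng.normal(size=(300, 2))               # stand-in for the two selected process features

km = KMeans(n_clusters=3, init='k-means++', n_init=10, random_state=0).fit(X_demo)

# Scatter the points coloured by cluster label, with the fitted centroids on top
plt.scatter(X_demo[:, 0], X_demo[:, 1], c=km.labels_, cmap='rainbow', s=10)
plt.scatter(km.cluster_centers_[:, 0], km.cluster_centers_[:, 1],
            color='black', marker='x', label='Centroids')
plt.xlabel('feature 0')
plt.ylabel('feature 1')
plt.legend()
plt.show()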
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Global TF Kernel (Python 3) # language: python # name: global-tf-python-3 # --- # + import keras import tensorflow as tf from keras.datasets import mnist from keras.models import Sequential from keras.layers import Dense, Dropout, Flatten from keras.layers import Conv2D, MaxPooling2D from keras.optimizers import Adadelta from sklearn.preprocessing import StandardScaler, MinMaxScaler, Normalizer # - batch_size = 128 num_classes = 10 epochs = 12 # + (x_train, y_train), (x_test, y_test) = mnist.load_data() img_rows, img_cols = 28, 28 x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1) x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1) input_shape = (img_rows, img_cols, 1) x_train = x_train.astype('float32') x_test = x_test.astype('float32') # - # same thing as applying minmaxscaler in the (0,1) range x_train = x_train / 255 x_test = x_test / 255 # one hot encoding y_train = keras.utils.to_categorical(y_train, num_classes) y_test = keras.utils.to_categorical(y_test, num_classes) with tf.device('/cpu:0'): model = Sequential() model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape)) model.add(Conv2D(64, (3, 3), activation='relu')) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Dropout(0.25)) model.add(Flatten()) model.add(Dense(128, activation='relu')) model.add(Dropout(0.5)) model.add(Dense(num_classes, activation='softmax')) model.compile(loss="categorical_crossentropy", optimizer=Adadelta(), metrics=['accuracy']) model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(x_test, y_test))
python3/notebooks/meetup/mnist-cnn.ipynb
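# A short sketch of how the CNN trained above could be evaluated and used for prediction; `model`, `x_test` and `y_test` are assumed to be the objects built in the cells above.
import numpy as np

score = model.evaluate(x_test, y_test, verbose=0)    # returns [loss, accuracy] for the compiled metrics
print('Test loss:', score[0])
print('Test accuracy:', score[1])

probs = model.predict(x_test[:5])                    # softmax probabilities for the first five test digits
print('Predicted digits:', np.argmax(probs, axis=1))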
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %load_ext autoreload # %autoreload 2 import molsysmt as msm # # Test with a lipid file_crd = '../../popc/conf1/popc_462.crd' file_pdb = msm.demo_systems.files['1tcd.pdb'] system = msm.convert(file_pdb, to_form='pytraj.Topology') system_msm = msm.convert(file_pdb, to_form='molsysmt.Topology') msm.get(system, target='atom', molecule_name=True) msm.get(system_msm, target='atom', molecule_type=True) import numpy as np aa=np.array([3,4,5,6]) dd = dict(zip(aa,range(4))) np.vectorize(dd.__getitem__)(aa) cc = dict(aa,zip(range(4))) cc bb.fromkeys(np.arange(aa.shape[0]), aa) aa = system.residue(0) aa.original_resid aa = list(system.mols)[0] help(aa) mol=list(system.mols)[0] aa=system.atom(0) aa.
docs/contents/Lipid.ipynb
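# The scratch cells above experiment with mapping the values of a NumPy array through a dictionary. A minimal self-contained sketch of that pattern (`np.vectorize` over `dict.__getitem__`), independent of the molsysmt objects:
import numpy as np

aa = np.array([3, 4, 5, 6])
dd = dict(zip(aa, range(len(aa))))         # {3: 0, 4: 1, 5: 2, 6: 3}
mapped = np.vectorize(dd.__getitem__)(aa)  # look every array element up in the dict
print(mapped)                              # [0 1 2 3]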
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# The chain rule is a powerful and useful differentiation technique that allows us to differentiate functions that would not be straightforward or possible with only the previously discussed rules at our disposal. The rule takes advantage of the "compositeness" of a function. For example, consider the function:
# $$ f(x) = \sin{4x} $$
# This function can be broken into a composite function $f \circ g$ by observing that:
# $$ y = f(u) = \sin{u} \space \text{and} \space u = g(x) = 4x $$
# Therefore we can reinterpret the original function as:
# $$ F(x) = f(g(x)), \space \text{or} \space F = f \circ g $$
# The chain rule gives us the ability to find the derivatives of $f$ and $g$ using the tools we previously discussed. We can state the chain rule more precisely as follows:
# Assuming $g$ is differentiable at $x$ and $f$ is differentiable at $g(x)$, the composite function $F = f \circ g$ defined by $F(x) = f(g(x))$ is differentiable at $x$, and $F^\prime$ is given by the product:
# $$ F^\prime (x) = f^\prime (g(x)) \, g^\prime(x) $$
# In Leibniz notation, assuming $y = f(u)$ and $u = g(x)$ have derivatives, the chain rule can be expressed as:
# $$ \frac{dy}{dx} = \frac{dy}{du} \space \frac{du}{dx} $$
# With the chain rule, we can now find the derivative of the function $f(x) = \sin{4x}$.
# $$ u = 4x, \qquad y = \sin{u} $$
# $$ \frac{du}{dx} = 4, \qquad \frac{dy}{du} = \cos{u} $$
# $$ \frac{dy}{dx} = 4 \cos{4x} $$
# We can check our answer using [SymPy](http://www.sympy.org/en/index.html)

from sympy import symbols, diff, sin, cos, sqrt, simplify, init_printing
init_printing()

x = symbols('x')

diff(sin(4 * x))

# ## Examples

# These and the previous example are taken from James Stewart's *Calculus Early Transcendentals* (Section 3.4, pp. 203)

# Example 1: Find the derivative of $f(x) = (1 - x^2)^{10}$
# $$ u = 1 - x^2, \qquad y = u^{10} $$
# $$ \frac{dy}{du} = 10u^9, \qquad \frac{du}{dx} = -2x $$
# $$ \frac{dy}{dx} = 10(1 - x^2)^9 \cdot (-2x) = -20x (1 - x^2)^9 $$

diff((1 - x ** 2) ** 10)

# Example 2: Find the derivative of $f(x) = e^{\sqrt{x}}$
# $$ u = \sqrt{x}, \qquad y = e^u $$
# $$ \frac{dy}{du} = e^u, \qquad \frac{du}{dx} = \frac{1}{2}x^{-\frac{1}{2}} = \frac{1}{2\sqrt{x}} $$
# $$ \frac{dy}{dx} = e^{\sqrt{x}} \cdot \frac{1}{2\sqrt{x}} = \frac{e^{\sqrt{x}}}{2\sqrt{x}} $$

# Import the constant `e` from the `mpmath` library for SymPy to calculate the derivative.

from mpmath import e
diff(e ** sqrt(x))

# Example 3: Find the derivative of $f(x) = \sqrt[4]{1 + 2x + x^3}$
# $$ u = 1 + 2x + x^3, \qquad y = u^\frac{1}{4} $$
# $$ \frac{du}{dx} = 3x^2 + 2, \qquad \frac{dy}{du} = \frac{1}{4} u^{-\frac{3}{4}} = \frac{1}{4u^\frac{3}{4}} $$
# $$ \frac{dy}{dx} = \frac{3x^2 + 2}{4 (1 + 2x + x^3)^\frac{3}{4}} $$

diff((1 + 2 * x + x ** 3) ** (1/4))

# Example 4: Find the derivative of $f(t) = \frac{1}{(t^4 + 1)^3}$
# $$ u = t^4 + 1, \qquad y = \frac{1}{u^3} = u^{-3} $$
# $$ \frac{du}{dt} = 4t^3, \qquad \frac{dy}{du} = -3u^{-4} = -\frac{3}{u^4} $$
# $$ \frac{dy}{dt} = 4t^3 \cdot \left(-\frac{3}{(t^4 + 1)^4}\right) = -\frac{12t^3}{(t^4 + 1)^4} $$

diff(1 / (x ** 4 + 1) ** 3)

# Example 5: Find the derivative of $f(x) = \cos{(a^3 + x^3)}$
# $$ u = a^3 + x^3, \qquad y = \cos{u} $$
# $$ \frac{du}{dx} = 3x^2, \qquad \frac{dy}{du} = -\sin{u} $$
# $$ \frac{dy}{dx} = -3x^2 \sin{(a^3 + x^3)} $$

a = symbols('a')  # define a variable that we'll treat as a constant

# Because there is more than one variable, we must specify which one we're interested in for SymPy to compute the derivative.

diff(cos(a ** 3 + x ** 3), x)

# ## References

# [<NAME>. (2007). Essential calculus: Early transcendentals. Belmont, CA: Thomson Higher Education.](https://amzn.to/38dnRV0)
#
# [<NAME>. (2010). Calculus. Wellesley, MA: Wellesley-Cambridge.](https://amzn.to/2vVY0SZ)
content/posts/Chain Rule.ipynb
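# The notebook imports `simplify` but never uses it; a small sketch, assuming the same `x` symbol as above, showing how it can confirm that a hand-computed derivative (here Example 1) agrees with SymPy's result.
from sympy import symbols, diff, simplify

x = symbols('x')
hand_result = -20 * x * (1 - x ** 2) ** 9                      # Example 1, derived by hand via the chain rule
assert simplify(diff((1 - x ** 2) ** 10, x) - hand_result) == 0
print('Hand-derived result matches SymPy')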
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # Standard imports import time # Databass imports from databass import * def run_query(db, qstr): plan = parse(qstr) plan = plan.to_plan() print("QUERY PLAN", plan.pretty_print()) return run_plan(db, plan) def run_plan(db, plan): databass_rows = list() plan = Optimizer(db)(plan) # plan = Optimizer(db)(plan) for row in plan: vals = [] for v in row: if isinstance(v, str): vals.append(v) else: vals.append(float(v)) databass_rows.append(vals) return databass_rows def setup_row(): print("=== ROW MODE: SETUP ===\n") mode = Mode.ROW # either Mode.ROW or Mode.COLUMN_ALL or Mode.COLUMN_SELECT print("[setup] db in mode...", mode) db = Database.db(mode) print("[setup] {num} tables...OK".format(num=len(db.tablenames)), db.tablenames) print("[setup] ...OK") return db def setup_col(): print("=== RUNNING IN COL MODE ===\n") mode = Mode.COLUMN_ALL # either Mode.ROW or Mode.COLUMN_ALL or Mode.COLUMN_SELECT print("[setup] db in mode...", mode) db = Database.db(mode) print("[setup] {num} tables...OK".format(num=len(db.tablenames)), db.tablenames) print("[setup] ...OK") return db # - db_col = setup_col() # + simple_test = [ "SELECT * FROM data", "SELECT a, b FROM data", "SELECT data.e FROM data GROUP BY data.e", "SELECT data.a, data4.a FROM data, data4 WHERE data.a = data4.a" ] #p_partkey,p_name,p_mfgr,p_category,p_brand,p_color,p_type,p_size,p_container experiment_one = [ # "SELECT lo_custkey, lo_partkey from lineorder", # "SELECT * FROM part", "SELECT s_name, c_name FROM supplier, customer WHERE supplier.s_city == customer.c_city", # "SELECT lo_custkey, lo_suppkey FROM lineorder, supplier WHERE lineorder.lo_suppkey = supplier.s_suppkey", # "SELECT sum(lo_extendedprice * lo_discount) AS revenue FROM lineorder, date WHERE lo_orderdate = d_datekey AND d_year = 1993 AND lo_discount BETWEEN 1 AND 3 AND lo_quantity < 25" ] experiment_two = [ "SELECT lo_orderkey FROM lineorder LIMIT 20", "SELECT p_category FROM part", "SELECT c_nation FROM customer WHERE c_nation = 'UNITED STATES'" ] def run_exp(db, queries): print("\n=== RUNNING QUERIES ===\n") for qstr in queries: print("[query] ", qstr) start = time.time() output = run_query(db, qstr) print("[query] took %0.5f sec\n" % (time.time()-start)) # print("[output] ", output) print("\n=== DONE WITH ALL QUERIES ===\n") # - run_exp(db_col, experiment_one)
test_col.ipynb
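# The notebook above defines `setup_row()` but only times the column-store mode; a minimal sketch of the row-mode comparison, reusing only the helpers already defined in the notebook.
db_row = setup_row()
run_exp(db_row, experiment_one)   # same query set, row-oriented execution, for a timing comparison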
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="zgLRWEpJ0DvD" colab_type="text" # ### Resnet 18 # + id="U0fHlqekz9i-" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="95bf59ec-91b7-4c03-d862-169df0c750e5" # !ls data/hymenoptera_data/ # + id="VT-Gf_CR5ilv" colab_type="code" colab={} import torch import torch.nn as nn import torch.optim as optim from torch.optim import lr_scheduler import torchvision from torchvision import datasets, transforms import numpy as np import matplotlib.pyplot as plt import time import os import copy import resnet_models # + id="B_H1TUR-5m9Y" colab_type="code" colab={} # Load Data # Data Augemntation and Normalization for Training and Validation data data_transforms = { "train": transforms.Compose([ transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) # imagenet_stats ]), "val": transforms.Compose([ transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) # imagenet_stats ]) } data_dir = 'data/hymenoptera_data' image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir,x), data_transforms[x]) for x in ['train', 'val']} data_loaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=4, shuffle=True, num_workers=4) for x in ['train', 'val']} dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']} class_names = image_datasets['train'].classes device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") # + id="n1Ew4cF15pau" colab_type="code" colab={} # Visualize a few images def show_images(normalized_image_tensor, title=None): """ Imshow for Tensor """ inp = normalized_image_tensor.numpy().transpose((1, 2, 0)) mean = np.array([0.485, 0.456, 0.406]) std = np.array([0.229, 0.224, 0.225]) inp = std * inp + mean inp = inp.clip(0,1) plt.imshow(inp) if title is not None: plt.title(title) plt.pause(0.001) # pause a bit so that plots are updated # + id="bSE2zT0l5p1g" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 175} outputId="12ea7460-ce81-4b33-8fe8-9e46d91c325a" # get a batch of training data inputs, classes = next(iter(data_loaders['train'])) # make grid of inputs out = torchvision.utils.make_grid(inputs) show_images(out, title=[class_names[x] for x in classes]) # + id="Cz3b4Yov52Qu" colab_type="code" colab={} def train_model(model, criterion, optimizer, scheduler, num_epochs=25): start_time = time.time() best_model_wts = copy.deepcopy(model.state_dict()) best_acc = 0.0 for epoch in range(num_epochs): print(f'Epoch : {epoch+1}/{num_epochs}') print("-" * 2) for phase in ["train", "val"]: if phase == "train": scheduler.step() model.train() # set model to training mode else: model.eval() # set model to evaluate mode running_loss = 0 running_corrects = 0 # Iterate over data for inputs, labels in data_loaders[phase]: inputs = inputs.to(device) labels = labels.to(device) # zero the parameter gradients optimizer.zero_grad() # forward pass # track history if and only if train with torch.set_grad_enabled(phase == 'train'): outputs = model(inputs) _, preds = torch.max(outputs, 1) loss = criterion(outputs, labels) # backward + optimize only if in training phase if phase=="train": loss.backward() optimizer.step() # 
statistics running_loss += loss.item() running_corrects += torch.sum(preds == labels.data) epoch_loss = running_loss /dataset_sizes[phase] epoch_acc = running_corrects.double() / dataset_sizes[phase] print(f'{phase} loss: {epoch_loss:.4f}, acc: {epoch_acc:.4f}') # deep copy the model if phase == 'val' and epoch_acc > best_acc: best_acc = epoch_acc best_model_wts = copy.deepcopy(model.state_dict()) print() time_elapsed = time.time() - start_time print(f'training completed in {time_elapsed // 60}m {time_elapsed % 60}') print(f'Best validation accuracy: {best_acc:.4f}') # load best_model weights model.load_state_dict(best_model_wts) return model # + id="w4MWCSVp57ff" colab_type="code" colab={} def visualize_model(model, num_images=6): was_training = model.training model.eval() images_so_far = 0 fig = plt.figure() with torch.no_grad(): for i, (inputs, labels) in enumerate(data_loaders['val']): inputs = inputs.to(device) labels = labels.to(device) outputs = model(inputs) _, preds = torch.max(outputs, 1) for j in range(inputs.size()[0]): images_so_far += 1 ax = plt.subplot(num_images//2, 2, images_so_far) ax.axis('off') ax.set_title('predicted: {}'.format(class_names[preds[j]])) show_images(inputs.cpu().data[j]) if images_so_far == num_images: model.train(mode=was_training) return model.train(mode=was_training) # + id="qIXCEQQ1059M" colab_type="code" colab={} # Finetuning the convnet # Load a pretrained model and reset final fully connected layer. model_ftn = resnet_models.resnet18(pretrained=True) num_fltrs = model_ftn.fc.in_features model_ftn.fc = nn.Linear(num_fltrs, 2) model_ftn = model_ftn.to(device) criterion = nn.CrossEntropyLoss() optimizer_ftn = optim.SGD(model_ftn.parameters(), lr=1e-3, momentum=0.9) # Decay LR by a factor of 0.1 every 7 epochs exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ftn, step_size=7, gamma=0.1) # + id="yIcFIZ1L6NoX" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1909} outputId="b2c22a4a-3084-401c-f9e3-ac6275c3eac5" # Train and evaluate model_ftn = train_model(model_ftn, criterion, optimizer_ftn, exp_lr_scheduler, num_epochs=25) # + id="1dDNh-ND6I8k" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 821} outputId="af7b2092-1565-410c-f8d7-693265827d9c" # Visualize the model visualize_model(model_ftn)
resnet/training_resnets.ipynb
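# A brief sketch of running the fine-tuned network on a single validation batch; `model_ftn`, `data_loaders`, `class_names` and `device` are assumed from the notebook above.
import torch

model_ftn.eval()
inputs, labels = next(iter(data_loaders['val']))
with torch.no_grad():
    outputs = model_ftn(inputs.to(device))
    _, preds = torch.max(outputs, 1)          # index of the highest-scoring class per image
print('predicted:', [class_names[p] for p in preds.cpu().tolist()])
print('actual:   ', [class_names[l] for l in labels.tolist()])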
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Error Bars # This code shows some information to describe how the last Mean Square Error of the Artificial Neural Networks variates when training several times. # ## Importing the libraries import pybrain from pybrain.structure import RecurrentNetwork, FeedForwardNetwork, LinearLayer, SigmoidLayer, FullConnection from IPython.display import Image from pybrain.structure import connections import pandas as pd import numpy as np import matplotlib.pyplot as plt import matplotlib.patches as mpatches import matplotlib.lines as mlines from pybrain.datasets import SupervisedDataSet from pybrain.supervised import BackpropTrainer from pybrain.tools.customxml import NetworkWriter from pybrain.tools.customxml import NetworkReader # %pylab inline --no-import-all # ## Defining the functions # The **Eval_FF**, **Eval_R** and **Eval_R2** functions train a network (Feedforward and Recurrent, respectively) with a *dataset*, in a determinated number of *epochs*, for an a given number of *samples* def mse(predictions, targets): return ((predictions - targets) ** 2).mean() def Eval_FF (net,data,s,t,epochs,samples): a = np.array([]) ## MSE array ##_______________________________________________________________________________________________________ for n in range (0,samples): ##Repeat the training 'samples' times ##____________________________________create the FF network__________________________________________ netF = NetworkReader.readFrom(net) #____________________________________________Training____________________________________________ trainer = BackpropTrainer(netF, data, learningrate = 0.001, momentum = 0.99, verbose=False) errors = np.array([]) for epoch in range(0,epochs): temp = trainer.train() er2=np.array([]) for inp in s: j=netF.activate(inp) er2=np.append(er2,j) er2=np.reshape(er2,[len(s),2]) errors=np.append(errors,mse(er2,t)) a= np.append(a,errors[epochs-1]) return a def Eval_R (net,data,s,t,epochs,samples): a = np.array([]) ##_________________________________________________________________________________________________ for n in range (0,samples): netR = NetworkReader.readFrom(net) #____________________________________________Training____________________________________________ trainer = BackpropTrainer(netR, data, learningrate = 0.001, momentum = 0.99, verbose=False) errors = np.array([]) for epoch in range(0,epochs): netR.reset() temp = trainer.train() er2=np.array([]) for inp in s: j=netF.activate(inp) er2=np.append(er2,j) er2=np.reshape(er2,[len(s),2]) errors=np.append(errors,mse(er2,t)) a= np.append(a,errors[epochs-1]) return a def Eval_R2 (net,data,s,t,epochs,samples): a = np.array([]) ##________________________________________________________________________________________________ for n in range (0,samples): netR = NetworkReader.readFrom(net) netR.maxoffset=2 #____________________________________________Training____________________________________________ trainer = BackpropTrainer(netR, data, learningrate = 0.001, momentum = 0.99, verbose=False) errors = np.array([]) for epoch in range(0,epochs): netR.reset() temp = trainer.train() er2=np.array([]) for inp in s: j=netF.activate(inp) er2=np.append(er2,j) er2=np.reshape(er2,[len(s),2]) errors=np.append(errors,mse(er2,t)) a= np.append(a,errors2[epochs-1]) return a # ## Create the datasets def getcsv( filename ): 
temp = np.fromfile( filename, sep=';' ) numcols = len( temp ) del( temp ) df = pd.read_csv( filename, sep=';', names = [ x+1 for x in range(numcols)] ) return df train=getcsv('train.csv') val = getcsv( 'val.csv' ) cols = [ \ 'Record(1)', 'Temperature(1)', 'RelHum(1)', \ 'Ventilation(1)', 'Screening(1)', 'Heating(1)', 'Cooling(1)', \ 'LAI(1)', 'OutTemp(1)', 'OutRelHum(1)', 'OutRad(1)', 'OutWindVel(1)', \ 'HourAngle(1)', 'Declination(1)', 'Elevation(1)', 'RadTheor(1)', \ 'Record(0)', 'Temperature(0)', 'RelHum(0)', \ 'Ventilation(0)', 'Screening(0)', 'Heating(0)', 'Cooling(0)', \ 'LAI(0)', 'OutTemp(0)', 'OutRelHum(0)', 'OutRad(0)', 'OutWindVel(0)', \ 'HourAngle(0)', 'Declination(0)', 'Elevation(0)', 'RadTheor(0)', \ 'Record(-1)', 'Temperature(-1)', 'RelHum(-1)', \ 'Ventilation(-1)', 'Screening(-1)', 'Heating(-1)', 'Cooling(-1)', \ 'LAI(-1)', 'OutTemp(-1)', 'OutRelHum(-1)', 'OutRad(-1)', 'OutWindVel(-1)', \ 'HourAngle(-1)', 'Declination(-1)', 'Elevation(-1)', 'RadTheor(-1)', \ 'Record(-2)', 'Temperature(-2)', 'RelHum(-2)', \ 'Ventilation(-2)', 'Screening(-2)', 'Heating(-2)', 'Cooling(-2)', \ 'LAI(-2)', 'OutTemp(-2)', 'OutRelHum(-2)', 'OutRad(-2)', 'OutWindVel(-2)', \ 'HourAngle(-2)', 'Declination(-2)', 'Elevation(-2)', 'RadTheor(-2)', \ 'Record(-3)', 'Temperature(-3)', 'RelHum(-3)', \ 'Ventilation(-3)', 'Screening(-3)', 'Heating(-3)', 'Cooling(-3)', \ 'LAI(-3)', 'OutTemp(-3)', 'OutRelHum(-3)', 'OutRad(-3)', 'OutWindVel(-3)', \ 'HourAngle(-3)', 'Declination(-3)', 'Elevation(-3)', 'RadTheor(-3)', \ ] train.columns = cols val.columns = cols # ## Data normalization # + indexm=(["maximos","minimos"]) mm = pd.DataFrame(columns=train.columns,index=indexm) for n in range(1,train.shape[1]): max1=max(train[train.columns[n]]) max2=max(val[val.columns[n]]) mm[mm.columns[n]]["maximos"]=max(max1,max2) min1=min(train[train.columns[n]]) min2=min(val[val.columns[n]]) mm[mm.columns[n]]["minimos"]=min(min1,min2) # - def norm(x,maxi,mini): Vnorm=(2*((x-mini)/(maxi-mini)))-1 return Vnorm Ntrain = pd.DataFrame(columns=train.columns) for n in range(1,train.shape[1]): ar=train[train.columns[n]] Ntrain[Ntrain.columns[n]]=norm(ar,mm[mm.columns[n]]["maximos"],mm[mm.columns[n]]["minimos"]) # + s=np.array([Ntrain['Temperature(-2)'],Ntrain['RelHum(-2)'],Ntrain['Ventilation(-2)'],Ntrain['Screening(-2)'], \ Ntrain['HourAngle(-2)'],Ntrain['Declination(-2)'],Ntrain['Elevation(-2)'],Ntrain['RadTheor(-2)'], \ Ntrain['Temperature(-1)'],Ntrain['RelHum(-1)'],Ntrain['Ventilation(-1)'],Ntrain['Screening(-1)'], \ Ntrain['HourAngle(-1)'],Ntrain['Declination(-1)'],Ntrain['Elevation(-1)'],Ntrain['RadTheor(-1)'], \ Ntrain['Temperature(0)'],Ntrain['RelHum(0)'],Ntrain['Ventilation(0)'],Ntrain['Screening(0)'], \ Ntrain['HourAngle(0)'],Ntrain['Declination(0)'],Ntrain['Elevation(0)'],Ntrain['RadTheor(0)']]).T t=np.array([Ntrain['Temperature(1)'],Ntrain['RelHum(1)']]).T # + sR=np.array([Ntrain['Temperature(0)'],Ntrain['RelHum(0)'],Ntrain['Ventilation(0)'],Ntrain['Screening(0)'], \ Ntrain['HourAngle(0)'],Ntrain['Declination(0)'],Ntrain['Elevation(0)'],Ntrain['RadTheor(0)']]).T tR=np.array([Ntrain['Temperature(1)'],Ntrain['RelHum(1)']]).T # - #a=len(s) a=50 # + dataF = SupervisedDataSet(24,2) dataR = SupervisedDataSet(8,2) for m in range(0,a): dataF.addSample(s[m],t[m]) dataR.addSample(sR[m],tR[m]) # - # ## Evaluating the Networks # + ## Number of epochs n=50 ## Number of Samples m=10 # - F=Eval_FF ('NET_FF.xml',dataF,s,t,n,m) ##For n epochs, training m times R=Eval_R ('NET_R.xml',dataR,sR,tR,n,m) ##For n epochs, training m times R2=Eval_R2 
('NET_R2.xml',dataR,sR,tR,n,m) ##For n epochs, training m times Errors=([F, R, R2]) # ## Calculating the Mean # The sum of the data points divided by the number data points # $$M=\frac{\sum_{i=0}^n (x_i)}{n}$$ MEAN=np.array([]) for n in range(0,len(Errors)): MEAN=np.append(MEAN,Errors[n].mean()) # ## Calculating the Standar Deviation # Average difference between the data points and their mean: # # $$SD=\sqrt{\sum \frac{(x-M)^2}{n-1}}$$ Sd=np.array([]) for n in range(0,len(Errors)): Sd=np.append(Sd,Errors[n].std()) # + fig, ax = plt.subplots(1, sharex=True, figsize=(10,7)) a=len(Errors) ind = np.arange(a) width = 0.35 # the width of the bars mean = ax.bar(ind+width, MEAN, width, color='green',yerr=Sd, label='Mean Square Error') measurements=ax.plot(ind+0.9*width,Errors,'ro',color='blue', label='Last error') ax.set_xticks(ind+1.5*width) ax.set_xticklabels(('FeedForward', 'Recurrent', 'Recurrent 2')) blue_line = mlines.Line2D([], [], color='blue', marker='|', markersize=8, label='Standar Deviation') blue_point = mlines.Line2D([], [], color='blue', marker='o', markersize=8, label='Last training error') plt.legend(handles=[mean,blue_line,blue_point]) ax.set_ylabel('Mean Square Error') ax.set_title('Descriptive Error Bars for 20 trainings',size='xx-large') plt.show() # - 5
Evaluation2.ipynb
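# In the notebook above, `Eval_R` and `Eval_R2` call `netF.activate(...)` even though `netF` is local to `Eval_FF`, and `Eval_R2` appends from an undefined `errors2`. A corrected sketch of `Eval_R` under the apparent intent (activate the recurrent network `netR` and collect its own `errors`); `Eval_R2` would change in the same way. `mse`, `NetworkReader` and `BackpropTrainer` are the ones already defined/imported above.
def Eval_R(net, data, s, t, epochs, samples):
    """Train the recurrent network `samples` times and collect the final MSE of each run."""
    a = np.array([])
    for n in range(0, samples):
        netR = NetworkReader.readFrom(net)
        trainer = BackpropTrainer(netR, data, learningrate=0.001, momentum=0.99, verbose=False)
        errors = np.array([])
        for epoch in range(0, epochs):
            netR.reset()
            trainer.train()
            er2 = np.array([])
            for inp in s:
                er2 = np.append(er2, netR.activate(inp))   # was netF in the original
            er2 = np.reshape(er2, [len(s), 2])
            errors = np.append(errors, mse(er2, t))
        a = np.append(a, errors[epochs - 1])
    return a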
// --- // jupyter: // jupytext: // text_representation: // extension: .scala // format_name: light // format_version: '1.5' // jupytext_version: 1.14.4 // kernelspec: // display_name: Apache Toree - Scala // language: scala // name: apache_toree_scala // --- // <a href="https://cocl.us/Data_Science_with_Scalla_top"><img src = "https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/SC0103EN/adds/Data_Science_with_Scalla_notebook_top.png" width = 750, align = "center"></a> // <br/> // <a><img src="https://ibm.box.com/shared/static/ugcqz6ohbvff804xp84y4kqnvvk3bq1g.png" width="200" align="center"></a>" // // # Module 2: Preparing Data - Data Normalization // // ## Data Normalization // // ### Lesson Objectives // // - After completing this lesson, you should be able to: // - Normalize a dataset to have unit p-norm // - Normalize a dataset to have unit standard deviation and zero mean // - Normalize a dataset to have given minimum and maximum values // // // ## Normalizer // // - A Transformer which transforms a dataset of Vector rows, normalizing each Vector to have unit norm // - Takes a parameter P, which specifies the p-norm used for normalization (p=2 by default) // - Standardize input data and improve the behavior of learning algorithms // + attributes={"classes": ["scala"], "id": ""} import org.apache.spark.sql.SparkSession val spark = SparkSession.builder().getOrCreate() import spark.implicits._ // + attributes={"classes": ["scala"], "id": ""} // Continuing from Previous Example import org.apache.spark.ml.feature.VectorAssembler import org.apache.spark.sql.functions._ val dfRandom = spark.range(0, 10).select("id"). withColumn("uniform", rand(10L)). withColumn("normal1", randn(10L)). withColumn("normal2", randn(11L)) val assembler = new VectorAssembler(). setInputCols(Array("uniform","normal1","normal2")). setOutputCol("features") val dfVec = assembler.transform(dfRandom) // Continuing from Previous Example dfVec.select("id","features").show() // + attributes={"classes": ["scala"], "id": ""} // A Simple Normalizer import org.apache.spark.ml.feature.Normalizer val scaler1 = new Normalizer().setInputCol("features").setOutputCol("scaledFeat").setP(1.0) scaler1.transform(dfVec.select("id","features")).show(5) // - // ## Standard Scaler // // - A Model which can be fit on a dataset to produce a `StandardScalerModel` // - A Transformer which transforms a dataset of `Vector` rows, normalizing each feature to have unit standard deviation and/or zero mean // - Takes two parameters: // - `withStd`: scales the data to unit standard deviation (default: true) // - `withMean`: centers the data with mean before scaling (default: false) // - It builds a dense output, sparse inputs will raise an exception // - If the standard deviation of a feature is zero, it returns 0.0 in the Vector for that feature // + attributes={"classes": ["scala"], "id": ""} // A Simple Standard Scaler import org.apache.spark.ml.feature.StandardScaler val scaler2 = new StandardScaler(). setInputCol("features"). setOutputCol("scaledFeat"). setWithStd(true). 
setWithMean(true) val scaler2Model = scaler2.fit(dfVec.select("id","features")) scaler2Model.transform(dfVec.select("id","features")).show(5) // - // ## MinMax Scaler // // - A Model which can be fit on a dataset to produce a `MinMaxScalerModel` // - A Transformer which transforms a dataset of `Vector` rows, rescaling each feature to a specific range (often `[0,1]`) // - Takes two parameters: // - min: lower bound after transformation, shared by all features (default:0.0) // - max: upper bound after transformation, shared by all features (default: 1.0) // - Since zero values are likely to be transformed to non-zero values, sparse inputs may result in dense outputs // + attributes={"classes": ["scala"], "id": ""} // A Simple MinMax Scaler import org.apache.spark.ml.feature.MinMaxScaler val scaler3 = new MinMaxScaler(). setInputCol("features").setOutputCol("scaledFeat"). setMin(-1.0).setMax(1.0) val scaler3Model = scaler3.fit(dfVec.select("id","features")) scaler3Model.transform(dfVec.select("id","features")).show(5) // - // ## Lesson Summary // // - Having completed this lesson, you should be able to: // - Normalize a dataset to have unit p-norm // - Normalize a dataset to have unit standard deviation and zero mean // - Normalize a dataset to have given minimum and maximum values // // ### About the Authors // // [<NAME>](https://www.linkedin.com/in/vpetro) is Consulting Manager at Lightbend. He holds a Masters degree in Computer Science with specialization in Intelligent Systems. He is passionate about functional programming and applications of AI.
Scala Programming for Data Science/Data Science with Scala/Module 2: Preparing Data/3.2.4.ipynb
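# The three normalizations demonstrated above (unit p-norm, zero-mean/unit-variance, min-max to a range) have close scikit-learn counterparts; a minimal Python sketch on a small random matrix, with illustrative names and data only.
import numpy as np
from sklearn.preprocessing import Normalizer, StandardScaler, MinMaxScaler

rng = np.random.default_rng(42)
features = rng.normal(size=(10, 3))                                    # stand-in for the assembled feature vectors

l1_normalized = Normalizer(norm='l1').fit_transform(features)          # unit L1 norm per row (like setP(1.0))
standardized = StandardScaler(with_mean=True, with_std=True).fit_transform(features)
minmax_scaled = MinMaxScaler(feature_range=(-1.0, 1.0)).fit_transform(features)

print(np.abs(l1_normalized).sum(axis=1))                               # each row sums to 1
print(standardized.mean(axis=0), standardized.std(axis=0))             # ~0 mean, unit std per column
print(minmax_scaled.min(axis=0), minmax_scaled.max(axis=0))            # -1 and 1 per column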
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: nams # language: python # name: nams # --- # ##### Let's change gears and talk about Game of thrones or shall I say Network of Thrones. # # It is suprising right? What is the relationship between a fatansy TV show/novel and network science or python(it's not related to a dragon). # # If you haven't heard of Game of Thrones, then you must be really good at hiding. Game of Thrones is the hugely popular television series by HBO based on the (also) hugely popular book series A Song of Ice and Fire by <NAME>. In this notebook, we will analyze the co-occurrence network of the characters in the Game of Thrones books. Here, two characters are considered to co-occur if their names appear in the vicinity of 15 words from one another in the books. # ![](images/got.png) # <NAME>, an associate professor of mathematics at Macalester College, and <NAME>, an undergraduate created a network from the book A Storm of Swords by extracting relationships between characters to find out the most important characters in the book(or GoT). # # The dataset is publicly avaiable for the 5 books at https://github.com/mathbeveridge/asoiaf. This is an interaction network and were created by connecting two characters whenever their names (or nicknames) appeared within 15 words of one another in one of the books. The edge weight corresponds to the number of interactions. # # Credits: # # Blog: https://networkofthrones.wordpress.com # # Math Horizons Article: https://www.maa.org/sites/default/files/pdf/Mathhorizons/NetworkofThrones%20%281%29.pdf # + import pandas as pd import networkx as nx import matplotlib.pyplot as plt import community import numpy as np import warnings warnings.filterwarnings('ignore') # %matplotlib inline # - # ##### Let's load in the datasets book1 = pd.read_csv('datasets/game_of_thrones_network/asoiaf-book1-edges.csv') book2 = pd.read_csv('datasets/game_of_thrones_network/asoiaf-book2-edges.csv') book3 = pd.read_csv('datasets/game_of_thrones_network/asoiaf-book3-edges.csv') book4 = pd.read_csv('datasets/game_of_thrones_network/asoiaf-book4-edges.csv') book5 = pd.read_csv('datasets/game_of_thrones_network/asoiaf-book5-edges.csv') # The resulting DataFrame book1 has 5 columns: Source, Target, Type, weight, and book. Source and target are the two nodes that are linked by an edge. A network can have directed or undirected edges and in this network all the edges are undirected. The weight attribute of every edge tells us the number of interactions that the characters have had over the book, and the book column tells us the book number. # # book1.head() # Once we have the data loaded as a pandas DataFrame, it's time to create a network. We create a graph for each book. It's possible to create one MultiGraph instead of 5 graphs, but it is easier to play with different graphs. G_book1 = nx.Graph() G_book2 = nx.Graph() G_book3 = nx.Graph() G_book4 = nx.Graph() G_book5 = nx.Graph() # Let's populate the graph with edges from the pandas DataFrame. 
for row in book1.iterrows(): G_book1.add_edge(row[1]['Source'], row[1]['Target'], weight=row[1]['weight'], book=row[1]['book']) for row in book2.iterrows(): G_book2.add_edge(row[1]['Source'], row[1]['Target'], weight=row[1]['weight'], book=row[1]['book']) for row in book3.iterrows(): G_book3.add_edge(row[1]['Source'], row[1]['Target'], weight=row[1]['weight'], book=row[1]['book']) for row in book4.iterrows(): G_book4.add_edge(row[1]['Source'], row[1]['Target'], weight=row[1]['weight'], book=row[1]['book']) for row in book5.iterrows(): G_book5.add_edge(row[1]['Source'], row[1]['Target'], weight=row[1]['weight'], book=row[1]['book']) books = [G_book1, G_book2, G_book3, G_book4, G_book5] # Let's have a look at these edges. list(G_book1.edges(data=True))[16] list(G_book1.edges(data=True))[400] # ### Finding the most important node i.e character in these networks. # # Is it <NAME>, Tyrion, Daenerys, or someone else? Let's see! Network Science offers us many different metrics to measure the importance of a node in a network as we saw in the first part of the tutorial. Note that there is no "correct" way of calculating the most important node in a network, every metric has a different meaning. # # First, let's measure the importance of a node in a network by looking at the number of neighbors it has, that is, the number of nodes it is connected to. For example, an influential account on Twitter, where the follower-followee relationship forms the network, is an account which has a high number of followers. This measure of importance is called degree centrality. # # Using this measure, let's extract the top ten important characters from the first book (book[0]) and the fifth book (book[4]). deg_cen_book1 = nx.degree_centrality(books[0]) deg_cen_book5 = nx.degree_centrality(books[4]) sorted(deg_cen_book1.items(), key=lambda x:x[1], reverse=True)[0:10] sorted(deg_cen_book5.items(), key=lambda x:x[1], reverse=True)[0:10] # Plot a histogram of degree centrality plt.hist(list(nx.degree_centrality(G_book4).values())) plt.show() d = {} for i, j in dict(nx.degree(G_book4)).items(): if j in d: d[j] += 1 else: d[j] = 1 x = np.log2(list((d.keys()))) y = np.log2(list(d.values())) plt.scatter(x, y, alpha=0.9) plt.show() # ### Exercise # # Create a new centrality measure, weighted_degree(Graph, weight) which takes in Graph and the weight attribute and returns a weighted degree dictionary. Weighted degree is calculated by summing the weight of the all edges of a node and find the top five characters according to this measure. def weighted_degree(G, weight): result = dict() for node in G.nodes(): weight_degree = 0 for n in G.edges([node], data=True): weight_degree += n[2]['weight'] result[node] = weight_degree return result plt.hist(list(weighted_degree(G_book1, 'weight').values())) plt.show() sorted(weighted_degree(G_book1, 'weight').items(), key=lambda x:x[1], reverse=True)[0:10] # ### Let's do this for Betweeness centrality and check if this makes any difference # # Haha, evil laugh # + # First check unweighted, just the structure sorted(nx.betweenness_centrality(G_book1).items(), key=lambda x:x[1], reverse=True)[0:10] # + # Let's care about interactions now sorted(nx.betweenness_centrality(G_book1, weight='weight').items(), key=lambda x:x[1], reverse=True)[0:10] # - # #### PageRank # The billion dollar algorithm, PageRank works by counting the number and quality of links to a page to determine a rough estimate of how important the website is. 
The underlying assumption is that more important websites are likely to receive more links from other websites. # by default weight attribute in pagerank is weight, so we use weight=None to find the unweighted results sorted(nx.pagerank_numpy(G_book1, weight=None).items(), key=lambda x:x[1], reverse=True)[0:10] sorted(nx.pagerank_numpy(G_book1, weight='weight').items(), key=lambda x:x[1], reverse=True)[0:10] # ### Is there a correlation between these techniques? # # #### Exercise # # Find the correlation between these four techniques. # # - pagerank # - betweenness_centrality # - weighted_degree # - degree centrality cor = pd.DataFrame.from_records([nx.pagerank_numpy(G_book1, weight='weight'), nx.betweenness_centrality(G_book1, weight='weight'), weighted_degree(G_book1, 'weight'), nx.degree_centrality(G_book1)]) # + # cor.T # - cor.T.corr() # ## Evolution of importance of characters over the books # According to degree centrality the most important character in the first book is <NAME> but he is not even in the top 10 of the fifth book. The importance changes over the course of five books, because you know stuff happens ;) # # Let's look at the evolution of degree centrality of a couple of characters like <NAME>, <NAME>, Tyrion which showed up in the top 10 of degree centrality in first book. # # We create a dataframe with character columns and index as books where every entry is the degree centrality of the character in that particular book and plot the evolution of degree centrality Eddard Stark, Jon Snow and Tyrion. # We can see that the importance of Eddard Stark in the network dies off and with <NAME> there is a drop in the fourth book but a sudden rise in the fifth book evol = [nx.degree_centrality(book) for book in books] evol_df = pd.DataFrame.from_records(evol).fillna(0) evol_df[['Eddard-Stark', 'Tyrion-Lannister', 'Jon-Snow']].plot() set_of_char = set() for i in range(5): set_of_char |= set(list(evol_df.T[i].sort_values(ascending=False)[0:5].index)) set_of_char # ##### Exercise # # Plot the evolution of weighted degree centrality of the above mentioned characters over the 5 books, and repeat the same exercise for betweenness centrality. evol_df[list(set_of_char)].plot(figsize=(29,15)) # + evol = [nx.betweenness_centrality(graph, weight='weight') for graph in [G_book1, G_book2, G_book3, G_book4, G_book5]] evol_df = pd.DataFrame.from_records(evol).fillna(0) set_of_char = set() for i in range(5): set_of_char |= set(list(evol_df.T[i].sort_values(ascending=False)[0:5].index)) evol_df[list(set_of_char)].plot(figsize=(19,10)) # - # ### So what's up with <NAME>? nx.draw(nx.barbell_graph(5, 1), with_labels=True) sorted(nx.degree_centrality(G_book5).items(), key=lambda x:x[1], reverse=True)[:5] sorted(nx.betweenness_centrality(G_book5).items(), key=lambda x:x[1], reverse=True)[:5] # #### Community detection in Networks # A network is said to have community structure if the nodes of the network can be easily grouped into (potentially overlapping) sets of nodes such that each set of nodes is densely connected internally. # # We will use louvain community detection algorithm to find the modules in our graph. 
# + plt.figure(figsize=(15, 15)) partition = community.best_partition(G_book1) size = float(len(set(partition.values()))) pos = nx.kamada_kawai_layout(G_book1) count = 0 colors = ['red', 'blue', 'yellow', 'black', 'brown', 'purple', 'green', 'pink'] for com in set(partition.values()): list_nodes = [nodes for nodes in partition.keys() if partition[nodes] == com] nx.draw_networkx_nodes(G_book1, pos, list_nodes, node_size = 20, node_color = colors[count]) count = count + 1 nx.draw_networkx_edges(G_book1, pos, alpha=0.2) plt.show() # - d = {} for character, par in partition.items(): if par in d: d[par].append(character) else: d[par] = [character] d nx.draw(nx.subgraph(G_book1, d[3])) nx.draw(nx.subgraph(G_book1, d[1])) nx.density(G_book1) nx.density(nx.subgraph(G_book1, d[4])) nx.density(nx.subgraph(G_book1, d[4]))/nx.density(G_book1) # #### Exercise # # Find the most important node in the partitions according to degree centrality of the nodes. # + max_d = {} deg_book1 = nx.degree_centrality(G_book1) for group in d: temp = 0 for character in d[group]: if deg_book1[character] > temp: max_d[group] = character temp = deg_book1[character] # - max_d # ## A bit about power law in networks G_random = nx.erdos_renyi_graph(100, 0.1) nx.draw(G_random) G_ba = nx.barabasi_albert_graph(100, 2) nx.draw(G_ba) # Plot a histogram of degree centrality plt.hist(list(nx.degree_centrality(G_random).values())) plt.show() plt.hist(list(nx.degree_centrality(G_ba).values())) plt.show() G_random = nx.erdos_renyi_graph(2000, 0.2) G_ba = nx.barabasi_albert_graph(2000, 20) d = {} for i, j in dict(nx.degree(G_random)).items(): if j in d: d[j] += 1 else: d[j] = 1 x = np.log2(list((d.keys()))) y = np.log2(list(d.values())) plt.scatter(x, y, alpha=0.9) plt.show() d = {} for i, j in dict(nx.degree(G_ba)).items(): if j in d: d[j] += 1 else: d[j] = 1 x = np.log2(list((d.keys()))) y = np.log2(list(d.values())) plt.scatter(x, y, alpha=0.9) plt.show()
archive/7-game-of-thrones-case-study-instructor.ipynb
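# The `weighted_degree` exercise above can also be answered directly with NetworkX, since `degree` accepts a `weight` argument; a short sketch assuming the `G_book1` graph built in the notebook.
weighted_deg = dict(G_book1.degree(weight='weight'))
top5 = sorted(weighted_deg.items(), key=lambda x: x[1], reverse=True)[:5]
print(top5)   # should match the top characters from the hand-rolled weighted_degree function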
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda root] # language: python # name: conda-root-py # --- # # `pandas` # ### What it `pandas` ? # ### What is a dataframe ? # ![](img/table.jpg) import pandas as pd import matplotlib as plt # %matplotlib inline plt.style.use('ggplot') # # Exercise # # How to find the version of `pandas` installed? # + #Answer here # - # # 1. Frame # # Discuss | whiteboard # # 2. Acquire # 5000+ movie data scraped from IMDB. # Available here: https://www.kaggle.com/deepmatrix/imdb-5000-movie-dataset # # #Read the data imdb = pd.read_csv("imdb_movies.csv") # ### Exercise # Display the first few records of the data # + #Answer here # - # How do we find the column names of this data? # + #Answer here # + #How do we find the column types? # - # # 3. Refine # # Due to time constraint, we will skip this session for today # # 4. Transform # + #Find number of observations and variables in the dataset # - #Find unique values of color pd.unique(imdb['color']) # + #find number of movies for each of those unique values in color imdb['color'].value_counts() # - # ### Brief discussion on `pandas` label and index # #### Three main ways to selecting data from dataframe # # 1. `loc` # 2. `ix` # 3. `iloc` # #### `loc` Example: # `df.loc[row_indexer,column_indexer]` # # 1. One row,one column # 2. One row, many columns # 3. many rows, one column # 4. many rows, many columns # #### `ix` Example: # .ix supports mixed integer and label based access. # #### `iloc` Example: # .iloc is primarily integer position based (from 0 to length-1 of the axis), but may also be used with a boolean array # #### Filtering # + #Select only movies that got released in 2008 # + #Select only those observations that #has running duration greater than 150 # + #Get the observations that are color, #having critics reviews more than 500 and #was released on or after 2010 or on or before 2000 # - # #### group by # + #Find number of movies by the year it got released # + #Find number of movies by the year it got released and its color # - # #### Adding columns imdb.plot(kind="scatter", x="title_year", y="budget") # + #We will do more viz in explore. #But as you can clearly see, there is an outlier. #Assign everything above 0.2+e10 as HIGH BUDGET #and remaining as MODERATE BUDGET # + #Find the maximum value of budget # - #Use where to filter columns # + #create the column now and set it to moderate # - #Subset those above 2000000000 and set it to High # + #Use of applymap # + #Create a function budget_type that takes argument as budget #and return moderate or high # + #Use applymap and run it on imdb data # + #Sort budget and print the lowest 5 values # - # + #Sort budget and print the highest 5 values # - # # 5. Explore # # Visually explore # # 6. Model # # Correlation # + #What is the correlation between title_year and budget # - #Find the Correlation between imdb score #and the number of users who voted # # 7. Insights # # # ### Additional Stuff: Challenging problem # + #Find the most common actor1 and actor2 combination #Brainstorm how to approach this ! # -
2. introduction-to-pandas.ipynb
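# A hedged sketch of possible answers to the first few "#Answer here" exercises above, assuming the `imdb` DataFrame loaded from imdb_movies.csv; `duration` is an assumed column name for running time, the other columns are already used in the notebook.
# Version of pandas installed
print(pd.__version__)

# First few records, column names and dtypes
print(imdb.head())
print(imdb.columns)
print(imdb.dtypes)

# Movies released in 2008
movies_2008 = imdb[imdb['title_year'] == 2008]

# Running duration greater than 150 (column name assumed)
long_movies = imdb[imdb['duration'] > 150]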
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import warnings warnings.filterwarnings('ignore') # %matplotlib inline import numpy as np from sklearn.decomposition import PCA X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2], [4, 3], [4, -1]]) # X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2], [4, 3], [0, 0]]) # X = np.array([[-1, 1], [-2, 2], [-3, 3], [1, 1], [2, 2], [3, 3], [4, 4]]) X import matplotlib.pyplot as plt plt.figure(figsize=(10,10)) plt.scatter(X[:, 0], X[:, 1]) # plt.savefig('original.png') pca = PCA(n_components=2) pca.fit(X) pca.explained_variance_ # sum is 1, first pc has a very high variance, i.e. is very good, second could be deleted pca.explained_variance_ratio_ X_transformed = pca.transform(X) X_transformed plt.figure(figsize=(10,10)) plt.scatter(X_transformed[:, 0], X_transformed[:, 1]) # plt.savefig('reduced.png') # ### Reduction to 1 pca = PCA(n_components=1) pca.fit(X) pca.explained_variance_ # sum is 1, first pc has a very high variance, i.e. is very good, second could be deleted pca.explained_variance_ratio_ X_transformed = pca.transform(X) X_transformed plt.figure(figsize=(10,10)) plt.plot(X_transformed) # plt.savefig('reduced.png')
notebooks/unsupervised/pca-example.ipynb
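# A small follow-up sketch: after reducing `X` to one component, `inverse_transform` maps the points back into the original two-dimensional space, which makes the information lost by dropping the second component visible. `X`, `X_transformed` and the 1-component `pca` are assumed from the cells above.
X_reconstructed = pca.inverse_transform(X_transformed)

plt.figure(figsize=(10, 10))
plt.scatter(X[:, 0], X[:, 1], label='original')
plt.scatter(X_reconstructed[:, 0], X_reconstructed[:, 1], label='reconstructed from 1 component')
plt.legend()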
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + hideCode=true hideOutput=true hidePrompt=false import os, sys try: from synapse.lib.jupyter import * except ImportError as e: # Insert the root path of the repository to sys.path. # This assumes the notebook is located three directories away # From the root synapse directory. It may need to be varied synroot = os.path.abspath('../../../') sys.path.insert(0, synroot) from synapse.lib.jupyter import * # + hideCode=true hideOutput=true # For printing command example output import synapse.lib.cell as s_cell import synapse.lib.stormsvc as s_stormsvc class MySvcApi(s_cell.CellApi, s_stormsvc.StormSvc): _storm_svc_name = 'mysvc' _storm_svc_vers = (0, 0, 1) _storm_svc_pkgs = ( { 'name': 'mysvc', 'version': (0, 0, 1), 'commands': ( { 'name': 'mysvc.get', 'descr': 'Example Storm service command.', 'storm': '$lib.print("Hello Storm service.")', }, ), }, ) class MySvc(s_cell.Cell): cellapi = MySvcApi # + hideCode=false hidePrompt=false active="" # .. toctree:: # :titlesonly: # # .. _dev_stormservices: # # Storm Service Development # ######################### # # Anatomy of a Storm Service # ========================== # # A Storm Service (see :ref:`gloss-service`) is a standalone application that extends the capabilities of the Cortex. # One common use case for creating a service is to add a Storm command that will query third-party data, # translate the results into the Synapse datamodel, and then ingest them into the hypergraph. # # In order to leverage core functionalities it is recommended that Storm services are created as Cell implementations, # and the documentation that follows will assume this. For additional information see :ref:`dev_architecture`. # # A Storm service generally implements the following components: # # - A :ref:`gloss-package` that contains the new :ref:`stormservice-cmd` and optional new :ref:`stormservice-mod`. # # - A subclass of ``synapse.lib.CellApi`` which uses the ``synapse.lib.StormSvc`` mixin and contains the following information: # # - The service name, version, packages, and events as defined in ``synapse.lib.StormSvc``. # - Custom methods which will be accessible as Telepath API endpoints, and therefore available for use within defined Storm commands. # # - A subclass of ``synapse.lib.Cell`` which includes additional configuration definitions and methods required to implement the service. # # When implemented as a Cell, methods can also optionally have custom permissions applied to them. # If a specific rule is added it should be namespaced with the service name, e.g. ``svcname.rule1``. # Alternatively, a method can wrapped with ``@s_cell.adminapi()`` to only allow admin access. # # For additional details see :ref:`stormservice-example`. # # Connecting a service # -------------------- # # For instructions on configuring and starting a Cell service see :ref:`devops-cell-config`. # # Before connecting a service to a Cortex it is a best practice to add a new service user, # which can be accomplished with ``synapse.tools.cellauth``. 
For example:: # # python -m synapse.tools.cellauth tcp://root:<root_passwd>@<svc_ip>:<svc_port> modify svcuser1 --adduser # python -m synapse.tools.cellauth tcp://root:<root_passwd>@<svc_ip>:<svc_port> modify svcuser1 --passwd secret # # If the service requires specific permissions for a new user they can also be added:: # # python -m synapse.tools.cellauth tcp://root:<root_passwd>@<svc_ip>:<svc_port> modify svcuser1 --addrule svcname.rule1 # # Permissions to access the service can be granted by adding the ``service.get.<svc_iden>`` rule to the appropriate users / roles in the Cortex. # # A Storm command can be run on the Cortex to add the new service, and the new service will now be present in the service list and Storm ``help``. # # .. highlight:: none # # # + hideCode=true hideOutput=true cmdr, svcprox = await getTempCoreCmdrStormsvc('mysvc', MySvc.anit, svcconf=None, outp=None) svcs = await cmdr.core.callStorm('return($lib.service.list())') assert len(svcs) == 1 svcurl = svcs[0]['url'] svciden = svcs[0]['iden'] await cmdr.storm(f'$lib.service.del({svciden})') svcs = await cmdr.core.callStorm('return($lib.service.list())') assert len(svcs) == 0 # + [markdown] hideCode=false # Services are added to a Cortex with the ``service.add`` command. # + hideCode=true await cmdr.runCmdLine(f'storm service.add mysvc {svcurl}') # - # Services that have been connected to the Cortex can be listed with the ``service.list`` command. # + hideCode=true await cmdr.runCmdLine(f'storm service.list') # + hideCode=false hidePrompt=false active="" # .. _stormservice-cmd: # # Storm Service Commands # ====================== # # Implementation # -------------- # # Multiple Storm commands can be added to a Storm service package, with each defining the following attributes: # # - ``name``: Name of the Storm command to expose in the Cortex. # - ``descr``: Description of the command which will be available in ``help`` displays. # - ``cmdargs``: An optional list of arguments for the command. # - ``cmdconf``: An optional dictionary of additional configuration variables to provide to the command Storm execution. # - ``forms``: List of input and output forms for the command. # - ``storm``: The Storm code, as a string, that will be executed when the command is called. # # Typically, the Storm code will start by getting a reference to the service via ``$svc = $lib.service.get($cmdconf.svciden)`` # and reading in any defined ``cmdargs`` that are available in ``$cmdopts``. The methods defined in the service's Cell API # can then be called by, for example, ``$retn = $svc.mysvcmethod($cmdopts.query)``. # # Input/Output Conventions # ------------------------ # # Most commands that enrich or add additional context to nodes should simply yield the nodes they were given as inputs. # If they don’t know how to enrich or add additional context to a given form, nodes of that form should be yielded rather than producing an error. # This allows a series of enrichment commands to be pipelined regardless of the different inputs that a given command knows how to operate on. # # Argument Conventions # -------------------- # # ``--verbose`` # ~~~~~~~~~~~~~ # # In general, Storm commands should operate silently over their input nodes and should especially avoid printing anything "per node". # However, when an error occurs, the command may use ``$lib.warn()`` to print a warning message per-node. # Commands should implement a ``--verbose`` command line option to enable printing "per node" informational output. 
# # ``--debug`` # ~~~~~~~~~~~ # # For commands where additional messaging would assist in debugging a ``--debug`` command line option should be implemented. # For example, a Storm command that is querying a third-party data source could use ``$lib.print()`` to print the raw query string # and raw response when the ``--debug`` option is specified. # # ``--yield`` # ~~~~~~~~~~~ # # For commands that create additional nodes, it may be beneficial to add a ``--yield`` option to allow a query to operate on the newly created nodes. # Some guidelines for ``--yield`` options: # # - The command should *not* yield the input node(s) when a ``--yield`` is specified # - The ``--yield`` option should *not* be implemented when pivoting from the input node to reach the newly created node is a “refs out” or 1-to-1 direct pivot. For example, there is no need to have a ``--yield`` option on the ``maxmind`` command even though it may create an ``inet:asn`` node for an input ``inet:ipv4`` node due to the 1-to-1 pivot ``-> inet:asn`` being possible. # - The ``--yield`` option should ideally determine a “primary” node form to yield even when the command may create many forms in order to tag them or update .seen times. # # .. _stormservice-mod: # # Storm Service Modules # ===================== # # Modules can be added to a Storm service package to expose reusable Storm functions. # Each module defines a ``name``, which is used for importing elsewhere via ``$lib.import()``, # and a ``storm`` string. The Storm code in this case contains callable functions with the format:: # # function myfunc(var1, var2) { # // function Storm code # } # # .. _stormservice-example: # # Minimal Storm Service Example # ============================= # # A best practice is to separate the Storm and service code into separate files, and nest within a ``synmods`` directory to avoid Python namespace conflicts:: # # service-example # ├── synmods # │   └── example # │   ├── __init__.py # │   ├── service.py # │   ├── storm.py # │   └── version.py # # The Storm package and the service should also maintain consistent versioning. # # For convenience, the example below shows the Storm code included in the ``service.py`` file. # # ``service.py`` # -------------- # .. highlight:: python3 # # # + import sys import asyncio import synapse.lib.cell as s_cell import synapse.lib.stormsvc as s_stormsvc # The Storm definitions below are included here for convenience # but are typically contained in a separate storm.py file and imported to service.py. # Other Storm commands could be created to call the additional Telepath endpoints. svc_name = 'example' svc_guid = '0ecc1eb65659a0f07141bc1a360abda3' # can be generated with synapse.common.guid() svc_vers = (0, 0, 1) svc_evts = { 'add': { 'storm': f'[(meta:source={svc_guid} :name="Example data")]' } } svc_mod_ingest_storm = ''' function ingest_ips(data, srcguid) { $results = $lib.set() for $ip in $data { [ inet:ipv4=$ip ] // Lightweight edge back to meta:source { [ <(seen)+ { meta:source=$srcguid } ] } { +inet:ipv4 $results.add($node) } } | spin | return($results) } ''' # The first line of this description will display in the Storm help svc_cmd_get_desc = ''' Query the Example service. 
Examples: # Query the service and create an IPv4 node inet:fqdn=good.com | example.get # Query the service and yield the created inet:ipv4 node inet:fqdn=good.com | example.get --yield ''' svc_cmd_get_forms = { 'input': [ 'inet:fqdn', ], 'output': [ 'inet:ipv4', ], } svc_cmd_get_args = ( ('--yield', {'default': False, 'action': 'store_true', 'help': 'Whether to yield the created nodes to the output stream.'}), ('--debug', {'default': False, 'action': 'store_true', 'help': 'Enable debug output.'}), ) svc_cmd_get_conf = { 'srcguid': svc_guid, } svc_cmd_get_storm = ''' init { $svc = $lib.service.get($cmdconf.svciden) $ingest = $lib.import(example.ingest) $srcguid = $cmdconf.srcguid $debug = $cmdopts.debug $yield = $cmdopts.yield } // $node is a special variable that references the inbound Node object $form = $node.form() switch $form { "inet:fqdn": { $query=$node.repr() } *: { $query="" $lib.warn("Example service does not support {form} nodes", form=$form) } } // Yield behavior to drop the inbound node if $yield { spin } // Call the service endpoint and ingest the results if $query { if $debug { $lib.print("example.get query: {query}", query=$query) } $retn = $svc.getData($query) if $retn.status { $results = $ingest.ingest_ips($retn.data, $srcguid) if $yield { for $result in $results { $lib.print($result) yield $result } } } else { $lib.warn("example.get error: {err}", err=$retn.mesg) } } ''' svc_cmds = ( { 'name': f'{svc_name}.get', 'descr': svc_cmd_get_desc, 'cmdargs': svc_cmd_get_args, 'cmdconf': svc_cmd_get_conf, 'forms': svc_cmd_get_forms, 'storm': svc_cmd_get_storm, }, ) svc_pkgs = ( { 'name': svc_name, 'version': svc_vers, 'modules': ( { 'name': f'{svc_name}.ingest', 'storm': svc_mod_ingest_storm, }, ), 'commands': svc_cmds, }, ) class ExampleApi(s_cell.CellApi, s_stormsvc.StormSvc): ''' A Telepath API for the Example service. ''' # These defaults must be overridden from the StormSvc mixin _storm_svc_name = svc_name _storm_svc_vers = svc_vers _storm_svc_evts = svc_evts _storm_svc_pkgs = svc_pkgs async def getData(self, query): return await self.cell.getData(query) async def getInfo(self): await self._reqUserAllowed(('example', 'info')) return await self.cell.getInfo() @s_cell.adminapi() async def getAdminInfo(self): return await self.cell.getAdminInfo() class Example(s_cell.Cell): cellapi = ExampleApi confdefs = { 'api_key': { 'type': 'string', 'description': 'API key for accessing an external service.', }, 'api_url': { 'type': 'string', 'description': 'The URL for an external service.', 'default': 'https://example.com', }, } async def __anit__(self, dirn, conf): await s_cell.Cell.__anit__(self, dirn, conf=conf) self.apikey = self.conf.get('api_key') self.apiurl = self.conf.get('api_url') async def getData(self, query): # Best practice is to also return a status and optional message in case of an error retn = { 'status': True, 'data': None, 'mesg': None, } # Retrieving and parsing data would go here if query == 'good.com': data = ['1.2.3.4', '5.6.7.8'] retn['data'] = data else: retn['status'] = False retn['mesg'] = 'An error occurred during data retrieval.' 
return retn async def getInfo(self): info = { 'generic': 'info', } return info async def getAdminInfo(self): info = { 'admin': 'info', } return info # + hideCode=true hideOutput=true cmdr, svcprox = await getTempCoreCmdrStormsvc('example', Example.anit, svcconf=None, outp=None) msgs = await cmdr.storm('service.list') assert(any(['True (example)' in str(msg) for msg in msgs])) nodes = await cmdr.eval(f'meta:source={svc_guid}') assert(len(nodes) == 1) await cmdr.eval('[inet:fqdn=good.com inet:fqdn=bad.com]') nodes = await cmdr.eval(f'inet:fqdn=good.com | example.get --yield') assert(len(nodes) == 2) assert(all(node[0][0] == 'inet:ipv4' for node in nodes)) nodes = await cmdr.eval(f'inet:fqdn=good.com | example.get') assert(len(nodes) == 1) assert(nodes[0][0][0] == 'inet:fqdn') msgs = await cmdr.storm('inet:fqdn=bad.com | example.get') assert(any(['error occurred' in str(msg) for msg in msgs])) info = await svcprox.getInfo() assert(info == {'generic': 'info'}) info = await svcprox.getAdminInfo() assert(info == {'admin': 'info'}) _ = await cmdr.fini() _ = await svcprox.fini()
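# + [markdown] hideCode=false
# As a quick sanity check of the package definition above, the cell below walks the ``svc_pkgs``
# structure defined earlier in this guide and prints the Storm commands and modules it will register
# with the Cortex. This is only an illustrative sketch and assumes the ``svc_pkgs`` tuple from the
# example is still in scope.

# +
for pkg in svc_pkgs:
    print(f"package {pkg['name']} version {pkg['version']}")
    for mod in pkg.get('modules', ()):
        print(f"  module:  {mod['name']}")
    for cmd in pkg.get('commands', ()):
        # The first line of the description is what `help` shows in the Cortex
        print(f"  command: {cmd['name']} - {cmd['descr'].strip().splitlines()[0]}")
# -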
docs/synapse/devguides/stormservices.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Computer Vision Nanodegree
#
# ## Project: Image Captioning
#
# ---
#
# The Microsoft **C**ommon **O**bjects in **CO**ntext (MS COCO) dataset is a large-scale dataset for scene understanding. The dataset is commonly used to train and benchmark object detection, segmentation, and captioning algorithms.
#
# ![Sample Dog Output](images/coco-examples.jpg)
#
# You can read more about the dataset on the [website](http://cocodataset.org/#home) or in the [research paper](https://arxiv.org/pdf/1405.0312.pdf).
#
# In this notebook, you will explore this dataset in preparation for the project.

# ## Step 1: Initialize the COCO API
#
# We begin by initializing the [COCO API](https://github.com/cocodataset/cocoapi) that you will use to obtain the data.

# +
import os
import sys
sys.path.append('/opt/cocoapi/PythonAPI')
from pycocotools.coco import COCO

# initialize COCO API for instance annotations
dataDir = '/opt/cocoapi'
dataType = 'val2014'
instances_annFile = os.path.join(dataDir, 'annotations/instances_{}.json'.format(dataType))
coco = COCO(instances_annFile)

# initialize COCO API for caption annotations
captions_annFile = os.path.join(dataDir, 'annotations/captions_{}.json'.format(dataType))
coco_caps = COCO(captions_annFile)

# get image ids
ids = list(coco.anns.keys())
# -

# ## Step 2: Plot a Sample Image
#
# Next, we plot a random image from the dataset, along with its five corresponding captions. Each time you run the code cell below, a different image is selected.
#
# In the project, you will use this dataset to train your own model to generate captions from images!

# +
import numpy as np
import skimage.io as io
import matplotlib.pyplot as plt
# %matplotlib inline

# pick a random annotation id and look up the corresponding image URL
ann_id = np.random.choice(ids)
img_id = coco.anns[ann_id]['image_id']
img = coco.loadImgs(img_id)[0]
url = img['coco_url']

# print URL and visualize corresponding image
print(url)
I = io.imread(url)
plt.axis('off')
plt.imshow(I)
plt.show()

# load and display captions
annIds = coco_caps.getAnnIds(imgIds=img['id'])
anns = coco_caps.loadAnns(annIds)
coco_caps.showAnns(anns)
# -

# ## Step 3: What's to Come!
#
# In this project, you will use the dataset of image-caption pairs to train a CNN-RNN model to automatically generate captions from images. You'll learn more about how to design the architecture in the next notebook in the sequence (**1_Preliminaries.ipynb**).
#
# ![Image Captioning CNN-RNN model](images/encoder-decoder.png)
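# ## Optional: Inspect the Object Annotations
#
# The instance annotations loaded in Step 1 also tell us which object categories appear in the sampled image.
# The cell below is a small optional check (it assumes the `coco` and `img` variables from the cells above
# are still in scope) that uses the standard pycocotools accessors to list the annotated category names.

# +
# list the object categories annotated in the sampled image
ann_ids = coco.getAnnIds(imgIds=img['id'])
anns = coco.loadAnns(ann_ids)

cat_ids = sorted({ann['category_id'] for ann in anns})
cat_names = [cat['name'] for cat in coco.loadCats(cat_ids)]

print('Instance annotations in this image:', len(anns))
print('Categories present:', ', '.join(cat_names))
# -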
image_captioning/0_Dataset.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Part 2: Handling the Cold Start Problem with Content-Based Filtering # # Collaborative filtering relies solely on user-item interactions within the utility matrix. The issue with this approach is that brand new users or items with no interactions get excluded from the recommendation system. This is called the "cold start" problem. Content-based filtering is a way to handle this problem by generating recommendations based on user and item features. # # In this tutorial, we will generate item-item recommendations using content-based filtering. # ### Step 1: Import Dependencies # # We will be using the following Python packages: # # - [numpy](https://numpy.org/): for scientific computing # - [pandas](https://pandas.pydata.org/): for data manipulation # - [scikit-learn](https://scikit-learn.org/stable/): for machine learning # - [matplotlib](https://matplotlib.org/), [seaborn](https://seaborn.pydata.org/): for data visualization # + import numpy as np import pandas as pd import sklearn import matplotlib.pyplot as plt import seaborn as sns import warnings warnings.simplefilter(action='ignore', category=FutureWarning) # - # ### Step 2: Load the Data # # Let's assume we're building a recommender system for a new movie platform and we know very little about our current users. We don't have enough interactios data in our system to rely on collaborative filtering. # # What we do have is movie features: which genres a movie belongs to, and which year it was released. movies = pd.read_csv("https://s3-us-west-2.amazonaws.com/recommender-tutorial/movies.csv") movies.head() # ### Step 3: Data Cleaning and Exploration # # Before exploring our movie features dataset, we need to clean in two ways: # # 1. Genres is expressed as a string with a pipe | separating each genre. We will manipulate this string into a list, which will make it much easier to analyze. # 2. Title currently has (year) appended at the end. We will extract year from each title string and create a new column for it. # #### Converting Genres from String Format to List # # The genres column is currently a string separated with pipes. Let's convert this into a list using the "split" function. # # We want # `"Adventure|Children|Fantasy"` # to convert to this: # `[Adventure, Children, Fantasy]`. movies['genres'] = movies['genres'].apply(lambda x: x.split("|")) movies.head() # #### How many movie genres are there? # # We can use Python's Counter to create a dictionary containing frequency counts of each genre in our dataset. # + from collections import Counter genres_counts = Counter(g for genres in movies['genres'] for g in genres) print(f"There are {len(genres_counts)} genre labels.") genres_counts # - # There are 20 genre labels and 19 genres that are used to describe movies in this dataset. Some movies don't have any genres, hence the label `(no genres listed)`. # # Let's remove all movies having `(no genres listed)` as its genre label. We'll also remove this from our `genre_counts` dictionary. # + movies = movies[movies['genres']!='(no genres listed)'] del genres_counts['(no genres listed)'] # - # #### What are the most popular genres? # # We can use `Counter`'s [most_common()](https://docs.python.org/2/library/collections.html#collections.Counter.most_common) method to get the genres with the highest movie counts. 
print("The 5 most common genres: \n", genres_counts.most_common(5)) # The top 5 genres are: `Drama`, `Comedy`, `Thriller`, `Action` and `Romance`. # # Let's also visualize genres popularity with a barplot. # + genres_counts_df = pd.DataFrame([genres_counts]).T.reset_index() genres_counts_df.columns = ['genres', 'count'] genres_counts_df = genres_counts_df.sort_values(by='count', ascending=False) plt.figure(figsize=(10,5)) sns.barplot(x='genres', y='count', data=genres_counts_df, palette='viridis') plt.xticks(rotation=90) plt.show() # - # The plot above shows that `Drama` and `Comedy` are the two most popular movie genres. The least popular movie genres are `Westerns`, `IMAX`, and `Film-Noir`. # #### Parsing out year from movie title # # In our dataset, movie titles currently the year of release appended to it in brackets, e.g., `"Toy Story (1995)"`. We want to use the year of a movie's release as a feature, so let's parse it out from the title string and create a new `year` column for it. # # We can start with writing a function that parses out year from the title string. In the code below, `extract_year_from_title()` takes in the title and does the following: # # - generates a list by splitting out each word by spaces (e.g., `["Toy", "Story", "(1995)"]`) # - gets the last element of the list (e.g., `"(1995)"`) # - if the last element has brackets surrounding it, these `()` brackets get stripped (e.g., `"1995"`) # - converts the year into an integer # + import re def extract_year_from_title(title): t = title.split(' ') year = None if re.search(r'\(\d+\)', t[-1]): year = t[-1].strip('()') year = int(year) return year # - # We can test out this function with our example of `"Toy Story (1995)"`: title = "Toy Story (1995)" year = extract_year_from_title(title) print(f"Year of release: {year}") print(type(year)) # Our function `extract_year_from_title()` works! It's able to successfully parse out year from the title string as shown above. We can now apply this to all titles in our `movies` dataframe using Pandas' [apply()](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.apply.html) method. movies['year'] = movies['title'].apply(extract_year_from_title) movies.head() # #### How many different years of release are covered in our dataset? movies['year'].nunique() # There are over 100 years of release in our dataset. Let's collapse this down into decades to get a general sense of when movies were released in our dataset. # #### What was the most popular decade of movie release? # # Before we begin, we'll remove all movies with null year. print(f"Original number of movies: {movies['movieId'].nunique()}") movies = movies[~movies['year'].isnull()] print(f"Number of movies after removing null years: {movies['movieId'].nunique()}") # We filtered out 24 movies that don't have a year of release. # # Now, there are two ways to get the decade of a year: # # 1. converting year to string, replacing the fourth (last) number with a 0 # 2. rounding year down to the nearest 10 # # We'll show both implementations in the code below: # + x = 1995 def get_decade(year): year = str(year) decade_prefix = year[0:3] # get first 3 digits of year decade = f'{decade_prefix}0' # append 0 at the end return int(decade) get_decade(x) # + def round_down(year): return year - (year%10) round_down(x) # - # The two functions `get_decade()` and `round_down()` both accomplish the same thing: they both get the decade of a year. # # We can apply either of these functions to all years in our `movies` dataset. 
We'll use `round_down()` in this example to a create a new column called `'decade'`: movies['decade'] = movies['year'].apply(round_down) plt.figure(figsize=(10,6)) sns.countplot(movies['decade'], palette='Blues') plt.xticks(rotation=90) # As we can see from the plot above, the most common decade is the 2000s followed by the 1990s for movies in our dataset. # ### Step 4: Transforming the Data # # In order to build a content-based filtering recommender, we need to set up our dataset so that rows represent movies and columns represent features (i.e., genres and decades). # # First, we need to manipulate the `genres` column so that each genre is represented as a separate binary feature. "1" indicates that the movie falls under a given genre, while "0" does not. # + genres = list(genres_counts.keys()) for g in genres: movies[g] = movies['genres'].transform(lambda x: int(g in x)) # - # Let's take a look at what the movie genres columns look like: movies[genres].head() # Great! Our genres columns are represented as binary feautres. The next step is to wrangle our `decade` column so that each decade has its own column. We can do this using pandas' [get_dummies()](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.get_dummies.html) function, which works by creating a categorical variable into binary variables. movie_decades = pd.get_dummies(movies['decade']) movie_decades.head() # Now, let's create a new `movie_features` dataframe by combining our genres features and decade features. We can do this using pandas' [concat](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.concat.html) function which concatenates (appends) genres and decades into a single dataframe. movie_features = pd.concat([movies[genres], movie_decades], axis=1) movie_features.head() # Our `movie_features` dataframe is ready. The next step is to start building our recommender. # ### Step 5: Building a "Similar Movies" Recommender Using Cosine Similarity # # We're going to build our item-item recommender using a similarity metric called [cosine similarity](https://en.wikipedia.org/wiki/Cosine_similarity). # # Cosine similarity looks at the cosine angle between two vectors (e.g., $A$ and $B$). The smaller the cosine angle, the higher the degree of similarity between $A$ and $B$. You can calculate the similarity between $A$ and $B$ with this equation: # # $$\cos(\theta) = \frac{A\cdot B}{||A|| ||B||}$$ # # In this tutorial, we're going to use scikit-learn's cosine similarity [function](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.pairwise.cosine_similarity.html) to generate a cosine similarity matrix of shape $(n_{\text{movies}}, n_{\text{movies}})$. With this cosine similarity matrix, we'll be able to extract movies that are most similar to the movie of interest. # + from sklearn.metrics.pairwise import cosine_similarity cosine_sim = cosine_similarity(movie_features, movie_features) print(f"Dimensions of our movie features cosine similarity matrix: {cosine_sim.shape}") # - # As expected, after passing the `movie_features` dataframe into the `cosine_similarity()` function, we get a cosine similarity matrix of shape $(n_{\text{movies}}, n_{\text{movies}})$. # # This matrix is populated with values between 0 and 1 which represent the degree of similarity between movies along the x and y axes. # ### Let's create a movie finder function # # Let's say we want to get recommendations for movies that are similar to Jumanji. 
To get results from our recommender, we need to know the exact title of a movie in our dataset. # # In our dataset, Jumanji is actually listed as `'Jumanji (1995)'`. If we misspell Jumanji or forget to include its year of release, our recommender won't be able to identify which movie we're interested in. # # To make our recommender more user-friendly, we can use a Python package called [fuzzywuzzy](https://pypi.org/project/fuzzywuzzy/) which will find the most similar title to a string that you pass in. Let's create a function called `movie_finder()` which take advantage of `fuzzywuzzy`'s string matching algorithm to get the most similar title to a user-inputted string. # + from fuzzywuzzy import process def movie_finder(title): all_titles = movies['title'].tolist() closest_match = process.extractOne(title,all_titles) return closest_match[0] # - # Let's test this out with our Jumanji example. title = movie_finder('juminji') title # To get relevant recommendations for Jumanji, we need to find its index in the cosine simialrity matrix. To identify which row we should be looking at, we can create a movie index mapper which maps a movie title to the index that it represents in our matrix. # # Let's create a movie index dictionary called `movie_idx` where the keys are movie titles and values are movie indices: movie_idx = dict(zip(movies['title'], list(movies.index))) idx = movie_idx[title] idx # Using this handy `movie_idx` dictionary, we know that Jumanji is represented by index 1 in our matrix. Let's get the top 10 most similar movies to Jumanji. n_recommendations=10 sim_scores = list(enumerate(cosine_sim[idx])) sim_scores = sorted(sim_scores, key=lambda x: x[1], reverse=True) sim_scores = sim_scores[1:(n_recommendations+1)] similar_movies = [i[0] for i in sim_scores] # `similar_movies` is an array of indices that represents Jumanji's top 10 recommendations. We can get the corresponding movie titles by either creating an inverse `movie_idx` mapper or using `iloc` on the title column of the `movies` dataframe. print(f"Because you watched {title}:") movies['title'].iloc[similar_movies] # Cool! These recommendations seem pretty relevant and similar to Jumanji. The first 5 movies are family-friendly films from the 90s. # # We can test our recommender further with other movie titles. For your convenience, I've packaged the steps into a single function which takes in the movie title of interest and number of recommendations. def get_content_based_recommendations(title_string, n_recommendations=10): title = movie_finder(title_string) idx = movie_idx[title] sim_scores = list(enumerate(cosine_sim[idx])) sim_scores = sorted(sim_scores, key=lambda x: x[1], reverse=True) sim_scores = sim_scores[1:(n_recommendations+1)] similar_movies = [i[0] for i in sim_scores] print(f"Recommendations for {title}:") print(movies['title'].iloc[similar_movies]) get_content_based_recommendations('aladin', 5)
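# #### Sanity-checking the cosine similarity formula
#
# As a quick, self-contained check of the cosine similarity equation introduced above, the cell below
# computes the similarity of two toy vectors by hand and compares it with scikit-learn's
# `cosine_similarity`. The vectors are made up purely for illustration.

# +
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity

A = np.array([1, 0, 1, 1], dtype=float)
B = np.array([0, 1, 1, 1], dtype=float)

# cos(theta) = A.B / (||A|| ||B||)
manual = A.dot(B) / (np.linalg.norm(A) * np.linalg.norm(B))
sklearn_value = cosine_similarity(A.reshape(1, -1), B.reshape(1, -1))[0, 0]

print(f"manual:  {manual:.4f}")
print(f"sklearn: {sklearn_value:.4f}")
# -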
part-2-cold-start-problem.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import numpy as np


def read_iq(file, dtype=np.dtype('<f4'), n=-1, header_bytes=0):
    """
    Read an interleaved-IQ recording into a complex numpy array.

    :param file: string; full path to the IQ file to be ingested; samples must be stored as interleaved I/Q values
    :param dtype: numpy.dtype; describes how the bytes in the fixed-size block of memory corresponding to an array
        item should be interpreted (see https://numpy.org/doc/stable/reference/arrays.dtypes.html#arrays-dtypes-constructing)
    :param n: integer; number of individual I/Q values to read (i.e. n/2 complex samples); -1 (default) reads the whole file
    :param header_bytes: integer; number of bytes to skip at the beginning of the file (default 0)
    :return: numpy array of complex numbers representing the IQ data
    """
    with open(file, 'rb') as data_file:
        data = np.fromfile(data_file, dtype=dtype, count=n, offset=header_bytes)
    # Promote to float64, then reinterpret consecutive (I, Q) pairs as the real and
    # imaginary parts of complex128 samples.
    return data.astype(np.float64).view(np.complex128)


filepath = '/path/to/iq_file'
data = read_iq(filepath, dtype=np.dtype('<f4'), n=1000 * 1024)
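# A quick way to sanity-check ingested IQ data is to look at its power spectrum. The cell below is a
# minimal sketch using numpy's FFT; because the file path above is only a placeholder, it generates a
# synthetic complex tone plus noise (the 1 MHz sample rate and 100 kHz tone are arbitrary illustration
# values). For a real capture, replace `iq` with the array returned by `read_iq`.

# +
import numpy as np
import matplotlib.pyplot as plt

fs = 1_000_000                      # assumed sample rate in Hz (illustrative only)
t = np.arange(4096) / fs
iq = np.exp(2j * np.pi * 100_000 * t) \
    + 0.05 * (np.random.randn(t.size) + 1j * np.random.randn(t.size))

spectrum = np.fft.fftshift(np.fft.fft(iq))
freqs = np.fft.fftshift(np.fft.fftfreq(iq.size, d=1 / fs))
power_db = 20 * np.log10(np.abs(spectrum) + 1e-12)

plt.plot(freqs / 1e3, power_db)
plt.xlabel('Frequency offset (kHz)')
plt.ylabel('Power (dB, uncalibrated)')
plt.title('Power spectrum of the IQ samples')
plt.show()
# -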
IQ_ingest.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import climtas
import climtas.daskutil
import dask

# Build a lazy 1000x1000 random array split into 500x500 chunks
a = dask.array.random.random((1000, 1000), chunks=(500, 500))
a

# Visualize the block structure of the task graph for the (a + 2).mean(axis=1) reduction
climtas.daskutil.visualize_block((a + 2).mean(axis=1))

import pandas

# Daily time axis covering 2001-2009, then count how many days fall in each calendar month
t = pandas.date_range('2001', '2010', closed='left')
t_chunks = pandas.Series(0, index=t).resample('M').count().values

# Concatenate one zero-filled block per month so the time dimension is chunked by calendar month,
# while the two spatial dimensions stay chunked at 50x50
dask.array.concatenate([dask.array.zeros((c, 100, 100), chunks=(-1, 50, 50)) for c in t_chunks])
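# To see the chunking that the list comprehension above produces, assign the concatenated array to a
# name and inspect its chunk layout. This is a small sketch that assumes `t_chunks` from the previous
# cell is still in scope.

# +
monthly = dask.array.concatenate(
    [dask.array.zeros((c, 100, 100), chunks=(-1, 50, 50)) for c in t_chunks]
)
print('shape:     ', monthly.shape)
print('numblocks: ', monthly.numblocks)
print('first year of time chunks (days per month):', monthly.chunks[0][:12])
# -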
Untitled.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Evaluate / view the best evolved network once training has produced log/multi_var_best.out:
#python3 wann_test.py -p p/biped_var.json -i log/multi_var_best.out --nReps 1 --view True --nVals 8

# Clear the logs from any previous multi_var run
# !rm -r log/multi_var*

# Train on the bipedal variance task using the biped_var_multi hyperparameters,
# writing outputs under log/ with the multi_var prefix
# !python3 wann_train.py -n 1 -p p/biped_var_multi.json -o multi_var

#.container { width:100% !important; }
#
fdm/fdm_code/Multi Variance in Bipedal.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # WBAI Aphasia (Pure Alexia) Handson 00 # <!-- green '#007879' --> # # <br> # <div align='center'> # <font size='+2' color='#0070FF' align='right'>17/Sep/2018</font><br><br> # <!--<font size='+2' color='#0070FF' align='center'><strong>浅川 伸一</strong> &lt;<EMAIL>&gt;</font>--> # <font size='+2' color='#0070FF' align='center'><strong><a href="http://www.cis.twcu.ac.jp/~asakawa/">浅川 伸一</a> &lt;<EMAIL>&gt;</strong></font> # <br><br> # </div> # <br> # <img src='https://wba-initiative.org/wp-content/uploads/2015/05/logo.png' width='29%' align='cener'> # <br> # -*-: coding utf-8 -*- import sys import numpy as np import codecs from sklearn.neural_network import MLPClassifier import matplotlib.pyplot as plt # %matplotlib inline # + ### preparation to draw graphs plt.rcParams['figure.figsize'] = (12, 8) # set default size of plots plt.rcParams['image.interpolation'] = 'nearest' plt.rcParams['image.cmap'] = 'gray' np.set_printoptions(precision=3, suppress=True) # - import wbai_aphasia as handson from wbai_aphasia import tanh, sigmoid, relu from wbai_aphasia import xavier_initializer # Here the enviroment that I am computing. # !date; printf '\n' # !uname -a; printf '\n' # !type python; printf '\n' # !python --version; printf '\n' # !gcc --version; printf '\n' # !conda --version; printf '\n' original_file='../data/PMSP96.orig' with codecs.open(original_file,'r') as f: lines = f.readlines() # --- # <img src='./assets/pmsp96Fig1.png' align='center' width='39%'> # <div align='center'>Plaut et. al. (1996) Fig. 1</div> # <p> # # - <NAME>., <NAME>., <NAME>., & <NAME>. (1996). Understanding normal and impaired word reading: Computational principles in quasi-regular domains. _Psychological Review_, 103, 56-115. # # <font color='green'>We will refer to the above as PMSP96 henceforth.</font> # # --- # # --- # <img src="./assets/pmsp96Tab2.png" width="74%" align="center"> # <div align='center'>PMSP96 Table 2</div> # # --- filename = '../data/PMSP96.orig' x = codecs.open(filename,'r','utf-8').readlines() x[:4] # + inp, inpStr, out, outStr, freq = list(), list(), list(), list(), list() wrd_class = {} # dict for i, line in enumerate(lines): x = lines[i].strip().split() if i % 3 == 0: inpStr.append(x[2]) outStr.append(x[3]) freq.append(x[4]) #if x[5] in wrd_class: # wrd_class[x[5]] += 1 #else: # wrd_class[x[5]] = 1 wrd_class[x[5]] = wrd_class[x[5]] + 1 if x[5] in wrd_class else 1 elif i % 3 == 1: inp.append(np.array(x,dtype=np.int32)) else: out.append(np.array(x,dtype=np.int32)) X = np.array(inp) y = np.array(out) # - #n = 0 #for k in wrd_class: # n += wrd_class[k] n = 0 for k in sorted(wrd_class): n += wrd_class[k] if k is not '#' else 0 print('{0:7s} {1:03d}'.format(k, wrd_class[k])) print('---\nTotal: {} except for #'.format(n)) # --- # <img src="./assets/pmsp96A1.png" width="74%" align="center"> # <div align='center'>PMS96 Appendix A</div> # # --- # --- # # Today task that we must tackle with is below # # <img src='./assets/pmsp96Tab7.png' align='center' width='74%'> # <div align='center'>PMSP96 Table 7</div> # # --- # <img src='./assets/1979GlushkoA1.jpg' align='center' width='74%'> # <div align='center'>Glushko (1979) Appendix Table 1</div> # # - <NAME>. (1979). The organization and activation of orthographic knowledge in reading aloud. 
_Journal of Experimental Psyhology: Human Perception and Performance_, 5, 674-691. # # --- nKfold = 5 random_state = 2 perms = np.random.RandomState(random_state).permutation(len(X)) % nKfold # + hidden_layers = (128, 128) params = [{'hidden_layer_sizes': hidden_layers, 'solver': 'sgd', 'learning_rate': 'constant', 'momentum': 0.5, 'nesterovs_momentum': False, 'learning_rate_init': 0.1, 'activation': 'relu'}, {'hidden_layer_sizes': hidden_layers, 'solver': 'sgd', 'learning_rate': 'constant', 'momentum': 0.5, 'nesterovs_momentum': False, 'learning_rate_init': 0.1, 'activation': 'tanh'}, {'hidden_layer_sizes': hidden_layers, 'solver': 'adam', 'learning_rate_init': 0.01, 'activation': 'relu'}, {'hidden_layer_sizes': hidden_layers, 'solver': 'adam', 'learning_rate_init': 0.01, 'activation': 'tanh'} ] labels = [ 'SGD, relu', 'SGD, tanh', 'Adam, relu', 'Adam, tanh'] plot_args = [{'c': 'red', 'linestyle': '-', 'label': 'SGD, relu', 'linewidth': 1}, {'c': 'green', 'linestyle': '--', 'label': 'SGD, tanh', 'linewidth': 3}, {'c': 'blue', 'linestyle': '-', 'label': 'Adam, relu', 'linewidth': 1}, {'c': 'black', 'linestyle': '--', 'label' : 'Adam, tanh', 'linewidth': 3}] # - mlps = [] for label, param, plot_arg in zip(labels, params, plot_args): print('-' * 16) print('training: {}'.format(label)) print('-' * 16) mlp = MLPClassifier(max_iter=200, alpha=1e-4, verbose=False, tol=1e-4, random_state=0, early_stopping=False, **param) for i in range(nKfold): X_train, y_train = X[perms != i], y[perms != i] X_test, y_test = X[perms == i], y[perms == i] mlp.fit(X_train, y_train) plt.plot(mlp.loss_curve_, **plot_arg) print("Training accuracy: {:.3f}".format(mlp.score(X_train, y_train)), end=' ') print("Test accuracy: {:.3f}".format(mlp.score(X_test,y_test))) plt.legend(loc='upper right')
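# The loop above prints per-fold training and test accuracy but does not summarise them. The cell
# below is an optional sketch (it assumes `X`, `y`, `perms`, `nKfold` and `params` from the cells
# above are still in scope) that re-runs one configuration over the same folds and reports the mean
# and standard deviation of the test accuracy.

# +
fold_scores = []
mlp = MLPClassifier(max_iter=200, alpha=1e-4, verbose=False, tol=1e-4,
                    random_state=0, early_stopping=False, **params[2])  # the 'Adam, relu' setting
for i in range(nKfold):
    X_train, y_train = X[perms != i], y[perms != i]
    X_test, y_test = X[perms == i], y[perms == i]
    mlp.fit(X_train, y_train)
    fold_scores.append(mlp.score(X_test, y_test))

print('Adam/relu test accuracy over {} folds: {:.3f} +/- {:.3f}'.format(
    nKfold, np.mean(fold_scores), np.std(fold_scores)))
# -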
examples/2018wbai_aphasia_handson_baseline.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] colab_type="text" id="DweYe9FcbMK_" # ##### Copyright 2018 The TensorFlow Authors. # # # + cellView="form" colab={} colab_type="code" id="AVV2e0XKbJeX" #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # + [markdown] colab_type="text" id="sUtoed20cRJJ" # # tf.data を使ったテキストの読み込み # + [markdown] colab_type="text" id="1ap_W4aQcgNT" # <table class="tfo-notebook-buttons" align="left"> # <td> # <a target="_blank" href="https://www.tensorflow.org/tutorials/load_data/text"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> # </td> # <td> # <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/ja/tutorials/load_data/text.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> # </td> # <td> # <a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/ja/tutorials/load_data/text.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> # </td> # </table> # + [markdown] colab_type="text" id="RSywPQ2n736s" # Note: これらのドキュメントは私たちTensorFlowコミュニティが翻訳したものです。コミュニティによる 翻訳は**ベストエフォート**であるため、この翻訳が正確であることや[英語の公式ドキュメント](https://www.tensorflow.org/?hl=en)の 最新の状態を反映したものであることを保証することはできません。 この翻訳の品質を向上させるためのご意見をお持ちの方は、GitHubリポジトリ[tensorflow/docs](https://github.com/tensorflow/docs)にプルリクエストをお送りください。 コミュニティによる翻訳やレビューに参加していただける方は、 [<EMAIL> メーリングリスト](https://groups.google.com/a/tensorflow.org/forum/#!forum/docs-ja)にご連絡ください。 # + [markdown] colab_type="text" id="NWeQAo0Ec_BL" # このチュートリアルでは、`tf.data.TextLineDataset` を使ってテキストファイルからサンプルを読み込む方法を例示します。`TextLineDataset` は、テキストファイルからデータセットを作成するために設計されています。この中では、元のテキストファイルの一行一行がサンプルです。これは、(たとえば、詩やエラーログのような)基本的に行ベースのテキストデータを扱うのに便利でしょう。 # # このチュートリアルでは、おなじ作品であるホーマーのイリアッドの異なる 3 つの英語翻訳版を使い、テキスト 1 行から翻訳者を特定するモデルを訓練します。 # + [markdown] colab_type="text" id="fgZ9gjmPfSnK" # ## 設定 # + colab={} colab_type="code" id="baYFZMW_bJHh" from __future__ import absolute_import, division, print_function, unicode_literals try: # # %tensorflow_version only exists in Colab. 
# !pip install tf-nightly except Exception: pass import tensorflow as tf import tensorflow_datasets as tfds import os # + [markdown] colab_type="text" id="YWVWjyIkffau" # 3 つの翻訳のテキストは次のとおりです。 # # - [<NAME>](https://en.wikipedia.org/wiki/William_Cowper) — [text](https://storage.googleapis.com/download.tensorflow.org/data/illiad/cowper.txt) # # - [<NAME>](https://en.wikipedia.org/wiki/Edward_Smith-Stanley,_14th_Earl_of_Derby) — [text](https://storage.googleapis.com/download.tensorflow.org/data/illiad/derby.txt) # # - [<NAME>](https://en.wikipedia.org/wiki/Samuel_Butler_%28novelist%29) — [text](https://storage.googleapis.com/download.tensorflow.org/data/illiad/butler.txt) # # このチュートリアルで使われているテキストファイルは、ヘッダ、フッタ、行番号、章のタイトルの削除など、いくつかの典型的な前処理を行ったものです。前処理後のファイルをダウンロードしましょう。 # + colab={} colab_type="code" id="4YlKQthEYlFw" DIRECTORY_URL = 'https://storage.googleapis.com/download.tensorflow.org/data/illiad/' FILE_NAMES = ['cowper.txt', 'derby.txt', 'butler.txt'] for name in FILE_NAMES: text_dir = tf.keras.utils.get_file(name, origin=DIRECTORY_URL+name) parent_dir = os.path.dirname(text_dir) parent_dir # + [markdown] colab_type="text" id="q3sDy6nuXoNp" # ## テキストをデータセットに読み込む # # ファイルをイテレートし、それぞれを別々のデータセットに読み込みます。 # # サンプルはそれぞれにラベル付けが必要なので、ラベル付け関数を適用するために `tf.data.Dataset.map` を使います。このメソッドは、データセット中のすべてのサンプルをイテレートし、(`example, label`)というペアを返します。 # + colab={} colab_type="code" id="K0BjCOpOh7Ch" def labeler(example, index): return example, tf.cast(index, tf.int64) labeled_data_sets = [] for i, file_name in enumerate(FILE_NAMES): lines_dataset = tf.data.TextLineDataset(os.path.join(parent_dir, file_name)) labeled_dataset = lines_dataset.map(lambda ex: labeler(ex, i)) labeled_data_sets.append(labeled_dataset) # + [markdown] colab_type="text" id="M8PHK5J_cXE5" # ラベル付けの終わったデータセットを結合して一つのデータセットにし、シャッフルします。 # + colab={} colab_type="code" id="6jAeYkTIi9-2" BUFFER_SIZE = 50000 BATCH_SIZE = 64 TAKE_SIZE = 5000 # + colab={} colab_type="code" id="Qd544E-Sh63L" all_labeled_data = labeled_data_sets[0] for labeled_dataset in labeled_data_sets[1:]: all_labeled_data = all_labeled_data.concatenate(labeled_dataset) all_labeled_data = all_labeled_data.shuffle( BUFFER_SIZE, reshuffle_each_iteration=False) # + [markdown] colab_type="text" id="r4JEHrJXeG5k" # `tf.data.Dataset.take` と `print` を使って、`(example, label)` のペアがどのようなものかを見ることができます。`numpy` プロパティがそれぞれのテンソルの値を示します。 # + colab={} colab_type="code" id="gywKlN0xh6u5" for ex in all_labeled_data.take(5): print(ex) # + [markdown] colab_type="text" id="5rrpU2_sfDh0" # ## テキスト行を数字にエンコードする # # 機械学習モデルが扱うのは単語ではなくて数字であるため、文字列は数字のリストに変換する必要があります。このため、一意の単語を一意の数字にマッピングします。 # # ### ボキャブラリーの構築 # # まず最初に、テキストをトークン化し、個々の一意な単語の集まりとして、ボキャブラリーを構築します。これを行うには、TensorFlow やPython を使ういくつかの方法があります。ここでは次のようにします。 # # 1. 各サンプルの `numpy` 値をイテレートします。 # 2. `tfds.features.text.Tokenizer` を使って、それをトークンに分割します。 # 3. 重複を排除するため、トークンを Python の集合に集約します。 # 4. 
あとで使用するため、ボキャブラリーのサイズを取得します。 # + colab={} colab_type="code" id="YkHtbGnDh6mg" tokenizer = tfds.features.text.Tokenizer() vocabulary_set = set() for text_tensor, _ in all_labeled_data: some_tokens = tokenizer.tokenize(text_tensor.numpy()) vocabulary_set.update(some_tokens) vocab_size = len(vocabulary_set) vocab_size # + [markdown] colab_type="text" id="0W35VJqAh9zs" # ### サンプルをエンコードする # # `vocabulary_set` を `tfds.features.text.TokenTextEncoder` に渡してエンコーダーを作成します。エンコーダーの `encode` メソッドは、テキスト文字列を引数にとり、整数のリストを返します。 # + colab={} colab_type="code" id="gkxJIVAth6j0" encoder = tfds.features.text.TokenTextEncoder(vocabulary_set) # + [markdown] colab_type="text" id="v6S5Qyabi-vo" # 1行だけにこれを適用し、出力がどの様になるか確かめることができます。 # + colab={} colab_type="code" id="jgxPZaxUuTbk" example_text = next(iter(all_labeled_data))[0].numpy() print(example_text) # + colab={} colab_type="code" id="XoVpKR3qj5yb" encoded_example = encoder.encode(example_text) print(encoded_example) # + [markdown] colab_type="text" id="p9qHM0v8k_Mg" # 次に、このエンコーダーを `tf.py_function` でラッピングして、データセットの `map` メソッドに渡し、データセットに適用します。 # + colab={} colab_type="code" id="HcIQ7LOTh6eT" def encode(text_tensor, label): encoded_text = encoder.encode(text_tensor.numpy()) return encoded_text, label def encode_map_fn(text, label): # py_func doesn't set the shape of the returned tensors. encoded_text, label = tf.py_function(encode, inp=[text, label], Tout=(tf.int64, tf.int64)) # `tf.data.Datasets` work best if all components have a shape set # so set the shapes manually: encoded_text.set_shape([None]) label.set_shape([]) return encoded_text, label all_encoded_data = all_labeled_data.map(encode_map_fn) # + [markdown] colab_type="text" id="_YZToSXSm0qr" # ## データセットを、テスト用と訓練用のバッチに分割する # # `tf.data.Dataset.take`と`tf.data.Dataset.skip`を使って、小さなテスト用データセットと、より大きな訓練用セットを作成します。 # # モデルに渡す前に、データセットをバッチ化する必要があります。通常、バッチの中のサンプルはおなじサイズと形状である必要があります。しかし、これらのデータセットの中のサンプルはすべておなじサイズではありません。テキストの各行の単語数は異なっています。このため、(`batch`の代わりに)`tf.data.Dataset.padded_batch` メソッドを使ってサンプルをおなじサイズにパディングします。 # + colab={} colab_type="code" id="r-rmbijQh6bf" train_data = all_encoded_data.skip(TAKE_SIZE).shuffle(BUFFER_SIZE) train_data = train_data.padded_batch(BATCH_SIZE) test_data = all_encoded_data.take(TAKE_SIZE) test_data = test_data.padded_batch(BATCH_SIZE) # + [markdown] colab_type="text" id="Xdz7SVwmqi1l" # もう、`test_data` と `train_data` は、(`example, label`)というペアのコレクションではなく、バッチのコレクションです。それぞれのバッチは、(**たくさんのサンプル**, **たくさんのラベル**)という配列のペアです。 # # 見てみましょう。 # + colab={} colab_type="code" id="kMslWfuwoqpB" sample_text, sample_labels = next(iter(test_data)) sample_text[0], sample_labels[0] # + [markdown] colab_type="text" id="UI4I6_Sa0vWu" # (ゼロをパディングに使用した)新しいトークン番号を1つ導入したので、ボキャブラリーサイズは1つ増えています。 # + colab={} colab_type="code" id="IlD1Lli91vuc" vocab_size += 1 # + [markdown] colab_type="text" id="K8SUhGFNsmRi" # ## モデルを構築する # + colab={} colab_type="code" id="QJgI1pow2YR9" model = tf.keras.Sequential() # + [markdown] colab_type="text" id="wi0iiKLTKdoF" # 最初の層は、整数表現を密なベクトル埋め込みに変換します。詳細は[単語埋め込み](../../tutorials/sequences/word_embeddings)のチュートリアルを参照ください。 # + colab={} colab_type="code" id="DR6-ctbY638P" model.add(tf.keras.layers.Embedding(vocab_size, 64)) # + [markdown] colab_type="text" id="_8OJOPohKh1q" # 次の層は[Long Short-Term Memory](http://colah.github.io/posts/2015-08-Understanding-LSTMs/) 層です。この層により、モデルは単語をほかの単語の文脈の中で解釈します。LSTM の Bidirectional ラッパーにより、データポイントを、その前とその後のデータポイントとの関連で学習することができます。 # + colab={} colab_type="code" id="x6rnq6DN_WUs" model.add(tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(64))) # + 
[markdown] colab_type="text" id="cdffbMr5LF1g" # 最後に、一つ以上の全結合層があり、最後の層は出力層です。出力層はラベルすべての確率を生成します。もっと複雑なも確率の高いラベルが、モデルが予測するサンプルのラベルです。 # + colab={} colab_type="code" id="QTEaNSnLCsv5" # 1 つ以上の Dense 層 # `for` 行の中のリストを編集して、層のサイズの実験をしてください for units in [64, 64]: model.add(tf.keras.layers.Dense(units, activation='relu')) # 出力層 最初の引数はラベルの数 model.add(tf.keras.layers.Dense(3, activation='softmax')) # + [markdown] colab_type="text" id="zLHPU8q5DLi_" # 最後にモデルをコンパイルします。ソフトマックスによるカテゴリー分類モデルでは、損失関数として `sparse_categorical_crossentropy` を使用します。ほかのオプティマイザを使うこともできますが、`adam` がよく使われます。 # + colab={} colab_type="code" id="pkTBUVO4h6Y5" model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy']) # + [markdown] colab_type="text" id="DM-HLo5NDhql" # ## モデルを訓練する # # このモデルをこのデータに適用すると(約83%の)まともな結果が得られます。 # + colab={} colab_type="code" id="aLtO33tNh6V8" model.fit(train_data, epochs=3, validation_data=test_data) # + colab={} colab_type="code" id="KTPCYf_Jh6TH" eval_loss, eval_acc = model.evaluate(test_data) print('\nEval loss: {}, Eval accuracy: {}'.format(eval_loss, eval_acc))
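# + [markdown]
# As an optional final step, the sketch below runs the trained model on the `sample_text` batch taken
# from the test set earlier and compares the predicted translator index with the true label for the
# first few lines. It assumes `model`, `sample_text` and `sample_labels` from the cells above are
# still in scope.

# +
predictions = model.predict(sample_text)
predicted_labels = tf.argmax(predictions, axis=1)

for i in range(5):
    print('predicted:', int(predicted_labels[i]), ' actual:', int(sample_labels[i]))
# -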
site/ja/tutorials/load_data/text.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # %matplotlib inline import time import numpy as np import pandas as pd import imageio as io import matplotlib.pyplot as plt from mpl_toolkits.axes_grid1 import ImageGrid from os import listdir, makedirs, getcwd, remove from os.path import isfile, join, abspath, exists, isdir, expanduser # - # !ls MURA-v1.0/ data_dir = join(getcwd(),'MURA-v1.0') train_dir = join(data_dir, 'train') train_csv = join(data_dir,'train.csv') val_dir = join(data_dir, 'valid') val_csv = join(data_dir,'valid.csv') test_dir = join(data_dir, 'test') # some sanity checks on our directory structure . . . assert isdir(data_dir) and isdir(train_dir) and isdir(val_dir) and isdir(test_dir) assert exists(train_csv) and isfile(train_csv) and exists(val_csv) and isfile(val_csv) # ## Loading the dataset directly # + train_df = pd.read_csv(train_csv, names=['filename','abnormal']) fig = plt.figure(1, figsize=(16, 4)) plt.title('Normal X-ray?') plt.axis('off') grid = ImageGrid(fig, 111, nrows_ncols=(1, 5), axes_pad=0.05) left, width = .25, .5 bottom, height = .25, .5 right = left + width top = bottom + height img_idx=0 for (i, row) in train_df.sample(5).iterrows(): ax = grid[img_idx] ax.text(0.5*(left+right), 0.5*(bottom+top), row.abnormal,ha='center',va='center', fontsize=20, color='red',transform=ax.transAxes) ax.grid(False) img = imageio.imread(row.filename) print(img.shape) ax.imshow(img) img_idx+=1 plt.show() print(train_df.describe) # - # ## Loading the dataset in pytorch # + import torch.utils.data as data from skimage import io, transform class MuraDataset(data.Dataset): def __init__(self, csv_file, root_dir, transform=None, loader=default_loader): self.mura_dataset_df = pd.read_csv(csv_file) self.root_dir = root_dir self.transform = transform def __len__(self): return len(self.mura_dataset_df) def __getitem__(self, idx): img_name = join( self.mura_dataset_df.iloc[idx, 0]) image = Image.open(img_name).convert('RGB') label = self.mura_dataset_df.iloc[idx, 1:].as_matrix()[0] if self.transform is not None: image = self.transform(image) return image,label import torch import torchvision from torchvision.utils import make_grid from torchvision import transforms, utils import torchvision.transforms as transforms def imshow(img): plt.figure(1, figsize=(16, 4)) plt.imshow(np.transpose(img.numpy(), (1, 2, 0))) plt.pause(0.001) # get some random training images trs = transforms.Compose([ transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), ]) trainset = MuraDataset(csv_file=train_csv, root_dir=data_dir, transform = trs) trainloader = torch.utils.data.DataLoader(trainset, batch_size=8, shuffle=True, num_workers=8) print(len(trainset)) dataiter = iter(trainloader) images, labels = dataiter.next() imshow(torchvision.utils.make_grid(images)) images, labels = dataiter.next() imshow(torchvision.utils.make_grid(images)) # -
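# The PyTorch loading cell above leaves a few pieces implicit: it references `default_loader` and
# `Image` without importing them, and the first cell imports `imageio as io` while calling
# `imageio.imread`. The sketch below is a minimal, self-contained variant of the same `Dataset`
# pattern with the imports it needs; the CSV layout (`filename,abnormal`) and the transform pipeline
# follow the earlier cells, and the class is given a different name so it does not clobber the one
# defined above.

# +
import pandas as pd
import torch.utils.data as data
from PIL import Image
from torchvision import transforms


class MuraCsvDataset(data.Dataset):
    """Yields (image, label) pairs from a MURA-style csv of (filename, abnormal) rows."""

    def __init__(self, csv_file, transform=None):
        self.df = pd.read_csv(csv_file, names=['filename', 'abnormal'])
        self.transform = transform

    def __len__(self):
        return len(self.df)

    def __getitem__(self, idx):
        row = self.df.iloc[idx]
        image = Image.open(row['filename']).convert('RGB')
        label = int(row['abnormal'])
        if self.transform is not None:
            image = self.transform(image)
        return image, label


trs = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
])
# dataset = MuraCsvDataset(train_csv, transform=trs)  # train_csv as defined above
# -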
exploring-data.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + colab={} colab_type="code" id="R12Yn6W1dt9t" """ You can run either this notebook locally (if you have all the dependencies and a GPU) or on Google Colab. Instructions for setting up Colab are as follows: 1. Open a new Python 3 notebook. 2. Import this notebook from GitHub (File -> Upload Notebook -> "GITHUB" tab -> copy/paste GitHub URL) 3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select "GPU" for hardware accelerator) 4. Run this cell to set up dependencies. """ # If you're using Google Colab and not running locally, run this cell. ## Install dependencies # !pip install wget # !apt-get install sox libsndfile1 ffmpeg # !pip install unidecode # ## Install NeMo # !python -m pip install --upgrade git+https://github.com/NVIDIA/NeMo.git@main#egg=nemo_toolkit[asr] ## Install TorchAudio # !pip install torchaudio>=0.6.0 -f https://download.pytorch.org/whl/torch_stable.html ## Grab the config we'll use in this example # !mkdir configs # - # # Introduction # # This VAD tutorial is based on the MatchboxNet model from the paper "[MatchboxNet: 1D Time-Channel Separable Convolutional Neural Network Architecture for Speech Commands Recognition](https://arxiv.org/abs/2004.08531)" with a modified decoder head to suit classification tasks. # # The notebook will follow the steps below: # # - Dataset preparation: Instruction of downloading datasets. And how to convert it to a format suitable for use with nemo_asr # - Audio preprocessing (feature extraction): signal normalization, windowing, (log) spectrogram (or mel scale spectrogram, or MFCC) # # - Data augmentation using SpecAugment "[SpecAugment: A Simple Data Augmentation Method for Automatic Speech Recognition](https://arxiv.org/abs/1904.08779)" to increase number of data samples. # # - Develop a small Neural classification model which can be trained efficiently. # # - Model training on the Google Speech Commands dataset and Freesound dataset in NeMo. # # - Evaluation of error cases of the model by audibly hearing the samples # # - Add more evaluation metrics and transfer learning/fine tune # # + colab={} colab_type="code" id="I62_LJzc-p2b" # Some utility imports import os from omegaconf import OmegaConf # + [markdown] colab={} colab_type="text" id="K_M8wpkwd7d7" # # Data Preparation # # ## Download the background data # We suggest to use the background categories of [freesound](https://freesound.org/) dataset as our non-speech/background data. # We provide scripts for downloading and resampling it. Please have a look at [NeMo docs VAD Data Preparation]( https://docs.nvidia.com/deeplearning/nemo/developer_guide/en/v0.11.0/voice_activity_detection/tutorial.html#data-preparation). Note that downloading this dataset may takes hours. # # **NOTE:** Here, this tutorial serves as a demonstration on how to train and evaluate models for vad using NeMo. We avoid using freesound dataset, and use `_background_noise_` category in Google Speech Commands Dataset as non-speech/background data. # - # ## Download the speech data # # We will use the open source Google Speech Commands Dataset (we will use V2 of the dataset for the tutorial, but require very minor changes to support V1 dataset) as our speech data. Google Speech Commands Dataset V2 will take roughly 6GB disk space. 
These scripts below will download the dataset and convert it to a format suitable for use with nemo_asr. # # # **NOTE**: You may additionally pass `--test_size` or `--val_size` flag for spliting train val and test data. # You may additionally pass `--seg_len` flag for indicating the segment length. Dafault is 0.63s. # # **NOTE**: You may additionally pass a `--rebalance_method='fixed|over|under'` at the end of the script to rebalance the class samples in the manifest. # * 'fixed': Fixed number of samples for each class. For example, train 500, val 100, and test 200. (Change number in script if you want) # * 'over': Oversampling rebalance method # * 'under': Undersampling rebalance method # # **NOTE**: We only take a small subset of speech data for demonstration, if you want to use entire speech data. Don't forget to **delete `--demo`** and change rebalance method/number. `_background_noise_` category only has **6** audio files. So we would like to generate more based on the audio files to enlarge our background training data. If you want to use your own background noise data, just change the `background_data_root` and **delete `--demo`** # tmp = 'src' data_folder = 'data' if not os.path.exists(tmp): os.makedirs(tmp) if not os.path.exists(data_folder): os.makedirs(data_folder) script = os.path.join(tmp, 'process_vad_data.py') if not os.path.exists(script): # !wget -P $tmp https://raw.githubusercontent.com/NVIDIA/NeMo/main/scripts/process_vad_data.py speech_data_root = os.path.join(data_folder, 'google_dataset_v2') background_data_root = os.path.join(data_folder, 'google_dataset_v2/google_speech_recognition_v2/_background_noise_')# your <resampled freesound data directory> out_dir = os.path.join(data_folder, 'manifest') if not os.path.exists(speech_data_root): os.mkdir(speech_data_root) # This may take a few minutes # !python $script \ # --out_dir={out_dir} \ # --speech_data_root={speech_data_root} \ # --background_data_root={background_data_root}\ # --log \ # --demo \ # --rebalance_method='fixed' # + [markdown] colab_type="text" id="TTsxp0nZ1zqo" # ## Preparing the manifest file # # Manifest files are the data structure used by NeMo to declare a few important details about the data : # # 1) `audio_filepath`: Refers to the path to the raw audio file <br> # 2) `label`: The class label (speech or background) of this sample <br> # 3) `duration`: The length of the audio file, in seconds.<br> # 4) `offset`: The start of the segment, in seconds. # + colab={} colab_type="code" id="ytTFGVe0g9wk" # change below if you don't have or don't want to use rebalanced data train_dataset = 'data/manifest/balanced_background_training_manifest.json,data/manifest/balanced_speech_training_manifest.json' val_dataset = 'data/manifest/background_validation_manifest.json,data/manifest/speech_validation_manifest.json' test_dataset = 'data/manifest/balanced_background_testing_manifest.json,data/manifest/balanced_speech_testing_manifest.json' # + [markdown] colab_type="text" id="s0SZy9SEhOBf" # ## Read a few rows of the manifest file # # Manifest files are the data structure used by NeMo to declare a few important details about the data : # # 1) `audio_filepath`: Refers to the path to the raw audio file <br> # 2) `command`: The class label (or speech command) of this sample <br> # 3) `duration`: The length of the audio file, in seconds. 
# - sample_test_dataset = test_dataset.split(',')[0] # + colab={} colab_type="code" id="HYBidCMIhKQV" # !head -n 5 {sample_test_dataset} # - # # Training - Preparation # # We will be training a MatchboxNet model from paper "[MatchboxNet: 1D Time-Channel Separable Convolutional Neural Network Architecture for Speech Commands Recognition](https://arxiv.org/abs/2004.08531)" evolved from [QuartzNet](https://arxiv.org/pdf/1910.10261.pdf) model. The benefit of QuartzNet over JASPER models is that they use Separable Convolutions, which greatly reduce the number of parameters required to get good model accuracy. # # MatchboxNet models generally follow the model definition pattern QuartzNet-[BxRXC], where B is the number of blocks, R is the number of convolutional sub-blocks, and C is the number of channels in these blocks. Each sub-block contains a 1-D masked convolution, batch normalization, ReLU, and dropout. # # + colab={} colab_type="code" id="ieAPOM9thTN2" # NeMo's "core" package import nemo # NeMo's ASR collection - this collections contains complete ASR models and # building blocks (modules) for ASR import nemo.collections.asr as nemo_asr # + [markdown] colab_type="text" id="ss9gLcDv30jI" # ## Model Configuration # The MatchboxNet Model is defined in a config file which declares multiple important sections. # # They are: # # 1) `model`: All arguments that will relate to the Model - preprocessors, encoder, decoder, optimizer and schedulers, datasets and any other related information # # 2) `trainer`: Any argument to be passed to PyTorch Lightning # + MODEL_CONFIG = "matchboxnet_3x1x64_vad.yaml" if not os.path.exists(f"configs/{MODEL_CONFIG}"): # !wget -P configs/ "https://raw.githubusercontent.com/NVIDIA/NeMo/main/examples/asr/conf/{MODEL_CONFIG}" # + colab={} colab_type="code" id="yoVAs9h1lfci" # This line will print the entire config of the MatchboxNet model config_path = f"configs/{MODEL_CONFIG}" config = OmegaConf.load(config_path) print(config.pretty()) # + colab={} colab_type="code" id="m2lJPR0a3qww" # Preserve some useful parameters labels = config.model.labels sample_rate = config.sample_rate # + [markdown] colab_type="text" id="8_pmjeed78rJ" # ### Setting up the datasets within the config # # If you'll notice, there are a few config dictionaries called `train_ds`, `validation_ds` and `test_ds`. These are configurations used to setup the Dataset and DataLoaders of the corresponding config. # # # + colab={} colab_type="code" id="DIe6Qfs18MiQ" print(config.model.train_ds.pretty()) # + [markdown] colab_type="text" id="Fb01hl868Uc3" # ### `???` inside configs # # You will often notice that some configs have `???` in place of paths. This is used as a placeholder so that the user can change the value at a later time. # # Let's add the paths to the manifests to the config above. # + colab={} colab_type="code" id="m181HXev8T97" config.model.train_ds.manifest_filepath = train_dataset config.model.validation_ds.manifest_filepath = val_dataset config.model.test_ds.manifest_filepath = test_dataset # + [markdown] colab_type="text" id="pbXngoCM5IRG" # ## Building the PyTorch Lightning Trainer # # NeMo models are primarily PyTorch Lightning modules - and therefore are entirely compatible with the PyTorch Lightning ecosystem! # # Let's first instantiate a Trainer object! 
# + colab={} colab_type="code" id="bYtvdBlG5afU" import torch import pytorch_lightning as pl # + colab={} colab_type="code" id="jRN18CdH51nN" print("Trainer config - \n") print(config.trainer.pretty()) # + colab={} colab_type="code" id="gHf6cHvm6H9b" # Lets modify some trainer configs for this demo # Checks if we have GPU available and uses it cuda = 1 if torch.cuda.is_available() else 0 config.trainer.gpus = cuda # Reduces maximum number of epochs to 5 for quick demonstration config.trainer.max_epochs = 5 # Remove distributed training flags config.trainer.distributed_backend = None # + colab={} colab_type="code" id="UB9nr7G56G3L" trainer = pl.Trainer(**config.trainer) # + [markdown] colab_type="text" id="2wt603Vq6sqX" # ## Setting up a NeMo Experiment # # NeMo has an experiment manager that handles logging and checkpointing for us, so let's use it ! # + colab={} colab_type="code" id="TfWJFg7p6Ezf" from nemo.utils.exp_manager import exp_manager # + colab={} colab_type="code" id="SC-QPoW44-p2" exp_dir = exp_manager(trainer, config.get("exp_manager", None)) # + colab={} colab_type="code" id="Yqi6rkNR7Dph" # The exp_dir provides a path to the current experiment for easy access exp_dir = str(exp_dir) exp_dir # + [markdown] colab_type="text" id="t0zz-vHH7Uuh" # ## Building the MatchboxNet Model # # MatchboxNet is an ASR model with a classification task - it generates one label for the entire provided audio stream. Therefore we encapsulate it inside the `EncDecClassificationModel` as follows. # + colab={} colab_type="code" id="FRMrKhyf5vhy" vad_model = nemo_asr.models.EncDecClassificationModel(cfg=config.model, trainer=trainer) # + [markdown] colab_type="text" id="jA9UND-Q_oyw" # # Training a MatchboxNet Model # # As MatchboxNet is inherently a PyTorch Lightning Model, it can easily be trained in a single line - `trainer.fit(model)` ! # # # # Training the model # # Even with such a small model (73k parameters), and just 5 epochs (should take just a few minutes to train), you should be able to get a test set accuracy score around 98.83% (this result is for the [freesound](https://freesound.org/) dataset) with enough training data. # # **NOTE:** If you follow our tutorial and user the generated background data, you may notice the below results are acceptable, but please remember, this tutorial is only for **demonstration** and the dataset is not good enough. Please change background dataset and train with enough data for improvement! # # Experiment with increasing the number of epochs or with batch size to see how much you can improve the score! # # **NOTE:** Noise robustness is quite important for VAD task. Below we list the augmentation we used in this demo. # Please refer to [05_Online_Noise_Augmentation.ipynb](https://github.com/NVIDIA/NeMo/blob/candidate/tutorials/asr/05_Online_Noise_Augmentation.ipynb) for understanding noise augmentation in NeMo. 
# # # # - # Noise augmentation print(config.model.train_ds.augmentor.pretty()) # noise augmentation print(config.model.spec_augment.pretty()) # SpecAug data augmentation # If you are interested in **pretrained** model, please have a look at [Transfer Leaning & Fine-tuning on a new dataset](#Transfer-Leaning-&-Fine-tuning-on-a-new-dataset) and incoming tutorial 07 Offline_and_Online_VAD_Demo # # + [markdown] colab_type="text" id="3ngKcRFqBfIF" # ### Monitoring training progress # # Before we begin training, lets first create a Tensorboard visualization to monitor progress # # + colab={} colab_type="code" id="Cyfec0PDBsXa" try: from google import colab COLAB_ENV = True except (ImportError, ModuleNotFoundError): COLAB_ENV = False # Load the TensorBoard notebook extension if COLAB_ENV: # %load_ext tensorboard # %tensorboard --logdir {exp_dir} else: print("To use tensorboard, please use this notebook in a Google Colab environment.") # + [markdown] colab_type="text" id="ZApuELDIKQgC" # ### Training for 5 epochs # We see below that the model begins to get modest scores on the validation set after just 5 epochs of training # + colab={} colab_type="code" id="9xiUUJlH5KdD" trainer.fit(vad_model) # - # # Fast Training # # We can dramatically improve the time taken to train this model by using Multi GPU training along with Mixed Precision. # # For multi-GPU training, take a look at [the PyTorch Lightning Multi-GPU training section](https://pytorch-lightning.readthedocs.io/en/latest/multi_gpu.html) # # For mixed-precision training, take a look at [the PyTorch Lightning Mixed-Precision training section](https://pytorch-lightning.readthedocs.io/en/latest/apex.html) # # ```python # # Mixed precision: # trainer = Trainer(amp_level='O1', precision=16) # # # Trainer with a distributed backend: # trainer = Trainer(gpus=2, num_nodes=2, distributed_backend='ddp') # # # Of course, you can combine these flags as well. # ``` # + [markdown] colab_type="text" id="Dkds1jSvKgSc" # # Evaluation # # ## Evaluation on the Test set # # Lets compute the final score on the test set via `trainer.test(model)` # + colab={} colab_type="code" id="mULTrhEJ_6wV" trainer.test(vad_model, ckpt_path=None) # + [markdown] colab_type="text" id="ifDHkunjM8y6" # ## Evaluation of incorrectly predicted samples # # Given that we have a trained model, which performs reasonably well, let's try to listen to the samples where the model is least confident in its predictions. # + [markdown] colab_type="text" id="PcJrZ72sNCkM" # ### Extract the predictions from the model # # We want to possess the actual logits of the model instead of just the final evaluation score, so we can define a function to perform the forward step for us without computing the final loss. Instead, we extract the logits per batch of samples provided. # + [markdown] colab_type="text" id="rvxdviYtOFjK" # ### Accessing the data loaders # # We can utilize the `setup_test_data` method in order to instantiate a data loader for the dataset we want to analyze. # # For convenience, we can access these instantiated data loaders using the following accessors - `vad_model._train_dl`, `vad_model._validation_dl` and `vad_model._test_dl`. # + colab={} colab_type="code" id="CB0QZCAmM656" vad_model.setup_test_data(config.model.test_ds) test_dl = vad_model._test_dl # + [markdown] colab_type="text" id="rA7gXawcPoip" # ### Partial Test Step # # Below we define a utility function to perform most of the test step. 
For reference, the test step is defined as follows: # # ```python # def test_step(self, batch, batch_idx, dataloader_idx=0): # audio_signal, audio_signal_len, labels, labels_len = batch # logits = self.forward(input_signal=audio_signal, input_signal_length=audio_signal_len) # loss_value = self.loss(logits=logits, labels=labels) # correct_counts, total_counts = self._accuracy(logits=logits, labels=labels) # return {'test_loss': loss_value, 'test_correct_counts': correct_counts, 'test_total_counts': total_counts} # ``` # + colab={} colab_type="code" id="sBsDOm5ROpQI" @torch.no_grad() def extract_logits(model, dataloader): logits_buffer = [] label_buffer = [] # Follow the above definition of the test_step for batch in dataloader: audio_signal, audio_signal_len, labels, labels_len = batch logits = model(input_signal=audio_signal, input_signal_length=audio_signal_len) logits_buffer.append(logits) label_buffer.append(labels) print(".", end='') print() print("Finished extracting logits !") logits = torch.cat(logits_buffer, 0) labels = torch.cat(label_buffer, 0) return logits, labels # + colab={} colab_type="code" id="mZSdprUlOuoV" cpu_model = vad_model.cpu() cpu_model.eval() logits, labels = extract_logits(cpu_model, test_dl) print("Logits:", logits.shape, "Labels :", labels.shape) # + colab={} colab_type="code" id="9Wd0ukgNXRBz" # Compute accuracy - `_accuracy` is a PyTorch Lightning Metric ! correct_count, total_count = cpu_model._accuracy(logits=logits, labels=labels) print("Accuracy : ", float(correct_count * 100.) / float(total_count)) # + [markdown] colab_type="text" id="NwN9OSqCauSH" # ### Filtering out incorrect samples # Let us now filter out the incorrectly labeled samples from the total set of samples in the test set # + colab={} colab_type="code" id="N1YJvsmcZ0uE" import librosa import json import IPython.display as ipd # + colab={} colab_type="code" id="jZAT9yGAayvR" # First lets create a utility class to remap the integer class labels to actual string label class ReverseMapLabel: def __init__(self, data_loader): self.label2id = dict(data_loader.dataset.label2id) self.id2label = dict(data_loader.dataset.id2label) def __call__(self, pred_idx, label_idx): return self.id2label[pred_idx], self.id2label[label_idx] # + colab={} colab_type="code" id="X3GSXvYHa4KJ" # Next, lets get the indices of all the incorrectly labeled samples sample_idx = 0 incorrect_preds = [] rev_map = ReverseMapLabel(test_dl) # Remember, evaluated_tensor = (loss, logits, labels) probs = torch.softmax(logits, dim=-1) probas, preds = torch.max(probs, dim=-1) incorrect_ids = (preds != labels).nonzero() for idx in incorrect_ids: proba = float(probas[idx][0]) pred = int(preds[idx][0]) label = int(labels[idx][0]) idx = int(idx[0]) + sample_idx incorrect_preds.append((idx, *rev_map(pred, label), proba)) print(f"Num test samples : {total_count.item()}") print(f"Num errors : {len(incorrect_preds)}") # First lets sort by confidence of prediction incorrect_preds = sorted(incorrect_preds, key=lambda x: x[-1], reverse=False) # + [markdown] colab_type="text" id="0JgGo71gcDtD" # ### Examine a subset of incorrect samples # Let's print out the (test id, predicted label, ground truth label, confidence) tuple of first 20 incorrectly labeled samples # + colab={} colab_type="code" id="x37wNJsNbcw0" for incorrect_sample in incorrect_preds[:20]: print(str(incorrect_sample)) # + [markdown] colab_type="text" id="tDnwYsDKcLv9" # ### Define a threshold below which we designate a model's prediction as "low confidence" # + colab={} 
colab_type="code" id="dpvzeh4PcGJs" # Filter out how many such samples exist low_confidence_threshold = 0.8 count_low_confidence = len(list(filter(lambda x: x[-1] <= low_confidence_threshold, incorrect_preds))) print(f"Number of low confidence predictions : {count_low_confidence}") # + [markdown] colab_type="text" id="ERXyXvCAcSKR" # ### Lets hear the samples which the model has least confidence in ! # + colab={} colab_type="code" id="kxjNVjX8cPNP" # First lets create a helper function to parse the manifest files def parse_manifest(manifest): data = [] for line in manifest: line = json.loads(line) data.append(line) return data # + colab={} colab_type="code" id="IWxqw5k-cUVd" # Next, lets create a helper function to actually listen to certain samples def listen_to_file(sample_id, pred=None, label=None, proba=None): # Load the audio waveform using librosa filepath = test_samples[sample_id]['audio_filepath'] audio, sample_rate = librosa.load(filepath, offset = test_samples[sample_id]['offset'], duration = test_samples[sample_id]['duration']) if pred is not None and label is not None and proba is not None: print(f"filepath: {filepath}, Sample : {sample_id} Prediction : {pred} Label : {label} Confidence = {proba: 0.4f}") else: print(f"Sample : {sample_id}") return ipd.Audio(audio, rate=sample_rate) # + colab={} colab_type="code" id="HPj1tFNIcXaU" import json # Now lets load the test manifest into memory all_test_samples = [] for _ in test_dataset.split(','): print(_) with open(_, 'r') as test_f: test_samples = test_f.readlines() all_test_samples.extend(test_samples) print(len(all_test_samples)) test_samples = parse_manifest(all_test_samples) # + colab={} colab_type="code" id="Nt7b_uiScZcC" # Finally, lets listen to all the audio samples where the model made a mistake # Note: This list of incorrect samples may be quite large, so you may choose to subsample `incorrect_preds` count = min(count_low_confidence, 20) # replace this line with just `count_low_confidence` to listen to all samples with low confidence for sample_id, pred, label, proba in incorrect_preds[:count]: ipd.display(listen_to_file(sample_id, pred=pred, label=label, proba=proba)) # - # ## Adding evaluation metrics # # Here is an example of how to use more metrics (e.g. from pytorch_lightning) to evaluate your result. # # **Note:** If you would like to add metrics for training and testing, have a look at # ```python # NeMo/nemo/collections/common/metrics # ``` # from pytorch_lightning.metrics.functional import confusion_matrix _, pred = logits.topk(1, dim=1, largest=True, sorted=True) confusion_matrix(pred=pred, target=labels) # # Transfer Leaning & Fine-tuning on a new dataset # For transfer learning, please refer to [**Transfer learning** part of ASR tutorial](https://github.com/NVIDIA/NeMo/blob/candidate/tutorials/asr/01_ASR_with_NeMo.ipynb) # # More details on saving and restoring checkpoint, and exporting a model in its entirety, please refer to [**Fine-tuning on a new dataset** & **Advanced Usage parts** of Speech Command tutorial](https://github.com/NVIDIA/NeMo/blob/candidate/tutorials/asr/02_Speech_Commands.ipynb) # # # # # + [markdown] colab_type="text" id="LyIegk2CPNsI" # # Inference and more # If you are interested in **pretrained** model and **streaming inference**, please have a look at our [VAD inference tutorial](https://github.com/NVIDIA/NeMo/blob/main/tutorials/asr/07_Online_Offline_Microphone_VAD_Demo.ipynb) # #
tutorials/asr/06_Voice_Activiy_Detection.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.8.6 (''env'': venv)' # name: pythonjvsc74a57bd082e518efe277ee5a00982e862e28e1995e74f636b5cf3678ef966f3fbeb51464 # --- # %load_ext autoreload # %autoreload 2 # # Download 17-18.05.21 from src.data.download import download_whole_city from src.settings import DATA_RAW_DIR from tqdm import tqdm RESOLUTION = 9 cities = [ "Tirana, Albania", "Yerevan, Armenia", "Vienna, Austria", "Baku, Azerbaijan", "Minsk, Belarus", "Brussels, Belgium", "Sarajevo, Bosnia and Herzegovina", "Sofia, Bulgaria", "Zagreb, Croatia", "Nicosia, Cyprus", "Prague, Czech Republic", "Tallinn, Estonia", "Helsinki, Finland", "Paris, France", "Tbilisi, Georgia", "Berlin, Germany", "Budapest, Hungary", "Reykjavík, Iceland", "Dublin, Ireland", "Rome, Italy", "Nur-Sultan, Kazakhstan", "Latvia, Riga", "Vilnius, Lithuania", "Luxembourg City, Luxembourg", "Valletta, Malta", "Chișinău, Moldova", "Podgorica, Montenegro", "Amsterdam, Netherlands", "Skopje, North Macedonia", "Oslo, Norway", "Warszawa, PL", "Kraków, PL", "Łódź, PL", "Wrocław, PL", "Poznań, PL", "Gdańsk, PL", "Szczecin, PL", "Lisbon, Portugal", "Bucharest, Romania", ['Moscow, Russia', 'Zelenogradsky Administrative Okrug', 'Western Administrative Okrug', 'Novomoskovsky Administrative Okrug', 'Troitsky Administrative Okrug'], "Belgrade, Serbia", "Bratislava, Slovakia", "Ljubljana, Slovenia", "Madrid, Spain", "Stockholm, Sweden", "Bern, Switzerland", "Ankara, Turkey", "Kyiv, Ukraine", ["London, United Kingdom", "City of London"], "New York City, USA", "Chicago, USA", "Los Angeles, USA", "San Francisco, USA", "Philadelphia, USA", ] for city in tqdm(cities): download_whole_city(city, DATA_RAW_DIR) # # Process dataset, select tags, add h3 indices of selected resolution # ## Append h3 indices of selected resolution from src.data.make_dataset import add_h3_indices_to_city for city in tqdm(cities): add_h3_indices_to_city(city, 8) # ## Group selected tags in cities from src.data.load_data import load_filter from src.data.make_dataset import group_city_tags cities = [ "Vienna, Austria", "Minsk, Belarus", "Brussels, Belgium", "Sofia, Bulgaria", "Zagreb, Croatia", "Prague, Czech Republic", "Tallinn, Estonia", "Helsinki, Finland", "Paris, France", "Berlin, Germany", "Reykjavík, Iceland", "Dublin, Ireland", "Rome, Italy", "Nur-Sultan, Kazakhstan", "Latvia, Riga", "Vilnius, Lithuania", "Luxembourg City, Luxembourg", "Amsterdam, Netherlands", "Oslo, Norway", "Warszawa, PL", "Kraków, PL", "Łódź, PL", "Wrocław, PL", "Poznań, PL", "Gdańsk, PL", "Lisbon, Portugal", "Moscow, Russia", "Belgrade, Serbia", "Bratislava, Slovakia", "Ljubljana, Slovenia", "Madrid, Spain", "Stockholm, Sweden", "Bern, Switzerland", "London, United Kingdom", "New York City, USA", "Chicago, USA", "San Francisco, USA", ] SELECTED_TAGS = [ "aeroway", "amenity", "building", "healthcare", "historic", "landuse", "leisure", "military", "natural", "office", "shop", "sport", "tourism", "water", "waterway", ] key_values_to_drop = { "natural": [ "tree", "tree_row", "valley", 'scrub', 'heath', 'moor', 'grassland', 'fell', 'bare_rock', 'scree', 'shingle', 'sand', 'wetland', 'glacier', 'reef', 'spring', 'hot_spring', 'geyser', 'peak', 'volcano', 'peninsula', 'isthmus', 'ridge', 'arete', 'cliff', 'saddle', 'dune', 'rock', 'stone', 'sinkhole', 'cave_entrance', ], "amenity": [ 'bench', 'grit_bin', 'parking_space', 'post_box', 'sanitary_dump_station', 
'recycling', 'waste_basket', 'waste_disposal', 'waste_transfer_station', ], "landuse": [ "grass" ] } TAG_FILTER = load_filter("from_wiki.json") for city in tqdm(cities): group_city_tags(city, 8, filter_values=TAG_FILTER, fill_missing=True) # # Group all cities # + from src.data.make_dataset import group_cities df = group_cities(cities, RESOLUTION) # - df['city'].value_counts() from keplergl import KeplerGl KeplerGl(data={"cities": df}) city_buildings = df[['city', *[c for c in df.columns if c.startswith("building")]]].groupby(['city']).sum() city_buildings.reset_index().set_index('city') city_buildings_percent = city_buildings.div(city_buildings.sum(axis=1), axis=0).reset_index() city_buildings_percent melted = city_buildings_percent.melt(id_vars = 'city').sort_values(by=['city', 'value'], ascending=[True, False]) melted melted[melted['variable'] == 'building_yes'] szczecin = df[df['city']=='Szczecin, PL'][['h3', *[c for c in df.columns if c.startswith('building')]]] KeplerGl(data={'szczecin': szczecin}) szczecin.drop(columns='h3', ).melt().sort_values('value', ascending=False)
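
# As a quick static complement to the Kepler maps above, the `melted` frame can also be plotted directly with pandas. This is a minimal sketch that assumes matplotlib is installed; the choice of example city and the top-10 cut are arbitrary.

# +
import matplotlib.pyplot as plt

# pick one city from the melted (city, variable, value) frame and show its largest building-tag shares
example_city = melted['city'].iloc[0]
top_tags = melted[melted['city'] == example_city].nlargest(10, 'value')
top_tags.plot.barh(x='variable', y='value', legend=False, title=example_city)
plt.xlabel('share of building tags')
plt.tight_layout()
plt.show()
# -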
notebooks/make-dataset-multiple-cities.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] colab_type="text" id="view-in-github" # <a href="https://colab.research.google.com/github/NeuromatchAcademy/course-content-dl/blob/main/tutorials/W1D1_BasicsAndPytorch/student/W1D1_Tutorial1.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # - # # Tutorial 1: PyTorch # **Week 1, Day 1: Basics and PyTorch** # # **By Neuromatch Academy** # # # __Content creators:__ <NAME>, <NAME>, <NAME>, <NAME> # # __Content reviewers:__ <NAME>, <NAME>, <NAME> # # __Content editors:__ <NAME>, <NAME> # # __Production editors:__ <NAME>, <NAME> # **Our 2021 Sponsors, including Presenting Sponsor Facebook Reality Labs** # # <p align='center'><img src='https://github.com/NeuromatchAcademy/widgets/blob/master/sponsors.png?raw=True'/></p> # --- # # Tutorial Objectives # # Then have a few specific objectives for this tutorial: # * Learn about PyTorch and tensors # * Tensor Manipulations # * Data Loading # * GPUs and Cuda Tensors # * Train NaiveNet # * Get to know your pod # * Start thinking about the course as a whole # + cellView="form" # @title Tutorial slides # @markdown These are the slides for the videos in this tutorial today from IPython.display import IFrame IFrame(src=f"https://mfr.ca-1.osf.io/render?url=https://osf.io/wcjrv/?direct%26mode=render%26action=download%26mode=render", width=854, height=480) # - # --- # # Setup # Throughout your Neuromatch tutorials, most (probably all!) notebooks contain setup cells. These cells will import the required Python packages (e.g., PyTorch, NumPy); set global or environment variables, and load in helper functions for things like plotting. In some tutorials, you will notice that we install some dependencies even if they are preinstalled on google colab or kaggle. This happens because we have added automation to our repository through [GitHub Actions](https://docs.github.com/en/actions/learn-github-actions/introduction-to-github-actions). # # Be sure to run all of the cells in the setup section. Feel free to expand them and have a look at what you are loading in, but you should be able to fulfill the learning objectives of every tutorial without having to look at these cells. # # If you start building your own projects built on this code base we highly recommend looking at them in more detail. # + cellView="form" # @title Install dependencies # !pip install pandas --quiet # !pip install git+https://github.com/NeuromatchAcademy/evaltools --quiet # + # Imports import time import torch import random import numpy as np import pandas as pd import matplotlib.pyplot as plt from torch import nn from torchvision import datasets from torchvision.transforms import ToTensor from torch.utils.data import DataLoader from evaltools.airtable import AirtableForm # + cellView="form" # @title Figure Settings import ipywidgets as widgets # %config InlineBackend.figure_format = 'retina' plt.style.use("https://raw.githubusercontent.com/NeuromatchAcademy/content-creation/main/nma.mplstyle") # + cellView="form" # @title Helper Functions atform = AirtableForm('appn7VdPRseSoMXEG','W1D1_T1','https://portal.neuromatchacademy.org/api/redirect/to/97e94a29-0b3a-4e16-9a8d-f6838a5bd83d') def checkExercise1(A, B, C, D): """ Helper function for checking exercise. 
Args: A: torch.Tensor B: torch.Tensor C: torch.Tensor D: torch.Tensor Returns: Nothing. """ errors = [] # TODO better errors and error handling if not torch.equal(A.to(int),torch.ones(20, 21).to(int)): errors.append(f"Got: {A} \n Expected: {torch.ones(20, 21)} (shape: {torch.ones(20, 21).shape})") if not np.array_equal( B.numpy(),np.vander([1, 2, 3], 4)): errors.append("B is not a tensor containing the elements of Z ") if C.shape != (20, 21): errors.append("C is not the correct shape ") if not torch.equal(D, torch.arange(4, 41, step=2)): errors.append("D does not contain the correct elements") if errors == []: print("All correct!") else: [print(e) for e in errors] def timeFun(f, dim, iterations, device='cpu'): iterations = iterations t_total = 0 for _ in range(iterations): start = time.time() f(dim, device) end = time.time() t_total += end - start print(f"time taken for {iterations} iterations of {f.__name__}({dim}): {t_total:.5f}") # - # **Important note: Google Colab users** # # *Scratch Code Cells* # # If you want to quickly try out something or take a look at the data you can use scratch code cells. They allow you to run Python code, but will not mess up the structure of your notebook. # # To open a new scratch cell go to *Insert* → *Scratch code cell*. # # Section 1: Welcome to Neuromatch Deep learning course # + cellView="form" # @title Video 1: Welcome and History from ipywidgets import widgets out2 = widgets.Output() with out2: from IPython.display import IFrame class BiliVideo(IFrame): def __init__(self, id, page=1, width=400, height=300, **kwargs): self.id=id src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page) super(BiliVideo, self).__init__(src, width, height, **kwargs) video = BiliVideo(id=f"BV1Av411n7oL", width=854, height=480, fs=1) print("Video available at https://www.bilibili.com/video/{0}".format(video.id)) display(video) out1 = widgets.Output() with out1: from IPython.display import YouTubeVideo video = YouTubeVideo(id=f"ca21SNqt78I", width=854, height=480, fs=1, rel=0) print("Video available at https://youtube.com/watch?v=" + video.id) display(video) out = widgets.Tab([out1, out2]) out.set_title(0, 'Youtube') out.set_title(1, 'Bilibili') # add timing atform.add_event('Video 1: Welcome and History') display(out) # - # *This will be an intensive 3 week adventure. We will all learn Deep Learning. In a group. Groups need standards. Read our # [Code of Conduct](https://docs.google.com/document/d/1eHKIkaNbAlbx_92tLQelXnicKXEcvFzlyzzeWjEtifM/edit?usp=sharing). 
# # + cellView="form" # @title Video 2: Why DL is cool from ipywidgets import widgets out2 = widgets.Output() with out2: from IPython.display import IFrame class BiliVideo(IFrame): def __init__(self, id, page=1, width=400, height=300, **kwargs): self.id=id src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page) super(BiliVideo, self).__init__(src, width, height, **kwargs) video = BiliVideo(id=f"BV1gf4y1j7UZ", width=854, height=480, fs=1) print("Video available at https://www.bilibili.com/video/{0}".format(video.id)) display(video) out1 = widgets.Output() with out1: from IPython.display import YouTubeVideo video = YouTubeVideo(id=f"l-K6495BN-4", width=854, height=480, fs=1, rel=0) print("Video available at https://youtube.com/watch?v=" + video.id) display(video) out = widgets.Tab([out1, out2]) out.set_title(0, 'Youtube') out.set_title(1, 'Bilibili') # add timing to airtable atform.add_event('Video 2: Why DL is cool') display(out) # - # **Describe what you hope to get out of this course in about 100 words.** # --- # # Section 2: The Basics of PyTorch # PyTorch is a Python-based scientific computing package targeted at two sets of # audiences: # # - A replacement for NumPy to use the power of GPUs # - A deep learning platform that provides significant flexibility # and speed # # At its core, PyTorch provides a few key features: # # - A multidimensional [Tensor](https://pytorch.org/docs/stable/tensors.html) object, similar to [NumPy Array](https://numpy.org/doc/stable/reference/generated/numpy.ndarray.html) but with GPU acceleration. # - An optimized **autograd** engine for automatically computing derivatives. # - A clean, modular API for building and deploying **deep learning models**. # # You can find more information about PyTorch in the appendix. # ## Section 2.1: Creating Tensors # # + cellView="form" # @title Video 3: Making Tensors from ipywidgets import widgets out2 = widgets.Output() with out2: from IPython.display import IFrame class BiliVideo(IFrame): def __init__(self, id, page=1, width=400, height=300, **kwargs): self.id=id src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page) super(BiliVideo, self).__init__(src, width, height, **kwargs) video = BiliVideo(id=f"BV1Rw411d7Uy", width=854, height=480, fs=1) print("Video available at https://www.bilibili.com/video/{0}".format(video.id)) display(video) out1 = widgets.Output() with out1: from IPython.display import YouTubeVideo video = YouTubeVideo(id=f"jGKd_4tPGrw", width=854, height=480, fs=1, rel=0) print("Video available at https://youtube.com/watch?v=" + video.id) display(video) out = widgets.Tab([out1, out2]) out.set_title(0, 'Youtube') out.set_title(1, 'Bilibili') # add timing to airtable atform.add_event('Video 3: Making Tensors') display(out) # - # There are various ways of creating tensors, and when doing any real deep learning project we will usually have to do so. 
# **Construct tensors directly:** # # --- # # # + # we can construct a tensor directly from some common python iterables, # such as list and tuple nested iterables can also be handled as long as the # dimensions make sense # tensor from a list a = torch.tensor([0, 1, 2]) #tensor from a tuple of tuples b = ((1.0, 1.1), (1.2, 1.3)) b = torch.tensor(b) # tensor from a numpy array c = np.ones([2, 3]) c = torch.tensor(c) print(f"Tensor a: {a}") print(f"Tensor b: {b}") print(f"Tensor c: {c}") # - # **Some common tensor constructors:** # # --- # + # the numerical arguments we pass to these constructors # determine the shape of the output tensor x = torch.ones(5, 3) y = torch.zeros(2) z = torch.empty(1, 1, 5) print(f"Tensor x: {x}") print(f"Tensor y: {y}") print(f"Tensor z: {z}") # - # Notice that ```.empty()``` does not return zeros, but seemingly random small numbers. Unlike ```.zeros()```, which initialises the elements of the tensor with zeros, ```.empty()``` just allocates the memory. It is hence a bit faster if you are looking to just create a tensor. # **Creating random tensors and tensors like other tensors:** # # --- # + # there are also constructors for random numbers # uniform distribution a = torch.rand(1, 3) # normal distribution b = torch.randn(3, 4) # there are also constructors that allow us to construct # a tensor according to the above constructors, but with # dimensions equal to another tensor c = torch.zeros_like(a) d = torch.rand_like(c) print(f"Tensor a: {a}") print(f"Tensor b: {b}") print(f"Tensor c: {c}") print(f"Tensor d: {d}") # - # *Reproducibility*: # # - PyTorch random number generator: You can use `torch.manual_seed()` to seed the RNG for all devices (both CPU and CUDA) # # ```python # import torch # torch.manual_seed(0) # ``` # - For custom operators, you might need to set python seed as well: # # ```python # import random # random.seed(0) # ``` # # - Random number generators in other libraries # # ```python # import numpy as np # np.random.seed(0) # ``` # # Here, we define for you a function called `set_seed` that does the job for you! def set_seed(seed=None, seed_torch=True): """ Function that controls randomness. NumPy and random modules must be imported. Args: seed : Integer A non-negative integer that defines the random state. Default is `None`. seed_torch : Boolean If `True` sets the random seed for pytorch tensors, so pytorch module must be imported. Default is `True`. Returns: Nothing. """ if seed is None: seed = np.random.choice(2 ** 32) random.seed(seed) np.random.seed(seed) if seed_torch: torch.manual_seed(seed) torch.cuda.manual_seed_all(seed) torch.cuda.manual_seed(seed) torch.backends.cudnn.benchmark = False torch.backends.cudnn.deterministic = True print(f'Random seed {seed} has been set.') # Now, let's use the `set_seed` function in the previous example. Execute the cell multiple times to verify that the numbers printed are always the same. def simplefun(seed=True, my_seed=None): if seed: set_seed(seed=my_seed) # uniform distribution a = torch.rand(1, 3) # normal distribution b = torch.randn(3, 4) print("Tensor a: ", a) print("Tensor b: ", b) simplefun(seed=True, my_seed=0) # Turn `seed` to `False` or change `my_seed` # **Numpy-like number ranges:** # --- # The ```.arange()``` and ```.linspace()``` behave how you would expect them to if you are familar with numpy. 
# + a = torch.arange(0, 10, step=1) b = np.arange(0, 10, step=1) c = torch.linspace(0, 5, steps=11) d = np.linspace(0, 5, num=11) print(f"Tensor a: {a}\n") print(f"Numpy array b: {b}\n") print(f"Tensor c: {c}\n") print(f"Numpy array d: {d}\n") # - # ### Coding Exercise 2.1: Creating Tensors # # Below you will find some incomplete code. Fill in the missing code to construct the specified tensors. # # We want the tensors: # # $A:$ 20 by 21 tensor consisting of ones # # $B:$ a tensor with elements equal to the elements of numpy array $Z$ # # $C:$ a tensor with the same number of elements as $A$ but with values $ # \sim U(0,1)$ # # $D:$ a 1D tensor containing the even numbers between 4 and 40 inclusive. # # + def tensor_creation(Z): """A function that creates various tensors. Args: Z (numpy.ndarray): An array of shape Returns: A : 20 by 21 tensor consisting of ones B : a tensor with elements equal to the elements of numpy array Z C : a tensor with the same number of elements as A but with values ∼U(0,1) D : a 1D tensor containing the even numbers between 4 and 40 inclusive. """ ################################################# ## TODO for students: fill in the missing code ## from the first expression raise NotImplementedError("Student exercise: say what they should have done") ################################################# A = ... B = ... C = ... D = ... return A, B, C, D # add timing to airtable atform.add_event('Coding Exercise 2.1: Creating Tensors') # numpy array to copy later Z = np.vander([1, 2, 3], 4) # Uncomment below to check your function! # A, B, C, D = tensor_creation(Z) # checkExercise1(A, B, C, D) # + [markdown] colab_type="text" # [*Click for solution*](https://github.com/NeuromatchAcademy/course-content-dl/tree/main//tutorials/W1D1_BasicsAndPytorch/solutions/W1D1_Tutorial1_Solution_ad4f6c0f.py) # # # - # ``` # All correct! # ``` # ## Section 2.2: Operations in PyTorch # # **Tensor-Tensor operations** # # We can perform operations on tensors using methods under ```torch.``` # # # + cellView="form" # @title Video 4: Tensor Operators from ipywidgets import widgets out2 = widgets.Output() with out2: from IPython.display import IFrame class BiliVideo(IFrame): def __init__(self, id, page=1, width=400, height=300, **kwargs): self.id=id src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page) super(BiliVideo, self).__init__(src, width, height, **kwargs) video = BiliVideo(id=f"BV1G44y127As", width=854, height=480, fs=1) print("Video available at https://www.bilibili.com/video/{0}".format(video.id)) display(video) out1 = widgets.Output() with out1: from IPython.display import YouTubeVideo video = YouTubeVideo(id=f"R1R8VoYXBVA", width=854, height=480, fs=1, rel=0) print("Video available at https://youtube.com/watch?v=" + video.id) display(video) out = widgets.Tab([out1, out2]) out.set_title(0, 'Youtube') out.set_title(1, 'Bilibili') # add timing to airtable atform.add_event('Video 4: Tensor Operators') display(out) # - # **Tensor-Tensor operations** # # We can perform operations on tensors using methods under ```torch.``` # + a = torch.ones(5, 3) b = torch.rand(5, 3) c = torch.empty(5, 3) d = torch.empty(5, 3) # this only works if c and d already exist torch.add(a, b, out=c) #Pointwise Multiplication of a and b torch.multiply(a, b, out=d) print(c) print(d) # - # However, in PyTorch most common Python operators are overridden. 
# The common standard arithmetic operators (+, -, *, /, and **) have all been lifted to elementwise operations x = torch.tensor([1, 2, 4, 8]) y = torch.tensor([1, 2, 3, 4]) x + y, x - y, x * y, x / y, x**y # The ** operator is exponentiation # **Tensor Methods** # Tensors also have a number of common arithmetic operations built in. A full list of **all** methods can be found in the appendix (there are a lot!) # # All of these operations should have similar syntax to their numpy equivalents.(Feel free to skip if you already know this!) # + x = torch.rand(3, 3) print(x) print("\n") # sum() - note the axis is the axis you move across when summing print(f"Sum of every element of x: {x.sum()}") print(f"Sum of the columns of x: {x.sum(axis=0)}") print(f"Sum of the rows of x: {x.sum(axis=1)}") print("\n") print(f"Mean value of all elements of x {x.mean()}") print(f"Mean values of the columns of x {x.mean(axis=0)}") print(f"Mean values of the rows of x {x.mean(axis=1)}") # - # **Matrix Operations** # # The ```@``` symbol is overridden to represent matrix multiplication. You can also use ```torch.matmul()``` to multiply tensors. For dot multiplication, you can use ```torch.dot()```, or manipulate the axes of your tensors and do matrix multiplication (we will cover that in the next section). # # Transposes of 2D tensors are obtained using ```torch.t()``` or ```Tensor.t```. Note the lack of brackets for ```Tensor.t``` - it is an attribute, not a method. # ### Coding Exercise 2.2 : Simple tensor operations # # Below are two expressions involving operations on matrices. # # $$ \textbf{A} = # \begin{bmatrix}2 &4 \\5 & 7 # \end{bmatrix} # \begin{bmatrix} 1 &1 \\2 & 3 # \end{bmatrix} # + # \begin{bmatrix}10 & 10 \\ 12 & 1 # \end{bmatrix} # $$ # # # and # # # $$ b = # \begin{bmatrix} 3 \\ 5 \\ 7 # \end{bmatrix} \cdot # \begin{bmatrix} 2 \\ 4 \\ 8 # \end{bmatrix} # $$ # # The code block below that computes these expressions using PyTorch is incomplete - fill in the missing lines. # # # + def simple_operations(a1: torch.Tensor, a2: torch.Tensor, a3: torch.Tensor): ################################################ ## TODO for students: complete the first computation using the argument matricies raise NotImplementedError("Student exercise: fill in the missing code to complete the operation") ################################################ # multiplication of tensor a1 with tensor a2 and then add it with tensor a3 answer = ... return answer # add timing to airtable atform.add_event('Coding Exercise 2.2 : Simple tensor operations-simple_operations') # Computing expression 1: # init our tensors a1 = torch.tensor([[2, 4], [5, 7]]) a2 = torch.tensor([[1, 1], [2, 3]]) a3 = torch.tensor([[10, 10], [12, 1]]) ## uncomment to test your function # A = simple_operations(a1, a2, a3) # print(A) # + [markdown] colab_type="text" # [*Click for solution*](https://github.com/NeuromatchAcademy/course-content-dl/tree/main//tutorials/W1D1_BasicsAndPytorch/solutions/W1D1_Tutorial1_Solution_5562ea1d.py) # # # - # ``` # tensor([[20, 24], # [31, 27]]) # ``` # + def dot_product(b1: torch.Tensor, b2: torch.Tensor): ############################################### ## TODO for students: complete the first computation using the argument matricies raise NotImplementedError("Student exercise: fill in the missing code to complete the operation") ############################################### # Use torch.dot() to compute the dot product of two tensors product = ... 
return product # add timing to airtable atform.add_event('Coding Exercise 2.2 : Simple tensor operations-dot_product') # Computing expression 2: b1 = torch.tensor([3, 5, 7]) b2 = torch.tensor([2, 4, 8]) ## Uncomment to test your function # b = dot_product(b1, b2) # print(b) # + [markdown] colab_type="text" # [*Click for solution*](https://github.com/NeuromatchAcademy/course-content-dl/tree/main//tutorials/W1D1_BasicsAndPytorch/solutions/W1D1_Tutorial1_Solution_00491ea4.py) # # # - # ``` # tensor(82) # ``` # ## Section 2.3 Manipulating Tensors in Pytorch # # # + cellView="form" # @title Video 5: Tensor Indexing from ipywidgets import widgets out2 = widgets.Output() with out2: from IPython.display import IFrame class BiliVideo(IFrame): def __init__(self, id, page=1, width=400, height=300, **kwargs): self.id=id src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page) super(BiliVideo, self).__init__(src, width, height, **kwargs) video = BiliVideo(id=f"BV1BM4y1K7pD", width=854, height=480, fs=1) print("Video available at https://www.bilibili.com/video/{0}".format(video.id)) display(video) out1 = widgets.Output() with out1: from IPython.display import YouTubeVideo video = YouTubeVideo(id=f"0d0KSJ3lJbg", width=854, height=480, fs=1, rel=0) print("Video available at https://youtube.com/watch?v=" + video.id) display(video) out = widgets.Tab([out1, out2]) out.set_title(0, 'Youtube') out.set_title(1, 'Bilibili') # add timing to airtable atform.add_event('Video 5: Tensor Indexing') display(out) # - # **Indexing** # # Just as in numpy, elements in a tensor can be accessed by index. As in any numpy array, the first element has index 0 and ranges are specified to include the first but before the last element. We can access elements according to their relative position to the end of the list by using negative indices. Indexing is also referred to as slicing. # # For example, [-1] selects the last element; [1:3] selects the second and the third elements, and [:-2] will select all elements excluding the last and second-to-last elements. x = torch.arange(0, 10) print(x) print(x[-1]) print(x[1:3]) print(x[:-2]) # When we have multidimensional tensors, indexing rules work the same way as numpy. # + # make a 5D tensor x = torch.rand(1, 2, 3, 4, 5) print(f" shape of x[0]:{x[0].shape}") print(f" shape of x[0][0]:{x[0][0].shape}") print(f" shape of x[0][0][0]:{x[0][0][0].shape}") # - # **Flatten and reshape** # # There are various methods for reshaping tensors. It is common to have to express 2D data in 1D format. Similarly, it is also common to have to reshape a 1D tensor into a 2D tensor. We can achieve this with the ```.flatten()``` and ```.reshape()``` methods. # + z = torch.arange(12).reshape(6, 2) print(f"Original z: \n {z}") # 2D -> 1D z = z.flatten() print(f"Flattened z: \n {z}") # and back to 2D z = z.reshape(3, 4) print(f"Reshaped (3x4) z: \n {z}") # - # You will also see the ```.view()``` methods used a lot to reshape tensors. There is a subtle difference between ```.view()``` and ```.reshape()```, though for now we will just use ```.reshape()```. The documentation can be found in the appendix. # **Squeezing tensors** # # When processing batches of data, you will quite often be left with singleton dimensions. e.g. [1,10] or [256, 1, 3]. This dimension can quite easily mess up your matrix operations if you don't plan on it being there... # # In order to compress tensors along their singleton dimensions we can use the ```.squeeze()``` method. 
We can use the ```.unsqueeze()``` method to do the opposite. # # + x = torch.randn(1, 10) # printing the zeroth element of the tensor will not give us the first number! print(x.shape) print(f"x[0]: {x[0]}") # - # Because of that pesky singleton dimension, x[0] gave us the first row instead! # # # lets get rid of that singleton dimension and see what happens now x = x.squeeze(0) print(x.shape) print(f"x[0]: {x[0]}") # + # adding singleton dimensions works a similar way, and is often used when tensors # being added need same number of dimensions y = torch.randn(5, 5) print(f"shape of y: {y.shape}") # lets insert a singleton dimension y = y.unsqueeze(1) print(f"shape of y: {y.shape}") # - # **Permutation** # Sometimes our dimensions will be in the wrong order! For example, we may be dealing with RGB images with dim [3x48x64], but our pipeline expects the colour dimension to be the last dimension i.e. [48x64x3]. To get around this we can use ```.permute()``` # # + # `x` has dimensions [color,image_height,image_width] x = torch.rand(3, 48, 64) # we want to permute our tensor to be [ image_height , image_width , color ] x = x.permute(1, 2, 0) # permute(1,2,0) means: # the 0th dim of my new tensor = the 1st dim of my old tensor # the 1st dim of my new tensor = the 2nd # the 2nd dim of my new tensor = the 0th print(x.shape) # - # You may also see ```.transpose()``` used. This works in a similar way as permute, but can only swap two dimensions at once. # **Concatenation** # In this example, we concatenate two matrices along rows (axis 0, the first element of the shape) vs. columns (axis 1, the second element of the shape). We can see that the first output tensor’s axis-0 length ( 6 ) is the sum of the two input tensors’ axis-0 lengths ( 3+3 ); while the second output tensor’s axis-1 length ( 8 ) is the sum of the two input tensors’ axis-1 lengths ( 4+4 ). # + # Create two tensors of the same shape x = torch.arange(12, dtype=torch.float32).reshape((3, 4)) y = torch.tensor([[2.0, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]]) #concatenate them along rows cat_rows = torch.cat((x, y), dim=0) # concatenate along columns cat_cols = torch.cat((x, y), dim=1) # printing outputs print('Concatenated by rows: shape{} \n {}'.format(list(cat_rows.shape), cat_rows)) print('\n Concatenated by colums: shape{} \n {}'.format(list(cat_cols.shape), cat_cols)) # - # **Conversion to Other Python Objects** # # Converting to a NumPy tensor, or vice versa, is easy. The converted result does not share memory. This minor inconvenience is actually quite important: when you perform operations on the CPU or on GPUs, you do not want to halt computation, waiting to see whether the NumPy package of Python might want to be doing something else with the same chunk of memory. # # When converting to a numpy array, the information being tracked by the tensor will be lost i.e. the computational graph. This will be covered in detail when you are introduced to autograd tomorrow! # + x = torch.randn(5) print(f"x: {x} | x type: {x.type()}") y = x.numpy() print(f"y: {y} | y type: {type(y)}") z = torch.tensor(y) print(f"z: {z} | z type: {z.type()}") # - # To convert a size-1 tensor to a Python scalar, we can invoke the item function or Python’s built-in functions. a = torch.tensor([3.5]) a, a.item(), float(a), int(a) # ### Coding Exercise 2.3: Manipulating Tensors # Using a combination of the methods discussed above, complete the functions below. 
# **Function A** # # This function takes in two 2D tensors $A$ and $B$ and returns the column sum of A multiplied by the sum of all the elmements of $B$ i.e. a scalar, e.g.,: # # $ A = \begin{bmatrix} # 1 & 1 \\ # 1 & 1 # \end{bmatrix} \,$ # and # $ B = \begin{bmatrix} # 1 & 2 & 3\\ # 1 & 2 & 3 # \end{bmatrix} \,$ # so # $ \, Out = \begin{bmatrix} 2 & 2 \\ # \end{bmatrix} \cdot 12 = \begin{bmatrix} # 24 & 24\\ # \end{bmatrix}$ # # **Function B** # # This function takes in a square matrix $C$ and returns a 2D tensor consisting of a flattened $C$ with the index of each element appended to this tensor in the row dimension, e.g.,: # # $ C = \begin{bmatrix} # 2 & 3 \\ # -1 & 10 # \end{bmatrix} \,$ # so # $ \, Out = \begin{bmatrix} # 0 & 2 \\ # 1 & 3 \\ # 2 & -1 \\ # 3 & 10 # \end{bmatrix}$ # # **Hint:** pay close attention to singleton dimensions # # **Function C** # # This function takes in two 2D tensors $D$ and $E$. If the dimensions allow it, this function returns the elementwise sum of $D$-shaped $E$, and $D$; else this function returns a 1D tensor that is the concatenation of the two tensors, e.g.,: # # $ D = \begin{bmatrix} # 1 & -1 \\ # -1 & 3 # \end{bmatrix} \,$ # and # $ E = \begin{bmatrix} # 2 & 3 & 0 & 2 \\ # \end{bmatrix} \, $ # so # $ \, Out = \begin{bmatrix} # 3 & 2 \\ # -1 & 5 # \end{bmatrix}$ # # $ D = \begin{bmatrix} # 1 & -1 \\ # -1 & 3 # \end{bmatrix}$ # and # $ \, E = \begin{bmatrix} # 2 & 3 & 0 \\ # \end{bmatrix} \,$ # so # $ \, Out = \begin{bmatrix} # 1 & -1 & -1 & 3 & 2 & 3 & 0 # \end{bmatrix}$ # # **Hint:** `torch.numel()` is an easy way of finding the number of elements in a tensor # # + def functionA(my_tensor1, my_tensor2): """ This function takes in two 2D tensors `my_tensor1` and `my_tensor2` and returns the column sum of `my_tensor1` multiplied by the sum of all the elmements of `my_tensor2`, i.e., a scalar. Args: my_tensor1: torch.Tensor my_tensor2: torch.Tensor Retuns: output: torch.Tensor The multiplication of the column sum of `my_tensor1` by the sum of `my_tensor2`. """ ################################################ ## TODO for students: complete functionA raise NotImplementedError("Student exercise: complete function A") ################################################ # TODO multiplication the sum of the tensors output = ... return output def functionB(my_tensor): """ This function takes in a square matrix `my_tensor` and returns a 2D tensor consisting of a flattened `my_tensor` with the index of each element appended to this tensor in the row dimension. Args: my_tensor: torch.Tensor Retuns: output: torch.Tensor Concatenated tensor. """ ################################################ ## TODO for students: complete functionB raise NotImplementedError("Student exercise: complete function B") ################################################ # TODO flatten the tensor `my_tensor` my_tensor = ... # TODO create the idx tensor to be concatenated to `my_tensor` idx_tensor = ... # TODO concatenate the two tensors output = ... return output def functionC(my_tensor1, my_tensor2): """ This function takes in two 2D tensors `my_tensor1` and `my_tensor2`. If the dimensions allow it, it returns the elementwise sum of `my_tensor1`-shaped `my_tensor2`, and `my_tensor2`; else this function returns a 1D tensor that is the concatenation of the two tensors. Args: my_tensor1: torch.Tensor my_tensor2: torch.Tensor Retuns: output: torch.Tensor Concatenated tensor. 
""" ################################################ ## TODO for students: complete functionB raise NotImplementedError("Student exercise: complete function C") ################################################ # TODO check we can reshape `my_tensor2` into the shape of `my_tensor1` if ...: # TODO reshape `my_tensor2` into the shape of `my_tensor1` my_tensor2 = ... # TODO sum the two tensors output = ... else: # TODO flatten both tensors my_tensor1 = ... my_tensor2 = ... # TODO concatenate the two tensors in the correct dimension output = ... return output # add timing to airtable atform.add_event('Coding Exercise 2.3: Manipulating Tensors') ## Implement the functions above and then uncomment the following lines to test your code # print(functionA(torch.tensor([[1, 1], [1, 1]]), torch.tensor([[1, 2, 3], [1, 2, 3]]))) # print(functionB(torch.tensor([[2, 3], [-1, 10]]))) # print(functionC(torch.tensor([[1, -1], [-1, 3]]), torch.tensor([[2, 3, 0, 2]]))) # print(functionC(torch.tensor([[1, -1], [-1, 3]]), torch.tensor([[2, 3, 0]]))) # + [markdown] colab_type="text" # [*Click for solution*](https://github.com/NeuromatchAcademy/course-content-dl/tree/main//tutorials/W1D1_BasicsAndPytorch/solutions/W1D1_Tutorial1_Solution_ea1718cb.py) # # # - # ``` # tensor([24, 24]) # tensor([[ 0, 2], # [ 1, 3], # [ 2, -1], # [ 3, 10]]) # tensor([[ 3, 2], # [-1, 5]]) # tensor([ 1, -1, -1, 3, 2, 3, 0]) # ``` # ## Section 2.4: GPUs # # + cellView="form" # @title Video 6: GPU vs CPU from ipywidgets import widgets out2 = widgets.Output() with out2: from IPython.display import IFrame class BiliVideo(IFrame): def __init__(self, id, page=1, width=400, height=300, **kwargs): self.id=id src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page) super(BiliVideo, self).__init__(src, width, height, **kwargs) video = BiliVideo(id=f"BV1nM4y1K7qx", width=854, height=480, fs=1) print("Video available at https://www.bilibili.com/video/{0}".format(video.id)) display(video) out1 = widgets.Output() with out1: from IPython.display import YouTubeVideo video = YouTubeVideo(id=f"9Mc9GFUtILY", width=854, height=480, fs=1, rel=0) print("Video available at https://youtube.com/watch?v=" + video.id) display(video) out = widgets.Tab([out1, out2]) out.set_title(0, 'Youtube') out.set_title(1, 'Bilibili') # add timing to airtable atform.add_event('Video 6: GPU vs CPU') display(out) # - # # By default, when we create a tensor it will *not* live on the GPU! x = torch.randn(10) print(x.device) # When using Colab notebooks by default will not have access to a GPU. In order to start using GPUs we need to request one. We can do this by going to the runtime tab at the top of the page. # # By following Runtime -> Change runtime type and selecting "GPU" from the Hardware Accelerator dropdown list, we can start playing with sending tensors to GPUs. # # Once you have done this your runtime will restart and you will need to rerun the first setup cell to reimport PyTorch. Then proceed to the next cell. # # (For more information on the GPU usage policy you can view in the appendix) # **Now we have a GPU** # # The cell below should return True. print(torch.cuda.is_available()) # CUDA is an API developed by Nvidia for interfacing with GPUs. PyTorch provides us with a layer of abstraction, and allows us to launch CUDA kernels using pure Python. 
#
# In short, we get the power of parallelising our tensor computations on GPUs, whilst only writing (relatively) simple Python!
#
# Here, we define the function `set_device`, which returns the device used in the notebook, i.e., `cpu` or `cuda`. Unless otherwise specified, we use this function at the top of every tutorial, and we store the device variable like so:
#
# ```python
# DEVICE = set_device()
# ```
#
# Let's define the function using the PyTorch package `torch.cuda`, which is lazily initialized, so we can always import it, and use `is_available()` to determine if our system supports CUDA.


def set_device():
  device = "cuda" if torch.cuda.is_available() else "cpu"
  if device != "cuda":
    print("GPU is not enabled in this notebook. \n"
          "If you want to enable it, in the menu under `Runtime` -> \n"
          "`Hardware accelerator.` and select `GPU` from the dropdown menu")
  else:
    print("GPU is enabled in this notebook. \n"
          "If you want to disable it, in the menu under `Runtime` -> \n"
          "`Hardware accelerator.` and select `None` from the dropdown menu")

  return device


# Let's make some CUDA tensors!

# +
# common device-agnostic way of writing code that can run on cpu OR gpu
# that we provide for you in each of the tutorials
DEVICE = set_device()

# we can specify a device when we first create our tensor
x = torch.randn(2, 2, device=DEVICE)
print(x.dtype)
print(x.device)

# we can also use the .to() method to change the device a tensor lives on
y = torch.randn(2, 2)
print(f"y before calling to() | device: {y.device} | dtype: {y.type()}")

y = y.to(DEVICE)
print(f"y after calling to() | device: {y.device} | dtype: {y.type()}")
# -

# **Operations between cpu tensors and cuda tensors**
#
# Note that the type of the tensor changed after calling ```.to()```. What happens if we try to perform operations on tensors that live on different devices?

# +
x = torch.tensor([0, 1, 2], device=DEVICE)
y = torch.tensor([3, 4, 5], device="cpu")

# Uncomment the following line and run this cell
# z = x + y
# -

# We cannot combine cuda tensors and cpu tensors in this fashion. If we want to compute an operation that combines tensors on different devices, we need to move them first! We can use the `.to()` method as before, or the `.cpu()` and `.cuda()` methods. Note that using `.cuda()` will throw an error if CUDA is not enabled on your machine.
#
# Generally in this course all deep learning is done on the GPU, while other computation is done on the CPU, so sometimes we have to pass things back and forth; you will therefore see us move tensors between devices quite often.

# +
x = torch.tensor([0, 1, 2], device=DEVICE)
y = torch.tensor([3, 4, 5], device="cpu")
z = torch.tensor([6, 7, 8], device=DEVICE)

# moving to cpu
x = x.to("cpu")  # alternatively, you can use x = x.cpu()
print(x + y)

# moving to gpu
y = y.to(DEVICE)  # alternatively, you can use y = y.cuda()
print(y + z)
# -

# ### Coding Exercise 2.4: Just how much faster are GPUs?
#
# Below is a simple function. Complete the second function, such that it performs the same operations as the first function, but entirely on the GPU. We will use the helper function `timeFun(f, dim, iterations, device)`.

dim = 10000
iterations = 1


# +
def simpleFun(dim, device):
  """
  Args:
    dim: integer
    device: "cpu" or "cuda:0"

  Returns:
    Nothing.
""" ############################################### ## TODO for students: recreate the above function, but ## ensure all computation happens on the GPU raise NotImplementedError("Student exercise: fill in the missing code to create the tensors") ############################################### x = ... y = ... z = ... x = ... y = ... del x del y del z ## TODO: Implement the function above and uncomment the following lines to test your code # timeFun(f=simpleFun, dim=dim, iterations=iterations) # timeFun(f=simpleFun, dim=dim, iterations=iterations, device=DEVICE) # + [markdown] colab_type="text" # [*Click for solution*](https://github.com/NeuromatchAcademy/course-content-dl/tree/main//tutorials/W1D1_BasicsAndPytorch/solutions/W1D1_Tutorial1_Solution_032dcba8.py) # # # - # Sample output (depends on your hardware) # # ``` # time taken for 1 iterations of simpleFun(10000): 28.50481 # time taken for 1 iterations of simpleFunGPU(10000): 0.91102 # ``` # **Discuss!** # # Try and reduce the dimensions of the tensors and increase the iterations. You can get to a point where the cpu only function is faster than the GPU function. Why might this be? # # ## Section 2.5: Datasets and Dataloaders # # # + cellView="form" # @title Video 7: Getting Data from ipywidgets import widgets out2 = widgets.Output() with out2: from IPython.display import IFrame class BiliVideo(IFrame): def __init__(self, id, page=1, width=400, height=300, **kwargs): self.id=id src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page) super(BiliVideo, self).__init__(src, width, height, **kwargs) video = BiliVideo(id=f"BV1744y127SQ", width=854, height=480, fs=1) print("Video available at https://www.bilibili.com/video/{0}".format(video.id)) display(video) out1 = widgets.Output() with out1: from IPython.display import YouTubeVideo video = YouTubeVideo(id=f"LSkjPM1gFu0", width=854, height=480, fs=1, rel=0) print("Video available at https://youtube.com/watch?v=" + video.id) display(video) out = widgets.Tab([out1, out2]) out.set_title(0, 'Youtube') out.set_title(1, 'Bilibili') # add timing to airtable atform.add_event('Video 7: Getting Data') display(out) # - # When training neural network models you will be working with large amounts of data. Fortunately, PyTorch offers some great tools that help you organize and manipulate your data samples. # # Import dataset and dataloaders related packages from torchvision import datasets from torchvision.transforms import ToTensor from torch.utils.data import DataLoader from torchvision.transforms import Compose, Grayscale # **Datasets** # # The `torchvision` package gives you easy access to many of the publicly available datasets. Let's load the [CIFAR10](https://www.cs.toronto.edu/~kriz/cifar.html) dataset, which contains color images of 10 different classes, like vehicles and animals. # # Creating an object of type `datasets.CIFAR10` will automatically download and load all images from the dataset. The resulting data structure can be treated as a list containing data samples and their corresponding labels. # + # Download and load the images from the CIFAR10 dataset cifar10_data = datasets.CIFAR10( root="data", # path where the images will be stored download=True, # all images should be downloaded transform=ToTensor() # transform the images to tensors ) # Print the number of samples in the loaded dataset print(f"Number of samples: {len(cifar10_data)}") print(f"Class names: {cifar10_data.classes}") # - # We have 50000 samples loaded. 
Now let's take a look at one of them in detail. Each sample consists of an image and its corresponding label. # Choose a random sample random.seed(2021) image, label = cifar10_data[random.randint(0, len(cifar10_data))] print(f"Label: {cifar10_data.classes[label]}") print(f"Image size: {image.shape}") # Color images are modeled as 3 dimensional tensors. The first dimension corresponds to the channels (C) of the image (in this case we have RGB images). The second dimensions is the height (H) of the image and the third is the width (W). We can denote this image format as C × H × W. # ### Coding Exercise 2.5: Display an image from the dataset # # Let's try to display the image using `matplotlib`. The code below will not work, because `imshow` expects to have the image in a different format - $H \times W \times C$. # # You need to reorder the dimensions of the tensor using the `permute` method of the tensor. PyTorch `torch.permute(*dims)` rearranges the original tensor according to the desired ordering and returns a new multidimensional rotated tensor. The size of the returned tensor remains the same as that of the original. # # **Code hint:** # # ```python # # create a tensor of size 2 x 4 # input_var = torch.randn(2, 4) # # print its size and the tensor # print(input_var.size()) # print(input_var) # # # dimensions permuted # input_var = input_var.permute(1, 0) # # print its size and the permuted tensor # print(input_var.size()) # print(input_var) # ``` # + # TODO: Uncomment the following line to see the error that arises from the current image format # plt.imshow(image) # TODO: Comment the above line and fix this code by reordering the tensor dimensions # plt.imshow(image.permute(...)) # plt.show() # + [markdown] colab_type="text" # [*Click for solution*](https://github.com/NeuromatchAcademy/course-content-dl/tree/main//tutorials/W1D1_BasicsAndPytorch/solutions/W1D1_Tutorial1_Solution_b04bd357.py) # # *Example output:* # # <img alt='Solution hint' align='left' width=835.0 height=827.0 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content-dl/main/tutorials/W1D1_BasicsAndPytorch/static/W1D1_Tutorial1_Solution_b04bd357_0.png> # # # + cellView="form" #@title Video 8: Train and Test from ipywidgets import widgets out2 = widgets.Output() with out2: from IPython.display import IFrame class BiliVideo(IFrame): def __init__(self, id, page=1, width=400, height=300, **kwargs): self.id=id src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page) super(BiliVideo, self).__init__(src, width, height, **kwargs) video = BiliVideo(id=f"BV1rV411H7s5", width=854, height=480, fs=1) print("Video available at https://www.bilibili.com/video/{0}".format(video.id)) display(video) out1 = widgets.Output() with out1: from IPython.display import YouTubeVideo video = YouTubeVideo(id=f"JokSIuPs-ys", width=854, height=480, fs=1, rel=0) print("Video available at https://youtube.com/watch?v=" + video.id) display(video) out = widgets.Tab([out1, out2]) out.set_title(0, 'Youtube') out.set_title(1, 'Bilibili') # add timing to airtable atform.add_event('Video 8: Train and Test') display(out) # - # **Training and Test Datasets** # # When loading a dataset, you can specify if you want to load the training or the test samples using the `train` argument. We can load the training and test datasets separately. For simplicity, today we will not use both datasets separately, but this topic will be adressed in the next days. 
# + # Load the training samples training_data = datasets.CIFAR10( root="data", train=True, download=True, transform=ToTensor() ) # Load the test samples test_data = datasets.CIFAR10( root="data", train=False, download=True, transform=ToTensor() ) # + cellView="form" # @title Video 9: Data Augmentation - Transformations from ipywidgets import widgets out2 = widgets.Output() with out2: from IPython.display import IFrame class BiliVideo(IFrame): def __init__(self, id, page=1, width=400, height=300, **kwargs): self.id=id src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page) super(BiliVideo, self).__init__(src, width, height, **kwargs) video = BiliVideo(id=f"BV19B4y1N77t", width=854, height=480, fs=1) print("Video available at https://www.bilibili.com/video/{0}".format(video.id)) display(video) out1 = widgets.Output() with out1: from IPython.display import YouTubeVideo video = YouTubeVideo(id=f"sjegA9OBUPw", width=854, height=480, fs=1, rel=0) print("Video available at https://youtube.com/watch?v=" + video.id) display(video) out = widgets.Tab([out1, out2]) out.set_title(0, 'Youtube') out.set_title(1, 'Bilibili') # add timing to airtable atform.add_event('Video 9: Data Augmentation - Transformations') display(out) # - # **Dataloader** # # Another important concept is the `Dataloader`. It is a wrapper around the `Dataset` that splits it into minibatches (important for training the neural network) and makes the data iterable. The `shuffle` argument is used to shuffle the order of the samples across the minibatches. # # Create dataloaders with train_dataloader = DataLoader(training_data, batch_size=64, shuffle=True) test_dataloader = DataLoader(test_data, batch_size=64, shuffle=True) # *Reproducibility:* DataLoader will reseed workers following Randomness in multi-process data loading algorithm. Use `worker_init_fn()` and a `generator` to preserve reproducibility: # # # ```python # def seed_worker(worker_id): # worker_seed = torch.initial_seed() % 2**32 # numpy.random.seed(worker_seed) # random.seed(worker_seed) # # # g_seed = torch.Generator() # g_seed.manual_seed(my_seed) # # DataLoader( # train_dataset, # batch_size=batch_size, # num_workers=num_workers, # worker_init_fn=seed_worker, # generator=g_seed # ) # ``` # **Note:** For the `seed_worker` to have an effect, `num_workers` should be 2 or more. # We can now query the next batch from the data loader and inspect it. For this we need to convert the dataloader object to a Python iterator using the function `iter` and then we can query the next batch using the function `next`. # # We can now see that we have a 4D tensor. This is because we have a 64 images in the batch ($B$) and each image has 3 dimensions: channels ($C$), height ($H$) and width ($W$). So, the size of the 4D tensor is $B \times C \times H \times W$. # + # Load the next batch batch_images, batch_labels = next(iter(train_dataloader)) print('Batch size:', batch_images.shape) # Display the first image from the batch plt.imshow(batch_images[0].permute(1, 2, 0)) plt.show() # - # **Transformations** # # Another useful feature when loading a dataset is applying transformations on the data - color conversions, normalization, cropping, rotation etc. There are many predefined transformations in the `torchvision.transforms` package and you can also combine them using the `Compose` transform. Checkout the [pytorch documentation](https://pytorch.org/vision/stable/transforms.html) for details. 
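#
# As a small illustration, a typical `Compose` pipeline chains a conversion to tensor with a normalization step. The mean/std values below are illustrative placeholders rather than CIFAR10 statistics:
#
# ```python
# from torchvision.transforms import Compose, ToTensor, Normalize
#
# transform = Compose([
#     ToTensor(),                                    # PIL image -> float tensor in [0, 1]
#     Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),   # shift/scale each RGB channel
# ])
#
# normalized_data = datasets.CIFAR10(root="data", download=True, transform=transform)
# ```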
# ### Coding Exercise 2.6: Load the CIFAR10 dataset as grayscale images # # The goal of this excercise is to load the images from the CIFAR10 dataset as grayscale images. Note that we rerun the `set_seed` function to ensure reproducibility. # + def my_data_load(): ############################################### ## TODO for students: recreate the above function, but ## ensure all computation happens on the GPU raise NotImplementedError("Student exercise: fill in the missing code to load the data") ############################################### ## TODO Load the CIFAR10 data using a transform that converts the images to grayscale tensors data = datasets.CIFAR10(..., transform=...) # Display a random grayscale image image, label = data[random.randint(0, len(data))] plt.imshow(image.squeeze(), cmap="gray") plt.show() return data set_seed(seed=2021) ## After implementing the above code, uncomment the following lines to test your code # data = my_data_load() # + [markdown] colab_type="text" # [*Click for solution*](https://github.com/NeuromatchAcademy/course-content-dl/tree/main//tutorials/W1D1_BasicsAndPytorch/solutions/W1D1_Tutorial1_Solution_6052d728.py) # # *Example output:* # # <img alt='Solution hint' align='left' width=835.0 height=827.0 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content-dl/main/tutorials/W1D1_BasicsAndPytorch/static/W1D1_Tutorial1_Solution_6052d728_1.png> # # # - # --- # # Section 3: Neural Networks # # Now it's time for you to create your first neural network using PyTorch. This section will walk you through the process of: # - Creating a simple neural network model # - Training the network # - Visualizing the results of the network # - Tweeking the network # # # + cellView="form" # @title Video 10: CSV Files from ipywidgets import widgets out2 = widgets.Output() with out2: from IPython.display import IFrame class BiliVideo(IFrame): def __init__(self, id, page=1, width=400, height=300, **kwargs): self.id=id src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page) super(BiliVideo, self).__init__(src, width, height, **kwargs) video = BiliVideo(id=f"BV1xy4y1T7kv", width=854, height=480, fs=1) print("Video available at https://www.bilibili.com/video/{0}".format(video.id)) display(video) out1 = widgets.Output() with out1: from IPython.display import YouTubeVideo video = YouTubeVideo(id=f"JrC_UAJWYKU", width=854, height=480, fs=1, rel=0) print("Video available at https://youtube.com/watch?v=" + video.id) display(video) out = widgets.Tab([out1, out2]) out.set_title(0, 'Youtube') out.set_title(1, 'Bilibili') # add timing to airtable atform.add_event('Video 10: CSV Files') display(out) # - # ## Section 3.1: Data Loading # # First we need some sample data to train our network on. You can use the function below to generate an example dataset consisting of 2D points along two interleaving half circles. The data will be stored in a file called `sample_data.csv`. You can inspect the file directly in Colab by going to Files on the left side and opening the CSV file. # + cellView="form" # @title Generate sample data # @markdown we used `scikit-learn` module from sklearn.datasets import make_moons # Create a dataset of 256 points with a little noise X, y = make_moons(256, noise=0.1) # Store the data as a Pandas data frame and save it to a CSV file df = pd.DataFrame(dict(x0=X[:,0], x1=X[:,1], y=y)) df.to_csv('sample_data.csv') # - # Now we can load the data from the CSV file using the Pandas library. 
Pandas provides many functions for reading files in various formats. When loading data from a CSV file, we can reference the columns directly by their names. # + # Load the data from the CSV file in a Pandas DataFrame data = pd.read_csv("sample_data.csv") # Create a 2D numpy array from the x0 and x1 columns X_orig = data[["x0", "x1"]].to_numpy() # Create a 1D numpy array from the y column y_orig = data["y"].to_numpy() # Print the sizes of the generated 2D points X and the corresponding labels Y print(f"Size X:{X_orig.shape}") print(f"Size y:{y_orig.shape}") # Visualize the dataset. The color of the points is determined by the labels `y_orig`. plt.scatter(X_orig[:, 0], X_orig[:, 1], s=40, c=y_orig) plt.show() # - # **Prepare Data for PyTorch** # # Now let's prepare the data in a format suitable for PyTorch - convert everything into tensors. # + # Initialize the device variable DEVICE = set_device() # Convert the 2D points to a float32 tensor X = torch.tensor(X_orig, dtype=torch.float32) # Upload the tensor to the device X = X.to(DEVICE) print(f"Size X:{X.shape}") # Convert the labels to a long interger tensor y = torch.from_numpy(y_orig).type(torch.LongTensor) # Upload the tensor to the device y = y.to(DEVICE) print(f"Size y:{y.shape}") # - # ## Section 3.2: Create a Simple Neural Network # + cellView="form" # @title Video 11: Generating the Neural Network from ipywidgets import widgets out2 = widgets.Output() with out2: from IPython.display import IFrame class BiliVideo(IFrame): def __init__(self, id, page=1, width=400, height=300, **kwargs): self.id=id src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page) super(BiliVideo, self).__init__(src, width, height, **kwargs) video = BiliVideo(id=f"BV1fK4y1M74a", width=854, height=480, fs=1) print("Video available at https://www.bilibili.com/video/{0}".format(video.id)) display(video) out1 = widgets.Output() with out1: from IPython.display import YouTubeVideo video = YouTubeVideo(id=f"PwSzRohUvck", width=854, height=480, fs=1, rel=0) print("Video available at https://youtube.com/watch?v=" + video.id) display(video) out = widgets.Tab([out1, out2]) out.set_title(0, 'Youtube') out.set_title(1, 'Bilibili') # add timing to airtable atform.add_event('Video 11: Generating the Neural Network') display(out) # - # For this example we want to have a simple neural network consisting of 3 layers: # - 1 input layer of size 2 (our points have 2 coordinates) # - 1 hidden layer of size 16 (you can play with different numbers here) # - 1 output layer of size 2 (we want the have the scores for the two classes) # # During the course you will deal with differend kinds of neural networks. On Day 2 we will focus on linear networks, but you will work with some more complicated architectures in the next days. The example here is meant to demonstrate the process of creating and training a neural network end-to-end. # # **Programing the Network** # # PyTorch provides a base class for all neural network modules called [`nn.Module`](https://pytorch.org/docs/stable/generated/torch.nn.Module.html). You need to inherit from `nn.Module` and implement some important methods: # # `__init__` # # In the `__init__` method you need to define the structure of your network. Here you will specify what layers will the network consist of, what activation functions will be used etc. # # `forward` # # All neural network modules need to implement the `forward` method. It specifies the computations the network needs to do when data is passed through it. 
# # `predict` # # This is not an obligatory method of a neural network module, but it is a good practice if you want to quickly get the most likely label from the network. It calls the `forward` method and chooses the label with the highest score. # # `train` # # This is also not an obligatory method, but it is a good practice to have. The method will be used to train the network parameters and will be implemented later in the notebook. # # # > Note that you can use the `__call__` method of a module directly and it will invoke the `forward` method: `net()` does the same as `net.forward()`. # Inherit from nn.Module - the base class for neural network modules provided by Pytorch class NaiveNet(nn.Module): # Define the structure of your network def __init__(self): super(NaiveNet, self).__init__() # The network is defined as a sequence of operations self.layers = nn.Sequential( nn.Linear(2, 16), # Transformation from the input to the hidden layer nn.ReLU(), # Activation function (ReLU) is a non-linearity which is widely used because it reduces computation. The function returns 0 if it receives any # negative input, but for any positive value x, it returns that value back. nn.Linear(16, 2), # Transformation from the hidden to the output layer ) # Specify the computations performed on the data def forward(self, x): # Pass the data through the layers return self.layers(x) # Choose the most likely label predicted by the network def predict(self, x): # Pass the data through the networks output = self.forward(x) # Choose the label with the highest score return torch.argmax(output, 1) # Train the neural network (will be implemented later) def train(self, X, y): pass # **Check that your network works** # # Create an instance of your model and visualize it # + # Create new NaiveNet and transfer it to the device model = NaiveNet().to(DEVICE) # Print the structure of the network print(model) # - # ### Coding Exercise 3.2: Classify some samples # # Now let's pass some of the points of our dataset through the network and see if it works. You should not expect the network to actually classify the points correctly, because it has not been trained yet. # # The goal here is just to get some experience with the data structures that are passed to the forward and predict methods and their results. # + ## Get the samples # X_samples = ... # print("Sample input:\n", X_samples) ## Do a forward pass of the network # output = ... # print("\nNetwork output:\n", output) ## Predict the label of each point # y_predicted = ... 
# print("\nPredicted labels:\n", y_predicted) # + [markdown] colab_type="text" # [*Click for solution*](https://github.com/NeuromatchAcademy/course-content-dl/tree/main//tutorials/W1D1_BasicsAndPytorch/solutions/W1D1_Tutorial1_Solution_af8ae0ff.py) # # # - # ``` # Sample input: # tensor([[ 0.9066, 0.5052], # [-0.2024, 1.1226], # [ 1.0685, 0.2809], # [ 0.6720, 0.5097], # [ 0.8548, 0.5122]], device='cuda:0') # # Network output: # tensor([[ 0.1543, -0.8018], # [ 2.2077, -2.9859], # [-0.5745, -0.0195], # [ 0.1924, -0.8367], # [ 0.1818, -0.8301]], device='cuda:0', grad_fn=<AddmmBackward>) # # Predicted labels: # tensor([0, 0, 1, 0, 0], device='cuda:0') # ``` # ## Section 3.3: Train Your Neural Network # # # + cellView="form" # @title Video 12: Train the Network from ipywidgets import widgets out2 = widgets.Output() with out2: from IPython.display import IFrame class BiliVideo(IFrame): def __init__(self, id, page=1, width=400, height=300, **kwargs): self.id=id src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page) super(BiliVideo, self).__init__(src, width, height, **kwargs) video = BiliVideo(id=f"BV1v54y1n7CS", width=854, height=480, fs=1) print("Video available at https://www.bilibili.com/video/{0}".format(video.id)) display(video) out1 = widgets.Output() with out1: from IPython.display import YouTubeVideo video = YouTubeVideo(id=f"4MIqnE4XPaA", width=854, height=480, fs=1, rel=0) print("Video available at https://youtube.com/watch?v=" + video.id) display(video) out = widgets.Tab([out1, out2]) out.set_title(0, 'Youtube') out.set_title(1, 'Bilibili') # add timing to airtable atform.add_event('Video 12: Train the Network') display(out) # - # Now it is time to train your network on your dataset. Don't worry if you don't fully understand everything yet - we wil cover training in much more details in the next days. For now, the goal is just to see your network in action! # # You will usually implement the `train` method directly when implementing your class `NaiveNet`. Here, we will implement it as a function outside of the class in order to have it in a ceparate cell. 
# + cellView="form" # @title Helper function to plot the decision boundary # Code adapted from this notebook: https://jonchar.net/notebooks/Artificial-Neural-Network-with-Keras/ from pathlib import Path def plot_decision_boundary(model, X, y, device): # Transfer the data to the CPU X = X.cpu().numpy() y = y.cpu().numpy() # Check if the frames folder exists and create it if needed frames_path = Path("frames") if not frames_path.exists(): frames_path.mkdir() # Set min and max values and give it some padding x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5 y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5 h = 0.01 # Generate a grid of points with distance h between them xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h)) # Predict the function value for the whole gid grid_points = np.c_[xx.ravel(), yy.ravel()] grid_points = torch.from_numpy(grid_points).type(torch.FloatTensor) Z = model.predict(grid_points.to(device)).cpu().numpy() Z = Z.reshape(xx.shape) # Plot the contour and training examples plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral) plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.binary) # + # Implement the train function given a training dataset X and correcsponding labels y def train(model, X, y): # The Cross Entropy Loss is suitable for classification problems loss_function = nn.CrossEntropyLoss() # Create an optimizer (Stochastic Gradient Descent) that will be used to train the network learning_rate = 1e-2 optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate) # Number of epochs epochs = 15000 # List of losses for visualization losses = [] for i in range(epochs): # Pass the data through the network and compute the loss # We'll use the whole dataset during the training instead of using batches # in to order to keep the code simple for now. y_logits = model.forward(X) loss = loss_function(y_logits, y) # Clear the previous gradients and compute the new ones optimizer.zero_grad() loss.backward() # Adapt the weights of the network optimizer.step() # Store the loss losses.append(loss.item()) # Print the results at every 1000th epoch if i % 1000 == 0: print(f"Epoch {i} loss is {loss.item()}") plot_decision_boundary(model, X, y, DEVICE) plt.savefig('frames/{:05d}.png'.format(i)) return losses # Create a new network instance a train it model = NaiveNet().to(DEVICE) losses = train(model, X, y) # - # **Plot the loss during training** # # Plot the loss during the training to see how it reduces and converges. plt.plot(np.linspace(1, len(losses), len(losses)), losses) plt.xlabel("Epoch") plt.ylabel("Loss") # + cellView="form" # @title Visualize the training process # @markdown ### Execute this cell! 
# !pip install imageio --quiet # !pip install pathlib --quiet import imageio from IPython.core.interactiveshell import InteractiveShell from IPython.display import Image, display from pathlib import Path InteractiveShell.ast_node_interactivity = "all" # Make a list with all images images = [] for i in range(10): filename = "frames/0"+str(i)+"000.png" images.append(imageio.imread(filename)) # Save the gif imageio.mimsave('frames/movie.gif', images) gifPath = Path("frames/movie.gif") with open(gifPath,'rb') as f: display(Image(data=f.read(), format='png')) # + cellView="form" # @title Video 13: Play with it from ipywidgets import widgets out2 = widgets.Output() with out2: from IPython.display import IFrame class BiliVideo(IFrame): def __init__(self, id, page=1, width=400, height=300, **kwargs): self.id=id src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page) super(BiliVideo, self).__init__(src, width, height, **kwargs) video = BiliVideo(id=f"BV1Cq4y1W7BH", width=854, height=480, fs=1) print("Video available at https://www.bilibili.com/video/{0}".format(video.id)) display(video) out1 = widgets.Output() with out1: from IPython.display import YouTubeVideo video = YouTubeVideo(id=f"_GGkapdOdSY", width=854, height=480, fs=1, rel=0) print("Video available at https://youtube.com/watch?v=" + video.id) display(video) out = widgets.Tab([out1, out2]) out.set_title(0, 'Youtube') out.set_title(1, 'Bilibili') # add timing to airtable atform.add_event('Video 13: Play with it') display(out) # - # ### Exercise 3.3: Tweak your Network # # You can now play around with the network a little bit to get a feeling of what different parameters are doing. Here are some ideas what you could try: # - Increase or decrease the number of epochs for training # - Increase or decrease the size of the hidden layer # - Add one additional hidden layer # # Can you get the network to better fit the data? # + cellView="form" # @title Video 14: XOR Widget from ipywidgets import widgets out2 = widgets.Output() with out2: from IPython.display import IFrame class BiliVideo(IFrame): def __init__(self, id, page=1, width=400, height=300, **kwargs): self.id=id src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page) super(BiliVideo, self).__init__(src, width, height, **kwargs) video = BiliVideo(id=f"BV1mB4y1N7QS", width=854, height=480, fs=1) print("Video available at https://www.bilibili.com/video/{0}".format(video.id)) display(video) out1 = widgets.Output() with out1: from IPython.display import YouTubeVideo video = YouTubeVideo(id=f"oTr1nE2rCWg", width=854, height=480, fs=1, rel=0) print("Video available at https://youtube.com/watch?v=" + video.id) display(video) out = widgets.Tab([out1, out2]) out.set_title(0, 'Youtube') out.set_title(1, 'Bilibili') # add timing to airtable atform.add_event('Video 14: XOR Widget') display(out) # - # Exclusive OR (XOR) logical operation gives a true (`1`) output when the number of true inputs is odd. That is, a true output result if one, and only one, of the inputs to the gate is true. If both inputs are false (`0`) or both are true or false output results. Mathematically speaking, XOR represents the inequality function, i.e., the output is true if the inputs are not alike; otherwise, the output is false. 
# # In case of two inputs ($X$ and $Y$) the following truth table is applied: # # \begin{array}{ccc} # X & Y & \text{XOR} \\ # \hline # 0 & 0 & 0 \\ # 0 & 1 & 1 \\ # 1 & 0 & 1 \\ # 1 & 1 & 0 \\ # \end{array} # # Here, with `0`, we denote `False`, and with `1` we denote `True` in boolean terms. # ### Interactive Demo 3.3: Solving XOR # # Here we use an open source and famous visualization widget developed by Tensorflow team available [here](https://github.com/tensorflow/playground). # * Play with the widget and observe that you can not solve the continuous XOR dataset. # * Now add one hidden layer with three units, play with the widget, and set weights by hand to solve this dataset perfectly. # # For the second part, you should set the weights by clicking on the connections and either type the value or use the up and down keys to change it by one increment. You could also do the same for the biases by clicking on the tiny square to each neuron's bottom left. # Even though there are infinitely many solutions, a neat solution when $f(x)$ is ReLU is: # # \begin{equation} # y = f(x_1)+f(x_2)-f(x_1+x_2) # \end{equation} # # Try to set the weights and biases to implement this function after you played enough :) # + cellView="form" # @markdown ###Play with the parameters to solve XOR from IPython.display import HTML HTML('<iframe width="1020" height="660" src="https://playground.arashash.com/#activation=relu&batchSize=10&dataset=xor&regDataset=reg-plane&learningRate=0.03&regularizationRate=0&noise=0&networkShape=&seed=0.91390&showTestData=false&discretize=false&percTrainData=90&x=true&y=true&xTimesY=false&xSquared=false&ySquared=false&cosX=false&sinX=false&cosY=false&sinY=false&collectStats=false&problem=classification&initZero=false&hideText=false" allowfullscreen></iframe>') # + cellView="form" # @markdown Do you think we can solve the discrete XOR (only 4 possibilities) with only 2 hidden units? 
w1_min_xor = 'Select' #@param ['Select', 'Yes', 'No'] if w1_min_xor == 'No': print("Correct!") else: print("How about giving it another try?") # - # --- # # Section 4: Ethics And Course Info # # + cellView="form" # @title Video 15: Ethics from ipywidgets import widgets out2 = widgets.Output() with out2: from IPython.display import IFrame class BiliVideo(IFrame): def __init__(self, id, page=1, width=400, height=300, **kwargs): self.id=id src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page) super(BiliVideo, self).__init__(src, width, height, **kwargs) video = BiliVideo(id=f"BV1Hw41197oB", width=854, height=480, fs=1) print("Video available at https://www.bilibili.com/video/{0}".format(video.id)) display(video) out1 = widgets.Output() with out1: from IPython.display import YouTubeVideo video = YouTubeVideo(id=f"Kt6JLi3rUFU", width=854, height=480, fs=1, rel=0) print("Video available at https://youtube.com/watch?v=" + video.id) display(video) out = widgets.Tab([out1, out2]) out.set_title(0, 'Youtube') out.set_title(1, 'Bilibili') display(out) # + cellView="form" # @title Video 16: Be a group from ipywidgets import widgets out2 = widgets.Output() with out2: from IPython.display import IFrame class BiliVideo(IFrame): def __init__(self, id, page=1, width=400, height=300, **kwargs): self.id=id src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page) super(BiliVideo, self).__init__(src, width, height, **kwargs) video = BiliVideo(id=f"BV1j44y1272h", width=854, height=480, fs=1) print("Video available at https://www.bilibili.com/video/{0}".format(video.id)) display(video) out1 = widgets.Output() with out1: from IPython.display import YouTubeVideo video = YouTubeVideo(id=f"Sfp6--d_H1A", width=854, height=480, fs=1, rel=0) print("Video available at https://youtube.com/watch?v=" + video.id) display(video) out = widgets.Tab([out1, out2]) out.set_title(0, 'Youtube') out.set_title(1, 'Bilibili') display(out) # + cellView="form" # @title Video 17: Syllabus from ipywidgets import widgets out2 = widgets.Output() with out2: from IPython.display import IFrame class BiliVideo(IFrame): def __init__(self, id, page=1, width=400, height=300, **kwargs): self.id=id src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page) super(BiliVideo, self).__init__(src, width, height, **kwargs) video = BiliVideo(id=f"BV1iB4y1N7uQ", width=854, height=480, fs=1) print("Video available at https://www.bilibili.com/video/{0}".format(video.id)) display(video) out1 = widgets.Output() with out1: from IPython.display import YouTubeVideo video = YouTubeVideo(id=f"cDvAqG_hAvQ", width=854, height=480, fs=1, rel=0) print("Video available at https://youtube.com/watch?v=" + video.id) display(video) out = widgets.Tab([out1, out2]) out.set_title(0, 'Youtube') out.set_title(1, 'Bilibili') display(out) # - # Meet our lecturers: # # Week 1: the building blocks # * [<NAME>](https://kordinglab.com) # * [<NAME>](https://www.saxelab.org/) # * [<NAME>](https://ganguli-gang.stanford.edu/) # * [<NAME>](http://mitliagkas.github.io/) # * [<NAME>](https://www.cis.upenn.edu/~ungar/) # # Week 2: making things work # * [<NAME>](https://webdocs.cs.ualberta.ca/~alona/) # * [<NAME>](https://eckerlab.org/) # * [<NAME>](https://sociology.uchicago.edu/directory/james-evans) # * [<NAME>](https://hhexiy.github.io/) # * [<NAME>](https://tnel.ucsd.edu/bio) and [<NAME>](https://akashgit.github.io/) # # Week 3: more magic # * 
[<NAME>](https://contrastiveconvergence.net/~timothylillicrap/index.php) and [Bl<NAME>](https://www.mcgill.ca/neuro/blake-richards-phd) # * [<NAME>](http://www.janexwang.com/) and [<NAME>](https://feryal.github.io/) # * [<NAME>](https://contrastiveconvergence.net/~timothylillicrap/index.php) and [Bl<NAME>ards](https://www.mcgill.ca/neuro/blake-richards-phd) # * [<NAME>](https://jovo.me/) and [<NAME>](https://www.vincenzolomonaco.com/) # # Now, go to the [visualization of ICLR papers](https://iclr.cc/virtual/2021/paper_vis.html). Read a few abstracts. Look at the various clusters. Where do you see yourself in this map? # # --- # # Submit to Airtable # + cellView="form" # @title Video 18: Submission info from ipywidgets import widgets out2 = widgets.Output() with out2: from IPython.display import IFrame class BiliVideo(IFrame): def __init__(self, id, page=1, width=400, height=300, **kwargs): self.id=id src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page) super(BiliVideo, self).__init__(src, width, height, **kwargs) video = BiliVideo(id=f"BV1e44y127ti", width=854, height=480, fs=1) print("Video available at https://www.bilibili.com/video/{0}".format(video.id)) display(video) out1 = widgets.Output() with out1: from IPython.display import YouTubeVideo video = YouTubeVideo(id=f"JwTn7ej2dq8", width=854, height=480, fs=1, rel=0) print("Video available at https://youtube.com/watch?v=" + video.id) display(video) out = widgets.Tab([out1, out2]) out.set_title(0, 'Youtube') out.set_title(1, 'Bilibili') display(out) # - # This is Darryl, the Deep Learning Dapper Lion, and he's here to teach you about content submission to airtable. # <br> # <img src="https://raw.githubusercontent.com/NeuromatchAcademy/course-content-dl/main/tutorials/static/DapperLion.png" alt="Darryl"> # <br><br> # At the end of each tutorial there will be an <b>Airtable Submission</b> Cell. Run the cell to generate the airtable submission button and click on it to submit your information to airtable. # <br><br> # if it is the last tutorial of the day your button will look like this and take you to the end of day survey: # <br> # <img src="https://raw.githubusercontent.com/NeuromatchAcademy/course-content-dl/main/tutorials/static/SurveyButton.png?raw=1" alt="Survey Button"> # # otherwise it look like this: # <br> # <img src="https://raw.githubusercontent.com/NeuromatchAcademy/course-content-dl/main/tutorials/static/AirTableSubmissionButton.png?raw=1" alt="Submission Button"> # <br><br> # # It is critical that you push the submit button for every tutorial you run. <b><u> even if you don't finish the tutorial, still submit!</b></u> # Submitting is the only way we can verify that you attempted each tutorial, which is critical for the award of your completion certificate at the end of the course. # <br><br><br> # # Finally, we try to keep the airtable code as hidden as possible, but if you ever see any calls to `atform` such as `atform.add_event()` in the coding exercises, just know that is for saving airtable information only.<b> It will not affect the code that is being run around it in any way</b> , so please do not modify, comment out, or worry about any of those lines of code. # <br><br><br> # Now, lets try submitting today's course to Airtable by running the next cell and clicking the button when it appears. 
# + cellView="form" # @title Airtable Submission Link from IPython import display display.HTML( f""" <div> <a href= "{atform.url()}" target="_blank"> <img src="https://github.com/NeuromatchAcademy/course-content-dl/blob/main/tutorials/static/SurveyButton.png?raw=1" alt="button link to survey" style="width:410px"></a> </div>""" ) # - # --- # # Bonus - 60 years of Machine Learning Research in one Plot # # by [<NAME>](http://hendrik.strobelt.com) (MIT-IBM Watson AI Lab) with support from <NAME>. # # In this notebook we visualize a subset* of 3,300 articles retreived from the AllenAI [S2ORC dataset](https://github.com/allenai/s2orc). We represent each paper by a position that is output of a dimensionality reduction method applied to a vector representation of each paper. The vector representation is output of a neural network. # # *The selection is very biased on the keywords and methodology we used to filter. Please see the details section to learn about what we did. # + cellView="form" # @title Import `altair` and load the data # !pip install altair vega_datasets --quiet import altair as alt # altair is defining data visualizations # Source data files # Position data file maps ID to x,y positions POS_FILE = 'http://gltr.io/temp/ml_regexv1_cs_ma_citation+_99perc.pos_umap_cosine_100_d0.1.json' # Metadata file maps ID to title, abstract, author,.... META_FILE = 'http://gltr.io/temp/ml_regexv1_cs_ma_citation+_99perc_clean.csv' # data loading and wrangling def load_data(): positions = pd.read_json(POS_FILE) positions[['x', 'y']] = positions['pos'].to_list() meta = pd.read_csv(META_FILE) return positions.merge(meta, left_on='id', right_on='paper_id') # load data data = load_data() # + cellView="form" # @title Define Visualization using ALtair YEAR_PERIOD = "quinquennial" # @param selection = alt.selection_multi(fields=[YEAR_PERIOD], bind='legend') data[YEAR_PERIOD] = (data["year"] / 5.0).apply(np.floor) * 5 chart = alt.Chart(data[["x", "y", "authors", "title", YEAR_PERIOD, "citation_count"]], width=800, height=800).mark_circle(radius=2, opacity=0.2).encode( alt.Color(YEAR_PERIOD+':O', scale=alt.Scale(scheme='viridis', reverse=False, clamp=True, domain=list(range(1955,2020,5))), # legend=alt.Legend(title='Total Records') ), alt.Size('citation_count', scale=alt.Scale(type="pow", exponent=1, range=[15, 300]) ), alt.X('x:Q', scale=alt.Scale(zero=False), axis=alt.Axis(labels=False) ), alt.Y('y:Q', scale=alt.Scale(zero=False), axis=alt.Axis(labels=False) ), tooltip=['title', 'authors'], # size='citation_count', # color="decade:O", opacity=alt.condition(selection, alt.value(.8), alt.value(0.2)), ).add_selection( selection ).interactive() # - # Lets look at the Visualization. Each dot represents one paper. Close dots mean that the respective papers are closer related than distant ones. The color indicates the 5-year period of when the paper was published. The dot size indicates the citation count (within S2ORC corpus) as of July 2020. # # The view is **interactive** and allows for three main interactions. Try them and play around. # 1. hover over a dot to see a tooltip (title, author) # 2. select a year in the legend (right) to filter dots # 2. zoom in/out with scroll -- double click resets view chart # ## Questions # # By playing around, can you find some answers to the follwing questions? # # 1. Can you find topical clusters? What cluster might occur because of a filtering error? # 2. Can you see a temporal trend in the data and clusters? # 2. 
Can you determine when deep learning methods started booming ? # 3. Can you find the key papers that where written before the DL "winter" that define milestones for a cluster? (tipp: look for large dots of different color) # ## Methods # # Here is what we did: # 1. Filtering of all papers who fullfilled the criterria: # - are categorized as `Computer Science` or `Mathematics` # - one of the following keywords appearing in title or abstract: `"machine learning|artificial intelligence|neural network|(machine|computer) vision|perceptron|network architecture| RNN | CNN | LSTM | BLEU | MNIST | CIFAR |reinforcement learning|gradient descent| Imagenet "` # 2. per year, remove all papers that are below the 99 percentile of citation count in that year # 3. embed each paper by using abstract+title in SPECTER model # 4. project based on embedding using UMAP # 5. visualize using Altair # ### Find Authors # + cellView="form" # @title Edit the `AUTHOR_FILTER` variable to full text search for authors. AUTHOR_FILTER = "Rush " # @param space at the end means "word border" ### Don't ignore case when searching... FLAGS = 0 ### uncomment do ignore case # FLAGS = re.IGNORECASE ## --- FILTER CODE.. make it your own --- import re data['issel'] = data['authors'].str.contains(AUTHOR_FILTER, na=False, flags=FLAGS, ) if data['issel'].mean()<0.0000000001: print('No match found') ## --- FROM HERE ON VIS CODE --- alt.Chart(data[["x", "y", "authors", "title", YEAR_PERIOD, "citation_count", "issel"]], width=800, height=800) \ .mark_circle(stroke="black", strokeOpacity=1).encode( alt.Color(YEAR_PERIOD+':O', scale=alt.Scale(scheme='viridis', reverse=False), # legend=alt.Legend(title='Total Records') ), alt.Size('citation_count', scale=alt.Scale(type="pow", exponent=1, range=[15, 300]) ), alt.StrokeWidth('issel:Q', scale=alt.Scale(type="linear", domain=[0,1], range=[0, 2]), legend=None), alt.Opacity('issel:Q', scale=alt.Scale(type="linear", domain=[0,1], range=[.2, 1]), legend=None), alt.X('x:Q', scale=alt.Scale(zero=False), axis=alt.Axis(labels=False) ), alt.Y('y:Q', scale=alt.Scale(zero=False), axis=alt.Axis(labels=False) ), tooltip=['title', 'authors'], ).interactive() # - # --- # # Appendix # # ## Official PyTorch resources: # ### Tutorials # https://pytorch.org/tutorials/ # # ### Documentation # # https://pytorch.org/docs/stable/tensors.html (tensor methods) # # https://pytorch.org/docs/stable/tensors.html#torch.Tensor.view (The view method in particular) # # https://pytorch.org/vision/stable/datasets.html (pre-loaded image datasets) # # ## Google Colab Resources: # https://research.google.com/colaboratory/faq.html (FAQ including guidance on GPU usage) # # ## Books for reference: # # https://www.deeplearningbook.org/ (Deep Learning by <NAME>, <NAME> and <NAME>) # #
tutorials/W1D1_BasicsAndPytorch/student/W1D1_Tutorial1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.6 - AzureML # language: python # name: python3-azureml # --- # Copyright (c) Microsoft Corporation. All rights reserved. # # Licensed under the MIT License. # + [markdown] nbpresent={"id": "bf74d2e9-2708-49b1-934b-e0ede342f475"} # # HyperParameter tuning # # - # Let's get started. First let's import some Python libraries. # + nbpresent={"id": "c377ea0c-0cd9-4345-9be2-e20fb29c94c3"} # %matplotlib inline import numpy as np import os import matplotlib import matplotlib.pyplot as plt # + nbpresent={"id": "edaa7f2f-2439-4148-b57a-8c794c0945ec"} import azureml from azureml.core import Workspace, Run # check core SDK version number print("Azure ML SDK Version: ", azureml.core.VERSION) # - # ## Initialize workspace # Initialize a [Workspace](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#workspace) object from the existing workspace you created in the Prerequisites step. `Workspace.from_config()` creates a workspace object from the details stored in `config.json`. # + from azureml.core.workspace import Workspace ws = Workspace.from_config() print('Workspace name: ' + ws.name, 'Azure region: ' + ws.location, 'Subscription id: ' + ws.subscription_id, 'Resource group: ' + ws.resource_group, sep = '\n') # + [markdown] nbpresent={"id": "59f52294-4a25-4c92-bab8-3b07f0f44d15"} # ## Create an Azure ML experiment # Let's create an experiment and a folder to hold the training scripts. The script runs will be recorded under the experiment in Azure. # + nbpresent={"id": "bc70f780-c240-4779-96f3-bc5ef9a37d59"} from azureml.core import Experiment script_folder = './keras-cifar10' exp = Experiment(workspace=ws, name='keras-cifar10') # - # ## Create Batch AI cluster as compute target # [Batch AI](https://docs.microsoft.com/en-us/azure/batch-ai/overview) is a service for provisioning and managing clusters of Azure virtual machines for running machine learning workloads. Let's create a new Batch AI cluster in the current workspace, if it doesn't already exist. We will then run the training script on this compute target. # ## Intelligent hyperparameter tuning # We have trained the model with one set of hyperparameters, now let's how we can do hyperparameter tuning by launching multiple runs on the cluster. First let's define the parameter space using random sampling. # + from azureml.train.hyperdrive import * ps = RandomParameterSampling( { '--batch-size': choice(25, 50, 100), '--decay': choice(1e-7, 1e-6, 1e-5), '--learning-rate': choice(1e-5, 1e-4, 1e-3) } ) # - # Next, we will create a new estimator without the above parameters since they will be passed in later. Note we still need to keep the `data-folder` parameter since that's not a hyperparamter we will sweep. est = TensorFlow(source_directory=script_folder, script_params={'--data-folder': ws.get_default_datastore().as_mount(), '--epochs': 25}, compute_target=compute_target, entry_script='cifar10_azureml.py', conda_packages=['keras', 'h5py'], use_gpu=True) # Now we will define an early termnination policy. The `BanditPolicy` basically states to check the job every 2 iterations. If the primary metric (defined later) falls outside of the top 10% range, Azure ML terminate the job. This saves us from continuing to explore hyperparameters that don't show promise of helping reach our target metric. 
policy = BanditPolicy(evaluation_interval=2, slack_factor=0.1)

# Now we are ready to configure a run configuration object, and specify the primary metric `validation_acc` that's recorded in your training runs. If you go back to visit the training script, you will notice that this value is being logged after every epoch (a full batch set). We also want to tell the service that we are looking to maximize this value. We also set the number of samples to 20, and the maximal number of concurrent jobs to 4, which is the same as the number of nodes in our compute cluster.

htc = HyperDriveRunConfig(estimator=est,
                          hyperparameter_sampling=ps,
                          primary_metric_name='validation_acc',
                          primary_metric_goal=PrimaryMetricGoal.MAXIMIZE,
                          max_total_runs=20,
                          max_concurrent_runs=4)

# Finally, let's launch the hyperparameter tuning job.

htr = exp.submit(config=htc)

# We can use a run history widget to show the progress. Be patient as this might take a while to complete.

from azureml.widgets import RunDetails
RunDetails(htr).show()

htr.wait_for_completion(show_output=True)

# ## Find and register best model
# When all the jobs finish, we can find out the one that has the highest accuracy.

best_run = htr.get_best_run_by_primary_metric()
print(best_run)

# Now let's list the model files uploaded during the run.

print(best_run.get_file_names())

# We can then register the folder (and all files in it) as a model named `tf-dnn-mnist` under the workspace for deployment.

model = best_run.register_model(model_name='tf-dnn-mnist', model_path='outputs/model')
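# Note: the estimator above is configured with a `compute_target` that is referenced but never created in this excerpt. A minimal provisioning sketch is shown below; it assumes the `AmlCompute` API and a hypothetical cluster name `gpu-cluster` (the original text describes a Batch AI cluster), so adapt it to your workspace and run a cell like this before constructing the estimator.

# +
from azureml.core.compute import ComputeTarget, AmlCompute
from azureml.core.compute_target import ComputeTargetException

cluster_name = 'gpu-cluster'
try:
    # Reuse the cluster if it already exists in the workspace
    compute_target = ComputeTarget(workspace=ws, name=cluster_name)
    print('Found existing compute target.')
except ComputeTargetException:
    # Otherwise provision a small GPU cluster (4 nodes matches max_concurrent_runs above)
    config = AmlCompute.provisioning_configuration(vm_size='STANDARD_NC6', max_nodes=4)
    compute_target = ComputeTarget.create(ws, cluster_name, config)
    compute_target.wait_for_completion(show_output=True)
# -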
Tutorials/MLADS-fall-2018/demo/train-hyperparameter.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import tweepy import json import pandas as pd import csv #Twitter API credentials consumer_key = 'pe7gsS8WNkANobhPvKU5q9PPv' consumer_secret = '<KEY>' access_token = '<KEY>' access_token_secret = '<KEY>' # + #para escribir sobre un txt (tomar decision si txt csv o database) #file = open('tweets.txt', 'a') #Definicion palabras claves busqueda keywords = ['ODS', 'sostenibilidad'] class MyStreamListener(tweepy.StreamListener): def on_data(self,data): # Twitter returns data in JSON format - we need to decode it first try: decoded = json.loads(data) except Exception as e: print ("Error on_data: %s" % str(e)) #we don't want the listener to stop return True #En caso de estar geolocalizado guardar la geolocalizacion #Si esta geolocalizado dentro de un bounding box (no exacta) if decoded.get('place') is not None: place_type = decoded.get('place').get('place_type') place_name =decoded.get('place').get('name') #Si es localizacion exacta if decoded.get('geo') is not None: coord = decoded.get('geo').get('coordinates') else: place_type = 'No found' place_name = 'No found' coord = 'No found' user_name = '@' + decoded.get('user').get('screen_name') #nombre cuenta @itdUPM user_id=decoded.get('user').get('id') #id de la cuenta (int) created_at = decoded.get('created_at') #Fecha text = decoded['text'].replace('\n',' ') #Contenido tweet tweet_id = decoded['id'] #tweet id (int64) source = decoded['source'] #string source (web client, android, iphone) interesante??? reply_count = decoded['reply_count'] #Number of times this Tweet has been replied to retweet_count = decoded['retweet_count'] #Number of times this Tweet has been retweeted favorite_count = decoded['favorite_count'] #how many times this Tweet has been liked by Twitter users. #in_reply_to_status_id Si es una respuesta id del tweet al que responde #in_reply_to_user_id Si es una respuesta user_id al que responde #in_reply_to_screen_name Si es una respuesta screen_name al que responde tweet = ['%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s\n' % (tweet_id,user_name,user_id,created_at,text,source, reply_count,retweet_count,favorite_count, place_type,place_name,coord )] # Writing tweets data with open('OutputStreamingKeywords.txt', 'a') as f: writer = csv.writer(f) writer.writerow(tweet) print(tweet) return True def on_error(self, status_code): if status_code == 420: #returning False in on_error disconnects the stream return False # returning non-False reconnects the stream, with backoff. if __name__ == '__main__': print ('Starting') #authorize twitter, initialize tweepy auth = tweepy.OAuthHandler(consumer_key, consumer_secret) auth.set_access_token(access_token, access_token_secret) api = tweepy.API(auth, wait_on_rate_limit=True) #create the api and the stream object myStreamListener = MyStreamListener() myStream = tweepy.Stream(auth = api.auth, listener=myStreamListener) #Filter the stream by keywords myStream.filter(track = keywords) # Writing csv titles with open('OutputStreamingKeywords.txt', 'w') as f: writer = csv.writer(f) writer.writerow(['tweet_id','user_name','user_id','created_at','text','source', 'reply_count','retweet_count','favorite_count', 'place_type','place_name','coord']) # -
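# One ordering detail worth flagging in the cell above: `myStream.filter(track=keywords)` blocks while streaming, so the header-writing block placed after it is only reached once the stream stops, and opening the file in `'w'` mode at that point would overwrite the rows already collected. A minimal re-ordering sketch, reusing the same file name, column list and objects defined above:

# +
# Write the CSV header once, before the stream starts appending rows
with open('OutputStreamingKeywords.txt', 'w') as f:
    writer = csv.writer(f)
    writer.writerow(['tweet_id', 'user_name', 'user_id', 'created_at', 'text', 'source',
                     'reply_count', 'retweet_count', 'favorite_count',
                     'place_type', 'place_name', 'coord'])

# ...then create the listener and start filtering (this call blocks)
myStreamListener = MyStreamListener()
myStream = tweepy.Stream(auth=api.auth, listener=myStreamListener)
myStream.filter(track=keywords)
# -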
Twitter_bot_keywords_v1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import nltk nltk.download('punkt') nltk.download('stopwords') # # Stop Words # Combine the steps you learned so far to normalize, tokenize, and remove stop words from the text below. # # **Note: All solution notebooks can be found by clicking on the Jupyter icon on the top left of this workspace.** # import statements from nltk.tokenize import word_tokenize text = "The first time you see The Second Renaissance it may look boring. Look at it at least twice and definitely watch part 2. It will change your view of the matrix. Are the human people the ones who started the war ? Is AI a bad thing ?" print(text) # Normalize text text = text.lower() print(text) # Tokenize text words = word_tokenize(text) print(words) # + from nltk.corpus import stopwords # Remove stop words words = [w for w in words if w not in stopwords.words('english')] print(words) # - # Take a look at the stop words included in nltk's corpus! print(stopwords.words("english"))
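# The three steps above (normalize, tokenize, remove stop words) are often bundled into one reusable helper so the same pipeline can be applied to any document; a minimal sketch (the function name `tokenize` is just an example):

# +
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize


def tokenize(text):
    """Normalize, tokenize and remove English stop words from a string."""
    words = word_tokenize(text.lower())
    return [w for w in words if w not in stopwords.words('english')]


print(tokenize("The first time you see The Second Renaissance it may look boring."))
# -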
lessons/5.2 NLP_Pipelines/04 stop_words_practice.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/namanphy/Controllable-Image-Captioning-App/blob/master/S5/eva_session5%20-%20iter%202.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="2rN1Sty-UEQH" # # # Iteration 2 # # # + [markdown] id="o3IkbGWWvpSN" # # Target # 1. To reduce the number of parameters. # # Results # 1. parameters = 8,734 # 2. Best Train Accuracy = 99.34 # 3. Best Test Accuracy = 99.00 # # Analysis # 1. Good training accuracy with reduced number of parameters. # 2. Overfitting - The model is still overfitting. # 3. Also the best training and test accuracies are reduced. # 4. Number of model parameters is under required target. # # --- # + colab={"base_uri": "https://localhost:8080/"} id="0m2JWFliFfKT" outputId="12118685-fff4-471e-df02-0de0f6ebf1b3" import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim from torchvision import datasets, transforms # !pip install torchsummary from torchsummary import summary # + [markdown] id="yxRPMAKpIdY6" # ## Getting to know our data # + id="nKGRFC-IKoh8" # Downloading the dataset and setting up dataloader for visualisationn only train_set = datasets.MNIST('../data', train=True, download=True, transform=transforms.ToTensor()) test_set = datasets.MNIST('../data', train=False, transform=transforms.ToTensor()) train_loader = torch.utils.data.DataLoader(train_set, batch_size=64, shuffle=True) # + colab={"base_uri": "https://localhost:8080/", "height": 554} id="EbqFY5LuItVR" outputId="c7c8fb9b-931c-4d31-db54-b326d6f23fc0" # We'd need to convert it into Numpy! Remember above we have converted it into tensors already train_data = train_set.train_data train_data = train_set.transform(train_data.numpy()) print('[Train]') print(' - Numpy Shape:', train_set.train_data.cpu().numpy().shape) print(' - Tensor Shape:', train_set.train_data.size()) print(' - min:', torch.min(train_data)) print(' - max:', torch.max(train_data)) print(' - mean:', torch.mean(train_data)) print(' - std:', torch.std(train_data)) print(' - var:', torch.var(train_data)) dataiter = iter(train_loader) images, labels = dataiter.next() print(images.shape) print(labels.shape) # Let's visualize some of the images # %matplotlib inline import matplotlib.pyplot as plt plt.imshow(images[0].numpy().squeeze(), cmap='gray_r') # + colab={"base_uri": "https://localhost:8080/", "height": 246} id="JMv1Ep1bLPIe" outputId="cafb3928-ddad-4910-c9bd-52591a229d3e" figure = plt.figure() num_of_images = 60 for index in range(1, num_of_images + 1): plt.subplot(6, 10, index) plt.axis('off') plt.imshow(images[index].numpy().squeeze(), cmap='gray_r') # + [markdown] id="lYp1u6p0IKQE" # ## Setting up Dataloaders and Transformations # # + id="Vn8BEDoTXIPR" # Train Phase transformations train_transforms = transforms.Compose([ # transforms.Resize((28, 28)), # transforms.ColorJitter(brightness=0.10, contrast=0.1, saturation=0.10, hue=0.1), transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,)) # The mean and std have to be sequences (e.g., tuples), therefore you should add a comma after the values. 
# Note the difference between (0.1307) and (0.1307,) ]) # Test Phase transformations test_transforms = transforms.Compose([ # transforms.Resize((28, 28)), # transforms.ColorJitter(brightness=0.10, contrast=0.1, saturation=0.10, hue=0.1), transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,)) ]) # + id="L85gYyFtX-I5" use_cuda = torch.cuda.is_available() device = torch.device("cuda" if use_cuda else "cpu") # + id="DqTWLaM5GHgH" torch.manual_seed(1) if use_cuda: torch.cuda.manual_seed(1) batch_size = 128 kwargs = {'num_workers': 4, 'pin_memory': True} if use_cuda else {} train_set = datasets.MNIST('../data', train=True, download=True, transform=train_transforms) test_set = datasets.MNIST('../data', train=False, transform=test_transforms) train_loader = torch.utils.data.DataLoader(train_set , batch_size=batch_size, shuffle=True, **kwargs) test_loader = torch.utils.data.DataLoader(test_set, batch_size=batch_size, shuffle=True, **kwargs) # + [markdown] id="eA75paYdIdAq" # ## Making our model # + id="h_Cx9q2QFgM7" class Net(nn.Module): def __init__(self): super(Net, self).__init__() self.conv1 = nn.Sequential( nn.Conv2d(1, 10, 3, padding=1, bias=False), # RF - 3x3 nn.ReLU(), # RF - 3x3 nn.Conv2d(10, 10, 3, bias=False), # RF - 5x5 nn.ReLU(), # RF - 5x5 nn.MaxPool2d(2, 2) # RF - 10x10 ) self.conv2 = nn.Sequential( nn.Conv2d(10, 16, 3, padding=1, bias=False), # RF - 12x12 nn.ReLU(), # RF - 12x12 nn.Conv2d(16, 16, 3, bias=False), # RF - 14x14 nn.ReLU(), # RF - 14x14 nn.MaxPool2d(2, 2) # RF - 28x28 ) self.conv3 = nn.Sequential( nn.Conv2d(16, 10, 5, bias=False) ) def forward(self, x): x = self.conv1(x) x = self.conv2(x) x = self.conv3(x) # x = x.mean(dim=[-2,-1]) x = x.view(-1, 10) return F.log_softmax(x) # + colab={"base_uri": "https://localhost:8080/"} id="xdydjYTZFyi3" outputId="3a724d4d-5840-43f6-8ddd-cb0cf625448a" model = Net().to(device) summary(model, input_size=(1, 28, 28)) # + id="8fDefDhaFlwH" from tqdm import tqdm train_losses = [] test_losses = [] train_acc = [] test_acc = [] def train(model, device, train_loader, optimizer, epoch): print(f"EPOCH - {epoch}") model.train() pbar = tqdm(train_loader, position=0, leave=True) correct = 0 processed = 0 for batch_idx, (data, target) in enumerate(pbar): data, target = data.to(device), target.to(device) optimizer.zero_grad() output = model(data) loss = F.nll_loss(output, target) train_losses.append(loss) loss.backward() optimizer.step() pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability correct += pred.eq(target.view_as(pred)).sum().item() processed += len(data) pbar.set_description(desc= f'Loss={loss.item()} Batch_id={batch_idx} Accuracy={100*correct/processed:0.2f}') train_acc.append(100*correct/processed) def test(model, device, test_loader): model.eval() test_loss = 0 correct = 0 with torch.no_grad(): for data, target in test_loader: data, target = data.to(device), target.to(device) output = model(data) test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability correct += pred.eq(target.view_as(pred)).sum().item() test_loss /= len(test_loader.dataset) test_losses.append(test_loss) print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format( test_loss, correct, len(test_loader.dataset), 100. * correct / len(test_loader.dataset))) test_acc.append(100. 
* correct / len(test_loader.dataset)) # + colab={"base_uri": "https://localhost:8080/"} id="SYzgAz-KdaXK" outputId="f22dbe53-f87b-429a-e6c6-7dc8cc1f0882" data, target = next(iter(train_loader)) print(data.size(), target.size()) # + colab={"background_save": true, "base_uri": "https://localhost:8080/"} id="MMWbLWO6FuHb" outputId="fdf2129d-4c5e-4d2c-eac7-21616fe6b9f5" model = Net().to(device) optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9) for epoch in range(1, 16): train(model, device, train_loader, optimizer, epoch) test(model, device, test_loader) # + colab={"base_uri": "https://localhost:8080/", "height": 570} id="QvUCjt7mRAxd" outputId="e1021e30-ba6f-42ea-c3a9-bcffcda16f9d" fig, axs = plt.subplots(2,2,figsize=(15,10)) axs[0, 0].plot(train_losses) axs[0, 0].set_title("Training Loss") axs[1, 0].plot(train_acc) axs[1, 0].set_title("Training Accuracy") axs[0, 1].plot(test_losses) axs[0, 1].set_title("Test Loss") axs[1, 1].plot(test_acc) axs[1, 1].set_title("Test Accuracy") # + id="8HGK8Z_0ytdD"
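# Quick cross-check of the parameter count reported in the "Results" section above
# (torchsummary already prints it; this one-liner recomputes it from the model directly).
total_params = sum(p.numel() for p in model.parameters())
print(f'Total parameters: {total_params}')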
S5/eva_session5 - iter 2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Feature Encoding
#
# The goal of this notebook is to demonstrate the concept of feature encoding. In this introduction to the topic we will show the two most widely used feature encoding techniques: **label encoding** and **one hot encoding**

# +
import numpy as np
import pandas as pd

from sklearn.preprocessing import LabelEncoder
# -

# ## Loading the dataset

# +
df = pd.read_csv('titanic_limpo.csv')

# Let's drop the PassengerId and Name columns, since they are unique and don't give us much information right now
df.drop(columns=['PassengerId', 'Name'], inplace=True)
# -

df.shape

df.head()

# Let's look at the data type of each variable

# ## Exploring

df.dtypes

# Now, for each categorical feature, let's check its **cardinality**, that is, the number of unique values the variable takes

for col in df:
    if df[col].dtype == object:
        print(col, df[col].nunique())

# ## Label Encoding
#
# - In label encoding each unique value is mapped to an integer that replaces that value throughout the column
# - Label encoding is recommended when the model being used is tree based

# +
df_label = df.copy(deep=True)  # Copying our original dataframe

for col in df_label.columns:
    if df_label[col].dtype != object:
        continue
    encoder = LabelEncoder()
    encoder.fit(df_label[col])
    df_label[col] = encoder.transform(df_label[col])

df_label.head()
# -

# We can see that the Sex, Ticket and Embarked columns had their values replaced by integers

# ## One-Hot Encoding
#
# - One-hot encoding creates a column for each value the feature takes, with value 1 when the instance had that value and 0 otherwise
# - You need to be careful with the cardinality of the features, since a new column is created for each unique value
# - One-hot encoding is used with linear classifiers, kNN and neural networks

# The simplest way to apply One-Hot Encoding is with the Pandas *get_dummies* function. Just pass the DataFrame to this function and it returns the categorical columns with the encoding already applied. Note: it is worth checking whether this could cause a memory problem, since a copy of the DataFrame will be made.
#
# (Tip: look into the OneHotEncoder and LabelBinarizer classes from sklearn, which also do this task. It is a good way to get a better understanding of how the numpy, pandas and sklearn data structures talk to each other. A short sketch is included at the end of this notebook.)

df_one_hot = pd.get_dummies(df)
df_one_hot.head()

# From the Sex and Embarked features it looks like everything worked as expected. For the Ticket column we can see that the encoding was indeed applied, but given its cardinality it may not have been the best possible choice
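# Following up on the tip above about scikit-learn's `OneHotEncoder`: a minimal sketch of how it could be applied to the low-cardinality categorical columns (the choice of the Sex and Embarked columns here is just an illustration):

# +
from sklearn.preprocessing import OneHotEncoder

# Fit on the low-cardinality categorical columns and inspect the result
ohe = OneHotEncoder(handle_unknown='ignore')
encoded = ohe.fit_transform(df[['Sex', 'Embarked']]).toarray()

print(ohe.categories_)   # the categories found for each column
print(encoded[:5])       # one indicator column per category
# -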
Aula05/feature-encoding.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Set up
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

# #### a)
# **Load the IRIS data.**

from sklearn import datasets
iris = datasets.load_iris()

# +
# iris.data
# iris.feature_names
# iris.target_names
# iris.target
# -

# #### b)
# **Present the loaded data on a plot as shown below (pay attention to the colors and the marker type, the axis labels and the plot title):**
#

# +
sepal_length = iris.data[:,0]
sepal_width = iris.data[:,1]

plt.title('Iris')
plt.xlabel('sepal length (cm)')
plt.ylabel('sepal width (cm)')
plt.xlim(4.0, 8.0)
plt.ylim(2.0, 4.5)

plt.scatter(sepal_length, sepal_width, c='red', s=10)
# -

# #### c)
# **Change the axis ranges: the X axis should show values from 3 to 9, and the Y axis from 1 to 5.**

plt.xlim(3.0, 9.0)
plt.ylim(1.0, 5)
plt.scatter(sepal_length, sepal_width, c='red', s=10)

# #### d)
# **Change the tick marks on the axes so that only whole numbers are labeled.**

# +
from matplotlib.ticker import MaxNLocator

iris_plot = plt.figure().gca()
iris_plot.yaxis.set_major_locator(MaxNLocator(integer=True))
iris_plot.xaxis.set_major_locator(MaxNLocator(integer=True))
iris_plot.set_xlim(3, 9)
iris_plot.set_ylim(1, 5)
iris_plot.scatter(sepal_length, sepal_width, c='red', s=10)
# -

# #### e)
# **Mark each iris species with a different color.**

# +
from matplotlib.ticker import MaxNLocator
from matplotlib.colors import ListedColormap

cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])

iris_plot.clear()
iris_plot.yaxis.set_major_locator(MaxNLocator(integer=True))
iris_plot.xaxis.set_major_locator(MaxNLocator(integer=True))
iris_plot.set_xlim(3, 9)
iris_plot.set_ylim(1, 5)

iris_y = np.array(iris['target'])

iris_plot.scatter(sepal_length, sepal_width, c=iris_y, s=10, cmap=cmap_bold)
iris_plot.figure
# -

# #### f)
# **Save the figure to the file zadanie1.png.**

iris_plot.figure.savefig('zadanie1.png')
Data Mining/Assignment_1/Task_3.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# %matplotlib inline
import pyvista
from pyvista import set_plot_theme
set_plot_theme('document')

pyvista._wrappers['vtkPolyData'] = pyvista.PolyData

# Terrain Following Mesh {#terrain_following_mesh_example}
# ======================
#
# Use a topographic surface to create a 3D terrain-following mesh.
#
# Terrain following meshes are common in the environmental sciences, for
# instance in hydrological modelling (see [Maxwell
# 2013](https://www.sciencedirect.com/science/article/abs/pii/S0309170812002564)
# and [ParFlow](https://parflow.org)).
#
# In this example, we demonstrate a simple way to make a 3D grid/mesh that
# follows a given topographic surface. In this example, it is important to
# note that the given digital elevation model (DEM) is structured (gridded
# and not triangulated): this is common for DEMs.
#

# sphinx_gallery_thumbnail_number = 3
import pyvista as pv
import numpy as np
from pyvista import examples

# Download a gridded topography surface (DEM)
#

dem = examples.download_crater_topo()
dem

# Now let's subsample and extract an area of interest to make this
# example simple (also the DEM we just loaded is pretty big). Since the DEM
# we loaded is a `pyvista.UniformGrid`{.interpreted-text role="class"}
# mesh, we can use the
# `pyvista.UniformGridFilters.extract_subset`{.interpreted-text
# role="func"} filter:
#

subset = dem.extract_subset((500, 900, 400, 800, 0, 0), (5,5,1))
subset.plot(cpos="xy")

# Now that we have a region of interest for our terrain following mesh,
# let's make a 3D surface of that DEM:
#

terrain = subset.warp_by_scalar()
terrain

terrain.plot()

# And now we have a 3D structured surface of the terrain! We can now
# extend that structured surface into a 3D mesh to form a terrain
# following grid. To do this, we first define our cell spacings in the
# z-direction (these start from the terrain surface). Then we repeat the
# XYZ structured coordinates of the terrain mesh and decrease each Z level
# by our Z cell spacing. Once we have those structured coordinates, we can
# create a `pyvista.StructuredGrid`{.interpreted-text role="class"}.
#

# +
z_cells = np.array([25]*5 + [35]*3 + [50]*2 + [75, 100])

xx = np.repeat(terrain.x, len(z_cells), axis=-1)
yy = np.repeat(terrain.y, len(z_cells), axis=-1)
zz = np.repeat(terrain.z, len(z_cells), axis=-1) - np.cumsum(z_cells).reshape((1, 1, -1))

mesh = pv.StructuredGrid(xx, yy, zz)
mesh["Elevation"] = zz.ravel(order="F")
mesh

# +
cpos = [(1826736.796308761, 5655837.275274233, 4676.8405505181745),
 (1821066.1790519988, 5649248.765538796, 943.0995128226014),
 (-0.2797856225380979, -0.27966946337594883, 0.9184252809434081)]

mesh.plot(show_edges=True, lighting=False, cpos=cpos)
notebooks/official/terrain-mesh.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="zLSy94NEQi-e" colab_type="code" colab={} """ You can run either this notebook locally (if you have all the dependencies and a GPU) or on Google Colab. Instructions for setting up Colab are as follows: 1. Open a new Python 3 notebook. 2. Import this notebook from GitHub (File -> Upload Notebook -> "GITHUB" tab -> copy/paste GitHub URL) 3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select "GPU" for hardware accelerator) 4. Run this cell to set up dependencies. """ # If you're using Google Colab and not running locally, run this cell. ## Install dependencies # !pip install wget # !apt-get install sox libsndfile1 ffmpeg # !pip install unidecode # ## Install NeMo BRANCH = 'v1.0.0b2' # !python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all] ## Install TorchAudio # !pip install torchaudio>=0.6.0 -f https://download.pytorch.org/whl/torch_stable.html ## Grab the config we'll use in this example # !mkdir configs # + [markdown] id="7LfkL2r2Q1tr" colab_type="text" # # NeMo - A toolkit for Conversational AI applications # # NeMo is a toolkit for creating [Conversational AI](https://developer.nvidia.com/conversational-ai#started) applications. # # NeMo toolkit makes it possible for researchers to easily compose complex neural network architectures for conversational AI using reusable components - Neural Modules. Neural Modules are conceptual blocks of neural networks that take typed inputs and produce typed outputs. Such modules typically represent data layers, encoders, decoders, language models, loss functions, or methods of combining activations. # # The toolkit comes with extendable collections of pre-built modules and ready-to-use models for automatic speech recognition (ASR), natural language processing (NLP) and text synthesis (TTS). Built for speed, NeMo can utilize NVIDIA's Tensor Cores and scale out training to multiple GPUs and multiple nodes. # # For more information, please visit https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/# # + [markdown] id="6G2TZkaxcM0e" colab_type="text" # ## Foundations of NeMo # --------- # # NeMo models leverage [PyTorch Lightning](https://github.com/PyTorchLightning/pytorch-lightning) Module, and are compatible with the entire PyTorch ecosystem. This means that users have the full flexibility of using the higher level APIs provided by PyTorch Lightning (via Trainer), or write their own training and evaluation loops in PyTorch directly (by simply calling the model and the individual components of the model). # # For NeMo developers, a "Model" is the neural network(s) as well as all the infrastructure supporting those network(s), wrapped into a singular, cohesive unit. As such, all NeMo models are constructed to contain the following out of the box (at the bare minimum, some models support additional functionality too!) - # # - Neural Network architecture - all of the modules that are required for the model. # # - Dataset + Data Loaders - all of the components that prepare the data for consumption during training or evaluation. # # - Preprocessing + Postprocessing - all of the components that process the datasets so they can easily be consumed by the modules. # # - Optimizer + Schedulers - basic defaults that work out of the box, and allow further experimentation with ease. 
# # - Any other supporting infrastructure - tokenizers, language model configuration, data augmentation etc. # # # # + id="XxAwtqWBQrNk" colab_type="code" colab={} import nemo nemo.__version__ # + [markdown] id="H01SHfKQh-gV" colab_type="text" # ## NeMo Collections # # NeMo is sub-divided into a few fundamental collections based on their domains - `asr`, `nlp`, `tts`. When you performed the `import nemo` statement above, none of the above collections were imported. This is because you might not need all of the collections at once, so NeMo allows partial imports of just one or more collection, as and when you require them. # # ------- # Let's import the above three collections - # + id="J09NNa8fhth7" colab_type="code" colab={} import nemo.collections.asr as nemo_asr import nemo.collections.nlp as nemo_nlp import nemo.collections.tts as nemo_tts # + [markdown] id="bSvYoeBrjPza" colab_type="text" # ## NeMo Models in Collections # # NeMo contains several models for each of its collections, pertaining to certain common tasks involved in conversational AI. At a brief glance, let's look at all the Models that NeMo offers for the above 3 collections. # + id="9LbbC_92i41f" colab_type="code" colab={} asr_models = [model for model in dir(nemo_asr.models) if model.endswith("Model")] asr_models # + id="t5_ax9Z8j9FC" colab_type="code" colab={} nlp_models = [model for model in dir(nemo_nlp.models) if model.endswith("Model")] nlp_models # + id="bQdR6RJdkezq" colab_type="code" colab={} tts_models = [model for model in dir(nemo_tts.models) if model.endswith("Model")] tts_models # + [markdown] id="iWKxKQnSkj9Z" colab_type="text" # ## The NeMo Model # # Let's dive deeper into what a NeMo model really is. There are many ways we can create these models - we can use the constructor and pass in a config, we can instantiate the model from a pre-trained checkpoint, or simply pass a pre-trained model name and instantiate a model directly from the cloud ! # # --------- # For now, let's try to work with an ASR model - [QuartzNet](https://arxiv.org/abs/1910.10261) # + id="n-XOQaW1kh3v" colab_type="code" colab={} quartznet = nemo_asr.models.EncDecCTCModel.from_pretrained('QuartzNet15x5Base-En') # + id="YP4X7KVPli6g" colab_type="code" colab={} quartznet.summarize(); # + [markdown] id="MB91Swu0pIKr" colab_type="text" # ## Model Configuration using OmegaConf # -------- # # So we could download, instantiate and analyse the high level structure of the `QuartzNet` model in a few lines! Now let's delve deeper into the configuration file that makes the model work. # # First, we import [OmegaConf](https://omegaconf.readthedocs.io/en/latest/). OmegaConf is an excellent library that is used throughout NeMo in order to enable us to perform yaml configuration management more easily. Additionally, it plays well with another library, [Hydra](https://hydra.cc/docs/intro/), that is used by NeMo to perform on the fly config edits from the command line, dramatically boosting ease of use of our config files ! # + id="RkgrDJvumFER" colab_type="code" colab={} from omegaconf import OmegaConf # + [markdown] id="CktakfBluA56" colab_type="text" # All NeMo models come packaged with their model configuration inside the `cfg` attribute. While technically it is meant to be config declaration of the model as it has been currently constructed, `cfg` is an essential tool to modify the behaviour of the Model after it has been constructed. It can be safely used to make it easier to perform many essential tasks inside Models. 
# # To be doubly sure, we generally work on a copy of the config until we are ready to edit it inside the model # + id="ISd6z7sXt9Mm" colab_type="code" colab={} import copy # + id="N2_SiLHRve8A" colab_type="code" colab={} cfg = copy.deepcopy(quartznet.cfg) print(OmegaConf.to_yaml(cfg)) # + [markdown] id="W_V3e3W7vqOb" colab_type="text" # ## Analysing the contents of the Model config # ---------- # # Above we see a configuration for the QuartzNet model. As discussed in the beginning, NeMo models contain the entire definition of the neural network(s) as well as most of the surrounding infrastructure to support that model within themselves. Here, we see a perfect example of this behaviour. # # QuartzNet contains within its config - # # - `preprocessor` - MelSpectrogram preprocessing layer # - `encoder` - The acoustic encoder model. # - `decoder` - The CTC decoder layer. # - `optim` (and potentially `sched`) - Optimizer configuration. Can optionally include Scheduler information. # - `spec_augment` - Spectrogram Augmentation support. # - `train_ds`, `validation_ds` and `test_ds` - Dataset and data loader construction information. # + [markdown] id="sIwhdXkwxn6R" colab_type="text" # ## Modifying the contents of the Model config # ---------- # # Say we want to experiment with a different preprocessor (we want MelSpectrogram, but with different configuration than was provided in the original configuration). Or say we want to add a scheduler to this model during training. # # OmegaConf makes this a very simple task for us! # + id="WlSZ8EA4yGKo" colab_type="code" colab={} # OmegaConf won't allow you to add new config items, so we temporarily disable this safeguard. OmegaConf.set_struct(cfg, False) # Let's see the old optim config print("Old Config: ") print(OmegaConf.to_yaml(cfg.optim)) sched = {'name': 'CosineAnnealing', 'warmup_steps': 1000, 'min_lr': 1e-6} sched = OmegaConf.create(sched) # Convert it into a DictConfig # Assign it to cfg.optim.sched namespace cfg.optim.sched = sched # Let's see the new optim config print("New Config: ") print(OmegaConf.to_yaml(cfg.optim)) # Here, we restore the safeguards so no more additions can be made to the config OmegaConf.set_struct(cfg, True) # + [markdown] id="-nMDN66502kn" colab_type="text" # ## Updating the model from config # ---------- # # NeMo Models can be updated in a few ways, but we follow similar patterns within each collection so as to maintain consistency. # # Here, we will show the two most common ways to modify core components of the model - using the `from_config_dict` method, and updating a few special parts of the model. # # Remember, all NeMo models are PyTorch Lightning modules, which themselves are PyTorch modules, so we have a lot of flexibility here! # + [markdown] id="qrKzFYkZ20aa" colab_type="text" # ### Update model using `from_config_dict` # # In certain config files, you will notice the following pattern : # # ```yaml # preprocessor: # _target_: nemo.collections.asr.modules.AudioToMelSpectrogramPreprocessor # params: # normalize: per_feature # window_size: 0.02 # sample_rate: 16000 # window_stride: 0.01 # window: hann # features: 64 # n_fft: 512 # frame_splicing: 1 # dither: 1.0e-05 # stft_conv: false # ``` # # You might ask, why are we using `_target_` and `params`? Well, it is generally rare for the preprocessor, encoder, decoder and perhaps a few other details to be changed often from the command line when experimenting. 
In order to stabilize these settings, we enforce that our preprocessor will always be of type `AudioToMelSpectrogramPreprocessor` for this model by setting its `_target_` attribute in the config. In order to provide its parameters in the class constructor, we use `params`. # # --------- # Note, we can still change all of the parameters of this `AudioToMelSpectrogramPreprocessor` class from the command line using hydra, so we don't lose any flexibility once we decide what type of preprocessing class we want ! # # This also gives us a flexible way to instantiate parts of the model from just the config object ! # + id="1Be08R4szkT3" colab_type="code" colab={} new_preprocessor_config = copy.deepcopy(cfg.preprocessor) new_preprocessor = quartznet.from_config_dict(new_preprocessor_config) print(new_preprocessor) # + [markdown] id="UzJQ7Y8H4S_U" colab_type="text" # So how do we actually update our model's internal preprocessor with something new? Well, since NeMo Model's are just pytorch Modules, we can just replace their attribute ! # + id="WdtnPKX84OJ-" colab_type="code" colab={} quartznet.preprocessor = new_preprocessor # + id="OMz2KR-24xTO" colab_type="code" colab={} quartznet.summarize(); # + [markdown] id="gPb_BdPN40Ro" colab_type="text" # -------- # This might look like nothing changed - because we didn't actually modify the config for the preprocessor at all ! But as we showed above, we can easily modify the config for the preprocessor, instantiate it from config, and then just set it to the model. # + [markdown] id="IV8WKJkD5E_Q" colab_type="text" # ------- # **NOTE**: Preprocessors don't generally have weights, so this was easy, but say we want to replace a part of the model which actually has trained parameters? # # Well, the above approach will still work, just remember the fact that the new module you inserted into `quartznet.encoder` or `quartznet.decoder` actually won't have pretrained weights. You can easily rectify that by loading the state dict for the module *before* you set it to the Model though! # + [markdown] id="YplQcgfG6S1U" colab_type="text" # ### Preserving the new config # # So we went ahead and updated the preprocessor of the model. We however also need to perform a crucial step - **preserving the updated config**! # # Why do we want to do this? NeMo has many ways of saving and restoring its models, which we will discuss a bit later. All of them depend on having an updated config that defines the model in its entirety, so if we modify anything, we should also update the corresponding part of the config to safely save and restore models. # + id="dsxQHBV86R4a" colab_type="code" colab={} # Update the config copy cfg.preprocessor = new_preprocessor_config # Update the model config quartznet.cfg = cfg # + [markdown] id="eXRRBnJk5tCv" colab_type="text" # ## Update a few special components of the Model # --------- # # While the above approach is good for most major components of the model, NeMo has special utilities for a few components. # # They are - # # - `setup_training_data` # - `setup_validation_data` and `setup_multi_validation_data` # - `setup_test_data` and `setup_multi_test_data` # - `setup_optimization` # # These special utilities are meant to help you easily setup training, validation, testing once you restore a model from a checkpoint. # # ------ # One of the major tasks of all conversational AI models is fine-tuning onto new datasets - new languages, new corpus of text, new voices etc. It is often insufficient to have just a pre-trained model. 
So these setup methods are provided to enable users to adapt models *after* they have been already trained or provided to you. # # # # + [markdown] id="B7Y7wt2x9goJ" colab_type="text" # You might remember having seen a few warning messages the moment you tried to instantiate the pre-trained model. Those warnings are in fact reminders to call the appropriate setup methods for the task you want to perform. # # Those warnings are simply displaying the old config that was used to train that model, and are a basic template that you can easily modify. You have the ability to modify the `train_ds`, `validation_ds` and `test_ds` sub-configs in their entirety in order to evaluate, fine-tune or train from scratch the model, or any further purpose as you require it. # # # + [markdown] id="1hXXdaup-QmG" colab_type="text" # Let's discuss how to add the scheduler to the model below (which initially had just an optimizer in its config) # + id="cveKWvMZ4zBo" colab_type="code" colab={} # Let's print out the current optimizer print(OmegaConf.to_yaml(quartznet.cfg.optim)) # + id="XVguw3k0-f6b" colab_type="code" colab={} # Now let's update the config quartznet.setup_optimization(cfg.optim); # + [markdown] id="1JZBCQeW-21X" colab_type="text" # ------- # We see a warning - # # ``` # Neither `max_steps` nor `iters_per_batch` were provided to `optim.sched`, cannot compute effective `max_steps` ! # Scheduler will not be instantiated ! # ``` # # We don't have a train dataset setup, nor do we have max_steps in the config. Most NeMo schedulers cannot be instantiated without computing how many train steps actually exist! # # Here, we can temporarily allow the scheduler construction by explicitly passing a max_steps value to be 100 # + id="mqC89hfE-tqf" colab_type="code" colab={} OmegaConf.set_struct(cfg.optim.sched, False) cfg.optim.sched.max_steps = 100 OmegaConf.set_struct(cfg.optim.sched, True) # + id="r22IqOBK_q6l" colab_type="code" colab={} # Now let's update the config and try again quartznet.setup_optimization(cfg.optim); # + [markdown] id="U7Eezf_sAVS0" colab_type="text" # You might wonder why we didnt explicitly set `quartznet._cfg.optim = cfg.optim`. # # This is because the `setup_optimization()` method does it for you! You can still update the config manually. # + [markdown] id="THqhXy_lQ7i8" colab_type="text" # ### Optimizer & Scheduler Config # # Optimizers and schedulers are common components of models, and are essential to train the model from scratch. # # They are grouped together under a unified `optim` namespace, as schedulers often operate on a given optimizer. # # # + [markdown] id="6HY51nuoSJs5" colab_type="text" # ### Let's breakdown the general `optim` structure # ```yaml # optim: # name: novograd # lr: 0.01 # # # optimizer arguments # betas: [0.8, 0.25] # weight_decay: 0.001 # # # scheduler setup # sched: # name: CosineAnnealing # # # Optional arguments # max_steps: null # computed at runtime or explicitly set here # monitor: val_loss # reduce_on_plateau: false # # # scheduler config override # warmup_steps: 1000 # warmup_ratio: null # min_lr: 1e-9 # ``` # # Essential Optimizer components - # # - `name`: String name of the optimizer. Generally a lower case of the class name. # - `lr`: Learning rate is a required argument to all optimizers. 
# # Optional Optimizer components - after the above two arguments are provided, any additional arguments added under `optim` will be passed to the constructor of that optimizer as keyword arguments # # - `betas`: List of beta values to pass to the optimizer # - `weight_decay`: Optional weight decay passed to the optimizer. # # Optional Scheduler components - `sched` is an optional setup of the scheduler for the given optimizer. # # If `sched` is provided, only one essential argument needs to be provided : # # - `name`: The name of the scheduler. Generally, it is the full class name. # # Optional Scheduler components - # # - `max_steps`: Max steps as an override from the user. If one provides `trainer.max_steps` inside the trainer configuration, that value is used instead. If neither value is set, the scheduler will attempt to compute the `effective max_steps` using the size of the train data loader. If that too fails, then the scheduler will not be created at all. # # - `monitor`: Used if you are using an adaptive scheduler such as ReduceLROnPlateue. Otherwise ignored. Defaults to `loss` - indicating train loss as monitor. # # - `reduce_on_plateau`: Required to be set to true if using an adaptive scheduler. # # Any additional arguments under `sched` will be supplied as keyword arguments to the constructor of the scheduler. # # # # + [markdown] id="V3pQM2aj_6WX" colab_type="text" # ## Difference between the data loader setup methods # ---------- # # You might notice, we have multiple setup methods for validation and test data sets. We also don't have an equivalent `setup_multi_train_data`. # # In general, the `multi` methods refer to multiple data sets / data loaders. # # + [markdown] id="g33nMx9WCJdj" colab_type="text" # ### Where's `setup_multi_train_data`? # With the above in mind, let's tackle why we don't have `setup_multi_train_data`. # # NeMo is concerned with multiple domains - `asr`, `nlp` and `tts`. The way datasets are setup and used in these domains is dramatically different. It is often unclear what it means to have multiple train datasets - do we concatenate them? Do we randomly sample (with same or different probability) from each of them? # # Therefore we leave such support for multiple datasets up to the model itself. For example, in ASR, you can concatenate multiple train manifest files by using commas when providing the `manifest_filepath` value! # + [markdown] id="BjI2Q5LECJib" colab_type="text" # ### What are multi methods? # # In many cases, especially true for ASR and NLP, we may have multiple validation and test datasets. The most common example for this in ASR is `Librispeech`, which has `dev_clean`, `dev_other`, `test_clean`, `test_other`. # # NeMo standardizes how to handle multiple data loaders for validation and testing, so that all of our collections have a similar look and feel, as well as ease development of our models. During evaluation, these datasets are treated independently and prepended with resolved names so that logs are separate! # # The `multi` methods are therefore generalizations of the single validation and single test data setup methods, with some additional functionality. If you provide multiple datasets, you still have to write code for just one dataset and NeMo will automatically attach the appropriate names to your logs so you can differentiate between them! # # Furthermore, they also automatically preserve the config the user passes to them when updating the validation or test data loaders. 
# # **In general, it is preferred to call the `setup_multi_validation_data` and `setup_multi_test_data` methods, even if you are only using single datasets, simply for the automated management they provide.** # + [markdown] id="ZKURHn0jH_52" colab_type="text" # ## Creating Model from constructor vs restoring a model # --------- # # You might notice, we discuss all of the above setup methods in the context of model after it is restored. However, NeMo scripts do not call them inside any of the example train scripts themselves. # # This is because these methods are automatically called by the constructor when the Model is created for the first time, but these methods are skipped during restoration (either from a PyTorch Lightning checkpoint using `load_from_checkpoint`, or via `restore_from` method inside NeMo Models). # # This is done as most datasets are stored on a user's local directory, and the path to these datasets is set in the config (either set by default, or set by Hydra overrides). On the other hand, the models are meant to be portable. On another user's system, the data might not be placed at exactly the same location, or even on the same drive as specified in the model's config! # # Therefore we allow the constructor some brevity and automate such dataset setup, whereas restoration warns that data loaders were not set up and provides the user with ways to set up their own datasets. # # ------ # # Why are optimizers not restored automatically? Well, optimizers themselves don't face an issue, but as we saw before, schedulers depend on the number of train steps in order to calculate their schedule. # # However, if you don't wish to modify the optimizer and scheduler, and prefer to leave them to their default values, that's perfectly alright. The `setup_optimization()` method is automatically called by PyTorch Lightning for you when you begin training your model! # + [markdown] id="g91FE8mlMcnh" colab_type="text" # ## Saving and restoring models # ---------- # # NeMo provides a few ways to save and restore models. If you utilize the Experiment Manager that is part of all NeMo train scripts, PyTorch Lightning will automatically save checkpoints for you in the experiment directory. # # We can also use packaged files using the specialized `save_to` and `restore_from` methods. # + [markdown] id="NzMxga7QNYn8" colab_type="text" # ### Saving and Restoring from PTL Checkpoints # ---------- # # The PyTorch Lightning Trainer object will periodically save checkpoints when the experiment manager is being used during training. # # PyTorch Lightning checkpoints can then be loaded and evaluated / fine-tuned just as always using the class method `load_from_checkpoint`. # # For example, restore a QuartzNet model from a checkpoint - # # ```python # quartznet = nemo_asr.models.EncDecCTCModel.load_from_checkpoint(<path to checkpoint>) # ``` # + [markdown] id="W4YzAG-KOBkZ" colab_type="text" # ### Saving and Restoring from .nemo files # ---------- # # There are a few models which might require external dependencies to be packaged with them in order to restore them properly. # # One such example is an ASR model with an external BPE tokenizer. It is preferred if the model includes all of the components required to restore it, but a binary file for a tokenizer cannot be serialized into a PyTorch Lightning checkpoint. # # In such cases, we can use the `save_to` and `restore_from` method to package the entire model + its components (here, the tokenizer file(s)) into a tarfile. 
This can then be easily imported by the user and used to restore the model. # + id="P6_vMSwXNJ74" colab_type="code" colab={} # Save the model quartznet.save_to('quartznet_15x5.nemo') # + id="HrBhgaqyP4rU" colab_type="code" colab={} # !ls -d -- *.nemo # + id="Tyht1E0DQGb_" colab_type="code" colab={} # Restore the model temp_qn = nemo_asr.models.EncDecCTCModel.restore_from('quartznet_15x5.nemo') # + id="dqNpmYYJQS2H" colab_type="code" colab={} temp_qn.summarize(); # + id="A5e42EoiZYjf" colab_type="code" colab={} # Note that the preprocessor + optimizer config have been preserved after the changes we made ! print(OmegaConf.to_yaml(temp_qn.cfg)) # + [markdown] id="OI3RxwpcV-UF" colab_type="text" # Note, that .nemo file is a simple .tar.gz with checkpoint, configuration and, potentially, other artifacts such as tokenizer configs being used by the model # + id="jFBAGcaDWLiu" colab_type="code" colab={} # !cp quartznet_15x5.nemo quartznet_15x5.tar.gz # !tar -xvf quartznet_15x5.tar.gz # + [markdown] id="mkau4Q9jZo1l" colab_type="text" # ### Extracting PyTorch checkpoints from NeMo tarfiles (Model level) # ----------- # # While the .nemo tarfile is an excellent way to have a portable model, sometimes it is necessary for researchers to have access to the basic PyTorch save format. NeMo aims to be entirely compatible with PyTorch, and therefore offers a simple method to extract just the PyTorch checkpoint from the .nemo tarfile. # + id="qccPANeycCoq" colab_type="code" colab={} import torch # + id="A4zswOKHar9q" colab_type="code" colab={} state_dict = temp_qn.extract_state_dict_from('quartznet_15x5.nemo', save_dir='./pt_ckpt/') # !ls ./pt_ckpt/ # + [markdown] id="ACB-0dfnbFG3" colab_type="text" # As we can see below, there is now a single basic PyTorch checkpoint available inside the `pt_ckpt` directory, which we can use to load the weights of the entire model as below # + id="4ZAF_A0uc5bB" colab_type="code" colab={} temp_qn.load_state_dict(torch.load('./pt_ckpt/model_weights.ckpt')) # + [markdown] id="Hkq6EM99cS6y" colab_type="text" # ### Extracting PyTorch checkpoints from NeMo tarfiles (Module level) # ---------- # # While the above method is exceptional when extracting the checkpoint of the entire model, sometimes there may be a necessity to load and save the individual modules that comprise the Model. # # The same extraction method offers a flag to extract the individual model level checkpoints into their individual files, so that users have access to per-module level checkpoints. # + id="LW6wve2zbT9D" colab_type="code" colab={} state_dict = temp_qn.extract_state_dict_from('quartznet_15x5.nemo', save_dir='./pt_module_ckpt/', split_by_module=True) # !ls ./pt_module_ckpt/ # + [markdown] id="DtV5vpb5d1ni" colab_type="text" # Now, we can load and assign the weights of the individual modules of the above QuartzNet Model ! # + id="rVHylSKFdywn" colab_type="code" colab={} temp_qn.preprocessor.load_state_dict(torch.load('./pt_module_ckpt/preprocessor.ckpt')) temp_qn.encoder.load_state_dict(torch.load('./pt_module_ckpt/encoder.ckpt')) temp_qn.decoder.load_state_dict(torch.load('./pt_module_ckpt/decoder.ckpt')) # + [markdown] id="88vOGV7VYcuu" colab_type="text" # # NeMo with Hydra # # [Hydra](https://hydra.cc/docs/intro/) is used throughout NeMo as a way to enable rapid prototyping using predefined config files. Hydra and OmegaConf offer great compatibility with each other, and below we show a few general helpful tips to improve productivity with Hydra when using NeMo. 
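# Before going through the command-line tips below, here is a small, purely illustrative
# sketch (not taken from the NeMo scripts) of what a hydra-style dotlist override does to a
# config. We build the override with plain OmegaConf, which is essentially what hydra hands
# to the script after parsing the command line.

# +
from omegaconf import OmegaConf

base = OmegaConf.create({'model': {'optim': {'name': 'novograd', 'lr': 0.01}}})

# "model.optim.name=adam model.optim.lr=0.001" on the command line becomes a dotlist override
override = OmegaConf.from_dotlist(["model.optim.name=adam", "model.optim.lr=0.001"])

merged = OmegaConf.merge(base, override)
print(OmegaConf.to_yaml(merged))
# -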
# + [markdown] id="DfY6Ha3qYcxG" colab_type="text" # ## Hydra Help # -------- # # Since our scripts are written with hydra in mind, you might notice that using `python <script.py> --help` returns you a config rather than the usual help format from argparse. # # Using `--help` you can see the default config attached to the script - every NeMo script has at least one default config file attached to it. This gives you a guide on how you can modify values for an experiment. # # Hydra also has a special `--hydra-help` flag, which will offer you more help with respect to hydra itself as it is set up in the script. # # + [markdown] id="gEsZlnfaYc3X" colab_type="text" # ## Changing config paths and files # --------- # # While all NeMo models come with at least 1 default config file, one might want to switch configs without changing code. This is easily achieved by the following commands : # # - `--config-path`: Path to the directory which contains the config files # - `--config-name`: Name of the config file we wish to load. # # Note that these two arguments need to be at the very beginning of your execution statement, before you provide any command line overrides to your config file. # + [markdown] id="ZyNHlArpYc9A" colab_type="text" # ## Overriding config from the command line # ---------- # # Hydra allows users to provide command line overrides to any part of the config. There are three cases to consider - # # - Override existing value in config # - Add new value in config # - Remove old value in config # + [markdown] id="96CKbvn6Yc7f" colab_type="text" # ### Overriding existing values in config # # Let's take the case where we want to change the optimizer from `novograd` to `adam`. Let's also change the beta values to default adam values. # # Hydra overrides are based on the `.` syntax - each `.` representing a level in the config itself. # # ```sh # $ python <script>.py \ # --config-path="dir to config" \ # --config-name="name of config" \ # model.optim.name="adam" \ # model.optim.betas=[0.9,0.999] # ``` # # It is to be noted, if lists are passed, there cannot be any spaces between items. # # ------ # # We can also support multi validation datasets with the above list syntax, but it depends on the model level support. # # For ASR collection, the following syntax is widely supported in ASR, ASR-BPE and classification models. Let's take an example of a model being trained on LibriSpeech - # # ```sh # $ python <script>.py \ # --config-path="dir to config" \ # --config-name="name of config" \ # model.validation_ds.manifest_filepath=["path to dev clean","path to dev other"] \ # model.test_ds.manifest_filepath=["path to test clean","path to test other"] # ``` # + [markdown] id="Wj7oMkepYc17" colab_type="text" # ### Add new values in config # ---------- # # Hydra allows us to inject additional parameters inside the config using the `+` syntax. # # Let's take an example of adding `amsgrad` fix for the `novograd` optimizer above. # # ```sh # $ python <script>.py \ # --config-path="dir to config" \ # --config-name="name of config" \ # +model.optim.amsgrad=true # ``` # + [markdown] id="p23327hsYc0Z" colab_type="text" # ### Remove old value in config # --------- # # Hydra allows us to remove parameters inside the config using the `~` syntax. 
# # Let's take an example of removing `weight_decay` inside the Novograd optimizer # # ```sh # $ python <script>.py \ # --config-path="dir to config" \ # --config-name="name of config" \ # ~model.optim.weight_decay # ``` # + [markdown] id="8VSWIbzjYzDi" colab_type="text" # ## Setting a value to `None` from the command line # # We may sometimes choose to disable a feature by setting the value to `None`. # # We can accomplish this by using the keyword `null` inside the command line. # # Let's take an example of disabling the validation data loader inside an ASR model's config - # # # ```sh # $ python <script>.py \ # --config-path="dir to config" \ # --config-name="name of config" \ # model.test_ds.manifest_filepath=null # ``` # + [markdown] id="ah8rgrvvsw5R" colab_type="text" # # NeMo Examples # # NeMo supports various pre-built models for ASR, NLP and TTS tasks. One example we see in this notebook is the ASR model for Speech to Text - by using the QuartzNet model. # # The NeMo repository has a dedicated `examples` directory with scripts to train and evaluate models for various tasks - ranging from ASR speech to text, NLP question answering and TTS text to speech using models such as `Tacotron2` and `WaveGlow`. # # NeMo constantly adds new models and new tasks to these examples, such that these examples serve as the basis to train and evaluate models from scratch with the provided config files. # # NeMo Examples directory can be found here - https://github.com/NVIDIA/NeMo/tree/main/examples # + [markdown] id="999KAomdtWlu" colab_type="text" # ## Structure of NeMo Examples # ------- # # The NeMo Examples directory is structured by domain, as well as sub-task. Similar to how we partition the collections supported by NeMo, the examples themselves are separated initially by domain, and then by sub-tasks of that domain. # # All these example scripts are bound to at least one default config file. These config files contain all of the information of the model, as well as the PyTorch Lightning Trainer configuration and Experiment Manager configuration. # # In general, once the model is trained and saved to a PyTorch Lightning checkpoint, or to a .nemo tarfile, it will no longer contain the training configuration - no configuration information for the Trainer or Experiment Manager. # # **These config files have good defaults pre-set to run an experiment with NeMo, so it is adviced to base your own training configuration on these configs.** # # # Let's take a deeper look at some of the examples inside each domain. # # # + [markdown] id="8Fk2grx0uSBQ" colab_type="text" # ## ASR Examples # ------- # # NeMo supports multiple Speech Recognition models such as Jasper and QuartzNet, which can be trained on various datasets. We also provide pretrained checkpoints for these models trained on standard datasets so that they can be used immediately. These scripts are made available in `speech_to_text.py`. # # ASR examples also supports sub-tasks such as speech classification - MatchboxNet trained on the Google Speech Commands Dataset is available in `speech_to_label.py`. Voice Activity Detection is also supported with the same script, by simply changing the config file passed to the script! # # NeMo also supports training Speech Recognition models with Byte Pair/Word Piece encoding of the corpus, via the `speech_to_text_bpe.py` example. Since these models are still under development, their configs fall under the `experimental/configs` directory. 
# # Finally, in order to simply perform inference on some dataset using these models, prefer to use the `speech_to_text_infer.py` example, which provides a look at how to compute WER over a dataset provided by the user. # + [markdown] id="HhtzYATsuSJV" colab_type="text" # ## NLP Examples # --------- # # NeMo supports a wide variety of tasks in NLP - from text classification and language modelling all the way to glue benchmarking! # # All NLP models require text tokenization as data preprocessing steps. The list of tokenizers can be found in nemo.collections.common.tokenizers, and include WordPiece tokenizer, SentencePiece tokenizer or simple tokenizers like Word tokenizer. # # A non-exhaustive list of tasks that NeMo currently supports in NLP is - # # - Language Modelling - Assigns a probability distribution over a sequence of words. Can be either generative e.g. vanilla left-right-transformer or BERT with a masked language model loss. # - Text Classification - Classifies an entire text based on its content into predefined categories, e.g. news, finance, science etc. These models are BERT-based and can be used for applications such as sentiment analysis, relationship extraction # - Token Classification - Classifies each input token separately. Models are based on BERT. Applications include named entity recognition, punctuation and capitalization, etc. # - Intent Slot Classification - used for joint recognition of Intents and Slots (Entities) for building conversational assistants. # - Question Answering - Currently only SQuAD is supported. This takes in a question and a passage as input and predicts a span in the passage, from which the answer is extracted. # - Glue Benchmarks - A benchmark of nine sentence- or sentence-pair language understanding tasks # # + [markdown] id="F2m4BT2AuSM_" colab_type="text" # ## TTS Examples # --------- # # NeMo supports Text To Speech (TTS, aka Speech Synthesis) via a two step inference procedure. First, a model is used to generate a mel spectrogram from text. Second, a model is used to generate audio from a mel spectrogram. # # Supported Models: # # Mel Spectrogram Generators: # * Tacotron 2 # * Glow-TTS # # Audio Generators (Vocoders): # * WaveGlow # + [markdown] id="XKJPRgUns2On" colab_type="text" # # NeMo Tutorials # # Alongside the example scripts provided above, NeMo provides in depth tutorials for usage of these models for each of the above domains inside the `tutorials` directory found in the NeMo repository. # # Tutorials are meant to be more in-depth explanation of the workflow in the discussed task - usually involving a small amount of data to train a small model on a task, along with some explanation of the task itself. # # White the tutorials are a great example of the simplicity of NeMo, please note for the best performance when training on real datasets, we advice the use of the example scripts instead of the tutorial notebooks. # # NeMo Tutorials directory can be found here - https://github.com/NVIDIA/NeMo/tree/main/tutorials
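# As a final quick sanity check (an added illustration, not part of the original primer), we
# can confirm that the model restored from the .nemo file exposes the same state entries as
# the in-memory model we saved:

# +
orig_keys = set(quartznet.state_dict().keys())
restored_keys = set(temp_qn.state_dict().keys())
print("Same state dict keys:", orig_keys == restored_keys)
print("Number of state entries:", len(restored_keys))
# -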
tutorials/00_NeMo_Primer.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

def one_edit_away(s1, s2):
    """Return True if s1 and s2 are at most one edit (insert, remove or replace) apart."""
    if len(s1) == len(s2):
        return one_edit_replace(s1, s2)
    if len(s1) + 1 == len(s2):
        return one_edit_insert(s1, s2)
    if len(s1) - 1 == len(s2):
        return one_edit_insert(s2, s1)
    return False


def one_edit_replace(s1, s2):
    """Strings of equal length: allow at most one differing position."""
    found_different_chars = False
    for i in range(len(s1)):
        if s1[i] != s2[i]:
            if found_different_chars:
                return False
            found_different_chars = True
    return True


def one_edit_insert(s1, s2):
    """s2 is one character longer than s1: allow exactly one inserted character."""
    i1 = 0
    for i2 in range(len(s2)):
        if i1 < len(s1) and s1[i1] == s2[i2]:
            i1 += 1
        elif i1 != i2:
            # second mismatch: more than one insertion would be needed
            return False
    return True


one_edit_away("pale", "pales")    # True

one_edit_away("pales", "plesddf")  # False: lengths differ by more than one
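# A few quick checks (added for illustration; they were not part of the original notebook):

assert one_edit_away("pale", "ple") is True      # one removal
assert one_edit_away("pales", "pale") is True    # one insertion
assert one_edit_away("pale", "bale") is True     # one replacement
assert one_edit_away("pale", "bake") is False    # two replacements
assert one_edit_away("abc", "abyx") is False     # insertion plus replacement
print("all checks passed")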
arrays_and_strings/05_one_edit_away.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: conda_python3
#     language: python
#     name: conda_python3
# ---

# # R Serving with FastAPI

# ## Dockerfile
# * The Dockerfile defines the environment in which our server will be executed.
# * Below, you can see that the entrypoint for our container will be [deploy.R](deploy.R)

# %pycat Dockerfile

# ## Code: deploy.R
#
# **deploy.R** handles the following steps:
# * Loads the R libraries used by the server.
# * Loads a pretrained `xgboost` model that has been trained on the classical [Iris](https://archive.ics.uci.edu/ml/datasets/iris) dataset.
#     * <NAME>. and <NAME>. (2019). UCI Machine Learning Repository [http://archive.ics.uci.edu/ml]. Irvine, CA: University of California, School of Information and Computer Science.
# * Defines an inference function that takes a matrix of iris features and returns predictions for those iris examples.
# * Wraps the inference function to make it thread-safe for passing to python through reticulate.
# * Finally, it generates the [endpoints.py](endpoints.py) from python and launches the FastAPI server app using those endpoint definitions.

# %pycat deploy.R

# ## Code: endpoints.py
# **endpoints.py** defines two routes:
# * `/ping` returns a status of 'Alive' to indicate that the application is healthy
# * `/invocations` applies the previously defined inference function to the input features from the request body
#
# Note that FastAPI is typed. The `Example` class defines the type of the input that we expect to receive from the request.
#
# For more information about the requirements for building your own inference container, see:
# [Use Your Own Inference Code with Hosting Services](https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms-inference-code.html)

# %pycat endpoints.py

# ## Build the Serving Image

# !docker build -t r-fastapi .

# ## Launch the Serving Container

# !echo "Launching FastAPI"
# !docker run -d --rm -p 5000:8080 r-fastapi
# !echo "Waiting for the server to start.." && sleep 10
# !docker container list

# ## Define Simple Python Client

# +
import requests
from tqdm import tqdm
import pandas as pd

pd.set_option("display.max_rows", 500)
# -

def get_predictions(examples, instance=requests, port=5000):
    payload = {"features": examples}
    return instance.post(f"http://127.0.0.1:{port}/invocations", json=payload)

def get_health(instance=requests, port=5000):
    # return the response so callers can inspect the status code
    return instance.get(f"http://127.0.0.1:{port}/ping")

# ## Define Example Inputs
# We define example inputs from the Iris dataset.

column_names = ["Sepal.Length", "Sepal.Width", "Petal.Length", "Petal.Width", "Label"]
iris = pd.read_csv(
    "s3://sagemaker-sample-files/datasets/tabular/iris/iris.data", names=column_names
)
iris_features = iris[["Sepal.Length", "Sepal.Width", "Petal.Length", "Petal.Width"]]
example_inputs = iris_features.values.tolist()

# ### Get Predictions from the FastAPI Endpoint

predicted = get_predictions(example_inputs).json()["output"]
iris["predicted"] = predicted
iris

# ### Stop All Serving Containers
# Finally, we will shut down the serving container we launched for the test.

# !docker kill $(docker ps -q)
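# For reference (an added illustration, not part of the original notebook), this is the JSON
# payload shape that `get_predictions` posts to `/invocations`; inspecting it does not require
# the container to be running. While the container is up, `get_health().status_code` should
# return 200.

# +
import json

sample_payload = {"features": example_inputs[:2]}
print(json.dumps(sample_payload, indent=2))
# -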
r_examples/r_serving_with_fastapi/FastAPI Example.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from palmerpenguins import load_penguins import altair as alt import pandas as pd import numpy as np import pytest test_df = pd.DataFrame({'animal': ['falcon', 'dog', 'spider', 'fish'], 'num_legs': [np.NaN, 4, 8, 0], 'num_wings': [np.NaN, 0, 0, 0], 'num_specimen_seen': [10, 2, 1, 8]}) test_df # + tags=[] sum_missing_rows = penguins.isnull().sum(axis = 1) sum(sum_missing_rows) # - input_dataframe = penguins num_complete_rows = input_dataframe.shape[0] - sum(sum_missing_rows) num_complete_rows input_dataframe.shape[0] len(list(input_dataframe.select_dtypes(include=[np.number]).columns.values)) num_of_all_missing_columns input_dataframe.isnull().sum().sum() num_complete_rows input_dataframe.memory_usage(deep=True).sum() info_df = pd.DataFrame({'rows': input_dataframe.shape[0], 'columns': input_dataframe.shape[1], 'numeric_columns': len(list(input_dataframe.select_dtypes(include=[np.number]).columns.values)), 'all_missing_columns': num_of_all_missing_columns, 'total_missing_values': input_dataframe.isnull().sum().sum(), 'complete_rows': num_complete_rows, 'total_observations': input_dataframe.shape[0] * input_dataframe.shape[1], 'memory_usage': input_dataframe.memory_usage(deep=True).sum() }, index = [0]) info_df info_df_rows = info_df.shape[0] print(info_df_rows) info_df_cols = info_df.shape[1] print(info_df_cols) assert info_df_rows == 1 assert info_df_cols == 8 float(info_df['numeric_columns']/info_df['columns']) plot_df = pd.DataFrame({'Metrics': ['Numeric Columns', 'All Missing Columns', 'Missing Observations', 'Complete Rows'], 'Value': [float(info_df['numeric_columns']/info_df['columns']), float(info_df['all_missing_columns']/info_df['columns']), float(info_df['total_missing_values']/info_df['total_observations']), float(info_df['complete_rows']/info_df['rows'])], 'Dimension': ['column', 'column', 'observation', 'row'] }) plot_df plot_df_rows = plot_df.shape[0] plot_df_cols = plot_df.shape[1] assert plot_df_rows == 4 assert plot_df_cols == 3 # + plot_title = 'Memory Usage: ' + str(float(info_df['memory_usage'])) + 'kb' intro_plot = alt.Chart(plot_df, title=plot_title).mark_bar().encode( alt.X('Value', axis=alt.Axis(format='%')), alt.Y('Metrics'), color=alt.Color('Dimension')) intro_plot # - assert intro_plot.encoding.x.field == 'Value', 'x_axis should be mapped to the x axis' assert intro_plot.encoding.y.field == 'Metrics', 'y_axis should be mapped to the y axis' # + tags=[] assert intro_plot.mark == 'bar', 'the result plot should be a bar plot' # - assert isinstance(intro_plot, alt.Chart), "output should be an altair Chart object" assert isinstance(sum(sum_missing_rows), int) sum_missing_columns = penguins.isnull().sum(axis = 0) sum(sum_missing_columns) # + # sum_missing_columns[sum_missing_columns.iloc == test_df.shape[0]] # - num_of_all_missing_columns = sum(sum_missing_columns) num_of_all_missing_columns penguins = load_penguins() penguins input_dataframe = penguins # + sum_missing_columns = input_dataframe.isnull().sum(axis = 0) num_of_all_missing_columns = sum(sum_missing_columns) assert isinstance(num_of_all_missing_columns, int) # Check whether the num of complete rows is an integer sum_missing_rows = input_dataframe.isnull().sum(axis = 1) num_complete_rows = input_dataframe.shape[0] - sum(sum_missing_rows) assert isinstance(num_complete_rows, int) # - info_df = 
pd.DataFrame({'rows': input_dataframe.shape[0], 'columns': input_dataframe.shape[1], 'numeric_columns': len(list(input_dataframe.select_dtypes(include=[np.number]).columns.values)), 'all_missing_columns': num_of_all_missing_columns, 'total_missing_values': input_dataframe.isnull().sum().sum(), 'complete_rows': num_complete_rows, 'total_observations': input_dataframe.shape[0] * input_dataframe.shape[1], 'memory_usage': input_dataframe.memory_usage(deep=True).sum(), }, index = [0]) info_df_rows = info_df.shape[0] info_df_cols = info_df.shape[1] assert info_df_rows == 1 assert info_df_cols == 8 info_df plot_df = pd.DataFrame({'Metrics': ['Numeric Columns', 'All Missing Columns', 'Missing Observations', 'Complete Rows'], 'Value': [float(info_df['numeric_columns']/info_df['columns']), float(info_df['all_missing_columns']/info_df['columns']), float(info_df['total_missing_values']/info_df['total_observations']), float(info_df['complete_rows']/info_df['rows'])], 'Dimension': ['column', 'column', 'observation', 'row'], }) plot_df_rows = plot_df.shape[0] plot_df_cols = plot_df.shape[1] assert plot_df_rows == 4 assert plot_df_cols == 3 plot_df plot_title = 'Memory Usage: ' + str(float(info_df['memory_usage'])) + 'kb' theme_config='Dimension' test_plot = alt.Chart(plot_df, title=plot_title).mark_bar().encode( alt.X('Value', axis=alt.Axis(format='%')), alt.Y('Metrics'), color=alt.Color(theme_config)) test_plot assert test_plot.encoding.x.field == 'Value', 'x_axis should be mapped to the x axis' # + penguins.select_dtypes(include=['datetime']) penguins.select_dtypes(include=['number']) # - penguins.select_dtypes(include=['object']) alt.renderers.enable(embed_options={'theme': 'ggplot2'}) penguins.select_dtypes(include='number').columns.tolist() alt.Chart(penguins).mark_bar().encode( alt.X('bill_length_mm', bin=alt.Bin(maxbins=50)), y='count()') alt.Chart(penguins).mark_bar().encode( x=alt.X('count()'), y=alt.Y('species', sort='-x') ) # + def plot_basic_distb_numeric(df, col_name): return alt.Chart(penguins).mark_bar().encode( alt.X(col_name, bin=alt.Bin(maxbins=50)), y='count()') def plot_basic_distb_string(df, col_name): return alt.Chart(penguins).mark_bar().encode( x=alt.X('count()'), y=alt.Y(col_name, sort='-x') ) def plot_basic_distributions(df, cols=None, include=None, vega_theme="ggplot2"): """Takes a dataframe and generates plots based on types Parameters ----------- df: pd.DataFrame Dataframe from which to generate plots for each column from cols: list, optional List of columns to generate plots for. By default, None (builds charts for all columns). include: string, optional Select the data types to include. Supported values include None, "string" and "number". By default, None - it will return both string and number columns. vega_theme : string, optional Select the vega.themes for the altair plots. The options include: excel, ggplot2, quartz, vox, fivethirtyeight, dark, latimes, urbaninstitute, and googlecharts. By default, it uses ggplot2. 
Returns ------- dict_plots: dict of altair.Chart objects using the column name as the key dictionary of generated altair.Chart objects with the column name as the key Examples ------- >>> example_df = pd.DataFrame({'animal': ['falcon', 'dog', 'spider', 'fish'], 'num_legs': [2, 4, 8, 0], 'num_wings': [2, 0, 0, 0], 'num_specimen_seen': [10, 2, 1, 8]}) >>> instaeda_py.plot_distribution(example_df) """ if not isinstance(df, pd.DataFrame): raise TypeError("The df parameter must be a pandas dataframe") if vega_theme not in ('excel','ggplot2','quartz','vox','fivethirtyeight', 'dark', 'latimes', 'urbaninstitute', 'googlecharts'): warnings.warn("You have selected a theme that is not one of the default Vega color themes.") dict_plots = {} df_data = None # Set vega theme alt.renderers.enable(embed_options={'theme': vega_theme}) # First filter: select columns if cols is None: df_data = df else: df_data = df[cols] if include not in (None, 'number', 'string'): raise KeyError("The include parameter must be None, 'number' or 'string'") # Second filter: select types to include if include == 'number' or include is None: df_data_number = df_data.select_dtypes(include="number") for col in df_data_number.columns.tolist(): dict_plots[col] = alt.Chart(df_data_number).mark_bar().encode( alt.X(col, bin=alt.Bin(maxbins=50)), y='count()') if include == 'string' or include is None: df_data_string = df_data.select_dtypes(include="object") for col in df_data_string.columns.tolist(): dict_plots[col] = alt.Chart(df_data_string).mark_bar().encode( x=alt.X('count()'), y=alt.Y(col, sort='-x') ) return dict_plots # - dict_plot = plot_basic_distributions(penguins) dict_plot['bill_length_mm'].mark, dict_plot['sex'].mark len(dict_plot) # + dict_plot['bill_length_mm'].encoding.y['shorthand']=='count()' dict_plot['sex'].encoding.x['shorthand']=='count()' len(dict_plot.keys()) == 8 list(dict_plot.keys()) == ['bill_length_mm', 'bill_depth_mm', 'flipper_length_mm', 'body_mass_g', 'year', 'species', 'island', 'sex'] # - dict_plot # # Divide & Fill prototypes import numpy as np from sklearn.impute import SimpleImputer def divide_and_fill( dataframe, cols=None, missing_values=np.nan, strategy="mean", fill_value=None, random=False, parts=1, verbose=0, ): """Takes a dataframe, subsets selected columns and divides into parts for imputation of missing values and returns a data frame. Parameters ----------- dataframe: pd.DataFrame Dataframe from which to take columns and check for missing values. cols: list, optional List of columns to perform imputation on. By default, None (perform on all numeric columns). missing_values: int, float, str, np.nan or None The placeholder for the missing values. All occurences of missing values will be imputed. strategy : string, optional imputation strategy, one of: {'mean', 'median', 'constant', 'most_frequent'}. By default, 'mean'. fill_value : string or numerical value, optional When strategy == 'constant', fill_value is used to replace all occurences of missing_values. If left to default, fill_value will be 0 when filling numerical data and 'missing' for strings or object data types. random : boolean, optional When random == True, shuffles data frame before filling. By default, False. parts : integer, optional The number of parts to divide rows of data frame into. By default, 1. verbose : integer, optional Controls the verbosity of the divide and fill. By default, 0. Returns ------- dataframe : pandas.DataFrame object Data frame obtained after divide and fill on the corresponding columns. 
Examples ------- >>> import numpy as np >>> from instaeda import divide_and_fill >>> example_df = pd.DataFrame({'animal': ['falcon', 'dog', 'spider', 'fish'], 'num_legs': [2, 4, 8, np.nan], 'num_wings': [2, np.nan, 0, 0], 'num_specimen_seen': [10, 2, np.nan, np.nan]}) >>> divide_and_fill(example_df) """ filled_df = None allowed_strategies = ["mean", "median", "constant", "most_frequent"] # Checking inputs if verbose: print("Checking inputs") if not isinstance(dataframe, pd.DataFrame): raise Exception("The input data must be of type pandas.DataFrame!") if cols == None: cols = list(dataframe.select_dtypes(include="number").columns) if ( not isinstance(cols, list) or not all(isinstance(x, str) for x in cols) or not set(cols).issubset(set(dataframe.columns)) ): raise Exception( "The input cols must be a list of strings belong to the column names for input dataframe!" ) if ( not isinstance(missing_values, int) and not isinstance(missing_values, float) and not isinstance(missing_values, str) and (missing_values is not None) ): raise Exception( "The input missing values must be one of the following: (int, float, str, np.nan, None)" ) if strategy not in allowed_strategies: raise ValueError( "Can only use these strategies: {0} got strategy = {1}".format( allowed_strategies, strategy ) ) if ( (fill_value is not None) and not isinstance(fill_value, int) and not isinstance(fill_value, float) and not isinstance(fill_value, str) ): raise Exception( "The input fill values must be one of the following: (int, float, str, None)" ) if not isinstance(random, bool): raise Exception("The input random must be True or False") if not isinstance(parts, int) or (parts < 1): raise ValueError("Can only use positive integer parts.") if not isinstance(verbose, int): raise ValueError("Can only use integer for verbose.") # Constructing filled dataframe skeleton. if verbose: print("Constructing filled dataframe skeleton.") if random: filled_df = dataframe.copy().sample(frac=1).reset_index(drop=True) else: filled_df = dataframe.copy() if (set(cols) <= set(dataframe.select_dtypes(include="number").columns)): if isinstance(fill_value, str) : raise ValueError( "For numeric columns, can only use fill values: (int, float, None)" ) elif (set(cols) <= set(dataframe.select_dtypes(exclude="number").columns)): if isinstance(fill_value, int) or isinstance(fill_value, float): raise ValueError( "For non-numeric columns, can only use fill values: (None, str)" ) else: raise Exception("All items in list cols must be numeric, or non-numeric.") # Filling data frame spacing = filled_df.shape[0]/(parts + 1) indexing = np.arange( 0, filled_df.shape[0] + spacing, spacing, dtype=int ) for i in range(len(indexing) - 1): imputer = SimpleImputer( missing_values=missing_values, strategy=strategy, fill_value=fill_value ) filled_df.loc[indexing[i] : indexing[i + 1], cols] = imputer.fit_transform( filled_df.loc[indexing[i] : indexing[i + 1], cols] ) if verbose: print("Returning data frame.") return filled_df na_numerical_dataframe = pd.DataFrame( {"col_1": [1, 2], "col_2": [np.nan, 0.9], "col_3": ["a", "b"]} ) df_new = pd.DataFrame({'a':[1,2,3],'b':[3,np.nan,4],'c':[2,np.nan,np.nan]}) df_new divide_and_fill(df_new) from pandas._testing import assert_frame_equal assert_frame_equal(divide_and_fill(penguins, cols = ['year']), penguins, check_dtype = False)
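# A short usage sketch for the prototype above (illustrative only, and assuming the column
# names of the palmerpenguins data): fill two numeric penguin columns with the median,
# shuffling rows first and imputing in two separate parts.

# +
filled_penguins = divide_and_fill(
    penguins,
    cols=['bill_length_mm', 'bill_depth_mm'],
    strategy='median',
    random=True,
    parts=2,
)
# no missing values should remain in the imputed columns
filled_penguins[['bill_length_mm', 'bill_depth_mm']].isnull().sum()
# -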
explore/plot_basic_distribution_prototypes.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Testing extracting OSM data using Osmium # + import os, sys, time, importlib import osmnx import geopandas as gpd import pandas as pd import networkx as nx import numpy as np sys.path.append("../../../GOSTNets") import GOSTnets as gn # pip install osmium import osmium, logging import shapely.wkb as wkblib from shapely.geometry import LineString, Point import time # - # set file some_file = './colombo.osm.pbf' # ## simplest example of using Osmium # + class HotelHandler(osmium.SimpleHandler): def __init__(self): super(HotelHandler, self).__init__() self.hotels = [] def node(self, o): if o.tags.get('tourism') == 'hotel' and 'name' in o.tags: self.hotels.append(o.tags['name']) h = HotelHandler() h.apply_file(some_file) print(sorted(h.hotels)) # - # ## Extracting highways and nodes using Osmium # + start_time = time.time() wkbfab = osmium.geom.WKBFactory() # extract highways class HighwayExtractor(osmium.SimpleHandler): def __init__(self): osmium.SimpleHandler.__init__(self) self.nodes = [] #self.raw_h = [] self.highways = [] self.broken_highways = [] self.total = 0 self.num_nodes = 0 def node(self, n): wkb = wkbfab.create_point(n) shp = wkblib.loads(wkb, hex = True) self.nodes.append([n.id, shp, shp.x, shp.y]) #self.num_nodes += 1 #self.nodes.append(shp) def way(self, w): #self.raw_h.append(w) try: nodes = [x.ref for x in w.nodes] wkb = wkbfab.create_linestring(w) shp = wkblib.loads(wkb, hex=True) if 'highway' in w.tags: info = [w.id, nodes, shp, w.tags['highway']] self.highways.append(info) # self.highways.append(w.tags['highway']) # self.total += shp.length except: print('hit exception') nodes = [x for x in w.nodes if x.location.valid()] if len(nodes) > 1: shp = LineString([Point(x.location.x, x.location.y) for x in nodes]) info = [w.id, nodes, shp, w.tags['highway']] self.highways.append(info) else: self.broken_highways.append(w) logging.warning("Error Processing OSM Way %s" % w.id) h = HighwayExtractor() h.apply_file(some_file, locations=True) #print(len(h.nodes)) print(len(h.highways)) print(len(h.broken_highways)) end_time = time.time() print(end_time - start_time) # - # ## Even though we cannot get the nodes of ways using the ogr osm driver, still time it # + start_time = time.time() from osgeo import ogr from shapely.wkt import loads driver = ogr.GetDriverByName("OSM") data = driver.Open(some_file) sql_lyr = data.ExecuteSQL("SELECT * FROM lines WHERE highway IS NOT NULL") roads = [] for feature in sql_lyr: if feature.GetField("highway") is not None: osm_id = feature.GetField("osm_id") shapely_geo = loads(feature.geometry().ExportToWkt()) if shapely_geo is None: continue highway = feature.GetField("highway") roads.append([osm_id,highway,shapely_geo]) data = driver.Open(some_file) sql_lyr = data.ExecuteSQL("SELECT * FROM points") curRes = {} for nodes in sql_lyr: nodes_vals = nodes.items() nodes_vals['geometry'] = loads(feature.geometry().ExportToWkt()) curRes[nodes_vals['osm_id']] = nodes_vals end_time = time.time() print(end_time - start_time) # - # ### results: # Extracting highways and nodes independently using the OGR OSM driver took about 6.5 seconds. While using Osmium to extract just the highways with their nodes took about 44 seconds for Colombo. Using Osmium to extract the highway nodes and highways seperately took about 185 seconds. 
nodes_df = pd.DataFrame(h.nodes, columns = ["osm_id", "geometry", "x", "y"]) # ## Split up the highways into seperate edges between each node. This involves doing a lookup based on osm_id from the nodes_df for each segment # + start_time = time.time() all_h = [] for x in h.highways: for n_idx in range(0, (len(x[1]) - 1)): try: osm_id_from = x[1][n_idx].ref except: osm_id_from = x[1][n_idx] try: osm_id_to = x[1][n_idx+1].ref except: osm_id_to = x[1][n_idx+1] try: from_pt = nodes_df.loc[nodes_df['osm_id'] == osm_id_from,'geometry'].iloc[0] to_pt = nodes_df.loc[nodes_df['osm_id'] == osm_id_to ,'geometry'].iloc[0] edge = LineString([from_pt, to_pt]) attr = {'osm_id':x[0], 'Wkt':edge, 'length':edge.length, 'infra_type':x[3]} #Create an edge from the list of nodes in both directions all_h.append([osm_id_from, osm_id_to, attr]) all_h.append([osm_id_to, osm_id_from, attr]) except: logging.warning(f"Error adding edge between nodes {osm_id_from} and {osm_id_to}") end_time = time.time() print(end_time - start_time) # - # ### results: # Took about 2404 seconds to run all_h[:5] # + start_time = time.time() G = nx.MultiDiGraph() G.add_nodes_from([[osm_id, {'shape':shp, 'x':x, 'y':y}] for osm_id, shp, x, y in h.nodes]) G.add_edges_from(all_h) end_time = time.time() print(end_time - start_time) # - # ### results: # Took about 26 seconds to run len(G.edges) gn.save(G, "osmium_graph", "./")
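# ### Possible speed-up (sketch)
#
# A minimal sketch, not part of the original timings: the edge-building loop above filters `nodes_df` twice per segment, which is what drives the ~2400-second runtime. Building a plain dict keyed on osm_id from the same `h.nodes` list should produce the same edges with O(1) lookups. Variable names below are illustrative only.

# +
node_lookup = {osm_id: geom for osm_id, geom, x, y in h.nodes}  # osm_id -> shapely Point

fast_edges = []
for way_id, way_nodes, way_shp, infra_type in h.highways:
    # way_nodes may hold NodeRef objects or raw ids, mirroring the try/except above
    refs = [getattr(n, "ref", n) for n in way_nodes]
    for ref_from, ref_to in zip(refs[:-1], refs[1:]):
        try:
            edge = LineString([node_lookup[ref_from], node_lookup[ref_to]])
        except KeyError:
            logging.warning("Error adding edge between nodes %s and %s" % (ref_from, ref_to))
            continue
        attr = {'osm_id': way_id, 'Wkt': edge, 'length': edge.length, 'infra_type': infra_type}
        fast_edges.append([ref_from, ref_to, attr])
        fast_edges.append([ref_to, ref_from, attr])
# -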
Implementations/FY21/ACC_mapbox_traffic/testing_osmium.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# #!/usr/bin/python
# The contents of this file are in the public domain. See LICENSE_FOR_EXAMPLE_PROGRAMS.txt
#
#
# This is an example illustrating the use of a binary SVM classifier tool from
# the dlib C++ Library. In this example, we will create a simple test dataset
# and show how to learn a classifier from it.
#
#
# COMPILING/INSTALLING THE DLIB PYTHON INTERFACE
# You can install dlib using the command:
#     pip install dlib
#
# Alternatively, if you want to compile dlib yourself then go into the dlib
# root folder and run:
#     python setup.py install
# or
#     python setup.py install --yes USE_AVX_INSTRUCTIONS
# if you have a CPU that supports AVX instructions, since this makes some
# things run faster.
#
# Compiling dlib should work on any operating system so long as you have
# CMake installed. On Ubuntu, this can be done easily by running the
# command:
#     sudo apt-get install cmake
#
# -

# The pickle module implements basic data serialization and deserialization. Through pickle's
# serialization we can save the objects created while the program runs to a file for permanent storage;
# through pickle's deserialization we can recreate, from that file, the objects saved by a previous run.
import dlib
try:
    import cPickle as pickle
except ImportError:
    import pickle

# +
x = dlib.vectors()
y = dlib.array()

# Make a training dataset. Here we have just two training examples. Normally
# you would use a much larger training dataset, but for the purpose of example
# this is plenty. For binary classification, the y labels should all be either +1 or -1.
x.append(dlib.vector([1, 2, 3, -1, -2, -3]))
y.append(+1)

x.append(dlib.vector([-1, -2, -3, 1, 2, 3]))
y.append(-1)
# -

# Now make a training object. This object is responsible for turning a
# training dataset into a prediction model. This one here is a SVM trainer
# that uses a linear kernel. If you wanted to use a RBF kernel or histogram
# intersection kernel you could change it to one of these lines:
#  svm = dlib.svm_c_trainer_histogram_intersection()
#  svm = dlib.svm_c_trainer_radial_basis()
svm = dlib.svm_c_trainer_linear()
svm.be_verbose()
svm.set_c(10)

# Now train the model. The return value is the trained model capable of making predictions.
classifier = svm.train(x, y)

# Now run the model on our data and look at the results.
print("prediction for first sample: {}".format(classifier(x[0])))
print("prediction for second sample: {}".format(classifier(x[1])))

# classifier models can also be pickled in the same way as any other python object.
with open('saved_model.pickle', 'wb') as handle:
    pickle.dump(classifier, handle, 2)  # save the classifier object to the file
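# A small follow-up sketch (not part of the original dlib example): reload the pickled
# classifier and check that it gives the same decision values as above. It assumes the
# 'saved_model.pickle' file written by the previous cell.
with open('saved_model.pickle', 'rb') as handle:
    restored_classifier = pickle.load(handle)

print("restored prediction for first sample: {}".format(restored_classifier(x[0])))
print("restored prediction for second sample: {}".format(restored_classifier(x[1])))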
face_struct/dlib_svm_binary_classifier.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.8.5 64-bit # name: python385jvsc74a57bd0a89ed08815272de879559165cd445f83ec1092d708f739d2a7c0690790beb904 # --- # + deletable=true editable=true from subprocrunner import SubprocessRunner runner = SubprocessRunner(["echo", "test"]) print(runner) print(f"return code: {runner.run()}") print(f"stdout: {runner.stdout}") runner = SubprocessRunner(["ls", "__not_exist_dir__"]) print(runner) print(f"return code: {runner.run()}") print(f"stderr: {runner.stderr}") # + # dry run from subprocrunner import SubprocessRunner runner = SubprocessRunner("echo test", dry_run=True) print(runner) print(f"return code: {runner.run()}") print(f"stdout: {runner.stdout}") # + deletable=true editable=true from subprocrunner import SubprocessRunner SubprocessRunner.clear_history() SubprocessRunner.is_save_history = True SubprocessRunner(["echo", "hoge"]).run() SubprocessRunner(["echo", "foo"]).run() print("\n".join(SubprocessRunner.get_history())) # -
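# +
# A small convenience sketch (not one of the library's own examples), built only on the
# attributes demonstrated above: run() returns the exit code, and stdout/stderr hold the output.
# The helper name is hypothetical.
from subprocrunner import SubprocessRunner


def run_or_raise(command):
    runner = SubprocessRunner(command)
    if runner.run() != 0:
        raise RuntimeError("command failed: {}\n{}".format(command, runner.stderr))
    return runner.stdout


print(run_or_raise(["echo", "test"]))
# -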
examples/subprocrunner.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Quantum Algorithms

# ### Overview
#
# An algorithm is, in general, the sequence of steps a computer is made to carry out in order to solve a problem.
#
# A quantum algorithm is an algorithm intended to be run on a quantum computer.
# It is usually written down as formulas in bra-ket notation, but in the end it is translated into a quantum circuit built from concrete quantum gates.
#
# The purpose of a quantum algorithm is to use a quantum computer to perform computations faster than a classical computer, or computations that are effectively impossible on a classical computer because of physical constraints (e.g. memory capacity).
# To do so it exploits properties of qubits such as superposition and entanglement.
# Unlike (classical) bits, $n$ qubits can hold $2^n$ states simultaneously, in superposition.
#
# At first glance one is tempted to think that $2^n$ computations can therefore be run in parallel, but unfortunately it is not that simple.
# The reason is that the "measurement" step of a quantum computer outputs only one of the $2^n$ states, chosen probabilistically.
# In other words, a quantum computer can hold an enormous amount of information internally, yet the information it can actually output is extremely limited.
# A quantum algorithm therefore has to be designed "cleverly", so that the desired answer is obtained with only a small number of measurements.
#
# At present, the computations for which quantum algorithms have been shown to be faster than classical algorithms are very limited.
# The famous Shor and Grover algorithms are among those few examples.
# Studying these algorithms gives a feeling for the kinds of computation in which a quantum computer can more easily gain an advantage over a classical one.
# Discovering new computations for which quantum algorithms are faster will, in turn, drive the future use of quantum computers.

# ### NISQ algorithms

# Current (2021) quantum computers are still small in scale, and the effect of noise on their results cannot be ignored. Such quantum computers are distinguished as Noisy Intermediate-Scale Quantum (NISQ) devices [1].
# The quantum algorithms above, which have been theoretically shown to be faster than classical algorithms, cannot be run at a practical scale and accuracy on NISQ devices.
# Quantum computers that can do so (large-scale, fault-tolerant quantum computers) are said to be several decades away.
#
# On the other hand, as announced under the banner of "quantum supremacy", today's largest NISQ devices have already reached a regime that existing classical computers cannot simulate efficiently [2].
# It is therefore expected that, without waiting decades, there exist computations that can be carried out faster with NISQ devices than with existing classical computers.
# Such algorithms are called NISQ algorithms, and a worldwide research and development race around them is now under way.

# ### On learning NISQ and non-NISQ algorithms

# The tutorials that follow present non-NISQ algorithms (labelled "Universal Quantum Algorithm") and NISQ algorithms separately.
# The basic way of building quantum circuits and the quantum gates used are common to both.
# Compared with non-NISQ algorithms, NISQ algorithms put more emphasis on running with a small number of qubits and on tolerating errors in the computation.
#
# For learning, non-NISQ quantum algorithms make it easier to see the mechanism by which quantum computation becomes faster than classical computation.
# If, on the other hand, you want to focus on near-term practical use, it is better to concentrate on NISQ algorithms.

# ### References
#
# [1] Bharti, Kishor, et al. "Noisy intermediate-scale quantum (NISQ) algorithms." arXiv preprint arXiv:2101.08448 (2021).
# [2] Arute, Frank, et al. "Quantum supremacy using a programmable superconducting processor." Nature 574.7779 (2019): 505-510.
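# A small NumPy sketch (not part of the original tutorial) of the measurement statistics described above: an equal superposition over $2^n$ basis states carries $2^n$ amplitudes internally, but each measurement returns just one basis state, sampled according to the squared amplitudes.

# +
import numpy as np

n = 3
amplitudes = np.ones(2**n) / np.sqrt(2**n)    # equal superposition over 8 basis states
probabilities = np.abs(amplitudes) ** 2       # Born rule: |amplitude|^2
shots = np.random.choice(2**n, size=10, p=probabilities)
print([format(s, '03b') for s in shots])      # one 3-bit outcome per shot, e.g. ['101', '010', ...]
# -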
tutorial-ja/008_quantum_algorithm_ja.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="iovQcSbqJl0_" # ## Objectives: # - Student should be able to Explain why we care about linear algebra in the scope of data science # - Student should be able to Conceptualize and utilize vectors and matrices through matrix operations and properties such as: square matrix, identity matrix, transpose and inverse # - Student should be able to Show when two vectors/matrices are orthogonal and explain the intuitive implications of orthogonality # - Student should be able to Calculate (by hand for small examples, with numpy for large) and understand importance of eigenvalues, eigenvectors # # + [markdown] colab_type="text" id="P1wyKUbyoFmR" # # Why Linear Algebra? (ELI5 + Soapbox) # # Data Science, Machine Learning, and Artificial intelligence is all about getting computers to do things for us better, cheaper, and faster than we could do them ourselves. # # How do we do that? Computers are good at doing small repetitive tasks (like arithmetic). if we tell them what small repetitive tasks to do in the right order then sometimes all of those combined behaviors will result in something that looks like a human's behavior (or at least the decisions/output look like something a human might decide to do/create). # # <center><img alt="Le Comte de Belamy - GAN Art" src="https://obvious-art.com/assets/img/comtedorures.jpg" width="300"></center> # # <center>[Le Comte de Belamy](https://obvious-art.com/le-comte-de-belamy.htm)</center> # # The set of instructions that we give to a computer to complete certain tasks is called an **algorithm**. The better that we can organize the set of instructions, the faster that computers can do them. The method that we use to organize and store our set of instructions so that the computer can do them super fast is called a **data structure**. The practice of optimizing the organization of our data structures so that they run really fast and efficiently is called **computer science**. (This is why we will have a unit dedicated solely to computer science in a few months). Data Scientists should care how fast computers can process their sets of instructions (algorithms). # + [markdown] colab_type="text" id="_3uzRFr5_53h" # ## A set of ordered instructions # # Here's a simple data structure, in Python it's known as a **list**. It's one of the simplest ways that we can store things (data) and maintain their order. When giving instructions to a computer, it's important that the computer knows in what order to execute them. # + colab={} colab_type="code" id="XPwuTmmF88Vc" selfDrivingCarInstructions = [ "open door", "sit on seat", "put key in ignition", "turn key to the right until it stops", "push brake pedal", "change gear to 'Drive'", "release brake pedal", "push gas pedal", '''turn wheel to navigate streets with thousands of small rules and exeptions to rules all while avoiding collision with other objects/humans/cars, obeying traffic laws, not running out of fuel and getting there in a timely manner''', "close door" ] # We'll have self-driving cars next week for sure. NBD # + [markdown] colab_type="text" id="6UaV8JLt-0EU" # # Maintaining the order of our sets of ordered instruction-sets # Here's another data structure we can make by putting lists inside of lists, this is called a two-dimensional list. 
Sometimes it is also known as a two-dimensional array or --if you put some extra methods on it-- a dataframe. As you can see things are starting to get a little bit more complicated. # + colab={} colab_type="code" id="TVxRAWPi_IWE" holdMyData = [ [1,2,3], [4,5,6], [7,8,9] ] # Disregard the quality of these bad instructions # + [markdown] colab_type="text" id="M8hglzzK9A-4" # ## Linear Algebra - organize and execute big calculations/operations really fast # # So why linear algebra? Because the mathematical principles behinds **vectors** and **matrices** (lists and 2D lists) will help us understand how we can tell computers how to do an insane number of calculations in a very short amount of time. # # Remember when we said that computers are really good at doing small and repetitive tasks very quickly? # # ## I Give You... Matrix Multiplication: # # <center><img src="https://2000thingswpf.files.wordpress.com/2013/04/794-002.png?w=630" width="400"></center> # # <center>If you mess up any of those multiplications or additions you're up a creek.</center> # # ## I Give You... Finding the Determinant of a Matrix: (an introductory linear algebra topic) # # ## 2x2 Matrix # # <center><img src="http://cdn.virtualnerd.com/tutorials/Alg2_04_01_0017/assets/Alg2_04_01_0017_D_01_16.png" width="400"></center> # <center>Just use the formula!</center> # # ## 3x3 Matrix # # <center><img src="https://www.thecalculator.co/includes/forms/assets/img/Matrix%20determinant%203x3%20formula.jpg" width='400'></center> # <center>Just calculate the determinant of 3 different 2x2 matrices and multiply them by 3 other numbers and add it all up.</center> # # ## 4x4 Matrix # # <center><img src="https://semath.info/img/inverse_cofactor_ex4_02.jpg" width='400'></center> # <center>Just calculate 3 diferent 3x3 matrix determinants which will require the calculating of 9 different 2x2 matrix determinants, multiply them all by the right numbers and add them all up. And if you mess up any of those multiplications or additions you're up a creek.</center> # # ## 5x5 Matrix! # # # ## ... # # # ## ... # # Just kidding, any linear algebra professor who assigns the hand calculation of a 5x5 matrix determinant (or larger) is a sadist. This is what computers were invented for! Why risk so much hand calculation in order to do something that computers **never** make a mistake at? # # By the way, when was the last time that you worked with a dataframe that was 4 rows x 4 columns or smaller? # # Quick, find the determinant of this 42837x42837 dataframe by hand! 
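# + [markdown]
# A quick aside (a sketch, not part of the original lesson): NumPy will happily do the determinants described above in a single call, which is exactly the "let the computer do it" point. The 5x5 matrix below is just a random example.

# +
import numpy as np

A = np.random.randint(-5, 6, size=(5, 5))   # a random 5x5 integer matrix
print(A)
print("det(A) =", np.linalg.det(A))         # one function call instead of pages of cofactor expansion
# -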
# + [markdown] colab_type="text" id="SjWIJpHBPiq1" # # Common Applications of Linear Algebra in Data Science: # # - Vectors: Rows, Columns, lists, arrays # # - Matrices: tables, spreadsheets, dataframes # # - Linear Regression: (You might remember from the intro course) # # <center><img src="http://www.ryanleeallred.com/wp-content/uploads/2018/08/OLS-linear-algebra.png" width="400"></center> # + colab={"base_uri": "https://localhost:8080/", "height": 52} colab_type="code" id="lbZfHf6habbz" outputId="e4a5b0c0-972c-45ed-ef65-e313c3ff5ef6" # Linear Regression Example import matplotlib.pyplot as plt import numpy as np import pandas as pd # Read CSV df = pd.read_csv('https://raw.githubusercontent.com/ryanleeallred/datasets/master/Ice_Cream_Sales.csv') # Create Column of 1s df['Ones'] = np.ones(11) # Format X and Y Matrices X = df[['Ones', 'Farenheit']].as_matrix() Y = df['Dollars'].as_matrix().reshape(-1, 1) # Calculate Beta Values beta = np.matmul(np.linalg.inv(np.matmul(np.transpose(X), X)), np.matmul(np.transpose(X), Y)) print(beta) # + colab={"base_uri": "https://localhost:8080/", "height": 377} colab_type="code" id="1OY1lXBMb8Jj" outputId="971602b5-d568-4040-c616-d5408b89545a" # Assign Beta Values to Variables beta_0 = beta[0,0] beta_1 = beta[1,0] # Plot points with line of best fit plt.scatter(df['Farenheit'], df['Dollars']) axes = plt.gca() x_vals = np.array(axes.get_xlim()) y_vals = beta_0 + beta_1 * x_vals plt.plot(x_vals, y_vals, '-', color='b') plt.title('Ice Cream Sales Regression Line') plt.xlabel('Farenheit') plt.ylabel('Dollars') plt.show() # + [markdown] colab_type="text" id="_ESowKHqcXfy" # - Dimensionality Reduction Techniques: Principle Component Analysis (PCA) and Singular Value Decomposition (SVD) # # Take a giant dataset and distill it down to its important parts. (typically as a pre-processing step for creating visualizations or putting into other models.) # # <center><img src="http://www.sthda.com/english/sthda-upload/figures/principal-component-methods/006-principal-component-analysis-scatter-plot-data-mining-1.png" width="400"></center> # + [markdown] colab_type="text" id="Egs4F1Wgcb3J" # - Deep Learning: Convolutional Neural Networks, (Image Recognition) # # # # "Convolving" is the process of passing a filter/kernel (small matrix) over the pixels of an image, multiplying them together, and using the result to create a new matrix. The resulting matrix will be a new image that has been modified by the filter to emphasize certain qualities of an image. This is entirely a linear algebra-based process. A convolutional neural network learns the filters that help it best identify certain aspects of images and thereby classify immages more accurately. 
# # <center><img src="https://icecreamlabs.com/wp-content/uploads/2018/08/33-con.gif"></center> # + colab={"base_uri": "https://localhost:8080/", "height": 87} colab_type="code" id="2P8WEcCagBBx" outputId="280f6413-98c3-4ee0-b072-77aded5fa1c0" # !pip install imageio # + colab={"base_uri": "https://localhost:8080/", "height": 351} colab_type="code" id="5zVI4VWxknx-" outputId="6a508bf4-d83c-443d-a593-5d2075b76371" # Convolution in action import imageio import matplotlib.pyplot as plt import numpy as np import scipy.ndimage as nd from skimage.exposure import rescale_intensity img = imageio.imread('https://www.dropbox.com/s/dv3vtiqy439pzag/all_the_things.png?raw=1') plt.axis('off') plt.imshow(img); # + colab={"base_uri": "https://localhost:8080/", "height": 368} colab_type="code" id="DjvrPkWVlH11" outputId="9d8e903f-0305-408b-8858-fed890f4af8f" # Convert I to grayscale, so it will be MxNx1 instead of MxNx4 from skimage import color grayscale = rescale_intensity(1-color.rgb2gray(img)) print(grayscale.shape) plt.axis('off') plt.imshow(grayscale); # + colab={"base_uri": "https://localhost:8080/", "height": 351} colab_type="code" id="oX_-JddjlJ9u" outputId="4feafe6a-404a-4727-f93d-8623fc336716" laplacian = np.array([[0,0,1,0,0], [0,0,2,0,0], [1,2,-16,2,1], [0,0,2,0,0], [0,0,1,0,0]]) laplacian_image = nd.convolve(grayscale, laplacian) plt.axis('off') plt.imshow(laplacian_image); # + colab={"base_uri": "https://localhost:8080/", "height": 351} colab_type="code" id="zTp35yHxcx8M" outputId="911c3e4e-f2fd-4e4a-9cfd-72ec37bcf6e9" sobel_x = np.array([ [-1,0,1], [-2,0,2], [-1,0,1] ]) sobel_x_image = nd.convolve(grayscale, sobel_x) plt.axis('off') plt.imshow(sobel_x_image); # + colab={"base_uri": "https://localhost:8080/", "height": 351} colab_type="code" id="1klzU7gucyEb" outputId="5065e555-9e72-4c0d-b091-0485175719cc" sobel_y = np.array([ [1,2,1], [0,0,0], [-1,-2,-1] ]) sobel_y_image = nd.convolve(grayscale, sobel_y) plt.axis('off') plt.imshow(sobel_y_image); # + [markdown] colab_type="text" id="gc0fReyWSrXH" # ## Are we going to learn to do Linear Algebra by hand? # # Let me quote your seventh grade maths teacher: # # <center><img src="http://www.ryanleeallred.com/wp-content/uploads/2018/12/carry-a-calculator-around.jpg" width="400"></center> # # Of course you're going to carry a calculator around everywhere, so mostly **NO**, we're not going to do a lot of hand calculating. We're going to try and refrain from calculating things by hand unless it is absolutely necessary in order to understand and implement the concepts. # # We're not trying to re-invent the wheel. # # We're learning how to **use** the wheel. # + [markdown] colab_type="text" id="YceHUblDSHOF" # # Linear Algebra Overview/Review: # + [markdown] colab_type="text" id="ke1ffJRNMOnv" # ## Scalars: # + [markdown] colab_type="text" id="ODzKTJLGTnBz" # A single number. Variables representing scalars are typically written in lower case. # # Scalars can be whole numbers or decimals. # # \begin{align} # a = 2 # \qquad # b = 4.815162342 # \end{align} # # They can be positive, negative, 0 or any other real number. 
# # \begin{align} # c = -6.022\mathrm{e}{+23} # \qquad # d = \pi # \end{align} # + colab={"base_uri": "https://localhost:8080/", "height": 363} colab_type="code" id="SIyzbJFxamu1" outputId="bc411f5f-de0e-4c05-e179-d8c5b59d47e2" import math import matplotlib.pyplot as plt import numpy as np # Start with a simple vector blue = [.5, .5] # Then multiply it by a scalar green = np.multiply(2, blue) red = np.multiply(math.pi, blue) orange = np.multiply(-0.5, blue) # Plot the Scaled Vectors plt.arrow(0,0, red[0], red[1],head_width=.05, head_length=0.05, color ='red') plt.arrow(0,0, green[0], green[1],head_width=.05, head_length=0.05, color ='green') plt.arrow(0,0, blue[0], blue[1],head_width=.05, head_length=0.05, color ='blue') plt.arrow(0,0, orange[0], orange[1],head_width=.05, head_length=0.05, color ='orange') plt.xlim(-1,2) plt.ylim(-1,2) plt.title("Scaled Vectors") plt.show() # + [markdown] colab_type="text" id="d3tU-l96SNa3" # ## Vectors: # + [markdown] colab_type="text" id="TRhtzFMwTz0Z" # ### Definition # A vector of dimension *n* is an **ordered** collection of *n* elements, which are called **components** (Note, the components of a vector are **not** referred to as "scalars"). Vector notation variables are commonly written as a bold-faced lowercase letters or italicized non-bold-faced lowercase characters with an arrow (→) above the letters: # # Written: $\vec{v}$ # # Examples: # # \begin{align} # \vec{a} = # \begin{bmatrix} # 1\\ # 2 # \end{bmatrix} # \qquad # \vec{b} = # \begin{bmatrix} # -1\\ # 0\\ # 2 # \end{bmatrix} # \qquad # \vec{c} = # \begin{bmatrix} # 4.5 # \end{bmatrix} # \qquad # \vec{d} = # \begin{bmatrix} # Pl\\ # a\\ # b\\ # \frac{2}{3} # \end{bmatrix} # \end{align} # # The above vectors have dimensions 2, 3, 1, and 4 respectively. # # Why do the vectors below only have two components? # + colab={"base_uri": "https://localhost:8080/", "height": 363} colab_type="code" id="GzxZcC8fdf5s" outputId="4c294194-a9eb-4284-8e88-cf30d5cd681d" # Vector Examples yellow = [.5, .5] red = [.2, .1] blue = [.1, .3] plt.arrow(0, 0, .5, .5, head_width=.02, head_length=0.01, color = 'y') plt.arrow(0, 0, .2, .1, head_width=.02, head_length=0.01, color = 'r') plt.arrow(0, 0, .1, .3, head_width=.02, head_length=0.01, color = 'b') plt.title('Vector Examples') plt.show() # + [markdown] colab_type="text" id="Q2pNvak3gDGY" # In domains such as physics it is emphasized that vectors have two properties: direction and magnitude. It's rare that we talk about them in that sense in Data Science unless we're specifically in a physics context. We just note that the length of the vector is equal to the number of dimensions of the vector. # # What happens if we add a third component to each of our vectors? # + colab={} colab_type="code" id="BezrgDoH-Ky-" from mpl_toolkits.mplot3d import Axes3D import numpy as np yellow = [.5, .5, .5] red = [.2, .1, .0] blue = [.1, .3, .3 ] vectors = np.array([[0, 0, 0, .5, .5, .5], [0, 0, 0, .2, .1, .0], [0, 0, 0, .1, .3, .3]]) X, Y, Z, U, V, W = zip(*vectors) fig = plt.figure() ax = fig.add_subplot(111, projection='3d') ax.quiver(X, Y, Z, U, V, W, length=1) ax.set_xlim([0, 1]) ax.set_ylim([0, 1]) ax.set_zlim([0, 1]) ax.set_xlabel('X') ax.set_ylabel('Y') ax.set_zlabel('Z') plt.show() # + [markdown] colab_type="text" id="Pzg99dFw-Oj8" # ### Norm of a Vector (Magnitude or length) # # The *Norm* or *Magnitude* of a vector is nothing more than the **length** of the vector. 
Since a vector is just a line (essentially) if you treat it as the hypotenuse of a triangle you could use the pythagorean theorem to find the equation for the norm of a vector. We're essentially just generalizing the equation for the hypotenuse of a triangle that results from the pythagorean theorem to n dimensional space. # # We denote the norm of a vector by wrapping it in double pipes (like double absolute value signs) # # \begin{align} # ||v|| = # \sqrt{v_{1}^2 + v_{2}^2 + \ldots + v_{n}^2} # \\ # \vec{a} = # \begin{bmatrix} # 3 & 7 & 2 & 4 # \end{bmatrix} # \\ # ||a|| = \sqrt{3^2 + 7^2 + 2^2 + 4^2} \\ # ||a|| = \sqrt{9 + 49 + 4 + 16} \\ # ||a|| = \sqrt{78} # \end{align} # # The Norm is the square root of the sum of the squared elements of a vector. # # Properties of the Norm: # # The norm is always positive or zero $||x|| \geq 0$ # # The norm is only equal to zero if all of the elements of the vector are zero. # # The Triangle Inequality: $|| x + y ||\leq ||x|| + ||y||$ # # + [markdown] colab_type="text" id="UOr0BCxn-Wlh" # ### Dot Product # # The dot product of two vectors $\vec{a}$ and $\vec{b}$ is a scalar quantity that is equal to the sum of pair-wise products of the components of vectors a and b. # # \begin{align} \vec{a} \cdot \vec{b} = (a_{1} \times b_{1}) + (a_{2} \times b_{2}) + \ldots + ( a_{n} \times b_{n}) \end{align} # # Example: # # \begin{align} # \vec{a} = # \begin{bmatrix} # 3 & 7 & 2 & 4 # \end{bmatrix} # \qquad # \vec{b} = # \begin{bmatrix} # 4 & 1 & 12 & 6 # \end{bmatrix} # \end{align} # # The dot product of two vectors would be: # \begin{align} # a \cdot b = (3)(4) + (7)(1) + (2)(12) + (4)(6) \\ # = 12 + 7 + 24 + 24 \\ # = 67 # \end{align} # # The dot product is commutative: $ \vec{} \cdot b = b \cdot a$ # # The dot product is distributive: $a \cdot (b + c) = a \cdot b + a \cdot c$ # # Two vectors must have the same number of components in order for the dot product to exist. If their lengths differ the dot product is undefined. # + [markdown] colab_type="text" id="IUeBZtVr-nro" # ### Cross Product # # The Cross Product is the vector equivalent of multiplication. The result is a third vector that is perpendicular to the first two vectors. # # It is written with a regular looking multiplication sign like $a \times b$ but it is read as "a cross b" # # The cross product equation is a little complicated, and gaining an intuition for it is going to take a little bit more time than we have here. I think it's the least useful of the vector operations, but I'll give you a short example anyway. # # # Assume that we have vectors $x$ and $y$. # # \begin{align} # x = \begin{bmatrix} x_1 & x_2 & x_3 \end{bmatrix} # \qquad # y = \begin{bmatrix} y_1 & y_2 & y_3 \end{bmatrix} # \end{align} # # The cross product can be found by taking these two vectors and adding a third unit vector to create a 3x3 matrix and then finding the determinant of the 3x3 matrix like follows: # # \begin{align} # x = \begin{vmatrix} # i & j & k \\ # x_1 & x_2 & x_3 \\ # y_1 & y_2 & y_3 # \end{vmatrix} # \end{align} # \begin{align} = # i\begin{vmatrix} # x_2 & x_3 \\ # y_2 & y_3 # \end{vmatrix} # # + j\begin{vmatrix} # x_1 & x_3 \\ # y_1 & y_3 # \end{vmatrix} # # + k\begin{vmatrix} # x_1 & x_2 \\ # y_1 & y_2 # \end{vmatrix} # \end{align} # # # + [markdown] colab_type="text" id="drCVY_F8SaPI" # ## Matrices: # + [markdown] colab_type="text" id="dKYeFAUbaP4j" # A **matrix** is a rectangular grid of numbers arranged in rows and columns. 
Variables that represent matrices are typically written as capital letters (boldfaced as well if you want to be super formal). # # \begin{align} # A = # \begin{bmatrix} # 1 & 2 & 3\\ # 4 & 5 & 6\\ # 7 & 8 & 9 # \end{bmatrix} # \qquad # B = \begin{bmatrix} # 1 & 2 & 3\\ # 4 & 5 & 6 # \end{bmatrix} # \end{align} # # + [markdown] colab_type="text" id="4oKAHHil-0bm" # ### Dimensionality # # The number of rows and columns that a matrix has is called its **dimension**. # # When listing the dimension of a matrix we always list rows first and then columns. # # The dimension of matrix A is 3x3. (Note: This is read "Three by Three", the 'x' isn't a multiplication sign.) # # What is the Dimension of Matrix B? # + [markdown] colab_type="text" id="VqWzluoD-9Pf" # ### Matrix Equality # # In order for two Matrices to be equal the following conditions must be true: # # 1) They must have the same dimensions. # # 2) Corresponding elements must be equal. # # \begin{align} # \begin{bmatrix} # 1 & 4\\ # 2 & 5\\ # 3 & 6 # \end{bmatrix} # \neq # \begin{bmatrix} # 1 & 2 & 3\\ # 4 & 5 & 6 # \end{bmatrix} # \end{align} # + [markdown] colab_type="text" id="MYwxF4R8_Gnf" # ### Matrix Multiplication # # You can multipy any two matrices where the number of columns of the first matrix is equal to the number of rows of the second matrix. # # The unused dimensions of the factor matrices tell you what the dimensions of the product matrix will be. # # ![Matrix Multiplication Dimensions](https://images.tutorvista.com/cms/images/113/product-matrix.jpg) # # There is no commutative property of matrix multiplication (you can't switch the order of the matrices and always get the same result). # # Matrix multiplication is best understood in terms of the dot product. Remember: # # \begin{align} \vec{a} \cdot \vec{b} = (a_{1} \times b_{1}) + (a_{2} \times b_{2}) + \ldots + ( a_{n} \times b_{n}) \end{align} # # To multiply to matrices together, we will take the dot product of each row of the first matrix with each column of the second matrix. The position of the resulting entries will correspond to the row number and column number of the row and column vector that were used to find that scalar. Lets look at an example to make this more clear. # # ![Dot Product Matrix Multiplication](https://www.mathsisfun.com/algebra/images/matrix-multiply-a.svg) # # \begin{align} # \begin{bmatrix} # 1 & 2 & 3 \\ # 4 & 5 & 6 # \end{bmatrix} # \times # \begin{bmatrix} # 7 & 8 \\ # 9 & 10 \\ # 11 & 12 # \end{bmatrix} # = # \begin{bmatrix} # (1)(7)+(2)(9)+(3)(11) & (1)(8)+(2)(10)+(3)(12)\\ # (4)(7)+(5)(9)+(6)(11) & (4)(8)+(5)(10)+(6)(12) # \end{bmatrix} # = # \begin{bmatrix} # (7)+(18)+(33) & (8)+(20)+(36)\\ # (28)+(45)+(66) & (32)+(50)+(72) # \end{bmatrix} # = # \begin{bmatrix} # 58 & 64\\ # 139 & 154 # \end{bmatrix} # \end{align} # + [markdown] colab_type="text" id="aJt-S8zA_Lk9" # ## Transpose # # A transposed matrix is one whose rows are the columns of the original and whose columns are the rows of the original. # # Common notation for the transpose of a matrix is to have a capital $T$ superscript or a tick mark: # # \begin{align} # B^{T} # \qquad # B^{\prime} # \end{align} # # The first is read "B transpose" the second is sometimes read as "B prime" but can also be read as "B transpose". # # The transpose of any matrix can be found easily by fixing the elements on the main diagonal and flipping the placement of all other elements across that diagonal. 
# # <center>![Matrix Transpose](http://xaktly.com/Images/Mathematics/MatrixAlgebra/MatrixOperations/MatrixTranspose.png) # # \begin{align} # B = # \begin{bmatrix} # 1 & 2 & 3 \\ # 4 & 5 & 6 # \end{bmatrix} # \qquad # B^{T} = # \begin{bmatrix} # 1 & 4 \\ # 2 & 5 \\ # 3 & 6 # \end{bmatrix} # \end{align} # + [markdown] colab_type="text" id="dDTzd-Mw_Q0G" # ## Square Matrix: # # In a true linear algebra class after the first few weeks you would deal almost exclusively with square matrices. They have very nice properties that their lopsided sisters and brothers just don't possess. # # A square matrix is any matrix that has the same number of rows as columns: # # \begin{align} # A = # \begin{bmatrix} # a_{1,1} # \end{bmatrix} # \qquad # B = # \begin{bmatrix} # b_{1,1} & b_{1,2} \\ # b_{2,1} & b_{2,2} # \end{bmatrix} # \qquad # C = # \begin{bmatrix} # c_{1,1} & c_{1,2} & c_{1,3} \\ # c_{2,1} & c_{2,2} & c_{2,3} \\ # c_{3,1} & c_{3,2} & c_{3,3} # \end{bmatrix} # \end{align} # + [markdown] colab_type="text" id="fSfLokfx_T4x" # ### Special Kinds of Square Matrices # # **Diagonal:** Values on the main diagonal, zeroes everywhere else. # # \begin{align} # A = # \begin{bmatrix} # a_{1,1} & 0 & 0 \\ # 0 & a_{2,2} & 0 \\ # 0 & 0 & a_{3,3} # \end{bmatrix} # \end{align} # # **Upper Triangular:** Values on and above the main diagonal, zeroes everywhere else. # # \begin{align} # B = # \begin{bmatrix} # b_{1,1} & b_{1,2} & b_{1,3} \\ # 0 & b_{2,2} & b_{2,3} \\ # 0 & 0 & b_{3,3} # \end{bmatrix} # \end{align} # # **Lower Triangular:** Values on and below the main diagonal, zeroes everywhere else. # # \begin{align} # C = # \begin{bmatrix} # c_{1,1} & 0 & 0 \\ # c_{2,1} & c_{2,2} & 0 \\ # c_{3,1} & c_{3,2} & c_{3,3} # \end{bmatrix} # \end{align} # # **Identity Matrix:** A diagonal matrix with ones on the main diagonal and zeroes everywhere else. The product of the any square matrix and the identity matrix is the original square matrix $AI == A$. Also, any matrix multiplied by its inverse will give the identity matrix as its product. $AA^{-1} = I$ # # \begin{align} # D = # \begin{bmatrix} # 1 # \end{bmatrix} # \qquad # E = # \begin{bmatrix} # 1 & 0 \\ # 0 & 1 # \end{bmatrix} # \qquad # F = # \begin{bmatrix} # 1 & 0 & 0 \\ # 0 & 1 & 0 \\ # 0 & 0 & 1 # \end{bmatrix} # \end{align} # # **Symmetric:** The numbers above the main diagonal are mirrored below/across the main diagonal. # # \begin{align} # G = # \begin{bmatrix} # 1 & 4 & 5 \\ # 4 & 2 & 6 \\ # 5 & 6 & 3 # \end{bmatrix} # \end{align} # # # # + [markdown] colab_type="text" id="rBwAaji5San4" # ## Determinant # # The determinant is a property that all square matrices possess and is denoted $det(A)$ or using pipes (absolute value symbols) $|A|$ # # The equation given for finding the determinant of a 2x2 matrix is as follows: # # \begin{align} # A = \begin{bmatrix} # a & b \\ # c & d # \end{bmatrix} # \qquad # |A| = ad-bc # \end{align} # # The determinant of larger square matrices is recursive - by finding the determinats of the smaller matrics that make up the large matrix. # # For example: # # <center><img src="https://wikimedia.org/api/rest_v1/media/math/render/svg/14f2f2a449d6d152ee71261e47551aa0a31c801e" width=500></center> # # The above equation is **very** similar to the equation that we use to find the cross-product of a 3x3 matrix. The only difference is the negative sign in front of the $b$. # + [markdown] colab_type="text" id="8aGG5d4HSauH" # ## Inverse # # There are multiple methods that we could use to find the inverse of a matrix by hand. 
I would suggest you explore those methods --if this content isn't already overwhelming enough. The inverse is like the reciprocal of the matrix that was used to generate it. Just like $\frac{1}{8}$ is the reciprocal of 8, $A^{-1}$ acts like the reciprocal of $A$. The equation for finding the inverse of a 2x2 matrix is as follows:
#
# \begin{align}
# A = \begin{bmatrix}
# a & b \\
# c & d
# \end{bmatrix}
# \qquad
# A^{-1} = \frac{1}{ad-bc}\begin{bmatrix}
# d & -b\\
# -c & a
# \end{bmatrix}
# \end{align}

# + [markdown] colab_type="text" id="tlOXGq4QMSZy"
# ### What happens if we multiply a matrix by its inverse?
#
# The product of a matrix multiplied by its inverse is the identity matrix of the same dimensions as the original matrix. There is no concept of "matrix division" in linear algebra, but multiplying a matrix by its inverse is very similar since $8\times\frac{1}{8} = 1$.
#
# \begin{align}
# A^{-1}A = I
# \end{align}
#

# + [markdown] colab_type="text" id="46cJIv0YL7Jp"
# ### Not all matrices are invertible
#
# Matrices that are not square are not invertible.
#
# A matrix is invertible if and only if its determinant is non-zero. You'll notice that the fraction on the left side of the matrix is $\frac{1}{det(A)}$.
#
# As you know, dividing anything by 0 leads to an undefined quotient. Therefore, if the determinant of a matrix is 0, then the entire inverse becomes undefined.

# + [markdown] colab_type="text" id="Iq0TYH-jMBwK"
# ### What leads to a 0 determinant?
#
# A square matrix that has a determinant of 0 is known as a "singular" matrix. One thing that can lead to a matrix having a determinant of 0 is if two rows or columns in the matrix are perfectly collinear. Another way of saying this is that the determinant will be zero if the rows or columns of the matrix are linearly dependent (that is, not linearly independent).
#
# One of the most common ways that a matrix can end up with linearly dependent columns is if one column is a multiple of another column. Let's look at an example:
#
# \begin{align}
# C =\begin{bmatrix}
# 1 & 5 & 2 \\
# 2 & 7 & 4 \\
# 3 & 2 & 6
# \end{bmatrix}
# \end{align}
#
# Look at the columns of the above matrix: column 3 is exactly double column 1 (it could be any multiple or fraction). Imagine you had a distance measured in miles in a dataset, and you also wanted its units in feet, so you created another column by multiplying the mile measure by 5,280 (thanks, Imperial System). If you then forget to drop one of the columns, you end up with two columns that are linearly dependent, which makes the determinant of your dataframe 0 and will cause certain algorithms to fail. We'll go deeper into this concept next week (this can cause problems with linear regression), so just know that matrices that have columns that are a multiple or fraction of another column will have a determinant of 0.
#
# For more details about when a matrix is invertible google the "Invertible Matrix Theorem" but be prepared for some heavy linear algebra jargon.

# + [markdown] colab_type="text" id="aLj9XAZHQgSP"
# ## Who's ready to get familiar with NumPy???
#
# [Helpful NumPy Linear Algebra Functions](https://docs.scipy.org/doc/numpy-1.15.1/reference/routines.linalg.html)
#

# + colab={} colab_type="code" id="UVhixZZZQfUw"
### What should we do first? :) Want to see anything demonstrated?
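# One possible demonstration (a sketch, not from the original notebook), covering the
# operations discussed above with NumPy:
import numpy as np

a = np.array([3, 7, 2, 4])
b = np.array([4, 1, 12, 6])
print("norm of a:", np.linalg.norm(a))            # sqrt(78), as computed by hand above
print("a dot b:", np.dot(a, b))                   # 67

A = np.array([[1, 2, 3], [4, 5, 6]])
B = np.array([[7, 8], [9, 10], [11, 12]])
print("A x B:\n", np.matmul(A, B))                # [[58, 64], [139, 154]]
print("A transpose:\n", A.T)

C = np.array([[1, 5, 2], [2, 7, 4], [3, 2, 6]])   # column 3 is double column 1
print("det(C):", np.linalg.det(C))                # ~0, so C is singular and has no inverse

D = np.array([[2, 1], [5, 3]])
print("D times its inverse:\n", np.matmul(D, np.linalg.inv(D)))   # identity matrix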
05-Linear-Algebra/01_Linear_Algebra.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # ACMEFIT K8s # # This repo contains a Polyglot demo application comprised of (presently) 6 microservices and 4 datastores. # # The contents here are the necessary YAML files to deploy the ACMEFIT application in a kubernetes cluster. # # This app is developed by team behind www.cloudjourney.io # # The current version of the application passes JSON Web Tokens (JWT) for authentication on certain API calls. The application will not work as expected if the `users` service is not present to issue / authenticate these tokens. # # ## Datastore Dependent Services # # This section covers the deployment of the datastore dependent microservices. It is recommended to deploy these services first. # # ### Cart Service # # Before deploying the cart datastore (Redis) and cart service please add a secret for the service to use in authenticating with the cache. # *Note: Please replace 'value' in the command below with the desired password text. Changing the name of the secret object or the 'password' key may cause deployment issues* # # !kubectl create secret generic cart-redis-pass --from-literal=password=<PASSWORD>!<PASSWORD>! # Once the secret object is created, deploy the redis cache and cart service: # # !kubectl apply -f cart-redis-total.yaml # !kubectl apply -f cart-total.yaml # ### Catalog Service # # Before deploying the catalog datastore (mongo) and catalog service please add a secret for the service to use in authenticating with the cache. # *Note: Please replace 'value' in the command below with the desired password text. Changing the name of the secret object or the 'password' key may cause deployment issues* # !kubectl create secret generic catalog-mongo-pass --from-literal=password=<PASSWORD>! # Run the following command to initialize the catalog database with items: # !kubectl create -f catalog-db-initdb-configmap.yaml # Finally, deploy the mongo instance and catalog service: # # !kubectl apply -f catalog-db-total.yaml # !kubectl apply -f catalog-total.yaml # + ### Payment Service The payment service does not have an associated datastore. It can be deployed with the following command: # - # !kubectl apply -f payment-total.yaml # NOTE: PAYMENT SERVICE MUST BE UP FIRST IN ORDER FOR ORDER SERVICE TO PROPERLY COMPLETE TRANSACTIONS # # ### Order Service # # Before deploying the orders datastore (postgres) and order service please add a secret for the service to use in authenticating with the cache. # *Note: Please replace 'value' in the command below with the desired password text. Changing the name of the secret object or the 'password' key may cause deployment issues* # # Before running order please add the following secret: # # !kubectl create secret generic order-postgres-pass --from-literal=password=<PASSWORD>! # Once the secret object is created, deploy the mongo instance and order service: # # !kubectl apply -f order-db-total.yaml # !kubectl apply -f order-total.yaml # ### Users Service # # Before deploying the users datastore (mongo), users cache (redis) and users service please add secrets for the service to use in authenticating with the database and cache. # *Note: Please replace 'value' in the command below with the desired password text. 
Changing the name of the secret object or the 'password' key may cause deployment issues* # # Before running order please add the following secret: # # !kubectl create secret generic users-mongo-pass --from-literal=password=<PASSWORD>! # !kubectl create secret generic users-redis-pass --from-literal=password=<PASSWORD>! # Next you need to run the following to initialize the database with an initial set of users: # # !kubectl create -f users-db-initdb-configmap.yaml # Once the secret object is created, and the users database is seeded, deploy the users database and users service: # # !kubectl apply -f users-db-total.yaml # !kubectl apply -f users-redis-total.yaml # !kubectl apply -f users-total.yaml # **_NOTE: The base set of users is preconfigured. For now, please login as one of this set (eric, dwight, han, or phoebe). The password for these users is '<PASSWORD>!'_** # # ## Datastore Independent Services # # ### Front End Service # # The front end service also functions without an associated datastore. The manifests in this repository deploy the front end service as a NodePort type for testing purposes. If suitable for the deployment environment, the service type could be changed to 'LoadBalancer' in the `frontend-total.yaml` manifest in this repository. # # To deploy the front end service, run the following command: # # !kubectl apply -f frontend-total.yaml # To find the external port on which to access the site in browser, run the following command: # # !kubectl get services -l service=frontend # The output of the above command should be similar to this: # # $ kubectl get services -l service=frontend # NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE # frontend NodePort 10.0.0.81 <none> 3000:30430/TCP 3d # # The external value appears under 'PORT(S)'. It is after the '3000:' and before the '/TCP' portion of the string. Appending it to the public address of the Kubernetes cluster (or loadbalancer fronting the cluster) to access the site. # # ### Point-of-Sales # # Just like the front end service, the Point-of-Sales app functions without any associated datastores. The only prerequisite is that the FrontEnd service is deployed. The manifests in this repository deploy the Point-of-Sales service as a NodePort type for testing purposes. If you're running the Point-of-Sales app on a different Kubernetes cluster, or as a standalone container, you'll have to update the value of `FRONTEND_HOST` (set to `frontend.default.svc.cluster.local` by default) to match the IP or FQDN of the front end service. # # To deploy the service, run the following command: # # # !kubectl apply -f point-of-sales-total.yaml # To find the external port on which to access the site in browser, run the following command: # # !kubectl get services -l service=pos # The output of the above command should be similar to this: # # ``` # $ kubectl get services -l service=frontend # NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE # pos NodePort 10.0.0.81 <none> 3000:30431/TCP 3d # ``` # # The external value appears under 'PORT(S)'. It is after the '3000:' and before the '/TCP' portion of the string. Appending it to the public address of the Kubernetes cluster (or loadbalancer fronting the cluster) to access the Point-of-Sales app. # ## Distributed Tracing # # **Note: Distributed tracing is advanced functionality which requires additional configuration to use successfully. 
Please read this section carefully before attempting to test / demonstrate tracing**
#
# The current version of the application has been augmented with distributed tracing functionality. Each of the services has two relevant environment variables, `JAEGER_AGENT_HOST` and `JAEGER_AGENT_PORT`. Regardless of the span aggregator being used, the code expects these two values to be populated with the hostname and port of whichever span collector is being used (*likely the Jaeger agent*).
#
# To avoid issues with unresolvable hostnames, `JAEGER_AGENT_HOST` is set to `localhost` in all of the manifests in this repo. To use tracing, this value will need to be replaced. If using the `jaeger-all-in-one.yml` manifest included in this repo, this value should be changed to `<jaeger namespace>.jaeger`.
#
# It is strongly recommended that the `JAEGER_AGENT_PORT` values not be modified, as the tracing library implementations for specific languages favor certain ports.
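#
# For example, if the Jaeger all-in-one manifest is deployed in a namespace named `jaeger`, one way to update the value without editing every manifest is shown below. This is only a sketch: `cart` is a placeholder deployment name, and the command would be repeated for each service's deployment.

# !kubectl set env deployment/cart JAEGER_AGENT_HOST=jaeger.jaeger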
kubernetes-manifests/.ipynb_checkpoints/ACME Fitness App k8s Deployment Notebook-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### Calculate the policy of the agent # * State Variable: x = [w, n, e, s, A], action variable a = [c, b, k], both of them are numpy array # + # %pylab inline # %matplotlib notebook from mpl_toolkits.mplot3d import Axes3D import numpy as np import pandas as pd from scipy.interpolate import interp2d from multiprocessing import Pool from functools import partial from pyswarm import pso import warnings from scipy import optimize warnings.filterwarnings("ignore") np.printoptions(precision=2) # time line T_min = 0 T_max = 70 T_R = 45 beta = 1/(1+0.02) # All the money amount are denoted in thousand dollars earningShock = [0.8,1.2] # Define transition matrix of economical states # GOOD -> GOOD 0.8, BAD -> BAD 0.6 Ps = np.array([[0.6, 0.4],[0.2, 0.8]]) # current risk free interest rate r_f = np.array([0.01 ,0.03]) # stock return depends on current and future econ states r_m = np.array([[-0.2, 0.15],[-0.15, 0.2]]) # expected return on stock market r_bar = 0.0667 # probability of survival Pa = np.load("prob.npy") # probability of employment transition Pe = np.array([[[[0.3, 0.7], [0.1, 0.9]], [[0.25, 0.75], [0.05, 0.95]]], [[[0.25, 0.75], [0.05, 0.95]], [[0.2, 0.8], [0.01, 0.99]]]]) # deterministic income detEarning = np.load("detEarning.npy") # tax rate tau_L = 0.2 tau_R = 0.1 # minimum consumption c_bar = 3 # + #Define the utility function def u(c): gamma = 2 return (np.float_power(max(c-c_bar,0),1-gamma) - 1)/(1 - gamma) #Define the bequeath function, which is a function of wealth def uB(w): B = 2 return B*u(w) #Define the earning function def y(t, x): w, n, s, e, A = x if A == 0: return 0 else: if t <= T_R: return detEarning[t] * earningShock[int(s)] * e + (1-e)*5 else: return detEarning[t] # Define the reward funtion def R(x, a): c, b, k = a w, n, s, e, A = x if A == 0: return uB(w+n) else: return u(c) # Define the transtiion of state (test) def transition(x, a, t): ''' Input: x current state: (w, n, e, s, A) a action taken: (c, b, k) Output: the next possible states with corresponding probabilities ''' c, b, k = a w, n, s, e, A = x x_next = [] prob_next = [] # Agent is dead if A == 0: for s_next in [0, 1]: x_next.append([0, 0, s_next, 0, 0]) return np.array(x_next), Ps[int(s)] else: # after retirement calculate the annuity payment N = np.sum(Pa[t:]) discounting = ((1+r_bar)**N - 1)/(r_bar*(1+r_bar)**N) # A = 1, agent is still alive and for the next period Pat = [1-Pa[t], Pa[t]] r_bond = r_f[int(s)] for s_next in [0, 1]: r_stock = r_m[int(s), s_next] w_next = b*(1+r_bond) + k*(1+r_stock) if t < T_R: # before retirement agents put 5% of income to 401k n_next = (n+0.05*y(t,x))(1+r_stock) else: n_next = (n-n/discounting)*(1+r_stock) for e_next in [0,1]: for A_next in [0,1]: # Age reaches 65 or agent is dead directly results in unemployment if t > T_R or A_next == 0: if A_next == 0: n_next = n*(1+r_stock) x_next.append([w_next, n_next, s_next, 0, A_next]) else: x_next.append([w_next, n_next, s_next, e_next, A_next]) prob_next.append(Ps[int(s)][s_next] * Pat[A_next] * Pe[int(s),s_next,int(e),e_next]) return np.array(x_next), np.array(prob_next) # Value function is a function of state and time t def V(x, t, Vmodel): # Define the objective function as a function of action w, n, s, e, A = x if A == 0: return np.array([R(x,[0,0,0]),[0,0,0]]) else: N = np.sum(Pa[t:]) discounting = 
((1+r_bar)**N - 1)/(r_bar*(1+r_bar)**N) def obj(bkTheta): bk,theta = bkTheta b = bk * theta k = bk * (1-theta) if t < T_R: c = (1-tau_L)*(y(t, x) * (1-0.05)) + w - bk else: c = (1-tau_R)*y(t, x) + w + n/discounting - bk if c <= c_bar: return 9999999999 a = (c,b,k) x_next, prob_next = transition(x, a, t) return -(R(x, a) + beta * np.dot(Vmodel[int(s)][int(e)][int(A)](x_next[:,0], x_next[:,1]), prob_next)) if t < T_R: ub = [(1-tau_L)*(y(t, x) * (1-0.05)) + w, 1] else: ub = [(1-tau_R)*y(t, x) + w + n/discounting, 1] lb = [0, 0] rranges = rranges = ((lb[0], ub[0]), (lb[1], ub[1])) resbrute = optimize.brute(obj, rranges, full_output=True, finish=optimize.fmin) xopt = resbrute[0] max_val = -resbrute[1] # xopt, fopt = pso(obj, lb, ub) # max_val = -fopt bk_m, theta_m = xopt b_m = bk_m * theta_m k_m = bk_m * (1-theta_m) if t < T_R: c_m = (1-tau_L)*(y(t, x) * (1-0.05)) + w - bk_m else: c_m = (1-tau_R)*y(t, x) + w + n/discounting - bk_m return np.array([max_val, [c_m, b_m, k_m]]) # + # wealth discretization w_grid_size = 100 w_lower = 5 w_upper = 20000 # 401k amount discretization n_grid_size = 50 n_lower = 5 n_upper = 500 def powspace(start, stop, power, num): start = np.power(start, 1/float(power)) stop = np.power(stop, 1/float(power)) return np.power( np.linspace(start, stop, num=num), power) # initialize the state discretization x_T = np.array([[w,n,e,s,0] for w in powspace(w_lower, w_upper, 3, w_grid_size) for n in np.linspace(n_lower, n_upper, n_grid_size) for s in [0,1] for e in [0,1] for A in [0,1]]).reshape((w_grid_size, n_grid_size, 2,2,2,5)) xgrid = np.array([[w,n,e,s,A] for w in powspace(w_lower, w_upper, 3, w_grid_size) for n in powspace(n_lower, n_upper, 3, n_grid_size) for s in [0,1] for e in [0,1] for A in [0,1]]).reshape((w_grid_size, n_grid_size, 2,2,2,5)) Vgrid = np.zeros((w_grid_size, n_grid_size, 2, 2, 2, T_max+1)) cgrid = np.zeros((w_grid_size, n_grid_size, 2, 2, 2, T_max+1)) bgrid = np.zeros((w_grid_size, n_grid_size, 2, 2, 2, T_max+1)) kgrid = np.zeros((w_grid_size, n_grid_size, 2, 2, 2, T_max+1)) # apply function to state space, need to reshape the matrix and shape it back to the size def applyFunToCalculateValue(fun, x = xgrid): return np.array(list(map(fun, x.reshape((w_grid_size * n_grid_size * 2 * 2 * 2, 5))))).reshape((w_grid_size, n_grid_size, 2,2,2)) Vgrid[:,:,:,:,:, T_max] = applyFunToCalculateValue(partial(R, a = [0,0,0]), x = x_T) # - # ### Backward Induction Part # + # %%time ws = xgrid[:,:,1,1,1,0].flatten() ns = xgrid[:,:,1,1,1,1].flatten() xs = xgrid.reshape((w_grid_size * n_grid_size * 2 * 2 * 2, 5)) pool = Pool() for t in range(T_max-1, 0, -1): print(t) cs = [[[np.vectorize(interp2d(ws, ns, Vgrid[:,:,s,e,A,t+1].flatten(),kind= 'cubic')) for A in [0,1]] for e in [0,1]] for s in [0,1]] f = partial(V, t = t, Vmodel = cs) results = np.array(pool.map(f, xs)) Vgrid[:,:,:,:,:,t] = results[:,0].reshape((w_grid_size,n_grid_size, 2,2,2)) #########################################################[test function part] fig = plt.figure(figsize = [12, 8]) ax = fig.add_subplot(111, projection='3d') ax.plot_surface(X = ws.reshape((w_grid_size, n_grid_size)), Y = ns.reshape((w_grid_size, n_grid_size)), Z= Vgrid[:,:,1,1,1,t]) plt.show() ######################################################### cgrid[:,:,:,:,:,t] = np.array([r[0] for r in results[:,1]]).reshape((w_grid_size, n_grid_size,2,2,2)) bgrid[:,:,:,:,:,t] = np.array([r[1] for r in results[:,1]]).reshape((w_grid_size, n_grid_size,2,2,2)) kgrid[:,:,:,:,:,t] = np.array([r[2] for r in results[:,1]]).reshape((w_grid_size, 
n_grid_size,2,2,2)) pool.close() # - def summaryPlotChoiceVStime(w_level, n_level, s, e, A, V = Vgrid, C = cgrid, B = bgrid, K = kgrid): plt.figure(figsize = [12,8]) plt.plot(list(range(20,91)), cgrid[w_level, n_level, s,e,A,:], label= "Consumption") plt.plot(list(range(20,91)), bgrid[w_level, n_level, s,e,A,:], label= "Bond Holding") plt.plot(list(range(20,91)), kgrid[w_level, n_level, s,e,A,:], label= "Stock Holding") plt.legend() plt.show() summaryPlotChoiceVStime(50, 0, 1, 1) def summaryPlotWealthVSChoice(t, s, e, A, V = Vgrid, C = cgrid, B = bgrid, K = kgrid): plt.figure(figsize = [12,8]) plt.plot(ws, cgrid[:,s,e,A,t], label="Consumption") plt.plot(ws, bgrid[:,s,e,A,t], label="Bond Holding") plt.plot(ws, kgrid[:,s,e,A,t], label="Stock Holding") plt.legend() plt.show() summaryPlotWealthVSChoice(60, 0, 1, 1) # ### Simulation Part # + import quantecon as qe mc = qe.MarkovChain(Ps) def action(t, x): w,s,e,A = x if A == 1: c = interp1d(ws, cgrid[:,s,e,A,t], kind = "linear", fill_value = "extrapolate")(w) b = interp1d(ws, bgrid[:,s,e,A,t], kind = "linear", fill_value = "extrapolate")(w) k = interp1d(ws, kgrid[:,s,e,A,t], kind = "linear", fill_value = "extrapolate")(w) else: c = 0 b = 0 k = 0 return (c,b,k) # Define the transtiion of state def transition(x, a, t, s_next): ''' Input: x current state: (w, n, s, A) a action taken: (c, b, k) Output: the next possible states with corresponding probabilities ''' c, b, k = a w, s, e, A = x x_next = [] prob_next = [] if A == 0: return [0, s_next, 0, 0] else: # A = 1, agent is still alive and for the next period Pat = [1-Pa[t], Pa[t]] r_bond = r_f[int(s)] r_stock = r_m[int(s), s_next] w_next = b*(1+r_bond) + k*(1+r_stock) for e_next in [0,1]: for A_next in [0,1]: x_next.append([w_next, s_next, e_next, A_next]) prob_next.append(Pat[A_next] * Pe[int(s),s_next,int(e),e_next]) return x_next[np.random.choice(4, 1, p = prob_next)[0]] # - def simulation(num): for sim in range(num): if sim%100 == 0: print(sim) # simulate an agent age 15 starting with wealth of 10 w = 20 wealth = [] Consumption = [] Bond = [] Stock = [] Salary = [] econState = mc.simulate(ts_length=T_max - T_min) alive = True for t in range(len(econState)-1): if rd.random() > prob[t]: alive = False wealth.append(w) s = econState[t] s_next = econState[t+1] a = action(t, w, s, alive) if alive: Salary.append(y(t+T_min, s)) else: Salary.append(0) Consumption.append(a[0]) Bond.append(a[1]) Stock.append(a[2]) w = fixTransition(w,s,s_next, a, alive) # dictionary of lists dictionary = {'wealth': wealth, 'Consumption': Consumption, 'Bond': Bond, 'Stock': Stock, 'Salary': Salary} if sim == 0: df = pd.DataFrame(dictionary) else: df = df + pd.DataFrame(dictionary) return df/num # simulate an agent age 0 starting with wealth of 70 econState = mc.simulate(ts_length=T_max - T_min) def simulateAgent(sim): wealth = [] Consumption = [] Bond = [] Stock = [] Salary = [] employ = [] live = [] x = [20, 0, 0, 1] for t in range(len(econState)-1): s = econState[t] s_next = econState[t+1] a = action(t, x) c, b, k = a w,_,e,A = x wealth.append(w) Consumption.append(c) Bond.append(b) Stock.append(k) Salary.append(y(t, x)) employ.append(e) live.append(A) x = transition(x, a, t, s_next) # dictionary of lists dictionary = {'wealth': wealth, 'Consumption': Consumption, 'Bond': Bond, 'Stock': Stock, 'Salary': Salary, 'employ': employ, 'live': live} return pd.DataFrame(dictionary) pool = Pool() sim = 10000 agents = pool.map(simulateAgent, list(range(sim))) pool.close() df = pd.DataFrame() for agent in agents: if df.size == 0: 
df = agent else: df = df + agent df = df/sim df = df/10000 df.plot() df[["wealth","Consumption","Bond","Stock"]].plot()
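# An alternative aggregation sketch (not the author's code): concatenate the simulated agents and
# average by age index with groupby, instead of summing the DataFrames one at a time.
avg_df = pd.concat(agents).groupby(level=0).mean()
avg_df[["wealth", "Consumption", "Bond", "Stock"]].plot()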
20200601/20200527/.ipynb_checkpoints/lifeCycleModel-housing-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # + # %matplotlib inline import tensorflow as tf import numpy as np import skimage.io as io import os, sys from PIL import Image from matplotlib import pyplot as plt sys.path.append("tf-image-segmentation/") sys.path.append("/home/dpakhom1/workspace/my_models/slim/") fcn_16s_checkpoint_path = '/home/dpakhom1/tf_projects/segmentation/model_fcn8s_final.ckpt' os.environ["CUDA_VISIBLE_DEVICES"] = '1' slim = tf.contrib.slim from tf_image_segmentation.models.fcn_8s import FCN_8s from matplotlib import pyplot as plt from tf_image_segmentation.utils.pascal_voc import pascal_segmentation_lut from tf_image_segmentation.utils.tf_records import read_tfrecord_and_decode_into_image_annotation_pair_tensors from tf_image_segmentation.utils.inference import adapt_network_for_any_size_input from tf_image_segmentation.utils.visualization import visualize_segmentation_adaptive pascal_voc_lut = pascal_segmentation_lut() tfrecord_filename = 'pascal_augmented_val.tfrecords' number_of_classes = 21 filename_queue = tf.train.string_input_producer( [tfrecord_filename], num_epochs=1) image, annotation = read_tfrecord_and_decode_into_image_annotation_pair_tensors(filename_queue) # Fake batch for image and annotation by adding # leading empty axis. image_batch_tensor = tf.expand_dims(image, axis=0) annotation_batch_tensor = tf.expand_dims(annotation, axis=0) # Be careful: after adaptation, network returns final labels # and not logits FCN_8s = adapt_network_for_any_size_input(FCN_8s, 32) pred, fcn_16s_variables_mapping = FCN_8s(image_batch_tensor=image_batch_tensor, number_of_classes=number_of_classes, is_training=False) # Take away the masked out values from evaluation weights = tf.to_float( tf.not_equal(annotation_batch_tensor, 255) ) # Define the accuracy metric: Mean Intersection Over Union miou, update_op = slim.metrics.streaming_mean_iou(predictions=pred, labels=annotation_batch_tensor, num_classes=number_of_classes, weights=weights) # The op for initializing the variables. initializer = tf.local_variables_initializer() saver = tf.train.Saver() with tf.Session() as sess: sess.run(initializer) saver.restore(sess, "/home/dpakhom1/tf_projects/segmentation/model_fcn8s_final.ckpt") coord = tf.train.Coordinator() threads = tf.train.start_queue_runners(coord=coord) # There are 904 images in restricted validation dataset for i in xrange(904): image_np, annotation_np, pred_np, tmp = sess.run([image, annotation, pred, update_op]) # Display the image and the segmentation result #upsampled_predictions = pred_np.squeeze() #plt.imshow(image_np) #plt.show() #visualize_segmentation_adaptive(upsampled_predictions, pascal_voc_lut) coord.request_stop() coord.join(threads) res = sess.run(miou) print("Pascal VOC 2012 Restricted (RV-VOC12) Mean IU: " + str(res))
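# Illustration only (not the evaluation code above): a small NumPy version of what the
# streaming mean-IoU metric computes, ignoring the 255 "void" label via the same kind of mask.
import numpy as np

def mean_iou_numpy(pred, label, num_classes, void_label=255):
    mask = label != void_label
    pred, label = pred[mask], label[mask]
    confusion = np.zeros((num_classes, num_classes), dtype=np.int64)
    for p, l in zip(pred.ravel(), label.ravel()):
        confusion[l, p] += 1                      # rows: ground truth, cols: prediction
    intersection = np.diag(confusion).astype(np.float64)
    union = confusion.sum(axis=0) + confusion.sum(axis=1) - intersection
    valid = union > 0
    return (intersection[valid] / union[valid]).mean()

print(mean_iou_numpy(np.array([0, 1, 1, 2]), np.array([0, 1, 2, 255]), num_classes=3))  # 0.5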
Image Segmentation/tf-image-segmentation/tf_image_segmentation/recipes/pascal_voc/FCNs/fcn_8s_test_pascal.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # COGS 108 Final Project import sys # !conda install --yes --prefix {sys.prefix} beautifulsoup4 # + # %matplotlib inline import numpy as np import pandas as pd import matplotlib.pyplot as plt import json # - import requests import bs4 from bs4 import BeautifulSoup # ## Financial Data # We looked into a few different options for pulling the financial data that we need. One of the options was pulling from API's like a Yahoo Finance API. However we also realized that Yahoo Finance had a download option where we could export the data we wanted in a timeframe to a CSV. We decided to use the data in this format since it is how we have been working with data in this class on the assignments. # + #Read in csv data df_amzn = pd.read_csv('AMZN_recent.csv') #Drop unimportant rows df_amzn = df_amzn.drop(['Adj Close','Volume'],1) #Add new column named Daily Avg that averages the high and low of that day. df_amzn['Daily Avg'] = df_amzn[['High','Low']].mean(axis=1) df_amzn['Daily Change']= df_amzn['Close'].diff() #Change Date column to date that can be read by matplotlib and pandas df_amzn['Date'] = pd.to_datetime(df_amzn['Date']) # + #Read in csv data df_nike = pd.read_csv('NKE_6month.csv') #Drop unimportant rows df_nike = df_nike.drop(['Adj Close','Volume'],1) #Add new column named Daily Avg that averages the high and low of that day. df_nike['Daily Avg'] = df_nike[['High','Low']].mean(axis=1) df_nike['Daily Change']= df_nike['Close'].diff() #Change Date column to date that can be read by matplotlib and pandas df_nike['Date'] = pd.to_datetime(df_nike['Date']) # + #Plot Daily Avg over time plt.plot(df_amzn['Date'],df_amzn['Daily Avg']) # - plt.plot(df_nike['Date'],df_nike['Daily Avg']) # Web Scraping Test # + # function to extract new headlines and dates. 
# Takes starting URL and a limit of how many entries we want our table to be # It iterates over it until i has a a list of 1000 entries # Number of entries can be modified by changing the variable 'limit' # Returns a json file with titles and dates def get_headlines(site, user_limit): # Creates empty to list where titles will be added df = pd.DataFrame(columns=['Date','Headline']) page = requests.get(site) soup = BeautifulSoup(page.content, 'html.parser') newsRead = 0 while newsRead < user_limit: #repeat = False; news = soup.find_all("div", {"class": "title"}) for newsItem in news[1:]: df.loc[newsRead] = [newsItem.div.string,newsItem.span.string] newsRead += 1 nextPage = soup.find("div", {"class": "numberedpagenav"}).find("a", {"class": "active"}).find_next_sibling() if(nextPage['class'] == "disabled"): break else: # Changes the page to parse to the next page page = requests.get(nextPage['href']) soup = BeautifulSoup(page.content, 'html.parser') return df # return json.dumps(my_list) # - # Calling the function to extract data from website nand printing the panda representation # Takes about 30 sec to 1 minute because it is iterating through all the pages until it the list goes over # the limit, which in this case I set it to a 1000 #https://markets.financialcontent.com/stocks/quote/news?Limit=500&Symbol=321%3A957150 site = 'https://markets.financialcontent.com/stocks/quote/news?Limit=500&Symbol=537%3A645156' #df_amazon = pd.read_json(get_headlines(site, 10)) df_amazon =get_headlines(site, 2000) df_amazon.to_json("Amazon_news.json") # Calling the function to extract data from website nand printing the panda representation # Takes about 30 sec to 1 minute because it is iterating through all the pages until it the list goes over # the limit, which in this case I set it to a 1000 site = 'https://markets.financialcontent.com/stocks/quote/news?Limit=500&Symbol=321%3A957150' df_nike_head =get_headlines(site, 2000) df_nike_head df_nike_head.to_json('Nike_news.json') #Only look at articles with Nike in title df_nike_clean = df_nike_head[df_nike_head['Headline'].str.contains("Nike")] df_nike_clean.to_json('Nike_news_clean.json') def get_headlines2(site, user_limit): title_list = [] while len(title_list) < user_limit: page = requests.get(site) soup = BeautifulSoup(page.content, 'html.parser') spans = soup.find_all('span', {'class': 'fontS14px'}) title_count = 0 for span in spans: if title_count < 10: title = span.find('a').text title_list.append(title) title_count += 1 page_link_list = soup.find_all('a', {'class' : 'pagerlink'}, text='next >') if(page_link_list): for page_link in page_link_list: site = page_link['href'] else: break return title_list site = 'https://www.nasdaq.com/symbol/nke/news-headlines' get_headlines2(site, 1000) # Analysis amazonfreq_df = pd.read_json('Frequency+Date(Feb28-Jan09).json') amazonfreq_df = amazonfreq_df.sort_values('Date') amazonfreq_df['Freq Adj'] = amazonfreq_df['Frequency'].div(amazonfreq_df['Number of News']) amazonfreq_df # + sns.set_style('whitegrid') plt.rcParams['figure.figsize'] = (10,5) fig,ax = subplots(2,1) fig.tight_layout() axs = ax[0] axs.bar(amazonfreq_df['Date'],amazonfreq_df['Freq Adj']) axs.set_title('News Correlation') axs = ax[1] axs.bar(df_amzn['Date'],df_amzn['Daily Change']) axs.set_title('Amazon Price Change (Daily)') # - new_df = amazonfreq_df.merge(df_amzn,how='outer') new_df = new_df.fillna(0) new_df # + from scipy.stats import pearsonr stat, pval = pearsonr(new_df['Freq Adj'],new_df['Daily Change']) print("same day: correlated:", stat) # 
positively correlated if (pval > 0.05): print("not significant") else: print("significant") # -
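# A hedged follow-up sketch to the same-day test above: it checks whether
# lagging the adjusted news frequency by a few days changes the Pearson
# correlation with the daily price change. The column names 'Freq Adj' and
# 'Daily Change' come from the merged new_df frame built above; the lag range
# of 0-5 days is an arbitrary choice for illustration.
from scipy.stats import pearsonr

def lagged_correlations(df, max_lag=5):
    results = {}
    for lag in range(max_lag + 1):
        shifted = df['Freq Adj'].shift(lag)   # news frequency from `lag` days earlier
        mask = shifted.notna()
        stat, pval = pearsonr(shifted[mask], df.loc[mask, 'Daily Change'])
        results[lag] = (stat, pval)
    return results

# hypothetical usage:
# for lag, (r, p) in lagged_correlations(new_df).items():
#     print(lag, round(r, 3), round(p, 3))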
Draft/Project 003.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- #importing the required libraries import numpy as np import pandas as pd import pickle import matrix_factorization_utilities import scipy.sparse as sp from scipy.sparse.linalg import svds # Reading the ratings data ratings = pd.read_csv('Dataset/ratings.csv') len(ratings) #Just taking the required columns ratings = ratings[['userId', 'movieId','rating']] # Checking if the user has rated the same movie twice, in that case we just take max of them ratings_df = ratings.groupby(['userId','movieId']).aggregate(np.max) # In this case there are no such cases where the user has rated the same movie twice. len(ratings_df) # Inspecting the data ratings.head() ratings_df.head() # Counting no of unique users len(ratings['userId'].unique()) #Getting the percentage count of each rating value count_ratings = ratings.groupby('rating').count() count_ratings['perc_total']=round(count_ratings['userId']*100/count_ratings['userId'].sum(),1) count_ratings #Visualising the percentage total for each rating count_ratings['perc_total'].plot.bar() #reading the movies dataset movie_list = pd.read_csv('Dataset/movies.csv') len(movie_list) # insepcting the movie list dataframe movie_list.head() # reading the tags datast tags = pd.read_csv('Dataset/tags.csv') # inspecting the tags data frame tags.head() # inspecting various genres genres = movie_list['genres'] genres.head() genre_list = "" for index,row in movie_list.iterrows(): genre_list += row.genres + "|" #split the string into a list of values genre_list_split = genre_list.split('|') #de-duplicate values new_list = list(set(genre_list_split)) #remove the value that is blank new_list.remove('') #inspect list of genres new_list # + #Enriching the movies dataset by adding the various genres columns. movies_with_genres = movie_list.copy() for genre in new_list : movies_with_genres[genre] = movies_with_genres.apply(lambda _:int(genre in _.genres), axis = 1) # - movies_with_genres.head() # + #Calculating the sparsity #evaluation metric 1 no_of_users = len(ratings['userId'].unique()) no_of_movies = len(ratings['movieId'].unique()) sparsity = round(1.0 - len(ratings)/(1.0*(no_of_movies*no_of_users)),3) print(sparsity) # - # Counting the number of unique movies in the dataset. len(ratings['movieId'].unique()) # Finding the average rating for movie and the number of ratings for each movie avg_movie_rating = pd.DataFrame(ratings.groupby('movieId')['rating'].agg(['mean','count'])) avg_movie_rating['movieId']= avg_movie_rating.index # inspecting the average movie rating data frame avg_movie_rating.head() len(avg_movie_rating) #calculate the percentile count. 
It gives the no of ratings at least 70% of the movies have np.percentile(avg_movie_rating['count'],70) #Get the average movie rating across all movies avg_rating_all=ratings['rating'].mean() avg_rating_all #set a minimum threshold for number of reviews that the movie has to have min_reviews=30 min_reviews movie_score = avg_movie_rating.loc[avg_movie_rating['count']>min_reviews] movie_score.head() len(movie_score) #create a function for weighted rating score based off count of reviews def weighted_rating(x, m=min_reviews, C=avg_rating_all): v = x['count'] R = x['mean'] # Calculation based on the IMDB formula return (v/(v+m) * R) + (m/(m+v) * C) #Calculating the weighted score for each movie movie_score['weighted_score'] = movie_score.apply(weighted_rating, axis=1) movie_score.head() #join movie details to movie ratings movie_score = pd.merge(movie_score,movies_with_genres,on='movieId') #join movie links to movie ratings #movie_score = pd.merge(movie_score,links,on='movieId') movie_score.head() #list top scored movies over the whole range of movies pd.DataFrame(movie_score.sort_values(['weighted_score'],ascending=False)[['title','count','mean','weighted_score','genres']][:10]) # Gives the best movies according to genre based on weighted score which is calculated using IMDB formula def best_movies_by_genre(genre,top_n): return pd.DataFrame(movie_score.loc[(movie_score[genre]==1)].sort_values(['weighted_score'],ascending=False)[['title','count','mean','weighted_score']][:top_n]) #run function to return top recommended movies by genre best_movies_by_genre('Musical',10) #run function to return top recommended movies by genre best_movies_by_genre('Action',10) #run function to return top recommended movies by genre best_movies_by_genre('Children',10) #run function to return top recommended movies by genre best_movies_by_genre('Drama',10) # Creating a data frame that has user ratings accross all movies in form of matrix used in matrix factorisation ratings_df = pd.pivot_table(ratings, index='userId', columns='movieId', aggfunc=np.max) ratings_df.head() # Apply low rank matrix factorization to find the latent features U, M = matrix_factorization_utilities.low_rank_matrix_factorization(ratings_df.as_matrix(), num_features=5, regularization_amount=1.0) ratings_df #merging ratings and movies dataframes ratings_movies = pd.merge(ratings,movie_list, on = 'movieId') ratings_movies.head() ratings_movies #Gets the other top 10 movies which are watched by the people who saw this particular movie def get_other_movies(movie_name): #get all users who watched a specific movie df_movie_users_series = ratings_movies.loc[ratings_movies['title']==movie_name]['userId'] #convert to a data frame df_movie_users = pd.DataFrame(df_movie_users_series,columns=['userId']) #get a list of all other movies watched by these users other_movies = pd.merge(df_movie_users,ratings_movies,on='userId') #get a list of the most commonly watched movies by these other user other_users_watched = pd.DataFrame(other_movies.groupby('title')['userId'].count()).sort_values('userId',ascending=False) other_users_watched['perc_who_watched'] = round(other_users_watched['userId']*100/other_users_watched['userId'][0],1) return other_users_watched[:10] # Getting other top 10 movies which are watched by the people who saw 'Gone Girl' get_other_movies('Gone Girl (2014)') from sklearn.neighbors import NearestNeighbors avg_movie_rating.head() #only include movies with more than 10 ratings movie_plus_10_ratings = 
avg_movie_rating.loc[avg_movie_rating['count']>=10] print(len(movie_plus_10_ratings)) movie_plus_10_ratings filtered_ratings = pd.merge(movie_plus_10_ratings, ratings, on="movieId") len(filtered_ratings) filtered_ratings.head() #create a matrix table with movieIds on the rows and userIds in the columns. #replace NAN values with 0 movie_wide = filtered_ratings.pivot(index = 'movieId', columns = 'userId', values = 'rating').fillna(0) movie_wide.head() #specify model parameters model_knn = NearestNeighbors(metric='cosine',algorithm='brute') #fit model to the data set model_knn.fit(movie_wide) #Gets the top 10 nearest neighbours got the movie def print_similar_movies(query_index) : #get the list of user ratings for a specific userId query_index_movie_ratings = movie_wide.loc[query_index,:].values.reshape(1,-1) #get the closest 10 movies and their distances from the movie specified distances,indices = model_knn.kneighbors(query_index_movie_ratings,n_neighbors = 11) #write a lopp that prints the similar movies for a specified movie. for i in range(0,len(distances.flatten())): #get the title of the random movie that was chosen get_movie = movie_list.loc[movie_list['movieId']==query_index]['title'] #for the first movie in the list i.e closest print the title if i==0: print('Recommendations for {0}:\n'.format(get_movie)) else : #get the indiciees for the closest movies indices_flat = indices.flatten()[i] #get the title of the movie get_movie = movie_list.loc[movie_list['movieId']==movie_wide.iloc[indices_flat,:].name]['title'] #print the movie print('{0}: {1}, with distance of {2}:'.format(i,get_movie,distances.flatten()[i])) print_similar_movies(112552) print_similar_movies(1) print_similar_movies(96079) movies_with_genres.head() #Getting the movies list with only genres like Musical and other such columns movie_content_df_temp = movies_with_genres.copy() movie_content_df_temp.set_index('movieId') movie_content_df = movie_content_df_temp.drop(columns = ['movieId','title','genres']) movie_content_df = movie_content_df.as_matrix() movie_content_df # + # Import linear_kernel from sklearn.metrics.pairwise import linear_kernel # Compute the cosine similarity matrix cosine_sim = linear_kernel(movie_content_df,movie_content_df) # - # Similarity of the movies based on the content cosine_sim #create a series of the movie id and title indicies = pd.Series(movie_content_df_temp.index, movie_content_df_temp['title']) indicies #Gets the top 10 similar movies based on the content def get_similar_movies_based_on_content(movie_index) : sim_scores = list(enumerate(cosine_sim[movie_index])) # Sort the movies based on the similarity scores sim_scores = sorted(sim_scores, key=lambda x: x[1], reverse=True) # Get the scores of the 10 most similar movies sim_scores = sim_scores[0:11] print(sim_scores) # Get the movie indices movie_indices = [i[0] for i in sim_scores] print(movie_indices) similar_movies = pd.DataFrame(movie_content_df_temp[['title','genres']].iloc[movie_indices]) return similar_movies indicies["Skyfall (2012)"] get_similar_movies_based_on_content(19338) #get ordered list of movieIds item_indices = pd.DataFrame(sorted(list(set(ratings['movieId']))),columns=['movieId']) #add in data frame index value to data frame item_indices['movie_index']=item_indices.index #inspect data frame item_indices.head() #get ordered list of movieIds user_indices = pd.DataFrame(sorted(list(set(ratings['userId']))),columns=['userId']) #add in data frame index value to data frame user_indices['user_index']=user_indices.index 
#inspect data frame user_indices.head() #join the movie indices df_with_index = pd.merge(ratings,item_indices,on='movieId') #join the user indices df_with_index=pd.merge(df_with_index,user_indices,on='userId') #inspec the data frame df_with_index.head() #import train_test_split module from sklearn.model_selection import train_test_split #take 80% as the training set and 20% as the test set df_train, df_test= train_test_split(df_with_index,test_size=0.2) print(len(df_train)) print(len(df_test)) df_train.head() df_test.head() n_users = ratings.userId.unique().shape[0] n_items = ratings.movieId.unique().shape[0] print(n_users) print(n_items) #Create two user-item matrices, one for training and another for testing train_data_matrix = np.zeros((n_users, n_items)) #for every line in the data for line in df_train.itertuples(): #set the value in the column and row to #line[1] is userId, line[2] is movieId and line[3] is rating, line[4] is movie_index and line[5] is user_index train_data_matrix[line[5], line[4]] = line[3] train_data_matrix.shape #Create two user-item matrices, one for training and another for testing test_data_matrix = np.zeros((n_users, n_items)) #for every line in the data for line in df_test[:1].itertuples(): #set the value in the column and row to #line[1] is userId, line[2] is movieId and line[3] is rating, line[4] is movie_index and line[5] is user_index #print(line[2]) test_data_matrix[line[5], line[4]] = line[3] #train_data_matrix[line['movieId'], line['userId']] = line['rating'] test_data_matrix.shape pd.DataFrame(train_data_matrix).head() df_train['rating'].max() from sklearn.metrics import mean_squared_error from math import sqrt def rmse(prediction, ground_truth): #select prediction values that are non-zero and flatten into 1 array prediction = prediction[ground_truth.nonzero()].flatten() #select test values that are non-zero and flatten into 1 array ground_truth = ground_truth[ground_truth.nonzero()].flatten() #return RMSE between values return sqrt(mean_squared_error(prediction, ground_truth)) #Calculate the rmse sscore of SVD using different values of k (latent features) rmse_list = [] for i in [1,2,5,20,40,60,100,200]: #apply svd to the test data u,s,vt = svds(train_data_matrix,k=i) #get diagonal matrix s_diag_matrix=np.diag(s) #predict x with dot product of u s_diag and vt X_pred = np.dot(np.dot(u,s_diag_matrix),vt) #calculate rmse score of matrix factorisation predictions rmse_score = rmse(X_pred,test_data_matrix) rmse_list.append(rmse_score) print("Matrix Factorisation with " + str(i) +" latent features has a RMSE of " + str(rmse_score)) #Convert predictions to a DataFrame mf_pred = pd.DataFrame(X_pred) mf_pred.head() df_names = pd.merge(ratings,movie_list,on='movieId') df_names.head() #choose a user ID user_id = 1 #get movies rated by this user id users_movies = df_names.loc[df_names["userId"]==user_id] #print how many ratings user has made print("User ID : " + str(user_id) + " has already rated " + str(len(users_movies)) + " movies") #list movies that have been rated users_movies user_index = df_train.loc[df_train["userId"]==user_id]['user_index'][:1].values[0] #get movie ratings predicted for this user and sort by highest rating prediction sorted_user_predictions = pd.DataFrame(mf_pred.iloc[user_index].sort_values(ascending=False)) #rename the columns sorted_user_predictions.columns=['ratings'] #save the index values as movie id sorted_user_predictions['movieId']=sorted_user_predictions.index print("Top 10 predictions for User " + str(user_id)) #display the 
top 10 predictions for this user pd.merge(sorted_user_predictions,movie_list, on = 'movieId')[:10] #count number of unique users numUsers = df_train.userId.unique().shape[0] #count number of unitque movies numMovies = df_train.movieId.unique().shape[0] print(len(df_train)) print(numUsers) print(numMovies) #Separate out the values of the df_train data set into separate variables Users = df_train['userId'].values Movies = df_train['movieId'].values Ratings = df_train['rating'].values print(Users),print(len(Users)) print(Movies),print(len(Movies)) print(Ratings),print(len(Ratings)) #import libraries import keras from keras.layers import Embedding, Reshape, Merge from keras.models import Sequential from keras.optimizers import Adam from keras.callbacks import EarlyStopping, ModelCheckpoint from keras.utils import plot_model # Couting no of unique users and movies len(ratings.userId.unique()), len(ratings.movieId.unique()) # Assigning a unique value to each user and movie in range 0,no_of_users and 0,no_of_movies respectively. ratings.userId = ratings.userId.astype('category').cat.codes.values ratings.movieId = ratings.movieId.astype('category').cat.codes.values # Splitting the data into train and test. train, test = train_test_split(ratings, test_size=0.2) train.head() test.head() n_users, n_movies = len(ratings.userId.unique()), len(ratings.movieId.unique()) # Returns a neural network model which performs matrix factorisation def matrix_factorisation_model_with_n_latent_factors(n_latent_factors) : movie_input = keras.layers.Input(shape=[1],name='Item') movie_embedding = keras.layers.Embedding(n_movies + 1, n_latent_factors, name='Movie-Embedding')(movie_input) movie_vec = keras.layers.Flatten(name='FlattenMovies')(movie_embedding) user_input = keras.layers.Input(shape=[1],name='User') user_vec = keras.layers.Flatten(name='FlattenUsers')(keras.layers.Embedding(n_users + 1, n_latent_factors,name='User-Embedding')(user_input)) prod = keras.layers.merge([movie_vec, user_vec], mode='dot',name='DotProduct') model = keras.Model([user_input, movie_input], prod) model.compile('adam', 'mean_squared_error') return model model = matrix_factorisation_model_with_n_latent_factors(5) model.summary() #Training the model history = model.fit([train.userId, train.movieId], train.rating, epochs=50, verbose=0) y_hat = np.round(model.predict([test.userId, test.movieId]),0) y_true = test.rating from sklearn.metrics import mean_absolute_error mean_absolute_error(y_true, y_hat) #Getting summary of movie embeddings movie_embedding_learnt = model.get_layer(name='Movie-Embedding').get_weights()[0] pd.DataFrame(movie_embedding_learnt).describe() # Getting summary of user embeddings from the model user_embedding_learnt = model.get_layer(name='User-Embedding').get_weights()[0] pd.DataFrame(user_embedding_learnt).describe() from keras.constraints import non_neg # Returns a neural network model which performs matrix factorisation with additional constraint on embeddings(that they can't be negative) def matrix_factorisation_model_with_n_latent_factors_and_non_negative_embedding(n_latent_factors) : movie_input = keras.layers.Input(shape=[1],name='Item') movie_embedding = keras.layers.Embedding(n_movies + 1, n_latent_factors, name='Non-Negative-Movie-Embedding',embeddings_constraint=non_neg())(movie_input) movie_vec = keras.layers.Flatten(name='FlattenMovies')(movie_embedding) user_input = keras.layers.Input(shape=[1],name='User') user_vec = keras.layers.Flatten(name='FlattenUsers')(keras.layers.Embedding(n_users + 1, 
n_latent_factors,name='Non-Negative-User-Embedding',embeddings_constraint=non_neg())(user_input)) prod = keras.layers.merge([movie_vec, user_vec], mode='dot',name='DotProduct') model = keras.Model([user_input, movie_input], prod) model.compile('adam', 'mean_squared_error') return model model2 = matrix_factorisation_model_with_n_latent_factors_and_non_negative_embedding(5) model2.summary() history_nonneg = model2.fit([train.userId, train.movieId], train.rating, epochs=50, verbose=0) movie_embedding_learnt = model2.get_layer(name='Non-Negative-Movie-Embedding').get_weights()[0] pd.DataFrame(movie_embedding_learnt).describe() y_hat = np.round(model2.predict([test.userId, test.movieId]),0) y_true = test.rating mean_absolute_error(y_true, y_hat) # Returns a neural network model which does recommendation def neural_network_model(n_latent_factors_user, n_latent_factors_movie): movie_input = keras.layers.Input(shape=[1],name='Item') movie_embedding = keras.layers.Embedding(n_movies + 1, n_latent_factors_movie, name='Movie-Embedding')(movie_input) movie_vec = keras.layers.Flatten(name='FlattenMovies')(movie_embedding) movie_vec = keras.layers.Dropout(0.2)(movie_vec) user_input = keras.layers.Input(shape=[1],name='User') user_vec = keras.layers.Flatten(name='FlattenUsers')(keras.layers.Embedding(n_users + 1, n_latent_factors_user,name='User-Embedding')(user_input)) user_vec = keras.layers.Dropout(0.2)(user_vec) concat = keras.layers.merge([movie_vec, user_vec], mode='concat',name='Concat') concat_dropout = keras.layers.Dropout(0.2)(concat) dense = keras.layers.Dense(100,name='FullyConnected')(concat) dropout_1 = keras.layers.Dropout(0.2,name='Dropout')(dense) dense_2 = keras.layers.Dense(50,name='FullyConnected-1')(concat) dropout_2 = keras.layers.Dropout(0.2,name='Dropout')(dense_2) dense_3 = keras.layers.Dense(20,name='FullyConnected-2')(dense_2) dropout_3 = keras.layers.Dropout(0.2,name='Dropout')(dense_3) dense_4 = keras.layers.Dense(10,name='FullyConnected-3', activation='relu')(dense_3) result = keras.layers.Dense(1, activation='relu',name='Activation')(dense_4) adam = Adam(lr=0.005) model = keras.Model([user_input, movie_input], result) model.compile(optimizer=adam,loss= 'mean_absolute_error') return model model3 = neural_network_model(10,13) history_neural_network = model3.fit([train.userId, train.movieId], train.rating, epochs=50, verbose=0) model3.summary() y_hat = np.round(model3.predict([test.userId, test.movieId]),0) y_true = test.rating mean_absolute_error(y_true, y_hat)
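# The keras.layers.merge(..., mode='dot') calls and the Merge import used above
# come from older standalone Keras releases. As a sketch only, the same
# dot-product factorisation can be written with the layer classes shipped in
# the Keras bundled with TensorFlow 2.x; it reuses the n_users, n_movies, train
# and test variables defined above, and the import path is an assumption about
# the installed Keras version rather than a tested drop-in replacement.
from tensorflow import keras

def mf_model_tf2(n_users, n_movies, n_latent=5):
    user_in = keras.layers.Input(shape=(1,), name='User')
    item_in = keras.layers.Input(shape=(1,), name='Item')
    user_vec = keras.layers.Flatten()(keras.layers.Embedding(n_users + 1, n_latent)(user_in))
    item_vec = keras.layers.Flatten()(keras.layers.Embedding(n_movies + 1, n_latent)(item_in))
    rating = keras.layers.Dot(axes=1, name='DotProduct')([user_vec, item_vec])
    model = keras.Model([user_in, item_in], rating)
    model.compile(optimizer='adam', loss='mean_squared_error')
    return model

# hypothetical usage:
# model_tf2 = mf_model_tf2(n_users, n_movies)
# model_tf2.fit([train.userId, train.movieId], train.rating, epochs=10, verbose=0)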
.ipynb_checkpoints/MovieLens_Recommendation_Notebook-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/Sarah-Hular/OOP-58001/blob/main/Copy_of_Fundamentals_of_Python.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="P7TgJ071t9Iu" # Python Variables # + colab={"base_uri": "https://localhost:8080/"} id="r7sSD5kRuB6h" outputId="697b4aa5-f450-43a2-c910-1f903e274aa8" x = float(1) a, b = 0, -1 a, b, c= "Andrea", "Dexter", "Eleazar" print('This is a sample') print(a) print(b) # + [markdown] id="TY7BqjJLuvfV" # Casting # + colab={"base_uri": "https://localhost:8080/"} id="zdHsPDw_ux2q" outputId="83276ec9-ff61-4d7f-d833-b27adb94e624" print(x) # + [markdown] id="1p2bB9Equ4LF" # Type() Function # + colab={"base_uri": "https://localhost:8080/"} id="E-MDZyXqvYMh" outputId="e469f58a-7beb-4990-8525-4fa83f646579" y = "Johnny" print(type(y)) print(type(x)) # + [markdown] id="HFOkOmFWwxID" # Double Quotes and Single Quotes # + colab={"base_uri": "https://localhost:8080/"} id="9tDGNz-Gw35R" outputId="dde5eb85-ae79-4d60-83ad-ccd0ee65c087" h= "Alora" v= 1 v= 2 print(h) print(v) print(v+1) # + [markdown] id="7VPBRYePxUPK" # Multiple Variables # # + colab={"base_uri": "https://localhost:8080/"} id="ZHSkiZChxWcD" outputId="573ddd05-baa2-4cc6-9188-171941a34439" x,y,z="one","two","Three" print(x) print(y) print(z) print(x,y,z) # + [markdown] id="6wzevbxoxqSy" # One to Multiple Variables # # + colab={"base_uri": "https://localhost:8080/"} id="QXuCanRUxtg8" outputId="0febd4d9-b0cc-454f-a36c-5222c78935ce" x = y = z = "Sarah" print(x,y,z) # + [markdown] id="GN_BfvJFyLci" # Output Variables # + colab={"base_uri": "https://localhost:8080/"} id="qfpfHsUFyNc5" outputId="3b4542df-c1d0-4ebf-a454-c4aee6d3aa9e" x= "enjoying" print("Python is " + x) x = "Hi" y = "beautiful" print(x+""+" "+y) # + [markdown] id="KfmQbHDB0BY5" # Python Indentation # # + colab={"base_uri": "https://localhost:8080/"} id="8-56DnPe0E4b" outputId="430b9de2-4b28-47ce-dc2b-0abfc75b0b94" if 5>4: print( "Yes") # + [markdown] id="ynJkd4OW0qye" # Python Comments # # + colab={"base_uri": "https://localhost:8080/"} id="sqrleepk0aFF" outputId="9d268d50-da05-4e60-b972-4e6369b23863" #This is a comment print( "Hello, Philippines") # + [markdown] id="I_fPNMSZ5Cuh" # Assignment Operators # + colab={"base_uri": "https://localhost:8080/"} id="WtgBpV_W5Exm" outputId="560779d1-a2db-43cc-a236-3e4eb59ef883" q=10 q+=5 print(q) # + [markdown] id="OIaJhMQx5WZ-" # Arithmetic Operations # + colab={"base_uri": "https://localhost:8080/"} id="rm0v8-ll5Zo1" outputId="8220c613-d3c3-4943-ac77-324a3907c1b0" j = 2 f = 4 l = 6 print(f+j) print(f-j) print(f*l) print(int(l/f)) print(2/f) print(2%f) print(2//f) print(2**2) # + [markdown] id="8o43EP5y68Tu" # Bitwise Operator shift right/left # + colab={"base_uri": "https://localhost:8080/"} id="uzo14MCP7AER" outputId="6a063549-a893-418a-a1fe-e34469b5150c" u=2 print(u<<1) g=10 print(g<<1)
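# The shift operators move the bits of an integer: a left shift by one doubles
# the value and a right shift by one floor-divides it by two. A short check of
# that equivalence, printed alongside the binary representations:
for value in (2, 10):
    print(bin(value), "<< 1 ->", bin(value << 1), "equals", value * 2)
    print(bin(value), ">> 1 ->", bin(value >> 1), "equals", value // 2)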
Copy_of_Fundamentals_of_Python.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # --- # # _You are currently looking at **version 1.2** of this notebook. To download notebooks and datafiles, as well as get help on Jupyter notebooks in the Coursera platform, visit the [Jupyter Notebook FAQ](https://www.coursera.org/learn/python-data-analysis/resources/0dhYG) course resource._ # # --- # # Assignment 3 - More Pandas # All questions are weighted the same in this assignment. This assignment requires more individual learning then the last one did - you are encouraged to check out the [pandas documentation](http://pandas.pydata.org/pandas-docs/stable/) to find functions or methods you might not have used yet, or ask questions on [Stack Overflow](http://stackoverflow.com/) and tag them as pandas and python related. And of course, the discussion forums are open for interaction with your peers and the course staff. # ### Question 1 (20%) # Load the energy data from the file `Energy Indicators.xls`, which is a list of indicators of [energy supply and renewable electricity production](Energy%20Indicators.xls) from the [United Nations](http://unstats.un.org/unsd/environment/excel_file_tables/2013/Energy%20Indicators.xls) for the year 2013, and should be put into a DataFrame with the variable name of **energy**. # # Keep in mind that this is an Excel file, and not a comma separated values file. Also, make sure to exclude the footer and header information from the datafile. The first two columns are unneccessary, so you should get rid of them, and you should change the column labels so that the columns are: # # `['Country', 'Energy Supply', 'Energy Supply per Capita', '% Renewable]` # # Convert `Energy Supply` to gigajoules (there are 1,000,000 gigajoules in a petajoule). For all countries which have missing data (e.g. data with "...") make sure this is reflected as `np.NaN` values. # # Rename the following list of countries (for use in later questions): # # ```"Republic of Korea": "South Korea", # "United States of America": "United States", # "United Kingdom of Great Britain and Northern Ireland": "United Kingdom", # "China, Hong Kong Special Administrative Region": "Hong Kong"``` # # There are also several countries with parenthesis in their name. Be sure to remove these, e.g. `'Bolivia (Plurinational State of)'` should be `'Bolivia'`. # # <br> # # Next, load the GDP data from the file `world_bank.csv`, which is a csv containing countries' GDP from 1960 to 2015 from [World Bank](http://data.worldbank.org/indicator/NY.GDP.MKTP.CD). Call this DataFrame **GDP**. # # Make sure to skip the header, and rename the following list of countries: # # ```"Korea, Rep.": "South Korea", # "Iran, Islamic Rep.": "Iran", # "Hong Kong SAR, China": "Hong Kong"``` # # <br> # # Finally, load the [Sciamgo Journal and Country Rank data for Energy Engineering and Power Technology](http://www.scimagojr.com/countryrank.php?category=2102) from the file `scimagojr-3.xlsx`, which ranks countries based on their journal contributions in the aforementioned area. Call this DataFrame **ScimEn**. # # Join the three datasets: GDP, Energy, and ScimEn into a new dataset (using the intersection of country names). Use only the last 10 years (2006-2015) of GDP data and only the top 15 countries by Scimagojr 'Rank' (Rank 1 through 15). 
# # The index of this DataFrame should be the name of the country, and the columns should be ['Rank', 'Documents', 'Citable documents', 'Citations', 'Self-citations', # 'Citations per document', 'H index', 'Energy Supply', # 'Energy Supply per Capita', '% Renewable', '2006', '2007', '2008', # '2009', '2010', '2011', '2012', '2013', '2014', '2015']. # # *This function should return a DataFrame with 20 columns and 15 entries.* # + def get_energy(_debug=False): if _debug: print('\nget_energy:') # Load data import pandas as pd # 1. skip header and footer; 2. NaN data is '...' df1 = pd.read_excel("Energy Indicators.xls", skiprows=17, skipfooter=38, na_values="...") if _debug: print('Type: {}'.format(type(df1))) # dataframe # Correct column names df1 = df1.drop(df1.columns[[0, 1]], axis=1) # remove useless columns df1.rename(columns={df1.columns[0]: 'Country'}, inplace=True) df1.rename(columns={df1.columns[1]: 'Energy Supply'}, inplace=True) df1.rename(columns={df1.columns[2]: 'Energy Supply per Capita'}, inplace=True) df1.rename(columns={df1.columns[3]: '% Renewable'}, inplace=True) if _debug: print('df1 Columns: {}'.format(df1.columns)) # Clean data import re rename_country1 = { "Republic of Korea": "South Korea", "United States of America": "United States", "United Kingdom of Great Britain and Northern Ireland": "United Kingdom", "China, Hong Kong Special Administrative Region": "Hong Kong"} df1['Energy Supply'] *= 1000000 # 理论上DataFrame不应该做循环处理, 看看怎么改进 country = pd.Series(len(df1)) # to set value for i in range(len(df1)): row = df1.loc[i, :] # Rename country name: # Step 1: remove number. e.g. Greenland7 -> Greenland country[i] = re.sub("\d", "", row['Country']) # Step 2: remove (), break the row and only keep index=0 country[i] = country[i].split(' (')[0] # Step 3: replace name by rename_country if country[i] in rename_country1.keys(): country[i] = rename_country1.get(country[i]) if _debug: print('{} -> {}'.format(row['Country'], country[i])) df1['Country'] = country df1.set_index('Country', inplace=True) if _debug: print('df1 Columns: {}'.format(df1.columns)) print(df1.loc['China']) print(df1.loc['South Korea']) # print(list(df1.index)) return df1 def get_gdp(_debug=False): if _debug: print('\nget_gdp:') # Load data import pandas as pd df2 = pd.read_csv('world_bank.csv', skiprows=4, encoding="utf8") if _debug: print('df2 Columns: {}'.format(df2.columns)) # Only needs country name and last 10 years data df2 = df2[['Country Name', '2006', '2007', '2008', '2009', '2010', '2011', '2012', '2013', '2014', '2015']] # Here is [[]] df2.rename(columns={'Country Name': 'Country'}, inplace=True) if _debug: print('df2 Columns: {}'.format(df2.columns)) # Clean data rename_country2 = {"Korea, Rep.": "South Korea", "Iran, Islamic Rep.": "Iran", "Hong Kong SAR, China": "Hong Kong"} country = pd.Series(len(df2)) # to set value for i in range(len(df2)): row = df2.iloc[i, :] # replace name by rename_country country[i] = row['Country'] if country[i] in rename_country2.keys(): country[i] = rename_country2.get(country[i]) if _debug: print('{} -> {}'.format(row['Country'], country[i])) df2['Country'] = country df2.set_index('Country', inplace=True) if _debug: print('df2 Columns: {}'.format(df2.columns)) print(df2.loc['China']) print(df2.loc['South Korea']) # print(list(df2.index)) return df2 def get_rank(_debug=False): if _debug: print('\nget_rank:') # Load data import pandas as pd df3 = pd.read_excel("scimagojr-3.xlsx") if _debug: print('df3 Columns: {}'.format(df3.columns)) df3.set_index('Country', 
inplace=True) if _debug: print('df3 Columns: {}'.format(df3.columns)) # print(list(df3.index)) return df3 def answer_one(_debug=False): energy = get_energy(_debug) GDP = get_gdp(_debug) ScimEn = get_rank(_debug) ScimEn = ScimEn[ScimEn['Rank'] < 16] # top 15 # left join by the index import pandas as pd df = pd.merge(ScimEn, energy, how='inner', left_index=True, right_index=True) df = pd.merge(df, GDP, how='inner', left_index=True, right_index=True) if _debug: print('df Length: {}'.format(len(df))) print('df Columns: {}'.format(df.columns)) print('{}'.format(df.columns == ['Rank', 'Documents', 'Citable documents', 'Citations', 'Self-citations', 'Citations per document', 'H index', 'Energy Supply', 'Energy Supply per Capita', '% Renewable', '2006', '2007', '2008', '2009', '2010', '2011', '2012', '2013', '2014', '2015'])) # print(df) return df # - # ### Question 2 (6.6%) # The previous question joined three datasets then reduced this to just the top 15 entries. When you joined the datasets, but before you reduced this to the top 15 items, how many entries did you lose? # # *This function should return a single number.* # %%HTML <svg width="800" height="300"> <circle cx="150" cy="180" r="80" fill-opacity="0.2" stroke="black" stroke-width="2" fill="blue" /> <circle cx="200" cy="100" r="80" fill-opacity="0.2" stroke="black" stroke-width="2" fill="red" /> <circle cx="100" cy="100" r="80" fill-opacity="0.2" stroke="black" stroke-width="2" fill="green" /> <line x1="150" y1="125" x2="300" y2="150" stroke="black" stroke-width="2" fill="black" stroke-dasharray="5,3"/> <text x="300" y="165" font-family="Verdana" font-size="35">Everything but this!</text> </svg> def answer_two(): energy = get_energy() GDP = get_gdp() ScimEn = get_rank() print('Length: energy={}, GDP={}, ScimEn={}'.format(len(energy), len(GDP), len(ScimEn))) # Length: energy = 227, GDP = 264, ScimEn = 191 # outer join by index import pandas as pd df_inner = pd.merge(energy, GDP, how='inner', left_index=True, right_index=True) df_inner = pd.merge(df_inner, ScimEn, how='inner', left_index=True, right_index=True) df_outer = pd.merge(energy, GDP, how='outer', left_index=True, right_index=True) df_outer = pd.merge(df_outer, ScimEn, how='outer', left_index=True, right_index=True) # print(list(df_outer.index)) return df_outer.shape[0] - df_inner.shape[0] # ### Question 3 (6.6%) # What are the top 15 countries for average GDP over the last 10 years? # # *This function should return a Series named `avgGDP` with 15 countries and their average GDP sorted in descending order.* def answer_three(): Top15 = answer_one() years = ['2006', '2007', '2008', '2009', '2010', '2011', '2012', '2013', '2014', '2015'] return Top15[years].mean(axis=1).sort_values(ascending=False) # ### Question 4 (6.6%) # By how much had the GDP changed over the 10 year span for the country with the 6th largest average GDP? # # *This function should return a single number.* def answer_four(): Top15_avg = answer_three() Top6_country = Top15_avg.index[5] # index start at 0 print(Top6_country) # United Kingdom Top15 = answer_one() Top6 = Top15.loc[Top6_country] diff = Top6['2015'] - Top6['2006'] print(type(diff)) # <type 'numpy.float64'> return diff # ### Question 5 (6.6%) # What is the mean energy supply per capita? # # *This function should return a single number.* def answer_five(): Top15 = answer_one() return Top15['Energy Supply per Capita'].mean(axis=0) # ### Question 6 (6.6%) # What country has the maximum % Renewable and what is the percentage? 
# # *This function should return a tuple with the name of the country and the percentage.* def answer_six(): Top15 = answer_one() max_country = Top15['% Renewable'].argmax() country = Top15.loc[max_country] return max_country, country['% Renewable'] # ### Question 7 (6.6%) # Create a new column that is the ratio of Self-Citations to Total Citations. # What is the maximum value for this new column, and what country has the highest ratio? # # *This function should return a tuple with the name of the country and the ratio.* def answer_seven(): Top15 = answer_one() Top15['Ratio'] = Top15['Self-citations'] / Top15['Citations'] max_country = Top15['Ratio'].argmax() country = Top15.loc[max_country] return max_country, country['Ratio'] # ### Question 8 (6.6%) # # Create a column that estimates the population using Energy Supply and Energy Supply per capita. # What is the third most populous country according to this estimate? # # *This function should return a single string value.* def answer_eight(): Top15 = answer_one() Top15['Population'] = Top15['Energy Supply'] / Top15['Energy Supply per Capita'] return Top15['Population'].sort_values(ascending=False).index[2] # ### Question 9 # Create a column that estimates the number of citable documents per person. # What is the correlation between the number of citable documents per capita and the energy supply per capita? Use the `.corr()` method, (Pearson's correlation). # # *This function should return a single number.* # # *(Optional: Use the built-in function `plot9()` to visualize the relationship between Energy Supply per Capita vs. Citable docs per Capita)* def answer_nine(): Top15 = answer_one() Top15['Population'] = Top15['Energy Supply'] / Top15['Energy Supply per Capita'] Top15['Citable documents per Capita'] = Top15['Citable documents'] / Top15['Population'] return Top15['Citable documents per Capita'].corr(Top15['Energy Supply per Capita']) def plot9(): import matplotlib as plt # %matplotlib inline Top15 = answer_one() Top15['PopEst'] = Top15['Energy Supply'] / Top15['Energy Supply per Capita'] Top15['Citable docs per Capita'] = Top15['Citable documents'] / Top15['PopEst'] Top15.plot(x='Citable docs per Capita', y='Energy Supply per Capita', kind='scatter', xlim=[0, 0.0006]) # + #plot9() # Be sure to comment out plot9() before submitting the assignment! # - # ### Question 10 (6.6%) # Create a new column with a 1 if the country's % Renewable value is at or above the median for all countries in the top 15, and a 0 if the country's % Renewable value is below the median. # # *This function should return a series named `HighRenew` whose index is the country name sorted in ascending order of rank.* def answer_ten(): Top15 = answer_one() median = Top15['% Renewable'].median() # 17.02028 Top15['HighRenew'] = [1 if x >= median else 0 for x in Top15['% Renewable']] return Top15['HighRenew'] # ### Question 11 (6.6%) # Use the following dictionary to group the Countries by Continent, then create a dateframe that displays the sample size (the number of countries in each continent bin), and the sum, mean, and std deviation for the estimated population of each country. 
# # ```python # ContinentDict = {'China':'Asia', # 'United States':'North America', # 'Japan':'Asia', # 'United Kingdom':'Europe', # 'Russian Federation':'Europe', # 'Canada':'North America', # 'Germany':'Europe', # 'India':'Asia', # 'France':'Europe', # 'South Korea':'Asia', # 'Italy':'Europe', # 'Spain':'Europe', # 'Iran':'Asia', # 'Australia':'Australia', # 'Brazil':'South America'} # ``` # # *This function should return a DataFrame with index named Continent `['Asia', 'Australia', 'Europe', 'North America', 'South America']` and columns `['size', 'sum', 'mean', 'std']`* def answer_eleven(): Top15 = answer_one() ContinentDict = {'China': 'Asia', 'United States': 'North America', 'Japan': 'Asia', 'United Kingdom': 'Europe', 'Russian Federation': 'Europe', 'Canada': 'North America', 'Germany': 'Europe', 'India': 'Asia', 'France': 'Europe', 'South Korea': 'Asia', 'Italy': 'Europe', 'Spain': 'Europe', 'Iran': 'Asia', 'Australia': 'Australia', 'Brazil': 'South America'} Top15['Continent'] = [ContinentDict[x] for x in Top15.index] # init continents = sorted(set(ContinentDict.values())) columns = ['size', 'sum', 'mean', 'std'] import pandas as pd df = pd.DataFrame( [pd.Series() * len(continents)], index=continents, columns=columns) # population Top15['Population'] = Top15['Energy Supply'] / Top15['Energy Supply per Capita'] for c in continents: population_of_country_in_continent = Top15[Top15['Continent'] == c]['Population'] df.set_value(c, ['size'], population_of_country_in_continent.count()) df.set_value(c, ['sum'], population_of_country_in_continent.sum()) df.set_value(c, ['mean'], population_of_country_in_continent.mean()) df.set_value(c, ['std'], population_of_country_in_continent.std()) return df # ### Question 12 (6.6%) # Cut % Renewable into 5 bins. Group Top15 by the Continent, as well as these new % Renewable bins. How many countries are in each of these groups? # # *This function should return a Series with a MultiIndex of `Continent`, then the bins for `% Renewable`. Do not include groups with no countries.* def answer_twelve(): Top15 = answer_one() # % Renewable bin import pandas as pd Top15['bins for % Renewable'] = pd.cut(Top15['% Renewable'], 5) # continent ContinentDict = {'China': 'Asia', 'United States': 'North America', 'Japan': 'Asia', 'United Kingdom': 'Europe', 'Russian Federation': 'Europe', 'Canada': 'North America', 'Germany': 'Europe', 'India': 'Asia', 'France': 'Europe', 'South Korea': 'Asia', 'Italy': 'Europe', 'Spain': 'Europe', 'Iran': 'Asia', 'Australia': 'Australia', 'Brazil': 'South America'} Top15['Continent'] = [ContinentDict[x] for x in Top15.index] Top15 = Top15.reset_index() import numpy as np df = Top15.groupby(['Continent', 'bins for % Renewable']).agg({'Country': np.count_nonzero}) # print(df) # Country # Continent bins for % Renewable # Asia (2.212, 15.753] 4 # (15.753, 29.227] 1 # Australia (2.212, 15.753] 1 # Europe (2.212, 15.753] 1 # (15.753, 29.227] 3 # (29.227, 42.701] 2 # North America (2.212, 15.753] 1 # (56.174, 69.648] 1 # South America (56.174, 69.648] 1 return df.T.iloc[0] # convert dataframe vector to series # ### Question 13 (6.6%) # Convert the Population Estimate series to a string with thousands separator (using commas). Use all significant digits (do not round the results). # # e.g. 
12345678.90 -> 12,345,678.90 # # *This function should return a Series `PopEst` whose index is the country name and whose values are the population estimate string.* def answer_thirteen(): Top15 = answer_one() Top15['Population'] = Top15['Energy Supply'] / Top15['Energy Supply per Capita'] return Top15['Population'].apply(lambda x: "{:,}".format(x)) # ### Optional # # Use the built in function `plot_optional()` to see an example visualization. def plot_optional(): import matplotlib as plt # %matplotlib inline Top15 = answer_one() ax = Top15.plot(x='Rank', y='% Renewable', kind='scatter', c=['#e41a1c','#377eb8','#e41a1c','#4daf4a','#4daf4a','#377eb8','#4daf4a','#e41a1c', '#4daf4a','#e41a1c','#4daf4a','#4daf4a','#e41a1c','#dede00','#ff7f00'], xticks=range(1,16), s=6*Top15['2014']/10**10, alpha=.75, figsize=[16,6]); for i, txt in enumerate(Top15.index): ax.annotate(txt, [Top15['Rank'][i], Top15['% Renewable'][i]], ha='center') print("This is an example of a visualization that can be created to help understand the data. \ This is a bubble chart showing % Renewable vs. Rank. The size of the bubble corresponds to the countries' \ 2014 GDP, and the color corresponds to the continent.") # + #plot_optional() # Be sure to comment out plot_optional() before submitting the assignment!
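# As a sketch only: the per-continent summary in answer_eleven fills its frame
# cell by cell with set_value, which newer pandas releases have removed. The
# same size/sum/mean/std table can be built with a single groupby-aggregate,
# reusing ContinentDict and answer_one() from above (assumed unchanged); this
# is an illustration, not the graded solution.
def continent_summary_sketch():
    Top15 = answer_one()
    ContinentDict = {'China': 'Asia', 'United States': 'North America', 'Japan': 'Asia',
                     'United Kingdom': 'Europe', 'Russian Federation': 'Europe',
                     'Canada': 'North America', 'Germany': 'Europe', 'India': 'Asia',
                     'France': 'Europe', 'South Korea': 'Asia', 'Italy': 'Europe',
                     'Spain': 'Europe', 'Iran': 'Asia', 'Australia': 'Australia',
                     'Brazil': 'South America'}
    Top15['Continent'] = [ContinentDict[c] for c in Top15.index]
    Top15['PopEst'] = Top15['Energy Supply'] / Top15['Energy Supply per Capita']
    return Top15.groupby('Continent')['PopEst'].agg(['size', 'sum', 'mean', 'std'])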
1_introduction/w3_advanced_pandas/4_assignment (ipynb)/Assignment 3 Solution.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import matplotlib import matplotlib.pyplot as plt import numpy X = numpy.load('Dataset/data_0inches.npy') y = range(100) # print(len(X)) # print(len(y)) # y = [2,4,6,8,10,12,14,16,18,20] # X = numpy.arange(10) # fig = plt.figure() # # ax = plt.subplot(111) # ax.plot(y, X[0][0:5]) # plt.title('Data') # ax.legend() # plt.show() for i in range(3): fig, ax = plt.subplots() ax.set_title('Data for observation #{0}'.format(i+1)) ax.plot(y, X[i][0:100]) plt.show() # fig.savefig('plots/plot.png')
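# A small follow-up sketch that overlays the first few observations on one
# axis instead of drawing a separate figure per observation, reusing the X
# array and y range loaded above (assumed unchanged).
fig, ax = plt.subplots(figsize=(8, 4))
for i in range(3):
    ax.plot(y, X[i][0:100], label='observation #{0}'.format(i + 1))
ax.set_title('First three observations')
ax.set_xlabel('sample index')
ax.legend()
plt.show()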
Plot_testing.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <script async src="https://www.googletagmanager.com/gtag/js?id=UA-59152712-8"></script> # <script> # window.dataLayer = window.dataLayer || []; # function gtag(){dataLayer.push(arguments);} # gtag('js', new Date()); # # gtag('config', 'UA-59152712-8'); # </script> # # # [Polytropic TOV](https://en.wikipedia.org/wiki/Tolman%E2%80%93Oppenheimer%E2%80%93Volkoff_equation) Initial Data # # ## Authors: <NAME>, <NAME>, & <NAME> # ### Formatting improvements courtesy <NAME> # # ## This module sets up initial data for a [TOV](https://en.wikipedia.org/wiki/Tolman%E2%80%93Oppenheimer%E2%80%93Volkoff_equation) star in *spherical, isotropic coordinates* # # **Notebook Status:** <font color='green'><b> Validated </b></font> # # **Validation Notes:** This module has been validated to exhibit convergence to zero of the Hamiltonian constraint violation at the expected order to the exact solution (see [start-to-finish TOV module](Tutorial-Start_to_Finish-BSSNCurvilinear-Setting_up_TOV_initial_data.ipynb) for full test). Note that convergence at the surface of the star is lower order due to the sharp drop to zero in $T^{\mu\nu}$. # # ### NRPy+ Source Code for this module: [TOV/TOV_Solver.py](../edit/TOV/TOV_Solver.py) # # [comment]: <> (Introduction: TODO) # <a id='toc'></a> # # # Table of Contents # $$\label{toc}$$ # # This notebook is organized as follows: # # 1. [Step 1](#initializenrpy): Initialize core Python/NRPy+ modules # 1. [Step 2](#tov): The TOV Equations # 1. [Step 3](#code_validation): Code Validation against `TOV.TOV_Solver` NRPy+ module # 1. [Step 4](#latex_pdf_output): Output this notebook to $\LaTeX$-formatted PDF file # <a id='initializenrpy'></a> # # # Step 1: Initialize core Python/NRPy+ modules \[Back to [top](#toc)\] # $$\label{initializenrpy}$$ # Step 1: Import needed Python/NRPy+ modules import numpy as np # NumPy: A numerical methods module for Python import scipy.integrate as si # SciPy: Python module for mathematics, science, and engineering applications import math, sys # Standard Python modules for math; multiplatform OS-level functions import TOV.Polytropic_EOSs as ppeos # NRPy+: Piecewise polytrope equation of state support # <a id='tov'></a> # # # Step 2: The TOV equations \[Back to [top](#toc)\] # $$\label{tov}$$ # # The [TOV line element](https://en.wikipedia.org/wiki/Tolman%E2%80%93Oppenheimer%E2%80%93Volkoff_equation) in terms of the *Schwarzschild coordinate* $r$ is written (in the $-+++$ form): # $$ # ds^2 = - c^2 e^\nu dt^2 + \left(1 - \frac{2Gm}{rc^2}\right)^{-1} dr^2 + r^2 d\Omega^2, # $$ # where $m(r)$ is the mass-energy enclosed at a given $r$, and is equal to the total star's mass outside the stellar radius $r=R$. # # In terms of the *isotropic coordinate* $\bar{r}$ with $G=c=1$ (i.e., the coordinate system and units we'd prefer to use), the ($-+++$ form) line element is written: # $$ # ds^2 = - e^{\nu} dt^2 + e^{4\phi} \left(d\bar{r}^2 + \bar{r}^2 d\Omega^2\right), # $$ # where $\phi$ here is the *conformal factor*. 
# # Setting components of the above line element equal to one another, we get (in $G=c=1$ units): # # \begin{align} # r^2 &= e^{4\phi} \bar{r}^2 \implies e^{4\phi} = \frac{r^2}{\bar{r}^2} \\ # \left(1 - \frac{2m}{r}\right)^{-1} dr^2 &= e^{4\phi} d\bar{r}^2 \\ # \implies \frac{d\bar{r}(r)}{dr} &= \left(1 - \frac{2m}{r} \right)^{-1/2} \frac{\bar{r}(r)}{r}. # \end{align} # # The TOV equations provide radial ODEs for the pressure and $\nu$ (from [the Wikipedia article on the TOV solution](https://en.wikipedia.org/wiki/Tolman%E2%80%93Oppenheimer%E2%80%93Volkoff_equation)): # # \begin{align} # \frac{dP}{dr} &= - \frac{1}{r} \left( \frac{\rho + P}{2} \right) \left(\frac{2 m}{r} + 8 \pi r^2 P\right) \left(1 - \frac{2 m}{r}\right)^{-1} \\ # \frac{d \nu}{d r} &= \frac{1}{r}\left(1 - \frac{2 m}{r}\right)^{-1} \left(\frac{2 m}{r} + 8 \pi r^2 P\right) \\ # \end{align} # # Assuming a polytropic equation of state, which relates the pressure $P$ to the baryonic rest-mass density $\rho_B$, # # $$ # P(\rho_B) = K \rho_B^\Gamma, # $$ # the specific internal energy will be given by # $$ # \epsilon = \frac{P}{\rho_B (\Gamma - 1)}, # $$ # # so the total mass-energy density $\rho$ is given by # $$ # \rho = \rho_B (1 + \epsilon). # $$ # # Given this, the mass-energy $m(r)$ density is the solution to the ODE: # $$ # \frac{dm(r)}{dr} = 4\pi r^2 \rho(r) # $$ # # Thus the full set of ODEs that need to be solved is given by # # $$ # \boxed{ # \begin{matrix} # \frac{dP}{dr} &=& - \frac{1}{r} \left( \frac{\rho + P}{2} \right) \left(\frac{2 m}{r} + 8 \pi r^2 P\right) \left(1 - \frac{2 m}{r}\right)^{-1} \\ # \frac{d \nu}{d r} &=& \frac{1}{r}\left(1 - \frac{2 m}{r}\right)^{-1} \left(\frac{2 m}{r} + 8 \pi r^2 P\right) \\ # \frac{m(r)}{dr} &=& 4\pi r^2 \rho(r) \\ # \frac{d\bar{r}(r)}{dr} &=& \left(1 - \frac{2m}{r} \right)^{-1/2} \frac{\bar{r}(r)}{r} # \end{matrix} # }\ . # $$ # # The following code solves these equations, and was largely written by <NAME>. # + # Step 2: The TOV equations ## TOV SOLVER FOR SINGLE AND PIECEWISE POLYTROPES ## Authors: <NAME>, <NAME>, <NAME> # Full documentation for this module may be found in the NRPy+ tutorial Jupyter notebook: # Tutorial-Start_to_Finish-BSSNCurvilinear-Setting_up_TOV_initial_data.ipynb # Inputs: # * Output data file name # * rho_baryon_central, the central density of the TOV star. # * n, the polytropic equation of state index. n=1 models cold, degenerate neutron star matter. # * K_Polytrope, the polytropic constant. # * Verbose output toggle (default = True) # Output: An initial data file (default file name = "outputTOVpolytrope.txt") that well # samples the (spherically symmetric) solution both inside and outside the star. # It is up to the initial data module to perform the 1D interpolation to generate # the solution at arbitrary radius. 
The file has the following columns: # Column 1: Schwarzschild radius # Column 2: rho(r), *total* mass-energy density (as opposed to baryonic rest-mass density) # Column 3: P(r), Pressure # Column 4: m(r), mass enclosed # Column 5: e^{nu(r)}, g_{tt}(r) # Column 6: e^{4 phi(r)}, conformal factor g_{rr}(r) # Column 7: rbar(r), Isotropic radius # rbar refers to the isotropic radius, and # R_Schw refers to the Schwarzschild radius def TOV_Solver(eos, outfile = "outputTOVpolytrope.txt", rho_baryon_central = 0.129285, verbose = True, return_M_and_RSchw = False, accuracy = "medium", integrator_type = "default", no_output_File = False, pressure_renormalization=1): # reset the pressure to stellar oscillations studies def TOV_rhs(r_Schw, y) : # In \tilde units # P = y[0] m = y[1] # nu = y[2] # nu is not needed as input into TOV_rhs rbar = y[3] # Compute rho_b and eps_cold, to be used below # to compute rho_(total) rho_baryon, eps_cold = ppeos.Polytrope_EOS__compute_rhob_and_eps_cold_from_P_cold(eos,P) # with open("rhob_P_cold_and_eps_cold.dat","a+") as file: # file.write(str(r_Schw).format("%.15e")+" "+str(rho_baryon).format("%.15e")+" "+str(P).format("%.15e")+" "+str(eps_cold).format("%.15e")+"\n") # Compute rho, the *total* mass-energy density: # .------------------------------. # | rho = (1 + eps)*rho_(baryon) | # .------------------------------. # with eps = eps_cold, for the initial data. rho = (1.0 + eps_cold)*rho_baryon # m = 4*math.pi/3. * rho*r_Schw**3 if( r_Schw < 1e-4 or m <= 0.): # From https://github.com/natj/tov/blob/master/tov.py#L33: # dPdr = -cgs.G*(eden + P/cgs.c**2)*(m + 4.0*pi*r**3*P/cgs.c**2) # dPdr = dPdr/(r*(r - 2.0*cgs.G*m/cgs.c**2)) dPdrSchw = -(rho + P)*(4.*math.pi/3.*r_Schw*rho + 4.*math.pi*r_Schw*P)/(1.-8.*math.pi*rho*r_Schw*r_Schw) drbardrSchw = 1./(1. - 8.*math.pi*rho*r_Schw*r_Schw)**0.5 else: dPdrSchw = -(rho + P)*(m + 4.*math.pi*r_Schw**3*P)/(r_Schw*r_Schw*(1.-2.*m/r_Schw)) drbardrSchw = 1./(1. - 2.*m/r_Schw)**0.5*rbar/r_Schw dmdrSchw = 4.*math.pi*r_Schw*r_Schw*rho dnudrSchw = -2./(P + rho)*dPdrSchw return [dPdrSchw, dmdrSchw, dnudrSchw, drbardrSchw] def integrateStar( eos, P, dumpData = False ): if accuracy == "medium": min_step_size = 1e-5 max_step_size = 1e-2 integrator = 'dop853' elif accuracy == "low": min_step_size = 1e-3 max_step_size = 1e-1 integrator = 'dopri5' elif accuracy == "verylow": min_step_size = 1e-1 max_step_size = 5e-1 integrator = 'dopri5' elif accuracy == "high": min_step_size = 1e-5 max_step_size = 1e-5 integrator = 'dop853' elif accuracy == "veryhigh": min_step_size = 1e-7 max_step_size = 1e-6 integrator = 'dop853' else: print("Unknown accuracy option: "+str(accuracy)) if integrator_type == "default": pass else: integrator = integrator_type integrator = si.ode(TOV_rhs).set_integrator(integrator)#,rtol=1e-4,atol=1e-4) # integrator = si.ode(TOV_rhs).set_integrator('dopri5',rtol=1e-4) y0 = [P, 0., 0., 0.] r_Schw = 0. 
integrator.set_initial_value(y0,r_Schw) dr_Schw = min_step_size P = y0[0] PArr = [] r_SchwArr = [] mArr = [] nuArr = [] rbarArr = [] while integrator.successful() and P > 1e-19*y0[0] : P, m, nu, rbar = integrator.integrate(r_Schw + dr_Schw) # Update the value of r_Schw to the latest integrated value r_Schw += dr_Schw dPdrSchw, dmdrSchw, dnudrSchw, drbardrSchw = TOV_rhs( r_Schw, [P,m,nu,rbar]) dr_Schw = 0.1*min(abs(P/dPdrSchw), abs(m/dmdrSchw)) dr_Schw = min(dr_Schw, max_step_size) PArr.append(P) r_SchwArr.append(r_Schw) mArr.append(m) nuArr.append(nu) rbarArr.append(rbar) M = mArr[-1] R_Schw = r_SchwArr[-1] if no_output_File == True: return R_Schw, M # Apply integration constant to ensure rbar is continuous across TOV surface for ii in range(len(rbarArr)): rbarArr[ii] *= 0.5*(np.sqrt(R_Schw*(R_Schw - 2.0*M)) + R_Schw - M) / rbarArr[-1] nuArr_np = np.array(nuArr) # Rescale solution to nu so that it satisfies BC: exp(nu(R))=exp(nutilde-nu(r=R)) * (1 - 2m(R)/R) # Thus, nu(R) = (nutilde - nu(r=R)) + log(1 - 2*m(R)/R) nuArr_np = nuArr_np - nuArr_np[-1] + math.log(1.-2.*mArr[-1]/r_SchwArr[-1]) r_SchwArrExtend_np = 10.**(np.arange(0.01,5.0,0.01))*r_SchwArr[-1] r_SchwArr.extend(r_SchwArrExtend_np) mArr.extend(r_SchwArrExtend_np*0. + M) PArr.extend(r_SchwArrExtend_np*0.) exp2phiArr_np = np.append( np.exp(nuArr_np), 1. - 2.*M/r_SchwArrExtend_np) nuArr.extend(np.log(1. - 2.*M/r_SchwArrExtend_np)) rbarArr.extend( 0.5*(np.sqrt(r_SchwArrExtend_np**2 - 2.*M*r_SchwArrExtend_np) + r_SchwArrExtend_np - M) ) #phiArr_np = np.append( np.exp(nuArr_np), 1. - 2.*M/r_SchwArrExtend_np) # Appending to a Python array does what one would reasonably expect. # Appending to a numpy array allocates space for a new array with size+1, # then copies the data over... over and over... super inefficient. r_SchwArr_np = np.array(r_SchwArr) PArr_np = np.array(PArr) rho_baryonArr_np = np.array(PArr) # This is just to initialize the array for j in range(len(PArr_np)): # Compute rho_b from P rho_baryonArr_np[j] = ppeos.Polytrope_EOS__compute_rhob_from_P_cold(eos,PArr_np[j]) mArr_np = np.array(mArr) rbarArr_np = np.array(rbarArr) confFactor_exp4phi_np = (r_SchwArr_np/rbarArr_np)**2 # Compute the *total* mass-energy density (as opposed to the *baryonic* mass density) rhoArr_np = [] for i in range(len(PArr)): rho_baryon, eps_cold = ppeos.Polytrope_EOS__compute_rhob_and_eps_cold_from_P_cold(eos,PArr[i]) rho = (1.0 + eps_cold ) * rho_baryon rhoArr_np.append(rho) if verbose: print(len(r_SchwArr_np),len(rhoArr_np),len(rho_baryonArr_np),len(PArr_np),len(mArr_np),len(exp2phiArr_np)) PArr_np *= pressure_renormalization # set for pressure renormalization studies # Special thanks to <NAME> for pointing out this issue with zip() if sys.version_info[0] < 3: np.savetxt(outfile, zip(r_SchwArr_np,rhoArr_np,rho_baryonArr_np,PArr_np,mArr_np,exp2phiArr_np,confFactor_exp4phi_np,rbarArr_np), fmt="%.15e") else: np.savetxt(outfile, list(zip(r_SchwArr_np,rhoArr_np,rho_baryonArr_np,PArr_np,mArr_np,exp2phiArr_np,confFactor_exp4phi_np,rbarArr_np)), fmt="%.15e") return R_Schw, M # Set initial condition from rho_baryon_central P_initial_condition = ppeos.Polytrope_EOS__compute_P_cold_from_rhob(eos, rho_baryon_central) # Integrate the initial condition R_Schw_TOV, M_TOV = integrateStar(eos, P_initial_condition, True) if verbose: print("Just generated a TOV star with R_Schw = %.15e , M = %.15e , M/R_Schw = %.15e ." 
%(R_Schw_TOV,M_TOV,(M_TOV / R_Schw_TOV))) if return_M_and_RSchw: return M_TOV, R_Schw_TOV ############################ # Single polytrope example # ############################ # Set neos = 1 (single polytrope) neos = 1 # Set rho_poly_tab (not needed for a single polytrope) rho_poly_tab = [] # Set Gamma_poly_tab Gamma_poly_tab = [2.0] # Set K_poly_tab0 K_poly_tab0 = 1. # ZACH NOTES: CHANGED FROM 100. # Set the eos quantities eos = ppeos.set_up_EOS_parameters__complete_set_of_input_variables(neos,rho_poly_tab,Gamma_poly_tab,K_poly_tab0) # Set initial condition (Pressure computed from central density) rho_baryon_central = 0.129285 M_TOV, R_Schw_TOV = TOV_Solver(eos,outfile="outputTOVpolytrope.txt",rho_baryon_central=0.129285,verbose = True, return_M_and_RSchw=True,accuracy="medium",integrator_type="default", pressure_renormalization=1.0) # - # <a id='code_validation'></a> # # # Step 3: Code Validation \[Back to [top](#toc)\] # $$\label{code_validation}$$ # # Here, as a code validation check, we verify agreement in the SymPy expressions for these TOV initial data between # # 1. this tutorial and # 2. the NRPy+ [TOV.TOV_Solver](../edit/TOV/TOV_Solver.py) module. # + # Step 3: Code Validation against TOV.TOV_Solver module import filecmp import TOV.TOV_Solver as TOV TOV.TOV_Solver(eos, outfile="outputTOVpolytrope-validation.txt", rho_baryon_central=0.129285, verbose = True, accuracy="medium", integrator_type="default", no_output_File = False) if filecmp.cmp('outputTOVpolytrope.txt', 'outputTOVpolytrope-validation.txt') == False: print("ERROR: TOV initial data test FAILED!") sys.exit(1) else: print("TOV initial data test PASSED.") # - # <a id='latex_pdf_output'></a> # # # Step 4: Output this notebook to $\LaTeX$-formatted PDF file \[Back to [top](#toc)\] # $$\label{latex_pdf_output}$$ # # The following code cell converts this Jupyter notebook into a proper, clickable $\LaTeX$-formatted PDF file. After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename # [Tutorial-ADM_Initial_Data-TOV](Tutorial-ADM_Initial_Data-TOV.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means.) import cmdline_helper as cmd # NRPy+: Multi-platform Python command-line interface cmd.output_Jupyter_notebook_to_LaTeXed_PDF("Tutorial-ADM_Initial_Data-TOV")
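# As a quick sanity check of the data written above, the sketch below reads `outputTOVpolytrope.txt` back in and plots the pressure and enclosed-mass profiles. This cell is only an illustration: it assumes the default `outfile` name, and it unpacks the eight columns in the order actually written by the `np.savetxt` call in `integrateStar` (which also includes the baryonic rest-mass density, in addition to the seven columns described in the header comment above).

# +
import numpy as np
import matplotlib.pyplot as plt

# Column order follows the np.savetxt call above:
# r_Schw, rho, rho_baryon, P, m, exp(nu), exp(4 phi), rbar
r_Schw, rho, rho_b, P, m, expnu, exp4phi, rbar = np.loadtxt("outputTOVpolytrope.txt", unpack=True)

fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4))
ax1.loglog(r_Schw, P)            # pressure profile (zero outside the star)
ax1.set_xlabel(r"$r_{\rm Schw}$")
ax1.set_ylabel(r"$P(r)$")
ax2.semilogx(r_Schw, m)          # enclosed mass, flat outside the stellar surface
ax2.set_xlabel(r"$r_{\rm Schw}$")
ax2.set_ylabel(r"$m(r)$")
plt.tight_layout()
plt.show()
# -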
Tutorial-ADM_Initial_Data-TOV.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] colab_type="text" id="dUeKVCYTbcyT" # #### Copyright 2019 The TensorFlow Authors. # + cellView="form" colab={} colab_type="code" id="4ellrPx7tdxq" #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # + [markdown] colab_type="text" id="7JfLUlawto_D" # # Classification on imbalanced data # + [markdown] colab_type="text" id="DwdpaTKJOoPu" # <table class="tfo-notebook-buttons" align="left"> # <td> # <a target="_blank" href="https://www.tensorflow.org/beta/tutorials/keras/imbalanced_data"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> # </td> # <td> # <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/r2/tutorials/keras/imbalanced_data.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> # </td> # <td> # <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/r2/tutorials/keras/imbalanced_data.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> # </td> # <td> # <a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/r2/tutorials/keras/imbalanced_data.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> # </td> # </table> # + [markdown] colab_type="text" id="mthoSGBAOoX-" # This tutorial demonstrates how to classify a highly imbalanced dataset in which the number of examples in one class greatly outnumbers the examples in another. You will work with the [Credit Card Fraud Detection](https://www.kaggle.com/mlg-ulb/creditcardfraud) dataset hosted on Kaggle. The aim is to detect a mere 492 fraudulent transactions from 284,807 transactions in total. You will use [Keras](https://www.tensorflow.org/beta/guide/keras/overview) to define the model and [class weights](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/keras/Model) to help the model learn from the imbalanced data. You will display metrics for precision, recall, true positives, false positives, true negatives, false negatives, and AUC while training the model. These are more informative than accuracy when working with imbalanced datasets classification. # # This tutorial contains complete code to: # # * Load a CSV file using Pandas. # * Create train, validation, and test sets. # * Define and train a model using Keras (including setting class weights). # * Evaluate the model using various metrics (including precision and recall). # + [markdown] colab_type="text" id="kRHmSyHxEIhN" # ## Import TensorFlow and other libraries # + colab={} colab_type="code" id="yJHVo_K_v20i" from __future__ import absolute_import, division, print_function, unicode_literals # + colab={} colab_type="code" id="fYBlUQ5FvzxP" try: # # %tensorflow_version only exists in Colab. 
# %tensorflow_version 2.x except Exception: pass # + colab={} colab_type="code" id="jZk7QMofhnk_" # !pip install imblearn # + colab={} colab_type="code" id="JM7hDSNClfoK" import tensorflow as tf from tensorflow import keras import matplotlib.pyplot as plt import numpy as np import pandas as pd from sklearn.metrics import confusion_matrix from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler from imblearn.over_sampling import SMOTE # + [markdown] colab_type="text" id="4sA9WOcmzH2D" # ## Use Pandas to get the Kaggle Credit Card Fraud data set # # Pandas is a Python library with many helpful utilities for loading and working with structured data and can be used to download CSVs into a dataframe. # # Note: This dataset has been collected and analysed during a research collaboration of Worldline and the [Machine Learning Group](http://mlg.ulb.ac.be) of ULB (Université Libre de Bruxelles) on big data mining and fraud detection. More details on current and past projects on related topics are available [here](https://www.researchgate.net/project/Fraud-detection-5) and the page of the [DefeatFraud](https://mlg.ulb.ac.be/wordpress/portfolio_page/defeatfraud-assessment-and-validation-of-deep-feature-engineering-and-learning-solutions-for-fraud-detection/) project # + colab={} colab_type="code" id="pR_SnbMArXr7" raw_df = pd.read_csv('https://storage.googleapis.com/download.tensorflow.org/data/creditcard.csv') raw_df.head() # + [markdown] colab_type="text" id="6qox6ryyzwdr" # ## Split the dataframe into train, validation, and test # # Split the dataset into train, validation, and test sets. The validation set is used during the model fitting to evaluate the loss and any metrics, however the model is not fit with this data. The test set is completely unused during the training phase and is only used at the end to evaluate how well the model generalizes to new data. This is especially important with imbalanced datasets where [overfitting](https://developers.google.com/machine-learning/crash-course/generalization/peril-of-overfitting) is a significant concern from the lack of training data. # + colab={} colab_type="code" id="IO-qEUmJ5JQg" # Use a utility from sklearn to split and shuffle our dataset. train_df, test_df = train_test_split(raw_df, test_size=0.2) train_df, val_df = train_test_split(train_df, test_size=0.2) # Form np arrays of labels and features. train_labels = np.array(train_df.pop('Class')) val_labels = np.array(val_df.pop('Class')) test_labels = np.array(test_df.pop('Class')) train_features = np.array(train_df) val_features = np.array(val_df) test_features = np.array(test_df) # Normalize the input features using the sklearn StandardScaler. # This will set the mean to 0 and standard deviation to 1. 
scaler = StandardScaler() train_features = scaler.fit_transform(train_features) val_features = scaler.transform(val_features) test_features = scaler.transform(test_features) print('Training labels shape:', train_labels.shape) print('Validation labels shape:', val_labels.shape) print('Test labels shape:', test_labels.shape) print('Training features shape:', train_features.shape) print('Validation features shape:', val_features.shape) print('Test features shape:', test_features.shape) # + [markdown] colab_type="text" id="xWKB_CVZFLpB" # ## Examine the class label imbalance # # Let's look at the dataset imbalance: # + colab={} colab_type="code" id="HCJFrtuY2iLF" neg, pos = np.bincount(train_labels) total = neg + pos print('{} positive samples out of {} training samples ({:.2f}% of total)'.format( pos, total, 100 * pos / total)) # + [markdown] colab_type="text" id="KnLKFQDsCBUg" # This shows a small fraction of positive samples. # + [markdown] colab_type="text" id="qFK1u4JX16D8" # ## Define the model and metrics # # Define a function that creates a simple neural network with three densely connected hidden layers, an output sigmoid layer that returns the probability of a transaction being fraudulent, and two [dropout](https://developers.google.com/machine-learning/glossary/#dropout_regularization) layers as an effective way to reduce overfitting. # + colab={} colab_type="code" id="3JQDzUqT3UYG" def make_model(): model = keras.Sequential([ keras.layers.Dense(256, activation='relu', input_shape=(train_features.shape[-1],)), keras.layers.Dense(256, activation='relu'), keras.layers.Dropout(0.3), keras.layers.Dense(256, activation='relu'), keras.layers.Dropout(0.3), keras.layers.Dense(1, activation='sigmoid'), ]) metrics = [ keras.metrics.Accuracy(name='accuracy'), keras.metrics.TruePositives(name='tp'), keras.metrics.FalsePositives(name='fp'), keras.metrics.TrueNegatives(name='tn'), keras.metrics.FalseNegatives(name='fn'), keras.metrics.Precision(name='precision'), keras.metrics.Recall(name='recall'), keras.metrics.AUC(name='auc') ] model.compile( optimizer='adam', loss='binary_crossentropy', metrics=metrics) return model # + [markdown] colab_type="text" id="SU0GX6E6mieP" # ## Understanding useful metrics # # Notice that there are a few metrics defined above that can be computed by the model that will be helpful when evaluating the performance. # # # # * **False** negatives and **false** positives are samples that were **incorrectly** classified # * **True** negatives and **true** positives are samples that were **correctly** classified # * **Accuracy** is the percentage of examples correctly classified # > $\frac{\text{true samples}}{\text{total samples}}$ # * **Precision** is the percentage of **predicted** positives that were correctly classified # > $\frac{\text{true positives}}{\text{true positives + false positives}}$ # * **Recall** is the percentage of **actual** positives that were correctly classified # > $\frac{\text{true positives}}{\text{true positives + false negatives}}$ # * **AUC** refers to the Area Under the Curve of a Receiver Operating Characteristic curve (ROC-AUC). This metric is equal to the probability that a classifier will rank a random positive sample higher than than a random negative sample. # # <br> # # Read more: # * [True vs. False and Positive vs. 
Negative](https://developers.google.com/machine-learning/crash-course/classification/true-false-positive-negative) # * [Accuracy](https://developers.google.com/machine-learning/crash-course/classification/accuracy) # * [Precision and Recall](https://developers.google.com/machine-learning/crash-course/classification/precision-and-recall) # * [ROC-AUC](https://developers.google.com/machine-learning/crash-course/classification/roc-and-auc) # + [markdown] colab_type="text" id="IDbltVPg2m2q" # ## Train a baseline model # # Now create and train your model using the function that was defined earlier. Notice that the model is fit using a larger than default batch size of 2048, this is important to ensure that each batch has a decent chance of containing a few positive samples. If the batch size was too small, they would likely have no fraudelent transactions to learn from. # # # Note: this model will not handle the class imbalance well. You will improve it later in this tutorial. # + colab={} colab_type="code" id="yZKAc8NCDnoR" model = make_model() EPOCHS = 10 BATCH_SIZE = 2048 history = model.fit( train_features, train_labels, batch_size=BATCH_SIZE, epochs=EPOCHS, validation_data=(val_features, val_labels)) # + [markdown] colab_type="text" id="iSaDBYU9xtP6" # ## Plot metrics on the training and validation sets # In this section, you will produce plots of your model's accuracy and loss on the training and validation set. These are useful to check for overfitting, which you can learn more about in this [tutorial](https://www.tensorflow.org/beta/tutorials/keras/overfit_and_underfit). # # Additionally, you can produce these plots for any of the metrics you created above. False negatives are included as an example. # + colab={} colab_type="code" id="WTSkhT1jyGu6" epochs = range(EPOCHS) plt.title('Accuracy') plt.plot(epochs, history.history['accuracy'], color='blue', label='Train') plt.plot(epochs, history.history['val_accuracy'], color='orange', label='Val') plt.xlabel('Epoch') plt.ylabel('Accuracy') plt.legend() _ = plt.figure() plt.title('Loss') plt.plot(epochs, history.history['loss'], color='blue', label='Train') plt.plot(epochs, history.history['val_loss'], color='orange', label='Val') plt.xlabel('Epoch') plt.ylabel('Loss') plt.legend() _ = plt.figure() plt.title('False Negatives') plt.plot(epochs, history.history['fn'], color='blue', label='Train') plt.plot(epochs, history.history['val_fn'], color='orange', label='Val') plt.xlabel('Epoch') plt.ylabel('False Negatives') plt.legend() # + [markdown] colab_type="text" id="oyOhKsc0yYxg" # ## Evaluate the baseline model # # Evaluate your model on the test dataset and display results for the metrics you created above. # + colab={} colab_type="code" id="rPfIaXn3Jr6B" results = model.evaluate(test_features, test_labels) for name, value in zip(model.metrics_names, results): print(name, ': ', value) # + [markdown] colab_type="text" id="gpdsFyp64DhY" # It looks like the precision is relatively high, but the recall and AUC aren't as high as you might like. Classifiers often face challenges when trying to maximize both precision and recall, which is especially true when working with imbalanced datasets. However, because missing fraudulent transactions (false negatives) may have significantly worse business consequences than incorrectly flagging fraudulent transactions (false positives), recall may be more important than precision in this case. 
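# + [markdown]
# Before examining the model's own confusion matrix in the next section, here is a tiny worked example of the metric definitions above. The counts below are made up purely for illustration (they are not results from this model or dataset); they simply show why accuracy can look excellent on imbalanced data while recall tells a very different story.

# +
# Hypothetical confusion-matrix counts (illustration only).
tp, fp, tn, fn = 60, 10, 56850, 40

accuracy = (tp + tn) / (tp + fp + tn + fn)
precision = tp / (tp + fp)   # of the transactions flagged as fraud, how many really were fraud
recall = tp / (tp + fn)      # of the actual fraud cases, how many were caught

print('Accuracy : {:.4f}'.format(accuracy))
print('Precision: {:.4f}'.format(precision))
print('Recall   : {:.4f}'.format(recall))
# -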
# + [markdown] colab_type="text" id="aJC1booryouo" # ## Examine the confusion matrix # # You can use a [confusion matrix](https://developers.google.com/machine-learning/glossary/#confusion_matrix) to summarize the actual vs. predicted labels where the X axis is the predicted label and the Y axis is the actual label. # + colab={} colab_type="code" id="poh_hZngt2_9" predicted_labels = model.predict(test_features) cm = confusion_matrix(test_labels, np.round(predicted_labels)) plt.matshow(cm, alpha=0) plt.title('Confusion matrix') plt.ylabel('Actual label') plt.xlabel('Predicted label') for (i, j), z in np.ndenumerate(cm): plt.text(j, i, str(z), ha='center', va='center') plt.show() print('Legitimate Transactions Detected (True Negatives): ', cm[0][0]) print('Legitimate Transactions Incorrectly Detected (False Positives): ', cm[0][1]) print('Fraudulent Transactions Missed (False Negatives): ', cm[1][0]) print('Fraudulent Transactions Detected (True Positives): ', cm[1][1]) print('Total Fraudulent Transactions: ', np.sum(cm[1])) # + [markdown] colab_type="text" id="PyZtSr1v6L4t" # If the model had predicted everything perfectly, this would be a [diagonal matrix](https://en.wikipedia.org/wiki/Diagonal_matrix) where values off the main diagonal, indicating incorrect predictions, would be zero. In this case the matrix shows that you have relatively few false positives, meaning that there were relatively few legitimate transactions that were incorrectly flagged. However, you would likely want to have even fewer false negatives despite the cost of increasing the number of false positives. This trade off may be preferable because false negatives would allow fraudulent transactions to go through, whereas false positives may cause an email to be sent to a customer to ask them to verify their card activity. # + [markdown] colab_type="text" id="ePGp6GUE1WfH" # ## Using class weights for the loss function # # The goal is to identify fradulent transactions, but you don't have very many of those positive samples to work with, so you would want to have the classifier heavily weight the few examples that are available. You can do this by passing Keras weights for each class through a parameter. These will cause the model to "pay more attention" to examples from an under-represented class. # + colab={} colab_type="code" id="qjGWErngGny7" weight_for_0 = 1 / neg weight_for_1 = 1 / pos class_weight = {0: weight_for_0, 1: weight_for_1} print('Weight for class 0: {:.2e}'.format(weight_for_0)) print('Weight for class 1: {:.2e}'.format(weight_for_1)) # + [markdown] colab_type="text" id="Mk1OOE2ZSHzy" # ## Train a model with class weights # # Now try re-training and evaluating the model with class weights to see how that affects the predictions. # # Note: Using `class_weights` changes the range of the loss. This may affect the stability of the training depending on the optimizer. Optimizers who's step size is dependent on the magnitude of the gradient, like `optimizers.SGD`, may fail. The optimizer used here, `optimizers.Adam`, is unaffected by the scaling change. Also note that because of the weighting, the total losses are not comparable between the two models. 
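# + [markdown]
# As an aside (not used in the cells below): if the change in loss scale is a concern for the optimizer, one common variation is to rescale each class weight by `total / 2`, so that the weighted number of samples still sums to `total` and the overall loss magnitude stays roughly comparable to the unweighted model. A minimal sketch:

# +
# Rescaled class weights (illustrative alternative only; the training below keeps 1/neg and 1/pos).
scaled_weight_for_0 = (1 / neg) * (total / 2.0)
scaled_weight_for_1 = (1 / pos) * (total / 2.0)
print('Scaled weight for class 0: {:.2f}'.format(scaled_weight_for_0))
print('Scaled weight for class 1: {:.2f}'.format(scaled_weight_for_1))
# -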
# + colab={} colab_type="code" id="UJ589fn8ST3x" weighted_model = make_model() weighted_history = weighted_model.fit( train_features, train_labels, batch_size=BATCH_SIZE, epochs=EPOCHS, validation_data=(val_features, val_labels), class_weight=class_weight) # + colab={} colab_type="code" id="owKL2vdMBJr6" weighted_results = weighted_model.evaluate(test_features, test_labels) for name, value in zip(weighted_model.metrics_names, weighted_results): print(name, ': ', value) # + [markdown] colab_type="text" id="PTh1rtDn8r4-" # Here you can see that with class weights the accuracy and precision are lower because there are more false positives, but conversely the recall and AUC are higher because the model also found more true positives. Despite having lower overall accuracy, this approach may be better when considering the consequences of failing to identify fraudulent transactions driving the prioritization of recall. Depending on how bad false negatives are, you might use even more exaggerated weights to further improve recall while dropping precision. # + [markdown] colab_type="text" id="18VUHNc-UF5w" # ## Oversampling the minority class # # A related approach would be to resample the dataset by oversampling the minority class, which is the process of creating more positive samples using something like sklearn's [imbalanced-learn library](https://github.com/scikit-learn-contrib/imbalanced-learn). This library provides methods to create new positive samples by simply duplicating random existing samples, or by interpolating between them to generate synthetic samples using variations of [SMOTE](https://en.wikipedia.org/wiki/Oversampling_and_undersampling_in_data_analysis#Oversampling_techniques_for_classification_problems). TensorFlow also provides a way to do [Random Oversampling](https://www.tensorflow.org/api_docs/python/tf/data/experimental/sample_from_datasets). # + colab={} colab_type="code" id="jOlgyG1D6kCU" # with default args this will oversample the minority class to have an equal # number of observations smote = SMOTE() res_features, res_labels = smote.fit_sample(train_features, train_labels) res_neg, res_pos = np.bincount(res_labels) res_total = res_neg + res_pos print('{} positive samples out of {} training samples ({:.2f}% of total)'.format( res_pos, res_total, 100 * res_pos / res_total)) # + [markdown] colab_type="text" id="XZ1BvEpcBVHP" # ## Train and evaluate a model on the resampled data # # Now try training the model with the resampled data set instead of using class weights to see how these methods compare. # + colab={} colab_type="code" id="7Hz_-DuLA6Yd" resampled_model = make_model() resampled_history = resampled_model.fit( res_features, res_labels, batch_size=BATCH_SIZE, epochs=EPOCHS, validation_data=(val_features, val_labels)) # + colab={} colab_type="code" id="FO0mMOYUDWFk" resampled_results = resampled_model.evaluate(test_features, test_labels) for name, value in zip(resampled_model.metrics_names, resampled_results): print(name, ': ', value) # + [markdown] colab_type="text" id="w3CDoej5GOui" # This approach can be worth trying, but may not provide better results than using class weights because the synthetic examples may not accurately represent the underlying data. # + [markdown] colab_type="text" id="3o3f0ywl8uqW" # ## Applying this tutorial to your problem # # Imbalanced data classification is an inherantly difficult task since there are so few samples to learn from. 
You should always start with the data: do your best to collect as many samples as possible, and give substantial thought to which features may be relevant, so the model can get the most out of your minority class. At some point your model may struggle to improve and yield the results you want, so it is important to keep the context of the problem in mind and to weigh how costly your false positives and false negatives really are.
site/en/r2/tutorials/keras/imbalanced_data.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="No1KjCLfBjb6" colab_type="code" colab={} # #!pip install datadotworld # #!pip install datadotworld[pandas] # + id="zs5oq_p9CP87" colab_type="code" colab={} # #!dw configure # + id="S-malQQ8BX9g" colab_type="code" colab={} from google.colab import drive import pandas as pd import numpy as np import datadotworld as dw # + id="fBgDo6wMCoqw" colab_type="code" colab={} #drive.mount("/content/drive") # + id="BR-Wb5wBCvIo" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="a5129251-1664-4b3e-a74c-bad0b7c88560" executionInfo={"status": "ok", "timestamp": 1581538698892, "user_tz": -60, "elapsed": 2919, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00972280872958272022"}} # ls # + id="U6OOLFWSDVjL" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="e4725a56-c18b-4ff4-acd2-041b0b799516" executionInfo={"status": "ok", "timestamp": 1581538914901, "user_tz": -60, "elapsed": 749, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00972280872958272022"}} # cd dw_matrix # + id="l5kRBC4SD2yN" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="3a165364-a5e5-4bd5-910d-2f9c2aa851a5" executionInfo={"status": "ok", "timestamp": 1581539105091, "user_tz": -60, "elapsed": 2539, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00972280872958272022"}} # ls # + id="mg76xwqAEQOU" colab_type="code" colab={} # !mkdir data # + id="Bk20KniXFBqV" colab_type="code" colab={} # !echo 'data' > .gitignore # + id="e6N9POEaFhNp" colab_type="code" colab={} # !git add .gitignore # + id="eHcaA0RAFlPw" colab_type="code" colab={} data = dw.load_dataset('datafiniti/mens-shoe-prices') # + id="EN2qN5dNHEVO" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 122} outputId="865e7460-210a-4901-e987-4923064501f4" executionInfo={"status": "ok", "timestamp": 1581540510851, "user_tz": -60, "elapsed": 1839, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00972280872958272022"}} df = data.dataframes['7004_1'] df.shape # + id="SHIzZ66XHPgE" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 615} outputId="e0597d1f-c0ee-41a8-eb50-29adfb8de069" executionInfo={"status": "ok", "timestamp": 1581540514151, "user_tz": -60, "elapsed": 1162, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00972280872958272022"}} df.sample(5) # + id="Jrba7UPJHzba" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 241} outputId="c19459a7-9cd4-4e17-bb12-497fcefa727a" executionInfo={"status": "ok", "timestamp": 1581540517862, "user_tz": -60, "elapsed": 917, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00972280872958272022"}} df.columns # + id="HLivUcSOH5lu" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 102} outputId="e1e665fa-b935-450d-e6c9-fd2c5746b548" executionInfo={"status": "ok", "timestamp": 1581540531788, "user_tz": -60, "elapsed": 3938, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00972280872958272022"}} df.prices_currency.unique() # + id="booOvbNdIM6s" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 255} outputId="d2388944-1a46-46a0-8e4a-97f1b7db7c69" executionInfo={"status": "ok", "timestamp": 1581540536027, "user_tz": -60, "elapsed": 
998, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00972280872958272022"}} df.prices_currency.value_counts() # + id="NjPJCf50KYi-" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 255} outputId="b24ce506-92b3-4b60-c2f4-35ae790871ac" executionInfo={"status": "ok", "timestamp": 1581540550887, "user_tz": -60, "elapsed": 917, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00972280872958272022"}} df.prices_currency.value_counts(normalize=True) # + id="shzGMCU0H-zw" colab_type="code" colab={} df_usd = df[df['prices_currency']=='USD'].copy() # + id="C034NdQ-IEuW" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="3d952744-e798-46cb-ef71-8faf96806a82" executionInfo={"status": "ok", "timestamp": 1581540634841, "user_tz": -60, "elapsed": 1167, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00972280872958272022"}} df_usd.shape # + id="rdEGFWqSIGBF" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 119} outputId="d598316f-1dbb-44b3-a6a4-f4999573fe28" executionInfo={"status": "ok", "timestamp": 1581540695302, "user_tz": -60, "elapsed": 732, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00972280872958272022"}} df_usd.prices_amountmin.head() # + id="hRzWgBXcK9nU" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 282} outputId="54f8249d-fdc9-43e1-e657-6aac790afc15" executionInfo={"status": "ok", "timestamp": 1581541716862, "user_tz": -60, "elapsed": 917, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00972280872958272022"}} df_usd['prices_amountmin'] = df_usd.prices_amountmin.astype(np.float) df_usd['prices_amountmin'].hist() # + id="0u8SGNCgLF6E" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="e4755a94-ea3d-413c-fbee-d4c65c9b126f" executionInfo={"status": "ok", "timestamp": 1581541832928, "user_tz": -60, "elapsed": 658, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00972280872958272022"}} # jak widać są wartości odstające, a więc należałoby je usunąć filter_max = np.percentile(df_usd['prices_amountmin'],99) filter_max # oznacz to, że 99% butów kosztule 895$ lub mniej # + id="5QrETCZTPM6Q" colab_type="code" colab={} df_usd_filter = df_usd[df_usd['prices_amountmin'] < filter_max] # + id="7aZnzM0DPsMG" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 282} outputId="459e7e79-19f4-4237-d862-e8d61fe4bf0b" executionInfo={"status": "ok", "timestamp": 1581541948318, "user_tz": -60, "elapsed": 588, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00972280872958272022"}} df_usd_filter.prices_amountmin.hist() # + id="x2JZbtgvPvkB" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 282} outputId="89ed2aeb-867f-4892-a0b3-aedd6ea5ab16" executionInfo={"status": "ok", "timestamp": 1581541965832, "user_tz": -60, "elapsed": 966, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00972280872958272022"}} df_usd_filter.prices_amountmin.hist(bins=100) #bins - zagęszcza przedziały cenowe - w tym przypadku krop 895/100 = 8.95 = 9 # + id="Xa4aCJ_HPzvq" colab_type="code" colab={}
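# Added sketch (illustration only): check how sensitive the price cut-off is to the
# chosen percentile, reusing the same column and the same np.percentile call as above.
for q in [90, 95, 99, 99.9]:
    print(q, np.percentile(df_usd['prices_amountmin'], q))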
matrix_one/day3.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import json import pickle import networkx as nx import chexmix.graph as graph from chexmix.utils import remove_symbols # + pycharm={"name": "#%%\n"} ## KPEB data load with open('../data/KPEB/KPEB_name_taxID.json', 'r') as f: kpeb_data = json.load(f) kpeb_data = {int(k): v for k,v in kpeb_data.items()} ##viridiplantae load with open('../data/viridiplanae.txt', 'rb') as f: viridiplantae = pickle.load(f) # - path = '../data/network/' keyword = 'podophyllum peltatum' parent_node_type = graph.TaxParentType.Genus # Genus or Family input_names = ['podophyllum peltatum', 'taxus cuspidata', 'dermal papilla'] # + pycharm={"name": "#%%\n"} biographs_of_keyword = [] # create keyword graph for input_name in input_names: file_name = path + remove_symbols(input_name) pubmed_graph = graph.PubMedGraph.from_keyword(keyword) article_ids = pubmed_graph.get_article_ids() pubtator_graph = graph.PubTatorGraph.from_article_ids(article_ids) tax_graph = graph.TaxonomyGraph.from_pubtator_bioentities(parent_node_type, pubtator_graph.get_bioentities('TAXO'), viridiplantae, 'KPEB', kpeb_data) mesh_graph = graph.MeSHGraph.from_pubtator_bioentities(pubtator_graph.get_bioentities('MESH'), ['D','C']) biograph_of_keyword = graph.BioGraph() biograph_of_keyword.add_edges_from([(keyword, root_node) for root_node in tax_graph.find_roots()]) mesh_entities_graph = mesh_graph.intersection(pubtator_graph) biograph_of_keyword.add_edges_from([(keyword, appeared_node) for appeared_node in mesh_entities_graph.nodes()]) biograph_of_keyword.nodes[keyword]['type'] = "Keyword" biograph_of_keyword.inherit_attr(tax_graph.union(mesh_graph)) nx.write_graphml(biograph_of_keyword, f'{file_name}.graphml') biographs_of_keyword.append(biograph_of_keyword) # + pycharm={"name": "#%%\n"} union_graph = biographs_of_keyword[0] intersect_graph = biographs_of_keyword[0] for g in biographs_of_keyword[1:]: union_graph.union(g) intersect_graph.intersection(g) # + pycharm={"name": "#%%\n"} union_graph.set_attribute('highlight', True, intersect_graph.nodes()) # - # bio graphs for input_name in input_names: file_name = path+remove_symbols(input_name) pubmed_graph = graph.PubMedGraph.from_keyword(keyword) article_ids = pubmed_graph.get_article_ids() pubtator_graph = graph.PubTatorGraph.from_article_ids(article_ids) bioentities = pubtator_graph.get_bioentities('TAXO') tax_graph = graph.TaxonomyGraph.from_pubtator_bioentities(parent_node_type, bioentities, viridiplantae, 'KPEB', kpeb_data) bioentities = pubtator_graph.get_bioentities('MESH') mesh_graph = graph.MeSHGraph.from_pubtator_bioentities(bioentities, ['D','C']) nx.write_graphml(pubmed_graph.to_graphml(), f'{file_name}_pubmed.graphml') nx.write_graphml(pubtator_graph, f'{file_name}_pubtator.graphml') nx.write_graphml(tax_graph, f'{file_name}_taxonomy.graphml') nx.write_graphml(mesh_graph, f'{file_name}_mesh.graphml') # + # threshold threshold = 3 # + pycharm={"name": "#%%\n"} sub_graph = tax_graph.remain_by_edge_types([graph.EdgeType.INCLUDES]) roots = sub_graph.find_roots() selected_roots = [root for root in roots if sub_graph.total_count(nx.descendants(sub_graph, root)) > threshold] sub_graphs = [sub_graph.subgraph_from_root(r) for r in selected_roots] trimmed_graph = nx.compose_all(sub_graphs) nx.write_graphml(trimmed_graph, f'./{input_name}_tax_thres_{threshold}.graphml')
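# Quick sanity check of the thresholding step above (added sketch, plain networkx calls only):
# how many root taxa survived the descendant-count threshold, and how large the trimmed graph is.
print(f'{len(selected_roots)} of {len(roots)} roots kept (threshold={threshold})')
print(f'trimmed graph: {trimmed_graph.number_of_nodes()} nodes, {trimmed_graph.number_of_edges()} edges')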
notebooks/create_graph.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np import pandas as pd import matplotlib.pyplot as plt import gpsro_tools # # Import MAM Colocations and ERA-5 all data maps # + mar_06 = np.load('/usb/ERA_5_sythetic_TLS_maps/ERA_5_occultation_colocated_TLS/march_2006_ERA_5_colocated_occultations.npy', allow_pickle=True) mar_07 = np.load('/usb/ERA_5_sythetic_TLS_maps/ERA_5_occultation_colocated_TLS/march_2007_ERA_5_colocated_occultations.npy', allow_pickle=True) mar_08 = np.load('/usb/ERA_5_sythetic_TLS_maps/ERA_5_occultation_colocated_TLS/march_2008_ERA_5_colocated_occultations.npy', allow_pickle=True) mar_09 = np.load('/usb/ERA_5_sythetic_TLS_maps/ERA_5_occultation_colocated_TLS/march_2009_ERA_5_colocated_occultations.npy', allow_pickle=True) mar_10 = np.load('/usb/ERA_5_sythetic_TLS_maps/ERA_5_occultation_colocated_TLS/march_2010_ERA_5_colocated_occultations.npy', allow_pickle=True) mar_11 = np.load('/usb/ERA_5_sythetic_TLS_maps/ERA_5_occultation_colocated_TLS/march_2011_ERA_5_colocated_occultations.npy', allow_pickle=True) mar_12 = np.load('/usb/ERA_5_sythetic_TLS_maps/ERA_5_occultation_colocated_TLS/march_2012_ERA_5_colocated_occultations.npy', allow_pickle=True) mar_13 = np.load('/usb/ERA_5_sythetic_TLS_maps/ERA_5_occultation_colocated_TLS/march_2013_ERA_5_colocated_occultations.npy', allow_pickle=True) mar_14 = np.load('/usb/ERA_5_sythetic_TLS_maps/ERA_5_occultation_colocated_TLS/march_2014_ERA_5_colocated_occultations.npy', allow_pickle=True) mar_15 = np.load('/usb/ERA_5_sythetic_TLS_maps/ERA_5_occultation_colocated_TLS/march_2015_ERA_5_colocated_occultations.npy', allow_pickle=True) mar_16 = np.load('/usb/ERA_5_sythetic_TLS_maps/ERA_5_occultation_colocated_TLS/march_2016_ERA_5_colocated_occultations.npy', allow_pickle=True) mar_17 = np.load('/usb/ERA_5_sythetic_TLS_maps/ERA_5_occultation_colocated_TLS/march_2017_ERA_5_colocated_occultations.npy', allow_pickle=True) mar_18 = np.load('/usb/ERA_5_sythetic_TLS_maps/ERA_5_occultation_colocated_TLS/march_2018_ERA_5_colocated_occultations.npy', allow_pickle=True) mar_19 = np.load('/usb/ERA_5_sythetic_TLS_maps/ERA_5_occultation_colocated_TLS/march_2019_ERA_5_colocated_occultations.npy', allow_pickle=True) mar_20 = np.load('/usb/ERA_5_sythetic_TLS_maps/ERA_5_occultation_colocated_TLS/march_2020_ERA_5_colocated_occultations.npy', allow_pickle=True) mar_combined = np.concatenate([mar_06, mar_07, mar_08, mar_09, mar_10, mar_11, mar_12, mar_13, mar_14, mar_15, mar_16, mar_17, mar_18, mar_19, mar_20]) colocations_df = pd.DataFrame(mar_combined, columns=['Day', 'Hour', 'Year', 'Lat', 'Lon', 'Temp']) colocations_df['Lon'] = colocations_df['Lon'] - 180 colocations_df_upper = colocations_df[colocations_df['Lon'] >= 0] colocations_df_lower = colocations_df[colocations_df['Lon'] < 0] colocations_df_upper['Lon'] = colocations_df_upper['Lon'] - 180 colocations_df_lower['Lon'] = colocations_df_lower['Lon'] + 180 colocations_mar_df = pd.concat([colocations_df_upper, colocations_df_lower]) colocations_mar_df['Year'] = colocations_mar_df['Year'].astype(int) ####################################################################################################### era_5_mar_06_5x10 = np.load('../../ERA_5_monthly_TLS_maps/march_2006_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_mar_07_5x10 = 
np.load('../../ERA_5_monthly_TLS_maps/march_2007_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_mar_08_5x10 = np.load('../../ERA_5_monthly_TLS_maps/march_2008_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_mar_09_5x10 = np.load('../../ERA_5_monthly_TLS_maps/march_2009_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_mar_10_5x10 = np.load('../../ERA_5_monthly_TLS_maps/march_2010_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_mar_11_5x10 = np.load('../../ERA_5_monthly_TLS_maps/march_2011_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_mar_12_5x10 = np.load('../../ERA_5_monthly_TLS_maps/march_2012_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_mar_13_5x10 = np.load('../../ERA_5_monthly_TLS_maps/march_2013_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_mar_14_5x10 = np.load('../../ERA_5_monthly_TLS_maps/march_2014_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_mar_15_5x10 = np.load('../../ERA_5_monthly_TLS_maps/march_2015_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_mar_16_5x10 = np.load('../../ERA_5_monthly_TLS_maps/march_2016_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_mar_17_5x10 = np.load('../../ERA_5_monthly_TLS_maps/march_2017_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_mar_18_5x10 = np.load('../../ERA_5_monthly_TLS_maps/march_2018_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_mar_19_5x10 = np.load('../../ERA_5_monthly_TLS_maps/march_2019_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_mar_20_5x10 = np.load('../../ERA_5_monthly_TLS_maps/march_2020_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_box_mar_combined = np.concatenate([era_5_mar_06_5x10, era_5_mar_07_5x10, era_5_mar_08_5x10, era_5_mar_09_5x10, era_5_mar_10_5x10, era_5_mar_11_5x10, era_5_mar_12_5x10, era_5_mar_13_5x10, era_5_mar_14_5x10, era_5_mar_15_5x10, era_5_mar_16_5x10, era_5_mar_17_5x10, era_5_mar_18_5x10, era_5_mar_19_5x10, era_5_mar_20_5x10]) box_mar_df = pd.DataFrame(era_box_mar_combined, columns=['Day', 'Hour', 'Year', 'Lat', 'Lon', 'Temp']) # + apr_06 = np.load('/usb/ERA_5_sythetic_TLS_maps/ERA_5_occultation_colocated_TLS/april_2006_ERA_5_colocated_occultations.npy', allow_pickle=True) apr_07 = np.load('/usb/ERA_5_sythetic_TLS_maps/ERA_5_occultation_colocated_TLS/april_2007_ERA_5_colocated_occultations.npy', allow_pickle=True) apr_08 = np.load('/usb/ERA_5_sythetic_TLS_maps/ERA_5_occultation_colocated_TLS/april_2008_ERA_5_colocated_occultations.npy', allow_pickle=True) apr_09 = np.load('/usb/ERA_5_sythetic_TLS_maps/ERA_5_occultation_colocated_TLS/april_2009_ERA_5_colocated_occultations.npy', allow_pickle=True) apr_10 = np.load('/usb/ERA_5_sythetic_TLS_maps/ERA_5_occultation_colocated_TLS/april_2010_ERA_5_colocated_occultations.npy', allow_pickle=True) apr_11 = np.load('/usb/ERA_5_sythetic_TLS_maps/ERA_5_occultation_colocated_TLS/april_2011_ERA_5_colocated_occultations.npy', allow_pickle=True) apr_12 = np.load('/usb/ERA_5_sythetic_TLS_maps/ERA_5_occultation_colocated_TLS/april_2012_ERA_5_colocated_occultations.npy', allow_pickle=True) apr_13 = np.load('/usb/ERA_5_sythetic_TLS_maps/ERA_5_occultation_colocated_TLS/april_2013_ERA_5_colocated_occultations.npy', allow_pickle=True) apr_14 = np.load('/usb/ERA_5_sythetic_TLS_maps/ERA_5_occultation_colocated_TLS/april_2014_ERA_5_colocated_occultations.npy', allow_pickle=True) apr_15 = 
np.load('/usb/ERA_5_sythetic_TLS_maps/ERA_5_occultation_colocated_TLS/april_2015_ERA_5_colocated_occultations.npy', allow_pickle=True) apr_16 = np.load('/usb/ERA_5_sythetic_TLS_maps/ERA_5_occultation_colocated_TLS/april_2016_ERA_5_colocated_occultations.npy', allow_pickle=True) apr_17 = np.load('/usb/ERA_5_sythetic_TLS_maps/ERA_5_occultation_colocated_TLS/april_2017_ERA_5_colocated_occultations.npy', allow_pickle=True) apr_18 = np.load('/usb/ERA_5_sythetic_TLS_maps/ERA_5_occultation_colocated_TLS/april_2018_ERA_5_colocated_occultations.npy', allow_pickle=True) apr_19 = np.load('/usb/ERA_5_sythetic_TLS_maps/ERA_5_occultation_colocated_TLS/april_2019_ERA_5_colocated_occultations.npy', allow_pickle=True) apr_20 = np.load('/usb/ERA_5_sythetic_TLS_maps/ERA_5_occultation_colocated_TLS/april_2020_ERA_5_colocated_occultations.npy', allow_pickle=True) apr_combined = np.concatenate([apr_06, apr_07, apr_08, apr_09, apr_10, apr_11, apr_12, apr_13, apr_14, apr_15, apr_16, apr_17, apr_18, apr_19, apr_20]) colocations_df = pd.DataFrame(apr_combined, columns=['Day', 'Hour', 'Year', 'Lat', 'Lon', 'Temp']) colocations_df['Lon'] = colocations_df['Lon'] - 180 colocations_df_upper = colocations_df[colocations_df['Lon'] >= 0] colocations_df_lower = colocations_df[colocations_df['Lon'] < 0] colocations_df_upper['Lon'] = colocations_df_upper['Lon'] - 180 colocations_df_lower['Lon'] = colocations_df_lower['Lon'] + 180 colocations_apr_df = pd.concat([colocations_df_upper, colocations_df_lower]) colocations_apr_df['Year'] = colocations_apr_df['Year'].astype(int) ####################################################################################################### era_5_apr_06_5x10 = np.load('../../ERA_5_monthly_TLS_maps/april_2006_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_apr_07_5x10 = np.load('../../ERA_5_monthly_TLS_maps/april_2007_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_apr_08_5x10 = np.load('../../ERA_5_monthly_TLS_maps/april_2008_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_apr_09_5x10 = np.load('../../ERA_5_monthly_TLS_maps/april_2009_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_apr_10_5x10 = np.load('../../ERA_5_monthly_TLS_maps/april_2010_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_apr_11_5x10 = np.load('../../ERA_5_monthly_TLS_maps/april_2011_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_apr_12_5x10 = np.load('../../ERA_5_monthly_TLS_maps/april_2012_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_apr_13_5x10 = np.load('../../ERA_5_monthly_TLS_maps/april_2013_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_apr_14_5x10 = np.load('../../ERA_5_monthly_TLS_maps/april_2014_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_apr_15_5x10 = np.load('../../ERA_5_monthly_TLS_maps/april_2015_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_apr_16_5x10 = np.load('../../ERA_5_monthly_TLS_maps/april_2016_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_apr_17_5x10 = np.load('../../ERA_5_monthly_TLS_maps/april_2017_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_apr_18_5x10 = np.load('../../ERA_5_monthly_TLS_maps/april_2018_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_apr_19_5x10 = np.load('../../ERA_5_monthly_TLS_maps/april_2019_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_apr_20_5x10 = 
np.load('../../ERA_5_monthly_TLS_maps/april_2020_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_box_apr_combined = np.concatenate([era_5_apr_06_5x10, era_5_apr_07_5x10, era_5_apr_08_5x10, era_5_apr_09_5x10, era_5_apr_10_5x10, era_5_apr_11_5x10, era_5_apr_12_5x10, era_5_apr_13_5x10, era_5_apr_14_5x10, era_5_apr_15_5x10, era_5_apr_16_5x10, era_5_apr_17_5x10, era_5_apr_18_5x10, era_5_apr_19_5x10, era_5_apr_20_5x10]) box_apr_df = pd.DataFrame(era_box_apr_combined, columns=['Day', 'Hour', 'Year', 'Lat', 'Lon', 'Temp']) # + may_06 = np.load('/usb/ERA_5_sythetic_TLS_maps/ERA_5_occultation_colocated_TLS/may_2006_ERA_5_colocated_occultations.npy', allow_pickle=True) may_07 = np.load('/usb/ERA_5_sythetic_TLS_maps/ERA_5_occultation_colocated_TLS/may_2007_ERA_5_colocated_occultations.npy', allow_pickle=True) may_08 = np.load('/usb/ERA_5_sythetic_TLS_maps/ERA_5_occultation_colocated_TLS/may_2008_ERA_5_colocated_occultations.npy', allow_pickle=True) may_09 = np.load('/usb/ERA_5_sythetic_TLS_maps/ERA_5_occultation_colocated_TLS/may_2009_ERA_5_colocated_occultations.npy', allow_pickle=True) may_10 = np.load('/usb/ERA_5_sythetic_TLS_maps/ERA_5_occultation_colocated_TLS/may_2010_ERA_5_colocated_occultations.npy', allow_pickle=True) may_11 = np.load('/usb/ERA_5_sythetic_TLS_maps/ERA_5_occultation_colocated_TLS/may_2011_ERA_5_colocated_occultations.npy', allow_pickle=True) may_12 = np.load('/usb/ERA_5_sythetic_TLS_maps/ERA_5_occultation_colocated_TLS/may_2012_ERA_5_colocated_occultations.npy', allow_pickle=True) may_13 = np.load('/usb/ERA_5_sythetic_TLS_maps/ERA_5_occultation_colocated_TLS/may_2013_ERA_5_colocated_occultations.npy', allow_pickle=True) may_14 = np.load('/usb/ERA_5_sythetic_TLS_maps/ERA_5_occultation_colocated_TLS/may_2014_ERA_5_colocated_occultations.npy', allow_pickle=True) may_15 = np.load('/usb/ERA_5_sythetic_TLS_maps/ERA_5_occultation_colocated_TLS/may_2015_ERA_5_colocated_occultations.npy', allow_pickle=True) may_16 = np.load('/usb/ERA_5_sythetic_TLS_maps/ERA_5_occultation_colocated_TLS/may_2016_ERA_5_colocated_occultations.npy', allow_pickle=True) may_17 = np.load('/usb/ERA_5_sythetic_TLS_maps/ERA_5_occultation_colocated_TLS/may_2017_ERA_5_colocated_occultations.npy', allow_pickle=True) may_18 = np.load('/usb/ERA_5_sythetic_TLS_maps/ERA_5_occultation_colocated_TLS/may_2018_ERA_5_colocated_occultations.npy', allow_pickle=True) may_19 = np.load('/usb/ERA_5_sythetic_TLS_maps/ERA_5_occultation_colocated_TLS/may_2019_ERA_5_colocated_occultations.npy', allow_pickle=True) may_20 = np.load('/usb/ERA_5_sythetic_TLS_maps/ERA_5_occultation_colocated_TLS/may_2020_ERA_5_colocated_occultations.npy', allow_pickle=True) may_combined = np.concatenate([may_06, may_07, may_08, may_09, may_10, may_11, may_12, may_13, may_14, may_15, may_16, may_17, may_18, may_19, may_20]) colocations_df = pd.DataFrame(may_combined, columns=['Day', 'Hour', 'Year', 'Lat', 'Lon', 'Temp']) colocations_df['Lon'] = colocations_df['Lon'] - 180 colocations_df_upper = colocations_df[colocations_df['Lon'] >= 0] colocations_df_lower = colocations_df[colocations_df['Lon'] < 0] colocations_df_upper['Lon'] = colocations_df_upper['Lon'] - 180 colocations_df_lower['Lon'] = colocations_df_lower['Lon'] + 180 colocations_may_df = pd.concat([colocations_df_upper, colocations_df_lower]) colocations_may_df['Year'] = colocations_may_df['Year'].astype(int) ####################################################################################################### era_5_may_06_5x10 = 
np.load('../../ERA_5_monthly_TLS_maps/may_2006_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_may_07_5x10 = np.load('../../ERA_5_monthly_TLS_maps/may_2007_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_may_08_5x10 = np.load('../../ERA_5_monthly_TLS_maps/may_2008_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_may_09_5x10 = np.load('../../ERA_5_monthly_TLS_maps/may_2009_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_may_10_5x10 = np.load('../../ERA_5_monthly_TLS_maps/may_2010_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_may_11_5x10 = np.load('../../ERA_5_monthly_TLS_maps/may_2011_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_may_12_5x10 = np.load('../../ERA_5_monthly_TLS_maps/may_2012_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_may_13_5x10 = np.load('../../ERA_5_monthly_TLS_maps/may_2013_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_may_14_5x10 = np.load('../../ERA_5_monthly_TLS_maps/may_2014_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_may_15_5x10 = np.load('../../ERA_5_monthly_TLS_maps/may_2015_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_may_16_5x10 = np.load('../../ERA_5_monthly_TLS_maps/may_2016_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_may_17_5x10 = np.load('../../ERA_5_monthly_TLS_maps/may_2017_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_may_18_5x10 = np.load('../../ERA_5_monthly_TLS_maps/may_2018_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_may_19_5x10 = np.load('../../ERA_5_monthly_TLS_maps/may_2019_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_5_may_20_5x10 = np.load('../../ERA_5_monthly_TLS_maps/may_2020_ERA_5_daily_zonal_mean_TLS_map_5_10.npy', allow_pickle=True) era_box_may_combined = np.concatenate([era_5_may_06_5x10, era_5_may_07_5x10, era_5_may_08_5x10, era_5_may_09_5x10, era_5_may_10_5x10, era_5_may_11_5x10, era_5_may_12_5x10, era_5_may_13_5x10, era_5_may_14_5x10, era_5_may_15_5x10, era_5_may_16_5x10, era_5_may_17_5x10, era_5_may_18_5x10, era_5_may_19_5x10, era_5_may_20_5x10]) box_may_df = pd.DataFrame(era_box_may_combined, columns=['Day', 'Hour', 'Year', 'Lat', 'Lon', 'Temp']) # - # # Begin Processing of MAM Colocations # + daily_era5_box_removal_mar = gpsro_tools.background_and_bias_remover(colocations_mar_df, box_mar_df) daily_era5_box_removal_apr = gpsro_tools.background_and_bias_remover(colocations_apr_df, box_apr_df) daily_era5_box_removal_may = gpsro_tools.background_and_bias_remover(colocations_may_df, box_may_df) daily_era5_no_removal = pd.concat([colocations_mar_df, colocations_apr_df, colocations_may_df]) daily_era5_box_removal = pd.concat([daily_era5_box_removal_mar, daily_era5_box_removal_apr, daily_era5_box_removal_may]) # + daily_era5_no_removal['latbin'] = daily_era5_no_removal.Lat.map(gpsro_tools.to_bin_lat) daily_era5_no_removal['lonbin'] = daily_era5_no_removal.Lon.map(gpsro_tools.to_bin_lon) final_hour_box = gpsro_tools.box_mean_remover(daily_era5_box_removal) final_hour_map = gpsro_tools.box_mean_remover(daily_era5_no_removal) diurnal_cycles_5_10_mean_removed_by_lats, diurnal_cycles_5_10_mean_removed_by_boxes = gpsro_tools.diurnal_binner(final_hour_box) diurnal_cycles_no_mean_removed_by_lats, diurnal_cycles_no_mean_removed_by_boxes = gpsro_tools.diurnal_binner(final_hour_map) # - np.save('MAM_colocations_5x10_boxes', diurnal_cycles_5_10_mean_removed_by_boxes) 
np.save('MAM_colocations_5x10_no_removal', diurnal_cycles_no_mean_removed_by_boxes)
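# The fifteen nearly identical `np.load` calls per month above follow a regular file-name pattern, so the same arrays can also be built with a short loop. The sketch below is only an illustration: it assumes the same directory layout as above and that every monthly file for 2006-2020 exists.

# +
def load_month_stacks(months, years=range(2006, 2021)):
    """Rebuild the concatenated colocation and ERA-5 box arrays for the given months."""
    coloc_dir = '/usb/ERA_5_sythetic_TLS_maps/ERA_5_occultation_colocated_TLS'
    box_dir = '../../ERA_5_monthly_TLS_maps'
    coloc, boxes = [], []
    for month in months:
        for year in years:
            coloc.append(np.load(f'{coloc_dir}/{month}_{year}_ERA_5_colocated_occultations.npy',
                                 allow_pickle=True))
            boxes.append(np.load(f'{box_dir}/{month}_{year}_ERA_5_daily_zonal_mean_TLS_map_5_10.npy',
                                 allow_pickle=True))
    return np.concatenate(coloc), np.concatenate(boxes)

# Example, equivalent to the March stacks built above:
# mar_combined, era_box_mar_combined = load_month_stacks(['march'])
# -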
SONnotebooks/.ipynb_checkpoints/SONcolocations-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # bootstrap_point632_score # An implementation of the .632 bootstrap to evaluate supervised learning algorithms. # > `from mlxtend.evaluate import bootstrap_point632_score` # ## Overview # Originally, the bootstrap method aims to determine the statistical properties of an estimator when the underlying distribution was unknown and additional samples are not available. Now, in order to exploit this method for the evaluation of predictive models, such as hypotheses for classification and regression, we may prefer a slightly different approach to bootstrapping using the so-called Out-Of-Bag (OOB) or Leave-One-Out Bootstrap (LOOB) technique. Here, we use out-of-bag samples as test sets for evaluation instead of evaluating the model on the training data. Out-of-bag samples are the unique sets of instances that are not used for model fitting as shown in the figure below [1]. # # ![](BootstrapOutOfBag_files/bootstrap_concept.png) # # # The figure above illustrates how three random bootstrap samples drawn from an exemplary ten-sample dataset ($X_1,X_2, ..., X_{10}$) and their out-of-bag sample for testing may look like. In practice, <NAME> and <NAME> recommend drawing 50 to 200 bootstrap samples as being sufficient for reliable estimates [2]. # # # ### .632 Bootstrap # # # In 1983, <NAME> described the *.632 Estimate*, a further improvement to address the pessimistic bias of the bootstrap cross-validation approach described above [3]. The pessimistic bias in the "classic" bootstrap method can be attributed to the fact that the bootstrap samples only contain approximately 63.2% of the unique samples from the original dataset. For instance, we can compute the probability that a given sample from a dataset of size *n* is *not* drawn as a bootstrap sample as # # $$P (\text{not chosen}) = \bigg(1 - \frac{1}{n}\bigg)^n,$$ # # which is asymptotically equivalent to $\frac{1}{e} \approx 0.368$ as $n \rightarrow \infty.$ # # Vice versa, we can then compute the probability that a sample *is* chosen as $P (\text{chosen}) = 1 - \bigg(1 - \frac{1}{n}\bigg)^n \approx 0.632$ for reasonably large datasets, so that we'd select approximately $0.632 \times n$ uniques samples as bootstrap training sets and reserve $ 0.368 \times n $ out-of-bag samples for testing in each iteration. # # # Now, to address the bias that is due to this the sampling with replacement, <NAME> proposed the *.632 Estimate* that we mentioned earlier, which is computed via the following equation: # # $$\text{ACC}_{boot} = \frac{1}{b} \sum_{i=1}^b \big(0.632 \cdot \text{ACC}_{h, i} + 0.368 \cdot \text{ACC}_{r, i}\big), $$ # # where $\text{ACC}_{r, i}$ is the resubstitution accuracy, and $\text{ACC}_{h, i}$ is the accuracy on the out-of-bag sample. # # ### .632+ Bootstrap # # Now, while the *.632 Boostrap* attempts to address the pessimistic bias of the estimate, an optimistic bias may occur with models that tend to overfit so that <NAME> and <NAME> proposed the *The .632+ Bootstrap Method* (Efron and Tibshirani, 1997). 
Instead of using a fixed "weight" $\omega = 0.632$ in
#
# $$
# ACC_{\text{boot}} = \frac{1}{b} \sum_{i=1}^b \big(\omega \cdot \text{ACC}_{h, i} + (1-\omega) \cdot \text{ACC}_{r, i} \big),
# $$
#
# we compute the weight $\omega$ as
#
# $$\omega = \frac{0.632}{1 - 0.368 \times R},$$
#
# where *R* is the *relative overfitting rate*
#
# $$R = \frac{(-1) \times (\text{ACC}_{h, i} - \text{ACC}_{r, i})}{\gamma - (1 -\text{ACC}_{h, i})}.$$
#
# (Since we are plugging $\omega$ into the equation for computing $\text{ACC}_{boot}$ that we defined above, $\text{ACC}_{h, i}$ and $\text{ACC}_{r, i}$ still refer to the out-of-bag and resubstitution accuracy estimates in the *i*th bootstrap round, respectively.)
#
# Further, we need to determine the *no-information rate* $\gamma$ in order to compute *R*. For instance, we can compute $\gamma$ by fitting a model to a dataset that contains all possible combinations between samples $x_{i'}$ and target class labels $y_{i}$ &mdash; we pretend that the observations and class labels are independent:
#
# $$\gamma = \frac{1}{n^2} \sum_{i=1}^{n} \sum_{i'=1}^{n} L(y_{i}, f(x_{i'})).$$
#
# Alternatively, we can estimate the no-information rate $\gamma$ as follows:
#
# $$\gamma = \sum_{k=1}^K p_k (1 - q_k),$$
#
# where $p_k$ is the proportion of class $k$ samples observed in the dataset, and $q_k$ is the proportion of class $k$ samples that the classifier predicts in the dataset.
# ### References
#
# - [1] https://sebastianraschka.com/blog/2016/model-evaluation-selection-part2.html
# - [2] <NAME>, and <NAME>. An introduction to the bootstrap. CRC press, 1994.
# - [3] Efron, Bradley. 1983. “Estimating the Error Rate of a Prediction Rule: Improvement on Cross-Validation.” Journal of the American Statistical Association 78 (382): 316. doi:10.2307/2288636.
# - [4] Efron, Bradley, and <NAME>. 1997. “Improvements on Cross-Validation: The .632+ Bootstrap Method.” Journal of the American Statistical Association 92 (438): 548. doi:10.2307/2965703.
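# Before turning to the library itself, here is a quick numeric illustration of the formulas above for a single bootstrap round. The accuracies and no-information rate below are made up for illustration only; this is not an mlxtend function, and the library's actual implementation may differ in details.

# +
# Illustrative-only computation of the .632 and .632+ estimates for one bootstrap round.
acc_h = 0.80    # out-of-bag (test) accuracy, ACC_h
acc_r = 0.99    # resubstitution (training) accuracy, ACC_r
gamma = 0.50    # assumed no-information rate

# Classic .632 estimate with the fixed weight 0.632:
acc_632 = 0.632 * acc_h + 0.368 * acc_r

# .632+ estimate: relative overfitting rate R and adaptive weight omega:
R = -(acc_h - acc_r) / (gamma - (1.0 - acc_h))
omega = 0.632 / (1.0 - 0.368 * R)
acc_632plus = omega * acc_h + (1.0 - omega) * acc_r

print('.632  estimate: %.4f' % acc_632)
print('R = %.4f, omega = %.4f' % (R, omega))
print('.632+ estimate: %.4f' % acc_632plus)
# -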
# ## Example 1 -- Evaluating the predictive performance of a model via the classic out-of-bag Bootstrap # The `bootstrap_point632_score` function mimics the behavior of scikit-learn's `cross_val_score, and a typically usage example is shown below: # + from sklearn import datasets from sklearn.tree import DecisionTreeClassifier from mlxtend.evaluate import bootstrap_point632_score import numpy as np iris = datasets.load_iris() X = iris.data y = iris.target tree = DecisionTreeClassifier(random_state=0) # Model accuracy scores = bootstrap_point632_score(tree, X, y, method='oob') acc = np.mean(scores) print('Accuracy: %.2f%%' % (100*acc)) # Confidence interval lower = np.percentile(scores, 2.5) upper = np.percentile(scores, 97.5) print('95%% Confidence interval: [%.2f, %.2f]' % (100*lower, 100*upper)) # - # ## Example 2 -- Evaluating the predictive performance of a model via the .632 Bootstrap # + from sklearn import datasets from sklearn.tree import DecisionTreeClassifier from mlxtend.evaluate import bootstrap_point632_score import numpy as np iris = datasets.load_iris() X = iris.data y = iris.target tree = DecisionTreeClassifier(random_state=0) # Model accuracy scores = bootstrap_point632_score(tree, X, y) acc = np.mean(scores) print('Accuracy: %.2f%%' % (100*acc)) # Confidence interval lower = np.percentile(scores, 2.5) upper = np.percentile(scores, 97.5) print('95%% Confidence interval: [%.2f, %.2f]' % (100*lower, 100*upper)) # - # ## Example 3 -- Evaluating the predictive performance of a model via the .632+ Bootstrap # + from sklearn import datasets from sklearn.tree import DecisionTreeClassifier from mlxtend.evaluate import bootstrap_point632_score import numpy as np iris = datasets.load_iris() X = iris.data y = iris.target tree = DecisionTreeClassifier(random_state=0) # Model accuracy scores = bootstrap_point632_score(tree, X, y, method='.632+') acc = np.mean(scores) print('Accuracy: %.2f%%' % (100*acc)) # Confidence interval lower = np.percentile(scores, 2.5) upper = np.percentile(scores, 97.5) print('95%% Confidence interval: [%.2f, %.2f]' % (100*lower, 100*upper)) # - # ## API with open('../../api_modules/mlxtend.evaluate/bootstrap_point632_score.md', 'r') as f: s = f.read() print(s)
mlxtend/docs/sources/user_guide/evaluate/bootstrap_point632_score.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import traitlets class MyObject(traitlets.HasTraits): name = traitlets.Unicode("unnamed") age = traitlets.Int(0) my_obj = MyObject() my_obj.name, my_obj.age obj2 = MyObject(name = "Weezer", age = 26) # + def name_changed(change): print(change['new']) my_obj.observe(name_changed, ['name']) # - my_obj.name = "<NAME>" # + def name_changed2(change): print("I just changed '%s' to '%s'" % (change['old'], change['new'])) obj2.observe(name_changed2, ['name']) # - obj2.name = "<NAME>" obj3 = MyObject() # + def trait_change(change): print("Change dictionary", change) obj3.observe(trait_change) # - obj3.name = "Something Else" obj3.age = 10 "String formatting is sticking stuff %s the middle: %0.5f" % ("in", 10.135) "{2} nicer way {0} to do this, {1}".format("is", "see", "The") # + import ipywidgets @ipywidgets.interact(name = ['Weezer', '<NAME>', '<NAME>']) def print_bandname(name): print(name) # - itext = ipywidgets.IntText() # added: from IPython.display import display display(itext) display(itext) itext.value itext.value = 10 ip = ipywidgets.IntProgress() display(ip) ip.value = 90 irange = ipywidgets.IntRangeSlider(min = -10, max = 10, step = 1) display(irange) irange.value m = MyObject(name = "Weezer", age = 26) l = ipywidgets.Label() ipywidgets.link( (m, "name"), (l, "value") ) display(l) m.name = "<NAME>" button1 = ipywidgets.Button(description = "Cow Clicker") display(button1) def say_click(event): print("Click.") button1.on_click(say_click) # + ta1 = ipywidgets.Textarea("Hi, this is a box of text. (1)") ta2 = ipywidgets.Textarea("Hi, this is a box of text. (2)") ta3 = ipywidgets.Textarea("Hi, this is a box of text. (3)") ta4 = ipywidgets.Textarea("Hi, this is a box of text. 
(4)") # - display(ta1) tabs = ipywidgets.Tab([ta1, ta2, ta3, ta4]) display(tabs) acc = ipywidgets.Accordion([ta1, ta2, ta3, ta4]) display(acc) ipywidgets.HBox([ta1, ta2, ta3, ta4]) ipywidgets.VBox([ta1, ta2, ta3, ta4]) ipywidgets.VBox( [ipywidgets.HBox([ta1, ta2]), ipywidgets.Label("Hello there!"), ipywidgets.HBox([ta3, ta4])] ) ip = ipywidgets.IntProgress() button_plus = ipywidgets.Button(description = "+10") button_minus = ipywidgets.Button(description = "-10") ip.value def click_down(event): ip.value -= 10 button_minus.on_click(click_down) def click_up(event): ip.value += 10 button_plus.on_click(click_up) ipywidgets.HBox([button_minus, ip, button_plus]) cp = ipywidgets.ColorPicker() islider = ipywidgets.IntSlider(min = 0, max = 10, step = 1, orientation = 'vertical') islider.style.handle_color = "#750075" islider ipywidgets.link( (cp, 'value'), (islider.style, 'handle_color')) ipywidgets.VBox([cp, islider]) ipywidgets.DatePicker() ipywidgets.Play() play = ipywidgets.Play(interval = 50, value = 50, min = 1, max = 100, step = 1, description = "Press Play") slider = ipywidgets.IntSlider() ipywidgets.link((play, 'min'), (slider, 'min')) ipywidgets.link((play, 'max'), (slider, 'max')) ipywidgets.link((play, 'value'), (slider, 'value')) ipywidgets.HBox([play, slider]) slider import bqplot import numpy as np x = np.arange(100) y = np.random.random(100) + 5 x_sc = bqplot.LinearScale() y_sc = bqplot.LinearScale() lines = bqplot.Lines(x = x, y = y, scales = {'x': x_sc, 'y': y_sc}) ax_x = bqplot.Axis(scale = x_sc, label = 'X Value') ax_y = bqplot.Axis(scale = y_sc, label = 'Y Value', orientation = 'vertical') fig = bqplot.Figure(marks = [lines], axes = [ax_x, ax_y]) display(fig) pz = bqplot.interacts.PanZoom( scales = {'x': [x_sc], 'y': [y_sc]}) fig = bqplot.Figure(marks = [lines], axes = [ax_x, ax_y], interaction = pz) display(fig) x = np.mgrid[0.0:10.0:100j] y1 = x * 2 y2 = x**2 x_sc = bqplot.LinearScale(min = 0, max = 10) y_sc1 = bqplot.LinearScale(min = 1, max = 20) y_sc2 = bqplot.LogScale(min = 1, max = 100) lines1 = bqplot.Lines(x = x, y = y1, scales = {'x': x_sc, 'y': y_sc1}) lines2 = bqplot.Lines(x = x, y = y2, scales = {'x': x_sc, 'y': y_sc2}) ax_x = bqplot.Axis(scale = x_sc, label = 'X Value') ax_y1 = bqplot.Axis(scale = y_sc1, label = 'Y1 Value', orientation = 'vertical') ax_y2 = bqplot.Axis(scale = y_sc2, label = 'Y2 Value', orientation = 'vertical', side = 'right') pz = bqplot.interacts.PanZoom(scales = {'x': [x_sc], 'y': [y_sc1, y_sc2]}) fig = bqplot.Figure(marks = [lines1, lines2], axes = [ax_x, ax_y1, ax_y2], interaction=pz) display(fig) import pandas as pd buildings = pd.read_csv("/Users/jillnaiman1/Downloads/building_inventory.csv", na_values = {'Year Acquired': 0, 'Year Constructed': 0, 'Square Footage': 0}) # + x_sc = bqplot.LinearScale() y_sc = bqplot.LinearScale() x_ax = bqplot.Axis(scale = x_sc, label = 'Year Constructed') y_ax = bqplot.Axis(scale = y_sc, label = 'Year Acquired', orientation = 'vertical') # - scatters = bqplot.Scatter(x = buildings['Year Constructed'], y = buildings['Year Acquired'], scales = {'x': x_sc, 'y': y_sc}) selector = bqplot.interacts.FastIntervalSelector( scale = x_sc, marks = [scatters]) fig = bqplot.Figure(marks = [scatters], axes = [x_ax, y_ax], interaction = selector) display(fig) selector.selected label = ipywidgets.Label("Total square footage: ") display(label) def on_selection_change(change): min_val, max_val = change['new'] selected = (buildings["Year Constructed"] < max_val) & (buildings["Year Constructed"] > min_val) total_sqft = 
buildings["Square Footage"][selected].sum() label.value = "Total square footage: %s" % (total_sqft) selector.observe(on_selection_change, ['selected']) lasso = bqplot.interacts.LassoSelector(scales = {'x': x_sc, 'y': y_sc}, marks = [scatters]) fig = bqplot.Figure(marks = [scatters], axes = [x_ax, y_ax], interaction = lasso) display(fig) # + x_sc = bqplot.LinearScale() y_sc = bqplot.LinearScale() x_ax = bqplot.Axis(scale = x_sc, label = 'Year Constructed') y_ax = bqplot.Axis(scale = y_sc, label = 'Year Acquired', orientation = 'vertical') tooltip = bqplot.Tooltip(fields = ["x", "y"]) scatters = bqplot.Scatter(x = buildings['Year Constructed'], y = buildings['Year Acquired'], scales = {'x': x_sc, 'y': y_sc}, tooltip = tooltip) selector = bqplot.interacts.FastIntervalSelector( scale = x_sc, marks = [scatters]) fig = bqplot.Figure(marks = [scatters], axes = [x_ax, y_ax], interaction = selector) tb = bqplot.Toolbar(figure=fig) display(ipywidgets.VBox([fig, tb])) # - scatters.y = scatters.y * 2 # + x_sc = bqplot.LinearScale() y_sc = bqplot.LinearScale() x_ax = bqplot.Axis(scale = x_sc) y_ax = bqplot.Axis(scale = y_sc, orientation = 'vertical') hist = bqplot.Hist(sample = buildings["Year Acquired"], scales = {'sample': x_sc, 'count': y_sc}, bins = 128, normalized = True, colors = ["#FFFFFF"]) hist2 = bqplot.Hist(sample = buildings["Year Acquired"], opacity = 0.1, normalized = True, scales = {'sample': x_sc, 'count': y_sc}, bins = 128) fig = bqplot.Figure(marks = [hist, hist2], axes = [x_ax, y_ax]) islider = ipywidgets.IntSlider(min = 8, max = 128, step = 1) ipywidgets.link((islider, 'value'), (hist, 'bins')) #cp = ipywidgets.ColorPicker() #ipywidgets.link((cp, 'value'), (hist, 'colors')) #ipywidgets.link((islider, 'value'), (hist2, 'bins')) # - display(ipywidgets.VBox([fig, islider])) hist2.selected
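# The commented-out lines a few cells above hint at wiring a color picker to the histogram. Since `hist.colors` expects a list of color strings while the picker produces a single value, a small observer (rather than a direct `link`) is one way to connect them. This is just a sketch reusing the `hist` and `fig` objects defined above:

# +
cp2 = ipywidgets.ColorPicker(value = "#FFFFFF")

def update_hist_color(change):
    # wrap the picked color in a list, as expected by the Hist mark
    hist.colors = [change['new']]

cp2.observe(update_hist_color, ['value'])
display(ipywidgets.VBox([cp2, fig]))
# -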
week06/_examples_week06.ipynb
# -*- coding: utf-8 -*-
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .r
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernel_info:
#     name: ir
#   kernelspec:
#     display_name: R
#     language: R
#     name: ir
# ---

# # Panel Data
#
# Economists mostly work with observational data.
# The data generation process is out of the researchers' control.
# If we only have a cross-sectional dataset at hand, it is difficult to control for heterogeneity among the individuals.
# Panel data, on the other hand, offer a chance to control for heterogeneity of some particular forms.
#
# A panel dataset tracks the same individuals across time $t=1,\ldots,T$.
# We assume the observations are independent across $i=1,\ldots,n$,
# while we allow some form of dependence within a group across
# $t=1,\ldots,T$ for the same $i$. We maintain the linear equation
# $$y_{it}=\beta_{1}+x_{it}\beta_{2}+u_{it},\ i=1,\ldots,n;t=1,\ldots,T\label{eq:basic_eq}$$
# where $u_{it}=\alpha_{i}+\epsilon_{it}$ is called the *composite error*.
# Note that $\alpha_{i}$ is the time-invariant unobserved heterogeneity,
# while $\epsilon_{it}$ varies across individuals and time periods.
#
# **Data Example** Below is a dataset from the [NBER-CES Manufacturing Industry Database](http://www.nber.org/nberces/).
# The data size is about 4M. Downloading would take up to a few minutes if the network is slow.
# Here we have saved the data in a csv file. The dataset contains annual information on 473 USA industries from 1958 to 2009.
# To get some idea of what a panel dataset looks like, we display a few rows and columns.

g0 <- read.csv("naics5809.csv")
g0[c(1:10, 50:60), 1:10]

# **Data Example**
# `install.packages("plm")` if you use the package [plm](http://cran.r-project.org/web/packages/plm/) for the first time. An introduction can be found [here](http://cran.r-project.org/web/packages/plm/vignettes/plm.pdf). Load the package.

library(plm)

g <- pdata.frame( g0, index = c("naics", "year") )

# +
# the regression equation
equation <- emp~invest+cap

# Nothing prevents us from running OLS.
g.ols <- lm(equation, data=g)
summary(g.ols)

# +
# The OLS coefficient estimates are exactly the same as the pooled OLS.
# The only difference in the summary is that the latter shows the panel structure
# of the data.

g.pool <- plm(equation,data=g,model="pooling")
summary(g.pool)
# -

# The most important techniques of panel data estimation are the fixed effect regression and the random effect regression. The asymptotic distributions of both estimators can be derived from knowledge about the OLS regression. In this sense, panel data estimation becomes an applied example of the theory that we have covered in this course. It highlights the fundamental role of theory in econometrics.

# ## Fixed Effect
#
# OLS is consistent for
# the linear projection model.
# Since $\alpha_i$ is unobservable, it is absorbed into the composite error
# $u_{it} = \alpha_i + \epsilon_{it}$. If
# $\mathrm{cov}\left(\alpha_{i},x_{it}\right)=0$, the OLS is consistent;
# otherwise the consistency breaks down. The fixed
# effect model allows $\alpha_{i}$ and $x_{it}$ to be arbitrarily
# correlated. The trick to regain consistency is to eliminate
# $\alpha_{i},i=1,\ldots,n$. The rest of this section develops the
# consistency and asymptotic distribution of the *within estimator*, the
# default fixed-effect (FE) estimator. The within estimator transforms the
# data by subtracting the corresponding group means from all the observable variables.
# Averaging the $T$ equations of the original regression for the
# same $i$, we have
# $$\overline{y}_{i}=\beta_{1}+\overline{x}_{i}\beta_{2}+\bar{u}_{i}=\beta_{1}+\overline{x}_{i}\beta_{2}+\alpha_{i}+\bar{\epsilon}_{i}.\label{eq:group_mean}$$
# where $\overline{y}_{i}=\frac{1}{T}\sum_{t=1}^{T}y_{it}$. Subtracting
# the averaged equation from the original equation gives
# $$\tilde{y}_{it}=\tilde{x}_{it}\beta_{2}+\tilde{\epsilon}_{it}\label{eq:FE_demean}$$
# where $\tilde{y}_{it}=y_{it}-\overline{y}_{i}$. We then run OLS with the
# demeaned data, and obtain the within estimator
# $$\widehat{\beta}_{2}^{FE}=\left(\tilde{X}'\tilde{X}\right)^{-1}\tilde{X}'\tilde{y},$$
# where $\tilde{y}=\left(y_{it}\right)_{i,t}$ stacks all the $nT$
# observations into a vector, and $\tilde{X}$ is similarly defined as an
# $nT\times K$ matrix, where $K$ is the dimension of $\beta_{2}$.
# We know that OLS would be consistent if
# $E\left[\tilde{\epsilon}_{it}|\tilde{x}_{it}\right]=0$. Below we
# provide a sufficient condition, which is often called *strict
# exogeneity*.
#
# **Assumption FE.1**
# $E\left[\epsilon_{it}|\alpha_{i},\mathbf{x}_{i}\right]=0$ where
# $\mathbf{x}_{i}=\left(x_{i1},\ldots,x_{iT}\right)$.
#
# Its strictness is relative to the contemporaneous exogeneity
# $E\left[\epsilon_{it}|\alpha_{i},x_{it}\right]=0$. FE.1 is more
# restrictive as it assumes that the error $\epsilon_{it}$ is mean
# independent of the past, present and future explanatory variables.
#
# When we talk about consistency in panel data, typically we are
# considering $n\to\infty$ while $T$ stays fixed. This asymptotic
# framework is appropriate for panel datasets with many individuals but
# only a few time periods.
#
# **Proposition** If FE.1 is satisfied, then $\widehat{\beta}_{2}^{FE}$ is consistent.
#
# The variance estimation for the FE estimator is a little bit tricky. We
# assume a homoskedasticity condition to simplify the calculation.
# Violation of this assumption changes the form of the asymptotic
# variance, but does not jeopardize the asymptotic normality.
#
# **Assumption FE.2**
# $\mathrm{var}\left(\epsilon_{i}|\alpha_{i},\mathbf{x}_{i}\right)=\sigma_{\epsilon}^{2}I_{T}$.
#
# Under FE.1 and FE.2,
# $\widehat{\sigma}_{\epsilon}^{2}=\frac{1}{n\left(T-1\right)}\sum_{i=1}^{n}\sum_{t=1}^{T}\widehat{\tilde{\epsilon}}_{it}^{2}$
# is a consistent estimator of $\sigma_{\epsilon}^{2}$, where
# $\widehat{\tilde{\epsilon}}_{it}=\tilde{y}_{it}-\tilde{x}_{it}\widehat{\beta}_{2}^{FE}$.
# Note that the denominator is $n\left(T-1\right)$, not $nT$.
# The necessity of adjusting the degrees of freedom can be easily seen from the FWL theorem:
# the FE estimator for the slope coefficient is numerically the same as
# its counterpart in the full regression with a dummy variable for each cross-sectional unit.
#
# If FE.1 and FE.2 are satisfied, then
# $$\left(\widehat{\sigma}_{\epsilon}^{2}\left(\tilde{X}'\tilde{X}\right)^{-1}\right)^{-1/2}\left(\widehat{\beta}_{2}^{FE}-\beta_{2}^{0}\right)\stackrel{d}{\to} N\left(0,I_{K}\right).$$
#
# We implicitly assume some regularity conditions that allow us to invoke
# a law of large numbers and a central limit theorem. We ignore those
# technical details here.
#
# It is important to notice that the within-group demeaning in FE eliminates
# all time-invariant explanatory variables, including the intercept.
# Therefore from FE we cannot obtain the coefficient estimates of these
# time-invariant variables.
#
# **Data Example** In reality we do not need to compute the estimator or the variance by hand.
# `R` handles them automatically.

g.fe <- plm(equation, data=g, model="within")
# statisticians call the FE estimator the 'within' estimator as it carries out
# a within-group transformation
summary(g.fe)

# **Publication Example** Lin, <NAME> (1992): [Rural Reforms and Agricultural
# Growth in China](http://www.jstor.org/stable/2117601), *The American
# Economic Review*, Vol.82, No.1, pp.34-51.
#
# The fundamental Chinese economic reform of 1978 started in the rural
# sector. The Chinese agricultural industry witnessed dramatic growth
# during 1978-1984. Output was several times higher than the average
# of the preceding period. It was debated whether the growth should be
# attributed to the household-responsibility system (HRS) reform or to other
# factors such as rising prices and inputs.
#
# Lin (1992) attempts to disentangle these factors using panel data of 28
# mainland provinces from 1970 to 1987. He estimates the following
# fixed-effect regression model by OLS.
#
# $$
# \begin{aligned}
# \ln Y_{it} & = \alpha_1 + \alpha_2
# \ln(\mathrm{Land}_{it}) + \alpha_3 \ln (\mathrm{Labor}_{it}) +
# \alpha_4 \ln (\mathrm{Capital}_{it}) + \alpha_5 \ln
# (\mathrm{Fert}_{it} ) + \alpha_6 \mathrm{HRS}_{it} \\
# & +
# \alpha_7 \mbox{MP}_{t-1}
# + \alpha_8 \mathrm{GP}_t + \alpha_9
# \mbox{NGCA}_{it} + \alpha_{10} \mbox{MCI}_{it} + \alpha_{11}
# T_t + \sum_{j=12}^{39} \alpha_{j} D_j +
# \epsilon_{it}.
# \end{aligned}$$
#
# Given the OLS estimates, he computes a serial correlation statistic \[last
# row, Table 5, p.43\], which suggests the presence of serial correlation.
# He then estimates the regression by GLS and the results are reported in
# Table 5 \[p.43, Columns (1) and (3)\]. The empirical findings are robust
# across several specifications. For example, Equation (1′) \[page 42\]
# adds time dummies to capture a possible time trend. The importance of
# HRS is supported across specifications by the highly significant
# coefficient. Based on the estimates, he also evaluates how much of the
# agricultural growth can be attributed to the reform
# and how much to the change in inputs.
#
# ## Random Effect
#
# The random effect estimator pursues efficiency in the knife-edge special
# case $\mathrm{cov}\left(\alpha_{i},x_{it}\right)=0$. As mentioned above,
# OLS is consistent when $\alpha_{i}$ and $x_{it}$ are uncorrelated.
# However, an inspection of the covariance matrix reveals that OLS is
# inefficient.
#
# The starting point is again the original model, but now we assume
#
# **Assumption RE.1**
# $E\left[\epsilon_{it}|\alpha_{i},\mathbf{x}_{i}\right]=0$ and
# $E\left[\alpha_{i}|\mathbf{x}_{i}\right]=0$.
#
# RE.1 obviously implies $\mathrm{cov}\left(\alpha_{i},x_{it}\right)=0$,
# so
# $$S=\mathrm{var}\left(u_{i}|\mathbf{x}_{i}\right)=\sigma_{\alpha}^{2}\mathbf{1}_{T}\mathbf{1}_{T}'+\sigma_{\epsilon}^{2}I_{T},\ \mbox{for all }i=1,\ldots,n.$$
# Because the covariance matrix is not a scalar multiple of the
# identity matrix, OLS is inefficient.
# As mentioned before, FE estimation kills all time-invariant regressors.
# In contrast, RE allows time-invariant explanatory variables. Let us
# rewrite the original equation as
# $$y_{it}=w_{it}\boldsymbol{\beta}+u_{it},$$ where
# $\boldsymbol{\beta}=\left(\beta_{1},\beta_{2}'\right)'$ and
# $w_{it}=\left(1,x_{it}\right)$ are $(K+1)$-dimensional vectors, i.e.,
# $\boldsymbol{\beta}$ is the parameter vector including the intercept, and
# $w_{it}$ collects the explanatory variables, including the constant.
# Had we
# known $S$, the GLS estimator would be
# $$\widehat{\boldsymbol{\beta}}^{RE}=\left(\sum_{i=1}^{n}\mathbf{w}_{i}'S^{-1}\mathbf{w}_{i}\right)^{-1}\sum_{i=1}^{n}\mathbf{w}_{i}'S^{-1}\mathbf{y}_{i}=\left(W'\mathbf{S}^{-1}W\right)^{-1}W'\mathbf{S}^{-1}y$$
# where $\mathbf{S}=I_{n}\otimes S$. ("$\otimes$" denotes the Kronecker
# product.) In practice, $\sigma_{\alpha}^{2}$ and $\sigma_{\epsilon}^{2}$
# in $S$ are unknown, so we seek consistent estimators. Again, we impose a
# simplifying assumption parallel to FE.2.
#
# **Assumption RE.2**
# $\mathrm{var}\left(\epsilon_{i}|\mathbf{x}_{i},\alpha_{i}\right)=\sigma_{\epsilon}^{2}I_{T}$
# and
# $\mathrm{var}\left(\alpha_{i}|\mathbf{x}_{i}\right)=\sigma_{\alpha}^{2}.$
#
# Under this assumption, we can consistently estimate the variance components from
# the residuals
# $\widehat{u}_{it}=y_{it}-w_{it}\widehat{\boldsymbol{\beta}}^{RE}$. That
# is
# $$\begin{aligned}
# \widehat{\sigma}_{u}^{2} & = \frac{1}{nT}\sum_{i=1}^{n}\sum_{t=1}^{T}\widehat{u}_{it}^{2},\\
# \widehat{\sigma}_{\alpha}^{2} & = \frac{1}{n}\sum_{i=1}^{n}\frac{1}{T\left(T-1\right)}\sum_{t=1}^{T}\sum_{r\neq t}\widehat{u}_{it}\widehat{u}_{ir},
# \end{aligned}$$
#
# and $\widehat{\sigma}_{\epsilon}^{2}=\widehat{\sigma}_{u}^{2}-\widehat{\sigma}_{\alpha}^{2}$, since the cross products $\widehat{u}_{it}\widehat{u}_{ir}$ for $t\neq r$ estimate $\sigma_{\alpha}^{2}$.
#
# Again, we claim asymptotic normality.
#
# If RE.1 and RE.2 are satisfied, then
# $$\left(\widehat{\sigma}_{u}^{2}\left(W'\widehat{\mathbf{S}}^{-1}W\right)^{-1}\right)^{-1/2}\left(\widehat{\boldsymbol{\beta}}^{RE}-\boldsymbol{\beta}_{0}\right)
# \stackrel{d}{\to}
# N\left(0,I_{K+1}\right)$$
# where $\widehat{\mathbf{S}}$ is a consistent estimator of $\mathbf{S}$.
#
# The complicated formula of the RE estimator is not important, because again it will be handled by an econometric package automatically.
# What is important is the conceptual difference between FE and RE in their treatment of the unobservable individual heterogeneity.
#
# **Data Example** RE regression

g.re <- plm(equation, data=g, model="random")
summary(g.re)

# Which model is preferred?
# The Hausman test favors the fixed-effect model.

phtest(g.re, g.fe)
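# For reference, a standard form of the statistic behind `phtest` (computed on the coefficients that both
# estimators identify) is
#
# $$H=\left(\widehat{\beta}^{FE}-\widehat{\beta}^{RE}\right)'\left[\widehat{\mathrm{var}}\left(\widehat{\beta}^{FE}\right)-\widehat{\mathrm{var}}\left(\widehat{\beta}^{RE}\right)\right]^{-1}\left(\widehat{\beta}^{FE}-\widehat{\beta}^{RE}\right),$$
#
# which is asymptotically $\chi^{2}$-distributed under the null hypothesis that the random-effect assumptions hold,
# so that both estimators are consistent and RE is efficient. A small p-value therefore favors the fixed-effect
# specification, which is the conclusion reported above.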
lec_notes_ipynb/lec_panel.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: ipykernel_py2 # --- # ## Diversifiable and Non-Diversifiable Risk of a Portfolio # Import the same dataset we used in the previous lecture – Microsoft and Apple stock. Timeframe – 1st of January 2000 until today. <br /> # *Hint: To save time, we have written the code you need!* # + import numpy as np import pandas as pd from pandas_datareader import data as wb import matplotlib.pyplot as plt sec_data = pd.read_csv('D:/Python/MSFT_AAPL_2000_2017.csv', index_col='Date') sec_data # - # Then, calculate the diversifiable and the non-diversifiable risk of a portfolio, composed of these two stocks: # a) with weights 0.5 and 0.5; # ### Calculating Portfolio Variance # Equal weightings scheme: # Portfolio Variance: # ### Calculating Diversifiable and Non-Diversifiable Risk of a Portfolio # Diversifiable Risk: # Or: # Calculating Diversifiable Risk: # Calculating Non-Diversifiable Risk: # ***** # b) With weights 0.2 for Microsoft and 0.8 for Apple. # ### Calculating Portfolio Variance # Portfolio Variance: # ### Calculating Diversifiable and Non-Diversifiable Risk of a Portfolio # Calculating Diversifiable Risk: # Calculating Non-Diversifiable Risk:
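# One possible way to fill in the sections above is sketched below for the equal-weight case. This is only a sketch, not the official solution: it assumes `sec_data` holds daily prices in columns named 'MSFT' and 'AAPL' (adjust the names to match the CSV) and uses the usual 250 trading days for annualization.

# +
sec_returns = np.log(sec_data / sec_data.shift(1))

weights = np.array([0.5, 0.5])

# Annualized covariance matrix and individual variances
cov_a = sec_returns.cov() * 250
MSFT_var_a = sec_returns['MSFT'].var() * 250
AAPL_var_a = sec_returns['AAPL'].var() * 250

# Portfolio variance: w' * Cov * w
pfolio_var = np.dot(weights.T, np.dot(cov_a, weights))

# Diversifiable risk = portfolio variance minus the weighted individual variances
dr = pfolio_var - weights[0] ** 2 * MSFT_var_a - weights[1] ** 2 * AAPL_var_a

# Non-diversifiable (systematic) risk is what remains
n_dr = pfolio_var - dr

print(pfolio_var)
print(dr)
print(n_dr)
# -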
Python for Finance - Code Files/79 Diversifiable and Non-Diversifiable Risk/CSV/Python 2 CSV/Diversifiable and Non-Diversifiable Risk of a Portfolio - Exercise_CSV.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# # Week 5 Assignment
#
# ## MSDS 600 - Introduction to Data Science
#
# ## Data Science Automation
#
# ## Professor: Dr. <NAME>
#
# ### Prepared By: <NAME>

# Using our prepared churn data from week 2:
# - use pycaret to find an ML algorithm that performs best on the data
# - Choose a metric you think is best to use for finding the best model; by default, it is accuracy but it could be AUC, precision, recall, etc. The week 3 FTE has some information on these different metrics.
# - save the model to disk
# - create a Python script/file/module with a function that takes a pandas dataframe as an input and returns the probability of churn for each row in the dataframe
# - your Python file/function should print out the predictions for new data (new_churn_data.csv)
# - the true values for the new data are [1, 0, 0, 1, 0] if you're interested
# - test your Python module and function with the new data, new_churn_data.csv
# - write a short summary of the process and results at the end of this notebook
# - upload this Jupyter Notebook and Python file to a GitHub repository, and turn in a link to the repository in the week 5 assignment dropbox
#
# *Optional* challenges:
# - return the probability of churn for each new prediction, and the percentile where that prediction is in the distribution of probability predictions from the training dataset (e.g. a high probability of churn like 0.78 might be at the 90th percentile)
# - use other autoML packages, such as TPOT, H2O, MLBox, etc, and compare performance and features with pycaret
# - create a class in your Python module to hold the functions that you created
# - accept user input to specify a file using a tool such as Python's `input()` function, the `click` package for command-line arguments, or a GUI
# - Use the unmodified churn data (new_unmodified_churn_data.csv) in your Python script. This will require adding the same preprocessing steps from week 2 since this data is like the original unmodified dataset from week 1.

# # Load Data

# Loading the data and setting `customerID` as the index column. I updated the `TotalCharges_tenure_ratio` column name to `charge_per_tenure` in the `prepped_Churn_data.csv` file so that it matches the column name in `new_churn_data.csv` used for predictions.

# +
import pandas as pd

df = pd.read_csv('prepped_Churn_data.csv', index_col='customerID')
df
# -

# # AutoML with PyCaret

# I installed the `pycaret` package. It didn't work as expected at first due to a newer version of `scikit-learn`, so I installed the supported version, `scikit-learn` `0.23.1`, as per the instructions on the PyCaret website. PyCaret is used for AutoML; it helps compare machine learning models.

from pycaret.classification import setup, compare_models, predict_model, save_model, load_model

automl = setup(df, target='Churn', preprocess=False, numeric_features=['Contract', 'PaymentMethod', 'PhoneService'])

# I updated the data type of `Contract`, `PaymentMethod`, and `PhoneService` to numeric because they were being treated as categorical; this helps xgboost and lightgbm work.

automl[13]

# Running AutoML to find the best model.

best_model = compare_models()

best_model

# The best model is Logistic Regression. As we are not preprocessing the data, the `best_model` object is used to make predictions. (I also got the CatBoost Classifier as the best model in some runs.)
df.iloc[-2:-1].shape

# Indexing with [-2:-1] is used to select the last row and keep it as a 2D dataframe.

predict_model(best_model, df.iloc[-2:-1])

# The example above uses the `predict_model` function, which is used when preprocessing is handled by AutoML. The `Label` column holds the predicted label, which is `1` in this case; since the score is >= 0.5, it is rounded up to `1`.

# ## Saving and loading our model

# Saving the trained machine learning model (Logistic Regression) so it can be used later. The pickle module is used to save the model as a pickle file (data is stored in binary format).

save_model(best_model, 'LGR')

# +
import pickle

with open('LGR_model.pk', 'wb') as k:
    pickle.dump(best_model, k)
# -

# The `open` function is used to open the `LGR_model.pk` file: `w` is for writing the file and `b` is for binary format. `with` is used to close the file automatically.

with open('LGR_model.pk', 'rb') as k:
    loaded_model = pickle.load(k)

new_data = df.iloc[-2:-1].copy()
new_data.drop('Churn', axis=1, inplace=True)
loaded_model.predict(new_data)

# Using the `load_model` function from pycaret to load the saved model `LGR`, then using `predict_model` to get the prediction.

loaded_lgr = load_model('LGR')
predict_model(loaded_lgr, new_data)

# # Making a Python Module to make predictions

# Using the machine learning model in a Python file so that new data can be provided as input and predictions returned. I used the `VS Code` IDE to create the Python file.

# +
from IPython.display import Code
Code('predict_churn.py')
# -

# Running the Python file using the magic command `%run`

# %run predict_churn.py

# I was getting other models as the best model in some runs, but `Logistic Regression` was the one that came up most frequently. Comparing with the true values for `new_churn_data.csv`, i.e. [1, 0, 0, 1, 0], we have one false negative.

# `VS Code` Screenshot of Python File
#
# ![VS_Code-3.JPG](attachment:VS_Code-3.JPG)

# # Saving our code to GitHub

# I already had an account on GitHub, but as instructed by the professor in (George, 2021), I downloaded the GitHub Desktop GUI application and created the repository using it.

# # Summary

# First, I updated the `TotalCharges_tenure_ratio` column name to `charge_per_tenure` in the `prepped_Churn_data.csv` file to match the column name in `new_churn_data.csv` used for predictions. Then I loaded the data from `prepped_Churn_data.csv` using pandas. Next I installed the `pycaret` package, along with a compatible `scikit-learn` version that supports PyCaret. PyCaret is used for AutoML and compares different machine learning models. The data types of `Contract`, `PaymentMethod`, and `PhoneService` were categorical, so I updated them to numeric and also set preprocess to False to get xgboost and lightgbm working. On comparing models, `Logistic Regression` turned out to be the best model.
#
# The next step was to save and load the best model. The pickle module was used to save the model as a pickle file (binary format). The `load_model` function was used to load the saved best model, `Logistic Regression`, to make predictions. The `VS Code` IDE was used to create the Python file. I loaded `new_churn_data.csv` into a dataframe and used the PyCaret best model to get predictions. On making the predictions, we had one false negative, so the best model performed fairly well, though it is not perfect.
#
# I also downloaded the GitHub Desktop GUI application (I already had a GitHub account) and created the repository `MSDS_600_Week5`. [Link to Week 5 Assignment GitHub Repository](https://github.com/dkamaal/MSDS_600_Week5)

# # References

# <NAME>. (2022) MSDS 600 - From the Experts: Data Science Automation. World Class.
Anderson College of Business & Computing. Regis University. # # <NAME>. (2021) MSDS 600 - From the Experts: Data Science Automation. World Class. Anderson College of Business & Computing. Regis University. # # <NAME>. & <NAME>. (2021) Automated Machine Learning. Packt Publishing. [O’Reilly Version] Retrieved from https://learning.oreilly.com/library/view/automated-machine-learning/9781800567689/
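# As a closing illustration, here is a minimal, hypothetical sketch of a prediction module along the lines described in the summary above. It is not the actual `predict_churn.py` shown via `Code()` earlier, and it assumes pycaret 2.x behavior, where `predict_model` returns `Label` and `Score` columns:

# +
# hypothetical sketch -- not the submitted predict_churn.py
import pandas as pd
from pycaret.classification import load_model, predict_model


def predict_churn(df):
    """Return the predicted label and score for each row of df."""
    model = load_model('LGR')                  # model saved earlier with save_model(best_model, 'LGR')
    predictions = predict_model(model, data=df)
    # 'Label' is the predicted class; 'Score' is the probability of the predicted class
    return predictions[['Label', 'Score']]


if __name__ == '__main__':
    new_df = pd.read_csv('new_churn_data.csv', index_col='customerID')
    print(predict_churn(new_df))
# -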
Week_5_Assignment_Kamaal_Danish.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# default_exp core
# -

# # pplyr
#
# > pplyr lets you use dplyr's grammar of data manipulation from within Python without any code changes.

#hide
from nbdev.showdoc import *

# +
#export
import pandas as pd
from os import system
from tempfile import NamedTemporaryFile


def pplyr(df, dplyr_code, verbose = False, fread = False):
    """ pplyr lets you use dplyr's grammar of data manipulation from within Python without any code changes.

    pplyr writes the dataframe to a csv file, creates an R script with your dplyr code, and runs it with Rscript.
    The result is then read back and returned as a pandas DataFrame. For now, the dataframe passed to R is stored
    as "df" within R. This might change in future versions.

    By default, pplyr uses R's read.csv() and write.csv() functions. These functions are notoriously slow.
    If you deal with larger dataframes, make sure to install the "data.table" R package and pass fread = True
    to pplyr to speed up read/write operations. Please note that the output is slightly different for
    fread/fwrite and read.csv/write.csv.

    See https://fdewes.github.io/pplyr/ for docs.
    """

    # temporary files for the data exchanged with R, the generated R script, and its console output
    dpylr_data_in = NamedTemporaryFile()
    dpylr_data_out = NamedTemporaryFile()
    r_script_file = NamedTemporaryFile()
    script_messages = NamedTemporaryFile()

    if(fread):
        libs = ("suppressMessages(library(dplyr)); suppressMessages(library(data.table))")
        load = ("df = fread('" + dpylr_data_in.name + "') %>% as.data.frame()")
        write = ("fwrite(df, '" + dpylr_data_out.name + "')")
    else:
        libs = ("suppressMessages(library(dplyr));")
        load = ("df = read.csv('" + dpylr_data_in.name + "') %>% as.data.frame()")
        write = ("write.csv(df, '" + dpylr_data_out.name + "')")

    # write the input data and assemble the R script: load libraries, read data, run user code, write result
    df.to_csv(dpylr_data_in.name)
    r_script_code = libs + "\n" + load + "\n" + dplyr_code + "\n" + write + "\n"

    if(verbose):
        print(r_script_code)

    with open(r_script_file.name, "w") as f:
        f.write(r_script_code)

    # run the script with Rscript and capture both stdout and stderr
    r_system_call = "Rscript --no-site-file --no-init-file " + r_script_file.name + " > " + script_messages.name + " 2>&1"
    system(r_system_call)

    if(verbose):
        print("R output:")
        r_output = open(script_messages.name, "r")
        print(r_output.read())

    df = pd.read_csv(dpylr_data_out.name, index_col = 0)

    return df
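# A quick usage sketch (assuming R with the dplyr package is installed and `Rscript` is on the PATH; the example data and dplyr pipeline are made up for illustration):

# +
example_df = pd.DataFrame({"group": ["a", "a", "b", "b"], "value": [1, 2, 3, 4]})

result = pplyr(example_df, "df = df %>% group_by(group) %>% summarise(mean_value = mean(value))", verbose = True)
result
# -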
00_pplyr_core.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Exercise 3.1
#
# Using Metropolis-Hastings, construct samples in two dimensions ($x$ and $y$)
# that follow the distribution function $f(x,y)=\exp\left( -\frac{1}{2}(x^2 + \frac{y^2}{9} - \frac{xy}{12}) \right)$.
#
# Below is an example of how to visualize this distribution function in 2D (left)
# and how to visualize the 2D histogram of a random set of points (which does **not** follow the proposed distribution).

# +
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline

def dens(x, y):
    return np.exp(-0.5*(x**2/4 + y**2 +x*y/1.5))

N=100000

# Generate points on a grid
x_line = np.linspace(-5,5,100)
y_line = np.linspace(-5,5,100)
x_grid, y_grid = np.meshgrid(x_line, y_line)
z_grid = dens(x_grid, y_grid)

# Generate a list of points
x_lista = np.random.normal(size=N)
y_lista = np.random.normal(scale=3.0, size=N)

fig, (ax0, ax1) = plt.subplots(1,2)

# plot the grid points
im = ax0.pcolormesh(x_grid, y_grid, z_grid)

# plot the two-dimensional histogram from the list of points
_ = plt.hist2d(x_lista, y_lista, bins=50)
# -
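# A minimal random-walk Metropolis-Hastings sketch for the density given in the statement above (the proposal scale, starting point, and number of steps are arbitrary choices, and no burn-in or tuning is applied):

# +
def f_target(x, y):
    # unnormalized density from the exercise statement
    return np.exp(-0.5*(x**2 + y**2/9 - x*y/12))

np.random.seed(42)
n_steps = 50000
samples = np.zeros((n_steps, 2))
current = np.array([0.0, 0.0])

for i in range(n_steps):
    proposal = current + np.random.normal(scale=1.0, size=2)
    # symmetric proposal, so accept with probability min(1, f(proposal)/f(current))
    if np.random.uniform() < f_target(proposal[0], proposal[1]) / f_target(current[0], current[1]):
        current = proposal
    samples[i] = current

_ = plt.hist2d(samples[:, 0], samples[:, 1], bins=50)
# -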
Metodos Computacionales Avanzados/secciones/05.MCMC/03_Metropolis_Multidimensional.ipynb