path | concatenated_notebook |
---|---|
notebooks-src/notebooks/Python Examples/Load Analyses for a MGnify Study.ipynb | ###Markdown
Load a Study from the MGnify API and fetch its Analyses

The [MGnify API](https://www.ebi.ac.uk/metagenomics/api/v1) returns JSON data. The `jsonapi_client` package can help you load this data into Python, e.g. into a Pandas dataframe.

**This example shows you how to load a MGnify Study's Analyses from the MGnify API**

You can find all of the other "API endpoints" using the [Browsable API interface in your web browser](https://www.ebi.ac.uk/metagenomics/api/v1). The URL you see in the browsable API is exactly the same as the one you can use in this code.

This is an interactive code notebook (a Jupyter Notebook). To run this code, click into each cell and press the ▶ button in the top toolbar, or press `shift+enter`.

---

Select a Study

Pick a particular Study of interest. If you followed a link to this notebook, we might already know the Study Accession. Otherwise, you can enter one or use the example:
###Code
from lib.variable_utils import get_variable_from_link_or_input
# You can also just directly set the accession variable in code, like this:
# accession = "MGYS00005292"
accession = get_variable_from_link_or_input('MGYS', 'Study Accession', 'MGYS00005292')
###Output
_____no_output_____
###Markdown
Fetch data

Fetch Analyses for this study from the MGnify API, into a [Pandas dataframe](https://pandas.pydata.org/docs/user_guide/index.html).
###Code
from jsonapi_client import Session
import pandas as pd
with Session("https://www.ebi.ac.uk/metagenomics/api/v1") as mgnify:
analyses = map(lambda r: r.json, mgnify.iterate(f'studies/{accession}/analyses'))
analyses = pd.json_normalize(analyses)
###Output
_____no_output_____
###Markdown
Inspect the data

The `.head()` method prints the first few rows of the table.
###Code
analyses.head()
###Output
_____no_output_____
###Markdown
Example: distribution of instruments used for the Analysed Samples
###Code
import matplotlib.pyplot as plt
analyses.groupby('attributes.instrument-model').size().plot(kind='pie')
plt.title('Number of Analysed Samples by instrument type');
###Output
_____no_output_____ |
13_impliedVol.ipynb | ###Markdown
1. Summary statistics

We have monthly data on these variables:

- Implied volatility of the call & put for every stock in the market
- Implied volatility (average of the call's and put's implied volatility)
- Implied volatility spread = ATM Call's implied volatility - ATM Put's implied volatility
- Implied volatility skew = OTM Put's implied volatility - ATM Call's implied volatility

The idea is that high implied volatility on the call side is generally good news, while high implied volatility on the put side (especially the OTM put) signals bad news.
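As a hedged sketch (not part of the original notebook), once `ImpVol` is loaded in the next cell, the derived series could be computed like this. The column names `iv_call`, `iv_put`, and `iv_otm_put` are assumptions, not necessarily the actual headers in `ImpliedVol.csv`:

```python
# Hypothetical column names -- replace with the actual headers in ImpliedVol.csv
iv_call = ImpVol["iv_call"]        # ATM call implied volatility (assumed name)
iv_put = ImpVol["iv_put"]          # ATM put implied volatility (assumed name)
iv_otm_put = ImpVol["iv_otm_put"]  # OTM put implied volatility (assumed name)

ImpVol["iv_mean"] = (iv_call + iv_put) / 2    # average implied volatility
ImpVol["iv_spread"] = iv_call - iv_put        # ATM call IV minus ATM put IV
ImpVol["iv_skew"] = iv_otm_put - iv_call      # OTM put IV minus ATM call IV
```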
###Code
import pandas as pd
ImpVol = pd.read_csv("data/ImpliedVol.csv")
ImpVol.head()
ImpVol.describe()
###Output
_____no_output_____ |
Scripts/.ipynb_checkpoints/get_data_for_jessi_3projections-checkpoint.ipynb | ###Markdown
Get Data Prepped

Get data prepared in csv files for Jessi
###Code
# for running straight from github repository
import sys, os
sys.path.append(os.path.join(sys.path[1],'..','..','code'))
#print(sys.path)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
import matplotlib.colors as mplc
import h5py
from scipy import stats
import netCDF4 as nc
import pandas
import time
import glob
import mpl_toolkits.basemap as bm
import warnings
import matplotlib
warnings.filterwarnings("ignore",category=matplotlib.mplDeprecation)
from mpl_toolkits.basemap import Basemap, shiftgrid, addcyclic
# colormaps
CM = h5py.File('/Users/eabarnes/GoogleDrive/WORK/AUTHORED_MANUSCRIPTS/INPREP/Hartmann_Barnes_2018/python/gradsmap.mat')
cm_rgb = np.array(CM['cm_rb'])/255.
cm_rgb = np.delete(cm_rgb,[9,10],axis=1)
CM.close()
cm_rgb = np.swapaxes(cm_rgb,1,0)
cmap = LinearSegmentedColormap.from_list('tbk_cmap', cm_rgb, N=14)
plt.register_cmap(cmap=cmap)
def readFiles(loadname):
d = np.load(loadname)
data = d['data']
lats = d['lats']
lons = d['lons']
return data, lats, lons
def getRegion1(data, lats, lons, lat_bounds, lon_bounds):
ilat = np.squeeze(np.where(np.logical_and(lats >= lat_bounds[0],
lats <= lat_bounds[1])))
if(lon_bounds[0] < lon_bounds[1]):
ilon = np.squeeze(np.where(np.logical_and(lons >= lon_bounds[0],
lons <= lon_bounds[1])))
else:
ilon = np.where(lons >= lon_bounds[0])
ilon = np.append(ilon,np.where(lons <= lon_bounds[1]))
ilon = np.squeeze(ilon)
latsReg = lats[ilat]
lonsReg = lons[ilon]
if np.ndim(data) == 3:
dataReg = data[np.ix_(np.arange(0, np.shape(data)[0]), ilat, ilon)]
elif np.ndim(data) == 4:
dataReg = data[np.ix_(np.arange(0, np.shape(data)[0]),
np.arange(0, np.shape(data)[1]),
ilat, ilon)]
basemap = bm.Basemap(llcrnrlon=lon_bounds[0], llcrnrlat=lat_bounds[0],
urcrnrlon=lon_bounds[1],urcrnrlat=lat_bounds[1],
resolution='l',projection='cyl')
return dataReg, latsReg, lonsReg, basemap
def getRegion2(data, lats, lons, lat_bounds, lon_bounds):
ilat = np.squeeze(np.where(np.logical_and(lats >= lat_bounds[0],
lats <= lat_bounds[1])))
if(lon_bounds[0] < lon_bounds[1]):
ilon = np.squeeze(np.where(np.logical_and(lons >= lon_bounds[0],
lons <= lon_bounds[1])))
else:
ilon = np.where(lons >= lon_bounds[0])
ilon = np.append(ilon,np.where(lons <= lon_bounds[1]))
ilon = np.squeeze(ilon)
latsReg = lats[ilat]
lonsReg = lons[ilon]
if np.ndim(data) == 3:
dataReg = data[np.ix_(np.arange(0, np.shape(data)[0]), ilat, ilon)]
elif np.ndim(data) == 4:
dataReg = data[np.ix_(np.arange(0, np.shape(data)[0]),
np.arange(0, np.shape(data)[1]),
ilat, ilon)]
basemap = bm.Basemap(llcrnrlon=lon_bounds[0], llcrnrlat=lat_bounds[0],
urcrnrlon=lon_bounds[1],urcrnrlat=lat_bounds[1],
resolution='l',projection='cea')
return dataReg, latsReg, lonsReg, basemap
def getRegion3(data, lats, lons, lat_bounds, lon_bounds):
ilat = np.squeeze(np.where(np.logical_and(lats >= lat_bounds[0],
lats <= lat_bounds[1])))
if(lon_bounds[0] < lon_bounds[1]):
ilon = np.squeeze(np.where(np.logical_and(lons >= lon_bounds[0],
lons <= lon_bounds[1])))
else:
ilon = np.where(lons >= lon_bounds[0])
ilon = np.append(ilon,np.where(lons <= lon_bounds[1]))
ilon = np.squeeze(ilon)
latsReg = lats[ilat]
lonsReg = lons[ilon]
if np.ndim(data) == 3:
dataReg = data[np.ix_(np.arange(0, np.shape(data)[0]), ilat, ilon)]
elif np.ndim(data) == 4:
dataReg = data[np.ix_(np.arange(0, np.shape(data)[0]),
np.arange(0, np.shape(data)[1]),
ilat, ilon)]
#basemap = bm.Basemap(projection='npstere',boundinglat=0,lon_0=0,
# resolution='l')
basemap = bm.Basemap(projection='ortho',lat_0=90,lon_0=0,
resolution='l')
return dataReg, latsReg, lonsReg, basemap
def drawOnGlobe(basemap, data, lats, lons, cmap, vmin=None, vmax=None):
basemap.drawcoastlines(color='gray', linewidth=0.5)
data, lons = addcyclic(data, lons)
# basemap.drawparallels(np.arange(-90.,120.,30.))
#dshifted, newlons = bm.shiftgrid(180., data, lons, start=False)
dshifted = data
newlons = lons
lonsmg, latsmg = np.meshgrid(newlons, lats)
x, y = basemap(lonsmg, latsmg)
levels = np.arange(-7.,7.1,.1)
if vmin:
#basemap.pcolormesh(x, y, dshifted, cmap=cmap, vmin=vmin, vmax=vmax)
basemap.contourf(x, y, dshifted, levels=levels, cmap=cmap, vmin=vmin, vmax=vmax)
else:
#basemap.pcolormesh(x, y, dshifted, cmap=cmap)
basemap.contourf(x, y, dshifted, levels=levels, cmap=cmap,extend='both')
cbar = plt.colorbar(fraction=0.02, pad=0.02,orientation='horizontal',ticks=np.arange(-7.0,8.0,1))
return cbar
###Output
_____no_output_____
###Markdown
Get Data
###Code
#define options
seasons = True
season_num = 0
# define constants
lat_bounds = (0,90)
lon_bounds = (0,360)
if(seasons):
years = np.tile((np.arange(1920,2100)),(4,1))
years = years.flatten(order='F')
else:
years = np.arange(1920,2099)
year_bounds = (2013,2046)
#year_bounds = (1979,2012)
num_years = year_bounds[1]-year_bounds[0]+1
print('number of years = ' + str(num_years))
dpi = 300
filename = "/Users/eabarnes/GoogleDrive/WORK/RESEARCH/2018/TOE/data/npz_files/LENS_hist_rcp_TREFHT_seasonalmean_r240x120.npz"
data,lats,lons = readFiles(filename)
#grab season
data = data[:,season_num::4]
years = years[season_num::4]
iyear = np.squeeze(np.where(np.logical_and(years>=year_bounds[0],years<=year_bounds[1])))
data = data[:,iyear,:,:]
print(years[iyear])
print(np.shape(data))
# get basemap ready
dataReg, latsReg, lonsReg, basemap1 = getRegion1(data,lats,lons,lat_bounds,lon_bounds)
dataReg, latsReg, lonsReg, basemap2 = getRegion2(data,lats,lons,lat_bounds,lon_bounds)
dataReg, latsReg, lonsReg, basemap3 = getRegion3(data,lats,lons,lat_bounds,lon_bounds)
###Output
_____no_output_____
###Markdown
Process each ensemble member's trends and save figure and csv
###Code
#bmap = basemap1
#dir_name = 'figures_csv_cylProj'
#bmap = basemap2
#dir_name = 'figures_csv_ceaProj'
bmap = basemap3
dir_name = 'figures_csv_orthoProj'
#calculate trends
num_years = np.size(data,1)
trend_data = np.empty((np.size(data,0),np.size(data,2),np.size(data,3)))
for ens in np.arange(0,np.size(data,0)):
print('ensemble member = ' + str(ens))
for ilat in np.arange(0,np.size(data,2)):
for ilons in np.arange(0,np.size(data,3)):
y = data[ens,:,ilat,ilons]
slope, intercept, r_value, p_value, std_err = stats.linregress(np.arange(0,np.size(data,1)),y)
trend_data[ens,ilat,ilons] = slope
#plot the results
trend_plot = trend_data[ens,:,:]*num_years
plt.figure(figsize=(10,20))
cbar = drawOnGlobe(bmap,trend_plot,lats,lons,cmap=cmap)
plt.annotate(str(ens+1), xy=(.01, .05), xycoords='axes fraction',backgroundcolor='white')
cbar.set_label(str(year_bounds[0]) + '-' + str(year_bounds[1]) + ' DJF surface air temperature trends (K/' + str(num_years) + ' years)')
plt.savefig(dir_name + '/data_trend_ensmember_' + str(ens+1) + '.png', dpi=dpi, format='png', frameon=False, bbox_inches='tight')
#plt.show()
plt.close()
# save to csv file
np.savetxt(dir_name + '/data_trend_ensmember_' + str(ens+1) + '.csv', np.flipud(trend_plot), fmt='%.18f', delimiter=',', newline='\n')
###Output
ensemble member = 0
ensemble member = 1
ensemble member = 2
ensemble member = 3
ensemble member = 4
ensemble member = 5
ensemble member = 6
ensemble member = 7
ensemble member = 8
ensemble member = 9
ensemble member = 10
ensemble member = 11
ensemble member = 12
ensemble member = 13
ensemble member = 14
ensemble member = 15
ensemble member = 16
ensemble member = 17
ensemble member = 18
ensemble member = 19
ensemble member = 20
ensemble member = 21
ensemble member = 22
ensemble member = 23
ensemble member = 24
ensemble member = 25
ensemble member = 26
ensemble member = 27
ensemble member = 28
ensemble member = 29
ensemble member = 30
ensemble member = 31
ensemble member = 32
ensemble member = 33
ensemble member = 34
ensemble member = 35
ensemble member = 36
ensemble member = 37
ensemble member = 38
ensemble member = 39
###Markdown
Ensemble mean + lat/long grid
###Code
#bmap = basemap1
#dir_name = 'figures_csv_cylProj'
#bmap = basemap2
#dir_name = 'figures_csv_ceaProj'
bmap = basemap3
dir_name = 'figures_csv_orthoProj'
#calculate trends
num_years = np.size(data,1)
trend_data_em = np.empty((np.size(data,2),np.size(data,3)))
grid_lat = np.empty((np.size(data,2),np.size(data,3)))
grid_lons = np.empty((np.size(data,2),np.size(data,3)))
data_em = np.squeeze(np.mean(data,0))
for ilat in np.arange(0,np.size(data,2)):
for ilons in np.arange(0,np.size(data,3)):
y = data_em[:,ilat,ilons]
slope, intercept, r_value, p_value, std_err = stats.linregress(np.arange(0,np.size(data_em,0)),y)
trend_data_em[ilat,ilons] = slope
grid_lat[ilat,ilons] = lats[ilat]
grid_lons[ilat,ilons] = lons[ilons]
#plot the results
trend_plot = trend_data_em[:,:]*num_years
plt.figure(figsize=(10,20))
cbar = drawOnGlobe(bmap,trend_plot,lats,lons,cmap=cmap)
plt.annotate('EM', xy=(.01, .05), xycoords='axes fraction',backgroundcolor='white')
cbar.set_label(str(year_bounds[0]) + '-' + str(year_bounds[1]) + ' DJF surface air temperature trends (K/' + str(num_years) + ' years)')
plt.savefig(dir_name + '/data_trend_ensmean.png', dpi=dpi, format='png', frameon=False, bbox_inches='tight')
plt.show()
plt.close()
# save to csv file
np.savetxt(dir_name + '/data_trend_ensmean.csv', np.flipud(trend_plot), fmt='%.18f', delimiter=',', newline='\n')
np.savetxt(dir_name + '/grid_lats.csv', np.flipud(grid_lat), fmt='%.18f', delimiter=',', newline='\n')
np.savetxt(dir_name + '/grid_lons.csv', np.flipud(grid_lons), fmt='%.18f', delimiter=',', newline='\n')
###Output
_____no_output_____
###Markdown
Observations Testing imagemagick
###Code
#bmap = basemap1
#dir_name = 'figures_csv_cylProj'
#bmap = basemap2
#dir_name = 'figures_csv_ceaProj'
bmap = basemap3
dir_name = 'figures_csv_orthoProj'
#calculate trends
num_years = np.size(data,1)
trend_data_em = np.empty((np.size(data,2),np.size(data,3)))
grid_lat = np.empty((np.size(data,2),np.size(data,3)))
grid_lons = np.empty((np.size(data,2),np.size(data,3)))
data_em = np.squeeze(np.mean(data,0))
for ilat in np.arange(0,np.size(data,2)):
for ilons in np.arange(0,np.size(data,3)):
y = data_em[:,ilat,ilons]
slope, intercept, r_value, p_value, std_err = stats.linregress(np.arange(0,np.size(data_em,0)),y)
trend_data_em[ilat,ilons] = slope
grid_lat[ilat,ilons] = lats[ilat]
grid_lons[ilat,ilons] = lons[ilons]
#plot the results
trend_plot = trend_data_em[:,:]*num_years
plt.figure(figsize=(10,20))
cbar = drawOnGlobe(bmap,trend_plot,lats,lons,cmap=cmap)
plt.annotate('EM', xy=(.01, .05), xycoords='axes fraction',backgroundcolor='white')
cbar.set_label(str(year_bounds[0]) + '-' + str(year_bounds[1]) + ' DJF surface air temperature trends (K/' + str(num_years) + ' years)')
plt.savefig(dir_name + '/TESTdata_trend_ensmean.png', dpi=75, format='png', frameon=False, bbox_inches='tight')
plt.show()
plt.close()
###Output
_____no_output_____ |
Week1/prerun_code/1_Tensorflow_simple_NN_prerun.ipynb | ###Markdown
Simple Neural Net using Tensorflow2

Import required packages
###Code
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from tensorflow import keras
import tensorflow as tf
###Output
_____no_output_____
###Markdown
Dataset Generation
###Code
# Parameters (y = a1*x1 + a2*x2 + a3*x1*x1 + a4*x2*x2 + a5*x1*x2 + b)
a1 = 2.7
a2 = 3.5
a3 = 0.6
a4 = 0.6
a5 = -1.4
b = 2.0
# Create noisy data
x1_data = np.linspace(-10, 10, num=100000)
x2_data = np.linspace(-10, 10, num=100000)
y_data = a1 * x1_data + \
a2 * x2_data + \
a3 * x1_data * x1_data +\
a4 * x2_data * x2_data +\
a5 * x1_data * x2_data +\
b +\
np.random.normal(size=100000)
x_data = []
for x1,x2 in zip(x1_data,x2_data):
x_data.append([x1,x2])
x_data = np.array(x_data)
###Output
_____no_output_____
###Markdown
Build the neural net
###Code
# Create the model
model = keras.Sequential()
model.add(keras.layers.InputLayer(input_shape=(2,)))
model.add(keras.layers.Dense(units = 4, activation = 'relu'))
model.add(keras.layers.Dense(units = 3, activation = 'relu'))
model.add(keras.layers.Dense(units = 1, activation = 'linear'))
# Define Loss and Optimizer
# Stocastic Gradient Descent
#model.compile(loss='mse', optimizer= keras.optimizers.SGD(learning_rate=1e-2))
# Adam
model.compile(loss='mse', optimizer= keras.optimizers.Adam(learning_rate=1e-3))
# Display the model
model.summary()
# Learn
epochs = 10
batch_size = 32
model_returns = model.fit(x_data, y_data, epochs=epochs, batch_size=batch_size, verbose=1 )
# Predict (compute) the output
y_predicted = model.predict(x_data)
y_predicted = y_predicted.squeeze()
# Plot the data
fig = plt.figure(figsize=(20, 10))
ax = fig.add_subplot(1, 2, 1, projection='3d')
ax.scatter3D(x1_data, x2_data, y_data, label="actual", c='b')
plt.legend()
ax = fig.add_subplot(1, 2, 2, projection='3d')
ax.scatter3D(x1_data, x2_data, y_predicted, label="predicted", c='r')
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
To use GPU or CPU specifically
###Code
## Get list of available devices
from tensorflow.python.client import device_lib
device_lib.list_local_devices()
## For GPU
with tf.device('/gpu:0'):
model_returns = model.fit(x_data, y_data, epochs=10, verbose=1)
with tf.device('/gpu:0'):
y_predicted = model.predict(x_data)
## For CPU
with tf.device('/cpu:0'):
model_returns = model.fit(x_data, y_data, epochs=10, verbose=1)
with tf.device('/cpu:0'):
y_predicted = model.predict(x_data)
###Output
_____no_output_____ |
deeplearning.ai/nlp/c3_w1_02_trax_layers.ipynb | ###Markdown
Trax Layers

This notebook introduces the core concepts of the Trax library through a series of code samples and explanations. The topics covered in the following sections are:

1. [Layers](1): the basic building blocks and how to combine them
1. [Inputs and Outputs](2): how data streams flow through layers
1. Defining New Layer Classes (if combining existing layers isn’t enough)
1. Testing and Debugging Layer Classes
###Code
#@title Install dependencies
#@markdown - Trax
%%capture
!pip install -Uqq trax
#@title Import packages
import os
import numpy as np
import tensorflow as tf
import trax
from trax import fastmath
from trax import layers as tl
from trax import shapes
from trax.fastmath import numpy as jnp
from trax.shapes import ShapeDtype, signature
np.set_printoptions(precision=3)
print("numpy ", np.__version__)
print("tensorflow ", tf.__version__)
!pip list | grep trax
def show_layer_properties(layer_obj, layer_name):
template = (
f"{layer_name}.n_in: {layer_obj.n_in}\n"
f"{layer_name}.n_out: {layer_obj.n_out}\n"
f"{layer_name}.sublayers: {layer_obj.sublayers}\n"
f"{layer_name}.weights: {layer_obj.weights}\n"
)
print(template)
###Output
_____no_output_____
###Markdown
1. Layers

The Layer class represents Trax's basic building blocks.

> The inputs and outputs are NumPy arrays or JAX objects behaving as numpy arrays.

$tl.Relu[n_{in}=1, n_{out}=1]$
###Code
relu = tl.Relu()
x = np.array([[-2, -1, 0, 1, 2], [-20, -10, 0, 10, 20]])
y = relu(x)
print(
f"x:\n{x}\n\n"
f"relu(x):\n{y}\n\n"
f"Number of inputs expected by this layer: {relu.n_in}\n"
f"Number of outputs promised by this layer: {relu.n_out}"
)
###Output
x:
[[ -2 -1 0 1 2]
[-20 -10 0 10 20]]
relu(x):
[[ 0 0 0 1 2]
[ 0 0 0 10 20]]
Number of inputs expected by this layer: 1
Number of outputs promised by this layer: 1
###Markdown
$tl.Concatenate[n_{in}=2, n_{out}=1]$
###Code
concat = tl.Concatenate()
x0 = np.array([[1, 2, 3], [7, 8, 9]])
x1 = np.array([[4, 5, 6], [10, 11, 12]])
y = concat([x0, x1])
print(
f"x0:\n{x0}\n\n"
f"x1:\n{x1}\n\n"
f"concat([x1, x2]):\n{y}\n\n"
f"Number of inputs expected by this layer: {concat.n_in}\n"
f"Number of outputs promised by this layer: {concat.n_out}"
)
###Output
x0:
[[1 2 3]
[7 8 9]]
x1:
[[ 4 5 6]
[10 11 12]]
concat([x1, x2]):
[[ 1 2 3 4 5 6]
[ 7 8 9 10 11 12]]
Number of inputs expected by this layer: 2
Number of outputs promised by this layer: 1
###Markdown
1.1. Layers are configurable

Many layer types have creation-time parameters for flexibility. The Concatenate layer type, for instance, has two optional parameters:

- `axis`: index of axis along which to concatenate the tensors; default value of -1 means to use the last axis.
- `n_items`: number of tensors to join into one by concatenation; default value is 2.

The following example shows `Concatenate` configured for 3 input tensors, and concatenation along the initial ($0^{th}$) axis.

$tl.Concatenate[n_{items}=3, axis=0]$
###Code
concat3 = tl.Concatenate(n_items=3, axis=0)
x0 = np.array([[1, 2, 3], [4, 5, 6]])
x1 = np.array([[10, 20, 30], [40, 50, 60]])
x2 = np.array([[100, 200, 300], [400, 500, 600]])
y = concat3([x0, x1, x2])
print(
f"x0:\n{x0}\n\n"
f"x1:\n{x1}\n\n"
f"x2:\n{x2}\n\n"
f"concat3([x0, x1, x2]):\n{y}"
)
###Output
x0:
[[1 2 3]
[4 5 6]]
x1:
[[10 20 30]
[40 50 60]]
x2:
[[100 200 300]
[400 500 600]]
concat3([x0, x1, x2]):
[[ 1 2 3]
[ 4 5 6]
[ 10 20 30]
[ 40 50 60]
[100 200 300]
[400 500 600]]
###Markdown
1.2. Layers are trainable.

Many layer types include weights that affect the computation of outputs from inputs, and they use back-propagated gradients to update those weights.

🚧🚧 A very small subset of layer types, such as ``BatchNorm``, also include modifiable weights (called ``state``) that are updated based on forward-pass inputs/computation rather than back-propagated gradients.

Initialization

Trainable layers must be initialized before use. Trax can take care of this as part of the overall training process. In other settings (e.g., in tests or interactively in a Colab notebook), you need to initialize the outermost/topmost layer explicitly. For this, use init:

$tl.LayerNorm[n_{in}=1, n_{out}=1]$
###Code
layer_norm = tl.LayerNorm()
x = np.array([[-2, -1, 0, 1, 2], [1, 2, 3, 4, 5], [10, 20, 30, 40, 50]]).astype(
np.float32
)
layer_norm.init(shapes.signature(x))
y = layer_norm(x)
print(
f"x:\n{x}\n\n"
f"layer_norm(x):\n{y}\n"
f"layer_norm.weights:\n{layer_norm.weights}"
)
###Output
x:
[[-2. -1. 0. 1. 2.]
[ 1. 2. 3. 4. 5.]
[10. 20. 30. 40. 50.]]
layer_norm(x):
[[-1.414 -0.707 0. 0.707 1.414]
[-1.414 -0.707 0. 0.707 1.414]
[-1.414 -0.707 0. 0.707 1.414]]
layer_norm.weights:
(DeviceArray([1., 1., 1., 1., 1.], dtype=float32), DeviceArray([0., 0., 0., 0., 0.], dtype=float32))
###Markdown
1.3. Layers combine into layers.

The Trax library authors encourage users to build networks and network components as combinations of existing layers, by means of a small set of combinator layers. A combinator makes a list of layers behave as a single layer – by combining the sublayer computations yet looking from the outside like any other layer. The combined layer, like other layers, can:

- compute outputs from inputs,
- update parameters from gradients, and
- combine with yet more layers.

Combine with ``Serial``

$h(.) = g(f(.))$

```python
layer_h = Serial(
    layer_f,
    layer_g,
)
```
###Code
layer_block = tl.Serial(
tl.Relu(),
tl.LayerNorm(),
)
x = np.array([[-2, -1, 0, 1, 2],
[-20, -10, 0, 10, 20]]).astype(np.float32)
layer_block.init(shapes.signature(x))
y = layer_block(x)
print(
f'x:\n{x}\n\n'
f'layer_block(x):\n{y}'
)
print(
f"layer_block: {layer_block}\n\n"
f"layer_block.weights: {layer_block.weights}"
)
###Output
layer_block: Serial[
Serial[
Relu
]
LayerNorm
]
layer_block.weights: (((), (), ()), (DeviceArray([1., 1., 1., 1., 1.], dtype=float32), DeviceArray([0., 0., 0., 0., 0.], dtype=float32)))
###Markdown
Combine with ``Branch``

The Branch combinator arranges layers into parallel computational channels.

```python
def Residual(*layers, shortcut=None):
  layers = _ensure_flat(layers)
  layer = layers[0] if len(layers) == 1 else Serial(layers)
  return Serial(
      Branch(shortcut, layer),
      Add(),
  )
```
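As a hedged aside (not from the original notebook), here is a minimal sketch of applying this residual pattern; it assumes `tl.Residual` behaves like the `Serial(Branch(shortcut, layer), Add())` definition above, adding a layer's output back to its input:

```python
import numpy as np
from trax import layers as tl
from trax import shapes

residual_relu = tl.Residual(tl.Relu())  # shortcut defaults to a no-op copy of the input

x = np.array([[-2., -1., 0., 1., 2.]])
residual_relu.init(shapes.signature(x))
y = residual_relu(x)  # Relu(x) + x: negative entries pass through, positive entries double
print(y)              # expected: [[-2. -1.  0.  2.  4.]]
```

The cell below demonstrates `Branch` directly, with two custom channels.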
###Code
relu = tl.Relu()
times_100 = tl.Fn("Times100", lambda x: x * 100.0)
branch_relu_t100 = tl.Branch(relu, times_100)
x = np.array([[-2, -1, 0, 1, 2],
[-20, -10, 0, 10, 20]])
branch_relu_t100.init(shapes.signature(x))
y0, y1 = branch_relu_t100(x)
print(
f"x:\n{x}\n\n"
f"y0:\n{y0}\n\n"
f"y1:\n{y1}"
)
###Output
x:
[[ -2 -1 0 1 2]
[-20 -10 0 10 20]]
y0:
[[ 0 0 0 1 2]
[ 0 0 0 10 20]]
y1:
[[ -200. -100. 0. 100. 200.]
[-2000. -1000. 0. 1000. 2000.]]
###Markdown
2. Inputs and Outputs

Trax allows layers to have multiple input streams and output streams. When designing a network, you have the flexibility to use layers that:

 - process a single data stream ($n_{in} = n_{out} = 1$),
 - process multiple parallel data streams ($n_{in} = n_{out} = 2, 3, ... $),
 - split or inject data streams ($n_{in} < n_{out}$), or
 - merge or remove data streams ($n_{in} > n_{out}$).

We saw in section 1 the example of `Residual`, which involves both a split and a merge:

```
  ...
  return Serial(
      Branch(shortcut, layer),
      Add(),
  )
```

In other words, layer by layer:

 - `Branch(shortcut, layers)`: makes two copies of the single incoming data stream, passes one copy via the shortcut (typically a no-op), and processes the other copy via the given layers (applied in series). [$n_{in} = 1$, $n_{out} = 2$]
 - `Add()`: combines the two streams back into one by adding two tensors elementwise. [$n_{in} = 2$, $n_{out} = 1$]

**Simple Case 1 -- Each layer takes one input and has one output.**

This is in effect a single data stream pipeline, and the successive layers behave like function composition:

```
# s(.) = h(g(f(.)))
layer_s = Serial(
    layer_f,
    layer_g,
    layer_h,
)
```

Note how, inside `Serial`, function composition is expressed naturally as a succession of operations, so that no nested parentheses are needed and the order of operations matches the textual order of layers.

**Simple Case 2 -- Each layer consumes all outputs of the preceding layer.**

This is still a single pipeline, but data streams internal to it can split and merge. The `Residual` example above illustrates this kind.

**General Case -- Successive layers interact via the data stack.**

As described in the `Serial` class docstring, each layer gets its inputs from the data stack after the preceding layer has put its outputs onto the stack. This covers the simple cases above, but also allows for more flexible data interactions between non-adjacent layers. The following example is schematic:

```
x, y_target = get_batch_of_labeled_data()
model_plus_eval = Serial(
    my_fancy_deep_model(),  # Takes one arg (x) and has one output (y_hat)
    my_eval(),              # Takes two args (y_hat, y_target) and has one output (score)
)
eval_score = model_plus_eval((x, y_target))
```

Here is the corresponding progression of stack states:

0. At start: _--empty--_
1. After `get_batch_of_labeled_data()`: *x*, *y_target*
2. After `my_fancy_deep_model()`: *y_hat*, *y_target*
3. After `my_eval()`: *score*

Note in particular how the application of the model (between stack states 1 and 2) only uses and affects the top element on the stack: `x` --> `y_hat`. The rest of the data stack (`y_target`) comes in use only later, for the eval function.

3. Defining New Layer Classes

If you need a layer type that is not easily defined as a combination of existing layer types, you can define your own layer classes in a couple different ways.

**Example 7.** Use `Fn` to define a new layer type:
###Code
def Gcd():
"""returns a layer to compute the greatest commom divisor, elemementwise."""
return tl.Fn("Gcd", lambda x0, x1: jnp.gcd(x0, x1))
gcd = Gcd()
x0 = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
x1 = np.array([11, 12, 13, 14, 15, 16, 17, 18, 19, 20])
y = gcd((x0, x1))
print(f"x0:\n{x0}\n\n" f"x1:\n{x1}\n\n" f"gcd((x0, x1)):\n{y}")
###Output
x0:
[ 1 2 3 4 5 6 7 8 9 10]
x1:
[11 12 13 14 15 16 17 18 19 20]
gcd((x0, x1)):
[ 1 2 1 2 5 2 1 2 1 10]
|
notebooks/manning/0-generate.ipynb | ###Markdown
Generating Sample Data from Distributions

Then treating data as if they were samples of real data
###Code
import warnings
warnings.filterwarnings('ignore')
%matplotlib inline
%pylab inline
import pandas as pd
print(pd.__version__)
###Output
0.23.4
###Markdown
Plot randomly generated classification dataset

http://scikit-learn.org/stable/auto_examples/datasets/plot_random_dataset.html
http://scikit-learn.org/stable/datasets/index.html#sample-generators
http://scikit-learn.org/stable/modules/generated/sklearn.datasets.make_classification.html

Plot several randomly generated 2D classification datasets. This example illustrates the :func:`datasets.make_classification`, :func:`datasets.make_blobs` and :func:`datasets.make_gaussian_quantiles` functions.

For ``make_classification``, three binary and two multi-class classification datasets are generated, with different numbers of informative features and clusters per class.
###Code
import numpy as np
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
N_SAMPLES = 500
TRAIN_SEED = 42
TEST_SEED = 13
# change seed for test data set
# SEED = TRAIN_SEED
SEED = TEST_SEED
# http://scikit-learn.org/stable/modules/generated/sklearn.datasets.make_blobs.html#sklearn.datasets.make_blobs
# https://www.welt.de/motor/news/article156991316/Unfallstatistik-2015.html
# http://www.openculture.com/2017/12/why-incompetent-people-think-theyre-amazing.html
# 0: young drivers with fast cars: red
# 1: reasonable drivers: green
# 2: a little bit older, more kilometers, general noise: yellow
# 3: really old drivers: red
# 4: young drivers: red
# 5: another green just to have a counter part to all the red ones: green
# 6: people who do not drive a lot: green
# 7: people who drive a lot: yellow
# 8: young people with slow cars: yellow
centers = [(150, 35, 50), (110, 50, 25), (120, 55, 30), (120, 75, 20), (120, 30, 30),
(140, 45, 40), (110, 40, 15), (130, 50, 45), (100, 25, 15)]
cluster_std = [4, 9, 18, 8, 9, 5, 8, 12, 5]
# X, y = make_blobs(n_samples=300, n_features=3, centers=centers, random_state=13, cluster_std = cluster_std)
# X, y = make_blobs(n_samples=300, n_features=3, centers=centers, random_state=42, cluster_std = cluster_std)
X, y = make_blobs(n_samples=N_SAMPLES, n_features=3, centers=centers, random_state=SEED, cluster_std = cluster_std)
# http://scikit-learn.org/stable/modules/generated/sklearn.datasets.make_classification.html
# X, y = make_classification(n_features=3, n_redundant=0, n_informative=3,
# n_clusters_per_class=2, n_classes=3, random_state=42)
feature_names = ['speed', 'age' ,'miles']
df = pd.DataFrame(X, columns=feature_names)
df = df.round()
# https://pandas.pydata.org/pandas-docs/stable/generated/pandas.Series.clip.html
df['speed'] = df['speed'].clip(60,200)
df['age'] = df['age'].clip(16,100)
df['miles'] = df['miles'].clip(1,500)
# merges clusters into one group
for group in np.nditer(y, op_flags=['readwrite']):
if group == 3 or group == 4:
group[...] = 0
if group == 5 or group == 6:
group[...] = 1
if group == 7 or group == 8:
group[...] = 2
df['group'] = y
df.describe()
# df.to_csv('./insurance-customers-300-2.csv', sep=';', index=False)
# df.to_csv('./insurance-customers-300.csv', sep=';', index=False)
# df.to_csv('./insurance-customers-1500.csv', sep=';', index=False)
# df.to_csv('./insurance-customers-10000.csv', sep=';', index=False)
df.to_csv('./insurance-customers-500-test.csv', sep=';', index=False)
# check
# !curl -O https://raw.githubusercontent.com/DJCordhose/data-viz/master/data/insurance-customers-1500.csv
# !curl -O https://raw.githubusercontent.com/DJCordhose/data-viz/master/data/insurance-customers-300.csv
!ls -l
# customers = pd.read_csv('./insurance-customers-300.csv', sep=';')
# customers = pd.read_csv('./insurance-customers-1500.csv', sep=';')
# customers = pd.read_csv('./insurance-customers-10000.csv', sep=';')
customers = pd.read_csv('./insurance-customers-500-test.csv', sep=';')
customers.describe()
###Output
_____no_output_____ |
.ipynb_checkpoints/Step 2. Make PLS models, get LBDQ, test models-checkpoint.ipynb | ###Markdown
Compositions
###Code
# generate comps
comps_path = fp + "tables\\TableS1_sample_compositions.xlsx"
lanl_comps = pd.read_excel(comps_path, sheet_name = "LANL")
mhc_comps = pd.read_excel(comps_path, sheet_name = "MHC")
comps = pd.merge(mhc_comps, lanl_comps, how = "outer") # merge comps
comps.columns = comps.columns.map(lambda x: x.split()[0])
comps = comps.drop_duplicates(subset = 'Sample') # remove duplicates
comps['Sample'] = comps['Sample'].astype(str)
comps = comps.sort_values(by='Sample')
comps = comps.replace(np.nan, "", regex=True)
cols = comps.columns.drop('Sample')
comps[cols] = comps[cols].apply(pd.to_numeric) # make columns numeric
# add random number assignment
rd = pd.read_excel('Z:\\Millennium Set\\Millennium_COMPS_viewonly.xlsx', usecols=[0,2])
rd = rd.drop([0,1]).rename(columns={'DO NOT TOUCH THIS':'Sample',
'X.1':'rand_num'}).reset_index(drop=True)
comps = pd.merge(rd, comps, how='right', on='Sample')
###Output
_____no_output_____
###Markdown
Datasets (baseline removal and normalization already applied)
###Code
cl_earth = pd.read_csv(fp+'CL_all_Earth_spectra.csv')
cl_mars = pd.read_csv(fp+'CL_all_Mars_spectra.csv')
cl_vac = pd.read_csv(fp+'CL_all_Vacuum_spectra.csv')
cc_mars = pd.read_csv(fp+'CC_all_Mars_spectra.csv')
###Output
_____no_output_____
###Markdown
Sensitivities
###Code
sensitivities = pd.read_csv(fp+'instrument_sensitivities.csv')
###Output
_____no_output_____
###Markdown
Split test and train
###Code
train_250_1000 = comps[comps.rand_num >= 250].reset_index(drop=True)
train_0_750 = comps[comps.rand_num <= 750].reset_index(drop=True)
test_250_1000 = comps[comps.rand_num < 250].reset_index(drop=True)
test_0_750 = comps[comps.rand_num > 750].reset_index(drop=True)
###Output
_____no_output_____
###Markdown
Outlier limits

Calculated by 1.5*IQR + Q3 on the entire MHC dataset, or the highest natural sample for doped elements.
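For reference, a minimal sketch (not in the original notebook) of how a 1.5*IQR + Q3 upper limit could be computed; the limits actually used below are simply loaded from the precomputed `NEW_OUTLIER_LIMITS.csv`:

```python
def iqr_q3_limit(series):
    """Upper outlier limit: Q3 + 1.5 * IQR (values above this are treated as outliers)."""
    q1, q3 = series.quantile([0.25, 0.75])
    return q3 + 1.5 * (q3 - q1)

# Assumed usage on the merged composition table, e.g. for MnO:
# mno_limit = iqr_q3_limit(comps['MnO'].dropna())
```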
###Code
outlier_limits = pd.read_csv('Z:\\Millennium Set\\NEW_OUTLIER_LIMITS.csv')
iqr_outliers = dict(zip(outlier_limits.element, outlier_limits.iqr_q3_outlier_limit))
dope_outliers = dict(zip(outlier_limits.element, outlier_limits.highest_natural_for_doped))
###Output
_____no_output_____
###Markdown
Make models per element
###Code
elements = ['MnO', 'Na2O', 'SiO2', 'Li', 'Ni', 'Pb', 'Rb', 'Sr', 'Zn']
n_ranges = ['0-750', '250-1000']
factors = {
'LOB' : 1.645,
'LOD' : 3.3,
'LOQ' : 10
}
methods = ['braga', 'metals']
dfs = [cl_earth,cl_mars,cl_vac,cc_mars]
df_names = ['CL_Earth', 'CL_Mars', 'CL_Vac', 'CC_Mars']
mhc_list = [cl_earth,cl_mars,cl_vac]
outliers = [iqr_outliers, dope_outliers]
# PLS parameters
n_folds = 5
max_components = 30
# prep for results
n_range_list = []
element_list = []
atm_list = []
inst_list = []
n_train_list = []
rmsecv_list = []
component_list = []
rmsec_list = []
train_r2_list = []
train_adj_r2_list = []
lob_list = []
lod_list = []
loq_list = []
outlier_list = []
n_test_list = []
rmsep_list = []
test_r2_list = []
test_adj_r2_list = []
method_list = []
for n_range in tqdm(n_ranges, desc='Number ranges'):
if n_range == '0-750':
all_train = train_0_750
all_test = test_0_750
else:
all_train = train_250_1000
all_test = test_250_1000
for element in tqdm(elements, leave=False, desc='Elements'):
count = 0
for df in tqdm(dfs, leave=False, desc='Dataset'):
if df_names[count].split('_')[0]=='CC':
inst='LANL'
else:
inst='ChemLIBS'
if df_names[count].split('_')[1]=='Vac':
atm = 'Vacuum'
else:
atm = df_names[count].split('_')[1]
outpath = "{}\\python_models\\{}_{}\\".format(fp, df_names[count], n_range)
count +=1
count1 = 0
for outlier in outliers:
n_folds = 5
max_components = 30
if count1 == 0:
o = 'iqr_q3'
else:
o = 'highest_natural'
count1 += 1
out_lim = outlier[element]
if isnan(out_lim):
temp_train = all_train.dropna(subset=[element]).reset_index(drop=True)[['Sample', element]]
temp_test = all_test.dropna(subset=[element]).reset_index(drop=True)[['Sample', element]]
else:
temp_train = all_train[all_train[element] <= out_lim].reset_index(drop=True)[['Sample', element]]
temp_test = all_test[all_test[element] <= out_lim].reset_index(drop=True)[['Sample', element]]
# train metadata
train_names = sorted(set(temp_train.Sample).intersection(df.columns)) # sorted
y_train = temp_train[temp_train.Sample.isin(train_names)][element].values # already alphabetized
n_train = len(y_train)
if n_train < n_folds:
n_folds = n_train
# train spectra
X_train = df[train_names]
spec_list = []
for column in X_train.columns:
spectrum = list(X_train[column])
spec_list.append(spectrum)
X_train = np.array(spec_list)
#---------------------CROSS--VALIDATION and TRAINING--------------------------#
cv_dict = {}
for n_components in np.arange(start=2, stop=max_components+1, step=1):
# define model
temp_pls = PLSRegression(n_components = n_components, scale=False)
# run CV and get RMSE
temp_rmsecv = (-cross_val_score(
temp_pls, X_train, y_train, cv=n_folds, scoring='neg_root_mean_squared_error'
)).mean()
# add results to dictionary
cv_dict.update({temp_rmsecv : n_components})
# select parameters of model with lowest rmsecv
rmsecv = min(list(cv_dict.keys()))
component = cv_dict[rmsecv]
model = PLSRegression(n_components = component, scale=False)
model.fit(X_train, y_train)
pickle.dump(model, open(outpath+element+'_'+o+'_model.asc', 'wb'), protocol=0)
coeff = pd.DataFrame(model.coef_)
coeff.to_csv(outpath+element+'_'+o+'_coeffs.csv', index=False)
for method in methods:
#---------------------CALCULATE---LBDQ--------------------------#
sensitivity = sensitivities[
(sensitivities.instrument == inst) &
(sensitivities.atmosphere == atm) &
(sensitivities.method == method)
]['sensitivity'].iloc[0]
# calculate regression vector
vector = pow(coeff, 2).sum().pow(.5) #square root of sum of squares
# calculate values
lob = factors['LOB'] * sensitivity * vector[0]
lod = factors['LOD'] * sensitivity * vector[0]
loq = factors['LOQ'] * sensitivity * vector[0]
#---------------------CALIBRATION--ERROR--------------------------#
train_pred = model.predict(X_train)
train_pred_true = pd.DataFrame({
'sample' : train_names,
'actual' : y_train.flatten().tolist(),
'pred' : train_pred.flatten().tolist()
})
temp = train_pred_true[(train_pred_true.pred < 100) &
(train_pred_true.pred > loq)].copy(deep=True)
if len(temp) == 0:
rmsec='NA'
train_r2 = 'NA'
train_adj_r2 = 'NA'
test_r2 = 'NA'
test_adj_r2 = 'NA'
rmsep = 'NA'
n_test = 'NA'
n_range_list.append(n_range)
outlier_list.append(o)
method_list.append(method)
element_list.append(element)
atm_list.append(atm)
inst_list.append(inst)
n_train_list.append(n_train)
rmsecv_list.append(rmsecv)
component_list.append(component)
lob_list.append(lob)
lod_list.append(lod)
loq_list.append(loq)
rmsec_list.append(rmsec)
train_r2_list.append(train_r2)
train_adj_r2_list.append(train_adj_r2)
n_test_list.append(n_test)
rmsep_list.append(rmsep)
test_r2_list.append(test_r2)
test_adj_r2_list.append(test_adj_r2)
continue
rmsec = sqrt(mean_squared_error(temp.actual, temp.pred))
train_r2 = model.score(X_train,y_train)
train_adj_r2 = 1 - (1-train_r2)*(len(temp) - 1) / (len(temp) - (temp.shape[1] - 1) - 1)
# fill with <LOQ / >100 wt%
loq_df = train_pred_true[train_pred_true.pred < loq].copy(deep=True)
loq_df['pred'] = '<LOQ'
over_df = train_pred_true[train_pred_true.pred > 100].copy(deep=True)
if len(over_df) > 0:
over_df['pred'] = '>100 wt%'
train_pred_true = pd.concat([temp, over_df, loq_df], ignore_index=True)
else:
train_pred_true = pd.concat([temp, loq_df], ignore_index=True)
train_pred_true.to_csv(outpath+element+"_"+o+"_"+method+'_train_preds.csv', index=False)
#------------------------TEST--MODEL------------------------#
# test metadata
test_names = sorted(set(temp_test.Sample).intersection(df.columns)) # sorted
y_test = temp_test[temp_test.Sample.isin(test_names)][element].values # already alphabetized
# test spectra
X_test = df[test_names]
spec_list = []
for column in X_test.columns:
spectrum = list(X_test[column])
spec_list.append(spectrum)
X_test = np.array(spec_list)
# run predictions
test_pred = model.predict(X_test)
# get RMSE-P
test_pred_true = pd.DataFrame({
'sample' : test_names,
'actual' : y_test,
'pred' : test_pred.flatten().tolist()
})
temp = test_pred_true[(test_pred_true.pred < 100) &
(test_pred_true.pred > loq)].copy(deep=True)
n_test = len(temp)
if n_test < 2:
test_r2 = 'NA'
test_adj_r2 = 'NA'
rmsep = 'NA'
else:
# get RMSE-P
rmsep = sqrt(mean_squared_error(temp.actual, temp.pred))
# get R2
test_r2 = r2_score(temp.actual,temp.pred)
# adjusted r2
test_adj_r2 = 1 - (1-test_r2)*(len(temp) - 1) / (len(temp) - (temp.shape[1] - 1) - 1)
# fill with <LOQ / >100 wt%
loq_df = test_pred_true[test_pred_true.pred < loq].copy(deep=True)
loq_df['pred'] = '<LOQ'
over_df = test_pred_true[test_pred_true.pred > 100].copy(deep=True)
if len(over_df) > 0:
over_df['pred'] = '>100 wt%'
test_pred_true = pd.concat([temp, over_df, loq_df], ignore_index=True)
else:
test_pred_true = pd.concat([temp, loq_df], ignore_index=True)
test_pred_true.to_csv(outpath+element+"_"+o+"_"+method+'_test_preds.csv', index=False)
n_range_list.append(n_range)
outlier_list.append(o)
method_list.append(method)
element_list.append(element)
atm_list.append(atm)
inst_list.append(inst)
n_train_list.append(n_train)
rmsecv_list.append(rmsecv)
component_list.append(component)
lob_list.append(lob)
lod_list.append(lod)
loq_list.append(loq)
rmsec_list.append(rmsec)
train_r2_list.append(train_r2)
train_adj_r2_list.append(train_adj_r2)
n_test_list.append(n_test)
rmsep_list.append(rmsep)
test_r2_list.append(test_r2)
test_adj_r2_list.append(test_adj_r2)
results = pd.DataFrame({
'element':element_list,
'outlier_defn':outlier_list,
'instrument':inst_list,
'atmosphere':atm_list,
'method':method_list,
'num_range':n_range_list,
'n_train':n_train_list,
'rmsecv':rmsecv_list,
'components':component_list,
'lob':lob_list,
'lod':lod_list,
'loq':loq_list,
'rmsec':rmsec_list,
'train_r2':train_r2_list,
'train_adj_r2':train_adj_r2_list,
'n_test':n_test_list,
'rmsep':rmsep_list,
'test_r2':test_r2_list,
'test_adj_r2':test_adj_r2_list
})
results.to_csv(fp+'results_011422.csv', index=False)
###Output
_____no_output_____ |
Pytorch/3 Image Classification With Logistic Regression.ipynb | ###Markdown
Working with Images

In this tutorial, we'll use our existing knowledge of PyTorch and linear regression to solve a very different kind of problem: image classification. We'll use the famous MNIST Handwritten Digits Database as our training dataset. It consists of 28px by 28px grayscale images of handwritten digits (0 to 9) and labels for each image indicating which digit it represents. Here are some sample images from the dataset:

![title](mnist.jpg)

We begin by installing and importing torch and torchvision. torchvision contains some utilities for working with image data. It also provides helper classes to download and import popular datasets like MNIST automatically.
###Code
# Imports
import torch
import torchvision
from torchvision.datasets import MNIST
# Download training dataset
dataset = MNIST(root='data/', download=True)
len(dataset)
###Output
_____no_output_____
###Markdown
The dataset has 60,000 images that we'll use to train the model. There is also an additional test set of 10,000 images used for evaluating models and reporting metrics in papers and reports. We can create the test dataset using the MNIST class by passing train=False to the constructor.
###Code
test_dataset = MNIST(root='data/', train=False)
len(test_dataset)
dataset[0]
###Output
_____no_output_____
###Markdown
It's a pair, consisting of a 28x28px image and a label. The image is an object of the class PIL.Image.Image, which is a part of the Python imaging library Pillow. We can view the image within Jupyter using matplotlib, the de-facto plotting and graphing library for data science in Python.
###Code
import matplotlib.pyplot as plt
%matplotlib inline
image, label = dataset[10]
plt.imshow(image, cmap='gray')
print('Label:', label)
###Output
Label: 3
###Markdown
It's evident that these images are relatively small in size, and recognizing the digits can sometimes be challenging even for the human eye. While it's useful to look at these images, there's just one problem here: PyTorch doesn't know how to work with images. We need to convert the images into tensors. We can do this by specifying a transform while creating our dataset.
###Code
import torchvision.transforms as transforms
###Output
_____no_output_____
###Markdown
PyTorch datasets allow us to specify one or more transformation functions that are applied to the images as they are loaded. The torchvision.transforms module contains many such predefined functions. We'll use the ToTensor transform to convert images into PyTorch tensors.
###Code
# MNIST dataset (images and labels)
dataset = MNIST(root='data/',
train=True,
transform=transforms.ToTensor())
img_tensor, label = dataset[0]
print(img_tensor.shape, label)
###Output
torch.Size([1, 28, 28]) 5
###Markdown
The image is now converted to a 1x28x28 tensor. The first dimension tracks color channels. The second and third dimensions represent pixels along the height and width of the image, respectively. Since images in the MNIST dataset are grayscale, there's just one channel. Other datasets have images with color, in which case there are three channels: red, green, and blue (RGB).Let's look at some sample values inside the tensor.
###Code
print(img_tensor[0,10:15,10:15])
print(torch.max(img_tensor), torch.min(img_tensor))
###Output
tensor([[0.0039, 0.6039, 0.9922, 0.3529, 0.0000],
[0.0000, 0.5451, 0.9922, 0.7451, 0.0078],
[0.0000, 0.0431, 0.7451, 0.9922, 0.2745],
[0.0000, 0.0000, 0.1373, 0.9451, 0.8824],
[0.0000, 0.0000, 0.0000, 0.3176, 0.9412]])
tensor(1.) tensor(0.)
###Markdown
The values range from 0 to 1, with 0 representing black, 1 white, and the values in between different shades of grey. We can also plot the tensor as an image using plt.imshow.
###Code
# Plot the image by passing in the 28x28 matrix
plt.imshow(img_tensor[0,10:15,10:15], cmap='gray');
###Output
_____no_output_____
###Markdown
Note that we need to pass just the 28x28 matrix to plt.imshow, without a channel dimension. We also pass a color map (cmap=gray) to indicate that we want to see a grayscale image. Training and Validation Datasets While building real-world machine learning models, it is quite common to split the dataset into three parts:Training set - used to train the model, i.e., compute the loss and adjust the model's weights using gradient descent.Validation set - used to evaluate the model during training, adjust hyperparameters (learning rate, etc.), and pick the best version of the model.Test set - used to compare different models or approaches and report the model's final accuracy.In the MNIST dataset, there are 60,000 training images and 10,000 test images. The test set is standardized so that different researchers can report their models' results against the same collection of images.Since there's no predefined validation set, we must manually split the 60,000 images into training and validation datasets. Let's set aside 10,000 randomly chosen images for validation. We can do this using the random_spilt method from PyTorch.
###Code
from torch.utils.data import random_split
train_ds, val_ds = random_split(dataset, [50000, 10000])
len(train_ds), len(val_ds)
###Output
_____no_output_____
###Markdown
It's essential to choose a random sample for creating a validation set. Training data is often sorted by the target labels, i.e., images of 0s, followed by 1s, followed by 2s, etc. If we create a validation set using the last 20% of images, it would only consist of 8s and 9s. In contrast, the training set would contain no 8s or 9s. Such a training-validation split would make it impossible to train a useful model.

We can now create data loaders to help us load the data in batches. We'll use a batch size of 128.
###Code
from torch.utils.data import DataLoader
batch_size = 128
train_loader = DataLoader(train_ds, batch_size, shuffle=True)
val_loader = DataLoader(val_ds, batch_size)
###Output
_____no_output_____
###Markdown
We set shuffle=True for the training data loader to ensure that the batches generated in each epoch are different. This randomization helps generalize & speed up the training process. On the other hand, since the validation data loader is used only for evaluating the model, there is no need to shuffle the images.

Model

Now that we have prepared our data loaders, we can define our model.

A logistic regression model is almost identical to a linear regression model. It contains weights and bias matrices, and the output is obtained using simple matrix operations (pred = x @ w.t() + b).

As we did with linear regression, we can use nn.Linear to create the model instead of manually creating and initializing the matrices.

Since nn.Linear expects each training example to be a vector, each 1x28x28 image tensor is flattened into a vector of size 784 (28*28) before being passed into the model.

The output for each image is a vector of size 10, with each element signifying the probability of a particular target label (i.e., 0 to 9). The predicted label for an image is simply the one with the highest probability.
###Code
import torch.nn as nn
input_size = 28*28
num_classes = 10
# Logistic regression model
model = nn.Linear(input_size, num_classes)
print(model.weight.shape)
model.weight
print(model.bias.shape)
model.bias
###Output
torch.Size([10])
###Markdown
Although there are a total of 7850 parameters here, conceptually, nothing has changed so far. Let's try and generate some outputs using our model. We'll take the first batch of 128 images from our dataset and pass them into our model.
###Code
for images, labels in train_loader:
print(labels)
print(images.shape)
outputs = model(images)
print(outputs)
break
images.shape
images.reshape(128, 784).shape
class MnistModel(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(input_size, num_classes)
def forward(self, xb):
xb = xb.reshape(-1, 784)
out = self.linear(xb)
return out
model = MnistModel()
###Output
_____no_output_____
###Markdown
Inside the __init__ constructor method, we instantiate the weights and biases using nn.Linear. And inside the forward method, which is invoked when we pass a batch of inputs to the model, we flatten the input tensor and pass it into self.linear.

xb.reshape(-1, 28*28) indicates to PyTorch that we want a view of the xb tensor with two dimensions. The length along the 2nd dimension is 28*28 (i.e., 784). One argument to .reshape can be set to -1 (in this case, the first dimension) to let PyTorch figure it out automatically based on the shape of the original tensor.

Note that the model no longer has .weight and .bias attributes (as they are now inside the .linear attribute), but it does have a .parameters method that returns a list containing the weights and bias.
###Code
model.linear
print(model.linear.weight.shape, model.linear.bias.shape)
list(model.parameters())
for images, labels in train_loader:
print(images.shape)
outputs = model(images)
break
print('outputs.shape : ', outputs.shape)
print('Sample outputs :\n', outputs[:2].data)
###Output
torch.Size([128, 1, 28, 28])
outputs.shape : torch.Size([128, 10])
Sample outputs :
tensor([[ 0.1382, 0.2365, 0.2644, -0.3711, -0.1832, -0.1097, 0.1195, 0.1574,
-0.4260, -0.0466],
[ 0.2055, 0.1653, -0.0131, -0.2925, -0.3635, -0.0528, 0.0811, 0.3076,
-0.2822, -0.2431]])
###Markdown
While it's easy to implement the softmax function (you should try it!), we'll use the implementation that's provided within PyTorch because it works well with multidimensional tensors (a list of output rows in our case).
###Code
import torch.nn.functional as F
###Output
_____no_output_____
###Markdown
The softmax function is included in the torch.nn.functional package and requires us to specify a dimension along which the function should be applied.
###Code
outputs[:2]
# Apply softmax for each output row
probs = F.softmax(outputs, dim=1)
# Look at sample probabilities
print("Sample probabilities:\n", probs[:2].data)
# Add up the probabilities of an output row
print("Sum: ", torch.sum(probs[0]).item())
###Output
Sample probabilities:
tensor([[0.1143, 0.1262, 0.1297, 0.0687, 0.0829, 0.0892, 0.1122, 0.1166, 0.0650,
0.0951],
[0.1257, 0.1208, 0.1010, 0.0764, 0.0712, 0.0971, 0.1110, 0.1392, 0.0772,
0.0803]])
Sum: 1.0000001192092896
###Markdown
Finally, we can determine the predicted label for each image by simply choosing the index of the element with the highest probability in each output row. We can do this using torch.max, which returns each row's largest element and the corresponding index
###Code
max_probs, preds = torch.max(probs, dim=1)
print(preds)
print(max_probs)
labels
###Output
_____no_output_____
###Markdown
Most of the predicted labels are different from the actual labels. That's because we have started with randomly initialized weights and biases. We need to train the model, i.e., adjust the weights using gradient descent to make better predictions. Evaluation Metric and Loss Function Just as with linear regression, we need a way to evaluate how well our model is performing. A natural way to do this would be to find the percentage of labels that were predicted correctly, i.e,. the accuracy of the predictions
###Code
outputs[:2]
torch.sum(preds == labels)
def accuracy(outputs, labels):
_, preds = torch.max(outputs, dim=1)
return torch.tensor(torch.sum(preds == labels).item() / len(preds))
###Output
_____no_output_____
###Markdown
The == operator performs an element-wise comparison of two tensors with the same shape and returns a tensor of the same shape, containing True for equal elements and False for unequal elements. Passing the result to torch.sum returns the number of labels that were predicted correctly. Finally, we divide by the total number of images to get the accuracy.

Note that we don't need to apply softmax to the outputs since its results have the same relative order. This is because e^x is an increasing function, i.e., if y1 > y2, then e^y1 > e^y2. The same holds after averaging out the values to get the softmax.

Let's calculate the accuracy of the current model on the first batch of data.
###Code
accuracy(outputs, labels)
probs
###Output
_____no_output_____
###Markdown
Accuracy is an excellent way for us (humans) to evaluate the model. However, we can't use it as a loss function for optimizing our model using gradient descent, for the following reasons:

- It's not a differentiable function. torch.max and == are both non-continuous and non-differentiable operations, so we can't use the accuracy for computing gradients w.r.t. the weights and biases.
- It doesn't take into account the actual probabilities predicted by the model, so it can't provide sufficient feedback for incremental improvements.

For these reasons, accuracy is often used as an evaluation metric for classification, but not as a loss function. A commonly used loss function for classification problems is the cross-entropy:

$$\text{cross-entropy} = -\frac{1}{N}\sum_{i=1}^{N} \log\big(p_i[y_i]\big)$$

where $p_i[y_i]$ is the predicted probability of the correct label $y_i$ for the $i$-th example. While it looks complicated, it's actually quite simple:

- For each output row, pick the predicted probability for the correct label. E.g., if the predicted probabilities for an image are [0.1, 0.3, 0.2, ...] and the correct label is 1, we pick the corresponding element 0.3 and ignore the rest.
- Then, take the logarithm of the picked probability. If the probability is high, i.e., close to 1, then its logarithm is a very small negative value, close to 0. And if the probability is low (close to 0), then the logarithm is a very large negative value. We also multiply the result by -1, which results in a large positive value of the loss for poor predictions.
- Finally, take the average of the cross entropy across all the output rows to get the overall loss for a batch of data.

Unlike accuracy, cross-entropy is a continuous and differentiable function. It also provides useful feedback for incremental improvements in the model (a slightly higher probability for the correct label leads to a lower loss). These two factors make cross-entropy a better choice for the loss function.

As you might expect, PyTorch provides an efficient and tensor-friendly implementation of cross-entropy as part of the torch.nn.functional package. Moreover, it also performs softmax internally, so we can directly pass in the model's outputs without converting them into probabilities.
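To make the description above concrete, here is a minimal sketch (not part of the original tutorial) that computes the same quantity by hand for the current batch; it should match `F.cross_entropy` up to floating-point error:

```python
# Manual cross-entropy, mirroring the steps described above
probs = F.softmax(outputs, dim=1)                     # convert raw scores to probabilities
picked = probs[torch.arange(len(labels)), labels]     # probability assigned to the correct label
manual_loss = -torch.log(picked).mean()               # negative log, averaged over the batch
print(manual_loss, F.cross_entropy(outputs, labels))  # the two values should agree
```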
###Code
outputs
loss_fn = F.cross_entropy
# Loss for current batch of data
loss = loss_fn(outputs, labels)
print(loss)
###Output
tensor(2.3424, grad_fn=<NllLossBackward>)
###Markdown
We know that cross-entropy is the negative logarithm of the predicted probability of the correct label, averaged over all training samples. Therefore, one way to interpret a resulting number like 2.23 is to look at e^-2.23, which is around 0.1, as the predicted probability of the correct label, on average. The lower the loss, the better the model.

Training the model

Now that we have defined the data loaders, model, loss function and optimizer, we are ready to train the model. The training process is identical to linear regression, with the addition of a "validation phase" to evaluate the model in each epoch. Here's what it looks like in pseudocode:

```
for epoch in range(num_epochs):
    # Training phase
    for batch in train_loader:
        # Generate predictions
        # Calculate loss
        # Compute gradients
        # Update weights
        # Reset gradients

    # Validation phase
    for batch in val_loader:
        # Generate predictions
        # Calculate loss
        # Calculate metrics (accuracy etc.)
    # Calculate average validation loss & metrics

    # Log epoch, loss & metrics for inspection
```

Some parts of the training loop are specific to the particular problem we're solving (e.g. loss function, metrics etc.) whereas others are generic and can be applied to any deep learning problem.

We'll include the problem-independent parts within a function called fit, which will be used to train the model. The problem-specific parts will be implemented by adding new methods to the nn.Module class.
###Code
def fit(epochs, lr, model, train_loader, val_loader, opt_func=torch.optim.SGD):
optimizer = opt_func(model.parameters(), lr)
history = [] # for recording epoch-wise results
for epoch in range(epochs):
# Training Phase
for batch in train_loader:
loss = model.training_step(batch)
loss.backward()
optimizer.step()
optimizer.zero_grad()
# Validation phase
result = evaluate(model, val_loader)
model.epoch_end(epoch, result)
history.append(result)
return history
###Output
_____no_output_____
###Markdown
The fit function records the validation loss and metric from each epoch. It returns a history of the training, useful for debugging & visualization.Configurations like batch size, learning rate, etc. (called hyperparameters), need to be picked in advance while training machine learning models. Choosing the right hyperparameters is critical for training a reasonably accurate model within a reasonable amount of time. It is an active area of research and experimentation in machine learning. Feel free to try different learning rates and see how it affects the training process.Let's define the evaluate function, used in the validation phase of fit.
###Code
def evaluate(model, val_loader):
outputs = [model.validation_step(batch) for batch in val_loader]
return model.validation_epoch_end(outputs)
class MnistModel(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(input_size, num_classes)
def forward(self, xb):
xb = xb.reshape(-1, 784)
out = self.linear(xb)
return out
def training_step(self, batch):
images, labels = batch
out = self(images) # Generate predictions
loss = F.cross_entropy(out, labels) # Calculate loss
return loss
def validation_step(self, batch):
images, labels = batch
out = self(images) # Generate predictions
loss = F.cross_entropy(out, labels) # Calculate loss
acc = accuracy(out, labels) # Calculate accuracy
return {'val_loss': loss, 'val_acc': acc}
def validation_epoch_end(self, outputs):
batch_losses = [x['val_loss'] for x in outputs]
epoch_loss = torch.stack(batch_losses).mean() # Combine losses
batch_accs = [x['val_acc'] for x in outputs]
epoch_acc = torch.stack(batch_accs).mean() # Combine accuracies
return {'val_loss': epoch_loss.item(), 'val_acc': epoch_acc.item()}
def epoch_end(self, epoch, result):
print("Epoch [{}], val_loss: {:.4f}, val_acc: {:.4f}".format(epoch, result['val_loss'], result['val_acc']))
model = MnistModel()
result0 = evaluate(model, val_loader)
result0
###Output
_____no_output_____
###Markdown
The initial accuracy is around 10%, which one might expect from a randomly initialized model (since it has a 1 in 10 chance of getting a label right by guessing randomly).We are now ready to train the model. Let's train for five epochs and look at the results.
###Code
history1 = fit(5, 0.001, model, train_loader, val_loader)
history2 = fit(5, 0.001, model, train_loader, val_loader)
history3 = fit(5, 0.001, model, train_loader, val_loader)
history4 = fit(5, 0.001, model, train_loader, val_loader)
history = [result0] + history1 + history2 + history3 + history4
accuracies = [result['val_acc'] for result in history]
plt.plot(accuracies, '-x')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.title('Accuracy vs. No. of epochs');
###Output
_____no_output_____ |
Pytorch/Pytorch_learn_by_dragen1860/lesson06-创建Tensor.ipynb | ###Markdown
$Import\space from\space numpy$
###Code
a = np.array([2,3.3])
torch.from_numpy(a)
a = np.ones([2,3])
torch.from_numpy(a)
###Output
_____no_output_____
###Markdown
$Import\space from\space list$
###Code
torch.tensor([2.,3.2])
torch.FloatTensor([2.,3.2])
torch.tensor([[2.,3.2],[1.,22.3]])
###Output
_____no_output_____
###Markdown
$uninitialized$
###Code
torch.empty(1)
torch.Tensor(2,3)
torch.IntTensor(2,3)
torch.FloatTensor(2,3)
###Output
_____no_output_____
###Markdown
$set\space default\space type$
###Code
torch.tensor([1.2,3]).type()
torch.set_default_tensor_type(torch.DoubleTensor)
torch.tensor([1.2,3]).type()
###Output
_____no_output_____
###Markdown
$rand/rand\_like/randint$: $rand$ samples uniformly from $[0,1)$; $rand\_like$ matches the shape of the given tensor; $randint$ draws integers from $[min,max)$
###Code
torch.rand(3,3)
a = torch.rand(3,3)
torch.rand_like(a)
# [min,max),shape
torch.randint(1,10,(3,3))
torch.randn(3,3)
torch.normal(mean = torch.full([10],0),std = torch.arange(1,0,-0.1))
###Output
../aten/src/ATen/native/TensorFactories.cpp:361: UserWarning: Deprecation warning: In a future PyTorch release torch.full will no longer return tensors of floating dtype by default. Instead, a bool fill_value will return a tensor of torch.bool dtype, and an integral fill_value will return a tensor of torch.long dtype. Set the optional `dtype` or `out` arguments to suppress this warning.
###Markdown
$full$
###Code
torch.full([2,3],7)
# scalar
torch.full([],7)
# vector
torch.full([1],7)
###Output
_____no_output_____
###Markdown
$arange/range$
###Code
torch.arange(0,10)
torch.arange(0,10,2)
# torch.range ❌
###Output
_____no_output_____
###Markdown
$linspace/logspace$
###Code
torch.linspace(0,10,steps = 4)
torch.linspace(0,10,steps = 10)
torch.linspace(0,10,steps = 11)
torch.logspace(0,-1,steps = 10)
torch.logspace(0,1,steps = 10)
###Output
_____no_output_____
###Markdown
$ones/zeros/eye$
###Code
torch.ones(3,3)
torch.zeros(3,3)
torch.eye(3,3)
a = torch.zeros(3,3)
torch.ones_like(a)
###Output
_____no_output_____
###Markdown
$randperm$
###Code
torch.randperm(10)
a = torch.rand(2,3)
a
b = torch.rand(2,2)
b
idx = torch.randperm(2)
idx
a[idx]
b[idx]
###Output
_____no_output_____ |
experiment 1.ipynb | ###Markdown
Small consumer 2-zone testUsing generated price timeseries, assume that the national grid system is beyond consumer's control.What we then need to do is to introduce a second zone (or “bus”) representing consumer’s assets1. Establish a second “zone” with a single connector between it and the first zone.2. The second zone should contain consumer’s: a. demand for power ... using `outputs_plan_national-grid/???.csv`, rescale National Grid mean & std. to 1%, and use them to generate a random timeseries of some form b. diesel generators ... (for now) set the *capacity* of these generators to be effectively infinite, but the consumption cost to be *very high* (say 2-3 times that of the peaking plant in the national grid zone but not as high as unmet demand). 3. The connector between the “zones” should be: a. Effectively infinite in capacity when taking power from the national grid zone to the consumer zone b. Zero cost per unit of power transferred c. Unidirectional (only takes power from Nat Grid to consumer, not vice versa). see https://calliope.readthedocs.io/en/stable/user/advanced_constraints.html#one-way-transmission-linksBig picture: the larger zone (National Grid) sets its own prices based on national demand/wind etc. The smaller zone is always able to meet its own internal demand if it chooses to (diesel capacity is very large) but would usually *prefer* to take power from the national grid rather than use its own diesel generators. No power export from consumer to grid.Further work:* Shrink national grid generation capacities (introduce shortages), observe impact on consumer zone.* Look at swapping the diesel generators for *storage* generator(s).
###Code
# Suppress minor warnings
import warnings
warnings.filterwarnings('ignore')
import calliope
import models
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
Read in National grid planning output (full 1980-2017)From timeseries calculate mean and standard deviation of demand.Change back from negative convention (calliope) to positive demand convention (readable)
###Code
# read csv
df = pd.read_csv('outputs_plan_national-grid/inputs_resource.csv',
usecols=['timesteps', 'resource', 'techs'], index_col='timesteps')
# split demand / wind /solar into 3 separate columns
national_demand_index = pd.to_datetime(df[df['techs'] == 'demand_power'].index)
national_demand = pd.DataFrame(dict(), index=national_demand_index)
national_demand['demand'] = - df[df['techs'] == 'demand_power']['resource']
national_demand['wind'] = df[df['techs'] == 'wind']['resource']
national_demand['solar'] = df[df['techs'] == 'solar']['resource']
del df
national_demand_stats = mean, std = [stat(national_demand.demand) for stat in [np.mean, np.std]]
###Output
_____no_output_____
###Markdown
Generate a random timeseries form with similar overall mean/stddev to UK wide demand (scaled to 1%), ensure no negative values
###Code
# ensure always using the same seed (which we obtained one-off using RNG)
np.random.seed(285183912)
# normal distribution parameters = 1% National Grid mean/std dev.
normal_dist = [stat*0.01 for stat in national_demand_stats]
# Sample from normal distribution
demand_region2 = np.random.normal(*normal_dist, len(national_demand))
# Force positives (very minute chance of this occurring...)
demand_region2[demand_region2 < 0] = 0
demand_region2
###Output
_____no_output_____
###Markdown
Load secondary zone demand into a DataFrameAllows easy loading into ts_data
###Code
df_region2 = pd.DataFrame({'demand_region2': demand_region2},
index=pd.to_datetime(national_demand.index))
###Output
_____no_output_____
###Markdown
`operational` Calliope 2-zone model (with infinite diesel cap)Note: can shrink operational range to a subset, e.g. the year 2017 only
###Code
date_start, date_end = '1980', '2017'
# Import timeseries data demand / wind (as in 1_region)
ts_data = models.load_time_series_data('2_region', additional_data=df_region2)
# Crop to date range
ts_data = ts_data.loc[date_start:date_end]
display(ts_data.head(6))
# Read in generation capacities
generation_capacities_planned = pd.read_csv('outputs_plan_national-grid/results_energy_cap.csv')
# Rename techs
generation_capacities = dict()
for tech, cap in zip(generation_capacities_planned['techs'],
generation_capacities_planned['energy_cap']):
if tech not in ['unmet', 'demand_power']:
key = f'cap_{tech}_region1'
generation_capacities[key] = cap
# Insert diesel generators, transmission capacity = max possible region2 demand
generation_capacities['cap_generators_region2'] = max(ts_data['demand_region2'])
generation_capacities['cap_transmission_region1_region2'] = max(ts_data['demand_region2'])
# Display
display(generation_capacities)
# Create the model with fixed capacities
model = models.TwoRegionModel(ts_data, 'operate', fixed_caps=generation_capacities)
display(model.preview('inputs.resource'))
model.run()
model.get_summary_outputs()
def cap_mean_plot(model, subtitle: str=None, fname: str=None):
# Extract mean capacity factors from model
CF = dict(zip(model.results.capacity_factor.loc_tech_carriers_prod.values,
np.mean(model.results.capacity_factor.values, axis=1)))
# setup figure axs and title
fig, axs = plt.subplots(3, 3, figsize=(12,9))
title = 'Capacity Factor Means'
if subtitle:
title += '\n' + subtitle
fig.suptitle(title)
# pie chart for each tech
axs_idx = 0
for tech, cap in CF.items():
axs_pos = (axs_idx%3, axs_idx//3)
label = tech[:-7].replace('::','\n').replace('transmission_', 'transmission\n').replace(':region1','')
label += f'\n{cap:.3f}'
axs[axs_pos].pie([cap, 1-cap], labels=[label,None])
axs_idx += 1
# plot legend
fig.subplots_adjust(top=0.95, bottom=0.05, right=0.95, left=0.05, hspace=0.01, wspace=0.15)
axs[-1, -1].pie([0,0], labels=[r'$\mu$', r'1-$\mu$'])
plt.legend()
# save plot
if fname:
plt.savefig('plots/'+fname, dpi=300)
cap_mean_plot(model)
# Export all model outputs to CSV (creates directory called 'outputs_operate')
output_folder = 'outputs_operational_two-zone'
models.rm_folder(output_folder)
model.to_csv(output_folder)
# Generate HTML plots
for var in ['power', 'cost_var', 'resource']:
plot_html = model.plot.timeseries(array=var, html_only=True)
models.save_html(plot_html, f'plots/operate_2_{var}.html', f'{var} plot')
model.preview('results.systemwide_capacity_factor', loc='carriers', time_idx=False,
index=model.results.systemwide_capacity_factor.techs.values)
###Output
_____no_output_____
###Markdown
Adjust Generators Operating costTrial different operating costs, and get the capacity factor
###Code
# Create the model with fixed capacities
# change the generator operating cost
mod = dict()
# sub baseload cost
print('generators om_con < baseload om_con')
mod['sb'] = models.TwoRegionModel(ts_data, 'operate', fixed_caps=generation_capacities, extra_override='generator_cost_sb')
# sub peaking cost
print('generators om_con < peaking om_con')
mod['sp'] = models.TwoRegionModel(ts_data, 'operate', fixed_caps=generation_capacities, extra_override='generator_cost_sp')
# sub unmet cost
print('generators om_con < unmet om_con')
mod['su'] = models.TwoRegionModel(ts_data, 'operate', fixed_caps=generation_capacities, extra_override='generator_cost_su')
for name, model in mod.items():
print(f'running {name} model')
model.run()
# Print out capacity factors
model_example = list(mod.values())[0]
columns = [s.split('::')[1] for s in model_example.results.capacity_factor.loc_tech_carriers_prod.values]
cap_factors = pd.DataFrame(columns=columns)
for name, model in mod.items():
cap_factors = cap_factors.append(dict(zip(columns, np.mean(model.results.capacity_factor.values, axis=0))), ignore_index=True)
print('Capacity factors under each scenario:')
cap_factors.index = list(mod.keys())
display(cap_factors)
# calculate prices ???
# memory clearing
del mod
###Output
_____no_output_____
###Markdown
Induce National Grid ShortagesWith the original `om_con=0.1` for generators, trial a range of reduced capacity national grid operation
###Code
# Create the model with fixed capacities
# change the baseload and peaking capacities
mod = dict()
# shortcut for baseload and peaking capacities
baseload = generation_capacities['cap_baseload_region1']
peaking = generation_capacities['cap_peaking_region1']
# reduced ALL region1 capacities by X%
for reduction in range(0, 31, 5):
reduction_percent = 1 - reduction / 100
caps = {tech: val if 'region2' in tech else val*reduction_percent
for tech, val in generation_capacities.items()}
mod[reduction] = models.TwoRegionModel(ts_data, 'operate', fixed_caps=caps)
for name, model in mod.items():
print(f'running {name} model')
model.run()
# Print out capacity factors
model_example = list(mod.values())[0]
columns = [s.split('::')[1] for s in model_example.results.capacity_factor.loc_tech_carriers_prod.values]
cap_factors = pd.DataFrame(columns=columns)
for name, model in mod.items():
cap_factors = cap_factors.append(dict(zip(columns, np.mean(model.results.capacity_factor.values, axis=0))), ignore_index=True)
print('Capacity factors under each scenario:')
cap_factors.index = list(mod.keys())
display(cap_factors)
# Calculate zone 2 prices (NG price * transmission CF + Diesel price * generators CF)
# National Grid Price Timeseries (select price of most expensive online tech)
# First order techs by carrier consumption cost (GBP m per GWh)
om_con_zip = zip(model_example.inputs.cost_om_con.values[0],
model_example.inputs.cost_om_con.loc_techs_om_cost.values)
om_con_techs = sorted((om_con, tech) for om_con, tech in om_con_zip)
# When online, select price of next most expensive tech
prices = dict()
for key, model in mod.items():
price = np.zeros_like(model.inputs.timesteps, dtype=np.float64)
cost_var_techs = list(model.results.cost_var.loc_techs_om_cost.values)
for om_con, tech in om_con_techs:
# Select tech's cost_var
cost_var_idx = cost_var_techs.index(tech)
cost_var = model.results.cost_var.values[0, cost_var_idx]
# Indexes when online at non-zero cost
online = np.nonzero(cost_var)
# Reset price when online
price[online] = float(om_con)
# Save price to dict
prices[key] = price
# Diesel prices
# (unless specified, should be the same in all cases, do loop anyway incase set otherwise)
generator_prices = dict()
for key, model in mod.items():
cost_var_techs = list(model.inputs.cost_om_con.loc_techs_om_cost)
generators_idx = cost_var_techs.index('region2::generators_region2')
generator_prices[key] = np.asscalar(model.inputs.cost_om_con[0, generators_idx].values)
# National Grid resources (region 2 import) and Generators region 2 consumption
grid_res, gen_res = dict(), dict()
for key, model in mod.items():
grid_res[key] = -1 * model.results.carrier_con.loc['region1::transmission_region1_region2:region2::power'].values
gen_res[key] = +1 * model.results.carrier_prod.loc['region2::generators_region2::power'].values
# Combine with CF to get consumption cost
con_cost = dict()
for key, model in mod.items():
# NG cost (variable cost * resource timeseries)
grid_con_cost = prices[key] * grid_res[key]
# generation cost (fixed cost * resource timeseries)
generators_con_cost = generator_prices[key] * gen_res[key]
# combined consumption cost
con_cost[key] = grid_con_cost + generators_con_cost
# Total costs
con_cost_tot = {key: np.sum(val) for key, val in con_cost.items()}
import IPython; IPython.embed()
# memory clearing
del mod
###Output
_____no_output_____ |
2_image_augmentation/image_augmentation.ipynb | ###Markdown
Image Augmentation Validation
###Code
import utils
import skimage
"""Build Dataset."""
print("Loading Training Dataset...")
DATASET_DIR = DIR_PATH
# Training dataset.
dataset_train = utils.GenericDataset()
dataset_train.load_item(DATASET_DIR, "train")
dataset_train.prepare()
print("Training Classes ", dataset_train.class_names)
# Validation dataset
print("\n\nLoading Validation Dataset...")
dataset_val = utils.GenericDataset()
dataset_val.load_item(DATASET_DIR, "val")
dataset_val.prepare()
print("Validation Classes ", dataset_val.class_names)
import visualization as vis
# Load and display random samples
image_ids = np.random.choice(dataset_train.image_ids, 4)
for image_id in image_ids:
image = dataset_train.load_image(image_id)
mask, class_ids = dataset_train.load_mask(image_id)
vis.display_top_masks(image, mask, class_ids,dataset_train.class_names , limit=4)
###Output
_____no_output_____ |
mnist_classifier.ipynb | ###Markdown
Running the GraderTo run the grader below, you'll want to run the above training from scratch (if you have otherwise already run it multiple times). You can reset your kernel and then run all cells for the grader code to appropriately check that your weights and biases achieved the desired end result.
###Code
### DON'T MODIFY ANYTHING BELOW ###
### Be sure to run all cells above before running this cell ###
import grader
try:
grader.run_grader(get_weights, get_biases, linear)
except Exception as err:
print(str(err))
###Output
You got it! That's the correct answer.
|
Criptografia_RSA.ipynb | ###Markdown
###Code
#@title Dependências
import random
import math
from IPython.core.display import display, HTML
###Output
_____no_output_____
###Markdown
Greatest common divisor (MDC)
###Code
#@title <a href="https://pt.wikipedia.org/wiki/Algoritmo_de_Euclides"> Algoritmo euclidiano </a>
def mdc(a : int, b : int) -> int:
while b != 0:
a,b = b, a%b
return a
###Output
_____no_output_____
###Markdown
Public key >Choose two prime numbers, p and q, and keep them secret
###Code
'''
#p_q_indice_e = (111,7,-1) #@param {type:"raw"}
p = p_q_indice_e[0]
q = p_q_indice_e[1]
indice_e = p_q_indice_e[2]
'''
p = "373" #@param [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 105646155480762397, 439351292910452432574786963588089477522344331]
q = "37" #@param [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 105646155480762397, 864608136454559457049]
indice_e = -1#@param {type:"raw"}
p,q = int(p),int(q)
###Output
_____no_output_____
###Markdown
>Compute the public key **n**, where ``` n = p⋅q ```
###Code
n = p * q
display(HTML('<h2>n = %d</h2>'%(n)))
###Output
_____no_output_____
###Markdown
Compute ϕ(n) Given:```n = p⋅qϕ(n) = lcm(ϕ(p),ϕ(q))p and q are prime``` Thus:```ϕ(n) = mmc(p − 1, q − 1)```(i.e. the least common multiple of p − 1 and q − 1; strictly speaking this is the Carmichael totient λ(n), which is what the code below computes and which also works for RSA key generation)
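As a quick worked check with the default ```p = 373, q = 37``` selected above: ```mmc(372, 36) = 372⋅36 / mdc(372, 36) = 13392 / 12 = 1116```.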
###Code
def mmc(a : int, b : int) -> int:
return a*b // mdc(a,b)
def ϕ(p : int, q : int) -> int:
return mmc(p - 1,q - 1)
phi_n = ϕ(p,q)
display(HTML('<h2>ϕ(%d) = %d</h2>'%((p*q),phi_n)))
###Output
_____no_output_____
###Markdown
Coprime >Compute an integer ***e***, part of the public key, where:```1 < e < ϕ(n) mdc(e, ϕ(n)) = 1;```That is, ***e*** is coprime to ***ϕ(n)***
###Code
def coprimo(phi_n : int) -> list:
#A busca começa em 2, uma vez que todo numero é divisível por 1
e = 2
res = []
while e < phi_n:
if ( mdc(e, phi_n) == 1 ):
res.append(e)
e+= 1
return res
es = coprimo(phi_n)
if(indice_e < 0):
e = random.choice(es)
else:
e = es[indice_e]
display(HTML('<h3>Coprimos de %d: %s</h3>'%(phi_n, es)))
display(HTML('<h2>e = %d</h2>'%(e)))
###Output
_____no_output_____
###Markdown
Modular multiplicative inverse >Given:```d⋅e ≡ 1 (mod ϕ(n)); ```It follows that:```d ≡ e^−1 (mod ϕ(n))```
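For instance, the single-cell run at the end of this notebook (with the same defaults, so ϕ(n) = 1116) prints ```n = 13801, e = 989, d = 413```, and indeed ```989⋅413 = 408457 = 366⋅1116 + 1 ≡ 1 (mod 1116)```.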
###Code
#@title <a href="https://pt.wikipedia.org/wiki/Algoritmo_de_Euclides_estendido"> Algoritmo Euclidiano Estendido </a>
def mdce(a : int, b : int) -> tuple:
if (a == 0):
return 0,1,b
x, y, mdc_ = mdce( b % a, a)
return y - (b//a) * x, x, mdc_
r_mdce = mdce(e, phi_n)
d = r_mdce[0]
if(d < 0):
d += phi_n
display(HTML('<h2>%d ≡ (%d^-1 * mod %d)</h2>'%(d, e, phi_n)))
display(HTML("<h2> %d * %d + %d * %d = %d </h2>"%(e, r_mdce[0], phi_n, r_mdce[1], r_mdce[2])))
###Output
_____no_output_____
###Markdown
Keys
###Code
display(HTML('<h2>n, e, d: (%d, %d, %d)</h2>'%(n, e, d)))
display(HTML('<h2>Chave Pública: (%d, %d)</h2>'%(n, e)))
display(HTML('<h2>Chave Privada: (%d, %d)</h2>'%(n, d)))
display(HTML('<h2>Numero de caracteres (Tamanho do bloco) do byte criptografado: %d</h2>'%(len(format(n,"x")))))
###Output
_____no_output_____
###Markdown
Encoding
###Code
def separa_string(string :str, janela : int, padding : bool) -> list:
#Se a string não for multiplo de janela, faz um padding no final
if(padding):
tamanho_padding = len(string)%janela
if(tamanho_padding != 0):
string += '0'*(janela-tamanho_padding)
return [string[i-janela:i] for i in range(janela, len(string)+janela, janela)]
def string_para_bytes(string :str) -> list:
return separa_string(string, 2, True)
def bytes_para_string(mensagem_bytes : str) -> str:
return ''.join([chr(int(b,16)) for b in mensagem_bytes])
mensagem = "Eu gosto de criptografia" #@param {type:"string"}
mensagem_bytes = string_para_bytes(''.join([format(ord(l), 'x') for l in mensagem]))
print('Bytes da mensagem:',mensagem_bytes)
print("Mensagem: %s"%bytes_para_string(mensagem_bytes))
def rsa_criptografar(valor : int) -> int:
return pow(valor, e, n)
res = [rsa_criptografar(ord(l)) for l in mensagem]
print('Caracteres criptografados:',res)
###Output
Caracteres criptografados: [8275, 8923, 10273, 5098, 4292, 7699, 1604, 4292, 10273, 6760, 7279, 10273, 9530, 10723, 5063, 11841, 1604, 4292, 5098, 10723, 8718, 7166, 5063, 8718]
###Markdown
>Some interesting details
###Code
#Vamos assumir que o valor de cada bloco é de 0 até 0xFFF
string = ''+''.join([format(r, 'x') for r in res])
#cada caracter varia de 0 até 0xFF
mensagem_bytes = string_para_bytes(string)
print("Bytes criptografado:", mensagem_bytes)
display(HTML('<h2>Mensagem criptografada: %s</h2>'%(bytes_para_string(mensagem_bytes))))
def rsa_descriptografar(valor : int) -> int:
return pow(valor, d, n)
#Descriptografa
mensagem_bytes = [rsa_descriptografar(l) for l in res]
#Converte de inteiro para hex, para melhor visualização
mensagem_bytes = [format(l,'x') for l in mensagem_bytes]
print("Bytes descriptografados:", mensagem_bytes)
display(HTML('<h2>Mensagem descriptografada: %s</h2>'%(bytes_para_string(mensagem_bytes))))
###Output
Bytes descriptografados: ['45', '75', '20', '67', '6f', '73', '74', '6f', '20', '64', '65', '20', '63', '72', '69', '70', '74', '6f', '67', '72', '61', '66', '69', '61']
###Markdown
The whole code in a single cell
###Code
import random
import math
class RSAFactory():
#Métodos privados
def __mdc(a : int, b : int) -> int:
while b != 0:
a,b = b, a%b
return a
def __mmc(a : int, b : int) -> int:
return a*b // mdc(a,b)
def __ϕ(p : int, q : int) -> int:
return mmc(p - 1,q - 1)
def __coprimo(phi_n) -> list:
#A busca começa em 2, uma vez que todo numero é divisível por 1
e = 2
res = []
while e < phi_n:
if ( RSAFactory.__mdc(e, phi_n) == 1 ):
res.append(e)
e+= 1
return res
def __mdce(a : int, b : int) -> tuple:
if (a == 0):
return 0,1,b
x, y, mdc_ = RSAFactory.__mdce( b % a, a)
return y - (b//a) * x, x, mdc_
def FabricarRSA( p: int, q: int, indice_e: int) -> tuple:
n = p * q
phi_n = RSAFactory.__ϕ(p,q)
es = RSAFactory.__coprimo(phi_n)
#se indice_e for negativo, escolhe um aleatorio
if(indice_e < 0):
e = random.choice(es)
else:
e = es[indice_e]
d,_,_ = RSAFactory.__mdce(e, phi_n)
if (d < 0):
d += phi_n
#ignora sempre o 0x... que conta como 2 caracteres
tamanho_bloco = len(hex(n))-2
return (n,e,d,tamanho_bloco)
class RSA():
def __init__(self, exp : int, mod : int, tamanho_bloco : int):
self._exp = exp
self._mod = mod
self._tamanho_bloco = tamanho_bloco
self._mascara = mascara = "%%0%dx"%self._tamanho_bloco
def __separa_string(self, string :str, janela : int, padding : bool) -> list:
#Se a string não for multiplo de janela, faz um padding no final
if(padding):
tamanho_padding = len(string)%janela
if(tamanho_padding != 0):
string += '0'*(janela-tamanho_padding)
return [int(string[i-janela:i],16) for i in range(janela, len(string)+janela, janela)]
def _hex_para_bytes(self, string :str) -> list:
return self.__separa_string(string, 2, True)
def _bytestream_para_int(self,byte_stream : str) -> list:
return self.__separa_string(byte_stream, self._tamanho_bloco, False)
def _operacao(self, valor : int) -> int:
return pow(valor, self._exp, self._mod)
class RSAPrivado(RSA):
def __init__(self, n : int, d: int, tamanho_bloco : int):
RSA.__init__(self,d,n,tamanho_bloco)
def criptografar(self, mensagem : str) -> bytes:
#Converte char para int, criptografa e converte o resultado para hex
#faz padding nos numeros que não possuem a quantidade de caracters igual a
#quantidade da janela
criptografado = [self._mascara%(self._operacao(ord(l))) for l in mensagem]
criptografado = ''.join(criptografado)
return bytes(self._hex_para_bytes(criptografado))
class RSAPublico(RSA):
def __init__(self, n : int, e: int, tamanho_bloco : int):
RSA.__init__(self,e,n,tamanho_bloco)
def descriptografar(self, mensagem : bytes) -> list:
#Converte de byte para hex, desfazendo a primeira operação na criptografia
mensagem = ["%02x"%(v) for v in mensagem]
mensagem = ''.join(mensagem)
criptografado = self._bytestream_para_int(mensagem)
descriptografado = [self._operacao(l) for l in criptografado]
"""
Ao criptografar, foi determinado um protocolo onde, caso o numero de bytes
seja impar (um byte é representado por uma dupla de caracteres), deve-se
adicionar um valor 'dummy' (no caso, 0) no final da string.
"""
if( int (descriptografado[-1]) == 0):
descriptografado = descriptografado[:-1]
return descriptografado
n, e, d, tamanho_bloco = RSAFactory.FabricarRSA(p,q,indice_e)
print("n = %d, e = %d, d = %d"%(n, e, d))
print("Tamanho do bloco: %d"%tamanho_bloco)
modulo_criptografia_privado = RSAPrivado(n, e, tamanho_bloco)
modulo_criptografia_publico = RSAPublico(n, d, tamanho_bloco)
mesangem_criptografada = modulo_criptografia_privado.criptografar('Teste mensagem')
print('Stream de bytes criptografados:',mesangem_criptografada)
mensagem_descriptografada = modulo_criptografia_publico.descriptografar(mesangem_criptografada)
mensagem_descriptografada = ''.join([chr(l) for l in mensagem_descriptografada])
print('Stream de bytes descriptografados:',mensagem_descriptografada)
###Output
n = 13801, e = 989, d = 413
Tamanho do bloco: 4
Stream de bytes criptografados: b'\x19e+\x1d\x1c\xba"|+\x1d\x1eD\x1c\x8c+\x1d,\xa8\x1c\xba\x00-\x07\xdc+\x1d\x1c\x8c'
Stream de bytes descriptografados: Teste mensagem
|
ikea.ipynb | ###Markdown
Scraping
###Code
ikea_search = requests.get(f"https://sik.search.blue.cdtapps.com/no/no/product-list-page?sessionId=1ab42ab9-983c-470d-bb61-4e8e675c443d&category=fu003&size=9999&c=lf&v=20200617").json()
trans = {'Bredde': 'width',
'Maks bredde' : 'max_width',
'Høyde': 'height',
'Lengde': 'length',
'Dybde': 'depth',
'Maks. dybde':'max_depth',
'Min. dybde':'min_depth',
'Sittehøyde': 'sit_height',
'Setehøyde': 'sit_height',
'Setedybde' : 'sit_depth',
'Maks setedybde': "max_sit_depth",
'Min. setedybde': "min_sit_depth",
'Sete bredde' : 'sit_width',
'Sengebredde': 'bed_width',
'Sengelengde': 'bed_length',
'Armlenebredde' : 'arm_width',
'Fri høyde under møbler': 'clearance'
}
def ikea(ik):
try:
url = ik['pipUrl']
content = requests.get(url).content
soup = BeautifulSoup(content, 'html.parser')
js1 = json.loads(soup.find('div', attrs={'class':"js-price-package range-revamp-pip-price-package"}).attrs['data-initial-props'])
price_amount = js1['price']['mainPriceProps']['price']['integer']
price_currency = ik['currencyCode']#js1['price']['currencySymbol']
js1 = json.loads(soup.find('div', attrs={'class':"js-price-package range-revamp-pip-price-package"}).attrs['data-initial-props'])
item = {
'source' : 'IKEA',
'title' : soup.find('meta', attrs={'property':'og:title'}).attrs['content'][:-7],
'image' : ik['mainImageUrl'], #soup.find('meta', attrs={'property':'og:image'}).attrs['content'],
'description' : soup.find('meta', attrs={'property':'og:description'}).attrs['content'], # js1['productDescription']
'url' : soup.find('meta', attrs={'property':'og:url'}).attrs['content'],
'price' : price_amount+' '+price_currency,
'typeName': ik['typeName'],
#'id': ik['id'],
'onlineSellable': ik['onlineSellable'],
'other_colors': ik['gprDescription']['colors'],
#'sku' : soup.find('meta', attrs={'itemprop':'sku'}).attrs['content'],
'brand' : js1['productName']
}
dims = soup.find('dl', attrs={'class':'range-revamp-product-dimensions__list'})
if dims is not None:
for dim in dims.find_all('div', attrs={'class':'range-revamp-product-dimensions__list-container'}):
label = dim.find('dt').text.split(':')[0]
label = trans.get(label,label)
val = dim.find('dd').text
item[label] = val
#try:
# item[label+"_i"] = int(val[:-3])
#except ValueError:
# pass
except Exception as e:
print(url+ ' ' + str(e))
return item
ikea_iter = (ikea(ik) for ik in ikea_search['productListPage']['productWindow'])
ikea_pd.to_csv('./data/ikea.csv')
###Output
_____no_output_____
###Markdown
Start here
###Code
ikea_pd = pd.read_csv('https://raw.githubusercontent.com/veonua/norske_sofaer/master/data/ikea.csv')
len(ikea_pd)
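# The scraped dimension columns are strings like "88 cm" and may appear under several
# (partly Norwegian-labelled) names; the helpers below pick the first non-empty
# candidate per row and strip the trailing " cm" before casting to int.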
def max_depth(r):
r = r.fillna(0)
val = r['max_depth'] or r['Bredde venstre'] or r['Dybde sjeselong'] or r['Setebredde venstre'] or r['depth']
if val == 0: return 0
return int(val[:-3])
def min_depth(r):
r = r.fillna(0)
val = r['min_depth'] or r['depth']
if val == 0: return 0
return int(val[:-3])
def width(r):
r = r.fillna(0)
val = r['width'] or r['Bredde høyre'] or r['Setebredde høyre']
if val == 0: return 0
return int(val[:-3])
ikea_pd['max_depth_i'] = ikea_pd.apply(max_depth, axis=1)
ikea_pd['min_depth_i'] = ikea_pd.apply(min_depth, axis=1)
ikea_pd['width_i'] = ikea_pd.apply(width, axis=1)
filt = ikea_pd[['image', 'price', 'brand', 'max_depth_i', 'width_i', 'clearance', 'min_depth_i', 'Bredde venstre', 'Bredde høyre', 'url']]
fw = ikea_pd['width_i'].between(260, 285) # | (ikea_pd['width_a'] == 0)
fd = ikea_pd['max_depth_i'].between(170,223) | (ikea_pd['max_depth_i'] == 0)
filt = filt[fd & fw].sort_values('brand')
from IPython.display import HTML
def image_formatter(im):
return f'<img src="{im}" width="350px">'
def a_formatter(url):
return f'<a href="{url}"> {url}</a>'
disp = filt
HTML(disp.to_html(formatters={'image': image_formatter, 'url': a_formatter}, escape=False))
###Output
_____no_output_____ |
Notebooks/RadarCOVID-Report/Daily/RadarCOVID-Report-2021-07-19.ipynb | ###Markdown
RadarCOVID-Report Data Extraction
###Code
import datetime
import json
import logging
import os
import shutil
import tempfile
import textwrap
import uuid
import matplotlib.pyplot as plt
import matplotlib.ticker
import numpy as np
import pandas as pd
import pycountry
import retry
import seaborn as sns
%matplotlib inline
current_working_directory = os.environ.get("PWD")
if current_working_directory:
os.chdir(current_working_directory)
sns.set()
matplotlib.rcParams["figure.figsize"] = (15, 6)
extraction_datetime = datetime.datetime.utcnow()
extraction_date = extraction_datetime.strftime("%Y-%m-%d")
extraction_previous_datetime = extraction_datetime - datetime.timedelta(days=1)
extraction_previous_date = extraction_previous_datetime.strftime("%Y-%m-%d")
extraction_date_with_hour = datetime.datetime.utcnow().strftime("%Y-%m-%d@%H")
current_hour = datetime.datetime.utcnow().hour
are_today_results_partial = current_hour != 23
###Output
_____no_output_____
###Markdown
Constants
###Code
from Modules.ExposureNotification import exposure_notification_io
spain_region_country_code = "ES"
germany_region_country_code = "DE"
default_backend_identifier = spain_region_country_code
backend_generation_days = 7 * 2
daily_summary_days = 7 * 4 * 3
daily_plot_days = 7 * 4
tek_dumps_load_limit = daily_summary_days + 1
###Output
_____no_output_____
###Markdown
Parameters
###Code
environment_backend_identifier = os.environ.get("RADARCOVID_REPORT__BACKEND_IDENTIFIER")
if environment_backend_identifier:
report_backend_identifier = environment_backend_identifier
else:
report_backend_identifier = default_backend_identifier
report_backend_identifier
environment_enable_multi_backend_download = \
os.environ.get("RADARCOVID_REPORT__ENABLE_MULTI_BACKEND_DOWNLOAD")
if environment_enable_multi_backend_download:
report_backend_identifiers = None
else:
report_backend_identifiers = [report_backend_identifier]
report_backend_identifiers
environment_invalid_shared_diagnoses_dates = \
os.environ.get("RADARCOVID_REPORT__INVALID_SHARED_DIAGNOSES_DATES")
if environment_invalid_shared_diagnoses_dates:
invalid_shared_diagnoses_dates = environment_invalid_shared_diagnoses_dates.split(",")
else:
invalid_shared_diagnoses_dates = []
invalid_shared_diagnoses_dates
###Output
_____no_output_____
###Markdown
COVID-19 Cases
###Code
report_backend_client = \
exposure_notification_io.get_backend_client_with_identifier(
backend_identifier=report_backend_identifier)
@retry.retry(tries=10, delay=10, backoff=1.1, jitter=(0, 10))
def download_cases_dataframe():
return pd.read_csv("https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/owid-covid-data.csv")
confirmed_df_ = download_cases_dataframe()
confirmed_df_.iloc[0]
confirmed_df = confirmed_df_.copy()
confirmed_df = confirmed_df[["date", "new_cases", "iso_code"]]
confirmed_df.rename(
columns={
"date": "sample_date",
"iso_code": "country_code",
},
inplace=True)
def convert_iso_alpha_3_to_alpha_2(x):
try:
return pycountry.countries.get(alpha_3=x).alpha_2
except Exception as e:
logging.info(f"Error converting country ISO Alpha 3 code '{x}': {repr(e)}")
return None
confirmed_df["country_code"] = confirmed_df.country_code.apply(convert_iso_alpha_3_to_alpha_2)
confirmed_df.dropna(inplace=True)
confirmed_df["sample_date"] = pd.to_datetime(confirmed_df.sample_date, dayfirst=True)
confirmed_df["sample_date"] = confirmed_df.sample_date.dt.strftime("%Y-%m-%d")
confirmed_df.sort_values("sample_date", inplace=True)
confirmed_df.tail()
confirmed_days = pd.date_range(
start=confirmed_df.iloc[0].sample_date,
end=extraction_datetime)
confirmed_days_df = pd.DataFrame(data=confirmed_days, columns=["sample_date"])
confirmed_days_df["sample_date_string"] = \
confirmed_days_df.sample_date.dt.strftime("%Y-%m-%d")
confirmed_days_df.tail()
def sort_source_regions_for_display(source_regions: list) -> list:
if report_backend_identifier in source_regions:
source_regions = [report_backend_identifier] + \
list(sorted(set(source_regions).difference([report_backend_identifier])))
else:
source_regions = list(sorted(source_regions))
return source_regions
report_source_regions = report_backend_client.source_regions_for_date(
date=extraction_datetime.date())
report_source_regions = sort_source_regions_for_display(
source_regions=report_source_regions)
report_source_regions
def get_cases_dataframe(source_regions_for_date_function, columns_suffix=None):
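    # Build a daily COVID-case series by summing new cases over whichever set of
    # source regions applies on each date, then smooth with a 7-day rolling mean.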
source_regions_at_date_df = confirmed_days_df.copy()
source_regions_at_date_df["source_regions_at_date"] = \
source_regions_at_date_df.sample_date.apply(
lambda x: source_regions_for_date_function(date=x))
source_regions_at_date_df.sort_values("sample_date", inplace=True)
source_regions_at_date_df["_source_regions_group"] = source_regions_at_date_df. \
source_regions_at_date.apply(lambda x: ",".join(sort_source_regions_for_display(x)))
source_regions_at_date_df.tail()
#%%
source_regions_for_summary_df_ = \
source_regions_at_date_df[["sample_date", "_source_regions_group"]].copy()
source_regions_for_summary_df_.rename(columns={"_source_regions_group": "source_regions"}, inplace=True)
source_regions_for_summary_df_.tail()
#%%
confirmed_output_columns = ["sample_date", "new_cases", "covid_cases"]
confirmed_output_df = pd.DataFrame(columns=confirmed_output_columns)
for source_regions_group, source_regions_group_series in \
source_regions_at_date_df.groupby("_source_regions_group"):
source_regions_set = set(source_regions_group.split(","))
confirmed_source_regions_set_df = \
confirmed_df[confirmed_df.country_code.isin(source_regions_set)].copy()
confirmed_source_regions_group_df = \
confirmed_source_regions_set_df.groupby("sample_date").new_cases.sum() \
.reset_index().sort_values("sample_date")
confirmed_source_regions_group_df = \
confirmed_source_regions_group_df.merge(
confirmed_days_df[["sample_date_string"]].rename(
columns={"sample_date_string": "sample_date"}),
how="right")
confirmed_source_regions_group_df["new_cases"] = \
confirmed_source_regions_group_df["new_cases"].clip(lower=0)
confirmed_source_regions_group_df["covid_cases"] = \
confirmed_source_regions_group_df.new_cases.rolling(7, min_periods=0).mean().round()
confirmed_source_regions_group_df = \
confirmed_source_regions_group_df[confirmed_output_columns]
confirmed_source_regions_group_df = confirmed_source_regions_group_df.replace(0, np.nan)
confirmed_source_regions_group_df.fillna(method="ffill", inplace=True)
confirmed_source_regions_group_df = \
confirmed_source_regions_group_df[
confirmed_source_regions_group_df.sample_date.isin(
source_regions_group_series.sample_date_string)]
confirmed_output_df = confirmed_output_df.append(confirmed_source_regions_group_df)
result_df = confirmed_output_df.copy()
result_df.tail()
#%%
result_df.rename(columns={"sample_date": "sample_date_string"}, inplace=True)
result_df = confirmed_days_df[["sample_date_string"]].merge(result_df, how="left")
result_df.sort_values("sample_date_string", inplace=True)
result_df.fillna(method="ffill", inplace=True)
result_df.tail()
#%%
result_df[["new_cases", "covid_cases"]].plot()
if columns_suffix:
result_df.rename(
columns={
"new_cases": "new_cases_" + columns_suffix,
"covid_cases": "covid_cases_" + columns_suffix},
inplace=True)
return result_df, source_regions_for_summary_df_
confirmed_eu_df, source_regions_for_summary_df = get_cases_dataframe(
report_backend_client.source_regions_for_date)
confirmed_es_df, _ = get_cases_dataframe(
lambda date: [spain_region_country_code],
columns_suffix=spain_region_country_code.lower())
###Output
_____no_output_____
###Markdown
Extract API TEKs
###Code
raw_zip_path_prefix = "Data/TEKs/Raw/"
base_backend_identifiers = [report_backend_identifier]
multi_backend_exposure_keys_df = \
exposure_notification_io.download_exposure_keys_from_backends(
backend_identifiers=report_backend_identifiers,
generation_days=backend_generation_days,
fail_on_error_backend_identifiers=base_backend_identifiers,
save_raw_zip_path_prefix=raw_zip_path_prefix)
multi_backend_exposure_keys_df["region"] = multi_backend_exposure_keys_df["backend_identifier"]
multi_backend_exposure_keys_df.rename(
columns={
"generation_datetime": "sample_datetime",
"generation_date_string": "sample_date_string",
},
inplace=True)
multi_backend_exposure_keys_df.head()
early_teks_df = multi_backend_exposure_keys_df[
multi_backend_exposure_keys_df.rolling_period < 144].copy()
early_teks_df["rolling_period_in_hours"] = early_teks_df.rolling_period / 6
early_teks_df[early_teks_df.sample_date_string != extraction_date] \
.rolling_period_in_hours.hist(bins=list(range(24)))
early_teks_df[early_teks_df.sample_date_string == extraction_date] \
.rolling_period_in_hours.hist(bins=list(range(24)))
multi_backend_exposure_keys_df = multi_backend_exposure_keys_df[[
"sample_date_string", "region", "key_data"]]
multi_backend_exposure_keys_df.head()
active_regions = \
multi_backend_exposure_keys_df.groupby("region").key_data.nunique().sort_values().index.unique().tolist()
active_regions
multi_backend_summary_df = multi_backend_exposure_keys_df.groupby(
["sample_date_string", "region"]).key_data.nunique().reset_index() \
.pivot(index="sample_date_string", columns="region") \
.sort_index(ascending=False)
multi_backend_summary_df.rename(
columns={"key_data": "shared_teks_by_generation_date"},
inplace=True)
multi_backend_summary_df.rename_axis("sample_date", inplace=True)
multi_backend_summary_df = multi_backend_summary_df.fillna(0).astype(int)
multi_backend_summary_df = multi_backend_summary_df.head(backend_generation_days)
multi_backend_summary_df.head()
def compute_keys_cross_sharing(x):
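    # For a backend pair (A = *_x, B = *_y), compute how many of A's TEKs also appear
    # in B's dump, both as the common set and as a fraction of A's TEKs.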
teks_x = x.key_data_x.item()
common_teks = set(teks_x).intersection(x.key_data_y.item())
common_teks_fraction = len(common_teks) / len(teks_x)
return pd.Series(dict(
common_teks=common_teks,
common_teks_fraction=common_teks_fraction,
))
multi_backend_exposure_keys_by_region_df = \
multi_backend_exposure_keys_df.groupby("region").key_data.unique().reset_index()
multi_backend_exposure_keys_by_region_df["_merge"] = True
multi_backend_exposure_keys_by_region_combination_df = \
multi_backend_exposure_keys_by_region_df.merge(
multi_backend_exposure_keys_by_region_df, on="_merge")
multi_backend_exposure_keys_by_region_combination_df.drop(
columns=["_merge"], inplace=True)
if multi_backend_exposure_keys_by_region_combination_df.region_x.nunique() > 1:
multi_backend_exposure_keys_by_region_combination_df = \
multi_backend_exposure_keys_by_region_combination_df[
multi_backend_exposure_keys_by_region_combination_df.region_x !=
multi_backend_exposure_keys_by_region_combination_df.region_y]
multi_backend_exposure_keys_cross_sharing_df = \
multi_backend_exposure_keys_by_region_combination_df \
.groupby(["region_x", "region_y"]) \
.apply(compute_keys_cross_sharing) \
.reset_index()
multi_backend_cross_sharing_summary_df = \
multi_backend_exposure_keys_cross_sharing_df.pivot_table(
values=["common_teks_fraction"],
columns="region_x",
index="region_y",
aggfunc=lambda x: x.item())
multi_backend_cross_sharing_summary_df
multi_backend_without_active_region_exposure_keys_df = \
multi_backend_exposure_keys_df[multi_backend_exposure_keys_df.region != report_backend_identifier]
multi_backend_without_active_region = \
multi_backend_without_active_region_exposure_keys_df.groupby("region").key_data.nunique().sort_values().index.unique().tolist()
multi_backend_without_active_region
exposure_keys_summary_df = multi_backend_exposure_keys_df[
multi_backend_exposure_keys_df.region == report_backend_identifier]
exposure_keys_summary_df.drop(columns=["region"], inplace=True)
exposure_keys_summary_df = \
exposure_keys_summary_df.groupby(["sample_date_string"]).key_data.nunique().to_frame()
exposure_keys_summary_df = \
exposure_keys_summary_df.reset_index().set_index("sample_date_string")
exposure_keys_summary_df.sort_index(ascending=False, inplace=True)
exposure_keys_summary_df.rename(columns={"key_data": "shared_teks_by_generation_date"}, inplace=True)
exposure_keys_summary_df.head()
###Output
/opt/hostedtoolcache/Python/3.8.11/x64/lib/python3.8/site-packages/pandas/core/frame.py:4110: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy
return super().drop(
###Markdown
Dump API TEKs
###Code
tek_list_df = multi_backend_exposure_keys_df[
["sample_date_string", "region", "key_data"]].copy()
tek_list_df["key_data"] = tek_list_df["key_data"].apply(str)
tek_list_df.rename(columns={
"sample_date_string": "sample_date",
"key_data": "tek_list"}, inplace=True)
tek_list_df = tek_list_df.groupby(
["sample_date", "region"]).tek_list.unique().reset_index()
tek_list_df["extraction_date"] = extraction_date
tek_list_df["extraction_date_with_hour"] = extraction_date_with_hour
tek_list_path_prefix = "Data/TEKs/"
tek_list_current_path = tek_list_path_prefix + f"/Current/RadarCOVID-TEKs.json"
tek_list_daily_path = tek_list_path_prefix + f"Daily/RadarCOVID-TEKs-{extraction_date}.json"
tek_list_hourly_path = tek_list_path_prefix + f"Hourly/RadarCOVID-TEKs-{extraction_date_with_hour}.json"
for path in [tek_list_current_path, tek_list_daily_path, tek_list_hourly_path]:
os.makedirs(os.path.dirname(path), exist_ok=True)
tek_list_base_df = tek_list_df[tek_list_df.region == report_backend_identifier]
tek_list_base_df.drop(columns=["extraction_date", "extraction_date_with_hour"]).to_json(
tek_list_current_path,
lines=True, orient="records")
tek_list_base_df.drop(columns=["extraction_date_with_hour"]).to_json(
tek_list_daily_path,
lines=True, orient="records")
tek_list_base_df.to_json(
tek_list_hourly_path,
lines=True, orient="records")
tek_list_base_df.head()
###Output
_____no_output_____
###Markdown
Load TEK Dumps
###Code
import glob
def load_extracted_teks(mode, region=None, limit=None) -> pd.DataFrame:
extracted_teks_df = pd.DataFrame(columns=["region"])
file_paths = list(reversed(sorted(glob.glob(tek_list_path_prefix + mode + "/RadarCOVID-TEKs-*.json"))))
if limit:
file_paths = file_paths[:limit]
for file_path in file_paths:
logging.info(f"Loading TEKs from '{file_path}'...")
iteration_extracted_teks_df = pd.read_json(file_path, lines=True)
extracted_teks_df = extracted_teks_df.append(
iteration_extracted_teks_df, sort=False)
extracted_teks_df["region"] = \
extracted_teks_df.region.fillna(spain_region_country_code).copy()
if region:
extracted_teks_df = \
extracted_teks_df[extracted_teks_df.region == region]
return extracted_teks_df
daily_extracted_teks_df = load_extracted_teks(
mode="Daily",
region=report_backend_identifier,
limit=tek_dumps_load_limit)
daily_extracted_teks_df.head()
exposure_keys_summary_df_ = daily_extracted_teks_df \
.sort_values("extraction_date", ascending=False) \
.groupby("sample_date").tek_list.first() \
.to_frame()
exposure_keys_summary_df_.index.name = "sample_date_string"
exposure_keys_summary_df_["tek_list"] = \
exposure_keys_summary_df_.tek_list.apply(len)
exposure_keys_summary_df_ = exposure_keys_summary_df_ \
.rename(columns={"tek_list": "shared_teks_by_generation_date"}) \
.sort_index(ascending=False)
exposure_keys_summary_df = exposure_keys_summary_df_
exposure_keys_summary_df.head()
###Output
_____no_output_____
###Markdown
Daily New TEKs
###Code
tek_list_df = daily_extracted_teks_df.groupby("extraction_date").tek_list.apply(
lambda x: set(sum(x, []))).reset_index()
tek_list_df = tek_list_df.set_index("extraction_date").sort_index(ascending=True)
tek_list_df.head()
def compute_teks_by_generation_and_upload_date(date):
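    # tek_list_df holds the cumulative TEK set per extraction date, so .diff() (set
    # subtraction) isolates the TEKs first seen on `date`; intersecting them with each
    # generation date's list buckets the newly uploaded TEKs by generation day.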
day_new_teks_set_df = tek_list_df.copy().diff()
try:
day_new_teks_set = day_new_teks_set_df[
day_new_teks_set_df.index == date].tek_list.item()
except ValueError:
day_new_teks_set = None
if pd.isna(day_new_teks_set):
day_new_teks_set = set()
day_new_teks_df = daily_extracted_teks_df[
daily_extracted_teks_df.extraction_date == date].copy()
day_new_teks_df["shared_teks"] = \
day_new_teks_df.tek_list.apply(lambda x: set(x).intersection(day_new_teks_set))
day_new_teks_df["shared_teks"] = \
day_new_teks_df.shared_teks.apply(len)
day_new_teks_df["upload_date"] = date
day_new_teks_df.rename(columns={"sample_date": "generation_date"}, inplace=True)
day_new_teks_df = day_new_teks_df[
["upload_date", "generation_date", "shared_teks"]]
day_new_teks_df["generation_to_upload_days"] = \
(pd.to_datetime(day_new_teks_df.upload_date) -
pd.to_datetime(day_new_teks_df.generation_date)).dt.days
day_new_teks_df = day_new_teks_df[day_new_teks_df.shared_teks > 0]
return day_new_teks_df
shared_teks_generation_to_upload_df = pd.DataFrame()
for upload_date in daily_extracted_teks_df.extraction_date.unique():
shared_teks_generation_to_upload_df = \
shared_teks_generation_to_upload_df.append(
compute_teks_by_generation_and_upload_date(date=upload_date))
shared_teks_generation_to_upload_df \
.sort_values(["upload_date", "generation_date"], ascending=False, inplace=True)
shared_teks_generation_to_upload_df.tail()
today_new_teks_df = \
shared_teks_generation_to_upload_df[
shared_teks_generation_to_upload_df.upload_date == extraction_date].copy()
today_new_teks_df.tail()
if not today_new_teks_df.empty:
today_new_teks_df.set_index("generation_to_upload_days") \
.sort_index().shared_teks.plot.bar()
generation_to_upload_period_pivot_df = \
shared_teks_generation_to_upload_df[
["upload_date", "generation_to_upload_days", "shared_teks"]] \
.pivot(index="upload_date", columns="generation_to_upload_days") \
.sort_index(ascending=False).fillna(0).astype(int) \
.droplevel(level=0, axis=1)
generation_to_upload_period_pivot_df.head()
new_tek_df = tek_list_df.diff().tek_list.apply(
lambda x: len(x) if not pd.isna(x) else None).to_frame().reset_index()
new_tek_df.rename(columns={
"tek_list": "shared_teks_by_upload_date",
"extraction_date": "sample_date_string",}, inplace=True)
new_tek_df.tail()
shared_teks_uploaded_on_generation_date_df = shared_teks_generation_to_upload_df[
shared_teks_generation_to_upload_df.generation_to_upload_days == 0] \
[["upload_date", "shared_teks"]].rename(
columns={
"upload_date": "sample_date_string",
"shared_teks": "shared_teks_uploaded_on_generation_date",
})
shared_teks_uploaded_on_generation_date_df.head()
estimated_shared_diagnoses_df = shared_teks_generation_to_upload_df \
.groupby(["upload_date"]).shared_teks.max().reset_index() \
.sort_values(["upload_date"], ascending=False) \
.rename(columns={
"upload_date": "sample_date_string",
"shared_teks": "shared_diagnoses",
})
invalid_shared_diagnoses_dates_mask = \
estimated_shared_diagnoses_df.sample_date_string.isin(invalid_shared_diagnoses_dates)
estimated_shared_diagnoses_df[invalid_shared_diagnoses_dates_mask] = 0
estimated_shared_diagnoses_df.head()
###Output
_____no_output_____
###Markdown
Hourly New TEKs
###Code
hourly_extracted_teks_df = load_extracted_teks(
mode="Hourly", region=report_backend_identifier, limit=25)
hourly_extracted_teks_df.head()
hourly_new_tek_count_df = hourly_extracted_teks_df \
.groupby("extraction_date_with_hour").tek_list. \
apply(lambda x: set(sum(x, []))).reset_index().copy()
hourly_new_tek_count_df = hourly_new_tek_count_df.set_index("extraction_date_with_hour") \
.sort_index(ascending=True)
hourly_new_tek_count_df["new_tek_list"] = hourly_new_tek_count_df.tek_list.diff()
hourly_new_tek_count_df["new_tek_count"] = hourly_new_tek_count_df.new_tek_list.apply(
lambda x: len(x) if not pd.isna(x) else 0)
hourly_new_tek_count_df.rename(columns={
"new_tek_count": "shared_teks_by_upload_date"}, inplace=True)
hourly_new_tek_count_df = hourly_new_tek_count_df.reset_index()[[
"extraction_date_with_hour", "shared_teks_by_upload_date"]]
hourly_new_tek_count_df.head()
hourly_summary_df = hourly_new_tek_count_df.copy()
hourly_summary_df.set_index("extraction_date_with_hour", inplace=True)
hourly_summary_df = hourly_summary_df.fillna(0).astype(int).reset_index()
hourly_summary_df["datetime_utc"] = pd.to_datetime(
hourly_summary_df.extraction_date_with_hour, format="%Y-%m-%d@%H")
hourly_summary_df.set_index("datetime_utc", inplace=True)
hourly_summary_df = hourly_summary_df.tail(-1)
hourly_summary_df.head()
###Output
_____no_output_____
###Markdown
Official Statistics
###Code
import requests
import pandas.io.json
official_stats_response = requests.get("https://radarcovid.covid19.gob.es/kpi/statistics/basics")
official_stats_response.raise_for_status()
official_stats_df_ = pandas.io.json.json_normalize(official_stats_response.json())
official_stats_df = official_stats_df_.copy()
official_stats_df["date"] = pd.to_datetime(official_stats_df["date"], dayfirst=True)
official_stats_df.head()
official_stats_column_map = {
"date": "sample_date",
"applicationsDownloads.totalAcummulated": "app_downloads_es_accumulated",
"communicatedContagions.totalAcummulated": "shared_diagnoses_es_accumulated",
}
accumulated_suffix = "_accumulated"
accumulated_values_columns = \
list(filter(lambda x: x.endswith(accumulated_suffix), official_stats_column_map.values()))
interpolated_values_columns = \
list(map(lambda x: x[:-len(accumulated_suffix)], accumulated_values_columns))
official_stats_df = \
official_stats_df[official_stats_column_map.keys()] \
.rename(columns=official_stats_column_map)
official_stats_df["extraction_date"] = extraction_date
official_stats_df.head()
official_stats_path = "Data/Statistics/Current/RadarCOVID-Statistics.json"
previous_official_stats_df = pd.read_json(official_stats_path, orient="records", lines=True)
previous_official_stats_df["sample_date"] = pd.to_datetime(previous_official_stats_df["sample_date"], dayfirst=True)
official_stats_df = official_stats_df.append(previous_official_stats_df)
official_stats_df.head()
official_stats_df = official_stats_df[~(official_stats_df.shared_diagnoses_es_accumulated == 0)]
official_stats_df.sort_values("extraction_date", ascending=False, inplace=True)
official_stats_df.drop_duplicates(subset=["sample_date"], keep="first", inplace=True)
official_stats_df.head()
official_stats_stored_df = official_stats_df.copy()
official_stats_stored_df["sample_date"] = official_stats_stored_df.sample_date.dt.strftime("%Y-%m-%d")
official_stats_stored_df.to_json(official_stats_path, orient="records", lines=True)
official_stats_df.drop(columns=["extraction_date"], inplace=True)
official_stats_df = confirmed_days_df.merge(official_stats_df, how="left")
official_stats_df.sort_values("sample_date", ascending=False, inplace=True)
official_stats_df.head()
official_stats_df[accumulated_values_columns] = \
official_stats_df[accumulated_values_columns] \
.astype(float).interpolate(limit_area="inside")
official_stats_df[interpolated_values_columns] = \
official_stats_df[accumulated_values_columns].diff(periods=-1)
official_stats_df.drop(columns="sample_date", inplace=True)
official_stats_df.head()
###Output
_____no_output_____
###Markdown
Data Merge
###Code
result_summary_df = exposure_keys_summary_df.merge(
new_tek_df, on=["sample_date_string"], how="outer")
result_summary_df.head()
result_summary_df = result_summary_df.merge(
shared_teks_uploaded_on_generation_date_df, on=["sample_date_string"], how="outer")
result_summary_df.head()
result_summary_df = result_summary_df.merge(
estimated_shared_diagnoses_df, on=["sample_date_string"], how="outer")
result_summary_df.head()
result_summary_df = result_summary_df.merge(
official_stats_df, on=["sample_date_string"], how="outer")
result_summary_df.head()
result_summary_df = confirmed_eu_df.tail(daily_summary_days).merge(
result_summary_df, on=["sample_date_string"], how="left")
result_summary_df.head()
result_summary_df = confirmed_es_df.tail(daily_summary_days).merge(
result_summary_df, on=["sample_date_string"], how="left")
result_summary_df.head()
result_summary_df["sample_date"] = pd.to_datetime(result_summary_df.sample_date_string)
result_summary_df = result_summary_df.merge(source_regions_for_summary_df, how="left")
result_summary_df.set_index(["sample_date", "source_regions"], inplace=True)
result_summary_df.drop(columns=["sample_date_string"], inplace=True)
result_summary_df.sort_index(ascending=False, inplace=True)
result_summary_df.head()
with pd.option_context("mode.use_inf_as_na", True):
result_summary_df = result_summary_df.fillna(0).astype(int)
result_summary_df["teks_per_shared_diagnosis"] = \
(result_summary_df.shared_teks_by_upload_date / result_summary_df.shared_diagnoses).fillna(0)
result_summary_df["shared_diagnoses_per_covid_case"] = \
(result_summary_df.shared_diagnoses / result_summary_df.covid_cases).fillna(0)
result_summary_df["shared_diagnoses_per_covid_case_es"] = \
(result_summary_df.shared_diagnoses_es / result_summary_df.covid_cases_es).fillna(0)
result_summary_df.head(daily_plot_days)
def compute_aggregated_results_summary(days) -> pd.DataFrame:
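    # Rolling sums over `days`; COVID cases are masked to zero on days without shared
    # diagnoses so the per-case ratios are computed only over days with app uploads.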
aggregated_result_summary_df = result_summary_df.copy()
aggregated_result_summary_df["covid_cases_for_ratio"] = \
aggregated_result_summary_df.covid_cases.mask(
aggregated_result_summary_df.shared_diagnoses == 0, 0)
aggregated_result_summary_df["covid_cases_for_ratio_es"] = \
aggregated_result_summary_df.covid_cases_es.mask(
aggregated_result_summary_df.shared_diagnoses_es == 0, 0)
aggregated_result_summary_df = aggregated_result_summary_df \
.sort_index(ascending=True).fillna(0).rolling(days).agg({
"covid_cases": "sum",
"covid_cases_es": "sum",
"covid_cases_for_ratio": "sum",
"covid_cases_for_ratio_es": "sum",
"shared_teks_by_generation_date": "sum",
"shared_teks_by_upload_date": "sum",
"shared_diagnoses": "sum",
"shared_diagnoses_es": "sum",
}).sort_index(ascending=False)
with pd.option_context("mode.use_inf_as_na", True):
aggregated_result_summary_df = aggregated_result_summary_df.fillna(0).astype(int)
aggregated_result_summary_df["teks_per_shared_diagnosis"] = \
(aggregated_result_summary_df.shared_teks_by_upload_date /
aggregated_result_summary_df.covid_cases_for_ratio).fillna(0)
aggregated_result_summary_df["shared_diagnoses_per_covid_case"] = \
(aggregated_result_summary_df.shared_diagnoses /
aggregated_result_summary_df.covid_cases_for_ratio).fillna(0)
aggregated_result_summary_df["shared_diagnoses_per_covid_case_es"] = \
(aggregated_result_summary_df.shared_diagnoses_es /
aggregated_result_summary_df.covid_cases_for_ratio_es).fillna(0)
return aggregated_result_summary_df
aggregated_result_with_7_days_window_summary_df = compute_aggregated_results_summary(days=7)
aggregated_result_with_7_days_window_summary_df.head()
last_7_days_summary = aggregated_result_with_7_days_window_summary_df.to_dict(orient="records")[1]
last_7_days_summary
aggregated_result_with_14_days_window_summary_df = compute_aggregated_results_summary(days=13)
last_14_days_summary = aggregated_result_with_14_days_window_summary_df.to_dict(orient="records")[1]
last_14_days_summary
###Output
_____no_output_____
###Markdown
Report Results
###Code
display_column_name_mapping = {
"sample_date": "Sample\u00A0Date\u00A0(UTC)",
"source_regions": "Source Countries",
"datetime_utc": "Timestamp (UTC)",
"upload_date": "Upload Date (UTC)",
"generation_to_upload_days": "Generation to Upload Period in Days",
"region": "Backend",
"region_x": "Backend\u00A0(A)",
"region_y": "Backend\u00A0(B)",
"common_teks": "Common TEKs Shared Between Backends",
"common_teks_fraction": "Fraction of TEKs in Backend (A) Available in Backend (B)",
"covid_cases": "COVID-19 Cases (Source Countries)",
"shared_teks_by_generation_date": "Shared TEKs by Generation Date (Source Countries)",
"shared_teks_by_upload_date": "Shared TEKs by Upload Date (Source Countries)",
"shared_teks_uploaded_on_generation_date": "Shared TEKs Uploaded on Generation Date (Source Countries)",
"shared_diagnoses": "Shared Diagnoses (Source Countries – Estimation)",
"teks_per_shared_diagnosis": "TEKs Uploaded per Shared Diagnosis (Source Countries)",
"shared_diagnoses_per_covid_case": "Usage Ratio (Source Countries)",
"covid_cases_es": "COVID-19 Cases (Spain)",
"app_downloads_es": "App Downloads (Spain – Official)",
"shared_diagnoses_es": "Shared Diagnoses (Spain – Official)",
"shared_diagnoses_per_covid_case_es": "Usage Ratio (Spain)",
}
summary_columns = [
"covid_cases",
"shared_teks_by_generation_date",
"shared_teks_by_upload_date",
"shared_teks_uploaded_on_generation_date",
"shared_diagnoses",
"teks_per_shared_diagnosis",
"shared_diagnoses_per_covid_case",
"covid_cases_es",
"app_downloads_es",
"shared_diagnoses_es",
"shared_diagnoses_per_covid_case_es",
]
summary_percentage_columns= [
"shared_diagnoses_per_covid_case_es",
"shared_diagnoses_per_covid_case",
]
###Output
_____no_output_____
###Markdown
Daily Summary Table
###Code
result_summary_df_ = result_summary_df.copy()
result_summary_df = result_summary_df[summary_columns]
result_summary_with_display_names_df = result_summary_df \
.rename_axis(index=display_column_name_mapping) \
.rename(columns=display_column_name_mapping)
result_summary_with_display_names_df
###Output
_____no_output_____
###Markdown
Daily Summary Plots
###Code
result_plot_summary_df = result_summary_df.head(daily_plot_days)[summary_columns] \
.droplevel(level=["source_regions"]) \
.rename_axis(index=display_column_name_mapping) \
.rename(columns=display_column_name_mapping)
summary_ax_list = result_plot_summary_df.sort_index(ascending=True).plot.bar(
title=f"Daily Summary",
rot=45, subplots=True, figsize=(15, 30), legend=False)
ax_ = summary_ax_list[0]
ax_.get_figure().tight_layout()
ax_.get_figure().subplots_adjust(top=0.95)
_ = ax_.set_xticklabels(sorted(result_plot_summary_df.index.strftime("%Y-%m-%d").tolist()))
for percentage_column in summary_percentage_columns:
percentage_column_index = summary_columns.index(percentage_column)
summary_ax_list[percentage_column_index].yaxis \
.set_major_formatter(matplotlib.ticker.PercentFormatter(1.0))
###Output
/opt/hostedtoolcache/Python/3.8.11/x64/lib/python3.8/site-packages/pandas/plotting/_matplotlib/tools.py:307: MatplotlibDeprecationWarning:
The rowNum attribute was deprecated in Matplotlib 3.2 and will be removed two minor releases later. Use ax.get_subplotspec().rowspan.start instead.
layout[ax.rowNum, ax.colNum] = ax.get_visible()
/opt/hostedtoolcache/Python/3.8.11/x64/lib/python3.8/site-packages/pandas/plotting/_matplotlib/tools.py:307: MatplotlibDeprecationWarning:
The colNum attribute was deprecated in Matplotlib 3.2 and will be removed two minor releases later. Use ax.get_subplotspec().colspan.start instead.
layout[ax.rowNum, ax.colNum] = ax.get_visible()
/opt/hostedtoolcache/Python/3.8.11/x64/lib/python3.8/site-packages/pandas/plotting/_matplotlib/tools.py:313: MatplotlibDeprecationWarning:
The rowNum attribute was deprecated in Matplotlib 3.2 and will be removed two minor releases later. Use ax.get_subplotspec().rowspan.start instead.
if not layout[ax.rowNum + 1, ax.colNum]:
/opt/hostedtoolcache/Python/3.8.11/x64/lib/python3.8/site-packages/pandas/plotting/_matplotlib/tools.py:313: MatplotlibDeprecationWarning:
The colNum attribute was deprecated in Matplotlib 3.2 and will be removed two minor releases later. Use ax.get_subplotspec().colspan.start instead.
if not layout[ax.rowNum + 1, ax.colNum]:
###Markdown
Daily Generation to Upload Period Table
###Code
display_generation_to_upload_period_pivot_df = \
generation_to_upload_period_pivot_df \
.head(backend_generation_days)
display_generation_to_upload_period_pivot_df \
.head(backend_generation_days) \
.rename_axis(columns=display_column_name_mapping) \
.rename_axis(index=display_column_name_mapping)
fig, generation_to_upload_period_pivot_table_ax = plt.subplots(
figsize=(12, 1 + 0.6 * len(display_generation_to_upload_period_pivot_df)))
generation_to_upload_period_pivot_table_ax.set_title(
"Shared TEKs Generation to Upload Period Table")
sns.heatmap(
data=display_generation_to_upload_period_pivot_df
.rename_axis(columns=display_column_name_mapping)
.rename_axis(index=display_column_name_mapping),
fmt=".0f",
annot=True,
ax=generation_to_upload_period_pivot_table_ax)
generation_to_upload_period_pivot_table_ax.get_figure().tight_layout()
###Output
_____no_output_____
###Markdown
Hourly Summary Plots
###Code
hourly_summary_ax_list = hourly_summary_df \
.rename_axis(index=display_column_name_mapping) \
.rename(columns=display_column_name_mapping) \
.plot.bar(
title=f"Last 24h Summary",
rot=45, subplots=True, legend=False)
ax_ = hourly_summary_ax_list[-1]
ax_.get_figure().tight_layout()
ax_.get_figure().subplots_adjust(top=0.9)
_ = ax_.set_xticklabels(sorted(hourly_summary_df.index.strftime("%Y-%m-%d@%H").tolist()))
###Output
_____no_output_____
###Markdown
Publish Results
###Code
github_repository = os.environ.get("GITHUB_REPOSITORY")
if github_repository is None:
github_repository = "pvieito/Radar-STATS"
github_project_base_url = "https://github.com/" + github_repository
display_formatters = {
display_column_name_mapping["teks_per_shared_diagnosis"]: lambda x: f"{x:.2f}" if x != 0 else "",
display_column_name_mapping["shared_diagnoses_per_covid_case"]: lambda x: f"{x:.2%}" if x != 0 else "",
display_column_name_mapping["shared_diagnoses_per_covid_case_es"]: lambda x: f"{x:.2%}" if x != 0 else "",
}
general_columns = \
list(filter(lambda x: x not in display_formatters, display_column_name_mapping.values()))
general_formatter = lambda x: f"{x}" if x != 0 else ""
display_formatters.update(dict(map(lambda x: (x, general_formatter), general_columns)))
daily_summary_table_html = result_summary_with_display_names_df \
.head(daily_plot_days) \
.rename_axis(index=display_column_name_mapping) \
.rename(columns=display_column_name_mapping) \
.to_html(formatters=display_formatters)
multi_backend_summary_table_html = multi_backend_summary_df \
.head(daily_plot_days) \
.rename_axis(columns=display_column_name_mapping) \
.rename(columns=display_column_name_mapping) \
.rename_axis(index=display_column_name_mapping) \
.to_html(formatters=display_formatters)
def format_multi_backend_cross_sharing_fraction(x):
if pd.isna(x):
return "-"
elif round(x * 100, 1) == 0:
return ""
else:
return f"{x:.1%}"
multi_backend_cross_sharing_summary_table_html = multi_backend_cross_sharing_summary_df \
.rename_axis(columns=display_column_name_mapping) \
.rename(columns=display_column_name_mapping) \
.rename_axis(index=display_column_name_mapping) \
.to_html(
classes="table-center",
formatters=display_formatters,
float_format=format_multi_backend_cross_sharing_fraction)
multi_backend_cross_sharing_summary_table_html = \
multi_backend_cross_sharing_summary_table_html \
.replace("<tr>","<tr style=\"text-align: center;\">")
extraction_date_result_summary_df = \
result_summary_df[result_summary_df.index.get_level_values("sample_date") == extraction_date]
extraction_date_result_hourly_summary_df = \
hourly_summary_df[hourly_summary_df.extraction_date_with_hour == extraction_date_with_hour]
covid_cases = \
extraction_date_result_summary_df.covid_cases.item()
shared_teks_by_generation_date = \
extraction_date_result_summary_df.shared_teks_by_generation_date.item()
shared_teks_by_upload_date = \
extraction_date_result_summary_df.shared_teks_by_upload_date.item()
shared_diagnoses = \
extraction_date_result_summary_df.shared_diagnoses.item()
teks_per_shared_diagnosis = \
extraction_date_result_summary_df.teks_per_shared_diagnosis.item()
shared_diagnoses_per_covid_case = \
extraction_date_result_summary_df.shared_diagnoses_per_covid_case.item()
shared_teks_by_upload_date_last_hour = \
extraction_date_result_hourly_summary_df.shared_teks_by_upload_date.sum().astype(int)
display_source_regions = ", ".join(report_source_regions)
if len(report_source_regions) == 1:
display_brief_source_regions = report_source_regions[0]
else:
display_brief_source_regions = f"{len(report_source_regions)} 🇪🇺"
def get_temporary_image_path() -> str:
return os.path.join(tempfile.gettempdir(), str(uuid.uuid4()) + ".png")
def save_temporary_plot_image(ax):
if isinstance(ax, np.ndarray):
ax = ax[0]
media_path = get_temporary_image_path()
ax.get_figure().savefig(media_path)
return media_path
def save_temporary_dataframe_image(df):
import dataframe_image as dfi
df = df.copy()
df_styler = df.style.format(display_formatters)
media_path = get_temporary_image_path()
dfi.export(df_styler, media_path)
return media_path
summary_plots_image_path = save_temporary_plot_image(
ax=summary_ax_list)
summary_table_image_path = save_temporary_dataframe_image(
df=result_summary_with_display_names_df)
hourly_summary_plots_image_path = save_temporary_plot_image(
ax=hourly_summary_ax_list)
multi_backend_summary_table_image_path = save_temporary_dataframe_image(
df=multi_backend_summary_df)
generation_to_upload_period_pivot_table_image_path = save_temporary_plot_image(
ax=generation_to_upload_period_pivot_table_ax)
###Output
[0719/231010.904480:WARNING:headless_browser_main_parts.cc(106)] Cannot create Pref Service with no user data dir.
[0719/231010.955011:ERROR:gpu_init.cc(440)] Passthrough is not supported, GL is swiftshader
###Markdown
Save Results
###Code
report_resources_path_prefix = "Data/Resources/Current/RadarCOVID-Report-"
result_summary_df.to_csv(
report_resources_path_prefix + "Summary-Table.csv")
result_summary_df.to_html(
report_resources_path_prefix + "Summary-Table.html")
hourly_summary_df.to_csv(
report_resources_path_prefix + "Hourly-Summary-Table.csv")
multi_backend_summary_df.to_csv(
report_resources_path_prefix + "Multi-Backend-Summary-Table.csv")
multi_backend_cross_sharing_summary_df.to_csv(
report_resources_path_prefix + "Multi-Backend-Cross-Sharing-Summary-Table.csv")
generation_to_upload_period_pivot_df.to_csv(
report_resources_path_prefix + "Generation-Upload-Period-Table.csv")
_ = shutil.copyfile(
summary_plots_image_path,
report_resources_path_prefix + "Summary-Plots.png")
_ = shutil.copyfile(
summary_table_image_path,
report_resources_path_prefix + "Summary-Table.png")
_ = shutil.copyfile(
hourly_summary_plots_image_path,
report_resources_path_prefix + "Hourly-Summary-Plots.png")
_ = shutil.copyfile(
multi_backend_summary_table_image_path,
report_resources_path_prefix + "Multi-Backend-Summary-Table.png")
_ = shutil.copyfile(
generation_to_upload_period_pivot_table_image_path,
report_resources_path_prefix + "Generation-Upload-Period-Table.png")
###Output
_____no_output_____
###Markdown
Publish Results as JSON
###Code
def generate_summary_api_results(df: pd.DataFrame) -> list:
api_df = df.reset_index().copy()
api_df["sample_date_string"] = \
api_df["sample_date"].dt.strftime("%Y-%m-%d")
api_df["source_regions"] = \
api_df["source_regions"].apply(lambda x: x.split(","))
return api_df.to_dict(orient="records")
summary_api_results = \
generate_summary_api_results(df=result_summary_df)
today_summary_api_results = \
generate_summary_api_results(df=extraction_date_result_summary_df)[0]
summary_results = dict(
backend_identifier=report_backend_identifier,
source_regions=report_source_regions,
extraction_datetime=extraction_datetime,
extraction_date=extraction_date,
extraction_date_with_hour=extraction_date_with_hour,
last_hour=dict(
shared_teks_by_upload_date=shared_teks_by_upload_date_last_hour,
shared_diagnoses=0,
),
today=today_summary_api_results,
last_7_days=last_7_days_summary,
last_14_days=last_14_days_summary,
daily_results=summary_api_results)
summary_results = \
json.loads(pd.Series([summary_results]).to_json(orient="records"))[0]
with open(report_resources_path_prefix + "Summary-Results.json", "w") as f:
json.dump(summary_results, f, indent=4)
###Output
_____no_output_____
###Markdown
Publish on README
###Code
with open("Data/Templates/README.md", "r") as f:
readme_contents = f.read()
readme_contents = readme_contents.format(
extraction_date_with_hour=extraction_date_with_hour,
github_project_base_url=github_project_base_url,
daily_summary_table_html=daily_summary_table_html,
multi_backend_summary_table_html=multi_backend_summary_table_html,
multi_backend_cross_sharing_summary_table_html=multi_backend_cross_sharing_summary_table_html,
display_source_regions=display_source_regions)
with open("README.md", "w") as f:
f.write(readme_contents)
###Output
_____no_output_____
###Markdown
Publish on Twitter
###Code
enable_share_to_twitter = os.environ.get("RADARCOVID_REPORT__ENABLE_PUBLISH_ON_TWITTER")
github_event_name = os.environ.get("GITHUB_EVENT_NAME")
if enable_share_to_twitter and github_event_name == "schedule" and \
(shared_teks_by_upload_date_last_hour or not are_today_results_partial):
import tweepy
twitter_api_auth_keys = os.environ["RADARCOVID_REPORT__TWITTER_API_AUTH_KEYS"]
twitter_api_auth_keys = twitter_api_auth_keys.split(":")
auth = tweepy.OAuthHandler(twitter_api_auth_keys[0], twitter_api_auth_keys[1])
auth.set_access_token(twitter_api_auth_keys[2], twitter_api_auth_keys[3])
api = tweepy.API(auth)
summary_plots_media = api.media_upload(summary_plots_image_path)
summary_table_media = api.media_upload(summary_table_image_path)
generation_to_upload_period_pivot_table_image_media = api.media_upload(generation_to_upload_period_pivot_table_image_path)
media_ids = [
summary_plots_media.media_id,
summary_table_media.media_id,
generation_to_upload_period_pivot_table_image_media.media_id,
]
if are_today_results_partial:
today_addendum = " (Partial)"
else:
today_addendum = ""
def format_shared_diagnoses_per_covid_case(value) -> str:
if value == 0:
return "–"
return f"≤{value:.2%}"
display_shared_diagnoses_per_covid_case = \
format_shared_diagnoses_per_covid_case(value=shared_diagnoses_per_covid_case)
display_last_14_days_shared_diagnoses_per_covid_case = \
format_shared_diagnoses_per_covid_case(value=last_14_days_summary["shared_diagnoses_per_covid_case"])
display_last_14_days_shared_diagnoses_per_covid_case_es = \
format_shared_diagnoses_per_covid_case(value=last_14_days_summary["shared_diagnoses_per_covid_case_es"])
status = textwrap.dedent(f"""
#RadarCOVID – {extraction_date_with_hour}
Today{today_addendum}:
- Uploaded TEKs: {shared_teks_by_upload_date:.0f} ({shared_teks_by_upload_date_last_hour:+d} last hour)
- Shared Diagnoses: ≤{shared_diagnoses:.0f}
- Usage Ratio: {display_shared_diagnoses_per_covid_case}
Last 14 Days:
- Usage Ratio (Estimation): {display_last_14_days_shared_diagnoses_per_covid_case}
- Usage Ratio (Official): {display_last_14_days_shared_diagnoses_per_covid_case_es}
Info: {github_project_base_url}#documentation
""")
status = status.encode(encoding="utf-8")
api.update_status(status=status, media_ids=media_ids)
###Output
_____no_output_____ |
GAN Model/Team A -WM Models-RF.ipynb | ###Markdown
- Dipesh Kumar Singh - Parveen - Chandan Kumar
###Code
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
import sklearn
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
from sklearn.preprocessing import LabelEncoder
%matplotlib inline
df = pd.read_csv(r"D:Final WM Data.csv")
df.head()
df.drop('Unnamed: 0', inplace=True, axis=1)
df.WashingMethod.unique()
df.info()
cor = df.corr()
plt.figure(figsize=(11,7))
sns.heatmap(cor, annot=True)
plt.show()
df.Dryer.unique()
df.nunique()
label_Ftype = LabelEncoder()
label_WashingMethod=LabelEncoder()
df['FType'] = label_Ftype.fit_transform(df['FType'])
df['WashingMethod'] = label_WashingMethod.fit_transform(df['WashingMethod'])
df['Load'] = df['Load'].replace({'Front Load': 1, 'Top Load':0})
df['Dryer'] = df['Dryer'].replace({'No Dryer': 0, 'Spin Dry':1,'Air Dry':2})
df['Built_In_Heater'] = df['Built_In_Heater'].replace({'No': 0, 'Yes':1})
df.head()
display(df.describe().round(2))
cor = df.corr()
plt.figure(figsize=(11,7))
sns.heatmap(cor, annot=True)
plt.show()
dt = pd.get_dummies(df, columns=['Brand'])
dt
###Output
_____no_output_____
###Markdown
Splitting:
###Code
x = dt.drop(['Price','Name','Model'], axis=1)
y = dt['Price']
x_train, x_test, y_train, y_test = train_test_split(x,y,test_size = 0.3)
###Output
_____no_output_____
###Markdown
Random Forest Regression:
###Code
from sklearn.ensemble import RandomForestRegressor
rfr = RandomForestRegressor(n_estimators=20)
rfr.fit(x_train , y_train)
#accuracies = cross_val_score(estimator = rfr, x = x_train, y = y_train, cv = 5,verbose = 1)
y_pred = rfr.predict(x_test)
print('')
print('###### Random Forest ######')
print('Score : %.4f' % rfr.score(x_test, y_test))
#print(accuracies)
r2 = r2_score(y_test, y_pred)
print('R2 : %0.2f ' % r2)
print('Score : %.4f' % rfr.score(x_train, y_train))
from sklearn.model_selection import GridSearchCV
params = {
"n_estimators":[10,20,30,40,50],
'min_samples_split': [2,3,4,5],
'min_samples_leaf': [1, 2, 5],
'max_depth': [15,17,21,22]
}
rfr = RandomForestRegressor()
rfr= GridSearchCV(rfr, params, scoring="neg_root_mean_squared_error", n_jobs=-1, verbose=1, cv=3)
rfr.fit(x_train, y_train)
best_params = rfr.best_params_
print(f"Best parameters: {best_params}")
rfr = RandomForestRegressor(n_estimators=20,max_depth=15, min_samples_leaf=2, min_samples_split= 3)
rfr.fit(x_train , y_train)
#accuracies = cross_val_score(estimator = rfr, x = x_train, y = y_train, cv = 5,verbose = 1)
y_pred = rfr.predict(x_test)
print('')
print('###### Random Forest ######')
print('Score : %.4f' % rfr.score(x_test, y_test))
#print(accuracies)
import graphviz
from sklearn import tree
from sklearn.tree import export_graphviz
estimator = rfr.estimators_[19]
dot_data = tree.export_graphviz(estimator, out_file=None, filled=True, rounded=True,
feature_names=x_train.columns,
)
graph = graphviz.Source(dot_data)
graph
###Output
_____no_output_____ |
examples/estimator/classifier/RandomForestClassifier/c/basics_embedded.ipynb | ###Markdown
sklearn-porter
Repository: https://github.com/nok/sklearn-porter
RandomForestClassifier
Documentation: [sklearn.ensemble.RandomForestClassifier](http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html)
Loading data:
###Code
from sklearn.datasets import load_iris
iris_data = load_iris()
X = iris_data.data
y = iris_data.target
print(X.shape, y.shape)
###Output
((150, 4), (150,))
###Markdown
Train classifier:
###Code
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier(n_estimators=15, max_depth=None,
min_samples_split=2, random_state=0)
clf.fit(X, y)
###Output
_____no_output_____
###Markdown
Transpile classifier:
###Code
%%time
from sklearn_porter import Porter
porter = Porter(clf, language='c')
output = porter.export(embed_data=True)
print(output)
###Output
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
int predict_0(float features[]) {
int classes[3];
if (features[3] <= 0.75) {
classes[0] = 47;
classes[1] = 0;
classes[2] = 0;
} else {
if (features[2] <= 4.85000038147) {
if (features[3] <= 1.65000009537) {
classes[0] = 0;
classes[1] = 42;
classes[2] = 0;
} else {
if (features[1] <= 3.0) {
classes[0] = 0;
classes[1] = 0;
classes[2] = 3;
} else {
classes[0] = 0;
classes[1] = 1;
classes[2] = 0;
}
}
} else {
if (features[0] <= 6.59999990463) {
classes[0] = 0;
classes[1] = 0;
classes[2] = 27;
} else {
if (features[2] <= 5.19999980927) {
classes[0] = 0;
classes[1] = 1;
classes[2] = 0;
} else {
classes[0] = 0;
classes[1] = 0;
classes[2] = 29;
}
}
}
}
int class_idx = 0;
int class_val = classes[0];
int i;
for (i = 1; i < 3; i++) {
if (classes[i] > class_val) {
class_idx = i;
class_val = classes[i];
}
}
return class_idx;
}
int predict_1(float features[]) {
int classes[3];
if (features[3] <= 0.800000011921) {
classes[0] = 46;
classes[1] = 0;
classes[2] = 0;
} else {
if (features[3] <= 1.75) {
if (features[2] <= 4.94999980927) {
classes[0] = 0;
classes[1] = 58;
classes[2] = 0;
} else {
if (features[2] <= 5.44999980927) {
if (features[1] <= 2.45000004768) {
classes[0] = 0;
classes[1] = 0;
classes[2] = 2;
} else {
classes[0] = 0;
classes[1] = 3;
classes[2] = 0;
}
} else {
classes[0] = 0;
classes[1] = 0;
classes[2] = 3;
}
}
} else {
if (features[2] <= 4.85000038147) {
if (features[1] <= 3.09999990463) {
classes[0] = 0;
classes[1] = 0;
classes[2] = 2;
} else {
classes[0] = 0;
classes[1] = 1;
classes[2] = 0;
}
} else {
classes[0] = 0;
classes[1] = 0;
classes[2] = 35;
}
}
}
int class_idx = 0;
int class_val = classes[0];
int i;
for (i = 1; i < 3; i++) {
if (classes[i] > class_val) {
class_idx = i;
class_val = classes[i];
}
}
return class_idx;
}
int predict_2(float features[]) {
int classes[3];
if (features[0] <= 5.55000019073) {
if (features[3] <= 0.800000011921) {
classes[0] = 49;
classes[1] = 0;
classes[2] = 0;
} else {
if (features[3] <= 1.60000002384) {
classes[0] = 0;
classes[1] = 12;
classes[2] = 0;
} else {
classes[0] = 0;
classes[1] = 0;
classes[2] = 1;
}
}
} else {
if (features[3] <= 1.54999995232) {
if (features[3] <= 0.75) {
classes[0] = 2;
classes[1] = 0;
classes[2] = 0;
} else {
if (features[2] <= 5.0) {
classes[0] = 0;
classes[1] = 32;
classes[2] = 0;
} else {
classes[0] = 0;
classes[1] = 0;
classes[2] = 1;
}
}
} else {
if (features[2] <= 4.65000009537) {
classes[0] = 0;
classes[1] = 1;
classes[2] = 0;
} else {
if (features[3] <= 1.70000004768) {
if (features[2] <= 5.44999980927) {
classes[0] = 0;
classes[1] = 1;
classes[2] = 0;
} else {
classes[0] = 0;
classes[1] = 0;
classes[2] = 3;
}
} else {
classes[0] = 0;
classes[1] = 0;
classes[2] = 48;
}
}
}
}
int class_idx = 0;
int class_val = classes[0];
int i;
for (i = 1; i < 3; i++) {
if (classes[i] > class_val) {
class_idx = i;
class_val = classes[i];
}
}
return class_idx;
}
int predict_3(float features[]) {
int classes[3];
if (features[0] <= 5.44999980927) {
if (features[1] <= 2.80000019073) {
if (features[1] <= 2.45000004768) {
classes[0] = 0;
classes[1] = 5;
classes[2] = 0;
} else {
if (features[0] <= 5.0) {
classes[0] = 0;
classes[1] = 0;
classes[2] = 3;
} else {
classes[0] = 0;
classes[1] = 3;
classes[2] = 0;
}
}
} else {
classes[0] = 41;
classes[1] = 0;
classes[2] = 0;
}
} else {
if (features[0] <= 6.25) {
if (features[3] <= 1.70000004768) {
if (features[3] <= 0.600000023842) {
classes[0] = 3;
classes[1] = 0;
classes[2] = 0;
} else {
if (features[1] <= 2.25) {
if (features[3] <= 1.25) {
classes[0] = 0;
classes[1] = 1;
classes[2] = 0;
} else {
if (features[2] <= 4.75) {
classes[0] = 0;
classes[1] = 3;
classes[2] = 0;
} else {
classes[0] = 0;
classes[1] = 0;
classes[2] = 1;
}
}
} else {
classes[0] = 0;
classes[1] = 37;
classes[2] = 0;
}
}
} else {
classes[0] = 0;
classes[1] = 0;
classes[2] = 8;
}
} else {
if (features[2] <= 4.94999980927) {
classes[0] = 0;
classes[1] = 10;
classes[2] = 0;
} else {
classes[0] = 0;
classes[1] = 0;
classes[2] = 35;
}
}
}
int class_idx = 0;
int class_val = classes[0];
int i;
for (i = 1; i < 3; i++) {
if (classes[i] > class_val) {
class_idx = i;
class_val = classes[i];
}
}
return class_idx;
}
int predict_4(float features[]) {
int classes[3];
if (features[3] <= 0.699999988079) {
classes[0] = 50;
classes[1] = 0;
classes[2] = 0;
} else {
if (features[3] <= 1.75) {
if (features[2] <= 5.05000019073) {
if (features[2] <= 4.94999980927) {
classes[0] = 0;
classes[1] = 56;
classes[2] = 0;
} else {
if (features[3] <= 1.60000002384) {
classes[0] = 0;
classes[1] = 0;
classes[2] = 1;
} else {
classes[0] = 0;
classes[1] = 3;
classes[2] = 0;
}
}
} else {
if (features[0] <= 6.05000019073) {
classes[0] = 0;
classes[1] = 2;
classes[2] = 0;
} else {
classes[0] = 0;
classes[1] = 0;
classes[2] = 5;
}
}
} else {
classes[0] = 0;
classes[1] = 0;
classes[2] = 33;
}
}
int class_idx = 0;
int class_val = classes[0];
int i;
for (i = 1; i < 3; i++) {
if (classes[i] > class_val) {
class_idx = i;
class_val = classes[i];
}
}
return class_idx;
}
int predict_5(float features[]) {
int classes[3];
if (features[3] <= 0.800000011921) {
classes[0] = 49;
classes[1] = 0;
classes[2] = 0;
} else {
if (features[2] <= 4.94999980927) {
if (features[0] <= 4.94999980927) {
if (features[3] <= 1.35000002384) {
classes[0] = 0;
classes[1] = 1;
classes[2] = 0;
} else {
classes[0] = 0;
classes[1] = 0;
classes[2] = 1;
}
} else {
if (features[2] <= 4.75) {
classes[0] = 0;
classes[1] = 49;
classes[2] = 0;
} else {
if (features[1] <= 2.59999990463) {
classes[0] = 0;
classes[1] = 1;
classes[2] = 0;
} else {
if (features[0] <= 6.05000019073) {
classes[0] = 0;
classes[1] = 1;
classes[2] = 0;
} else {
if (features[3] <= 1.59999990463) {
classes[0] = 0;
classes[1] = 1;
classes[2] = 0;
} else {
classes[0] = 0;
classes[1] = 0;
classes[2] = 3;
}
}
}
}
}
} else {
classes[0] = 0;
classes[1] = 0;
classes[2] = 44;
}
}
int class_idx = 0;
int class_val = classes[0];
int i;
for (i = 1; i < 3; i++) {
if (classes[i] > class_val) {
class_idx = i;
class_val = classes[i];
}
}
return class_idx;
}
int predict_6(float features[]) {
int classes[3];
if (features[3] <= 0.699999988079) {
classes[0] = 46;
classes[1] = 0;
classes[2] = 0;
} else {
if (features[2] <= 4.75) {
if (features[0] <= 4.94999980927) {
classes[0] = 0;
classes[1] = 0;
classes[2] = 2;
} else {
classes[0] = 0;
classes[1] = 39;
classes[2] = 0;
}
} else {
if (features[2] <= 5.14999961853) {
if (features[0] <= 6.59999990463) {
if (features[3] <= 1.70000004768) {
if (features[3] <= 1.54999995232) {
classes[0] = 0;
classes[1] = 0;
classes[2] = 2;
} else {
classes[0] = 0;
classes[1] = 1;
classes[2] = 0;
}
} else {
classes[0] = 0;
classes[1] = 0;
classes[2] = 19;
}
} else {
classes[0] = 0;
classes[1] = 3;
classes[2] = 0;
}
} else {
classes[0] = 0;
classes[1] = 0;
classes[2] = 38;
}
}
}
int class_idx = 0;
int class_val = classes[0];
int i;
for (i = 1; i < 3; i++) {
if (classes[i] > class_val) {
class_idx = i;
class_val = classes[i];
}
}
return class_idx;
}
int predict_7(float features[]) {
int classes[3];
if (features[2] <= 2.59999990463) {
classes[0] = 58;
classes[1] = 0;
classes[2] = 0;
} else {
if (features[2] <= 4.75) {
classes[0] = 0;
classes[1] = 37;
classes[2] = 0;
} else {
if (features[2] <= 5.14999961853) {
if (features[3] <= 1.75) {
if (features[0] <= 6.5) {
if (features[2] <= 4.94999980927) {
classes[0] = 0;
classes[1] = 1;
classes[2] = 0;
} else {
if (features[0] <= 6.15000009537) {
if (features[3] <= 1.54999995232) {
classes[0] = 0;
classes[1] = 0;
classes[2] = 2;
} else {
classes[0] = 0;
classes[1] = 1;
classes[2] = 0;
}
} else {
classes[0] = 0;
classes[1] = 0;
classes[2] = 2;
}
}
} else {
classes[0] = 0;
classes[1] = 2;
classes[2] = 0;
}
} else {
classes[0] = 0;
classes[1] = 0;
classes[2] = 13;
}
} else {
classes[0] = 0;
classes[1] = 0;
classes[2] = 34;
}
}
}
int class_idx = 0;
int class_val = classes[0];
int i;
for (i = 1; i < 3; i++) {
if (classes[i] > class_val) {
class_idx = i;
class_val = classes[i];
}
}
return class_idx;
}
int predict_8(float features[]) {
int classes[3];
if (features[3] <= 0.699999988079) {
classes[0] = 42;
classes[1] = 0;
classes[2] = 0;
} else {
if (features[0] <= 6.25) {
if (features[2] <= 4.80000019073) {
if (features[0] <= 4.94999980927) {
if (features[1] <= 2.45000004768) {
classes[0] = 0;
classes[1] = 1;
classes[2] = 0;
} else {
classes[0] = 0;
classes[1] = 0;
classes[2] = 3;
}
} else {
classes[0] = 0;
classes[1] = 36;
classes[2] = 0;
}
} else {
if (features[3] <= 1.54999995232) {
classes[0] = 0;
classes[1] = 0;
classes[2] = 4;
} else {
if (features[3] <= 1.70000004768) {
classes[0] = 0;
classes[1] = 2;
classes[2] = 0;
} else {
classes[0] = 0;
classes[1] = 0;
classes[2] = 4;
}
}
}
} else {
if (features[3] <= 1.75) {
if (features[2] <= 5.05000019073) {
classes[0] = 0;
classes[1] = 15;
classes[2] = 0;
} else {
classes[0] = 0;
classes[1] = 0;
classes[2] = 4;
}
} else {
classes[0] = 0;
classes[1] = 0;
classes[2] = 39;
}
}
}
int class_idx = 0;
int class_val = classes[0];
int i;
for (i = 1; i < 3; i++) {
if (classes[i] > class_val) {
class_idx = i;
class_val = classes[i];
}
}
return class_idx;
}
int predict_9(float features[]) {
int classes[3];
if (features[2] <= 2.59999990463) {
classes[0] = 55;
classes[1] = 0;
classes[2] = 0;
} else {
if (features[2] <= 4.94999980927) {
if (features[0] <= 5.94999980927) {
classes[0] = 0;
classes[1] = 23;
classes[2] = 0;
} else {
if (features[3] <= 1.64999997616) {
classes[0] = 0;
classes[1] = 16;
classes[2] = 0;
} else {
classes[0] = 0;
classes[1] = 0;
classes[2] = 4;
}
}
} else {
if (features[0] <= 6.59999990463) {
classes[0] = 0;
classes[1] = 0;
classes[2] = 33;
} else {
if (features[0] <= 6.75) {
if (features[3] <= 2.0) {
classes[0] = 0;
classes[1] = 1;
classes[2] = 0;
} else {
classes[0] = 0;
classes[1] = 0;
classes[2] = 4;
}
} else {
classes[0] = 0;
classes[1] = 0;
classes[2] = 14;
}
}
}
}
int class_idx = 0;
int class_val = classes[0];
int i;
for (i = 1; i < 3; i++) {
if (classes[i] > class_val) {
class_idx = i;
class_val = classes[i];
}
}
return class_idx;
}
int predict_10(float features[]) {
int classes[3];
if (features[3] <= 0.800000011921) {
classes[0] = 52;
classes[1] = 0;
classes[2] = 0;
} else {
if (features[2] <= 4.75) {
classes[0] = 0;
classes[1] = 37;
classes[2] = 0;
} else {
if (features[3] <= 1.75) {
if (features[2] <= 4.94999980927) {
classes[0] = 0;
classes[1] = 4;
classes[2] = 0;
} else {
if (features[1] <= 2.65000009537) {
classes[0] = 0;
classes[1] = 0;
classes[2] = 2;
} else {
if (features[3] <= 1.54999995232) {
classes[0] = 0;
classes[1] = 0;
classes[2] = 2;
} else {
if (features[2] <= 5.44999980927) {
classes[0] = 0;
classes[1] = 2;
classes[2] = 0;
} else {
classes[0] = 0;
classes[1] = 0;
classes[2] = 1;
}
}
}
}
} else {
if (features[2] <= 4.85000038147) {
if (features[1] <= 3.09999990463) {
classes[0] = 0;
classes[1] = 0;
classes[2] = 6;
} else {
classes[0] = 0;
classes[1] = 1;
classes[2] = 0;
}
} else {
classes[0] = 0;
classes[1] = 0;
classes[2] = 43;
}
}
}
}
int class_idx = 0;
int class_val = classes[0];
int i;
for (i = 1; i < 3; i++) {
if (classes[i] > class_val) {
class_idx = i;
class_val = classes[i];
}
}
return class_idx;
}
int predict_11(float features[]) {
int classes[3];
if (features[2] <= 2.59999990463) {
classes[0] = 47;
classes[1] = 0;
classes[2] = 0;
} else {
if (features[2] <= 4.75) {
classes[0] = 0;
classes[1] = 40;
classes[2] = 0;
} else {
if (features[2] <= 4.94999980927) {
if (features[1] <= 3.04999995232) {
if (features[3] <= 1.59999990463) {
classes[0] = 0;
classes[1] = 2;
classes[2] = 0;
} else {
classes[0] = 0;
classes[1] = 0;
classes[2] = 7;
}
} else {
classes[0] = 0;
classes[1] = 2;
classes[2] = 0;
}
} else {
if (features[0] <= 6.05000019073) {
if (features[2] <= 5.05000019073) {
classes[0] = 0;
classes[1] = 0;
classes[2] = 4;
} else {
if (features[0] <= 5.94999980927) {
classes[0] = 0;
classes[1] = 0;
classes[2] = 7;
} else {
classes[0] = 0;
classes[1] = 1;
classes[2] = 0;
}
}
} else {
classes[0] = 0;
classes[1] = 0;
classes[2] = 40;
}
}
}
}
int class_idx = 0;
int class_val = classes[0];
int i;
for (i = 1; i < 3; i++) {
if (classes[i] > class_val) {
class_idx = i;
class_val = classes[i];
}
}
return class_idx;
}
int predict_12(float features[]) {
int classes[3];
if (features[3] <= 0.800000011921) {
classes[0] = 54;
classes[1] = 0;
classes[2] = 0;
} else {
if (features[1] <= 2.45000004768) {
if (features[2] <= 4.75) {
classes[0] = 0;
classes[1] = 12;
classes[2] = 0;
} else {
classes[0] = 0;
classes[1] = 0;
classes[2] = 1;
}
} else {
if (features[3] <= 1.60000002384) {
if (features[2] <= 5.0) {
classes[0] = 0;
classes[1] = 23;
classes[2] = 0;
} else {
classes[0] = 0;
classes[1] = 0;
classes[2] = 2;
}
} else {
if (features[3] <= 1.75) {
if (features[0] <= 5.80000019073) {
classes[0] = 0;
classes[1] = 0;
classes[2] = 3;
} else {
classes[0] = 0;
classes[1] = 2;
classes[2] = 0;
}
} else {
classes[0] = 0;
classes[1] = 0;
classes[2] = 53;
}
}
}
}
int class_idx = 0;
int class_val = classes[0];
int i;
for (i = 1; i < 3; i++) {
if (classes[i] > class_val) {
class_idx = i;
class_val = classes[i];
}
}
return class_idx;
}
int predict_13(float features[]) {
int classes[3];
if (features[0] <= 5.44999980927) {
if (features[3] <= 0.800000011921) {
classes[0] = 36;
classes[1] = 0;
classes[2] = 0;
} else {
if (features[2] <= 4.19999980927) {
classes[0] = 0;
classes[1] = 6;
classes[2] = 0;
} else {
if (features[1] <= 2.75) {
classes[0] = 0;
classes[1] = 0;
classes[2] = 1;
} else {
classes[0] = 0;
classes[1] = 1;
classes[2] = 0;
}
}
}
} else {
if (features[2] <= 4.90000009537) {
if (features[1] <= 3.59999990463) {
classes[0] = 0;
classes[1] = 43;
classes[2] = 0;
} else {
classes[0] = 7;
classes[1] = 0;
classes[2] = 0;
}
} else {
if (features[3] <= 1.70000004768) {
if (features[3] <= 1.54999995232) {
classes[0] = 0;
classes[1] = 0;
classes[2] = 2;
} else {
classes[0] = 0;
classes[1] = 4;
classes[2] = 0;
}
} else {
classes[0] = 0;
classes[1] = 0;
classes[2] = 50;
}
}
}
int class_idx = 0;
int class_val = classes[0];
int i;
for (i = 1; i < 3; i++) {
if (classes[i] > class_val) {
class_idx = i;
class_val = classes[i];
}
}
return class_idx;
}
int predict_14(float features[]) {
int classes[3];
if (features[2] <= 2.59999990463) {
classes[0] = 52;
classes[1] = 0;
classes[2] = 0;
} else {
if (features[3] <= 1.70000004768) {
if (features[0] <= 7.0) {
if (features[2] <= 5.0) {
classes[0] = 0;
classes[1] = 48;
classes[2] = 0;
} else {
if (features[0] <= 6.05000019073) {
classes[0] = 0;
classes[1] = 1;
classes[2] = 0;
} else {
classes[0] = 0;
classes[1] = 0;
classes[2] = 2;
}
}
} else {
classes[0] = 0;
classes[1] = 0;
classes[2] = 1;
}
} else {
classes[0] = 0;
classes[1] = 0;
classes[2] = 46;
}
}
int class_idx = 0;
int class_val = classes[0];
int i;
for (i = 1; i < 3; i++) {
if (classes[i] > class_val) {
class_idx = i;
class_val = classes[i];
}
}
return class_idx;
}
int predict (float features[]) {
int n_classes = 3;
int classes[n_classes];
int i;
for (i = 0; i < n_classes; i++) {
classes[i] = 0;
}
classes[predict_0(features)]++;
classes[predict_1(features)]++;
classes[predict_2(features)]++;
classes[predict_3(features)]++;
classes[predict_4(features)]++;
classes[predict_5(features)]++;
classes[predict_6(features)]++;
classes[predict_7(features)]++;
classes[predict_8(features)]++;
classes[predict_9(features)]++;
classes[predict_10(features)]++;
classes[predict_11(features)]++;
classes[predict_12(features)]++;
classes[predict_13(features)]++;
classes[predict_14(features)]++;
int class_idx = 0;
int class_val = classes[0];
for (i = 1; i < n_classes; i++) {
if (classes[i] > class_val) {
class_idx = i;
class_val = classes[i];
}
}
return class_idx;
}
int main(int argc, const char * argv[]) {
float features[argc-1];
int i;
for (i = 1; i < argc; i++) {
features[i-1] = atof(argv[i]);
}
printf("%d", predict(features));
return 0;
}
CPU times: user 11.2 ms, sys: 2.75 ms, total: 14 ms
Wall time: 12.1 ms
###Markdown
Run classification in C:
Save the transpiled estimator:
###Code
with open('forest.c', 'w') as f:
f.write(output)
###Output
_____no_output_____
###Markdown
Compiling:
###Code
%%bash
gcc forest.c -std=c99 -lm -o forest
###Output
_____no_output_____
###Markdown
Prediction:
###Code
%%bash
./forest 1 2 3 4
###Output
1
Topics_Master/08-Plotly-and-Cufflinks/01-Plotly and Cufflinks.ipynb | ###Markdown
______
Plotly and Cufflinks
Plotly is a library that allows you to create interactive plots that you can use in dashboards or websites (you can save them as html files or static images).
Installation
In order for this all to work, you'll need to install plotly and cufflinks to call plots directly off of a pandas dataframe. These libraries are not currently available through **conda** but are available through **pip**. Install the libraries at your command line/terminal using:
    pip install plotly
    pip install cufflinks
**NOTE: Make sure you only have one installation of Python on your computer when you do this, otherwise the installation may not work.**
Imports and Set-up
###Code
import pandas as pd
import numpy as np
%matplotlib inline
from plotly import __version__
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
print(__version__) # requires version >= 1.9.0
import cufflinks as cf
# For Notebooks
init_notebook_mode(connected=True)
# For offline use
cf.go_offline()
###Output
_____no_output_____
###Markdown
Fake Data
###Code
df = pd.DataFrame(np.random.randn(100,4),columns='A B C D'.split())
df.head()
df2 = pd.DataFrame({'Category':['A','B','C'],'Values':[32,43,50]})
df2.head()
###Output
_____no_output_____
###Markdown
Using Cufflinks and iplot()
* scatter
* bar
* box
* spread
* ratio
* heatmap
* surface
* histogram
* bubble
Scatter
###Code
df.iplot(kind='scatter',x='A',y='B',mode='markers',size=10)
###Output
_____no_output_____
###Markdown
Bar Plots
###Code
df2.iplot(kind='bar',x='Category',y='Values')
df.count().iplot(kind='bar')
###Output
_____no_output_____
###Markdown
Boxplots
###Code
df.iplot(kind='box')
###Output
_____no_output_____
###Markdown
3d Surface
###Code
df3 = pd.DataFrame({'x':[1,2,3,4,5],'y':[10,20,30,20,10],'z':[5,4,3,2,1]})
df3.iplot(kind='surface',colorscale='rdylbu')
###Output
_____no_output_____
###Markdown
Spread
###Code
df[['A','B']].iplot(kind='spread')
###Output
_____no_output_____
###Markdown
histogram
###Code
df['A'].iplot(kind='hist',bins=25)
df.iplot(kind='bubble',x='A',y='B',size='C')
###Output
_____no_output_____
###Markdown
scatter_matrix()
Similar to sns.pairplot()
###Code
df.scatter_matrix()
###Output
_____no_output_____ |
src/Notebooks/ModelTraining.ipynb | ###Markdown
Model Training
In this notebook, we formulate a multi-class classification problem as follows:
> Is a machine going to need maintenance within the next N cycles, and if yes, due to what type of failure?
First, we define the future horizon:
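As a rough sketch of what such a horizon means for the labels: a record is tagged with a failure type only if that failure occurs within the next `N` cycles. The actual labels are produced upstream during feature engineering; the horizon `N` and the helper below are hypothetical, for illustration only.

```python
# Illustrative only: the real labels come from the feature-engineering step.
N = 7  # assumed future horizon, in cycles

def label_record(cycles_to_failure: int, failure_type: str) -> str:
    """Return the failure type if it happens within N cycles, else '' (healthy)."""
    return failure_type if cycles_to_failure <= N else ""

print(label_record(3, "F1"))   # 'F1' -> needs maintenance within the horizon
print(label_record(40, "F2"))  # ''   -> healthy within the horizon
```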
###Code
%matplotlib inline
import glob
import itertools
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from collections import Counter
from pathlib import Path
from imblearn.over_sampling import SMOTE
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, accuracy_score, roc_auc_score, classification_report
from sklearn.dummy import DummyClassifier
from sklearn.preprocessing import LabelBinarizer
from sklearn.externals import joblib
data_dir = str(Path.home()) + '/data'
###Output
_____no_output_____
###Markdown
Reading input data
###Code
filenames = glob.glob(data_dir + '/features/*.csv')
converters={"failure": str}
seed = 42
dfs= [pd.read_csv(filename, converters=converters) for filename in filenames]
data = pd.concat(dfs, ignore_index=True).fillna('')
###Output
_____no_output_____
###Markdown
Train/test split
Two split strategies are implemented below:
* time-dependent split
* asset ID-based split
Time-dependent split is more complex as it requires that training and test data sets don't have common rolling feature time frames. This means that for all sequences in the test data set, $X$ immediately preceding entries need to be removed from the training data, where $X$ is the length of the rolling aggregation window used during feature engineering minus 1 [[2]](ref_2).
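For intuition, here is a toy sketch of that trimming rule with made-up numbers (it is not the project code in the next cell): with a rolling window of `lookback` steps, the `lookback - 1` training entries immediately before the test boundary would share time frames with the first test windows, so they are dropped.

```python
# Toy illustration of the time-dependent split boundary (assumed values).
lookback = 5
cycles = list(range(1, 21))   # 20 cycles of one hypothetical machine
first_test_cycle = 16         # assumed start of the test period

test_cycles = [c for c in cycles if c >= first_test_cycle]
train_cycles = [c for c in cycles if c < first_test_cycle - (lookback - 1)]
print(train_cycles[-1], test_cycles[0])  # 11 16 -> no shared rolling windows
```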
###Code
lookback = 5 # rolling aggregation interval used during feature engineering
test_size = 0.2
time_split = False # if set to False, will perform asset ID-based split
if time_split:
data.set_index(['entryID'], inplace=True)
data.sort_index(inplace=True)
train, test = train_test_split(data, test_size=test_size, shuffle=False)
min_cycles = test.reset_index().groupby(
['machineID']
).cycle.min().apply(lambda x: x - lookback).to_frame(name='max_cycle')
t = train.reset_index().join(min_cycles, on='machineID')
train = t[t.max_cycle.isna() |
(t.cycle < t.max_cycle)].drop('max_cycle', axis=1)
train.set_index(['entryID'], inplace=True)
else:
# asset ID-based split
unique_assets = data.reset_index().machineID.unique()
train_assets, test_assets = train_test_split(
unique_assets, test_size=test_size, random_state=seed)
train = data[data.machineID.isin(train_assets)]
test = data[data.machineID.isin(test_assets)]
train.set_index(['entryID'], inplace=True)
test.set_index(['entryID'], inplace=True)
def xy_split(data):
data = data.reset_index(drop = True)
columns_to_drop = ['cycle', 'immediate_failure', 'rul', 'sequenceID', 'machineID']
return (data.drop(columns_to_drop, axis=1),
data['immediate_failure'])
X_train, Y_train = xy_split(train)
X_test, Y_test = xy_split(test)
###Output
_____no_output_____
###Markdown
Correction of data imbalance
In typical predictive maintenance data sets, positive examples are often underrepresented relative to negative examples. This can be seen by counting failure types in the "ground truth" training data set:
###Code
all_classes = Counter(Y_train)
majority_class = all_classes.most_common(1)
minority_classes = all_classes.most_common()[1:]
print('Majority class: ', majority_class)
print('Minority classes: ', minority_classes)
###Output
Majority class: [('', 51971)]
Minority classes: [('F1', 924), ('F2', 384)]
###Markdown
With class imbalance in data, performance of most standard learning algorithms is compromised, since they aim to minimize the overall error rate. For a data set with 99% negative and 1% positive examples, a model can be shown to have 99% accuracy by labeling all instances as negative. But the model will mis-classify all positive examples; so even if its accuracy is high, the algorithm is not a useful one.
Here, we will use the Synthetic Minority Over-sampling Technique (SMOTE) [[3]](ref_3) to produce a more balanced training data set with at least 10% of positive examples. Note that over-sampling is not applied to the test data set.
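Using the class counts printed above, a quick back-of-the-envelope check of the imbalance and of the 10% target that the next cell computes:

```python
# Rough arithmetic based on the counts shown above (they depend on the split).
majority, minority = 51971, 924 + 384
total = majority + minority
print(f"positive share before SMOTE: {minority / total:.1%}")  # ~2.5%
print(f"target minority size at 10%: {int(total * 0.1)}")      # ~5,327
```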
###Code
minority_classes_size = sum([c[1] for c in minority_classes])
desired_minority_classes_size = Y_train.count() * 0.1
scale = desired_minority_classes_size / minority_classes_size
ratio = None
if scale > 1:
ratio = dict((c[0], int(c[1] * scale)) for c in minority_classes)
sm = SMOTE(ratio=ratio, random_state=seed)
X_train_res, Y_train_res = sm.fit_sample(X_train, Y_train)
Counter(Y_train_res)
###Output
_____no_output_____
###Markdown
Model training
Decision Trees are among the most popular and versatile classification methods. They work with both numerical and categorical data, and perform well even given relatively small training data sets.
Using decision trees within an ensemble (called a Random Decision Forest) helps alleviate several problems:
* overfitting
* multi-collinearity
Fitting the Random Forest classifier on the training data set
###Code
clf = RandomForestClassifier(random_state=seed)
clf.fit(X_train_res, Y_train_res)
###Output
_____no_output_____
###Markdown
Model evaluation
Confusion matrix, precision, recall and F1 score
The easiest to visualize and interpret summary of a multi-class classifier's performance is the confusion matrix. This matrix is a juxtaposition of the classifier's predictions against the ground truth categories.
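As a toy illustration of how such a matrix is read (made-up labels, unrelated to the model evaluated below):

```python
from sklearn.metrics import confusion_matrix

y_true = ["", "", "F1", "F2", "F1", ""]
y_pred = ["", "F1", "F1", "F2", "", ""]
# Rows are the true classes, columns the predicted ones;
# off-diagonal counts are misclassifications.
print(confusion_matrix(y_true, y_pred, labels=["", "F1", "F2"]))
```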
###Code
Y_predictions = clf.predict(X_test)
def plot_confusion_matrix(cm, classes,
title='Confusion matrix',
cmap=plt.cm.Blues):
orig = cm
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt) + '\n({0})'.format(orig[i, j]),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
#cm = confusion_matrix(Y_test, binarizer.inverse_transform(Y_predictions))
cm = confusion_matrix(Y_test, Y_predictions)
plot_confusion_matrix(cm, ['None'] + [c[0] for c in minority_classes])
###Output
_____no_output_____
###Markdown
For binary classification tasks, most of the classifier's performance measures can be derived directly from the entries of the standard two-by-two confusion matrix.
$$\begin{matrix} & Prediction=Negative & Prediction=Positive \\ Actual=Negative & \scriptsize True\ negatives\ (TN) & \scriptsize False\ positives\ (FP) \\ Actual=Positive & \scriptsize False\ negatives\ (FN) & \scriptsize True\ positives\ (TP) \end{matrix}$$
Here are the definitions of several of the most common model performance measures:
| Measure | Formula |
|:--- |------|
| Precision | $\frac{TP}{TP+FP}$ |
| Recall | $\frac{TP}{TP+FN}$ |
| F1 score | $\frac{2\cdot precision\cdot recall}{precision+recall}$ |
In a multi-class context, these measures are computed for each label independently and then averaged across the entire set of classes (as demonstrated in the classification report below).
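A quick worked example with made-up counts (TP=80, FP=20, FN=40):

```python
tp, fp, fn = 80, 20, 40                             # made-up counts
precision = tp / (tp + fp)                          # 0.80
recall = tp / (tp + fn)                             # ~0.67
f1 = 2 * precision * recall / (precision + recall)  # ~0.73
print(round(precision, 2), round(recall, 2), round(f1, 2))
```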
###Code
print(classification_report(Y_test, Y_predictions, digits=4))
###Output
precision recall f1-score support
0.9980 0.9963 0.9971 12724
F1 0.8593 0.9431 0.8992 246
F2 0.8800 0.8462 0.8627 78
avg / total 0.9946 0.9944 0.9945 13048
###Markdown
Note: the ```classification_report``` function computes the averages taking class imbalance into account; for that reason, they are heavily biased towards the majority class.
Accuracy
In general, *accuracy* is an inappropriate measure for unbalanced classes. To demonstrate that, let's compare the accuracy of our model against that of a dummy classifier (sometimes called a *null* model) which always returns the most frequent class.
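The support column printed above already hints at why: the majority class alone covers almost the entire test set, so a model that never predicts a failure gets roughly that share as its accuracy.

```python
# Majority-class share of the test set, using the support values shown above.
print(12724 / (12724 + 246 + 78))  # ~0.975
```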
###Code
dummy = DummyClassifier(strategy='most_frequent')
dummy.fit(X_train_res, Y_train_res)
Y_dummy = dummy.predict(X_test)
print('Accuracy scores')
print('Trained model: {0}\nDummy classifier: {1}'.format(accuracy_score(Y_test, Y_predictions),
accuracy_score(Y_test, Y_dummy)))
###Output
Accuracy scores
Trained model: 0.9944052728387492
Dummy classifier: 0.9751686082158185
###Markdown
With respect to accuracy, the trained model only slightly outperforms a dummy classifier.
Area Under the Curve (AUC)
AUC is the area under the *receiver operating characteristic curve* (ROC curve), which is 1.0 for ideal classifiers and 0.5 for those that do no better than random guessing. Let's compare the AUC score of the trained model with that of the dummy classifier.
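For intuition, the score on a tiny binary toy problem (unrelated to the model below) looks like this:

```python
from sklearn.metrics import roc_auc_score

y_true = [0, 0, 1, 1]
y_score = [0.1, 0.4, 0.35, 0.8]        # made-up predicted scores
print(roc_auc_score(y_true, y_score))  # 0.75
```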
###Code
# roc_auc_score expects binarized labels
binarizer = LabelBinarizer()
binarizer.fit(Y_train_res)
Y_test_binarized = binarizer.transform(Y_test)
def auc_score(y_true, y_pred):
return roc_auc_score(binarizer.transform(y_true), binarizer.transform(y_pred), average='macro')
print('ROC AUC scores')
print('Trained model: {0}\nDummy classifier: {1}'.format(auc_score(Y_test, Y_predictions),
auc_score(Y_test, Y_dummy)))
###Output
ROC AUC scores
Trained model: 0.9502733935438122
Dummy classifier: 0.5
###Markdown
The ROC AUC score would be a good candidate when a single sensitive model evaluation measure is needed.
Persisting the model and input sample
###Code
joblib.dump(clf, 'model.pkl')
sample = X_test.sample(n = 5).to_json(orient='records')
print('Sample:', sample)
with open('sample.json', 'w') as sample_file:
sample_file.write(sample)
###Output
Sample: [{"s1":1000,"s2":1022.6876111111,"s3":137.4747777778,"s4":137.58,"s5":1388.8037222222,"s6":1559.73,"s7":137.3947361111,"s8":137.505,"s9":1409.3883599537,"s10":1561.185},{"s1":1000,"s2":748.101,"s3":140.986,"s4":141.1,"s5":959.5683333333,"s6":1396.75,"s7":140.8330981481,"s8":140.9516666667,"s9":1110.4324037037,"s10":1401.125},{"s1":1000,"s2":761.0606666667,"s3":137.4818333333,"s4":137.58,"s5":1073.3493333333,"s6":1543.29,"s7":137.4045476852,"s8":137.5116666667,"s9":1292.0819180556,"s10":1545.375},{"s1":1000,"s2":1017.40275,"s3":135.556875,"s4":135.67,"s5":1423.6099166667,"s6":1553.29,"s7":135.5155046296,"s8":135.6216666667,"s9":1337.8370324074,"s10":1554.4083333333},{"s1":1000,"s2":987.7542777778,"s3":136.0816111111,"s4":136.19,"s5":1370.1871666667,"s6":1538.23,"s7":136.0294351852,"s8":136.135,"s9":1299.7469675926,"s10":1539.35}]
Chapter 6/Exersize 4. Changing series values conditionally.ipynb | ###Markdown
###Code
import pandas as pd
import numpy as np
nls97 = pd.read_csv("https://raw.githubusercontent.com/sandeep92134/PYTHON-Data-Cleaning/master/Chapter%206/datasets/nls97b.csv")
nls97.set_index("personid", inplace=True)
landtemps = pd.read_csv("https://raw.githubusercontent.com/sandeep92134/PYTHON-Data-Cleaning/master/Chapter%206/datasets/landtemps2019avgs.csv")
landtemps.elevation.quantile(np.arange(0.2,1.1,0.2))
landtemps['elevation_group'] = np.where(landtemps.elevation>landtemps.elevation.quantile(0.8),'High','Low')
landtemps.elevation_group = landtemps.elevation_group.astype('category')
landtemps.groupby(['elevation_group'])['elevation'].\
agg(['count','min','max'])
landtemps.elevation.median()
landtemps['elevation_group'] = np.where(landtemps.elevation>
landtemps.elevation.quantile(0.8),'High',np.where(landtemps.elevation>
landtemps.elevation.median(),'Medium','Low'))
landtemps.elevation_group = landtemps.elevation_group.astype('category')
landtemps.groupby(['elevation_group'])['elevation'].agg(['count','min','max'])
test = [(nls97.gpaoverall<2) & (nls97.highestdegree=='0. None'), nls97.highestdegree=='0. None', nls97.gpaoverall<2]
result = ['1. Low GPA and No Diploma','2. No Diploma','3. Low GPA']
nls97['hsachieve'] = np.select(test, result, '4. Did Okay')
nls97[['hsachieve','gpaoverall','highestdegree']].head()
nls97.hsachieve.value_counts().sort_index()
nls97.loc[[100292,100583,100139], 'colenrfeb00':'colenroct04'].T
nls97['baenrollment'] = nls97.filter(like="colenr").\
apply(lambda x: x.str[0:1]=='3').\
any(axis=1)
nls97.loc[[100292,100583,100139], ['baenrollment']].T
nls97.baenrollment.value_counts()
# use apply and lambda to create a more complicated categorical series
def getsleepdeprivedreason(row):
sleepdeprivedreason = "Unknown"
if (row.nightlyhrssleep>=6):
sleepdeprivedreason = "Not Sleep Deprived"
elif (row.nightlyhrssleep>0):
if (row.weeksworked16+row.weeksworked17 < 80):
if (row.childathome>2):
sleepdeprivedreason = "Child Rearing"
else:
sleepdeprivedreason = "Other Reasons"
else:
if (row.wageincome>=62000 or row.highestgradecompleted>=16):
sleepdeprivedreason = "Work Pressure"
else:
sleepdeprivedreason = "Income Pressure"
else:
sleepdeprivedreason = "Unknown"
return sleepdeprivedreason
nls97['sleepdeprivedreason'] = nls97.apply(getsleepdeprivedreason, axis=1)
nls97.sleepdeprivedreason = nls97.sleepdeprivedreason.astype('category')
nls97.sleepdeprivedreason.value_counts()
###Output
_____no_output_____ |
_notebooks/2022-01-21-mdp-inventory.ipynb | ###Markdown
MDP Basics with Inventory Control
> An industrial example of using Markov decision processes for inventory control.
This is a great example of using an MDP to define a problem. I intentionally keep it simple to make all the main parts of the MDP clear.
Imagine you own a really simple shop. It sells one product and you have limited room for stock. The question is, when should you restock?
Setup
###Code
# !pip install -q banditsbook==0.1.1
!pip install -q pandas==1.1.2
!pip install -q matplotlib==3.3.2
%matplotlib inline
import numpy as np
import pandas as pd
from enum import Enum
###Output
_____no_output_____
###Markdown
Scenario
Defining a problem in RL terms is a common issue for people who have a background in data science. Gaining more experience in the problem definition phase is worthwhile before you move on to other ways of solving MDPs.
Inventory control is a good example to start with because it is both useful and easy to understand. This example has direct analogies with pricing, optimal stopping (selling an asset), maintenance, and more. I start with a very simple example and then expand it to a more complex version.
Imagine you own a small shop. This shop is so small you only have a single product. Each day customers buy your product so you need to restock. If you run out of stock, then you can’t sell your product and make any money; you don’t want to run out of stock. You rent your shop and it costs you a certain amount of money per square meter. You don’t want to stock millions of items because you would need a larger premises. I could define many high-level problems, like the optimal size for the premises, or minimizing the delivery charges, but to start simply, I define the problem as deciding the best point to restock.
Let $s$ represent the number of items in stock on a particular day. This is the state of the environment. In an attempt to keep things as simple as possible, I assume three possible states: no stock, one item in stock, or two items in stock, $\mathcal{I}=\{0,1,2\}$.
![](https://github.com/recohut/nbs/blob/main/raw/_images/T159137_1.png?raw=1)
Actions and Potential Strategies
Given the current state, the agent can perform an action. Keeping it simple, assume that the agent can take one of two actions. Either restock, which orders 1 new product, or do nothing: $\mathcal{A}=\{restock,none\}$. If the current state was 1 and the agent performs the restock action, the next state will be 2. Also assume that you cannot order any stock when you are at full capacity.
###Code
class Action(Enum):
NONE = 0
RESTOCK = 1
@staticmethod
def keep_buying_action(current_state) -> Enum:
if current_state == 2:
return Action.NONE
else:
return Action.RESTOCK
@staticmethod
def random_action(current_state) -> Enum:
if current_state == 2:
return Action.NONE
if np.random.randint(len(Action)) == 0:
return Action.NONE
else:
return Action.RESTOCK
@staticmethod
def zero_action(current_state) -> Enum:
if current_state == 0:
return Action.RESTOCK
else:
return Action.NONE
print("There are {} actions.".format(len(Action)))
###Output
There are 2 actions.
###Markdown
Transition matrix
What are the probabilities that the agent shifts from one state to another? Since you are simulating the environment, you have to choose those, but they could come from observations of your shop. Say that the probability of a single sale during a day, $p(sale)$, is 0.7. In the future you could predict the number of items sold. But for now, I use a Bernoulli distribution again. This means that in each state there is a 70% probability of a sale and a 30% probability of no sale. I’m going to develop an environment to simulate the shop. In this instance I’m going to declare all the transition probabilities myself.
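A sale on a given day can then be simulated as a single Bernoulli draw (a small sketch of the idea; the notebook itself encodes these probabilities in a transition matrix below):

```python
# Minimal sketch: one Bernoulli draw per day with p(sale) = 0.7.
import numpy as np

rng = np.random.default_rng(0)
sales = rng.random(10) < 0.7   # True means a sale happened that day
print(int(sales.sum()), "sales in 10 days")
```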
###Code
p_sale = 0.7 # Probability of a sale in one step
n_steps = 100 # Number of steps to run experiment for
np.random.seed(42)
###Output
_____no_output_____
###Markdown
Now, since MDP is a model-based environment, that means we have the complete knowledge of our environment. To represent the environment, we use transition probabilities. There are 3 ways to do this:1. Using a table with all combinations of $\{s,a,s',p(s'|s,a),r\}$ tuple.2. Transition graph.3. The final way to represent the transition probabilities is with a matrix for each action, known as the transition matrix. You already placed a constraint that you cannot restock if the stockroom is full. That is one hard rule. But after that, what is the best action to take? If you look purely at the rewards, which represent the goal, then there is only a positive reward on making sales. Crucially, there is no negative reward for placing orders. In other words, according to the reward matrix, it doesn’t cost anything to keep stock and it is free to order new stock. Given this reward structure, the best strategy is to keep ordering as much as you can.Even though the strategy is obvious in this case, you should always test your idea—the scientific method. It is always a good idea to compare your theory against other baselines.
###Code
# The Transition Matrix represents the following states:
# State 0
# State 1
# State 2
transition_matrix = [
# No action
[
[1, 0, 0],
[p_sale, 1 - p_sale, 0],
[0, p_sale, 1 - p_sale],
],
# Restock
[
[p_sale, 1 - p_sale, 0],
        [0, p_sale, 1 - p_sale],
        # State 2: restocking is not allowed at full capacity, so these dynamics
        # mirror the "no action" row and are never selected by the strategies below.
        [0, p_sale, 1 - p_sale],
    ],
]
###Output
_____no_output_____
###Markdown
Reward matrix The final thing you need is a reward, $r$. Your shop needs to make money, so you can reward yourself every time you sell a product. But sales are possible only if the product is in stock at the time, or if you restocked just in time. This means the reward is conditional on the current state and on a sale; no sale means no reward. $$r = \begin{cases} 1 & \text{if } s > 0 \text{ and a sale occurs} \\ 1 & \text{if } a = \text{restock and a sale occurs} \\ 0 & \text{otherwise} \end{cases}$$
###Code
reward_matrix = [
# No action
[
[0, 0, 0],
[1, 0, 0],
[0, 1, 0],
],
# Restock
[
[1, 0, 0],
[0, 1, 0],
[0, 0, 0],
],
]
###Output
_____no_output_____
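###Markdown
As a quick cross-check, here is a small added sketch (not part of the original notebook) that recovers the first representation listed above, the table of $\{s,a,s',p(s'|s,a),r\}$ tuples, from the two matrices just defined.
###Code
# Added illustration: enumerate the same dynamics as a table of (s, a, s', p, r) tuples,
# using only the transition_matrix and reward_matrix defined above.
tuples = []
for action in Action:
    for s, row in enumerate(transition_matrix[action.value]):
        for s_next, p in enumerate(row):
            if p > 0:  # keep only transitions that can actually occur
                r = reward_matrix[action.value][s][s_next]
                tuples.append((s, action.name, s_next, p, r))
pd.DataFrame(tuples, columns=["s", "a", "s'", "p(s'|s,a)", "r"])
###Output
_____no_output_____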
###Markdown
Testing Different Restocking Strategies To demonstrate how this all fits together, let's imagine a single iteration of the environment. You start with an initial state, feed that to the "agent" to decide an action, then the environment uses the transition matrix to mutate the state and finally it receives a reward. Let's create a helper function to do all of that.
###Code
def environment(current_state: int, action: int) -> (int, int):
# Get the transition probabilities to each new state
current_transition_probabilities = \
transition_matrix[action.value][current_state]
    # Use the transition probabilities to transition to a new state
next_state = np.random.choice(
a=[0, 1, 2],
p=current_transition_probabilities
)
# Get the reward for the new state (was there a sale?)
reward = reward_matrix[action.value][current_state][next_state]
return (next_state, reward)
current_state = 1 # Current state, one product in stock
action = Action.RESTOCK # Current action, as chosen by a strategy
for i in range(10): # What happens if we run this multiple times?
next_state, reward = environment(current_state, action) # Environment interface
print(f"trial {i}: s={current_state}, a={action}, s'={next_state}, r={reward}")
###Output
trial 0: s=1, a=Action.RESTOCK, s'=1, r=1
trial 1: s=1, a=Action.RESTOCK, s'=2, r=0
trial 2: s=1, a=Action.RESTOCK, s'=2, r=0
trial 3: s=1, a=Action.RESTOCK, s'=1, r=1
trial 4: s=1, a=Action.RESTOCK, s'=1, r=1
trial 5: s=1, a=Action.RESTOCK, s'=1, r=1
trial 6: s=1, a=Action.RESTOCK, s'=1, r=1
trial 7: s=1, a=Action.RESTOCK, s'=2, r=0
trial 8: s=1, a=Action.RESTOCK, s'=1, r=1
trial 9: s=1, a=Action.RESTOCK, s'=2, r=0
###Markdown
Recall that the sale is a stochastic variable. Sometimes there is a sale, sometimes there is not. When there is no sale, the stock (state) increases to 2, but there is no reward. When there is a sale, the stock (state) stays at 1, because we sold one item and restocked one, and we receive a reward of 1. Now let's run this over a longer period of time, using different strategies. The three strategies I want to try are: always restock, restock when no stock is left (just in time), and random restock.
###Code
# The different strategies
strategies = [("Keep Buying", Action.keep_buying_action),
("Upon Zero", Action.zero_action), ("Random", Action.random_action)]
result = [] # Results buffer
for (policy_name, action_getter) in strategies:
np.random.seed(42) # This is really important, otherwise different strategies will experience sales
reward_history = [] # Reward buffer
current_state = 2 # Initial state
total_reward = 0
for i in range(n_steps):
reward_history.append(total_reward)
action = action_getter(current_state) # Get new action for strategy
next_state, reward = environment(current_state, action) # Environment interface
print("Moving from state {} to state {} after action {}. We received the reward {}."
.format(current_state, next_state, action.name, reward))
total_reward += reward
current_state = next_state # Set next state to current state and repeat
print("The total reward was {}.".format(total_reward))
# Pandas/plotting stuff
series = pd.Series(
reward_history,
index=range(n_steps),
name="{} ({})".format(policy_name, total_reward / n_steps))
result.append(series)
df = pd.concat(result, axis=1)
(df).plot();
###Output
_____no_output_____
###Markdown
The restock and just in time curves are overlapping, so let me add a little jitter so you can see them…
###Code
(df + np.random.normal(size=df.shape)*0.5).plot();
###Output
_____no_output_____
###Markdown
So you can see that the always buy and just in time strategies are equivalent, given this reward function, because holding stock isn't penalised. Obviously this picture would change if we made the simulation more complex. I'd like to demonstrate what happens when you don't fix the random seeds. Let's run the exact same code again, but this time skip the random seed setting.
###Code
# The different strategies
strategies = [("Keep Buying", Action.keep_buying_action),
("Upon Zero", Action.zero_action), ("Random", Action.random_action)]
result = [] # Results buffer
for (policy_name, action_getter) in strategies:
# np.random.seed(42) # Commenting this line out!!!
reward_history = [] # Reward buffer
current_state = 2 # Initial state
total_reward = 0
for i in range(n_steps):
reward_history.append(total_reward)
action = action_getter(current_state) # Get new action for strategy
next_state, reward = environment(current_state, action) # Environment interface
print("Moving from state {} to state {} after action {}. We received the reward {}."
.format(current_state, next_state, action.name, reward))
total_reward += reward
current_state = next_state # Set next state to current state and repeat
print("The total reward was {}.".format(total_reward))
# Pandas/plotting stuff
series = pd.Series(
reward_history,
index=range(n_steps),
name="{} ({})".format(policy_name, total_reward / n_steps))
result.append(series)
df = pd.concat(result, axis=1)
df.plot();
###Output
_____no_output_____ |
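###Markdown
As a final added sanity check (a sketch, not part of the original experiments), the long-run average reward of each deterministic strategy can also be computed analytically: build the policy-induced Markov chain from the transition matrix, find its stationary distribution, and take the expected one-step reward under it. For both the "Keep Buying" and "Upon Zero" strategies this works out to p_sale = 0.7 reward per step, consistent with the simulated averages above.
###Code
# Added sketch: evaluate a deterministic policy analytically instead of by simulation.
def policy_average_reward(policy):
    n_states = 3
    P = np.zeros((n_states, n_states))   # policy-induced transition matrix
    r = np.zeros(n_states)               # expected one-step reward in each state
    for s in range(n_states):
        a = policy(s)                    # deterministic action chosen in state s
        P[s, :] = transition_matrix[a.value][s]
        r[s] = np.dot(P[s, :], reward_matrix[a.value][s])
    # Stationary distribution: eigenvector of the transposed matrix with eigenvalue 1,
    # normalised so that it sums to 1.
    eigvals, eigvecs = np.linalg.eig(P.T)
    stationary = np.real(eigvecs[:, np.argmin(np.abs(eigvals - 1))])
    stationary = stationary / stationary.sum()
    return np.dot(stationary, r)

print("Keep Buying, average reward per step:", policy_average_reward(Action.keep_buying_action))
print("Upon Zero, average reward per step:", policy_average_reward(Action.zero_action))
###Output
_____no_output_____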
perspectival_comparison/perspectives.ipynb | ###Markdown
Comparing perspectives on gender Judith Butler has argued that gender is a performative concept, which implies an audience. But different audiences may perceive the performance in different ways. This notebook gathers a few (very tentative) experiments that try to illustrate the different conceptions of gender implicit in books by men and by women. The underlying data used here is a collection of roughly 78,000 characters from 1800 to 1999, of which about 28,000 are drawn from books written by women. This is itself a subset of a larger collection.
###Code
import pandas as pd
import numpy as np
import csv
from collections import Counter
from scipy.stats import pearsonr
metadata = pd.read_csv('../metadata/balanced_character_subset.csv')
timeslice = metadata[(metadata.firstpub >= 1800) & (metadata.firstpub < 2000)]
print('Number of characters: ', len(timeslice.gender))
print('Number identified as women or girls:', sum(timeslice.gender == 'f'))
print('Number drawn from books written by women:', sum(timeslice.authgender == 'f'))
###Output
Number of characters: 78268
Number identified as women or girls: 39134
Number drawn from books written by women: 28183
###Markdown
Using a separate script (reproduce_character_models.py), I have trained six different models on subsets of 3000 characters drawn from this larger set. Each training set is divided equally between masculine and feminine characters. Three of the training sets are drawn from books by men; three from books by women. First let's start by comparing the coefficients of these models. This is not going to be terribly rigorous, quantitatively. I just want to get a sense of a few words that tend to be used differently by men and women, so I can flesh out my observation that these models could--in principle--be considered different "perspectives" on gender.
###Code
# We're going to load the features of six models, treating
# them simply as ranked lists of words. Words at the beginning
# of each list tend to be associated with masculine characters;
# words toward the end tend to be associated with feminine characters.
# We could of course use the actual coefficients instead of simple
# ranking, but I'm not convinced that adding and subtracting
# coefficients has a firmer mathematical foundation than
# adding and subtracting ranks.
# In order to compare these lists, we will start by filtering out words
# that don't appear in all six lists. This is a dubious choice,
# but see below for a better way of measuring similarity between
# models based on their predictions.
rootpath = 'models/'
masculineperspectives = []
feminineperspectives = []
for letter in ['A', 'B', 'C']:
feminineperspectives.append(rootpath + 'onlywomenwriters' + letter + '.coefs.csv')
masculineperspectives.append(rootpath + 'onlymalewriters' + letter + '.coefs.csv')
def intersection_of_models(fpaths, mpaths):
    fpaths = fpaths + mpaths  # combine feminine and masculine model paths without mutating the caller's lists
words = []
for p in fpaths:
thislist = []
with open(p, encoding = 'utf-8') as f:
reader = csv.reader(f)
for row in reader:
if len(row) > 0:
thislist.append(row[0])
words.append(thislist)
shared_features = set.intersection(set(words[0]), set(words[1]), set(words[2]),
set(words[3]), set(words[4]), set(words[5]))
filtered_features = []
for i in range(6):
newlist = []
for w in words[i]:
if w in shared_features:
newlist.append(w)
filtered_features.append(newlist)
feminine_lists = filtered_features[0 : 3]
masculine_lists = filtered_features[3 : 6]
return feminine_lists, masculine_lists
feminine_lists, masculine_lists = intersection_of_models(feminineperspectives, masculineperspectives)
# now let's create a consensus ranking for both groups of writers
def get_consensus(three_lists):
'''
Given three lists, constructs a consensus ranking for each
word. We normalize to a 0-1 scale--not strictly necessary,
since all lists are the same lengths, but it may be more
legible than raw ranks.
'''
assert len(three_lists) == 3
assert len(three_lists[1]) == len(three_lists[2])
denominator = len(three_lists[0]) * 3
    # we multiply the denominator by three
# because there are going to be three lists
sum_of_ranks = Counter()
for alist in three_lists:
for index, word in enumerate(alist):
sum_of_ranks[word] += index / denominator
return sum_of_ranks
feminine_rankings = get_consensus(feminine_lists)
masculine_rankings = get_consensus(masculine_lists)
# Now we're going to sort words based on the DIFFERENCE
# between feminine and masculine perspectives.
# Negative scores will be words that are strongly associated with
# men (for women) and women (for men).
# Scores near zero will be words that are around the same position
# in both models of gender.
# Strongly positive scores will be words strongly associated with
# women (for women) and men (for men).
wordrank_pairs = []
for word, ranking in feminine_rankings.items():
if word not in masculine_rankings:
        print('Warning: {0} is missing from the masculine rankings'.format(word))
else:
difference = ranking - masculine_rankings[word]
wordrank_pairs.append((difference, word))
wordrank_pairs.sort()
# The first hundred words will be negative scores,
# strongly associated with men (for women) and women (for men).
wordrank_pairs[0: 50]
# as you'll see there's a lot of courtship and
# romance here
# The last hundred words will be positive scores,
# strongly associated with women (for women) and men (for men).
# To keep the most important words at the top of the list,
# I reverse it.
positive = wordrank_pairs[-50 : ]
positive.reverse()
for pair in positive:
print(pair)
# Much harder to characterize, and I won't actually characterize
# this list in the article, but between you and me, I would say
# there's a lot of effort, endeavoring, and thinking here.
# "Jaw," "chin" and "head" are also interesting. Perhaps in some weird way
# they are signs of effort? "She set her jaw ..." Again, I'm not going
# to actually infer anything from that -- just idly speculating.
###Output
(0.7509655751469353, 'spend')
(0.634592779177162, 'jaw')
(0.6241813602015114, 'conscience')
(0.5890848026868178, 'account')
(0.5669185558354324, 'chair')
(0.5667506297229219, 'wrote')
(0.5655751469353484, 'drove')
(0.5608732157850546, 'sent')
(0.5521410579345088, 'busy')
(0.543408900083963, 'was-caught')
(0.5424013434089001, 'endeavoured')
(0.5365239294710327, 'was-tired')
(0.5355163727959696, 'palm')
(0.5353484466834592, 'thoughts')
(0.533165407220823, 'attendants')
(0.5323257766582704, 'chin')
(0.5306465155331654, 'history')
(0.5251049538203191, 'gift')
(0.5195633921074727, 'help')
(0.5185558354324097, 'assumed')
(0.5109991603694374, 'attack')
(0.5036104114189757, 'thought')
(0.5026028547439128, 'palms')
(0.49874055415617136, 'carried')
(0.495549958018472, 'tried')
(0.4953820319059613, 'was-want')
(0.4947103274559195, 'was-treat')
(0.49454240134340893, 'was-relieved')
(0.4945424013434089, 'think')
(0.48984047019311505, 'years')
(0.4879932829554996, 'half')
(0.4849706129303107, 'brain')
(0.48110831234256923, 'imagination')
(0.48110831234256923, 'committed')
(0.4789252728799328, 'wondered')
(0.4743912678421495, 'pursued')
(0.47069689336691867, 'receive')
(0.47052896725440807, 'set')
(0.4701931150293871, 'custom')
(0.46935348446683467, 'supposed')
(0.46414777497900916, 'head')
(0.4624685138539043, 'forced')
(0.46179680940386225, 'listening')
(0.45910999160369437, 'grabbed')
(0.456255247691016, 'remarked')
(0.45591939546599497, 'effort')
(0.4554156171284635, 'was-reassured')
(0.453568429890848, 'promised')
(0.4475230898404703, 'was-joined')
(0.4463476070528968, 'explain')
###Markdown
Uncertainty How stable and reliable are these differences? We can find out by testing each of the nine possible pairings between our three masculine models and our three feminine models. The answer is that, for words at the top of the list like "love," the differences are pretty robust. They become rapidly less robust as you move down the list, so we should characterize them cautiously.
###Code
def get_variation(word, feminine_lists, masculine_lists):
differences = []
for f in feminine_lists:
for m in masculine_lists:
d = (f.index(word) /len(f)) - (m.index(word) / len(m))
differences.append(d)
return differences
print('love')
print(get_variation('love', masculine_lists, feminine_lists))
print('\nwas-marry')
print(get_variation('was-marry', masculine_lists, feminine_lists))
print('\nspend')
print(get_variation('spend', masculine_lists, feminine_lists))
print('\nconscience')
print(get_variation('conscience', masculine_lists, feminine_lists))
print('\nimagination')
print(get_variation('imagination', masculine_lists, feminine_lists))
###Output
love
[0.9622166246851385, 0.9546599496221662, 0.8035264483627204, 0.9385390428211586, 0.9309823677581863, 0.7798488664987405, 0.9405541561712846, 0.9329974811083123, 0.7818639798488665]
was-marry
[0.4931989924433249, 0.8901763224181359, 0.8841309823677581, 0.5083123425692695, 0.9052896725440805, 0.8992443324937027, 0.07153652392947107, 0.4685138539042822, 0.4624685138539043]
spend
[-0.7622166246851386, -0.6816120906801008, -0.5994962216624685, -0.8619647355163729, -0.781360201511335, -0.6992443324937028, -0.8720403022670026, -0.7914357682619647, -0.7093198992443325]
conscience
[-0.8282115869017632, -0.3858942065491183, -0.6554156171284634, -0.8967254408060453, -0.45440806045340043, -0.7239294710327455, -0.7627204030226701, -0.3204030226700252, -0.5899244332493703]
imagination
[-0.7279596977329974, -0.6811083123425692, -0.6846347607052896, -0.4921914357682619, -0.4453400503778337, -0.4488664987405541, -0.31335012594458433, -0.2664987405541561, -0.27002518891687655]
###Markdown
Comparing the average similarity between models Okay. The quantitative methodology above was not super-rigorous. I was just trying to get a rough sense of a few words that have notably different gender implications for writers who are men, or women. Let's try to compare these six models a little more rigorously by looking at the predictions they make. A separate function in reproduce_character_models has already gone through all six of the models used above and applied them to a balanced_test_set comprised of 1000 characters from books by women, and 1000 characters from books by men. (The characters themselves are also equally balanced by gender.) We now compare pairs of predictions about these characters, to see whether models based on books by women agree with each other more than they agree with models based on books by men, and vice-versa.
###Code
def model_correlation(firstpath, secondpath):
one = pd.read_csv(firstpath, index_col = 'docid')
two = pd.read_csv(secondpath, index_col = 'docid')
justpredictions = pd.concat([one['logistic'], two['logistic']], axis=1, keys=['one', 'two'])
justpredictions.dropna(inplace = True)
r, p = pearsonr(justpredictions.one, justpredictions.two)
return r
def compare_amongst_selves(listofpredictions):
r_scores = []
already_done = []
for path in listofpredictions:
for otherpath in listofpredictions:
if path == otherpath:
continue
elif (path, otherpath) in already_done:
continue
else:
r = model_correlation(path, otherpath)
r_scores.append(r)
already_done.append((otherpath, path))
# no need to compare a to b AND b to a
return r_scores
def average_r(r_scores):
'''
Technically, you don't directly average r scores; you use a
Fisher's transformation into z scores first. In practice, this
makes only a tiny difference, but ...
'''
z_scores = []
for r in r_scores:
z = np.arctanh(r)
z_scores.append(z)
mean_z = sum(z_scores) / len(z_scores)
mean_r = np.tanh(mean_z)
return mean_r
rootpath = 'predictions/'
masculineperspectives = []
feminineperspectives = []
for letter in ['A', 'B', 'C']:
feminineperspectives.append(rootpath + 'onlywomenwriters' + letter + '.results')
masculineperspectives.append(rootpath + 'onlymalewriters' + letter + '.results')
f_compare = compare_amongst_selves(feminineperspectives)
print(f_compare)
print("similarity among models of characters by women:", average_r(f_compare))
m_compare = compare_amongst_selves(masculineperspectives)
print(m_compare)
print("similarity among models of characters by men:", average_r(m_compare))
def compare_against_each_other(listofmasculinemodels, listoffemininemodels):
r_scores = []
for m in listofmasculinemodels:
for f in listoffemininemodels:
r = model_correlation(m, f)
r_scores.append(r)
return r_scores
both_compared = compare_against_each_other(masculineperspectives, feminineperspectives)
print(both_compared)
print('similarity between pairs of models that cross')
print('the gender boundary: ', average_r(both_compared))
###Output
[0.52090949748622484, 0.56643384953528197, 0.55423932332355386, 0.53094621866695213, 0.55722508876328036, 0.54946407323176882, 0.53756463252398334, 0.55990130469704724, 0.55846360992514921]
similarity between pairs of models that cross
the gender boundary: 0.548508482942
|
HW0718.ipynb | ###Markdown
Cart-Pole
###Code
import gym
import numpy as np
from matplotlib import pyplot as plt
from keras.layers import Dense
from keras.models import Sequential
from keras.optimizers import Adam
###Output
/home/sitibanc/anaconda3/lib/python3.6/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
from ._conv import register_converters as _register_converters
Using TensorFlow backend.
###Markdown
Define Class
###Code
# This is Policy Gradient agent for the Cartpole
# In this example, we use REINFORCE algorithm which uses monte-carlo update rule
class REINFORCEAgent:
def __init__(self, state_size, action_size, learning_rate=1e-2, use_render=True):
self.render = use_render
# get size of state and action
self.state_size = state_size
self.action_size = action_size
# These are hyper parameters for the Policy Gradient
self.learning_rate = learning_rate
# create model for policy network
self.model = self.build_model()
        # lists for the states, actions and rewards
        self.states, self.actions, self.rewards = [], [], []
        # discount factor for monte-carlo returns (assumed value; referenced by discount_rewards below)
        self.discount_factor = 0.99
        # buffers that accumulate whole trajectories between policy updates
        self.update_inputs_list, self.advantages_list = [], []
# approximate policy using Neural Network
# state is input and probability of each action is output of network
def build_model(self):
model = Sequential()
model.add(Dense(16, input_dim=self.state_size,
activation='relu', kernel_initializer='glorot_uniform', name='fc1'))
model.add(Dense(8, activation='relu',
kernel_initializer='glorot_uniform', name='fc2'))
model.add(Dense(self.action_size, activation='softmax',
kernel_initializer='glorot_uniform', name='output'))
model.summary()
# Using categorical crossentropy as a loss is a trick to easily
# implement the policy gradient. Categorical cross entropy is defined
# H(p, q) = sum(p_i * log(q_i)). For the action taken, a, you set
# p_a = advantage. q_a is the output of the policy network, which is
# the probability of taking the action a, i.e. policy(s, a).
# All other p_i are zero, thus we have H(p, q) = A * log(policy(s, a))
model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=self.learning_rate))
return model
# using the output of policy network, pick action stochastically
def get_action(self, state):
policy = self.model.predict(state, batch_size=1).flatten()
return np.random.choice(self.action_size, 1, p=policy)[0]
# In Policy Gradient, Q function is not available.
# Instead agent uses sample returns for evaluating policy
def discount_rewards(self, rewards):
discounted_rewards = np.zeros_like(rewards)
running_add = 0
for t in reversed(range(0, len(rewards))):
running_add = running_add * self.discount_factor + rewards[t]
discounted_rewards[t] = running_add
return discounted_rewards
# save <s, a ,r> of each step
def append_sample(self, state, action, reward):
self.states.append(state)
self.rewards.append(reward)
self.actions.append(action)
    # store the finished episode and update the policy network every `update_interval` episodes
    def train_model(self, counter, update_interval):
        # =========================
        # Get multiple trajectories
        # =========================
        episode_length = len(self.states)
        update_inputs = np.zeros((episode_length, self.state_size))
        advantages = np.zeros((episode_length, self.action_size))
        for i in range(episode_length):
            update_inputs[i] = self.states[i]
            # the total episode length (equal to the total reward here) is used as the return of each action taken
            advantages[i][self.actions[i]] = len(self.actions)
        self.update_inputs_list.append(update_inputs)
        self.advantages_list.append(advantages)
        # reset list for next episode (trajectory)
        self.states, self.actions, self.rewards = [], [], []
        # =========================
        # Update network parameters
        # =========================
        if (counter + 1) % update_interval == 0:
            print("Updating Policy...")
            # list to array
            batch_inputs = np.concatenate(self.update_inputs_list)
            batch_advantages = np.concatenate(self.advantages_list)
            # subtract a baseline (the mean return over the batch)
            batch_advantages -= np.mean(np.max(batch_advantages, axis=-1))
            # Standardize
            batch_advantages -= np.mean(batch_advantages)
            batch_advantages /= np.std(batch_advantages)
            # Train model
            self.model.fit(batch_inputs, batch_advantages, batch_size=128, epochs=1)
            # reset the trajectory buffers
            self.update_inputs_list, self.advantages_list = [], []
###Output
_____no_output_____
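###Markdown
A short note on the loss trick used in `build_model` above (added for clarity): with the taken action's target set to the advantage $A_t$ and all other targets zero, the categorical cross-entropy of the policy output $\pi_\theta(a_t|s_t)$ reduces to $-A_t \log \pi_\theta(a_t|s_t)$, so minimising it in Keras is equivalent to ascending the REINFORCE gradient of $A_t \log \pi_\theta(a_t|s_t)$.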
###Markdown
Main Setup
###Code
env = gym.make('CartPole-v0')
# Game-over condition settings
env = env.unwrapped
env.theta_threshold_radians = 90 * 2 * np.pi / 360  # pole may tilt up to 90 degrees left or right
env.x_threshold = 2.4  # cart position limit of +/- 2.4
# get size of state and action from environment
state_size = env.observation_space.shape[0]
action_size = env.action_space.n
# make REINFORCE agent
agent = REINFORCEAgent(state_size, action_size, use_render=False)
scores = []
update_inputs_list, advantanges_list = [], []
###Output
[33mWARN: gym.spaces.Box autodetected dtype as <class 'numpy.float32'>. Please provide explicit dtype.[0m
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
fc1 (Dense) (None, 16) 80
_________________________________________________________________
fc2 (Dense) (None, 8) 136
_________________________________________________________________
output (Dense) (None, 2) 18
=================================================================
Total params: 234
Trainable params: 234
Non-trainable params: 0
_________________________________________________________________
###Markdown
Training
###Code
for e in range(3000):
done = False
score = 0
state = env.reset() # shape=(4,)
state = np.reshape(state, [1, state_size]) # shape=(1, 4)
while not done:
if agent.render:
env.render()
# get action for the current state and go one step in environment
action = agent.get_action(state)
next_state, reward, done, _ = env.step(action)
next_state = np.reshape(next_state, [1, state_size])
# reward = reward if not done or score == 499 else -100
# save the sample <s, a, r> to the memory
agent.append_sample(state, action, reward)
score += reward
state = next_state
if done:
# every episode, agent learns from sample returns
agent.train_model(e, 10)
# every episode, plot the play time
# score = score if score == 500 else score + 100
scores.append(score)
print("episode:", e+1, " score:", score)
# if the mean of scores of last 10 episode is bigger than 490
# stop training
# if np.mean(scores[-min(10, len(scores)):]) > 490:
# sys.exit()
# Plot Result
plt.plot(scores)
plt.xlabel("Episodes")
plt.ylabel("Score")
plt.title("Learning Curve")
plt.show()
###Output
episode: 1 score: 29.0
episode: 2 score: 59.0
episode: 3 score: 59.0
episode: 4 score: 75.0
episode: 5 score: 49.0
episode: 6 score: 33.0
episode: 7 score: 40.0
episode: 8 score: 27.0
episode: 9 score: 49.0
episode: 10 score: 42.0
episode: 11 score: 34.0
episode: 12 score: 28.0
episode: 13 score: 48.0
episode: 14 score: 49.0
episode: 15 score: 37.0
episode: 16 score: 49.0
episode: 17 score: 36.0
episode: 18 score: 31.0
episode: 19 score: 28.0
episode: 20 score: 56.0
episode: 21 score: 37.0
episode: 22 score: 29.0
episode: 23 score: 29.0
episode: 24 score: 47.0
episode: 25 score: 26.0
episode: 26 score: 59.0
episode: 27 score: 77.0
episode: 28 score: 56.0
episode: 29 score: 78.0
episode: 30 score: 51.0
episode: 31 score: 83.0
episode: 32 score: 81.0
episode: 33 score: 29.0
episode: 34 score: 61.0
episode: 35 score: 31.0
episode: 36 score: 32.0
episode: 37 score: 31.0
episode: 38 score: 55.0
episode: 39 score: 34.0
episode: 40 score: 40.0
episode: 41 score: 35.0
episode: 42 score: 44.0
episode: 43 score: 43.0
episode: 44 score: 34.0
episode: 45 score: 51.0
episode: 46 score: 46.0
episode: 47 score: 38.0
episode: 48 score: 36.0
episode: 49 score: 32.0
episode: 50 score: 28.0
episode: 51 score: 64.0
episode: 52 score: 35.0
episode: 53 score: 38.0
episode: 54 score: 42.0
episode: 55 score: 25.0
episode: 56 score: 41.0
episode: 57 score: 57.0
episode: 58 score: 31.0
episode: 59 score: 70.0
episode: 60 score: 59.0
episode: 61 score: 51.0
episode: 62 score: 30.0
episode: 63 score: 32.0
episode: 64 score: 29.0
episode: 65 score: 50.0
episode: 66 score: 41.0
episode: 67 score: 56.0
episode: 68 score: 31.0
episode: 69 score: 33.0
episode: 70 score: 26.0
episode: 71 score: 32.0
episode: 72 score: 54.0
episode: 73 score: 38.0
episode: 74 score: 77.0
episode: 75 score: 31.0
episode: 76 score: 28.0
episode: 77 score: 47.0
episode: 78 score: 46.0
episode: 79 score: 45.0
episode: 80 score: 33.0
episode: 81 score: 48.0
episode: 82 score: 38.0
episode: 83 score: 30.0
episode: 84 score: 39.0
episode: 85 score: 39.0
episode: 86 score: 35.0
episode: 87 score: 36.0
episode: 88 score: 74.0
episode: 89 score: 31.0
episode: 90 score: 34.0
episode: 91 score: 40.0
episode: 92 score: 28.0
episode: 93 score: 79.0
episode: 94 score: 28.0
episode: 95 score: 27.0
episode: 96 score: 31.0
episode: 97 score: 50.0
episode: 98 score: 40.0
episode: 99 score: 29.0
episode: 100 score: 58.0
episode: 101 score: 51.0
episode: 102 score: 26.0
episode: 103 score: 36.0
episode: 104 score: 51.0
episode: 105 score: 59.0
episode: 106 score: 57.0
episode: 107 score: 56.0
episode: 108 score: 85.0
episode: 109 score: 32.0
episode: 110 score: 42.0
episode: 111 score: 49.0
episode: 112 score: 52.0
episode: 113 score: 38.0
episode: 114 score: 30.0
episode: 115 score: 50.0
episode: 116 score: 34.0
episode: 117 score: 67.0
episode: 118 score: 31.0
episode: 119 score: 44.0
episode: 120 score: 37.0
episode: 121 score: 26.0
episode: 122 score: 54.0
episode: 123 score: 57.0
episode: 124 score: 30.0
episode: 125 score: 28.0
episode: 126 score: 24.0
episode: 127 score: 47.0
episode: 128 score: 86.0
episode: 129 score: 35.0
episode: 130 score: 34.0
episode: 131 score: 51.0
episode: 132 score: 50.0
episode: 133 score: 35.0
episode: 134 score: 29.0
episode: 135 score: 41.0
episode: 136 score: 44.0
episode: 137 score: 53.0
episode: 138 score: 43.0
episode: 139 score: 37.0
episode: 140 score: 31.0
episode: 141 score: 28.0
episode: 142 score: 55.0
episode: 143 score: 45.0
episode: 144 score: 35.0
episode: 145 score: 87.0
episode: 146 score: 33.0
episode: 147 score: 70.0
episode: 148 score: 22.0
episode: 149 score: 24.0
episode: 150 score: 35.0
episode: 151 score: 48.0
episode: 152 score: 63.0
episode: 153 score: 39.0
episode: 154 score: 84.0
episode: 155 score: 25.0
episode: 156 score: 57.0
episode: 157 score: 41.0
episode: 158 score: 63.0
episode: 159 score: 27.0
episode: 160 score: 37.0
episode: 161 score: 53.0
episode: 162 score: 28.0
episode: 163 score: 63.0
episode: 164 score: 47.0
episode: 165 score: 27.0
episode: 166 score: 31.0
episode: 167 score: 74.0
episode: 168 score: 41.0
episode: 169 score: 51.0
episode: 170 score: 42.0
episode: 171 score: 23.0
episode: 172 score: 40.0
episode: 173 score: 64.0
episode: 174 score: 29.0
episode: 175 score: 77.0
episode: 176 score: 31.0
episode: 177 score: 70.0
episode: 178 score: 59.0
episode: 179 score: 30.0
episode: 180 score: 33.0
episode: 181 score: 38.0
episode: 182 score: 29.0
episode: 183 score: 27.0
episode: 184 score: 31.0
episode: 185 score: 32.0
episode: 186 score: 30.0
episode: 187 score: 60.0
episode: 188 score: 55.0
episode: 189 score: 38.0
episode: 190 score: 36.0
episode: 191 score: 39.0
episode: 192 score: 26.0
episode: 193 score: 28.0
episode: 194 score: 29.0
episode: 195 score: 64.0
episode: 196 score: 25.0
episode: 197 score: 32.0
episode: 198 score: 37.0
episode: 199 score: 56.0
episode: 200 score: 43.0
episode: 201 score: 29.0
episode: 202 score: 33.0
episode: 203 score: 63.0
episode: 204 score: 52.0
episode: 205 score: 31.0
episode: 206 score: 49.0
episode: 207 score: 31.0
episode: 208 score: 49.0
episode: 209 score: 44.0
episode: 210 score: 34.0
episode: 211 score: 26.0
episode: 212 score: 28.0
episode: 213 score: 36.0
episode: 214 score: 58.0
episode: 215 score: 42.0
episode: 216 score: 56.0
episode: 217 score: 54.0
episode: 218 score: 59.0
episode: 219 score: 56.0
episode: 220 score: 42.0
episode: 221 score: 60.0
episode: 222 score: 51.0
episode: 223 score: 65.0
episode: 224 score: 29.0
episode: 225 score: 40.0
episode: 226 score: 32.0
episode: 227 score: 56.0
episode: 228 score: 52.0
episode: 229 score: 36.0
episode: 230 score: 50.0
episode: 231 score: 41.0
episode: 232 score: 38.0
episode: 233 score: 36.0
episode: 234 score: 30.0
episode: 235 score: 36.0
episode: 236 score: 49.0
episode: 237 score: 40.0
episode: 238 score: 25.0
episode: 239 score: 32.0
episode: 240 score: 30.0
episode: 241 score: 28.0
episode: 242 score: 32.0
episode: 243 score: 56.0
episode: 244 score: 48.0
episode: 245 score: 45.0
episode: 246 score: 62.0
episode: 247 score: 30.0
episode: 248 score: 29.0
episode: 249 score: 34.0
episode: 250 score: 38.0
episode: 251 score: 30.0
episode: 252 score: 31.0
episode: 253 score: 44.0
episode: 254 score: 25.0
episode: 255 score: 31.0
episode: 256 score: 39.0
episode: 257 score: 41.0
episode: 258 score: 52.0
episode: 259 score: 32.0
episode: 260 score: 49.0
episode: 261 score: 70.0
episode: 262 score: 33.0
episode: 263 score: 32.0
episode: 264 score: 41.0
episode: 265 score: 59.0
episode: 266 score: 39.0
episode: 267 score: 36.0
episode: 268 score: 33.0
episode: 269 score: 43.0
episode: 270 score: 58.0
episode: 271 score: 40.0
episode: 272 score: 35.0
episode: 273 score: 30.0
episode: 274 score: 61.0
episode: 275 score: 26.0
episode: 276 score: 25.0
episode: 277 score: 30.0
episode: 278 score: 34.0
episode: 279 score: 33.0
episode: 280 score: 50.0
episode: 281 score: 42.0
episode: 282 score: 38.0
episode: 283 score: 35.0
episode: 284 score: 36.0
episode: 285 score: 94.0
episode: 286 score: 67.0
episode: 287 score: 36.0
episode: 288 score: 36.0
episode: 289 score: 49.0
episode: 290 score: 33.0
episode: 291 score: 31.0
episode: 292 score: 27.0
episode: 293 score: 50.0
episode: 294 score: 24.0
episode: 295 score: 53.0
episode: 296 score: 51.0
episode: 297 score: 35.0
episode: 298 score: 37.0
episode: 299 score: 34.0
episode: 300 score: 39.0
episode: 301 score: 67.0
episode: 302 score: 31.0
episode: 303 score: 43.0
episode: 304 score: 33.0
episode: 305 score: 74.0
episode: 306 score: 30.0
episode: 307 score: 75.0
episode: 308 score: 41.0
episode: 309 score: 36.0
episode: 310 score: 52.0
episode: 311 score: 53.0
episode: 312 score: 44.0
|
notebooks/Dataset F - Indian Liver Patient/Synthetic data evaluation/Privacy/2_Membership_Inference_Dataset F.ipynb | ###Markdown
Membership Inference Attack (MIA) Dataset F
###Code
#import libraries
import warnings
warnings.filterwarnings("ignore")
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sns
import os
print('Libraries imported!!')
#define directory of functions and actual directory
HOME_PATH = '' #home path of the project
FUNCTIONS_DIR = "EVALUATION FUNCTIONS/PRIVACY"
ACTUAL_DIR = os.getcwd()
#change directory to functions directory
os.chdir(HOME_PATH + FUNCTIONS_DIR)
#import functions for membership attack simulation
from membership_inference import evaluate_membership_attack
#change directory to actual directory
os.chdir(ACTUAL_DIR)
print('Functions imported!!')
###Output
Functions imported!!
###Markdown
1. Read real and synthetic datasets In this part, the real and synthetic datasets are read.
###Code
#Define global variables
DATA_TYPES = ['Real','GM','SDV','CTGAN','WGANGP']
SYNTHESIZERS = ['GM','SDV','CTGAN','WGANGP']
FILEPATHS = {'Real' : HOME_PATH + 'REAL DATASETS/TRAIN DATASETS/F_IndianLiverPatient_Real_Train.csv',
'GM' : HOME_PATH + 'SYNTHETIC DATASETS/GM/F_IndianLiverPatient_Synthetic_GM.csv',
'SDV' : HOME_PATH + 'SYNTHETIC DATASETS/SDV/F_IndianLiverPatient_Synthetic_SDV.csv',
'CTGAN' : HOME_PATH + 'SYNTHETIC DATASETS/CTGAN/F_IndianLiverPatient_Synthetic_CTGAN.csv',
'WGANGP' : HOME_PATH + 'SYNTHETIC DATASETS/WGANGP/F_IndianLiverPatient_Synthetic_WGANGP.csv'}
categorical_columns = ['gender','class']
data = dict()
Q = 5
#iterate over all datasets filepaths and read each dataset
data = dict()
for name, path in FILEPATHS.items() :
data[name] = pd.read_csv(path)
for col in categorical_columns :
data[name][col] = data[name][col].astype('category').cat.codes
numerical_columns = data[name].select_dtypes(include=['int64','float64']).columns.tolist()
for col in numerical_columns :
data[name][col] = pd.qcut(data[name][col], q=Q, duplicates='drop').cat.codes
data
#read TRAIN real dataset
train_data = pd.read_csv(HOME_PATH + 'REAL DATASETS/TRAIN DATASETS/F_IndianLiverPatient_Real_Train.csv')
for col in categorical_columns :
train_data[col] = train_data[col].astype('category').cat.codes
for col in numerical_columns :
train_data[col] = pd.qcut(train_data[col], q=Q, duplicates='drop').cat.codes
train_data = train_data.sample(frac=1)
#read TEST real dataset
test_data = pd.read_csv(HOME_PATH + 'REAL DATASETS/TEST DATASETS/F_IndianLiverPatient_Real_Test.csv')
for col in categorical_columns :
test_data[col] = test_data[col].astype('category').cat.codes
for col in numerical_columns :
test_data[col] = pd.qcut(test_data[col], q=Q, duplicates='drop').cat.codes
print(len(test_data))
test_data.index = range(len(train_data), len(train_data) + len(test_data))
real_data = (pd.concat([train_data[0:len(test_data)], test_data])).sample(frac=1)
real_data
thresholds = [0.4, 0.3, 0.2, 0.1]
props = [0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]
train_data_indexes = train_data.index.tolist()
precision_values_all = dict()
accuracy_values_all = dict()
for name in SYNTHESIZERS :
print(name)
precision_values = dict()
accuracy_values = dict()
for th in thresholds :
precision_values[th] = []
accuracy_values[th] = []
for p in props :
attacker_data = real_data.iloc[0:int(len(real_data)*p)]
precision_vals, accuracy_vals = evaluate_membership_attack(attacker_data, train_data_indexes, data[name], th)
precision_values[th].append(precision_vals)
accuracy_values[th].append(accuracy_vals)
print('Proportion ', p, ' Threshold ', th, ' analysed')
print('- mean precision', np.mean(precision_values[th]))
print('- mean accuracy', np.mean(accuracy_values[th]))
print('###################################################')
precision_values_all[name] = precision_values
accuracy_values_all[name] = accuracy_values
colors = ['tab:blue','tab:orange','tab:green','tab:red']
fig, axs = plt.subplots(nrows=2, ncols=4, figsize=(13,2.5*2))
idx = {SYNTHESIZERS[0] : {'accuracy' : [0,0], 'precision' : [0,1]},
SYNTHESIZERS[1] : {'accuracy' : [0,2], 'precision' : [0,3]},
SYNTHESIZERS[2] : {'accuracy' : [1,0], 'precision' : [1,1]},
SYNTHESIZERS[3] : {'accuracy' : [1,2], 'precision' : [1,3]}}
first = True
for name in SYNTHESIZERS :
ax_pre = axs[idx[name]['precision'][0], idx[name]['precision'][1]]
ax_acc = axs[idx[name]['accuracy'][0], idx[name]['accuracy'][1]]
precision_values = precision_values_all[name]
accuracy_values = accuracy_values_all[name]
for i in range(0,len(thresholds)) :
ax_pre.plot(props, precision_values[thresholds[i]], 'o-', color=colors[i])
ax_acc.plot(props, accuracy_values[thresholds[i]], 'o-', color=colors[i])
ax_pre.axhline(y=0.5, color='gray', linestyle='--', alpha=0.5)
ax_acc.axhline(y=0.5, color='gray', linestyle='--', alpha=0.5)
ax_pre.set_ylabel('prec')
ax_acc.set_title(name, fontsize=12)
ax_acc.title.set_position([1.1, 1.03])
ax_pre.set_ylim(-0.05,1.05)
ax_acc.set_ylabel('acc')
ax_acc.set_ylim(-0.05,1.05)
ax_acc.grid(True)
ax_pre.grid(True)
ax_acc.set_yticks([0.0,0.2,0.4,0.6,0.8,1])
ax_pre.set_yticks([0.0,0.2,0.4,0.6,0.8,1])
ax_acc.set_xticks([0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.0])
ax_pre.set_xticks([0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.0])
if first == False :
ax_acc.set_yticklabels([])
else :
first = False
ax_pre.set_yticklabels([])
ax_acc.set_xticklabels([])
ax_pre.set_xticklabels([])
axs[idx['CTGAN']['accuracy'][0],idx['CTGAN']['accuracy'][1]].set_yticklabels([0.0,0.2,0.4,0.6,0.8,1])
axs[idx['CTGAN']['accuracy'][0],idx['CTGAN']['accuracy'][1]].set_xticklabels([0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.0])
axs[idx['CTGAN']['precision'][0],idx['CTGAN']['precision'][1]].set_xticklabels([0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.0])
axs[idx['WGANGP']['accuracy'][0],idx['WGANGP']['accuracy'][1]].set_xticklabels([0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.0])
axs[idx['WGANGP']['precision'][0],idx['WGANGP']['precision'][1]].set_xticklabels([0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.0])
fig.text(0.7, 0.04, 'Proportion of dataset known by an attacker', ha='center')
fig.text(0.3, 0.04, 'Proportion of dataset known by an attacker', ha='center')
ax_pre.legend(thresholds, ncol=len(thresholds), bbox_to_anchor=(-0.7, -0.3))
fig.tight_layout()
#fig.suptitle('Membership Inference Tests Results \n Dataset F - Indian Liver Patient', fontsize=18)
fig.savefig('INFERENCE TESTS RESULTS/MEMBERSHIP INFERENCE TESTS RESULTS.svg', bbox_inches='tight')
###Output
_____no_output_____ |
mk023-end_other.ipynb | ###Markdown
END OTHER Given two strings, return True if either of the strings appears at the very end of the other string, ignoring upper/lower case differences (in other words, the computation should not be "case sensitive"). Examples: `end_other('Hiabc', 'abc') → True` `end_other('AbC', 'HiaBc') → True` `end_other('abc', 'abXabc') → True`
###Code
end_other = lambda a, b: a.lower() == b[-len(a):].lower() or b.lower() == a[-len(b):].lower()
end_other('Hiabc', 'abc')
end_other('AbC', 'HiaBc')
end_other('abc', 'abXabc')
end_other('abc', 'abXabcd')
###Output
_____no_output_____ |
Regularization and Data Augmentation/code.ipynb | ###Markdown
1. CNNs, Keras API and Regression This assignment will give you a chance to familiarize yourself with CNNs and with the different techniques for monitoring and controlling the training process in tensorflow. In this assignment, we will look at weed leaves, in particular, we want to predict the number of leaves of the plant. The more leaves, the more the weed has grown. You must use Keras with the tensorflow backend, i.e., the package tensorflow.keras. For this assignment, you may use other tensorflow packages and scikit-learn, scikit-image or pandas but not other deep learning frameworks, e.g., pytorch, mxnet etc.
###Code
# import colab drive to access my drive and the data uploaded there.
from google.colab import drive
drive.mount('/content/drive')
# import important libraries
import PIL
import os
from PIL import Image
import skimage.io as skio
from skimage.transform import resize
import numpy as np
import pandas as pd
# get the location of the data
location = r'/content/drive/MyDrive/'
classes=['1','2','3','4','6']
# make array of the classes of the data
labels=[0,1,2,3,4]
# make a list of the folders that contain the data
folders=['training','testing']
# make empty lists to append to them each image
X_train=[]
X_test=[]
y_train=[]
y_test=[]
# for looping the folders and making a train, validation, and test images arrays
for ind , it in enumerate(folders):
if it=='training':
for ind_1,it_1 in enumerate(classes):
for file in os.listdir(location+"/"+it+"/"+it_1):
f_img = location+"/"+it+"/"+it_1+"/"+file
img = skio.imread(f_img)
img = resize(img,(32,32))
X_train.append(img)
y_train.append(labels[ind_1])
elif it=='testing':
for ind_1,it_1 in enumerate(classes):
for file in os.listdir(location+"/"+it+"/"+it_1):
f_img =location+"/"+it+"/"+it_1+"/"+file
img = skio.imread(f_img)
img = resize(img,(32,32))
X_test.append(img)
y_test.append(labels[ind_1])
else:
print("The destination file does not exist!")
###Output
_____no_output_____
###Markdown
Check that the data arrays have the expected sizes
###Code
len(X_test)
y_test
###Output
_____no_output_____
###Markdown
Convert the lists into numpy arrays for use in further modeling
###Code
y_train = np.asarray(y_train)
y_test = np.asarray(y_test)
X_train = np.asarray(X_train)
X_test = np.asarray(X_test)
print('Matrix shape X: ', X_train.shape)
print('{0} samples for training'.format(X_train.shape[0]))
print('{0} samples for testing'.format(X_test.shape[0]))
###Output
Matrix shape X: (907, 32, 32, 3)
907 samples for training
100 samples for testing
###Markdown
3. Basic Transfer Learning For this assignment, you are asked to use the Keras implementation of VGG-16 as a starting point. Have a look at the transfer learning example jupyter notebook mnistVGG.jypnb to get started. 3.1. Classification Network [3] Using the first 2 blocks of VGG-16, add extra Keras layers to create your own version of a CNN network for the classification of the images according to the number of leaves in the plant images. Note that there will be 5 classes. The last layer from VGG-16 will be block2_pool and you are allowed to add no more than five fully connected or convolutional layers to the network including the final output layer. You can use as many pooling, flattening, 1 × 1 convolution layers, etc. as you wish but do not use any regularization. Train this simple network on the training set while monitoring convergence on the validation set. As input to the model use images of size no larger than 128 × 128. Note, it is highly recommended to use even smaller input images to try things out. You are not expected to fine-tune the initial VGG layers. Print your learning curves for training and validation. Give the confusion matrix of your network on the training (including validation) and testing data sets. > Get the VGG model and print its architecture
###Code
# import keras from tensorflow as needed to import the vgg16 model
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.applications.vgg16 import preprocess_input
## Loading VGG16 model
vgg = VGG16(weights="imagenet", include_top=False, input_shape=X_train[0].shape)
vgg.trainable = False ## Not trainable weights
# print model summary
print(vgg.summary())
###Output
Model: "vgg16"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_1 (InputLayer) [(None, 32, 32, 3)] 0
block1_conv1 (Conv2D) (None, 32, 32, 64) 1792
block1_conv2 (Conv2D) (None, 32, 32, 64) 36928
block1_pool (MaxPooling2D) (None, 16, 16, 64) 0
block2_conv1 (Conv2D) (None, 16, 16, 128) 73856
block2_conv2 (Conv2D) (None, 16, 16, 128) 147584
block2_pool (MaxPooling2D) (None, 8, 8, 128) 0
block3_conv1 (Conv2D) (None, 8, 8, 256) 295168
block3_conv2 (Conv2D) (None, 8, 8, 256) 590080
block3_conv3 (Conv2D) (None, 8, 8, 256) 590080
block3_pool (MaxPooling2D) (None, 4, 4, 256) 0
block4_conv1 (Conv2D) (None, 4, 4, 512) 1180160
block4_conv2 (Conv2D) (None, 4, 4, 512) 2359808
block4_conv3 (Conv2D) (None, 4, 4, 512) 2359808
block4_pool (MaxPooling2D) (None, 2, 2, 512) 0
block5_conv1 (Conv2D) (None, 2, 2, 512) 2359808
block5_conv2 (Conv2D) (None, 2, 2, 512) 2359808
block5_conv3 (Conv2D) (None, 2, 2, 512) 2359808
block5_pool (MaxPooling2D) (None, 1, 1, 512) 0
=================================================================
Total params: 14,714,688
Trainable params: 0
Non-trainable params: 14,714,688
_________________________________________________________________
None
###Markdown
> Take the VGG layers up to the end of block 2
###Code
from tensorflow.keras.models import Model
vgg_edited = Model(inputs=vgg.input, outputs=vgg.get_layer("block2_pool").output)
print(vgg_edited.summary())
#import important libraries for keras and keras layers
import tensorflow as tf
import tensorflow.keras
from tensorflow.keras.layers import Dense, Flatten, Conv2D, MaxPooling2D,Dropout
for layer in vgg_edited.layers:
layer.trainable = False
# We will have to use the functional API
# last layers output
x = vgg_edited.layers[-1].output
# Flatten as before
x = Flatten()(x)
x = Dense(256, activation='relu')(x)
x = Dense(5, activation='softmax')(x)
from tensorflow.keras.models import Model
vgg_model_transfer = Model(inputs=vgg_edited.input, outputs=x)
print(vgg_model_transfer.summary())
y_train_k = tensorflow.keras.utils.to_categorical(y_train, num_classes=5)
y_test_k = tensorflow.keras.utils.to_categorical(y_test, num_classes=5)
for i in range(0,200,20):
print(y_train[i], " ", y_train_k[i,:])
sgd = tensorflow.keras.optimizers.SGD(lr=0.0008)
vgg_model_transfer.compile(loss='categorical_crossentropy',
optimizer=sgd,
metrics=['accuracy'])
history = vgg_model_transfer.fit(X_train, y_train_k, batch_size=128, epochs=32, verbose=1,
validation_data=(X_test, y_test_k))
y_predict_prob = vgg_model_transfer.predict(X_test)
y_predict = y_predict_prob.argmax(axis=-1)
from sklearn.metrics import confusion_matrix
conf_mx = confusion_matrix(y_test, y_predict)
print('Confusion matrix: \n{0}'.format(conf_mx))
import matplotlib.pyplot as plt
def plot_confusion_matrix(matrix):
"""If you prefer color and a colorbar"""
fig = plt.figure(figsize=(8,8))
ax = fig.add_subplot(111)
cax = ax.matshow(matrix)
fig.colorbar(cax)
row_sums = conf_mx.sum(axis=1, keepdims=True)
norm_conf_mx = conf_mx / row_sums
np.fill_diagonal(norm_conf_mx, 0)
# plt.matshow(norm_conf_mx, cmap=plt.cm.gray)
plot_confusion_matrix(norm_conf_mx)
plt.show()
from sklearn.metrics import classification_report
print(classification_report(y_test, y_predict))
###Output
precision recall f1-score support
0 0.45 0.70 0.55 20
1 0.29 0.55 0.38 20
2 0.43 0.15 0.22 20
3 0.42 0.40 0.41 20
4 0.80 0.20 0.32 20
accuracy 0.40 100
macro avg 0.48 0.40 0.38 100
weighted avg 0.48 0.40 0.38 100
###Markdown
Learning Curves
###Code
# dictionary keys seems to have changed in version 2
k = ''
if 'accuracy' in history.history :
k = 'accuracy'
if 'acc' in history.history :
k = 'acc'
if k != '' :
plt.plot(history.history[k])
plt.plot(history.history['val_'+k])
plt.title('Model Accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
###Output
_____no_output_____
###Markdown
3.2. Basic Transfer Learning: Regression Network [2] Repeat the steps of Question 3.1 but turn it into a regression problem, i.e., your network needs to output a single float value ranging from 0 to 6 corresponding to the number of leaves. Again, please print your learning curves for training and validation. You are not expected to fine-tune the initial VGG layers. Give your mean squared error on training including validation and testing data sets.
###Code
vgg_edited = Model(inputs=vgg.input, outputs=vgg.get_layer("block2_pool").output)
for layer in vgg_edited.layers:
layer.trainable = False
# We will have to use the functional API
# last layers output
x = vgg_edited.layers[-1].output
# Flatten as before
x = Flatten()(x)
x = Dense(256, activation='relu')(x)
x = Dense(1, kernel_initializer='normal',activation='linear')(x)
from tensorflow.keras.models import Model
vgg_model_transfer_linear = Model(inputs=vgg_edited.input, outputs=x)
sgd = tensorflow.keras.optimizers.SGD(lr=0.0008)
vgg_model_transfer_linear.compile(loss='mean_absolute_error', optimizer='adam', metrics=['mean_absolute_error'])
history = vgg_model_transfer_linear.fit(X_train, y_train_k, batch_size=128, epochs=50, verbose=1,
validation_data=(X_test, y_test_k))
y_predict_prob = vgg_model_transfer_linear.predict(X_test)
y_predict = y_predict_prob.argmax(axis=-1)
conf_mx = confusion_matrix(y_test, y_predict)
print('Confusion matrix: \n{0}'.format(conf_mx))
row_sums = conf_mx.sum(axis=1, keepdims=True)
norm_conf_mx = conf_mx / row_sums
np.fill_diagonal(norm_conf_mx, 0)
# plt.matshow(norm_conf_mx, cmap=plt.cm.gray)
plot_confusion_matrix(norm_conf_mx)
plt.show()
print(classification_report(y_test, y_predict))
###Output
precision recall f1-score support
0 0.20 1.00 0.33 20
1 0.00 0.00 0.00 20
2 0.00 0.00 0.00 20
3 0.00 0.00 0.00 20
4 0.00 0.00 0.00 20
accuracy 0.20 100
macro avg 0.04 0.20 0.07 100
weighted avg 0.04 0.20 0.07 100
###Markdown
Learning Curves
###Code
# dictionary keys seems to have changed in version 2
k = ''
if 'mean_absolute_error' in history.history :
k = 'mean_absolute_error'
if k != '' :
plt.plot(history.history[k])
plt.plot(history.history['val_'+k])
plt.title('Model mean_absolute_error')
plt.ylabel('mean_absolute_error')
plt.xlabel('Epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
###Output
_____no_output_____
###Markdown
Classification report on training data
###Code
y_predict_prob_train = vgg_model_transfer_linear.predict(X_train)
y_predict_train = y_predict_prob_train.argmax(axis=-1)
print(classification_report(y_train, y_predict_train))
###Output
precision recall f1-score support
0 0.20 1.00 0.33 180
1 0.00 0.00 0.00 180
2 0.00 0.00 0.00 187
3 0.00 0.00 0.00 180
4 0.00 0.00 0.00 180
accuracy 0.20 907
macro avg 0.04 0.20 0.07 907
weighted avg 0.04 0.20 0.07 907
###Markdown
Classification report on testing data
###Code
print(classification_report(y_test, y_predict))
###Output
precision recall f1-score support
0 0.20 1.00 0.33 20
1 0.00 0.00 0.00 20
2 0.00 0.00 0.00 20
3 0.00 0.00 0.00 20
4 0.00 0.00 0.00 20
accuracy 0.20 100
macro avg 0.04 0.20 0.07 100
weighted avg 0.04 0.20 0.07 100
###Markdown
3.3. Discussion [1] The size of the training data is quite small. Discuss based on your learning curves if overfitting is occurring with your networks in Question 3.1 and Question 3.2. > 3.1 Model >> The model is overfitted: the test accuracy is very low in comparison with the training accuracy; the training accuracy reaches about 80% while the test accuracy is only 40%. The model fails to generalize the data and find the hidden patterns in it. The test loss stays flat and does not decrease as the epochs go on, so increasing the number of epochs gives the same performance. The overall accuracy is 40%, which is poor performance. The highest f1 score is 55% for the first class, which is very fair performance. The lowest f1 score was recorded for the third class, which represents the three-leaf plants. > 3.2 Model >> The model is extremely underfitted: the overall accuracy is 20% on both the training and testing data. The model fails to predict all classes properly except for the first class, and even that class is predicted with a very low f1 score of 33%. Increasing the number of epochs, even up to 50, does not have any impact on the performance. 4. Improving the Model Regularization and data augmentation are common strategies to deal with small datasets. 4.1. Regularization [1.5] Incorporate two regularization methods (e.g., Batch Normalization, Dropout, Weight Normalization etc.) into your layers of the network. Please pick the model from Question 3.1 and Question 3.2 that performs better. You are not expected to fine-tune the initial VGG layers. Again, please print your learning curves for training and validation and print the corresponding metrics for your model. We picked the classification model rather than the regression model as it performs much better. Batch Normalization
###Code
from keras.layers import BatchNormalization
vgg = VGG16(weights="imagenet", include_top=False, input_shape=X_train[0].shape)
vgg.trainable = False ## Not trainable weights
vgg_edited = Model(inputs=vgg.input, outputs=vgg.get_layer("block2_pool").output)
for layer in vgg_edited.layers:
layer.trainable = False
flat1 = Flatten()(vgg_edited.layers[-1].output)
t=BatchNormalization()(flat1)
class1 = Dense(256, activation='relu')(t)
tt=BatchNormalization()(class1)
output = Dense(5, activation='softmax')(tt)
model_reg = Model(inputs=vgg_edited.inputs, outputs=output)
model_reg.summary()
y_train_k = tensorflow.keras.utils.to_categorical(y_train, num_classes=5)
y_test_k = tensorflow.keras.utils.to_categorical(y_test, num_classes=5)
sgd = tensorflow.keras.optimizers.SGD(lr=0.0008)
model_reg.compile(loss='categorical_crossentropy',
optimizer=sgd,
metrics=['accuracy'])
history = model_reg.fit(X_train, y_train_k, batch_size=128, epochs=50, verbose=1,
validation_data=(X_test, y_test_k))
y_predict_prob = model_reg.predict(X_test)
y_predict = y_predict_prob.argmax(axis=-1)
from sklearn.metrics import confusion_matrix
conf_mx = confusion_matrix(y_test, y_predict)
conf_mx
row_sums = conf_mx.sum(axis=1, keepdims=True)
norm_conf_mx = conf_mx / row_sums
np.fill_diagonal(norm_conf_mx, 0)
# plt.matshow(norm_conf_mx, cmap=plt.cm.gray)
plot_confusion_matrix(norm_conf_mx)
plt.show()
print(classification_report(y_test, y_predict))
k = ''
if 'accuracy' in history.history :
k = 'accuracy'
if 'acc' in history.history :
k = 'acc'
if k != '' :
plt.plot(history.history[k])
plt.plot(history.history['val_'+k])
plt.title('Model Accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
###Output
_____no_output_____
###Markdown
Dropout
###Code
vgg = VGG16(weights="imagenet", include_top=False, input_shape=X_train[0].shape)
vgg.trainable = False ## Not trainable weights
vgg_edited = Model(inputs=vgg.input, outputs=vgg.get_layer("block2_pool").output)
for layer in vgg_edited.layers:
layer.trainable = False
flat1 = Flatten()(vgg_edited.layers[-1].output)
t=Dropout(0.5)(flat1)
class1 = Dense(256, activation='relu')(t)
tt=Dropout(0.5)(class1)
output = Dense(5, activation='softmax')(tt)
model_d = Model(inputs=vgg_edited.inputs, outputs=output)
model_d.summary()
y_train_k = tensorflow.keras.utils.to_categorical(y_train, num_classes=5)
y_test_k = tensorflow.keras.utils.to_categorical(y_test, num_classes=5)
sgd = tensorflow.keras.optimizers.SGD(lr=0.0008)
model_d.compile(loss='categorical_crossentropy',
optimizer=sgd,
metrics=['accuracy'])
history = model_d.fit(X_train, y_train_k, batch_size=128, epochs=50, verbose=1,
validation_data=(X_test, y_test_k))
y_predict_prob = model_d.predict(X_test)
y_predict = y_predict_prob.argmax(axis=-1)
conf_mx = confusion_matrix(y_test, y_predict)  # recompute for the dropout model (the conf_mx above belonged to the batch-norm model)
row_sums = conf_mx.sum(axis=1, keepdims=True)
norm_conf_mx = conf_mx / row_sums
np.fill_diagonal(norm_conf_mx, 0)
# plt.matshow(norm_conf_mx, cmap=plt.cm.gray)
plot_confusion_matrix(norm_conf_mx)
plt.show()
y_predict_prob_train = model_d.predict(X_train)
y_predict_train = y_predict_prob_train.argmax(axis=-1)
print(classification_report(y_train, y_predict_train))
print(classification_report(y_test, y_predict))
k = ''
if 'accuracy' in history.history :
k = 'accuracy'
if 'acc' in history.history :
k = 'acc'
if k != '' :
plt.plot(history.history[k])
plt.plot(history.history['val_'+k])
plt.title('Model Accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
###Output
_____no_output_____
###Markdown
5. Data Augmentation [1.5]
###Code
###Output
_____no_output_____
###Markdown
Perform data augmentation for training the same model as in Question 4.1. You are not expected to fine-tune the initial VGG layers. Again, please print your learning curves for training and validation and print the corresponding metrics for your model.
###Code
from keras.preprocessing.image import ImageDataGenerator
# we are using rotation, small width/height shifts, slight zoom, and horizontal-flip augmentation (no vertical flips)
datagen = ImageDataGenerator(
rotation_range=20,
width_shift_range=0.002,
height_shift_range=0.002,
zoom_range=0.01,
horizontal_flip=True,
fill_mode='nearest')
vgg = VGG16(weights="imagenet", include_top=False, input_shape=X_train[0].shape)
vgg.trainable = False ## Not trainable weights
vgg_edited = Model(inputs=vgg.input, outputs=vgg.get_layer("block2_pool").output)
for layer in vgg_edited.layers:
layer.trainable = False
# We will have to use the functional API
# last layers output
x = vgg_edited.layers[-1].output
# Flatten as before
x = Flatten()(x)
x = Dense(256, activation='relu')(x)
x = Dense(5, activation='softmax')(x)
from tensorflow.keras.models import Model
model_aug = Model(inputs=vgg_edited.input, outputs=x)
y_train_k = tensorflow.keras.utils.to_categorical(y_train, num_classes=5)
y_test_k = tensorflow.keras.utils.to_categorical(y_test, num_classes=5)
sgd = tensorflow.keras.optimizers.SGD(lr=0.0008)
model_aug.compile(loss='categorical_crossentropy',
optimizer=sgd,
metrics=['accuracy'])
history = model_aug.fit(datagen.flow(X_train, y_train_k), batch_size=128, epochs=50, verbose=1,
validation_data=(X_test, y_test_k))
y_predict_prob = model_aug.predict(X_test)
y_predict = y_predict_prob.argmax(axis=-1)
conf_mx = confusion_matrix(y_test, y_predict)
print('Confusion matrix: \n{0}'.format(conf_mx))
row_sums = conf_mx.sum(axis=1, keepdims=True)
norm_conf_mx = conf_mx / row_sums
np.fill_diagonal(norm_conf_mx, 0)
# plt.matshow(norm_conf_mx, cmap=plt.cm.gray)
plot_confusion_matrix(norm_conf_mx)
plt.show()
print(classification_report(y_test, y_predict))
k = ''
if 'accuracy' in history.history :
k = 'accuracy'
if 'acc' in history.history :
k = 'acc'
if k != '' :
plt.plot(history.history[k])
plt.plot(history.history['val_'+k])
plt.title('Model Accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
###Output
_____no_output_____
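###Markdown
As a quick sanity check, here is a minimal preview sketch (not part of the original assignment code) for inspecting a few samples produced by the `datagen` generator defined above; it assumes `X_train` holds RGB images with pixel values in the 0–255 range.
###Code
import matplotlib.pyplot as plt
# Preview one augmented batch from the generator defined above.
# Assumes X_train contains 0-255 RGB images; if the images were preprocessed
# (e.g. mean-subtracted), the colours in this preview will look off.
for batch in datagen.flow(X_train, batch_size=4, shuffle=True):
    for i in range(4):
        plt.subplot(1, 4, i + 1)
        plt.imshow(batch[i].astype('uint8'))
        plt.axis('off')
    plt.show()
    break  # only show a single batch
###Output
_____no_output_____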
###Markdown
5.1. Discussion [1] Discuss based on your learning curves and final metrics in Question 4.1 and in Question 5, how large an improvement can be observed from regularization and data augmentation. > classification model with batch normalization>> The model keeps overfitting, and it is actually worse than the baseline: the training accuracy reaches about 90%, but the test accuracy is only 30%. The f1 score has decreased for all classes; the highest f1 score is 48% for the first class and the lowest f1 score is 18% for the tri-plants, as before. > classification model with dropout>> The model is severely underfitted: the training accuracy is only 23% while the testing accuracy is 20%. The f1 scores are all zero except for the first and second classes; the first class has an f1 score of 38% while the second class has an f1 score of 18%. > classification model with data augmentation>> The model is overfitted: the training accuracy is about 70% while the test accuracy is 47%. The highest f1 score is 64% for the first class; the lowest f1 score is 17% for the third class. References [1] N. Teimouri, M. Dyrmann, P. R. Nielsen, S. K. Mathiassen, G. J. Somerville, and R. N. Jørgensen, "Weed growth stage estimator using deep convolutional neural networks," Sensors, vol. 18, no. 5, 2018. [2] Lab code. [3] Keras API. [4] https://towardsdatascience.com/transfer-learning-with-vgg16-and-keras-50ea161580b4 [5] https://www.learndatasci.com/tutorials/hands-on-transfer-learning-keras/ [6] https://stackoverflow.com/questions/56204731/the-method-np-utils-to-categorical-give-me-an-error [7] https://towardsdatascience.com/learning-curve-to-identify-overfitting-underfitting-problems-133177f38df5 [8] https://towardsdatascience.com/deep-neural-networks-for-regression-problems-81321897ca33 [9] https://sthalles.github.io/keras-regularizer/ [10] https://towardsdatascience.com/how-to-use-batch-normalization-with-tensorflow-and-tf-keras-to-train-deep-neural-networks-faster-60ba4d054b73 [11] https://www.machinecurve.com/index.php/2019/12/18/how-to-use-dropout-with-keras/
###Code
###Output
_____no_output_____ |
trials/MyFirstnotebook.ipynb | ###Markdown
Chapter 1. Introduction to deep structures The neuron Multilayer perceptron
###Code
###Output
_____no_output_____
###Markdown
$C=1$ Figure 1. A random image. Chapter 2. Introduction to Deep Learning Software
###Code
#@title This is some hidden code
import numpy as np
x=1
y=x+1
y+=1
print(y)
###Output
3
###Markdown
Pytorch Chapter 3 Convolutional Neural Networks
###Code
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
ys = 200 + np.random.randn(100)
x = [x for x in range(len(ys))]
with PdfPages('multipage_pdf.pdf') as pdf:
plt.figure(figsize=(6, 6))
plt.plot(x, ys, '-')
plt.fill_between(x, ys, 200, where=(ys > 200), facecolor='g', alpha=0.6)
plt.title("Fills and Alpha Example")
pdf.savefig() # saves the current figure into a pdf page
plt.show()
plt.close()
###Output
_____no_output_____ |
deepfake/DeepFake Demo.ipynb | ###Markdown
Demo for paper "First Order Motion Model for Image Animation"To try the demo, press the 2 play buttons in order and scroll to the bottom. Note that it may take several minutes to load.
###Code
!pip install ffmpy &> /dev/null
!git init -q .
!git remote add origin https://github.com/AliaksandrSiarohin/first-order-model
!git pull -q origin master
!git clone -q https://github.com/graphemecluster/first-order-model-demo demo
import IPython.display
import PIL.Image
import cv2
import imageio
import io
import ipywidgets
import numpy
import os.path
import requests
import skimage.transform
import warnings
from base64 import b64encode
from demo import load_checkpoints, make_animation
from ffmpy import FFmpeg
from google.colab import files, output
from IPython.display import HTML, Javascript
from skimage import img_as_ubyte
warnings.filterwarnings("ignore")
os.makedirs("user", exist_ok=True)
display(HTML("""
<style>
.widget-box > * {
flex-shrink: 0;
}
.widget-tab {
min-width: 0;
flex: 1 1 auto;
}
.widget-tab .p-TabBar-tabLabel {
font-size: 15px;
}
.widget-upload {
background-color: tan;
}
.widget-button {
font-size: 18px;
width: 160px;
height: 34px;
line-height: 34px;
}
.widget-dropdown {
width: 250px;
}
.widget-checkbox {
width: 650px;
}
.widget-checkbox + .widget-checkbox {
margin-top: -6px;
}
.input-widget .output_html {
text-align: center;
width: 266px;
height: 266px;
line-height: 266px;
color: lightgray;
font-size: 72px;
}
div.stream {
display: none;
}
.title {
font-size: 20px;
font-weight: bold;
margin: 12px 0 6px 0;
}
.warning {
display: none;
color: red;
margin-left: 10px;
}
.warn {
display: initial;
}
.resource {
cursor: pointer;
border: 1px solid gray;
margin: 5px;
width: 160px;
height: 160px;
min-width: 160px;
min-height: 160px;
max-width: 160px;
max-height: 160px;
-webkit-box-sizing: initial;
box-sizing: initial;
}
.resource:hover {
border: 6px solid crimson;
margin: 0;
}
.selected {
border: 6px solid seagreen;
margin: 0;
}
.input-widget {
width: 266px;
height: 266px;
border: 1px solid gray;
}
.input-button {
width: 268px;
font-size: 15px;
margin: 2px 0 0;
}
.output-widget {
width: 256px;
height: 256px;
border: 1px solid gray;
}
.output-button {
width: 258px;
font-size: 15px;
margin: 2px 0 0;
}
.uploaded {
width: 256px;
height: 256px;
border: 6px solid seagreen;
margin: 0;
}
.label-or {
align-self: center;
font-size: 20px;
margin: 16px;
}
.loading {
align-items: center;
width: fit-content;
}
.loader {
margin: 32px 0 16px 0;
width: 48px;
height: 48px;
min-width: 48px;
min-height: 48px;
max-width: 48px;
max-height: 48px;
border: 4px solid whitesmoke;
border-top-color: gray;
border-radius: 50%;
animation: spin 1.8s linear infinite;
}
.loading-label {
color: gray;
}
.comparison-widget {
width: 256px;
height: 256px;
border: 1px solid gray;
margin-left: 2px;
}
.comparison-label {
color: gray;
font-size: 14px;
text-align: center;
position: relative;
bottom: 3px;
}
@keyframes spin {
from { transform: rotate(0deg); }
to { transform: rotate(360deg); }
}
</style>
"""))
def thumbnail(file):
return imageio.get_reader(file, mode='I', format='FFMPEG').get_next_data()
def create_image(i, j):
image_widget = ipywidgets.Image(
value=open('demo/images/%d%d.png' % (i, j), 'rb').read(),
format='png'
)
image_widget.add_class('resource')
image_widget.add_class('resource-image')
image_widget.add_class('resource-image%d%d' % (i, j))
return image_widget
def create_video(i):
video_widget = ipywidgets.Image(
value=cv2.imencode('.png', cv2.cvtColor(thumbnail('demo/videos/%d.mp4' % i), cv2.COLOR_RGB2BGR))[1].tostring(),
format='png'
)
video_widget.add_class('resource')
video_widget.add_class('resource-video')
video_widget.add_class('resource-video%d' % i)
return video_widget
def create_title(title):
title_widget = ipywidgets.Label(title)
title_widget.add_class('title')
return title_widget
def download_output(button):
complete.layout.display = 'none'
loading.layout.display = ''
files.download('output.mp4')
loading.layout.display = 'none'
complete.layout.display = ''
def convert_output(button):
complete.layout.display = 'none'
loading.layout.display = ''
FFmpeg(inputs={'output.mp4': None}, outputs={'scaled.mp4': '-vf "scale=1080x1080:flags=lanczos,pad=1920:1080:420:0" -y'}).run()
files.download('scaled.mp4')
loading.layout.display = 'none'
complete.layout.display = ''
def back_to_main(button):
complete.layout.display = 'none'
main.layout.display = ''
label_or = ipywidgets.Label('or')
label_or.add_class('label-or')
image_titles = ['Peoples', 'Cartoons', 'Dolls', 'Game of Thrones', 'Statues']
image_lengths = [8, 4, 8, 9, 4]
image_tab = ipywidgets.Tab()
image_tab.children = [ipywidgets.HBox([create_image(i, j) for j in range(length)]) for i, length in enumerate(image_lengths)]
for i, title in enumerate(image_titles):
image_tab.set_title(i, title)
input_image_widget = ipywidgets.Output()
input_image_widget.add_class('input-widget')
upload_input_image_button = ipywidgets.FileUpload(accept='image/*', button_style='primary')
upload_input_image_button.add_class('input-button')
image_part = ipywidgets.HBox([
ipywidgets.VBox([input_image_widget, upload_input_image_button]),
label_or,
image_tab
])
video_tab = ipywidgets.Tab()
video_tab.children = [ipywidgets.HBox([create_video(i) for i in range(5)])]
video_tab.set_title(0, 'All Videos')
input_video_widget = ipywidgets.Output()
input_video_widget.add_class('input-widget')
upload_input_video_button = ipywidgets.FileUpload(accept='video/*', button_style='primary')
upload_input_video_button.add_class('input-button')
video_part = ipywidgets.HBox([
ipywidgets.VBox([input_video_widget, upload_input_video_button]),
label_or,
video_tab
])
model = ipywidgets.Dropdown(
description="Model:",
options=[
'vox',
'vox-adv',
'taichi',
'taichi-adv',
'nemo',
'mgif',
'fashion',
'bair'
]
)
warning = ipywidgets.HTML('<b>Warning:</b> Upload your own images and videos (see README)')
warning.add_class('warning')
model_part = ipywidgets.HBox([model, warning])
relative = ipywidgets.Checkbox(description="Relative keypoint displacement (Inherit object proportions from the video)", value=True)
adapt_movement_scale = ipywidgets.Checkbox(description="Adapt movement scale (Don’t touch unless you know what you are doing)", value=True)
generate_button = ipywidgets.Button(description="Generate", button_style='primary')
main = ipywidgets.VBox([
create_title('Choose Image'),
image_part,
create_title('Choose Video'),
video_part,
create_title('Settings'),
model_part,
relative,
adapt_movement_scale,
generate_button
])
loader = ipywidgets.Label()
loader.add_class("loader")
loading_label = ipywidgets.Label("This may take several minutes to process…")
loading_label.add_class("loading-label")
loading = ipywidgets.VBox([loader, loading_label])
loading.add_class('loading')
output_widget = ipywidgets.Output()
output_widget.add_class('output-widget')
download = ipywidgets.Button(description='Download', button_style='primary')
download.add_class('output-button')
download.on_click(download_output)
convert = ipywidgets.Button(description='Convert to 1920×1080', button_style='primary')
convert.add_class('output-button')
convert.on_click(convert_output)
back = ipywidgets.Button(description='Back', button_style='primary')
back.add_class('output-button')
back.on_click(back_to_main)
comparison_widget = ipywidgets.Output()
comparison_widget.add_class('comparison-widget')
comparison_label = ipywidgets.Label('Comparison')
comparison_label.add_class('comparison-label')
complete = ipywidgets.HBox([
ipywidgets.VBox([output_widget, download, convert, back]),
ipywidgets.VBox([comparison_widget, comparison_label])
])
display(ipywidgets.VBox([main, loading, complete]))
display(Javascript("""
var images, videos;
function deselectImages() {
images.forEach(function(item) {
item.classList.remove("selected");
});
}
function deselectVideos() {
videos.forEach(function(item) {
item.classList.remove("selected");
});
}
function invokePython(func) {
google.colab.kernel.invokeFunction("notebook." + func, [].slice.call(arguments, 1), {});
}
setTimeout(function() {
(images = [].slice.call(document.getElementsByClassName("resource-image"))).forEach(function(item) {
item.addEventListener("click", function() {
deselectImages();
item.classList.add("selected");
invokePython("select_image", item.className.match(/resource-image(\d\d)/)[1]);
});
});
images[0].classList.add("selected");
(videos = [].slice.call(document.getElementsByClassName("resource-video"))).forEach(function(item) {
item.addEventListener("click", function() {
deselectVideos();
item.classList.add("selected");
invokePython("select_video", item.className.match(/resource-video(\d)/)[1]);
});
});
videos[0].classList.add("selected");
}, 1000);
"""))
selected_image = None
def select_image(filename):
global selected_image
selected_image = resize(PIL.Image.open('demo/images/%s.png' % filename).convert("RGB"))
input_image_widget.clear_output(wait=True)
with input_image_widget:
display(HTML('Image'))
input_image_widget.remove_class('uploaded')
output.register_callback("notebook.select_image", select_image)
selected_video = None
def select_video(filename):
global selected_video
selected_video = 'demo/videos/%s.mp4' % filename
input_video_widget.clear_output(wait=True)
with input_video_widget:
display(HTML('Video'))
input_video_widget.remove_class('uploaded')
output.register_callback("notebook.select_video", select_video)
def resize(image, size=(256, 256)):
w, h = image.size
d = min(w, h)
r = ((w - d) // 2, (h - d) // 2, (w + d) // 2, (h + d) // 2)
return image.resize(size, resample=PIL.Image.LANCZOS, box=r)
def upload_image(change):
global selected_image
for name, file_info in upload_input_image_button.value.items():
content = file_info['content']
if content is not None:
selected_image = resize(PIL.Image.open(io.BytesIO(content)).convert("RGB"))
input_image_widget.clear_output(wait=True)
with input_image_widget:
display(selected_image)
input_image_widget.add_class('uploaded')
display(Javascript('deselectImages()'))
upload_input_image_button.observe(upload_image, names='value')
def upload_video(change):
global selected_video
for name, file_info in upload_input_video_button.value.items():
content = file_info['content']
if content is not None:
selected_video = 'user/' + name
preview = resize(PIL.Image.fromarray(thumbnail(content)).convert("RGB"))
input_video_widget.clear_output(wait=True)
with input_video_widget:
display(preview)
input_video_widget.add_class('uploaded')
display(Javascript('deselectVideos()'))
with open(selected_video, 'wb') as video:
video.write(content)
upload_input_video_button.observe(upload_video, names='value')
def change_model(change):
if model.value.startswith('vox'):
warning.remove_class('warn')
else:
warning.add_class('warn')
model.observe(change_model, names='value')
def generate(button):
main.layout.display = 'none'
loading.layout.display = ''
filename = model.value + ('' if model.value == 'fashion' else '-cpk') + '.pth.tar'
if not os.path.isfile(filename):
download = requests.get(requests.get('https://cloud-api.yandex.net/v1/disk/public/resources/download?public_key=https://yadi.sk/d/lEw8uRm140L_eQ&path=/' + filename).json().get('href'))
with open(filename, 'wb') as checkpoint:
checkpoint.write(download.content)
reader = imageio.get_reader(selected_video, mode='I', format='FFMPEG')
fps = reader.get_meta_data()['fps']
driving_video = []
for frame in reader:
driving_video.append(frame)
generator, kp_detector = load_checkpoints(config_path='config/%s-256.yaml' % model.value, checkpoint_path=filename)
predictions = make_animation(
skimage.transform.resize(numpy.asarray(selected_image), (256, 256)),
[skimage.transform.resize(frame, (256, 256)) for frame in driving_video],
generator,
kp_detector,
relative=relative.value,
adapt_movement_scale=adapt_movement_scale.value
)
if selected_video.startswith('user/') or selected_video == 'demo/videos/0.mp4':
imageio.mimsave('temp.mp4', [img_as_ubyte(frame) for frame in predictions], fps=fps)
FFmpeg(inputs={'temp.mp4': None, selected_video: None}, outputs={'output.mp4': '-c copy -y'}).run()
else:
imageio.mimsave('output.mp4', [img_as_ubyte(frame) for frame in predictions], fps=fps)
loading.layout.display = 'none'
complete.layout.display = ''
with output_widget:
display(HTML('<video id="left" controls src="data:video/mp4;base64,%s" />' % b64encode(open('output.mp4', 'rb').read()).decode()))
with comparison_widget:
display(HTML('<video id="right" muted src="data:video/mp4;base64,%s" />' % b64encode(open(selected_video, 'rb').read()).decode()))
display(Javascript("""
(function(left, right) {
left.addEventListener("play", function() {
right.play();
});
left.addEventListener("pause", function() {
right.pause();
});
left.addEventListener("seeking", function() {
right.currentTime = left.currentTime;
});
})(document.getElementById("left"), document.getElementById("right"));
"""))
generate_button.on_click(generate)
loading.layout.display = 'none'
complete.layout.display = 'none'
select_image('00')
select_video('0')
###Output
_____no_output_____ |
examples/drone_example.ipynb | ###Markdown
membrane-toolkit data aggregation demo This notebook will demonstrate some of the capabilities of membrane-toolkit's core functions. Setup---In this example, we will use the `PermselectivityDrone` to parse membrane potential data from a folder full of spreadsheets. The `Drone` will create a pandas `DataFrame` out of the data. We'll then use that `DataFrame` to calculate the ideal membrane potential and the apparent permselectivity using the `nernst_potential` and `apparent_permselectivity` functions from `membrane_toolkit.core`
###Code
from membrane_toolkit.pipeline.drones import PermselectivityDrone
from membrane_toolkit.core import apparent_permselectivity, nernst_potential
###Output
/home/ryan/anaconda3/envs/wsl2/lib/python3.8/site-packages/maggma/utils.py:20: TqdmExperimentalWarning: Using `tqdm.autonotebook.tqdm` in notebook mode. Use `tqdm.tqdm` instead to force console mode (e.g. in jupyter console)
from tqdm.autonotebook import tqdm
###Markdown
**NOTE:** when importing, you must use an underscore instead of a hyphen in 'membrane_toolkit' Main code--- Create the droneThe first step to parsing data with membrane-toolkit is to create a `Drone` that knows the location of your data files. The Drone will loop through every file in the directory and parse their data. Each `Drone` in membrane-toolkit has an associated default spreadsheet template and a corresponding configuration file. Both of these are easy to customize. The image below shows an example of how specific parts of the template spreadsheet map to values in the configuration file.![test](drone_template_config.png) For this example, we will create a `PermselectivityDrone` because we have collected membrane potential data for permselectivity calculations. All we need to do to create the `Drone` is supply the location of our data folder:
###Code
from pathlib import Path
data_folder = Path.cwd() / 'permselectivity_data'
drone = PermselectivityDrone(data_folder)
###Output
_____no_output_____
###Markdown
Run the droneNext, simply call `drone.run()` and all the files (6 in this case) will be parsed.
###Code
drone.run()
###Output
2020-05-10 18:45:11,844 - PermselectivityDrone - DEBUG - Starting get_items in PermselectivityDrone Builder
###Markdown
Examine the dataThe drone populates a [`Store`](https://materialsproject.github.io/maggma/concepts/) object with all the parsed data. Stores provide a consistent interface for adding data to several different database endpoints. In membrane-toolkit, the default `Store` adds data to a pandas dataframe, which can be viewed by calling `drone.store.as_df()`
###Code
df = drone.store.as_df()
df
###Output
_____no_output_____
###Markdown
Filter the dataYou'll notice that in addition to the fields from the spreadsheet data, the drone has created a few extra fields like `record_key` and `documents`, and some of the data like the person running the experiment may not be relevant to the analysis we want to do. Pandas dataframes make it easy to filter the data so you only see what you need. For example, if we only want to see the membrane type, temperature, date, and sample id:
###Code
df.xs((["sample_id", "membrane","date","temperature"]),axis=1)
###Output
_____no_output_____
###Markdown
Make some calculationsNow that we have all the experimental data in one place, we can calculate the ideal membrane potential and the apparent permselectivity. First, we'll use the [`nernst_potential`]() core function to calculate the ideal membrane potential based on data in the `conc_1`, `conc_2`, and `temperature` columns. To do this, we'll use the pandas [`apply`](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.apply.html) method.
###Code
df["ideal_potential"] = df.apply(lambda row: nernst_potential(row["conc_1"], row["conc_2"], 1, row["temperature"]) * 1000, axis=1)
df
###Output
_____no_output_____
###Markdown
Next, we use the `apparent_permselectivity` core function to compute the membrane permselectivity. In this case, we can just pass the dataframe columns as arguments to the function directly (no need for `apply`).
###Code
df["permselectivity"] = apparent_permselectivity(df["membrane_potential"], df["ideal_potential"])
###Output
_____no_output_____
###Markdown
Now we can create another cross-section view that includes the result.
###Code
df.xs((["sample_id","date","membrane","conc_1","conc_2","temperature","permselectivity"]),axis=1)
###Output
_____no_output_____
###Markdown
Generate summary statisticsThe `.groupby` method makes it easy to generate summary statics like mean or standard deviation for each membrane type.
###Code
df.groupby('membrane').mean()
df.groupby('membrane').std()
###Output
_____no_output_____ |
Time_Series_Analysis_of_Car_Sales.ipynb | ###Markdown
###Code
import warnings
warnings.filterwarnings('ignore')
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
# Above is a special style template for matplotlib, highly useful for visualisation
from pylab import rcParams
rcParams['figure.figsize'] = 10, 7
df = pd.read_csv('/content/monthly-car-sales.csv')
df.head()
df.columns=['Month', 'Sales']
df=df.dropna()
df['Month'] = pd.to_datetime(df['Month'])
df.set_index('Month', inplace=True) #set date as index
df.head()
plt.xlabel("Month")
plt.ylabel("Sales")
plt.title("Car Sales Time Series")
plt.plot(df)
#ScatterPlotofDataPoints
df.plot(style = 'k.')
plt.show()
from statsmodels.tsa.seasonal import seasonal_decompose
result = seasonal_decompose(df, model='multiplicative')
result.plot()
plt.show()
#Seperating Trend and Seasonality
from statsmodels.tsa.stattools import adfuller
def test_stationarity(timeseries):
#Determing rolling statistics
rolmean = timeseries.rolling(12).mean()
rolstd = timeseries.rolling(12).std()
#Plot rolling statistics:
plt.plot(timeseries, color='blue',label='Original')
plt.plot(rolmean, color='red', label='Rolling Mean')
plt.plot(rolstd, color='black', label = 'Rolling Std')
plt.legend(loc='best')
plt.title('Rolling Mean and Standard Deviation')
plt.show(block=False)
#perform dickey fuller test
print("Results of dickey fuller test")
adft = adfuller(timeseries['Sales'],autolag='AIC')
# output for dft will give us without defining what the values are.
#hence we manually write what values does it explains using a for loop
output = pd.Series(adft[0:4],index=['Test Statistics','p-value','No. of lags used','Number of observations used'])
for key,values in adft[4].items():
output['critical value (%s)'%key] = values
print(output)
test_stationarity(df)
df_log = np.log(df)
moving_avg = df_log.rolling(12).mean()
std_dev = df_log.rolling(12).std()
plt.plot(df_log)
plt.plot(moving_avg, color="red")
plt.plot(std_dev, color ="black")
plt.show()
df_log_moving_avg_diff = df_log-moving_avg
df_log_moving_avg_diff.dropna(inplace=True)
test_stationarity(df_log_moving_avg_diff)
weighted_average = df_log.ewm(halflife=12, min_periods=0,adjust=True).mean()
logScale_weightedMean = df_log-weighted_average
from pylab import rcParams
rcParams['figure.figsize'] = 10,6
test_stationarity(logScale_weightedMean)
df_log_diff = df_log - df_log.shift()
plt.title("Shifted timeseries")
plt.xlabel("Month")
plt.ylabel("Sales")
plt.plot(df_log_diff)
# Let us test the stationarity of our resultant series
df_log_diff.dropna(inplace=True)
test_stationarity(df_log_diff)
from chart_studio.plotly import plot_mpl
from statsmodels.tsa.seasonal import seasonal_decompose
result = seasonal_decompose(df_log, model='additive', freq = 12)
result.plot()
plt.show()
trend = result.trend
trend.dropna(inplace=True)
seasonality = result.seasonal
seasonality.dropna(inplace=True)
residual = result.resid
residual.dropna(inplace=True)
test_stationarity(residual)
###Output
_____no_output_____ |
Python_Programming_Basic.ipynb | ###Markdown
1.Python Basic: Your First Program
###Code
#This program says hello and asks for my name
print('Hello, world!')
print('What is your name?') #ask for their name
myName = input()
print('It is good to meet you, ' + myName)
print('The length of your name is:')
print(len(myName))
print('What is your age?') #ask for their age
myAge = input()
print('You will be ' + str(int(myAge) + 1) + ' in a year.')
###Output
Hello, world!
What is your name?
It is good to meet you, A
The length of your name is:
1
What is your age?
3
You will be 4 in a year.
###Markdown
2. Flow Control 2.1 Flow Control statements 2.1.1 The If and Elif statement
###Code
name = 'Carol'
age = 3000
if name == 'Alice':
print('Hello, Alice')
elif age < 12:
print('You are not Alice, kiddo')
elif age > 2000:
print('Unlike you, Alice is not an undead, immortal vampire')
elif age > 100:
print('You are not Alice, grannie')
name = 'Carol'
age = 3000
if name == 'Alice':
print('Hello, Alice')
elif age < 12:
print('You are not Alice, kiddo')
elif age > 100:
print('You are not Alice, grannie')
elif age > 2000:
print('Unlike you, Alice is not an undead, immortal vampire')
name = 'Carol'
age = 3000
if name == 'Alice':
print('Hi, Alice')
elif age < 12:
print('You are not Alice, kiddo')
else:
    print('You are not Alice nor a little kid')
###Output
You are not Alice nor a little kid
###Markdown
2.2 while Loop Statements
###Code
spam = 0
if spam < 5:
print('Hello, world!')
spam = spam + 1
#the while loop
spam = 0
while spam < 5:
print('Hello, world!')
spam = spam + 1
#An annoying while loop
name = ''
while name != 'your name':
print('Please type your name')
name = input()
print('Thank you!')
while True:
print('Please type your name:')
name = input()
if name == 'your name':
break
print('Thank you!')
#continue statement
while True:
print('Who are you?')
name = input()
if name != 'Joe':
continue
print('Hello, Joe. What is the password? (It is a fish.)')
password = input()
if password == 'swordfish':
break
print('Access granted.')
#'Truthy' and 'Falsy' values
name = ''
while not name:
print('Enter your name:')
name = input()
print('How many guests will you have?')
numOfGuests = int(input())
if numOfGuests:
print('Be sure to have enough room for all your guests.')
print('Done')
###Output
Enter your name:
0
How many guests will you have?
0
Done
###Markdown
2.3 for Loops and the range() Function
###Code
print('My name is')
for i in range(5):
print('Jimmy Five Times (' + str(i) + ')')
total = 0
for i in range(101):
total = total + i
print(total)
print('My name is')
i = 0
while i < 5:
print('HBN cute no1 (' + str(i) + ')')
i = i + 1
for i in range(12, 16):
print(i)
for i in range (10, 20, 2):
print(i)
for i in range(5, -1, -1):
print(i)
###Output
5
4
3
2
1
0
###Markdown
2.4 Import Modules
###Code
#import modules
import random
for i in range (5):
print(random.randint(1,10))
import sys, os, math
#exitExample
while True:
print('Type exit to exit.')
response = input()
if response == 'exit':
sys.exit()
print('You typed ' + response + '.')
###Output
Type exit to exit.
Anna
You typed Anna.
Type exit to exit.
exit\
You typed exit\.
Type exit to exit.
exit
###Markdown
2.4 A Short Program: Guess the Number
###Code
#This is a guess the number game
import random
secretNumber = random.randint(1, 20)
print('I am thinking of a number between 1 and 20.')
#Ask the player to guess 6 times.
for guessesTaken in range(1, 7):
print('Take a guess.')
guess = int(input())
if guess < secretNumber:
print('Your guess is too low!')
elif guess > secretNumber:
print('Your guess is too high!')
else:
break #This condition is the correct guess!
if guess == secretNumber:
print('Good job! You guessed my number in ' + str(guessesTaken) + ' guesses!')
else:
print('Nope. The number I was thinking of was ' + str(secretNumber))
###Output
I am thinking of a number between 1 and 20.
Take a guess.
8
Your guess is too low!
Take a guess.
19
Your guess is too high!
Take a guess.
20
Your guess is too high!
Take a guess.
3
Your guess is too low!
Take a guess.
4
Your guess is too low!
Take a guess.
6
Your guess is too low!
Nope. The number I was thinking of was 11
###Markdown
2.5 A Short Program: Rock, Paper and Scissors
###Code
import random, sys
print('ROCK, PAPER, SCISSORS')
#These variables keep track of the number of wins, losses, and ties.
wins = 0
losses = 0
ties = 0
while True: #The main game loop.
print('%s Wins, %s Losses, %s Ties' % (wins, losses, ties))
while True: #The player input loop.
print('Enter your move: (r)ock (p)aper (s)cissors or (q)uit')
playerMove = input()
if playerMove == 'q':
sys.exit() #Quit the program.
if playerMove == 'r' or playerMove == 'p' or playerMove == 's':
break #Break out of the player input loop.
print('Type one of r, p, s, or q.')
#Display what the player chose:
if playerMove == 'r':
print('ROCK versus...')
elif playerMove == 'p':
print('PAPER versus...')
elif playerMove == 's':
print('SCISSORS versus...')
# Display what the computer chose:
randomNumber = random.randint(1, 3)
if randomNumber == 1:
computerMove = 'r'
print('ROCK')
elif randomNumber == 2:
computerMove = 'p'
print('PAPER')
elif randomNumber == 3:
computerMove = 's'
print('SCISSORS')
# Display and record the win/loss/tie:
if playerMove == computerMove:
print('It is a tie!')
ties = ties + 1
elif playerMove == 'r' and computerMove == 's':
print('You win!')
wins = wins + 1
elif playerMove == 'p' and computerMove == 'r':
print('You win!')
wins = wins + 1
elif playerMove == 's' and computerMove == 'p':
print('You win!')
wins = wins + 1
elif playerMove == 'r' and computerMove == 'p':
print('You lose!')
losses = losses + 1
elif playerMove == 'p' and computerMove == 's':
print('You lose!')
losses = losses + 1
elif playerMove == 's' and computerMove == 'r':
print('You lose!')
losses = losses + 1
###Output
ROCK, PAPER, SCISSORS
0 Wins, 0 Losses, 0 Ties
Enter your move: (r)ock (p)aper (s)cissors or (q)uit
r
ROCK versus...
ROCK
It is a tie!
0 Wins, 0 Losses, 1 Ties
Enter your move: (r)ock (p)aper (s)cissors or (q)uit
s
SCISSORS versus...
PAPER
You win!
1 Wins, 0 Losses, 1 Ties
Enter your move: (r)ock (p)aper (s)cissors or (q)uit
p
PAPER versus...
SCISSORS
You lose!
1 Wins, 1 Losses, 1 Ties
Enter your move: (r)ock (p)aper (s)cissors or (q)uit
q
###Markdown
2.6 Practice Questions
###Code
print('How many spam?')
spam = int(input())
if spam == 1:
print('Hello')
elif spam == 2:
print('Howdy')
else:
print('Greetings!')
for i in range(1, 11):
print(i)
i = 1
while i < 11:
print(i)
i = i + 1
###Output
1
2
3
4
5
6
7
8
9
10
###Markdown
round() function returns a floating point number that is a rounded version of the specified number, with the specified number of decimals. round(number, digits): number (required) -> the number to be rounded. digits (optional) -> the number of decimals to use when rounding the number, default = 0
###Code
x = float(input())
x = round(x)
print(x)
###Output
0.88888
1
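###Markdown
A short follow-up sketch (not in the original cell) showing the optional digits argument described above; the example values are arbitrary.
###Code
print(round(3.14159, 2)) # keep two decimals
print(round(1.97, 1)) # keep one decimal
###Output
3.14
2.0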
###Markdown
abs() function returns the absolute value of the specified number. abs(n): n (required): a number
###Code
x = int(input())
x = abs(x)
print(x)
###Output
-678
678
###Markdown
3 Function 3.1 Hello Function
###Code
def hello():
print('Howdy!')
print('Howdy!!!')
print('Hello there!')
hello()
hello()
hello()
def hello(name):
print('Hello, ' + name)
hello('Alice')
hello('Bob')
def sayHello(name):
print('Hello, ' + name)
sayHello('Al')
###Output
Hello, Al
###Markdown
3.2 magic8ball
###Code
import random
def getAnswer(answerNumber):
if answerNumber == 1:
return 'It is certain'
elif answerNumber == 2:
return 'It is decidely so'
elif answerNumber == 3:
return 'Yes'
elif answerNumber == 4:
return 'Reply hazy try again'
elif answerNumber == 5:
return 'Ask again later'
elif answerNumber == 6:
return 'Concentrate and ask again'
elif answerNumber == 7:
return 'My reply is no'
elif answerNumber == 8:
return 'Outlook not so good'
elif answerNumber == 9:
return 'Very doubtful'
r = random.randint(1, 9)
fortune = getAnswer(r)
print(fortune)
def getAnswer(answerNumber):
if answerNumber == 1:
return 'It is certain'
elif answerNumber == 2:
return 'It is decidely so'
elif answerNumber == 3:
return 'Yes'
elif answerNumber == 4:
return 'Reply hazy try again'
elif answerNumber == 5:
return 'Ask again later'
elif answerNumber == 6:
return 'Concentrate and ask again'
elif answerNumber == 7:
return 'My reply is no'
elif answerNumber == 8:
return 'Outlook not so good'
elif answerNumber == 9:
return 'Very doubtful'
print(getAnswer(random.randint(1, 9)))
###Output
It is certain
###Markdown
3.3 None Value
###Code
spam = print('Hello')
None == spam
###Output
Hello
###Markdown
3.4 Keyword Arguments and the print() Function
###Code
print('Hello')
print('World')
print('Hello', end = '')
print('World')
print('dogs', 'cats', 'mice')
print('dogs', 'cats', 'mice', sep = ', ')
###Output
dogs, cats, mice
###Markdown
3.5 Local vs Global Variables
###Code
#Local Variables cannot be used in the Global Scope
def spam():
eggs = 31337
spam()
print(eggs)
#Local Scopes cannot use variables in other Local Scopes
def spam():
eggs = 99
bacon()
print(eggs)
def bacon():
ham = 101
eggs = 0
spam()
#Global Variables can be read from a Local Scope
def spam():
print(eggs)
eggs = 42
spam()
print(eggs)
#Local and Global Variables with the same name
def spam():
eggs = 'spam local'
print(eggs) #prints 'spam local'
def bacon():
eggs = 'bacon local'
print(eggs) #prints 'bacon local'
spam()
print(eggs) #print 'bacon local'
eggs = 'global'
bacon()
print(eggs) #print 'global
#The global statement
def spam():
global eggs
eggs = 'spam'
eggs = 'global'
spam()
print(eggs)
#sameNameLocalGlobal
def spam():
global eggs
eggs = 'spam' #this is the global
def bacon():
eggs = 'bacon' #this is the local
def ham():
print(eggs) #this is the global
eggs = 42 #this is the global
spam()
print(eggs)
###Output
spam
###Markdown
3.6 Exception Handling
###Code
def spam(divideBy):
return 42/divideBy
print(spam(2))
print(spam(12))
print(spam(0))
print(spam(1))
def spam(divideBy):
try:
return 42/divideBy
except ZeroDivisionError:
print('Error: Invalid argument.')
print(spam(2))
print(spam(12))
print(spam(0))
print(spam(1))
def spam(divideBy):
return 42/divideBy
try:
print(spam(2))
print(spam(12))
print(spam(0))
print(spam(1))
except ZeroDivisionError:
print('Error: Invalid argument.')
#The reason print(spam(1)) is never executed is because once the execution jumps to the code in the except clause, it does not return to the try clause. Instead, it just continues moving down the program as normal.
###Output
21.0
3.5
Error: Invalid argument.
###Markdown
3.7 abcdCallStack
###Code
def a():
print('a() starts')
b()
d()
print('a() returns')
def b():
print('b() starts')
c()
print('b() returns')
def c():
print('c() starts')
print('c() returns')
def d():
print('d() starts')
print('d() returns')
a()
###Output
a() starts
b() starts
c() starts
c() returns
b() returns
d() starts
d() returns
a() returns
###Markdown
3.8 A Short Program - Zigzag
###Code
import time, sys
indent = 0 # How many spaces to indent.
indentIncreasing = True # Whether the indentation is increasing or not.
try:
while True: # The main program loop.
print(' ' * indent, end='')
print('********')
time.sleep(0.1) # Pause for 1/10 of a second.
if indentIncreasing:
# Increase the number of spaces:
indent = indent + 1
if indent == 20:
# Change direction:
indentIncreasing = False
else:
# Decrease the number of spaces:
indent = indent - 1
if indent == 0:
# Change direction:
indentIncreasing = True
except KeyboardInterrupt:
sys.exit()
###Output
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
********
###Markdown
3.9 Practice Projects
###Code
#The Collatz Sequence
import sys
def collatz(number):
if number % 2 == 0:
result = number/2 #Even number
elif number % 2 == 1:
result = 3*number + 1 #Odd number
while result == 1: #Print the last result when it = 1
print(result)
    sys.exit() #To ensure it won't print 1 forever
while result != 1:
print(result)
number = result
return collatz(number)
print('Enter a number')
number = int(input())
collatz(number)
#Input validation
import sys
def collatz(number):
if number % 2 == 0:
result = number/2 #Even number
elif number % 2 == 1:
result = 3*number + 1 #Odd number
while result == 1: #Print the last result when it = 1
print(result)
    sys.exit() #To ensure it won't print 1 forever
while result != 1:
print(result)
number = result
return collatz(number)
print('Enter a number')
try:
number = int(input())
collatz(number)
except ValueError:
print('You must enter an integer')
###Output
Enter a number
56
28.0
14.0
7.0
22.0
11.0
34.0
17.0
52.0
26.0
13.0
40.0
20.0
10.0
5.0
16.0
8.0
4.0
2.0
1.0
###Markdown
4 Lists 4.1 The List Data Type 4.1.1 Getting Individual Values in a List with Indexes
###Code
spam = ['cat', 'bat', 'rat', 'elephant']
spam[0]
'Hello, ' + spam[0]
'The ' + spam[1] + ' ate the ' + spam[0] + '.'
abc = [['cat', 'bat'], [10, 20, 30, 40, 50]]
abc[0] #the first index dictates which list value to use
abc[0][1] #the second indicates the value within the list value
abc[1][3]
xyz = ['cat', 'bat', 'rat', 'elephant']
xyz[-1] #Negative indexes count from the right side, start with -1
'The ' + xyz[-1] + ' is afraid of the ' + xyz[-3] + '.'
###Output
_____no_output_____
###Markdown
4.1.2 Getting a List from Another List with Slices
###Code
mew = ['cat', 'bat', 'rat', 'elephant']
mew[0:4] #the first integer
#leave the first index means you begin with 0
s = ['cat', 'bat', 'rat', 'elephant']
s[:2]
#leave the second index means you slice to the end of the list
s[1:]
#you can leave both 2 values
s[:]
###Output
_____no_output_____
###Markdown
4.1.3 Getting the List's Length with the len() function
###Code
a = ['cat', 'dog', 'mouse']
len(a)
###Output
_____no_output_____
###Markdown
4.1.4 Changing Values in a List with Indexes
###Code
c = ['cat', 'bat', 'rat', 'elephant']
c[2] = 'allavad'
c
###Output
_____no_output_____
###Markdown
4.1.5 List Concatenation and List Replication
###Code
[1, 2, 3] + ['A', 'B', 'C']
['X', 'Y', 'Z'] * 3
d = [1, 2, 3]
d = d + ['A', 'B', 'C']
d
###Output
_____no_output_____
###Markdown
4.1.6 Removing Values from Lists with del Statements
###Code
e = ['cat', 'bat', 'rat', 'elephant']
del e[2]
e
del e[2]
e
###Output
_____no_output_____
###Markdown
4.2 Working with Lists
###Code
print('Enter the name of cat 1:')
catName1 = input()
print('Enter the name of cat 2:')
catName2 = input()
print('Enter the name of cat 3:')
catName3 = input()
print('Enter the name of cat 4:')
catName4 = input()
print('Enter the name of cat 5:')
catName5 = input()
print('Enter the name of cat 6:')
catName6 = input()
print('The cat names are:')
print(catName1 + ' ' + catName2 + ' ' + catName3 + ' ' + catName4 + ' ' +
catName5 + ' ' + catName6)
#Cat name short version
catNames = []
while True:
print('Enter the name of cat ' + str(len(catNames) + 1) + ' (Or enter nothing to stop.):')
name = input()
if name == '':
break
catNames = catNames + [name] #list concatenation
print('The cat names are: ')
for name in catNames:
print(' ' + name)
###Output
Enter the name of cat 1 (Or enter nothing to stop.):
Miss Cleo
Enter the name of cat 2 (Or enter nothing to stop.):
Alo
Enter the name of cat 3 (Or enter nothing to stop.):
Wea
Enter the name of cat 4 (Or enter nothing to stop.):
Nemo
Enter the name of cat 5 (Or enter nothing to stop.):
Buzz
Enter the name of cat 6 (Or enter nothing to stop.):
The cat names are:
Miss Cleo
Alo
Wea
Nemo
Buzz
###Markdown
4.2.1 Using for Loops with Lists
###Code
supplies = ['pens', 'staplers', 'flamethrowers', 'binders']
for i in range(len(supplies)):
print('Index ' + str(i) + ' in supplies is: ' + supplies[i])
###Output
Index 0 in supplies is: pens
Index 1 in supplies is: staplers
Index 2 in supplies is: flamethrowers
Index 3 in supplies is: binders
###Markdown
4.2.2 in and not in Operators
###Code
'howdy' in ['hello', 'hi', 'howdy', 'heyas']
f = ['hello', 'hi', 'howdy', 'heyas']
'cat' in f
'howdy' not in f
'cat' not in f
#My Pet
myPets = ['Zophie', ' Pooka', 'Fat-tail']
print('Enter a pet name: ')
name = input()
if name not in myPets:
print('I do not have a pet named ' + name)
else:
print(name + ' is my pet.')
###Output
Enter a pet name:
Beth
I do not have a pet named Beth
###Markdown
4.2.3 The Multiple Assignment Trick
###Code
cat = ['fat', 'gray', 'loud']
size, color, disposition = cat
size
###Output
_____no_output_____
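###Markdown
A tiny extra sketch (not in the original cell): the same multiple-assignment syntax can also swap the values of two variables without needing a temporary variable.
###Code
a, b = 'table', 'chair'
a, b = b, a # swap the two values in one line
print(a, b)
###Output
chair table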
###Markdown
4.2.4 Using the enumerate() Function with Lists
###Code
for index, item in enumerate(supplies):
print('Index ' + str(index) + ' in supplies is: ' + item)
###Output
Index 0 in supplies is: pens
Index 1 in supplies is: staplers
Index 2 in supplies is: flamethrowers
Index 3 in supplies is: binders
###Markdown
4.2.5 Using random.choice() and random.shuffle() Functions with Lists
###Code
import random
pets = ['cat', 'dog', 'mouse']
random.choice(pets)
people = ['Alice', 'Bob', 'Carol', 'David']
random.shuffle(people)
people
###Output
_____no_output_____
###Markdown
4.3 Augmented Assignment Operators 4.3.1 Methods
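The list methods are demonstrated in the cells below; the augmented assignment operators named in this heading are not, so here is a minimal sketch of them (added for illustration): an operator such as += combines an operation with assignment.
###Code
spam = 42
spam += 1 # same as spam = spam + 1
spam *= 2 # same as spam = spam * 2
print(spam)
bacon = ['Zophie']
bacon *= 3 # list replication also works with *=
print(bacon)
###Output
86
['Zophie', 'Zophie', 'Zophie']
###Markdown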
###Code
spam = ['hello', 'hi', 'howdy', 'heyas']
spam.index('hello')
spam.index('heyas')
spam.index('howdy howdy howdy')
spam = ['Zophie', 'Pooka', 'Fat-tail', 'Pooka'] #when there are duplicates of the values in list, the index of its first appearance is returned.
spam.index('Pooka')
spam = ['cat', 'dog', 'bat']
spam.append('mouse') #the append() method call adds the argument to the end of the list
spam
spam = ['cat', 'dog', 'bat']
spam.insert(1, 'chicken') #the insert() method can insert a value at any index in the list
spam
spam = ['cat', 'bat', 'rat', 'elephant']
spam.remove('bat') #The remove() method is passed the value to be removed from the list it is called on.
spam
spam = ['cat', 'bat', 'rat', 'cat', 'bat', 'rat']
spam.remove('cat') #If the value appears multiple times in the list, only the first instance of the value will be removed.
spam
spam = [2, 5, 3.14, 1, -7]
spam.sort()
spam
spam = ['ants', 'cats', 'dogs', 'badgers', 'elephants']
spam.sort()
spam
spam.sort(reverse = True)
spam
spam = [1, 3, 2, 4, 'Alice', 'Bob']
spam.sort()
spam = ['Alice', 'ants', 'Bob', 'badgers', 'Carol', 'cats']
spam.sort() #Use 'ASCIIbetical order rather than actual alphabetical order for sorting strings
spam
spam = ['a', 'A', 'z', 'Z']
spam.sort(key = str.lower) #treat the values in the list as if they were lowercase without actually changing the values in the list.
spam
spam = ['cat', 'dog', 'mouse']
spam.reverse()
spam
###Output
_____no_output_____
###Markdown
4.3.2 Example Program: Magic 8 Ball with a List
###Code
import random
messages = ['It is certain',
'It is decidedly so',
'Yes definitely',
'Reply hazy try again',
'Ask again later',
'Concentrate and ask again',
'My reply is no',
'Outlook not so good',
'Very doubtful']
print(messages[random.randint(0, len(messages) - 1)])
###Output
It is certain
###Markdown
4.4 Sequence Data Types
###Code
name = 'Zophie'
name[0]
name[-2]
name[0:4]
'Zo' in name
'z' in name
for i in name:
print('* * * ' + i + ' * * *')
name = 'Zophie the cat'
name[7] = 'the'
name = 'Zophie a cat'
newName = name[0:7] + 'the' + name[8:12]
newName
###Output
_____no_output_____
###Markdown
4.4.1 The Tuple Data Type
###Code
eggs = ('hello', 42, 0.5)
eggs[0]
eggs[1:3]
len(eggs)
eggs = ('hello', 42, 0.5)
eggs[1] = 99
type(('hello',))
type(('hello'))
tuple(['cat', 'dog', 5])
list(('cat', 'dog', 5))
list('hello')
###Output
_____no_output_____
###Markdown
4.4.2 References
###Code
spam = 42
cheese = spam
spam = 100
spam
cheese
spam = [0, 1, 2, 3, 4, 5]
cheese = spam #The reference is being copied, not the list
cheese[1] = 'Hello!' #This changes the list value.
spam
cheese #The cheese variable refers to the same list.
id('Howdy') #The returned number will be different in your machine.
bacon = 'Hello'
id(bacon)
bacon += ' world!' #A new string is made from 'Hello' and ' world!'.
id(bacon) #bacon now refers to a completely different string.
eggs = ['cat', 'dog'] #This creates a new list.
id(eggs)
eggs.append('mouse') #append() modifies the list "in place".
id(eggs) #eggs still refers to the same list as before.
eggs = ['bat', 'rat', 'cow'] #This creates a new list, which has a new identity.
id(eggs) #eggs now refers to a completely different list.
#Passing References
def eggs(someParameter):
someParameter.append('Hello')
spam = [1, 2, 3]
eggs(spam)
print(spam)
#The copy Module's copy() and deepcopy() Functions
import copy
spam = ['A', 'B', 'C', 'D']
id(spam)
cheese = copy.copy(spam)
id(cheese) #cheese is a different list with different identity
cheese[1] = 42
cheese
spam
###Output
_____no_output_____
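###Markdown
A short extra sketch (not in the original cell) for copy.deepcopy(), which the comment above mentions but does not demonstrate: deepcopy() also copies any inner lists, so changing the copy's inner values leaves the original untouched.
###Code
import copy
spam = [['a', 'b'], ['c', 'd']] # a list that contains inner lists
cheese = copy.deepcopy(spam) # deepcopy copies the inner lists too
cheese[0][0] = 'X'
print(spam) # the original inner list is unchanged
print(cheese)
###Output
[['a', 'b'], ['c', 'd']]
[['X', 'b'], ['c', 'd']]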
###Markdown
4.5 A Short Program: Conway's Game of Life
###Code
#Conway's Game of Life
import random, time, copy
WIDTH = 60
HEIGHT = 20
# Create a list of list for the cells:
nextCells = []
for x in range(WIDTH):
column = [] # Create a new column.
for y in range(HEIGHT):
if random.randint(0, 1) == 0:
column.append('#') # Add a living cell.
else:
column.append(' ') # Add a dead cell.
nextCells.append(column) # nextCells is a list of column lists.
while True: # Main program loop.
print('\n\n\n\n\n') # Separate each step with newlines.
currentCells = copy.deepcopy(nextCells)
# Print currentCells on the screen:
for y in range(HEIGHT):
for x in range(WIDTH):
print(currentCells[x][y], end='') # Print the # or space.
print() # Print a newline at the end of the row.
# Calculate the next step's cells based on current step's cells:
for x in range(WIDTH):
for y in range(HEIGHT):
# Get neighboring coordinates:
# `% WIDTH` ensures leftCoord is always between 0 and WIDTH - 1
leftCoord = (x - 1) % WIDTH
rightCoord = (x + 1) % WIDTH
aboveCoord = (y - 1) % HEIGHT
belowCoord = (y + 1) % HEIGHT
# Count number of living neighbors:
numNeighbors = 0
if currentCells[leftCoord][aboveCoord] == '#':
numNeighbors += 1 # Top-left neighbor is alive.
if currentCells[x][aboveCoord] == '#':
numNeighbors += 1 # Top neighbor is alive.
if currentCells[rightCoord][aboveCoord] == '#':
numNeighbors += 1 # Top-right neighbor is alive.
if currentCells[leftCoord][y] == '#':
numNeighbors += 1 # Left neighbor is alive.
if currentCells[rightCoord][y] == '#':
numNeighbors += 1 # Right neighbor is alive.
if currentCells[leftCoord][belowCoord] == '#':
numNeighbors += 1 # Bottom-left neighbor is alive.
if currentCells[x][belowCoord] == '#':
numNeighbors += 1 # Bottom neighbor is alive.
if currentCells[rightCoord][belowCoord] == '#':
numNeighbors += 1 # Bottom-right neighbor is alive.
# Set cell based on Conway's Game of Life rules:
if currentCells[x][y] == '#' and (numNeighbors == 2 or numNeighbors == 3):
# Living cells with 2 or 3 neighbors stay alive:
nextCells[x][y] = '#'
elif currentCells[x][y] == ' ' and numNeighbors == 3:
# Dead cells with 3 neighbors become alive:
nextCells[x][y] = '#'
else:
# Everything else dies or stays dead:
nextCells[x][y] = ' '
time.sleep(1) # Add a 1-second pause to reduce flickering.
###Output
##### # ## # ## ##### ##### # ## # # # ### ###
# # # # ## ## ## # # # ### # # # # # #
# ### #### # # ### # ### ### # ## # #### # #
# # # # # # # ### ###### # ## ####### # #### # ###
# # # #### # ### # # # # # ### ##### # #
# # # # #### # #### ### # # ### ### # # # #
# # ###### ## # # # ### # ## ### #### # ## ### # #
### ## ###### # ### # ## ## # # ##### # ## #
# #### # # ## ## # ### ## ######## ##### ### ## ## #
### # ## ## #### # ## # ## # # ## # ## #
# ## # # # # # # ### # # ## # # ### ## # # ## # ##
## # # # ### # ## #### # # ### ### ## # ## ##
## ### ## # # ### # ## ## # ## # # # ## ###
### # # # # ### # ## # ### ####### # # ## ## # # #
## ## ## # ### #### ## ## # ### # # # # #
# ## #### # ## ## # ### ### ##### # ### ## # #### ###
## ## ##### # ## #### # ### ##### ## ## ### ##
# ### ####### # # # ## # ### #### ## ## ### # ##
# ### # #### ### ## # ### ## #### # ##
# ## # ## #### ## ########## ## # ##### # # ### ##
# ## ## # # ## ## #
# # ### # # # # ## # # #
## # # # # # ## # ## ### # ## ##
# # # # # # # ##### # # ####
# # ### # # # # # # ## # # # #
# #### # # # # # # # ### ### # #
# ## # # ## ### ## #
# # ### ## #
# # # # # ### #### ### ## #
# # # # # #### # # # # # #
# # # # # ## # ## #### ##### #
# # ##### # # # # # # # #######
# # # ## # # # # # ### #
# # # ### # # # # # #### # ###
# # # # # # # # # #
# ## # #### ## # # #
# # # ## #
# #### # #### ## # # #
# # ## # ## # #
# ##### # # ### ## # #
## # ### # # # # #
# # # # # ## # # ## #
## # # # # ## ### ###### # # #
# # # # # # ## # # ###
# # # # # # # ## ## # # #
# # ## # # ### # # # #
## # # ## # # # # # # #
# # ## # ## # #
# ## # ### ### ##
# ## ## # # # # ###
# # # # # #### # # #
# # ##### # # # ## # # # # ## #
# ## # # # # # # # #
# # ## ## ### ## ### # ####
# ## # ## ## ## ##
# # ## # #
## # # # # # # #
### # # ### ## ##
# ### # # ### ## # #
## # # # # # # #
# ## #### # # ## ## ##
# ## # # # ### # ## # #
## ## # ###### # # ### #
# ## ## ###### ## # # # # #
# # # # # #### # # ##### #
# ### # ## ## # # # # #
### ## ## # # # #
##### # # # ## ###
# ### ### # ## ## ## ##
# ## # ## # # ## ###
# ## # ## ## ##### # # #
## ## ##### # # ## ## # ###
### # # # # #### ## ## # # #
# ## ## # ##### ## ###
## #### # # # ##### ##
## # # # # ##
### # ### # # ## ##
### # # # # ### #
# # ## # # # ## ##
## # # ## ## ## ## ##
# # ###### #### # # # #
# # # # #### # ## #
## # # # ##### # #
# ## ## # # ## # # ### # # #
## # ## # # ## # #
# #### # # # ##
### # # ## # ## ##
# # # ## # #
### # # # # ## # # #
# # # # # # ## # ##
# # # ## # ## # # # # # # ##
# # ##### ## ## ### # ### #####
## # ## # #### ## ## ##
## # # # ## ## # # #
## ## ## # # #
# ########## ## ## # #
## # ####### ## ## #
# # # ## ## # #
# # ### ## ## #
# # # # ## ## # ## ##
## # # #### # # # # # # #
# ### ## # # ##### #
# ##### ## # ####### #
# ## # #### ## #
# ####### # # ## ### #
##### # ## #### #
### # # # # # ######
### # ## ## # ## ## # ##
### # # # ##### #
# # # # # ###### # ## #
# ## # ##### # # ## ## # #
## # # ## # ### # ### # ###
# ## # ## # # # # # ## #
# # # ### # # ### ## #
# ## # # ### #
####### # ## #
#### ### # # # ###
# ### # # ## ###
## # ### # # # #
# # ## # ## ##
### # # # #
## # ## # # # # # ###
#### # # # ## # # ###
## # ### # # ## # #
# # ## ## ##
## # ## ## # # # # # ##
# # # # # ## # #
# # ### ### ## # # # #
# ### ## ### # # # # ##
# # # # ## # # ####
# # # # # ### # # # #
## # ## # ## ## # ## # #
# ## # ## # # # ## #
# # ###### # ### # ##
# # ### #
# # ## #
# ## ## # # # ###
# # # # # ## ### #
# # # # ## # #### ## ##
# ## # ## #### #
# ### ### #### #
# # # # # ## #
# # # ## ## # ## #
# # ### # #
# ## ## # #### ## ## #
### # # # ## ## # # #
# # # ## # # #
# # # # # # # # # ## ##
# # # # # # # # ## ## ##
### # # ## ## ## # # #
### # # # # # ### ## #
### # # # ###### ## ### ###
# # ## # # # # ## # #
## # #### #
# ## ## #
## ## ## # #
### ### # # ### ###
## ### # ##### ## # # # #
# ### # ### # # # ##
#### # # # # # # ##
## # # # ## ## # ###
## # # ##
### # # ### ## #
# # ### ##
# # ## ##### ## ###
# ## ## # ## # # ### ##
# # # #
# ### ### # ##
## ## # ## ## ## # #
# # # # ## ## # # # ### #
# # # # # ### # # # # ##
# ## # # # # # ## ## #
# ## # # # # # # # # #
### # # ##### #
## #### # #
# # # # ## # ####
# # # # ### # # ## #####
# # # # # ## ## # # # ##
# # # ## ## ### #
## # # ## ## # #
# # # ## ## ## #
## # # # # # #####
# # # # ##
# ## ## ## # ### #
## # ## # # # # # ##
## ## # # ## # #
### # ## # # #
## ### #### ### ##
### # ## # ######## ## #
## # ### ## ### # # # ## ###
# # # # ## # ## # ## # ##
# # # ## # # ## #### # #
# ## # # # ## ##
# # # ######
## # ## ## ##
# # ## ## ## #
# ##### ### # # # # # #
## ##### ### # ## # ## ##
# ## ### # #### #
# # ## # # # #
# ## # ## ###### # #
# # # # ## # # ## #
## ### ## ## #
# ## #### ### # #
##### ## # # # ##
# # ## # # # ###
# # # # # #
# # # ###### # # # #
### # # #### # # # ## #
# ## # # # ## # ## ## #
# ## ###### # ## ## ## ##
# # # # ###### # # # ## ###
### ## ## #
## # # # # ##
## ### ##### #
### # # ## # ## ## ## #
## # # # # # ### ## ##
# # ### # ## # ## # #
# # ## #### #
# # # ## ## # # #
##### # ## # # ## # #
# # # # # # # # ##
# #### # # ## ## ###
# # # ## # #
# # ## # # # # #
# ## # ## # # #
# #### ### # # #
# # #### ### #
# # ## # # # # ## # # ##
# ### # # # # ### # ### # #
# ######### # # # # ### ##
# ## ###### # # # ## ## # ## #
# ## # #### ### ## ## ## #
# # ## ###
# # ## # # # ###
# # # # ## # #
## ## ## ## ## # # # # ##
### ## # # ## ## # # # ##
# # # ## # ## #
# ### # # # # ## #
# # # # # ## ### # # #
# # # # # ## # #
# ## # # ## ### # #
# # # # # # # ##
## ## ## ### # # ##
# ## # ## ###
### # # # ###
# # # # # # ##
### # # ## # ## # ## #
# # ## ## ### # ## ## ## # #
# # # # # # # ## # # #
## ## # # ## # # ## ## #
###### #### #### # ##### #####
### ## ## # ### #
# ## ## # #
### # ## # ## ## # #
## ## ## ## # # # # ##
# ## # ## ## # # ### #
# # ## # ##
## # # ## # # #
# # ## ### ## ## ##
# ## ## ## ### # # ###
## ## ##### # # ##
## # ## ## ## #
# ## ## #### # #
# ## # # # ## #
# # # # # #
# # ## ### #
# # # ## # # ### ### ## # # #
# ## #### ## ## ### ##
# # ## ### ## # # # # ##
# ## # ## ## # ## #
# ## # # ## # # #
## ## # # # #
# # # # #
## # # ## ## #
# # ### # ## # # # ## ##
## ## ## # # # #
## # ## ## ##### ##
### # # # # ##
### ### # # # ## #
# ## ## # # # # # #
# # ## # #### #
# # ## ## # # #
# ##### # ##
# # ## # ## #
### ### # ##
## ## ## ### # # ## # # #
### # ### # # ### # # #
# # ## ### # # # ## #### # ##
## ### ## # # # #
## # ## ## ### #####
# ## # ### ##
# ## #
## # # # #
## ## ## ## ## ##
# # # ## ## # # ## ##
# # # ## ### # # #
# # ## ## ##### ###
# # # ## ## ## ## # ###
## # #### # # ## ### ##
# ## # ### # ## # #
# # # ### # #
# # # # ### ## #
# # # ## ##### # #
# ### # # ### ### ##
# # # # # ## ##
# # # ### ##
# # ## # # ## # ## #
### # # # # ## # # # ##
## # # ## #
## ## ## # # # # # ##
# # ### ### # # #
### # ##
### ### # #
# ##### # # ## # ##
## # # ### # # # # #
## # ## ## # ## # #
## ## # # ## # #
# # # # ## ## # # #
###Markdown
4.6 Practice Questions
###Code
#2
spam = [2, 4, 6, 8 , 10]
spam[2] = 'hello'
spam
spam = ['a', 'b', 'c', 'd']
#3
spam[int(int('3' * 2) // 11)] #'3' * 2 repeats the string to give '33', int('33') is 33, and 33 // 11 is 3, so this evaluates to spam[3]
#4
spam[-1]
#5
spam[:2]
bacon = [3.14, 'cat', 11, 'cat', True]
#6
bacon.index('cat')
#7
bacon.append(99)
bacon
#8
bacon.remove('cat')
bacon
#14
type((42,))
###Output
_____no_output_____
###Markdown
4.7 Practice Projects
###Code
a = []
print(a)
#Comma Code
def commacode(items):
    #Join all items but the last with ', ', then add 'and' before the final item
    if not items:
        print('Enter a list!')
        return
    print(', '.join(items[:-1]) + ' and ' + items[-1])
spam = ['apples', 'tofu', 'bananas', 'cats']
commacode(spam)
#Coin Flip Streaks
import random
numberofStreaks = 0
for experimentNumber in range(10000):
    #Code that creates a list of 100 'H' and 'T' values
    result = []
    for i in range(100):
        if random.randint(0, 1) == 0:
            result.append('H')
        else:
            result.append('T')
    #Code that checks if there is a streak of 6 heads or 6 tails in a row
    streak = 1
    for i in range(1, len(result)):
        if result[i] == result[i - 1]:
            streak += 1
        else:
            streak = 1
        if streak == 6:
            numberofStreaks += 1
            break
print('Chance of streak: %s%%' % (numberofStreaks / 100))
#Simpler version: build a single list of 100 coin flips
import random
result = []
for i in range(100):
    if random.randint(0, 1) == 0:
        result.append("H")
    else:
        result.append("T")
print(result)
###Output
<class 'list'>
###Markdown
5 Dictionaries & Structuring Data 5.1 Dictionary Data Type 5.1.1 Dictionaries vs List
###Code
#Lists can compare by its order
spam = ['cat', 'dog', 'mouse']
bacon = ['mouse', 'cat', 'dog']
spam == bacon
#Dictionaries don't have an order
eggs = {'name': 'Zophie', 'species': 'cat', 'age' : 8}
ham = {'species': 'cat', 'name': 'Zophie', 'age': 8}
eggs == ham
#Accessing a key that isn't in the dictionary raises a KeyError
spam = {'name': 'Zophie', 'age': 7}
spam['color']
#Birthday
birthdays = {'Alice': 'Apr 1', 'Bob': 'Dec 12', 'Carol': 'Mar 4'}
while True:
print('Enter a name: (blank to quit)')
name = input()
if name == '':
break
if name in birthdays:
print(birthdays[name] + ' is the birthday of ' + name)
else:
print('I do not have birthday information for ' + name)
print('What is their birthday?')
bday = input()
birthdays[name] = bday
print('Birthday database updated.')
###Output
Enter a name: (blank to quit)
Alice
Apr 1 is the birthday of Alice
Enter a name: (blank to quit)
Tom
I do not have birthday information for Tom
What is their birthday?
Sep 9
Birthday database updated.
Enter a name: (blank to quit)
Tom
Sep 9 is the birthday of Tom
Enter a name: (blank to quit)
I do not have birthday information for
What is their birthday?
Birthday database updated.
Enter a name: (blank to quit)
###Markdown
5.1.2 The values(), keys() and items() methods
###Code
spam = {'color': 'red', 'age': '42'}
for v in spam.values():
print(v)
for k in spam.keys():
print(k)
for i in spam.items():
print(i)
#To get a plain list of keys, convert the dict_keys view (e.g. with list() or unpacking)
spam = {'color': 'red', 'age': '42'}
dic = spam.keys() #the book uses list(spam.keys()) to get a list of keys; unpacking the view with [*dic] below is an equivalent alternative
[*dic]
spam = {'color': 'red', 'age': 42}
for k , v in spam.items():
print('Key : ' + k + ' Value: ' + str(v))
###Output
Key : color Value: red
Key : age Value: 42
###Markdown
5.1.3 Checking whether a Key or Value exist in a Dictionary
###Code
spam = {'name': 'Zophie', 'age': 7}
'name' in spam.keys()
'Zophie' in spam.values()
'color' in spam.keys()
'color' not in spam.keys()
'color' in spam
###Output
_____no_output_____
###Markdown
5.1.4 The get() method
###Code
picnicItems = {'apple': 5, 'cups': 2}
'I am bringing ' + str(picnicItems.get('cups', 0)) + ' cups.'
'I am bringing ' + str(picnicItems.get('eggs', 0)) + ' eggs.'
picnicItems = {'apple': 5, 'cups': 2}
'I am bringing ' + str(picnicItems['eggs']) + ' eggs.'
###Output
_____no_output_____
###Markdown
5.1.5 The setdefault() method
###Code
spam = {'name': 'Pooka', 'age': 5}
spam.setdefault('color', 'black')
spam
spam.setdefault('color', 'white')
spam
#Character count
message = 'It was a bright cold day in April, and the clocks were striking thirteen'
count = {}
for character in message:
count.setdefault(character, 0)
count[character] = count[character] + 1
print(count)
###Output
{'I': 1, 't': 6, ' ': 13, 'w': 2, 'a': 4, 's': 3, 'b': 1, 'r': 5, 'i': 6, 'g': 2, 'h': 3, 'c': 3, 'o': 2, 'l': 3, 'd': 3, 'y': 1, 'n': 4, 'A': 1, 'p': 1, ',': 1, 'e': 5, 'k': 2}
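###Markdown
 A side note (not from the book): the standard library's `collections.Counter` produces the same character counts without the `setdefault()` bookkeeping. The cell below is just an alternative sketch for comparison.
###Code
# Alternative character count using collections.Counter from the standard library
from collections import Counter
message = 'It was a bright cold day in April, and the clocks were striking thirteen'
count = Counter(message)  # maps each character to how many times it appears
print(count)
###Output
_____no_output_____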
###Markdown
5.2 Pretty Printing
###Code
import pprint
message = 'It was a bright cold day in April, and the clocks were striking thirteen'
count = {}
for character in message:
count.setdefault(character, 0)
count[character] = count[character] + 1
pprint.pprint(count)
print(pprint.pformat(count))
###Output
{' ': 13,
',': 1,
'A': 1,
'I': 1,
'a': 4,
'b': 1,
'c': 3,
'd': 3,
'e': 5,
'g': 2,
'h': 3,
'i': 6,
'k': 2,
'l': 3,
'n': 4,
'o': 2,
'p': 1,
'r': 5,
's': 3,
't': 6,
'w': 2,
'y': 1}
###Markdown
 5.3 Using Data Structures to Model Real-World Things 5.3.1 A Tic-Tac-Toe Board
###Code
theBoard = {'top-L': 'O', 'top-M': 'O', 'top-R': 'O',
'mid-L': 'X', 'mid-M': 'X', 'mid-R': ' ',
'low-L': ' ', 'low-M': ' ', 'low-R': 'X'}
#print the board
def printBoard(board):
print(board['top-L'] + '|' + board['top-M'] + '|' + board['top-R'])
print('-+-+-')
print(board['mid-L'] + '|' + board['mid-M'] + '|' + board['mid-R'])
print('-+-+-')
print(board['low-L'] + '|' + board['low-M'] + '|' + board['low-R'])
printBoard(theBoard)
###Output
O|O|O
-+-+-
X|X|
-+-+-
| |X
###Markdown
5.3.2 The Game
###Code
theBoard = {'top-L': ' ', 'top-M': ' ', 'top-R': ' ', 'mid-L': ' ', 'mid-M': ' ', 'mid-R': ' ', 'low-L': ' ', 'low-M': ' ', 'low-R': ' '}
def printBoard(board):
print(board['top-L'] + '|' + board['top-M'] + '|' + board['top-R'])
print('-+-+-')
print(board['mid-L'] + '|' + board['mid-M'] + '|' + board['mid-R'])
print('-+-+-')
print(board['low-L'] + '|' + board['low-M'] + '|' + board['low-R'])
turn = 'X'
for i in range(9):
printBoard(theBoard)
print('Turn for ' + turn + '. Move on which space?')
move = input()
theBoard[move] = turn
if turn == 'X':
turn = 'O'
else:
turn = 'X'
printBoard(theBoard)
#It isn't a complete tic-tac-toe game - for instance, it doesn't ever check whether a player has won - but it's enough to see how data structures can be used in programs. (A small win-check sketch is added after the output below.)
###Output
| |
-+-+-
| |
-+-+-
| |
Turn for X. Move on which space?
mid-M
| |
-+-+-
|X|
-+-+-
| |
Turn for O. Move on which space?
top-M
|O|
-+-+-
|X|
-+-+-
| |
Turn for X. Move on which space?
top-R
|O|X
-+-+-
|X|
-+-+-
| |
Turn for O. Move on which space?
low-R
|O|X
-+-+-
|X|
-+-+-
| |O
Turn for X. Move on which space?
low-L
|O|X
-+-+-
|X|
-+-+-
X| |O
Turn for O. Move on which space?
low-M
|O|X
-+-+-
|X|
-+-+-
X|O|O
Turn for X. Move on which space?
mid-L
|O|X
-+-+-
X|X|
-+-+-
X|O|O
Turn for O. Move on which space?
mid-R
|O|X
-+-+-
X|X|O
-+-+-
X|O|O
Turn for X. Move on which space?
top-L
X|O|X
-+-+-
X|X|O
-+-+-
X|O|O
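###Markdown
 As noted in the comment above, the game never checks for a winner. Below is a minimal sketch of such a check, added here for illustration; it assumes the same `theBoard` dictionary keys used in this chapter and is not part of the book's code.
###Code
# Sketch: check whether the given mark ('X' or 'O') fills a full row, column or diagonal
def isWinner(board, mark):
    lines = [['top-L', 'top-M', 'top-R'],   # rows
             ['mid-L', 'mid-M', 'mid-R'],
             ['low-L', 'low-M', 'low-R'],
             ['top-L', 'mid-L', 'low-L'],   # columns
             ['top-M', 'mid-M', 'low-M'],
             ['top-R', 'mid-R', 'low-R'],
             ['top-L', 'mid-M', 'low-R'],   # diagonals
             ['top-R', 'mid-M', 'low-L']]
    return any(all(board[key] == mark for key in line) for line in lines)
print(isWinner(theBoard, 'X'))
###Output
_____no_output_____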
###Markdown
5.3.3 Nested Dictionaries and Lists
###Code
allGuests = {'Alice': {'apples': 5, 'pretzels': 12},
'Bob': {'ham sandwiches': 3, 'apples': 2},
'Carol': {'cups': 3, 'apple pies': 1}}
def totalBrought(guests, item):
numBrought = 0
for k, v in guests.items():
numBrought = numBrought + v.get(item, 0)
return numBrought
print('Number of things being brought:')
print(' - Apples ' + str(totalBrought(allGuests, 'apples')))
print(' - Cups ' + str(totalBrought(allGuests, 'cups')))
print(' - Cakes ' + str(totalBrought(allGuests, 'cakes')))
print(' - Hams Sandwiches ' + str(totalBrought(allGuests, 'ham sandwiches')))
print(' - Apples Pies ' + str(totalBrought(allGuests, 'apple pies')))
###Output
Number of things being brought:
- Apples 7
- Cups 3
- Cakes 0
- Hams Sandwiches 3
- Apples Pies 1
###Markdown
6 Manipulating Strings 6.1 Working with Strings 6.1.1 String Literals
###Code
#Double Quotes
spam = "That is Alice's cat." #When you use double quotes, Python knows that the single quote is part of the string.
print(spam)
#Escape Characters
spam = 'Say hi to Bob\'s mother'
print(spam)
print("Hello there!\nHow are you?\nI\'m doing fine.")
#Raw strings: completely ignores all escape characters and prints any backslash that appears in the string
print(r'That is Carol\'s cat.')
#Multiline Strings with Triple Quotes
print('''Dear Alice,
Eve's cat has been arrested for catnapping, cat burglary, and extortion.
Sincerely,
Bob.''')
print('Dear Alice,\n\nEve\'s cat has been arrested for catnapping, cat burglary and extortion.\n\nSincerely,\nBob.')
#Multiline Comments
"""This is a test Python program.
Written by Al Sweigart [email protected]
This program was designed for Python 3, not Python 2.
"""
def spam():
"""This is a multiline comment to help
explain what the spam() function does."""
print('Hello!')
###Output
_____no_output_____
###Markdown
6.1.2 Indexing and Slicing Strings
###Code
spam = 'Hello, world!'
spam[7:]
###Output
_____no_output_____
###Markdown
6.1.3 The in and not in Operators with Strings
###Code
'Hello' in 'Hello, World'
'Hello' in 'Hello'
'HELLO' in 'Hello, World'
'' in spam
'cats' not in 'cats and dogs'
###Output
_____no_output_____
###Markdown
6.2 Putting Strings Inside Other Strings
###Code
name = 'Al'
age = 4000
'Hello, my name is ' + name + '. I am ' + str(age) + ' years old.'
#using string interpolation to concatenate the string
name = 'Al'
age = 4000
'Hello, my name is %s. I am %s years old' % (name, age)
#f-string using {}
name = 'Al'
age = 4000
f'My name is {name}. Next year I will be {age + 1}.'
###Output
_____no_output_____
###Markdown
6.3 Useful String Methods 6.3.1 The upper(), lower(), isupper() and islower() Methods
###Code
#Using upper() and lower() to return a new string where all the letters converted to uppercase or lowercase
spam = 'Hello, World!'
spam = spam.upper()
spam
spam = spam.lower()
spam
print('How are you?')
feeling = input()
if feeling.lower() == 'great':
print('I feel great too.')
else:
print('I hope the rest of your day is good.')
#The isupper() and islower() methods will return a Boolean value True if the string has at least one letter and all the letters are uppercase or lowercase.
spam = 'Hello, world!'
spam.islower()
spam.isupper()
'HELLO'.isupper()
'abc12345'.islower()
'1234'.isupper()
'1234'.islower()
'Hello'.upper()
'Hello'.upper().lower()
'Hello'.upper().lower().upper()
'Hello'.lower()
'Hello'.lower().islower()
###Output
_____no_output_____ |
week_1/1.2_tabular_data_pt.1/tabular_data_pt.1.ipynb | ###Markdown
 Lecture 2.2: Tabular Data Pt.1This lecture, we are going to investigate the data exploration powers of [pandas](https://pandas.pydata.org/).**Learning goals:**- Index and select from Dataframes- Clean missing values- Combine Dataframes--- 1. Indexing and selecting dataSince we will be using `DataFrame`s _often_, it is important to get comfortable with common manipulation operations with pandas 🐼, such as indexing and selecting data. 1.1 `[]`There are three main ways of selecting data in a `DataFrame`. The simplest is `[]`, i.e. the standard Python list notation. On a `DataFrame` object, column name(s) are passed as argument(s) to return the selected columns: column_a = df['A'] On a `Series` object, an index label is used as argument to return the selected value: value_4 = s[4] Let's try this out on a real dataset. The `top50.csv` file contains data on the spotify top songs of 2019. 🎵We can load this conveniently with the `.read_csv()` function, one of many [i/o tools](https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html) in pandas:
###Code
import pandas as pd
import numpy as np
df = pd.read_csv('top50.csv', encoding = "latin")
df.head()
###Output
_____no_output_____
###Markdown
The `.head()` method gives us an overview of the first five rows. There are a lot of columns though... Let's just select the artist column:
###Code
df['Artist.Name'].head()
###Output
_____no_output_____
###Markdown
Now we would like to see the song names too. This can be done with the `[]` operator by passing a list of column names. The double `[[` looks a bit goofy, but it works!
###Code
df[['Track.Name', 'Artist.Name']].head()
###Output
_____no_output_____
###Markdown
What if we would like to index (add) data? The `[]` notation offers a neat way to add or replace columns in dataframes:
###Code
df['Beats.Per.Minute.But.A.Bit.Faster'] = df['Beats.Per.Minute'] + 3
df['Beats.Per.Minute.But.A.Bit.Faster'].head()
###Output
_____no_output_____
###Markdown
ℹ️ Adding or replacing entire columns is fine, but careful with trying to set specific row values with the double `[][]` notation. This can lead to problems because the `[]` selector doesn't consistently return a view or a copy of the data. In those cases, pandas will raise a `SettingWithCopy` warning. More details [here](https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.htmlindexing-view-versus-copy). 1.2 `.loc[]``[]` is a convenient way to select data, but not the best way. Don't take my word for it, instead listen to the pandas [documentation](https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html):>The Python and NumPy indexing operators [] and attribute operator . provide quick and easy access to pandas data structures across a wide range of use cases. This makes interactive work intuitive, as there’s little new to learn if you already know how to deal with Python dictionaries and NumPy arrays. However, since the type of the data to be accessed isn’t known in advance, directly using standard operators has some optimization limits. For production code, we recommended that you take advantage of the optimized pandas data access methods exposed in this chapter.One of these "optimized pandas data access methods" is `.loc[]`. It's a _label_ based selection method. It can be used with one or two arguments: df.loc[row_label] df.loc[row_label, column_label]ℹ️ `.loc[]`'s first argument is the _row_ label. Remember that this is different from column names with `[]`! The reason `.loc[]` uses this "row first" notation, is because often we'll want to select on rows, and not on columns.
###Code
# select a row
df.loc[4]
# select a cell
df.loc[4, 'Track.Name']
#select a column
df.loc[:, 'Artist.Name'].head()
###Output
_____no_output_____
###Markdown
`.loc[]` is also cool because it supports label _slicing_. 😎
###Code
# rows with label 2 to 5
df.loc[2:5]
# columns with labels between Track.Name and Genre
df.loc[:, 'Track.Name':'Genre'].head()
###Output
_____no_output_____
###Markdown
Notice how the column slicing uses the _order_ of the column names to determine what's between `Track.Name` and `Genre`.To select several rows/columns, we can also directly use label lists.
###Code
# rows 4, 13, 44 and columns Genre & Energy
df.loc[[4, 12, 44], ['Genre', 'Energy']].head()
###Output
_____no_output_____
###Markdown
What if we want to index data? With `.loc[]`, we just have to assign data of correct shape to the selection:
###Code
print('⏮ Before: ')
print(df.loc[2, ['Track.Name', 'Artist.Name']])
df.loc[2, ['Track.Name', 'Artist.Name']] = ['girlfriend (with Antisocial Flat)', 'Ariana Small']
print('\n⏭ After: ')
print(df.loc[2, ['Track.Name', 'Artist.Name']])
###Output
_____no_output_____
###Markdown
💪 A mistake seems to have slipped in our dataset... Using the method of your choice, write a function which sets the `Energy` of `ROSALÍA`'s big tune `Con Altura` to 9999. The method should return the `DataFrame` after having modified it. The unit test should pass!
###Code
def fix_energy(df):
# INSERT YOUR CODE HERE
return df
def test_energy():
fixed_df = fix_energy(df.copy())
assert fixed_df.loc[44, 'Energy'] == 9999
print('Success! 🎉')
test_energy()
###Output
_____no_output_____
###Markdown
 1.3 `.iloc[]` `.iloc[]` works just like `.loc[]`, except with _indices_ instead of _labels_. i.e. it accepts the row and column positions as opposed to their names. Careful, it can get confusing when your index labels are numbers! It can be used with one or two arguments: df.iloc[row_index] df.iloc[row_index, column_index]
###Code
# row at position 3
df.iloc[3]
# every 3rd row among the first ten rows, and the columns at positions 1 to 3 (second to fourth)
df.iloc[0:10:3, 1:4]
###Output
_____no_output_____
###Markdown
To index values with `.iloc[]`, we also just have to assign data of correct shape to the selection:
###Code
df.iloc[9, 1] = 'good gal'
df.iloc[9]
###Output
_____no_output_____
###Markdown
1.4 Boolean masks Let's imagine that we are music snobs, and don't agree with the genre allocation of the songs. Everyone knows that pop with a bpm < 100 is actually called _slow pop_ 💁♂️. We _need_ to fix this travesty! Since there's 50 rows of data, we don't particularly want to go through the data manually. If only there was a way to do this easily with pandas...
###Code
# update slow pop
df.loc[(df['Genre'] == 'pop') & (df['Beats.Per.Minute'] < 100), 'Genre'] = 'slow pop'
# show rows
df.loc[[3, 5, 7, 12, 37, 43, 49], :'Beats.Per.Minute']
###Output
_____no_output_____
###Markdown
Wow, all it took was one line! It's a complicated expression though, so let's break it down a bit.
###Code
mask = (df['Genre'] == 'pop')
mask.head(10)
###Output
_____no_output_____
###Markdown
This is an example of an overloaded operator. The `==` here is applied _element wise_ to the `Series` returned by the `[]` selection. Therefore the one line expression is equivalent to the following: column_series = df['Genre'] mask_series = (column_series == 'pop') The result is a _mask_, i.e a list/array of booleans which indicates which elements of a matching list/array we wish to select. Here the mask is `True` only if the `Genre` is `pop`. What makes masks useful is that they share indices with their matching data list/array. So we can now use this mask to filter `pop` rows in `df`!
###Code
df[df['Genre'] == 'pop']
###Output
_____no_output_____
###Markdown
Here we used a mask as argument to the `[]` operator, which returned all the rows where the mask was `True`. This creates an intuitive, terse, and powerful notation. Mastering boolean masks with pandas can greatly simplify your code!Let's take another look at the original one liner: df.loc[(df['Genre'] == 'pop') & (df['Beats.Per.Minute'] < 100), 'Genre'] = 'slow pop'We can notice the use of the `&` symbol. This is because boolean logic operators are themselves overloaded. This means that we can chain masks together. Say we wanted to view rows that qualify as `pop` _or_ were performed by `Katy Perry`:
###Code
df[(df['Genre'] == 'pop') | (df['Artist.Name'] == 'Katy Perry')]
###Output
_____no_output_____
###Markdown
Here we used two masks, chained together with an OR operator (`|`), and used as argument to the `[]` selection operator. The above line is equivalent to the following code: mask1 = (df['Genre'] == 'pop') mask2 = (df['Artist.Name'] == 'Katy Perry') mask1_or_mask2 = mask1 | mask2 pop_or_katy_perry_df = df[mask1_or_mask2] We can basically select anything! However, you might recall that in the complex one liner, we not only _selected_ data using chained boolean masks, but we also _replaced_ some of the selected values: df.loc[(df['Genre'] == 'pop') & (df['Beats.Per.Minute'] < 100), 'Genre'] = 'slow pop' We've seen that updating specific rows is a bad idea using the `[]` notation, so we'll be using `.loc[]` instead here. The trick is that these masks can be fed as first argument of the `.loc[]` method, whilst the second argument can be used to select some columns. In our case, we'd like to select the `Genre` column of the filtered rows, since that's the column we wish to update to `slow pop`. So the complex one liner is equivalent to the following code: pop_mask = (df['Genre'] == 'pop') slow_bpm_mask = (df['Beats.Per.Minute'] < 100) pop_and_slow_bpm_mask = pop_mask & slow_bpm_mask cells_to_update = df.loc[pop_and_slow_bpm_mask, 'Genre'] cells_to_update = 'slow pop' 🧠 There's a lot happening here, so take the time to understand how the code block above corresponds to the long one liner. Don't be worried if boolean masks feel like magic at first, they take a while to get used to! 🧙♀️ 💪💪 Using a single line expression with boolean masks, update the `Genre` of the cells with `Loudness..dB..` >= -2.0 _OR_ `Liveness` > 40.0 as `annoying`. Your function below should return the modified `DataFrame`, and the unit test should pass!
###Code
def replace_annoying(df):
# INSERT YOUR CODE HERE
return df
def test_annoying():
new_df = replace_annoying(df.copy())
n_replaced = new_df.loc[new_df['Genre'] == 'annoying', :].shape[0]
assert n_replaced == 4
print('Success! 🎉')
test_annoying()
###Output
_____no_output_____
###Markdown
2. Cleaning missing dataMissing values are common when exploring tabular or time series data. e.g missing records or temporary sensor failures. These can become a problem if used to train or test machine learning models. Knowing how to remove or fill in these missing values is therefore an essential skill for data scientists. Luckily, pandas offers many useful methods to make this process easier.Let's take our spotify `DataFrame` and add a few random `None` values to imitate a faulty dataset:
###Code
def add_missing_values(df, n):
n_rows, n_columns = df.shape
np.random.seed(1337)
row_indices = np.random.randint(0, n_rows, n)
column_indices = np.random.randint(0, n_columns, n)
index_locations = zip(row_indices, column_indices)
for iloc in index_locations:
df.iloc[iloc] = None
return df
df = pd.read_csv('top50.csv', encoding = "latin")
dirty_df = add_missing_values(df, 10)
###Output
_____no_output_____
###Markdown
We've added 10 missing values to a $50\times14$ matrix ... It's not going to be simple to spot them. We could try to make a boolean masks by checking if the values are null, but pandas makes it easy by supplying the mask directly with the `DataFrame.isna()` method:
###Code
dirty_df.isna().head()
###Output
_____no_output_____
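###Markdown
 Before filtering whole rows, a quick way to see how many values are missing in each column is to sum the boolean mask column-wise. This small check is an addition for illustration and was not part of the original lecture notebook.
###Code
# Count missing values per column: each True counts as 1 when summed
dirty_df.isna().sum()
###Output
_____no_output_____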
###Markdown
We can therefore use it to find the rows with a little help from the `.any()` method:
###Code
# show rows with missing values
dirty_df.loc[df.isna().any(axis=1)]
###Output
_____no_output_____
###Markdown
🧠 Can you explain how the previous cell used boolean masks to return the rows with missing elements?ℹ️ Here, missing values are represented as `NaN`. pandas actually supports several missing data types depending on the `dtype` of the `DataFrame`, and tries to take care of conversions in the background. Being aware of the differences can help when debugging, more details can be found in the [here](https://pandas.pydata.org/pandas-docs/stable/user_guide/missing_data.htmlinteger-dtypes-and-missing-data).Now we see the extent of the damage to our data 😰. The sight of this corrupted dataset is unbearable and therefore we would like to clean the `DataFrame`. Since there is no way for us to guess what the values were, the best we can do to make downstream analysis smoother is to remove all the rows with `NaN` values.We've learned how to set values using boolean masks, but again, pandas makes things easy for us:
###Code
# drop the rows with missing values
clean_df = dirty_df.dropna()
# show rows with missing values
clean_df.loc[clean_df.isna().any(axis=1)]
###Output
_____no_output_____
###Markdown
The `.dropna()` method has magically removed all the dirty rows! More details about this api can be found [here](https://pandas.pydata.org/pandas-docs/stable/user_guide/missing_data.htmldropping-axis-labels-with-missing-data-dropna).Sometimes, we don't want to remove missing data, but replace it with a default value instead. Let's take our `dirty_df` and replace all the `NaN` values in the `Danceability` column with a default value of 80:
###Code
fill_df = dirty_df.copy()
fill_df['Danceability'] = fill_df['Danceability'].fillna(80.0)
fill_df.iloc[[8, 28, 40], [1, 2, 6]]
###Output
_____no_output_____
###Markdown
Groovy 🕺. The `.fillna()` method makes it easy to replace `NaN`s. pandas has versatile tools for missing data, e.g: interpolation of values, or replacing anything with anything really. More details can be found [here](https://pandas.pydata.org/pandas-docs/stable/user_guide/missing_data.html). 3. Combining dataWhether we have loaded datasets separately, or we want to aggregate different selections, we'll often have to combine `DataFrame` objects. There are three main methods available: 3.1 `df.append()`Following the philosophy of keeping notation as close to python as possible, pandas `DataFrame` offers the same `.append()` method as lists.Here, we select two row slices with `.iloc[]`, which we then combine together using `df.append()`.
###Code
df = pd.read_csv('top50.csv', encoding = "latin")
df1 = df.iloc[:3, :3]
df2 = df.iloc[5:8, :3]
df3 = df1.append(df2)
df3
###Output
_____no_output_____
###Markdown
3.2 `pd.concat()` `.concat()` offers more control and flexibility than `.append()`, mainly around column and index conflicts.
###Code
df3 = pd.concat([df1, df2])
df3
###Output
_____no_output_____ |
Keras_TaxiDuration(Kaggles)_MLP.ipynb | ###Markdown
 Data preprocessing: use lists (for text) and arrays (for computation) instead of pandas whenever possible, since pandas is too slow. Observation
###Code
import numpy as np
import pandas as pd
import os
filepath = '/Users/mac/Desktop/Kaggle_datasets/Taxi_Duration/'
filename01 = 'train.csv'
filename02 = 'test.csv'
filename03 = 'sample_submission.csv'
df_train = pd.read_csv(os.path.join(filepath, filename01))
df_test = pd.read_csv(os.path.join(filepath, filename02))
df_ans = pd.read_csv(os.path.join(filepath, filename03))
df_train_copy = df_train
df_test_copy = df_test
df_train_copy.info()
df_train_copy
df_train_copy['delta_long'] = abs(df_train_copy['pickup_longitude']-df_train_copy['dropoff_longitude'])
df_train_copy['delta_la'] = abs(df_train_copy['pickup_latitude']-df_train_copy['dropoff_latitude'])
df_train_copy['dist'] = np.sqrt(df_train_copy['delta_long']**2 + df_train_copy['delta_la']**2)
df_test_copy['delta_long'] = abs(df_test_copy['pickup_longitude']-df_test_copy['dropoff_longitude'])
df_test_copy['delta_la'] = abs(df_test_copy['pickup_latitude']-df_test_copy['dropoff_latitude'])
df_test_copy['dist'] = np.sqrt(df_test_copy['delta_long']**2 + df_test_copy['delta_la']**2)
import matplotlib.pyplot as plt
plt.scatter(df_train_copy.dist, df_train_copy.trip_duration, alpha=0.1)
plt.show()
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
sns.jointplot(df_train_copy.dist, df_train_copy.trip_duration , data = df_train_copy)
plt.show()
df_train_copy[df_train_copy.dist>2]
df_train_copy[df_train_copy.trip_duration>3000]
df_train_copy['vendor_id'].value_counts()
df_train_copy['store_and_fwd_flag'].value_counts()
df_train_copy.columns
df_train_copy['trip_duration'].describe()
df_train_copy['dist'].describe()
###Output
_____no_output_____
###Markdown
 Convert the pickup time into numbers, keeping only the month and hour. Hard-won lesson: lists are much faster than pandas
###Code
from datetime import datetime
t = datetime.strptime(df_train_copy['pickup_datetime'][0], '%Y-%m-%d %H:%M:%S')
# Quick check on the first row only: the parsed month and hour
# (assigning to a not-yet-existing 'month' column via chained indexing would fail,
#  so the full columns are built from lists in the next cell instead)
print(t.month)
print(t.hour)
from datetime import datetime
pickup_datetime = df_train_copy['pickup_datetime'].tolist()
month = []
hour = []
for i in range(len(df_train_copy)):
t = datetime.strptime(pickup_datetime[i],'%Y-%m-%d %H:%M:%S')
month.append(t.month)
hour.append(t.hour)
if i % 100000 == 0:
print('already: ', i)
df_train_copy['month'] = month
df_train_copy['hour'] = hour
df_train_copy
from datetime import datetime
pickup_datetime_test = df_test_copy['pickup_datetime'].tolist()
month = []
hour = []
for i in range(len(df_test_copy)):
t = datetime.strptime(pickup_datetime_test[i],'%Y-%m-%d %H:%M:%S')
month.append(t.month)
hour.append(t.hour)
if i % 100000 == 0:
print('already: ', i)
df_test_copy['month'] = month
df_test_copy['hour'] = hour
df_test_copy
###Output
_____no_output_____
###Markdown
 Collect the required features
###Code
df_train_para = df_train_copy[['vendor_id', 'passenger_count','store_and_fwd_flag',
'trip_duration','dist','month','hour']]
df_test_para = df_test_copy[['vendor_id', 'passenger_count','store_and_fwd_flag',
'dist','month','hour']]
df_train_para['month'].value_counts()
sns.boxplot(df_train_para['hour'], df_train_para['trip_duration'])
plt.show()
df_test_para['month'].value_counts()
###Output
_____no_output_____
###Markdown
 Remove outliers
###Code
df_train_s = df_train_para[(df_train_para.trip_duration < 2500) &
(df_train_para.dist < 0.04) &
(df_train_para.dist > 0.01) ]
import matplotlib.pyplot as plt
plt.scatter(df_train_s.dist, df_train_s.trip_duration, alpha=0.01)
plt.show()
sns.boxplot(df_train_s['hour'], df_train_s['trip_duration'])
plt.show()
sns.boxplot(df_train_s['month'], df_train_s['trip_duration'])
plt.show()
sns.boxplot(df_train_s['passenger_count'], df_train_s['trip_duration'])
plt.show()
df_train_s['passenger_count'].value_counts()
df_train_s['trip_duration'].describe()
df_train_s['dist'].describe()
df_test_para
df_test_para['vendor_id'].value_counts()
df_test_para['store_and_fwd_flag'].value_counts()
df_test_para['dist'].describe()
df_ans.head()
df_ans[df_ans.id == 0]
###Output
_____no_output_____
###Markdown
 Create features and labels: one-hot encode vendor, flag, month, and hour
###Code
df_train_feature = df_train_s[['vendor_id','store_and_fwd_flag','dist','month','hour']]
df_train_label = df_train_s['trip_duration']
df_test_feature = df_test_para[['vendor_id','store_and_fwd_flag','dist','month','hour']]
df_train_feature = pd.get_dummies(data=df_train_feature,
columns=['vendor_id','store_and_fwd_flag',
'month','hour'])
df_test_feature = pd.get_dummies(data=df_test_feature,
columns=['vendor_id','store_and_fwd_flag',
'month','hour'])
df_train_feature.head()
df_train_feature.columns
df_train_feature['dist'].describe()
df_test_feature.head()
df_test_feature.columns
df_test_feature['dist'].describe()
train_feature = df_train_feature.values
test_feature = df_test_feature.values
train_label = df_train_label.values
from sklearn import preprocessing
min_max_scaler = preprocessing.MinMaxScaler()
train_feature_trans = min_max_scaler.fit_transform(train_feature.reshape(-1,35))
test_feature_trans = min_max_scaler.fit_transform(test_feature.reshape(-1,35))
train_feature_trans[0]
train_feature.shape
test_feature_trans[0]
train_label = train_label.reshape(-1,1)
train_label
###Output
_____no_output_____
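###Markdown
 A small sanity check (added for illustration, not in the original notebook): one-hot encoding the train and test sets separately only works if both contain the same category values, so it is worth confirming that the two dummy matrices ended up with identical columns.
###Code
# If this prints False, the dummy columns would need to be aligned (e.g. with DataFrame.reindex)
print(df_train_feature.columns.equals(df_test_feature.columns))
###Output
_____no_output_____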
###Markdown
 Time to run the model!!
###Code
import matplotlib.pyplot as plt
def show_train_history(train_history,train,validation):
plt.plot(train_history.history[train])
plt.plot(train_history.history[validation])
plt.title('Train History')
plt.ylabel(train)
plt.xlabel('Epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
######################### Build the model
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.models import load_model
model = Sequential()
model.add(Dense(units=100,
input_dim=35,
kernel_initializer='uniform'))
model.add(Dropout(0.5))
model.add(Dense(units=100,
kernel_initializer='uniform'))
model.add(Dropout(0.5))
model.add(Dense(units=50,
kernel_initializer='uniform'))
model.add(Dropout(0.5))
model.add(Dense(units=1))
print(model.summary())
#Reload saved weights so training can be repeated (fine-tuning)
weights_path = 'Savemodels/TaxiDuration(Kaggles)_MLP.h5'
model.load_weights(weights_path)
model.compile(loss='mean_squared_logarithmic_error',
optimizer='adam', metrics=['accuracy'])
train_history = model.fit(train_feature, train_label,
validation_split=0.2, epochs=20, batch_size=50000, verbose=2)
######################### Visualize the training history
show_train_history(train_history,'acc','val_acc')
show_train_history(train_history,'loss','val_loss')
#Save the trained weights
model.save_weights("Savemodels/TaxiDuration(Kaggles)_MLP.h5")
print('model saved to disk')
prediction = model.predict(test_feature)
import matplotlib.pyplot as plt
plt.hist(prediction, range=(0,1500), bins = 20)
plt.show()
prediction[0]
prediction<0
df_ans['trip_duration'] = prediction
df_ans
df_ans['trip_duration'].describe()
df_ans[df_ans['trip_duration']>900]
df_ans[df_ans.trip_duration<0]
ans = df_ans['trip_duration'].tolist()
for i in range(len(ans)):
if ans[i] < 0:
ans[i] = 1
df_ans['trip_duration'] = ans
df_ans[df_ans['id']==0]
df_ans.to_csv('TaxiDuration_ans.csv',mode = 'w', index=False)
###Output
_____no_output_____ |
resources/aci/rest-api/exercises/examples/apic_rest_exercises_example_2.ipynb | ###Markdown
ACI REST API Exercises--- Authenticate to ACI and get a cookie (without a requests.sessions object)
###Code
# Import HTTP libraries
# Disable certificate warnings
# Setup authentication constants
APIC_URL = 'https://sandboxapicdc.cisco.com/api'
USER = 'admin'
PASSWORD = 'ciscopsdt'
# Create JSON authentication payload
# Create XML authentication payload
# Create JSON login function
def apic_login():
pass
# Display JSON cookie
# Prepare cookie for reuse
# Return cookie
# Create XML login function
def xml_apic_login():
pass
# Display XML cookie
# Prepare cookie for reuse
# Return cookie
###Output
_____no_output_____
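###Markdown
 A minimal sketch of what the JSON login could look like, assuming the `requests` library, the standard APIC `/aaaLogin.json` endpoint, and that certificate verification is disabled for the public sandbox (whose certificate is self-signed). This is only an illustrative sketch, not the exercise solution expected above; `sketch_apic_login` is a hypothetical helper name.
###Code
# Sketch only: JSON authentication against the APIC sandbox defined in the constants above
import requests
import urllib3
urllib3.disable_warnings()  # suppress warnings caused by the self-signed certificate
def sketch_apic_login():
    # Standard APIC login payload: user name and password wrapped in aaaUser attributes
    payload = {'aaaUser': {'attributes': {'name': USER, 'pwd': PASSWORD}}}
    response = requests.post(f'{APIC_URL}/aaaLogin.json', json=payload, verify=False)
    # The session cookie comes back as 'APIC-cookie'; reuse it on later requests
    cookie = {'APIC-cookie': response.cookies.get('APIC-cookie')}
    return cookie
###Output
_____no_output_____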
###Markdown
 --- Create a new ACI object using an HTTP payload body (no DN in the URL) Add a query parameter to return a response body with new object details
###Code
# Create and send API request
# Display status code and reason
# Display response body
###Output
_____no_output_____
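###Markdown
 A hedged sketch of one way this could look, assuming a tenant (`fvTenant`) as the object being created, the hypothetical `sketch_apic_login()` helper from the sketch above, and the `rsp-subtree=modified` query parameter to ask the APIC to echo the created object back in the response. The exact object class and URI expected by the exercise may differ.
###Code
# Sketch only: create a tenant by POSTing to the generic /mo URI, carrying the DN in the payload
import requests
cookie = sketch_apic_login()  # assumption: reuse the cookie from the login sketch
tenant_payload = {'fvTenant': {'attributes': {'dn': 'uni/tn-Example_Tenant',
                                              'name': 'Example_Tenant'}}}
response = requests.post(f'{APIC_URL}/mo.json',
                         params={'rsp-subtree': 'modified'},
                         json=tenant_payload,
                         cookies=cookie,
                         verify=False)
print(response.status_code, response.reason)
print(response.json())
###Output
_____no_output_____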
###Markdown
 --- Create a new ACI object using a DN in the URL Add a query parameter to return a response body with new object details
###Code
# Create and send API request
# Display status code and reason
# Display response body
###Output
_____no_output_____
###Markdown
 --- Get an ACI object using a class URL Add a query parameter to filter by object name
###Code
# Create and send API request
# Display status code and reason
# Display response body
###Output
_____no_output_____
###Markdown
--- Get an ACI object using a DN URL
###Code
# Create and send API request
# Display status code and reason
# Display response body
###Output
_____no_output_____
###Markdown
--- Pause for review
###Code
_ = input('Press Return/Enter to continue ')
###Output
_____no_output_____
###Markdown
 --- Delete an ACI object using a payload body Add a query parameter to return a response body with new object details
###Code
# Create and send API request
# Display status code and reason
# Display response body
###Output
_____no_output_____
###Markdown
--- Delete an ACI object using a DN URL
###Code
# Create and send API request
# Display status code and reason
# Display response body
###Output
_____no_output_____ |
Assignment/vacation_assignment.ipynb | ###Markdown
Q1-1 Q1-2
###Code
from selenium import webdriver
driver=webdriver.Chrome("C://Users/minji hong/chromedriver/chromedriver.exe")
driver.get("https://twitter.com/search?q=%23asiancup")
driver.implicitly_wait(3)
###Output
_____no_output_____
###Markdown
 Q2-1 Query all tables and columns that exist in products.db.
###Code
import sqlite3 as lite
import pandas as pd
db = lite.connect('./products.db')
query = """SELECT * FROM sqlite_master
;"""
pd.read_sql(query,db)
###Output
_____no_output_____
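###Markdown
 `sqlite_master` returns the CREATE statements, which embed the column definitions. As a small additional sketch (not part of the assignment answer), `PRAGMA table_info(...)` lists the columns of one table explicitly; the `Customers` table used below is assumed to exist.
###Code
# List the columns of a single table explicitly
pd.read_sql("PRAGMA table_info(Customers);", db)
###Output
_____no_output_____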
###Markdown
 Q2-2 Query the sum of the order quantity (Quantity) for all orders whose Customer's Country is "Germany" or whose City is "London".
###Code
query="""
SELECT SUM(Quantity) AS TotalQuantity
FROM OrderDetails
WHERE
OrderID IN
(SELECT OrderID
FROM Orders
WHERE
CustomerID IN(SELECT CustomerID
FROM Customers
Where Country =='Germany' OR City == 'London'))
;
"""
pd.read_sql(query,db)
###Output
_____no_output_____
###Markdown
 Q2-3 Query the total number of orders per month, the total quantity ordered, and the average order amount (rounded at the second decimal place), sorted in descending order by the total number of orders.
###Code
query = """
SELECT substr(O.OrderDate,1,7)'MONTH',ProductID,Quantity
FROM
OrderDetails D
JOIN
Orders O
ON
O.OrderID = D.OrderID
GROUP BY
substr(O.OrderDate,1,7)
;
"""
pd.read_sql(query, db).head()
#### Order amount: take the price from the Products table, joined on ProductID
query="""
SELECT ProductID,ProductName,Price
FROM Products
WHERE ProductID IN
(SELECT ProductID
FROM OrderDetails)
"""
pd.read_sql(query,db)
query = """
SELECT *
FROM
OrderDetails D
JOIN
Products P
ON
P.ProductID = D.ProductID
;
"""
pd.read_sql(query, db).head()
query = """
SELECT Quantity,ProductName,Price
FROM
OrderDetails D
JOIN
Products P
ON
P.ProductID = D.ProductID
;
"""
pd.read_sql(query, db).head()
###Output
_____no_output_____ |
Week 12/Networks with Parallel Concatenations (GoogLeNet).ipynb | ###Markdown
The following additional libraries are needed to run thisnotebook. Note that running on Colab is experimental, please report a Githubissue if you have any problem.
###Code
!pip install d2l==0.17.2
###Output
Requirement already satisfied: d2l==0.17.2 in /usr/local/lib/python3.7/dist-packages (0.17.2)
Requirement already satisfied: requests==2.25.1 in /usr/local/lib/python3.7/dist-packages (from d2l==0.17.2) (2.25.1)
Requirement already satisfied: matplotlib==3.3.3 in /usr/local/lib/python3.7/dist-packages (from d2l==0.17.2) (3.3.3)
Requirement already satisfied: numpy==1.18.5 in /usr/local/lib/python3.7/dist-packages (from d2l==0.17.2) (1.18.5)
Requirement already satisfied: jupyter==1.0.0 in /usr/local/lib/python3.7/dist-packages (from d2l==0.17.2) (1.0.0)
Requirement already satisfied: pandas==1.2.2 in /usr/local/lib/python3.7/dist-packages (from d2l==0.17.2) (1.2.2)
Requirement already satisfied: notebook in /usr/local/lib/python3.7/dist-packages (from jupyter==1.0.0->d2l==0.17.2) (5.3.1)
Requirement already satisfied: qtconsole in /usr/local/lib/python3.7/dist-packages (from jupyter==1.0.0->d2l==0.17.2) (5.2.2)
Requirement already satisfied: nbconvert in /usr/local/lib/python3.7/dist-packages (from jupyter==1.0.0->d2l==0.17.2) (5.6.1)
Requirement already satisfied: jupyter-console in /usr/local/lib/python3.7/dist-packages (from jupyter==1.0.0->d2l==0.17.2) (5.2.0)
Requirement already satisfied: ipywidgets in /usr/local/lib/python3.7/dist-packages (from jupyter==1.0.0->d2l==0.17.2) (7.6.5)
Requirement already satisfied: ipykernel in /usr/local/lib/python3.7/dist-packages (from jupyter==1.0.0->d2l==0.17.2) (4.10.1)
Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.7/dist-packages (from matplotlib==3.3.3->d2l==0.17.2) (1.3.2)
Requirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.3 in /usr/local/lib/python3.7/dist-packages (from matplotlib==3.3.3->d2l==0.17.2) (3.0.6)
Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.7/dist-packages (from matplotlib==3.3.3->d2l==0.17.2) (0.11.0)
Requirement already satisfied: pillow>=6.2.0 in /usr/local/lib/python3.7/dist-packages (from matplotlib==3.3.3->d2l==0.17.2) (7.1.2)
Requirement already satisfied: python-dateutil>=2.1 in /usr/local/lib/python3.7/dist-packages (from matplotlib==3.3.3->d2l==0.17.2) (2.8.2)
Requirement already satisfied: pytz>=2017.3 in /usr/local/lib/python3.7/dist-packages (from pandas==1.2.2->d2l==0.17.2) (2018.9)
Requirement already satisfied: urllib3<1.27,>=1.21.1 in /usr/local/lib/python3.7/dist-packages (from requests==2.25.1->d2l==0.17.2) (1.24.3)
Requirement already satisfied: chardet<5,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from requests==2.25.1->d2l==0.17.2) (3.0.4)
Requirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.7/dist-packages (from requests==2.25.1->d2l==0.17.2) (2.10)
Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.7/dist-packages (from requests==2.25.1->d2l==0.17.2) (2021.10.8)
Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.7/dist-packages (from python-dateutil>=2.1->matplotlib==3.3.3->d2l==0.17.2) (1.15.0)
Requirement already satisfied: tornado>=4.0 in /usr/local/lib/python3.7/dist-packages (from ipykernel->jupyter==1.0.0->d2l==0.17.2) (5.1.1)
Requirement already satisfied: ipython>=4.0.0 in /usr/local/lib/python3.7/dist-packages (from ipykernel->jupyter==1.0.0->d2l==0.17.2) (5.5.0)
Requirement already satisfied: traitlets>=4.1.0 in /usr/local/lib/python3.7/dist-packages (from ipykernel->jupyter==1.0.0->d2l==0.17.2) (5.1.1)
Requirement already satisfied: jupyter-client in /usr/local/lib/python3.7/dist-packages (from ipykernel->jupyter==1.0.0->d2l==0.17.2) (5.3.5)
Requirement already satisfied: pexpect in /usr/local/lib/python3.7/dist-packages (from ipython>=4.0.0->ipykernel->jupyter==1.0.0->d2l==0.17.2) (4.8.0)
Requirement already satisfied: setuptools>=18.5 in /usr/local/lib/python3.7/dist-packages (from ipython>=4.0.0->ipykernel->jupyter==1.0.0->d2l==0.17.2) (57.4.0)
Requirement already satisfied: pygments in /usr/local/lib/python3.7/dist-packages (from ipython>=4.0.0->ipykernel->jupyter==1.0.0->d2l==0.17.2) (2.6.1)
Requirement already satisfied: decorator in /usr/local/lib/python3.7/dist-packages (from ipython>=4.0.0->ipykernel->jupyter==1.0.0->d2l==0.17.2) (4.4.2)
Requirement already satisfied: prompt-toolkit<2.0.0,>=1.0.4 in /usr/local/lib/python3.7/dist-packages (from ipython>=4.0.0->ipykernel->jupyter==1.0.0->d2l==0.17.2) (1.0.18)
Requirement already satisfied: pickleshare in /usr/local/lib/python3.7/dist-packages (from ipython>=4.0.0->ipykernel->jupyter==1.0.0->d2l==0.17.2) (0.7.5)
Requirement already satisfied: simplegeneric>0.8 in /usr/local/lib/python3.7/dist-packages (from ipython>=4.0.0->ipykernel->jupyter==1.0.0->d2l==0.17.2) (0.8.1)
Requirement already satisfied: wcwidth in /usr/local/lib/python3.7/dist-packages (from prompt-toolkit<2.0.0,>=1.0.4->ipython>=4.0.0->ipykernel->jupyter==1.0.0->d2l==0.17.2) (0.2.5)
Requirement already satisfied: ipython-genutils~=0.2.0 in /usr/local/lib/python3.7/dist-packages (from ipywidgets->jupyter==1.0.0->d2l==0.17.2) (0.2.0)
Requirement already satisfied: jupyterlab-widgets>=1.0.0 in /usr/local/lib/python3.7/dist-packages (from ipywidgets->jupyter==1.0.0->d2l==0.17.2) (1.0.2)
Requirement already satisfied: nbformat>=4.2.0 in /usr/local/lib/python3.7/dist-packages (from ipywidgets->jupyter==1.0.0->d2l==0.17.2) (5.1.3)
Requirement already satisfied: widgetsnbextension~=3.5.0 in /usr/local/lib/python3.7/dist-packages (from ipywidgets->jupyter==1.0.0->d2l==0.17.2) (3.5.2)
Requirement already satisfied: jsonschema!=2.5.0,>=2.4 in /usr/local/lib/python3.7/dist-packages (from nbformat>=4.2.0->ipywidgets->jupyter==1.0.0->d2l==0.17.2) (4.3.3)
Requirement already satisfied: jupyter-core in /usr/local/lib/python3.7/dist-packages (from nbformat>=4.2.0->ipywidgets->jupyter==1.0.0->d2l==0.17.2) (4.9.1)
Requirement already satisfied: importlib-metadata in /usr/local/lib/python3.7/dist-packages (from jsonschema!=2.5.0,>=2.4->nbformat>=4.2.0->ipywidgets->jupyter==1.0.0->d2l==0.17.2) (4.10.0)
Requirement already satisfied: importlib-resources>=1.4.0 in /usr/local/lib/python3.7/dist-packages (from jsonschema!=2.5.0,>=2.4->nbformat>=4.2.0->ipywidgets->jupyter==1.0.0->d2l==0.17.2) (5.4.0)
Requirement already satisfied: typing-extensions in /usr/local/lib/python3.7/dist-packages (from jsonschema!=2.5.0,>=2.4->nbformat>=4.2.0->ipywidgets->jupyter==1.0.0->d2l==0.17.2) (3.10.0.2)
Requirement already satisfied: attrs>=17.4.0 in /usr/local/lib/python3.7/dist-packages (from jsonschema!=2.5.0,>=2.4->nbformat>=4.2.0->ipywidgets->jupyter==1.0.0->d2l==0.17.2) (21.4.0)
Requirement already satisfied: pyrsistent!=0.17.0,!=0.17.1,!=0.17.2,>=0.14.0 in /usr/local/lib/python3.7/dist-packages (from jsonschema!=2.5.0,>=2.4->nbformat>=4.2.0->ipywidgets->jupyter==1.0.0->d2l==0.17.2) (0.18.0)
Requirement already satisfied: zipp>=3.1.0 in /usr/local/lib/python3.7/dist-packages (from importlib-resources>=1.4.0->jsonschema!=2.5.0,>=2.4->nbformat>=4.2.0->ipywidgets->jupyter==1.0.0->d2l==0.17.2) (3.7.0)
Requirement already satisfied: jinja2 in /usr/local/lib/python3.7/dist-packages (from notebook->jupyter==1.0.0->d2l==0.17.2) (2.11.3)
Requirement already satisfied: Send2Trash in /usr/local/lib/python3.7/dist-packages (from notebook->jupyter==1.0.0->d2l==0.17.2) (1.8.0)
Requirement already satisfied: terminado>=0.8.1 in /usr/local/lib/python3.7/dist-packages (from notebook->jupyter==1.0.0->d2l==0.17.2) (0.12.1)
Requirement already satisfied: pyzmq>=13 in /usr/local/lib/python3.7/dist-packages (from jupyter-client->ipykernel->jupyter==1.0.0->d2l==0.17.2) (22.3.0)
Requirement already satisfied: ptyprocess in /usr/local/lib/python3.7/dist-packages (from terminado>=0.8.1->notebook->jupyter==1.0.0->d2l==0.17.2) (0.7.0)
Requirement already satisfied: MarkupSafe>=0.23 in /usr/local/lib/python3.7/dist-packages (from jinja2->notebook->jupyter==1.0.0->d2l==0.17.2) (2.0.1)
Requirement already satisfied: pandocfilters>=1.4.1 in /usr/local/lib/python3.7/dist-packages (from nbconvert->jupyter==1.0.0->d2l==0.17.2) (1.5.0)
Requirement already satisfied: bleach in /usr/local/lib/python3.7/dist-packages (from nbconvert->jupyter==1.0.0->d2l==0.17.2) (4.1.0)
Requirement already satisfied: testpath in /usr/local/lib/python3.7/dist-packages (from nbconvert->jupyter==1.0.0->d2l==0.17.2) (0.5.0)
Requirement already satisfied: defusedxml in /usr/local/lib/python3.7/dist-packages (from nbconvert->jupyter==1.0.0->d2l==0.17.2) (0.7.1)
Requirement already satisfied: entrypoints>=0.2.2 in /usr/local/lib/python3.7/dist-packages (from nbconvert->jupyter==1.0.0->d2l==0.17.2) (0.3)
Requirement already satisfied: mistune<2,>=0.8.1 in /usr/local/lib/python3.7/dist-packages (from nbconvert->jupyter==1.0.0->d2l==0.17.2) (0.8.4)
Requirement already satisfied: webencodings in /usr/local/lib/python3.7/dist-packages (from bleach->nbconvert->jupyter==1.0.0->d2l==0.17.2) (0.5.1)
Requirement already satisfied: packaging in /usr/local/lib/python3.7/dist-packages (from bleach->nbconvert->jupyter==1.0.0->d2l==0.17.2) (21.3)
Requirement already satisfied: qtpy in /usr/local/lib/python3.7/dist-packages (from qtconsole->jupyter==1.0.0->d2l==0.17.2) (2.0.0)
###Markdown
 Networks with Parallel Concatenations (GoogLeNet):label:`sec_googlenet` In 2014, *GoogLeNet* won the ImageNet Challenge, proposing a structure that combined the strengths of NiN and paradigms of repeated blocks :cite:`Szegedy.Liu.Jia.ea.2015`. One focus of the paper was to address the question of which sized convolution kernels are best. After all, previous popular networks employed choices as small as $1 \times 1$ and as large as $11 \times 11$. One insight in this paper was that sometimes it can be advantageous to employ a combination of variously-sized kernels. In this section, we will introduce GoogLeNet, presenting a slightly simplified version of the original model: we omit a few ad-hoc features that were added to stabilize training but are unnecessary now with better training algorithms available. (**Inception Blocks**) The basic convolutional block in GoogLeNet is called an *Inception block*, likely named due to a quote from the movie *Inception* ("We need to go deeper"), which launched a viral meme. ![Structure of the Inception block.](http://d2l.ai/_images/inception.svg) :label:`fig_inception` As depicted in :numref:`fig_inception`, the inception block consists of four parallel paths. The first three paths use convolutional layers with window sizes of $1\times 1$, $3\times 3$, and $5\times 5$ to extract information from different spatial sizes. The middle two paths perform a $1\times 1$ convolution on the input to reduce the number of channels, reducing the model's complexity. The fourth path uses a $3\times 3$ maximum pooling layer, followed by a $1\times 1$ convolutional layer to change the number of channels. The four paths all use appropriate padding to give the input and output the same height and width. Finally, the outputs along each path are concatenated along the channel dimension and comprise the block's output. The commonly-tuned hyperparameters of the Inception block are the number of output channels per layer.
###Code
import tensorflow as tf
from d2l import tensorflow as d2l
class Inception(tf.keras.Model):
# `c1`--`c4` are the number of output channels for each path
def __init__(self, c1, c2, c3, c4):
super().__init__()
# Path 1 is a single 1 x 1 convolutional layer
self.p1_1 = tf.keras.layers.Conv2D(c1, 1, activation='relu')
# Path 2 is a 1 x 1 convolutional layer followed by a 3 x 3
# convolutional layer
self.p2_1 = tf.keras.layers.Conv2D(c2[0], 1, activation='relu')
self.p2_2 = tf.keras.layers.Conv2D(c2[1], 3, padding='same',
activation='relu')
# Path 3 is a 1 x 1 convolutional layer followed by a 5 x 5
# convolutional layer
self.p3_1 = tf.keras.layers.Conv2D(c3[0], 1, activation='relu')
self.p3_2 = tf.keras.layers.Conv2D(c3[1], 5, padding='same',
activation='relu')
# Path 4 is a 3 x 3 maximum pooling layer followed by a 1 x 1
# convolutional layer
self.p4_1 = tf.keras.layers.MaxPool2D(3, 1, padding='same')
self.p4_2 = tf.keras.layers.Conv2D(c4, 1, activation='relu')
def call(self, x):
p1 = self.p1_1(x)
p2 = self.p2_2(self.p2_1(x))
p3 = self.p3_2(self.p3_1(x))
p4 = self.p4_2(self.p4_1(x))
# Concatenate the outputs on the channel dimension
return tf.keras.layers.Concatenate()([p1, p2, p3, p4])
###Output
_____no_output_____
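###Markdown
 As a quick sanity check (an addition to the original text), the cell below builds one Inception block with the channel settings used later in the third module and confirms that the four paths concatenate to $64+128+32+32=256$ output channels. The input shape here is arbitrary.
###Code
# Sanity check: the four parallel paths concatenate along the channel axis
blk = Inception(64, (96, 128), (16, 32), 32)
X = tf.random.uniform(shape=(1, 28, 28, 192))
print(blk(X).shape)  # expected (1, 28, 28, 256): 64 + 128 + 32 + 32 channels
###Output
_____no_output_____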
###Markdown
 To gain some intuition for why this network works so well, consider the combination of the filters. They explore the image in a variety of filter sizes. This means that details at different extents can be recognized efficiently by filters of different sizes. At the same time, we can allocate different amounts of parameters for different filters. [**GoogLeNet Model**] As shown in :numref:`fig_inception_full`, GoogLeNet uses a stack of a total of 9 inception blocks and global average pooling to generate its estimates. Maximum pooling between inception blocks reduces the dimensionality. The first module is similar to AlexNet and LeNet. The stack of blocks is inherited from VGG and the global average pooling avoids a stack of fully-connected layers at the end. ![The GoogLeNet architecture.](https://github.com/d2l-ai/d2l-tensorflow-colab/blob/master/img/inception-full.svg?raw=1) :label:`fig_inception_full` We can now implement GoogLeNet piece by piece. The first module uses a 64-channel $7\times 7$ convolutional layer.
###Code
def b1():
return tf.keras.models.Sequential([
tf.keras.layers.Conv2D(64, 7, strides=2, padding='same',
activation='relu'),
tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same')])
###Output
_____no_output_____
###Markdown
 The second module uses two convolutional layers: first, a 64-channel $1\times 1$ convolutional layer, then a $3\times 3$ convolutional layer that triples the number of channels. This corresponds to the second path in the Inception block.
###Code
def b2():
return tf.keras.Sequential([
tf.keras.layers.Conv2D(64, 1, activation='relu'),
tf.keras.layers.Conv2D(192, 3, padding='same', activation='relu'),
tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same')])
###Output
_____no_output_____
###Markdown
 The third module connects two complete Inception blocks in series. The number of output channels of the first Inception block is $64+128+32+32=256$, and the number-of-output-channel ratio among the four paths is $64:128:32:32=2:4:1:1$. The second and third paths first reduce the number of input channels to $96/192=1/2$ and $16/192=1/12$, respectively, and then connect the second convolutional layer. The number of output channels of the second Inception block is increased to $128+192+96+64=480$, and the number-of-output-channel ratio among the four paths is $128:192:96:64 = 4:6:3:2$. The second and third paths first reduce the number of input channels to $128/256=1/2$ and $32/256=1/8$, respectively.
###Code
def b3():
return tf.keras.models.Sequential([
Inception(64, (96, 128), (16, 32), 32),
Inception(128, (128, 192), (32, 96), 64),
tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same')])
###Output
_____no_output_____
###Markdown
The fourth module is more complicated. It connects five Inception blocks in series, and they have $192+208+48+64=512$, $160+224+64+64=512$, $128+256+64+64=512$, $112+288+64+64=528$, and $256+320+128+128=832$ output channels, respectively. The number of channels assigned to these paths is similar to that in the third module: the second path with the $3\times 3$ convolutional layer outputs the largest number of channels, followed by the first path with only the $1\times 1$ convolutional layer, the third path with the $5\times 5$ convolutional layer, and the fourth path with the $3\times 3$ maximum pooling layer. The second and third paths will first reduce the number of channels according to the ratio. These ratios are slightly different in different Inception blocks.
###Code
def b4():
return tf.keras.Sequential([
Inception(192, (96, 208), (16, 48), 64),
Inception(160, (112, 224), (24, 64), 64),
Inception(128, (128, 256), (24, 64), 64),
Inception(112, (144, 288), (32, 64), 64),
Inception(256, (160, 320), (32, 128), 128),
tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same')])
###Output
_____no_output_____
###Markdown
The fifth module has two Inception blocks with $256+320+128+128=832$ and $384+384+128+128=1024$ output channels. The number of channels assigned to each path is the same as that in the third and fourth modules, but differs in specific values. It should be noted that the fifth block is followed by the output layer. This block uses the global average pooling layer to change the height and width of each channel to 1, just as in NiN. Finally, we turn the output into a two-dimensional array followed by a fully-connected layer whose number of outputs is the number of label classes.
###Code
def b5():
return tf.keras.Sequential([
Inception(256, (160, 320), (32, 128), 128),
Inception(384, (192, 384), (48, 128), 128),
tf.keras.layers.GlobalAvgPool2D(),
tf.keras.layers.Flatten()
])
# Recall that this has to be a function that will be passed to
# `d2l.train_ch6()` so that model building/compiling need to be within
# `strategy.scope()` in order to utilize the CPU/GPU devices that we have
def net():
return tf.keras.Sequential([b1(), b2(), b3(), b4(), b5(),
tf.keras.layers.Dense(10)])
###Output
_____no_output_____
###Markdown
The GoogLeNet model is computationally complex, so it is not as easy to modify the number of channels as in VGG. [**To have a reasonable training time on Fashion-MNIST, we reduce the input height and width from 224 to 96.**] This simplifies the computation. The changes in the shape of the output between the various modules are demonstrated below.
###Code
X = tf.random.uniform(shape=(1, 96, 96, 1))
for layer in net().layers:
X = layer(X)
print(layer.__class__.__name__, 'output shape:\t', X.shape)
###Output
Sequential output shape: (1, 24, 24, 64)
Sequential output shape: (1, 12, 12, 192)
Sequential output shape: (1, 6, 6, 480)
Sequential output shape: (1, 3, 3, 832)
Sequential output shape: (1, 1024)
Dense output shape: (1, 10)
###Markdown
[**Training**] As before, we train our model using the Fashion-MNIST dataset. We transform it to $96 \times 96$ pixel resolution before invoking the training procedure.
###Code
lr, num_epochs, batch_size = 0.1, 10, 128
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size, resize=96)
d2l.train_ch6(net, train_iter, test_iter, num_epochs, lr, d2l.try_gpu())
###Output
loss 0.240, train acc 0.909, test acc 0.906
1183.2 examples/sec on /GPU:0
|
02 Grey Wolf Optimizer/02 200 bar Truss/01 Notebooks/GWO-02-02 200 bar Truss Case 1 Individual.ipynb | ###Markdown
Step 2: Define Objective Function
###Code
def Objective_Function(areas):
member_lengths, member_stresses, node_displacements = Truss_solver(areas)
total_area = np.array(areas)
total_member_lengths = []
for length in member_lengths:
total_member_lengths.append(member_lengths[length])
total_member_lengths = np.array(total_member_lengths)
weight = total_area.dot(np.array(total_member_lengths))
weight = weight.sum() * 0.283 # lb/in^3
return (weight, member_stresses, node_displacements)
###Output
_____no_output_____
###Markdown
Step 3: Define Constraints
###Code
def stress_constraint(stress_new):
if stress_new > 30 or stress_new < -30:
stress_counter = 1
else:
stress_counter = 0
return stress_counter
def displacement_constraint(node_displacement_new):
x = node_displacement_new[0]
y = node_displacement_new[1]
if x > 0.5 or x < -0.5:
displacement_counter = 1
elif y > 0.5 or y < -0.5:
displacement_counter = 1
else:
displacement_counter = 0
return displacement_counter
###Output
_____no_output_____
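###Markdown
 A quick illustration of the convention used by the constraint helpers above: each returns 1 when its limit (30 ksi for stress, 0.5 in for displacement) is violated and 0 otherwise, so violations can simply be counted.
###Code
stress_constraint(35), stress_constraint(-10), displacement_constraint([0.7, 0.1]), displacement_constraint([0.1, 0.2])
###Output
_____no_output_____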
###Markdown
Step 4: Define Algorithm Step 4.1: Initialize Parameters
###Code
D = [0.100, 0.347, 0.440, 0.539, 0.954, 1.081, 1.174, 1.333, 1.488,
1.764, 2.142, 2.697, 2.800, 3.131, 3.565, 3.813, 4.805, 5.952, 6.572,
7.192, 8.525, 9.300, 10.850, 13.330, 14.290, 17.170, 19.180, 23.680,
28.080, 33.700]
def closest(list_of_areas, area_values_list):
for i, area_value in enumerate(area_values_list):
idx = (np.abs(list_of_areas - area_value)).argmin()
area_values_list[i] = list_of_areas[idx]
return area_values_list
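# Illustrative example: values are snapped (in place) to the nearest discrete
# section in D, e.g. closest(D, np.array([0.5, 3.0])) -> array([0.539, 3.131])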
# Initialize Wolves
alpha_position = 50
alpha_score = float("inf")
beta_position = 10
beta_score = float("inf")
delta_position = 25
delta_score = float("inf")
# Initialize No. of Wolves (Search Agents)
search_agents = 20
# Total Number of Iterations
max_iterations = 1000
# Upper and Lower Boundary values for the wolves' location
ub = 35
lb = 0.1
# Initialize Random Location of No. of Wolves (Search Agents)
positions = np.empty([search_agents, len(elements)])
for row in range(search_agents):
for i in range(len(positions[0])):
positions[row,i] = np.random.choice(D)
best_alpha_score = []
best_alpha_positions = []
best_beta_positions = []
best_delta_positions = []
%%time
for m in range(0, max_iterations):
for i in range(0, search_agents):
# Return back the search agents that go beyond the boundaries of the search space
# for j in range(search_agents):
# positions[i]=np.clip(positions[i], lb, ub)
# Calculate objective function for each search agent
fitness, stresses_new, node_displacement_new = Objective_Function(positions[i])
        # Constraint 1: member stresses must stay within +/-30 ksi (as implemented in stress_constraint above)
        stress_counter = 0
        for j in stresses_new:
            stress_counter = stress_counter + stress_constraint(stresses_new[j])
        # Constraint 2: node displacements must stay within +/-0.5 in (as implemented in displacement_constraint above)
displacement_counter = 0
for k in node_displacement_new:
displacement_counter = displacement_counter + displacement_constraint(node_displacement_new[k])
if displacement_counter == 0 and stress_counter == 0:
if fitness < alpha_score:
alpha_score = fitness
alpha_position = positions[i].copy()
if (fitness > alpha_score and fitness < beta_score):
beta_score = fitness
beta_position = positions[i].copy()
if (fitness > alpha_score and fitness > beta_score and fitness < delta_score):
delta_score = fitness
delta_position = positions[i].copy()
else:
alpha_position = alpha_position
beta_position = beta_position
delta_position = delta_position
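    # 'a' decays linearly from 2 to 0 over the iterations,
    # shifting the wolves from exploration toward exploitation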
a = 2 - m*(2/max_iterations)
for i in range(0,search_agents):
# compute distance of alpha
r1 = random()
r2 = random()
a1 = 2*a*r1-a
c1 = 2*r2
distance_alpha = abs(c1*alpha_position-positions[i])
x1 = alpha_position - a1*distance_alpha
# compute distance of beta
r1 = random()
r2 = random()
a2 = 2*a*r1-a
c2 = 2*r2
distance_beta = abs(c2*beta_position-positions[i])
x2 = beta_position - a2*distance_beta
# compute distance of delta
r1 = random()
r2 = random()
a3 = 2*a*r1-a
c3 = 2*r2
distance_delta = abs(c3*delta_position-positions[i])
x3 = delta_position - a3*distance_delta
# in the middle (average) of the 3 wolf's distance (encircle), the prey is found
positions[i] = (x1 + x2 + x3) / 3
closest(D, positions[i])
positions_old = positions.copy()
best_alpha_score.append(alpha_score)
# print("Loop number: {}".format(m+1))
fig, axs = plt.subplots(2,1)
fig.set_figheight(12)
fig.set_figwidth(20)
axs[0].plot(best_alpha_score, marker = 'o')
axs[0].grid(True)
axs[0].set_xlabel('Iterations')
axs[0].set_ylabel('Weight')
axs[1].plot(best_alpha_positions, label = 'Alpha', marker = 'o')
axs[1].plot(best_beta_positions, label = 'Beta', marker = 'o')
axs[1].plot(best_delta_positions, label = 'Delta', marker = 'o')
axs[1].grid(True)
axs[1].set_xlabel('Iterations')
axs[1].set_ylabel('Area')
axs[1].legend()
fig.tight_layout()
positions[-1]
cross_area = {key: positions[-1][key-1] for key in elements}
Two_Hundred_Truss_Case_1 = Truss_2D(nodes = nodes,
elements= elements,
supports= supports,
forces = forces,
elasticity= elasticity,
cross_area= cross_area)
Two_Hundred_Truss_Case_1.Solve()
Two_Hundred_Truss_Case_1.Draw_Truss_Displacements(figure_size=[15,25])
Two_Hundred_Truss_Case_1.displacements_
weight, _, _ = Objective_Function([positions[-1][i-1] for i in elements])
weight
###Output
_____no_output_____ |
colecciones_avanzadas/00_x01.ipynb | ###Markdown
![](https://treyhunner.com/images/list-comprehension-condition.gif)
###Code
l1 = [1,2,3]
def f(n):
return n * 2
for num in l1:
resultado = f(num)
print(resultado)
list(map(f, l1))
list(map(lambda n: n*2 , l1))
nombres_mayus = list(map(lambda s: s.upper() , ["ricardo", "david", "fernando"]))
nombres_mayus
def f(cadena):
return cadena.upper()
nombres_mayus = []
for nombre in ["ricardo", "david", "fernando"]:
procesado = f(nombre)
nombres_mayus.append(procesado)
nombres_mayus
sorted(["ricardo", "david", "fernando"])
###Output
_____no_output_____ |
weight-initialization/weight_initialization_mine.ipynb | ###Markdown
Weight Initialization In this lesson, you'll learn how to find good initial weights for a neural network. Having good initial weights can place the neural network close to the optimal solution. This allows the neural network to reach the best solution more quickly. Testing Weights Dataset To see how different weights perform, we'll test on the same dataset and neural network. Let's go over the dataset and neural network. We'll be using the [MNIST dataset](https://en.wikipedia.org/wiki/MNIST_database) to demonstrate the different initial weights. As a reminder, the MNIST dataset contains images of handwritten numbers, 0-9, with normalized input (0.0 - 1.0). Run the cell below to download and load the MNIST dataset.
###Code
%matplotlib inline
import tensorflow as tf
import helper
from tensorflow.examples.tutorials.mnist import input_data
print('Getting MNIST Dataset...')
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
print('Data Extracted.')
###Output
Getting MNIST Dataset...
WARNING:tensorflow:From <ipython-input-1-e35cd2be1b9d>:9: read_data_sets (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.
Instructions for updating:
Please use alternatives such as official/mnist/dataset.py from tensorflow/models.
WARNING:tensorflow:From /Users/roahuja/anaconda3/lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:260: maybe_download (from tensorflow.contrib.learn.python.learn.datasets.base) is deprecated and will be removed in a future version.
Instructions for updating:
Please write your own downloading logic.
WARNING:tensorflow:From /Users/roahuja/anaconda3/lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:262: extract_images (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.
Instructions for updating:
Please use tf.data to implement this functionality.
Extracting MNIST_data/train-images-idx3-ubyte.gz
WARNING:tensorflow:From /Users/roahuja/anaconda3/lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:267: extract_labels (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.
Instructions for updating:
Please use tf.data to implement this functionality.
Extracting MNIST_data/train-labels-idx1-ubyte.gz
WARNING:tensorflow:From /Users/roahuja/anaconda3/lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:110: dense_to_one_hot (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.
Instructions for updating:
Please use tf.one_hot on tensors.
Extracting MNIST_data/t10k-images-idx3-ubyte.gz
Extracting MNIST_data/t10k-labels-idx1-ubyte.gz
WARNING:tensorflow:From /Users/roahuja/anaconda3/lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:290: DataSet.__init__ (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.
Instructions for updating:
Please use alternatives such as official/mnist/dataset.py from tensorflow/models.
Data Extracted.
###Markdown
Neural Network For the neural network, we'll test on a 3 layer neural network with ReLU activations and an Adam optimizer. The lessons you learn apply to other neural networks, including different activations and optimizers.
###Code
# Save the shapes of weights for each layer
layer_1_weight_shape = (mnist.train.images.shape[1], 256)
layer_2_weight_shape = (256, 128)
layer_3_weight_shape = (128, mnist.train.labels.shape[1])
###Output
_____no_output_____
###Markdown
Initialize Weights Let's start looking at some initial weights. All Zeros or Ones If you follow the principle of [Occam's razor](https://en.wikipedia.org/wiki/Occam's_razor), you might think setting all the weights to 0 or 1 would be the best solution. This is not the case. With every weight the same, all the neurons at each layer are producing the same output. This makes it hard to decide which weights to adjust. Let's compare the loss with all ones and all zero weights using `helper.compare_init_weights`. This function will run two different initial weights on the neural network above for 2 epochs. It will plot the loss for the first 100 batches and print out stats after the 2 epochs (~860 batches). We plot the first 100 batches to better judge which weights performed better at the start. Run the cell below to see the difference between weights of all zeros against all ones.
###Code
all_zero_weights = [
tf.Variable(tf.zeros(layer_1_weight_shape)),
tf.Variable(tf.zeros(layer_2_weight_shape)),
tf.Variable(tf.zeros(layer_3_weight_shape))
]
all_one_weights = [
tf.Variable(tf.ones(layer_1_weight_shape)),
tf.Variable(tf.ones(layer_2_weight_shape)),
tf.Variable(tf.ones(layer_3_weight_shape))
]
helper.compare_init_weights(
mnist,
'All Zeros vs All Ones',
[
(all_zero_weights, 'All Zeros'),
(all_one_weights, 'All Ones')])
###Output
WARNING:tensorflow:From /Users/roahuja/Courses/DeepLearning/Dev/deep-learning/weight-initialization/helper.py:42: softmax_cross_entropy_with_logits (from tensorflow.python.ops.nn_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Future major versions of TensorFlow will allow gradients to flow
into the labels input on backprop by default.
See @{tf.nn.softmax_cross_entropy_with_logits_v2}.
###Markdown
As you can see the accuracy is close to guessing for both zeros and ones, around 10%. The neural network is having a hard time determining which weights need to be changed, since the neurons have the same output for each layer. To avoid neurons with the same output, let's use unique weights. We can also randomly select these weights to avoid being stuck in a local minimum for each run. A good solution for getting these random weights is to sample from a uniform distribution. Uniform Distribution A [uniform distribution](https://en.wikipedia.org/wiki/Uniform_distribution_(continuous%29) has an equal probability of picking any number from a set of numbers. We'll be picking from a continuous distribution, so the chance of picking the same number is low. We'll use TensorFlow's `tf.random_uniform` function to pick random numbers from a uniform distribution.> [`tf.random_uniform(shape, minval=0, maxval=None, dtype=tf.float32, seed=None, name=None)`](https://www.tensorflow.org/api_docs/python/tf/random_uniform)>Outputs random values from a uniform distribution.>The generated values follow a uniform distribution in the range [minval, maxval). The lower bound minval is included in the range, while the upper bound maxval is excluded.>- **shape:** A 1-D integer Tensor or Python array. The shape of the output tensor.- **minval:** A 0-D Tensor or Python value of type dtype. The lower bound on the range of random values to generate. Defaults to 0.- **maxval:** A 0-D Tensor or Python value of type dtype. The upper bound on the range of random values to generate. Defaults to 1 if dtype is floating point.- **dtype:** The type of the output: float32, float64, int32, or int64.- **seed:** A Python integer. Used to create a random seed for the distribution. See tf.set_random_seed for behavior.- **name:** A name for the operation (optional). We can visualize the uniform distribution by using a histogram. Let's map the values from `tf.random_uniform([1000], -3, 3)` to a histogram using the `helper.hist_dist` function. This will be `1000` random float values from `-3` to `3`, excluding the value `3`.
###Code
helper.hist_dist('Random Uniform (minval=-3, maxval=3)', tf.random_uniform([1000], -3, 3))
###Output
_____no_output_____
###Markdown
The histogram used 500 buckets for the 1000 values. Since the chance for any single bucket is the same, there should be around 2 values for each bucket. That's exactly what we see with the histogram. Some buckets have more and some have less, but they trend around 2. Now that you understand the `tf.random_uniform` function, let's apply it to some initial weights. Baseline Let's see how well the neural network trains using the default values for `tf.random_uniform`, where `minval=0.0` and `maxval=1.0`.
###Code
# Default for tf.random_uniform is minval=0 and maxval=1
basline_weights = [
tf.Variable(tf.random_uniform(layer_1_weight_shape)),
tf.Variable(tf.random_uniform(layer_2_weight_shape)),
tf.Variable(tf.random_uniform(layer_3_weight_shape))
]
helper.compare_init_weights(
mnist,
'Baseline',
[(basline_weights, 'tf.random_uniform [0, 1)')])
###Output
_____no_output_____
###Markdown
The loss graph is showing the neural network is learning, which it didn't with all zeros or all ones. We're headed in the right direction. General rule for setting weights The general rule for setting the weights in a neural network is to be close to zero without being too small. A good practice is to start your weights in the range of $[-y, y]$ where $y=1/\sqrt{n}$ ($n$ is the number of inputs to a given neuron). To see if this holds true, let's first center our range over zero. This will give us the range [-1, 1).
###Code
uniform_neg1to1_weights = [
tf.Variable(tf.random_uniform(layer_1_weight_shape, -1, 1)),
tf.Variable(tf.random_uniform(layer_2_weight_shape, -1, 1)),
tf.Variable(tf.random_uniform(layer_3_weight_shape, -1, 1))
]
helper.compare_init_weights(
mnist,
'[0, 1) vs [-1, 1)',
[
(basline_weights, 'tf.random_uniform [0, 1)'),
(uniform_neg1to1_weights, 'tf.random_uniform [-1, 1)')])
###Output
_____no_output_____
###Markdown
We're going in the right direction: the accuracy and loss are better with [-1, 1). We still want smaller weights. How far can we go before it's too small? Too small Let's compare [-0.1, 0.1), [-0.01, 0.01), and [-0.001, 0.001) to see how small is too small. We'll also set `plot_n_batches=None` to show all the batches in the plot.
###Code
uniform_neg01to01_weights = [
tf.Variable(tf.random_uniform(layer_1_weight_shape, -0.1, 0.1)),
tf.Variable(tf.random_uniform(layer_2_weight_shape, -0.1, 0.1)),
tf.Variable(tf.random_uniform(layer_3_weight_shape, -0.1, 0.1))
]
uniform_neg001to001_weights = [
tf.Variable(tf.random_uniform(layer_1_weight_shape, -0.01, 0.01)),
tf.Variable(tf.random_uniform(layer_2_weight_shape, -0.01, 0.01)),
tf.Variable(tf.random_uniform(layer_3_weight_shape, -0.01, 0.01))
]
uniform_neg0001to0001_weights = [
tf.Variable(tf.random_uniform(layer_1_weight_shape, -0.001, 0.001)),
tf.Variable(tf.random_uniform(layer_2_weight_shape, -0.001, 0.001)),
tf.Variable(tf.random_uniform(layer_3_weight_shape, -0.001, 0.001))
]
helper.compare_init_weights(
mnist,
'[-1, 1) vs [-0.1, 0.1) vs [-0.01, 0.01) vs [-0.001, 0.001)',
[
(uniform_neg1to1_weights, '[-1, 1)'),
(uniform_neg01to01_weights, '[-0.1, 0.1)'),
(uniform_neg001to001_weights, '[-0.01, 0.01)'),
(uniform_neg0001to0001_weights, '[-0.001, 0.001)')],
plot_n_batches=None)
###Output
_____no_output_____
###Markdown
Looks like anything [-0.01, 0.01) or smaller is too small. Let's compare this to our typical rule of using the range $y=1/\sqrt{n}$.
###Code
import numpy as np
general_rule_weights = [
tf.Variable(tf.random_uniform(layer_1_weight_shape, -1/np.sqrt(layer_1_weight_shape[0]), 1/np.sqrt(layer_1_weight_shape[0]))),
tf.Variable(tf.random_uniform(layer_2_weight_shape, -1/np.sqrt(layer_2_weight_shape[0]), 1/np.sqrt(layer_2_weight_shape[0]))),
tf.Variable(tf.random_uniform(layer_3_weight_shape, -1/np.sqrt(layer_3_weight_shape[0]), 1/np.sqrt(layer_3_weight_shape[0])))
]
helper.compare_init_weights(
mnist,
'[-0.1, 0.1) vs General Rule',
[
(uniform_neg01to01_weights, '[-0.1, 0.1)'),
(general_rule_weights, 'General Rule')],
plot_n_batches=None)
###Output
_____no_output_____
###Markdown
The range we found and $y=1/\sqrt{n}$ are really close. Since the uniform distribution has the same chance to pick anything in the range, what if we used a distribution that had a higher chance of picking numbers closer to 0? Let's look at the normal distribution. Normal Distribution Unlike the uniform distribution, the [normal distribution](https://en.wikipedia.org/wiki/Normal_distribution) has a higher likelihood of picking numbers close to its mean. To visualize it, let's plot values from TensorFlow's `tf.random_normal` function to a histogram.>[tf.random_normal(shape, mean=0.0, stddev=1.0, dtype=tf.float32, seed=None, name=None)](https://www.tensorflow.org/api_docs/python/tf/random_normal)>Outputs random values from a normal distribution.>- **shape:** A 1-D integer Tensor or Python array. The shape of the output tensor.- **mean:** A 0-D Tensor or Python value of type dtype. The mean of the normal distribution.- **stddev:** A 0-D Tensor or Python value of type dtype. The standard deviation of the normal distribution.- **dtype:** The type of the output.- **seed:** A Python integer. Used to create a random seed for the distribution. See tf.set_random_seed for behavior.- **name:** A name for the operation (optional).
###Code
helper.hist_dist('Random Normal (mean=0.0, stddev=1.0)', tf.random_normal([1000]))
###Output
_____no_output_____
###Markdown
Let's compare the normal distribution against the previous uniform distribution.
###Code
normal_01_weights = [
tf.Variable(tf.random_normal(layer_1_weight_shape, stddev=0.1)),
tf.Variable(tf.random_normal(layer_2_weight_shape, stddev=0.1)),
tf.Variable(tf.random_normal(layer_3_weight_shape, stddev=0.1))
]
helper.compare_init_weights(
mnist,
'Uniform [-0.1, 0.1) vs Normal stddev 0.1',
[
(uniform_neg01to01_weights, 'Uniform [-0.1, 0.1)'),
(normal_01_weights, 'Normal stddev 0.1')])
###Output
_____no_output_____
###Markdown
The normal distribution gave a slight improvement in accuracy and loss. Let's move closer to 0 and drop picked numbers that are more than `x` standard deviations away from the mean. This distribution is called the [Truncated Normal Distribution](https://en.wikipedia.org/wiki/Truncated_normal_distribution%29). Truncated Normal Distribution >[tf.truncated_normal(shape, mean=0.0, stddev=1.0, dtype=tf.float32, seed=None, name=None)](https://www.tensorflow.org/api_docs/python/tf/truncated_normal)>Outputs random values from a truncated normal distribution.>The generated values follow a normal distribution with specified mean and standard deviation, except that values whose magnitude is more than 2 standard deviations from the mean are dropped and re-picked.>- **shape:** A 1-D integer Tensor or Python array. The shape of the output tensor.- **mean:** A 0-D Tensor or Python value of type dtype. The mean of the truncated normal distribution.- **stddev:** A 0-D Tensor or Python value of type dtype. The standard deviation of the truncated normal distribution.- **dtype:** The type of the output.- **seed:** A Python integer. Used to create a random seed for the distribution. See tf.set_random_seed for behavior.- **name:** A name for the operation (optional).
###Code
helper.hist_dist('Truncated Normal (mean=0.0, stddev=1.0)', tf.truncated_normal([1000]))
###Output
_____no_output_____
###Markdown
Again, let's compare these results against the previous (non-truncated) normal distribution.
###Code
trunc_normal_01_weights = [
tf.Variable(tf.truncated_normal(layer_1_weight_shape, stddev=0.1)),
tf.Variable(tf.truncated_normal(layer_2_weight_shape, stddev=0.1)),
tf.Variable(tf.truncated_normal(layer_3_weight_shape, stddev=0.1))
]
helper.compare_init_weights(
mnist,
'Normal vs Truncated Normal',
[
(normal_01_weights, 'Normal'),
(trunc_normal_01_weights, 'Truncated Normal')])
###Output
_____no_output_____
###Markdown
There's no difference between the two, but that's because the neural network we're using is too small. A larger neural network will pick more points on the normal distribution, increasing the likelihood its choices are more than 2 standard deviations from the mean. We've come a long way from the first set of weights we tested. Let's see the difference between the weights we used then and now.
###Code
helper.compare_init_weights(
mnist,
'Baseline vs Truncated Normal',
[
(basline_weights, 'Baseline'),
(trunc_normal_01_weights, 'Truncated Normal')])
###Output
_____no_output_____ |
MAPS/Animations/Diurnal_Composite_Snapshots.ipynb | ###Markdown
Labeling Scheme:
- np.nan: Little Convection
- 1: Shallow Convection
- 2: Transition
- 3: Deep Convection
###Code
labels[np.where(yhat==0)] = 1.0
labels[np.where(yhat==1)] = 3.0
labels[np.where(yhat==2)] = np.nan
labels[np.where(yhat==3)] = 1.0
labels[np.where(yhat==4)] = np.nan
labels[np.where(yhat==5)] = 3.0
labels[np.where(yhat==6)] = 2.0
labels[np.where(yhat==7)] = 2.0
labels[np.where(yhat==8)] = 3.0
labels[np.where(yhat==9)] = 2.0
labels[np.where(yhat==10)] = 3.0
labels[np.where(yhat==11)] = 3.0
count = 0
for i in range(time):
for j in range(lat):
for k in range(lon):
reformed_array[i,j,k] = labels[count]
count = count+1
new_lons, new_lats = np.meshgrid(longitudes, latitudes)
def timing(times):
utc_list = []
lst_list = []
end_times = [':00',':15',':30',':45']
counter = 0
for i in range(times):
thing = i
beg_time = int(thing/4)
if beg_time == 24:
beg_time = 24
ending = end_times[counter]
counter = counter + 1
if counter == 4:
counter = 0
utc_time = str(beg_time)+ending
utc_list.append(utc_time)
return utc_list
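# e.g. timing(8) -> ['0:00', '0:15', '0:30', '0:45', '1:00', '1:15', '1:30', '1:45']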
def conv_movie(label_schemes, X_var, Y_var):
#cmap = matplotlib.colors.ListedColormap(["red", "purple", "blue"])
cmap = matplotlib.colors.ListedColormap(["yellow", "green", "blue"])
norm = matplotlib.colors.BoundaryNorm(np.arange(1,5), cmap.N)
fig, ax1 = plt.subplots(1,1,subplot_kw={'projection':ccrs.Robinson(central_longitude=180)})
fig.set_size_inches(16,12)
feat_list = []
times = timing(len(label_schemes))
for i in range(len(label_schemes)):
label_scheme = np.squeeze(label_schemes[i,:,:])
contour_plot = ax1.pcolormesh(X_var, Y_var, label_scheme,cmap=cmap, edgecolor ='none', alpha=0.7, transform=ccrs.PlateCarree(), animated = True)
cax = fig.add_axes([ax1.get_position().x1+0.01,ax1.get_position().y0,0.02,ax1.get_position().height])
cbar = plt.colorbar(contour_plot,cax=cax, ticks=np.linspace(0,3,4))
cbar.ax.set_yticklabels(["Nothing","Shallow Conv.","Transition","Deep Conv."])
#ax1.set_title('VAE Based Convection Identification')
#ax1.title.set_fontsize(16)
ax1.coastlines(linewidth=0.5,edgecolor='0.25')
ax1.gridlines()
ax1.add_feature(cfeature.BORDERS,linewidth=0.5,edgecolor='0.25')
#plt.xlabel('Longitude',fontsize=15)
#plt.ylabel('Latitude', fontsize=15)
title = ax1.text(0.5,1.05,"VAE Convection Predictions at ".format(i)+str(times[i])+" UTC".format(i),
size=plt.rcParams["axes.titlesize"],
ha="center", transform=ax1.transAxes, )
feat_list.append([contour_plot, title])
#plt.gca().invert_yaxis()
ani = animation.ArtistAnimation(fig, feat_list, interval = 180, blit = False, repeat = True)
ani.save('/fast/gmooers/gmooers_git/CBRAIN-CAM/MAPS/Animations/Figures/83_Diurnal_Composite.mp4')
plt.show()
#conv_movie(reformed_array[:,:,:], new_lons, new_lats)
fz = 15
lw = 3
siz = 100
XNNA = 1.25 # Abscissa where architecture-constrained network will be placed
XTEXT = 0.25 # Text placement
YTEXT = 0.3 # Text placement
LWC = 2.0
plt.rc('text', usetex=False)
matplotlib.rcParams['mathtext.fontset'] = 'stix'
matplotlib.rcParams['font.family'] = 'STIXGeneral'
#mpl.rcParams["font.serif"] = "STIX"
plt.rc('font', family='serif', size=fz)
matplotlib.rcParams['lines.linewidth'] = lw
cmap = matplotlib.colors.ListedColormap(["yellow", "green", "blue"])
norm = matplotlib.colors.BoundaryNorm(np.arange(1,5), cmap.N)
fig, ax = plt.subplots(4,1,figsize=(15,5),subplot_kw={'projection':ccrs.Robinson(central_longitude=180)})
#fig.set_size_inches(16,12)
#fig.set_size_inches(15,5)
contour_plot = ax[0].pcolormesh(new_lons, new_lats, np.squeeze(reformed_array[0,:,:]),cmap=cmap, edgecolor ='none', alpha=0.7, transform=ccrs.PlateCarree(), animated = True)
t = ax[0].text(0.4, 0.80, '0:00 UTC', transform=ax[0].transAxes, fontsize=fz)
t.set_bbox(dict(facecolor='white', alpha=1.0, edgecolor='white'))
ax[0].coastlines(linewidth=LWC,edgecolor='0.25')
ax[0].gridlines()
ax[0].add_feature(cfeature.BORDERS,linewidth=LWC,edgecolor='0.25')
#ax[0].set_title("0:00 UTC", fontsize=fz)
contour_plot = ax[1].pcolormesh(new_lons, new_lats, np.squeeze(reformed_array[24,:,:]),cmap=cmap, edgecolor ='none', alpha=0.7, transform=ccrs.PlateCarree(), animated = True)
t = ax[1].text(0.4, 0.80, '6:00 UTC', transform=ax[1].transAxes, fontsize=fz)
t.set_bbox(dict(facecolor='white', alpha=1.0, edgecolor='white'))
ax[1].coastlines(linewidth=LWC,edgecolor='0.25')
ax[1].gridlines()
ax[1].add_feature(cfeature.BORDERS,linewidth=LWC,edgecolor='0.25')
#ax[1].set_title("6:00 UTC", fontsize=fz)
contour_plot = ax[2].pcolormesh(new_lons, new_lats, np.squeeze(reformed_array[48,:,:]),cmap=cmap, edgecolor ='none', alpha=0.7, transform=ccrs.PlateCarree(), animated = True)
t = ax[2].text(0.4, 0.80, '12:00 UTC', transform=ax[2].transAxes, fontsize=fz)
t.set_bbox(dict(facecolor='white', alpha=1.0, edgecolor='white'))
ax[2].coastlines(linewidth=LWC,edgecolor='0.25')
ax[2].gridlines()
ax[2].add_feature(cfeature.BORDERS,linewidth=LWC,edgecolor='0.25')
#ax[2].set_title("12:00 UTC", fontsize=fz)
contour_plot = ax[3].pcolormesh(new_lons, new_lats, np.squeeze(reformed_array[72,:,:]),cmap=cmap, edgecolor ='none', alpha=0.7, transform=ccrs.PlateCarree(), animated = True)
t = ax[3].text(0.4, 0.80, '18:00 UTC', transform=ax[3].transAxes, fontsize=fz)
t.set_bbox(dict(facecolor='white', alpha=1.0, edgecolor='white'))
ax[3].coastlines(linewidth=LWC,edgecolor='0.25')
ax[3].gridlines()
ax[3].add_feature(cfeature.BORDERS,linewidth=LWC,edgecolor='0.25')
#ax[3].set_title("18:00 UTC", fontsize=fz)
fig.subplots_adjust(top=0.98, right=0.9)
#cbar_ax = fig.add_axes([0.95, 0.15, 0.05, 0.7])
#cbar = fig.colorbar(contour_plot, cax=cbar_ax, ticks=np.linspace(0,3,4))
#cbar.ax.set_yticklabels(["Nothing","Shallow Conv.","Transition","Deep Conv."], fontsize=fz)
plt.suptitle("10-Day Composite of Convection Classification by the VAE", fontsize = fz, y=1.03)
plt.subplots_adjust(hspace=0, wspace=0.01)
plt.savefig("/fast/gmooers/gmooers_git/CBRAIN-CAM/MAPS/CI_Figure_Data/Diurnal_Composite.png")
plt.savefig("/fast/gmooers/gmooers_git/CBRAIN-CAM/MAPS/CI_Figure_Data/Diurnal_Composite.pdf")
###Output
_____no_output_____ |
user_functions.ipynb | ###Markdown
Scientific Programming in Python Karl N. Kirschner Bonn-Rhein-Sieg University of Applied Sciences Sankt Augustin, Germany User-defined Functions Sources 1. David M Beazley, Python Essential Reference, Developer's Library, Third Edition, Indianapolis, IN, 2006. User-defined functions... ...are the modular brains for your scientific programming. 1. First line: '**def function_name():**' - declares a function that is named 'function_name' - typically, passed parameters are given with the () 2. Second line to the end - indented body of the code 3. Then, simply call the function when you want to use it (i.e. function calls)
###Code
def hello():
print('hello')
print('hi')
print('hey')
print('hi-ya')
print('greetings')
print('good day')
print('good morning')
print("what's happening")
print("what's up")
print('how are you')
print('how goes it')
print('howdy-do')
print('bonjour')
print('buenas noches')
print('buenos dias')
print('shalom')
print("howdy y'all")
hello() # function call
## Define the function new
## pass parameter of name
def hello(name):
'''A simple print user-defined function.
Input: Name (str)
'''
print(f'Howdy-do {name}')
hello(name='Isadora')
###Output
_____no_output_____
###Markdown
After each function call, the passed variable values are forgotten since they are local variables within the function.
###Code
hello()
def hello(name):
'''A simple print user-defined function.
An internal check on the passed variable is now done.
Input
Name (str)
'''
if not isinstance(name, str):
raise TypeError('You did not specify a string for the name.')
else:
print(f'Howdy-do {name}')
hello(name='Isadora')
hello()
###Output
_____no_output_____
###Markdown
What happens now if we don't pass the correct type of a variable to the function? - We can now customize the error that is reported, thanks to the `isinstance` check. For example, if we pass an `int` instead of a `str`:
###Code
hello(name=42)
###Output
_____no_output_____
###Markdown
Global versus Local Variables - What happens when using **local variables** within a function that have the **same name** as a **global variable**?
###Code
## global variables
age = 23
name = 'Jane'
def hello_two_local(age, name):
'''A simple print user-defined function, with two local variables (age, name)
An internal check on the passed variable is now done.
Input
Name (str)
'''
if not isinstance(age, int):
raise TypeError('You did not specify an integer for the age.')
elif not isinstance(name, str):
raise TypeError('You did not specify a string for the name.')
else:
print(f'Howdy-do {name}, who is {age} years old.')
hello_two_local()
hello_two_local(age=age, name=name)
###Output
_____no_output_____
###Markdown
- What happens when you have a **global variable** that **is not a local variable** within a function?
###Code
## global variables
age = 23
def hello_one_local_one_global(name):
'''A simple print user-defined function, with two local variables (age, name)
An internal check on the passed variable is now done.
Input
Name (str)
'''
if not isinstance(name, str):
raise TypeError('You did not specify a string for the name.')
else:
print(f'Howdy-do {name}, who is {age} years old.')
hello_one_local_one_global(name=name)
###Output
_____no_output_____
###Markdown
Returning an object from a function (Recall that SciPy has a large collection of physical constants.)
###Code
from scipy.constants import c
def mass2energy(mass, speedoflight):
''' Converts mass to energy using Einstein's equation.
Input
mass: mass of an object (units = kg since 1 J = 1 kg m^2/s^2)
speedoflight: speed of light (unit = m/s)
Return
energy: energy associated for a given mass (units = J)
'''
energy = mass*(speedoflight**2)
return energy
my_mass = 0.100
energy = mass2energy(mass=my_mass, speedoflight=c)
print(f'Energy = {energy} Joules')
###Output
_____no_output_____
###Markdown
Perhaps we can make things a bit more logical and informative...
###Code
def mass2energy(mass, speedoflight):
''' Converts mass to energy using Einstein's equation.
Input
mass: mass of an object (units = kg since 1 J = 1 kg m^2/s^2)
speedoflight: speed of light (unit = m/s)
Return
energy: energy associated for a given mass (units = J)
'''
if not isinstance(mass, float):
raise TypeError(f'The value for the mass (i.e. {mass}) must be a float type')
elif not isinstance(speedoflight, float):
raise TypeError(f'The value for the speed-of-light (i.e. {speedoflight}) must be a float type')
else:
        energy = mass*(speedoflight**2)
return energy
energy = mass2energy(mass=0.100, speedoflight=c)
print(f'Energy = {energy:0.2e} Joules')
###Output
_____no_output_____
###Markdown
Now, make sure our internal checks are working:
###Code
energy = mass2energy(mass='one_hundred', speedoflight=c)
###Output
_____no_output_____
###Markdown
Required versus Optional Parameters All of the above user-defined functions have had **required** parameters. To define **optional parameters**, one can assign those parameters a **default value**. **Once a parameter** is assigned a default value, then all of the **subsequent** (i.e. the remaining) variables must also be optional parameters.
###Code
def mass2energy(mass, speedoflight, fun_comment=None):
''' Converts mass to energy using Einstein's equation.
Input
mass (float): units in kg since 1 J = 1 kg m^2/s^2
speedoflight: speed of light
Return
energy (float): units in Joules
'''
if fun_comment is not None:
print(fun_comment)
if not isinstance(mass, float):
raise TypeError(f'The value for the mass (i.e. {mass}) must be a float type')
elif not isinstance(speedoflight, float):
raise TypeError(f'The value for the speed-of-light (i.e. {speedoflight}) must be a float type')
else:
        energy = mass*(speedoflight**2)
return energy
energy = mass2energy(mass=0.100, speedoflight=c)
print(f'Energy = {energy:0.2e} Joules')
###Output
_____no_output_____
###Markdown
**Note**: the `fun_comment` wasn't used, and thus the body of its corresponding `if` statement was skipped.
###Code
energy = mass2energy(mass=0.100, speedoflight=c, fun_comment='Hi, are you Einstein?')
print(f'Energy = {energy:0.2e} Joules')
###Output
_____no_output_____
###Markdown
<!-- Including a None default value for all user-function variables. Arguments for might include: - Allows you to later do some internal code checking. - E.g.: might be helpful for optional variables - Easier for nonexperts to understand the code's flow. - Good practice? (e.g. accidentally using a global variable when you -- or someone else -- didn't mean to) Why it might be a bad idea: - Lose the required versus default parameter idea. In this course: We will create functions that specify a default value of `None` for optional variables. --> Additional Remarks 1. One can pass multiple additional unnamed variables to a function using `*args`. - `args` = **arg**ument**s** - the `*` indicates that args is going to be passed as an iterable.
###Code
def my_args_func(*args):
'''Add a series of numbers together.'''
answer = 0
for number in args:
answer += number
return answer
my_args_func(1, 2, 3, 4, 5)
###Output
_____no_output_____
###Markdown
2. One can also pass additional keyword-based arguments (e.g. weighting_factor=0.85) using `**kwargs`. - `kwargs` = **k**ey**w**ord **arg**ument**s** - the `**` indicates that kwargs are going to be passed as a dictionary. Important: the sequence must go as `def my_function(required, *args, **kwargs)`. Dictionaries: https://docs.python.org/3/tutorial/datastructures.html#dictionaries Access a value within a dictionary by its key: `my_dictionary = {'a_key': its_value}`
###Code
def my_kwargs_func(**kwargs):
'''Print the features of a molecule.
kwargs: dictionary that contains features and values for a given molecule
'''
for key, value in kwargs.items():
print(f'{key}: {value}')
my_kwargs_func(name='octane', number_carbons=8, molec_weight=114.23, density=703)
my_kwargs_func(name='nonane', number_carbons=9, molec_weight=128.2)
###Output
_____no_output_____ |
chapter05/Solving the n-queens problem.ipynb | ###Markdown
SAT Solver
###Code
pip install dd
pip install omega
import seaborn as sns; sns.set()
def visualize_solution(solution):
"""visualize a board of arbitrary size"""
if isinstance(solution, dict):
solution = list(solution.values())
if isinstance(solution, list):
solution = np.array(solution)
n = int(np.sqrt(solution.size))
plt.figure(figsize=(5, 3))
sns.heatmap(
solution.reshape((n, n)),
cbar=False,
cmap="YlGnBu",
annot=True
)
plt.show()
"""
From the dd examples on github:
https://github.com/tulip-control/dd/blob/0f6d16483cc13078edebac9e89d1d4b99d22991e/examples/queens.py
N-Queens problem using one-hot encoding.
Reference
=========
Henrik R. Andersen
"An introduction to binary decision diagrams"
Lecture notes for "Efficient Algorithms and Programs", 1999
The IT University of Copenhagen
Sec.6.1
"""
import pickle
import time
from dd import bdd as _bdd
from omega.logic.syntax import conj, disj
def solve_queens(n):
"""Return set of models for the `n`-queens problem.
@rtype: `int`, `BDD`
"""
vrs = [_var_str(i, j) for i in range(n) for j in range(n)]
bdd = _bdd.BDD()
bdd.declare(*vrs)
s = queens_formula(n)
u = bdd.add_expr(s)
return u, bdd
def queens_formula(n):
"""Return a non-trivial propositional formula for the problem."""
# i = row index
# j = column index
present = at_least_one_queen_per_row(n)
rows = at_most_one_queen_per_line(True, n)
cols = at_most_one_queen_per_line(False, n)
slash = at_most_one_queen_per_diagonal(True, n)
backslash = at_most_one_queen_per_diagonal(False, n)
s = conj([present, rows, cols, slash, backslash])
return s
def at_least_one_queen_per_row(n):
"""Return formula as `str`."""
c = list()
for i in range(n):
xijs = [_var_str(i, j) for j in range(n)]
s = disj(xijs)
c.append(s)
return conj(c)
def at_most_one_queen_per_line(row, n):
"""Return formula as `str`.
@param row: if `True`, then constrain rows, else columns.
"""
c = list()
for i in range(n):
if row:
xijs = [_var_str(i, j) for j in range(n)]
else:
xijs = [_var_str(j, i) for j in range(n)]
s = mutex(xijs)
c.append(s)
return conj(c)
def at_most_one_queen_per_diagonal(slash, n):
"""Return formula as `str`.
@param slash: if `True`, then constrain anti-diagonals,
else diagonals.
"""
c = list()
if slash:
a = -n
b = n
else:
a = 0
b = 2 * n
for k in range(a, b):
if slash:
ij = [(i, i + k) for i in range(n)]
else:
ij = [(i, k - i) for i in range(n)]
ijs = [(i, j) for i, j in ij if 0 <= i < n and 0 <= j < n]
if not ij:
continue
xijs = [_var_str(i, j) for i, j in ijs]
s = mutex(xijs)
c.append(s)
return conj(c)
def mutex(v):
"""Return formula for at most one variable `True`.
@param v: iterable of variables as `str`
"""
v = set(v)
c = list()
for x in v:
rest = disj(y for y in v if y != x)
s = '{x} -> !({rest})'.format(x=x, rest=rest)
c.append(s)
return conj(c)
def _var_str(i, j):
"""Return variable for occupancy of cell at {row: i, column: j}."""
return 'x{i}{j}'.format(i=i, j=j)
def benchmark(n):
"""Run for `n` queens and print statistics."""
t0 = time.time()
u, bdd = solve_queens(n)
t1 = time.time()
dt = t1 - t0
for i, d in enumerate(bdd.pick_iter(u)):
if len(d) > 0:
visualize_solution(d)
break
n_solutions = bdd.count(u)
s = (
'------\n'
'queens: {n}\n'
'time: {dt} (sec)\n'
'node: {u}\n'
'total nodes: {k}\n'
'number solutions: {n_solutions}\n'
'------\n'
).format(
n=n, dt=dt, u=u, k=len(bdd),
n_solutions=n_solutions,
)
print(s)
return dt
n_max = 9
fname = 'dd_times.p'
times = dict()
for n in range(n_max + 1):
t = benchmark(n)
times[n] = t
f = open(fname, 'wb')
pickle.dump(times, f)
###Output
------
queens: 0
time: 0.0005481243133544922 (sec)
node: 1
total nodes: 1
number solutions: 1
------
###Markdown
Genetic algorithm
###Code
def plot_scores(scores):
plt.plot(
list(range(len(scores))),
scores,
'--o',
)
plt.ylabel('score')
plt.xlabel('iterations')
!pip install python-chess
import chess
import chess.svg
from IPython.display import display
def queen_to_str(q, N=8):
prev = str(q) if q > 0 else ''
post = str((N-1)-q) if q < (N-1) else ''
return prev + 'Q' + post
assert queen_to_str(1) == '1Q6'
assert queen_to_str(0) == 'Q7'
assert queen_to_str(7) == '7Q'
def show_board(queens):
"""This only works for 8 queens..."""
fen = '/'.join([queen_to_str(q) for q in queens])
display(chess.svg.board(board=chess.Board(fen), size=300))
solution = np.array([2, 4, 6, 8, 3, 1, 7, 5]) - 1
show_board(solution)
import numpy as np
def cost_function(props):
"""Given a props vector from a particle,
a vector of queens' positions, how right are we?
"""
res = 0
for i1, q1 in enumerate(props[:-1]):
for i2, q2 in enumerate(props[i1+1:], i1+1):
if (q1 != q2) and (abs(i1 - i2) != abs(q1 - q2)):
res += 1
return res
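# Illustrative check: cost_function counts non-attacking queen pairs, so a valid
# 8-queens configuration such as [2, 4, 6, 8, 3, 1, 7, 5] scores the maximum of 28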
import random
from typing import Optional, List, Tuple
import numpy as np
class Chromosome:
def __init__(self, configuration: Optional[List]=None, nq: Optional[int]=None):
"""make sure either nq or configuration is given!
"""
assert not (nq is None and configuration is None)
if configuration is None:
self.nq = nq
self.max_fitness = np.sum(np.arange(nq))
self.configuration = [
random.randint(1, nq) for _ in range(nq)
]
else:
self.configuration = configuration
self.nq = len(configuration)
self.max_fitness = np.sum(np.arange(self.nq))
def fitness(self):
return cost_function(self.configuration) / self.max_fitness
def mutate(self):
"""A mutation: randomly changing a value
inside a chromosome"""
ind = random.randint(0, self.nq-1)
val = random.randint(1, self.nq)
self.configuration[ind] = val
class GeneticQueen:
def __init__(self, nq, population_size=20, mutation_prob=0.5):
self.nq = nq
self.population_size = population_size
self.mutation_prob = mutation_prob
self.population = [Chromosome(nq=nq) for _ in range(population_size)]
self.solution = None
self.best_fitness = None
def iterate(self):
"""create a new generation
"""
new_population = []
best_fitness = -1
for i in range(len(self.population)):
p1, p2 = self.get_parents()
child = Chromosome(self.cross_over(p1, p2))
if random.random() < self.mutation_prob:
child.mutate()
new_population.append(child)
fit = child.fitness()
if fit > best_fitness:
best_fitness = fit
if fit == 1:
self.solution = child
break
self.best_fitness = best_fitness
self.population = new_population
def cross_over(self, p1, p2):
"""cross_over between two chromosomes
"""
return [
yi
            if random.random() > 0.5
else xi
for xi, yi in zip(
p1.configuration,
p2.configuration
)
]
def get_parents(self) -> Tuple[Chromosome, Chromosome]:
"""choose parents to reproduce;
fittest more likely to be chosen
"""
weights = [chrom.fitness() for chrom in self.population]
return tuple(
random.choices(
self.population,
weights=weights,
k=2
)
)
def visualize_solution(self):
"""visualize a board of arbitrary size"""
if self.nq == 8:
show_board(np.array(self.solution.configuration)-1)
else:
board = np.zeros(shape=(self.nq, self.nq))
for row, col in enumerate(self.solution.configuration):
board[row, col-1] = 1
plt.figure(figsize=(5, 3))
sns.heatmap(
board,
cbar=False,
cmap="YlGnBu",
annot=True
)
plt.show()
@timeit
def ga_solver(nq):
fitness_trace = []
gq = GeneticQueen(nq=nq)
generation = 0
while not gq.solution:
gq.iterate()
if (generation % 100) == 0:
print('Generation {}'.format(generation))
print('Maximum Fitness = {:.3f}'.format(gq.best_fitness))
fitness_trace.append(gq.best_fitness)
generation += 1
gq.visualize_solution()
return fitness_trace
fitness_trace = ga_solver(8)
plot_scores(fitness_trace)
###Output
_____no_output_____
###Markdown
Particle Swarm Optimization
###Code
NQUEENS = 8
SOLVED_COST = np.sum(np.arange(NQUEENS))
assert SOLVED_COST == 28
class Particle:
best_fitness: int = 0
def __init__(
self, N=None, props=None,
velocities=None
):
"""Either specify N for randomization,
or props and velocities
"""
if props is None:
# random initialization
self.current_particle = np.random.randint(0, N-1, N)
self.best_state = np.random.randint(0, N-1, N)
self.velocities = np.random.uniform(-(N-1), N-1, N)
else:
self.current_particle = props
self.best_state = props
self.velocities = velocities
self.best_fitness = cost_function(self.best_state)
def set_new_best(self, props: List[int], new_fitness: int):
self.best_state = props
self.best_fitness = new_fitness
def __repr__(self):
return f'{self.__class__.__name__}(\n' +\
f'\tcurrent_particle={self.current_particle}\n' +\
            f'\tbest_state={self.best_state}\n' +\
            f'\tvelocities={self.velocities}\n' +\
            f'\tbest_fitness={self.best_fitness}\n' +\
')'
def init_particles(N: int, n_particles: int):
return [Particle(N=N) for i in range(n_particles)]
particles = init_particles(8, 20)
particles[0]
particles[0].current_particle
particles[0].current_particle
cost_function(np.array([7, 9, 3, 8, 2, 4, 6, 1]))
particles[0]
# cdef int[:] get_best_particle(list particles):
# def get_best_particle(particles: List[Particles]) -> Particle:
def get_best_particle(particles):
best_particle = 0 # int
best_score = -1 # int
score = -1 # int
for i, particle in enumerate(particles):
score = cost_function(particle.current_particle)
if score > best_score:
best_score = score
best_ind = i
return particles[best_ind].current_particle, best_score
particles = particles[:-1]
particles.append(Particle(props=solution))
get_best_particle(particles) # == (solution, 28)
particles[0]
# good explanation and pseudocode https://en.wikipedia.org/wiki/Particle_swarm_optimization
def particle_swarm_optimization(
N: int, omega: float, phip: float, phig: float,
n_particles: int, visualize=False, max_iteration=999999
) -> List[int]:
"""Particle Swarm Optimization
Parameters:
-----------
N - number of queens
omega - a decay parameter
phip - controls the contribution of the local search
phig - controls the contribution of the global search
n_particles - the number of particles
visualize - if the solutions should be visualized by images
max_iteration - for early stopping
Returns:
--------
best_particle - the best solution
scores - the best scores over iterations
Highly sensitive to the choice of parameters!
"""
def print_best():
print(f'iteration {iteration} - best particle: {best_particle}, score: {best_score}')
solved_cost = np.sum(np.arange(N))
particles = init_particles(N=N, n_particles=n_particles)
iteration = 0
best_particle, best_score = get_best_particle(particles)
scores = [ best_score ]
if visualize:
print('iteration:', iteration)
show_board(best_particle)
while best_score < solved_cost and iteration < max_iteration:
if (iteration % 500) == 0 or iteration == 0:
print_best()
for particle in particles:
rg = np.random.rand((N))
rp = np.random.rand((N))
delta_p = particle.best_state - particle.current_particle
delta_g = best_particle - particle.current_particle
update = (rp * phip * delta_p +\
rg * phig * delta_g) # local vs global
#print('particle:')
#print(delta_p)
#print(delta_g)
#print(update)
particle.velocities = omega * particle.velocities + update
#print(particle.velocities)
particle.current_particle = (
np.abs(particle.current_particle + particle.velocities) % N
).astype(int)
# update the particle best
current_fitness = cost_function(particle.current_particle)
if current_fitness > particle.best_fitness:
particle.set_new_best(particle.current_particle, current_fitness)
best_particle_cand, best_score_cand = get_best_particle(particles)
if best_score_cand > best_score:
best_particle, best_score = best_particle_cand, best_score_cand
if iteration > 0 and visualize:
print('iteration:', iteration)
show_board(best_particle)
scores.append(best_score)
iteration += 1
print_best()
return best_particle, scores
best_particle, scores = particle_swarm_optimization(
N=8, omega=1, phip=8, phig=2,
n_particles=50, visualize=True
)
plot_scores(scores)
###Output
_____no_output_____ |
LTFS Data Science FinHack/LTFS_Data_Science_FinHack.ipynb | ###Markdown
Imports
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import os
import re
from sklearn import preprocessing, metrics
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import average_precision_score
from sklearn.metrics import precision_recall_curve
###Output
_____no_output_____
###Markdown
**Data Analysis** We need to predict the probability of a loanee/borrower defaulting on a vehicle loan in the first EMI on the due date. The following information regarding the loan and loanee is provided in the datasets: 1. LOANEE INFORMATION : Demographic data like age, identity proof etc 2. LOAN INFORMATION : Disbursal details, loan to value ratio etc 3. BUREAU DATA AND HISTORY : Bureau score, number of active accounts, the status of other loans, credit history etc. Doing so will ensure that clients capable of repayment are not rejected and important determinants can be identified which can be further used for minimising the default rates.
###Code
train_df = pd.read_csv('train.csv')
###Output
_____no_output_____
###Markdown
**Loan Default class Distribution**
###Code
# Given Dataset
ax = train_df['loan_default'].value_counts().plot(kind = 'bar', figsize = (10, 7), color = 'slateblue', fontsize = 13)
ax.set_alpha(0.8)
ax.set_xlabel('Class', color = 'gray', fontsize = 18 )
ax.set_ylabel('No of Obeservations', fontsize = 18, color = 'gray')
ax.set_title("Loan Default Class Distribution", fontsize = 18)
totals = []
for i in ax.patches:
totals.append(i.get_height())
total = sum(totals)
print(totals)
# set individual bar lables using above list
for i in ax.patches:
# get_x pulls left or right; get_height pushes up or down
ax.text(i.get_x() + 0.1, i.get_height()- 13000, \
str(round((i.get_height()/total)*100, 2))+'%', fontsize=22,
color='white')
plt.show()
train_df['loan_default'] = train_df['loan_default'].astype('category')
train_df.head(5)
# No. of Vehicle Manufacturer
len(train_df['manufacturer_id'].unique())
# Count of Loan defaults grouped by Employee Type
df = train_df.groupby(['Employment.Type', 'loan_default']).agg({'loan_default': 'count'})
df
train_df.groupby(['State_ID', 'loan_default']).agg({'loan_default': 'count'})
train_df.head(5)
train_df.columns
len(train_df['PERFORM_CNS.SCORE.DESCRIPTION'].unique())
train_df['PERFORM_CNS.SCORE'].unique()
df = train_df[['PERFORM_CNS.SCORE', 'PERFORM_CNS.SCORE.DESCRIPTION', 'loan_default']]
df.head(50)
# Quantitative variables : [disbursed_amount, asset_cost, ltv,
# Perform_CNS_Score, Perform_CNS_Score_DESC,
# pri_NO_accts, active_acct,
#
#
#
#
# NEW.ACCTS.IN.LAST.SIX.MONTHS, DELINQUENT.ACCTS.IN.LAST.SIX.MONTHS, NO.OF_INQUIRIES]
# Categoorical Variables : [branch_id, supplier_id, manufacturer_id, current_pincode, Employement.Type,
# State_ID, MobileNo_AVL_Flag, Aadhar_Flag, PAN_flag, VoterID_flag, Driving_flag, Passport_flag,
# ]
# preprocess : [date.of.Birth, disbursalDate, CREDIT.HISTORY.LENGTH, AVERAGE.ACCT.AGE]
#
# Delete features : [Unique_ID, Employee_code_ID, ]
# preparing Quantitative variables
train_df.dtypes
train_df.iloc[0]
train_df.describe()
train_df.columns
drop_features = ['UniqueID', 'branch_id', 'supplier_id', 'manufacturer_id', 'Current_pincode_ID', 'Employee_code_ID',
'MobileNo_Avl_Flag', 'Aadhar_flag', 'PAN_flag', 'VoterID_flag',
'Driving_flag', 'Passport_flag',]
train_df = train_df.drop(drop_features, axis = 1)
train_df.shape
train_df.isnull().sum()
train_df.dropna(inplace = True)
###Output
_____no_output_____
###Markdown
**Non Numneric data**
###Code
train_df.dtypes.value_counts()
object_cols_df = train_df.select_dtypes(include = ['object'])
print(object_cols_df.iloc[0])
# Date.of.Birth
from datetime import datetime
from datetime import date
def calAge(dob):
age = pd.to_datetime('today').year - pd.to_datetime(dob).year
if age < 0:
age = age + 100
return age
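# e.g. a two-digit birth year like '68' may be parsed as 2068, producing a negative
# age; the +100 correction above maps it back to the intended 20th-century year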
train_df['Age'] = train_df['Date.of.Birth'].apply(calAge)
train_df = train_df.drop('Date.of.Birth', axis = 1)
# Employement.Type
dummy_df = pd.get_dummies(train_df['Employment.Type'])
train_df = pd.concat([train_df, dummy_df], axis = 1)
train_df = train_df.drop('Employment.Type', axis = 1)
# PERFORM_CNS.SCORE.DESCRIPTION
dummy_df = pd.get_dummies(train_df['PERFORM_CNS.SCORE.DESCRIPTION'])
train_df = pd.concat([train_df, dummy_df], axis = 1)
train_df = train_df.drop('PERFORM_CNS.SCORE.DESCRIPTION', axis = 1)
# AVERAGE.ACCT.AGE & CREDIT.HISTORY.LENGTH
import re
def preprocess(str1):
no = re.findall('\d+', str1)
mylist = [12, 1]
res = 0
for i,j in zip(no, mylist):
i = int(i)
res = res + (i*j)
return res
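# e.g. preprocess('1yrs 11mon') -> 1*12 + 11*1 = 23 (total months)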
train_df['AVG_ACCT_AGE_Mon'] = train_df['AVERAGE.ACCT.AGE'].apply(preprocess)
train_df['CREDIT.HISTORY.LENGTH_Mon'] = train_df['CREDIT.HISTORY.LENGTH'].apply(preprocess)
train_df = train_df.drop(['AVERAGE.ACCT.AGE', 'CREDIT.HISTORY.LENGTH'], axis = 1)
train_df = train_df.drop('DisbursalDate', axis = 1)
train_df.info()
train_df.head(10)
###Output
_____no_output_____
###Markdown
Modelling
###Code
# Logistic regression classifier
from sklearn.linear_model import LogisticRegression
lr = LogisticRegression()
cols = train_df.columns
train_cols = cols.drop('loan_default')
features = train_df[train_cols]
target = train_df['loan_default']
lr.fit(features, target)
predictions = lr.predict(features)
from sklearn.model_selection import KFold, cross_val_predict

lr = LogisticRegression()
# 5-fold cross-validated predictions (shuffle so random_state takes effect)
kf = KFold(n_splits=5, shuffle=True, random_state=42)
predictions = cross_val_predict(lr, features, target, cv=kf)
predictions = pd.Series(predictions)
###Output
_____no_output_____ |
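###Markdown
 As a possible next step (a sketch only, assuming the `target` series and the cross-validated `predictions` computed above), the out-of-fold predictions could be scored the same way as the in-sample fit:
###Code
from sklearn.metrics import accuracy_score, classification_report

# Evaluate the cross-validated (out-of-fold) predictions.
print("Out-of-fold accuracy:", accuracy_score(target, predictions))
print(classification_report(target, predictions))
###Output
_____no_output_____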
_notebooks/2021-09-20-18-advanced.ipynb | ###Markdown
18. Advanced Features
> Help you write more Pythonic code with some advanced features in Python.
- toc: true
- badges: true
- comments: true
- categories: [python]
- author: Zhi Li
- sticky_rank: 18
Conditional expressions
###Code
import math
x = float(input())
if x > 0:
y = math.log(x)
else:
y = float('nan') # a special floating-point value that represents “Not a Number”.
print(y)
###Output
_____no_output_____
###Markdown
A more concise expression:
###Code
y = math.log(x) if x > 0 else float('nan')
###Output
_____no_output_____
###Markdown
Another example:
###Code
def factorial(n):
if n == 0:
return 1
else:
return n * factorial(n-1)
# conditional expression
def factorial(n):
return 1 if n == 0 else n * factorial(n-1)
###Output
_____no_output_____
###Markdown
List comprehensions
###Code
def capitalize_all(t):
res = []
for s in t:
res.append(s.capitalize())
return res
# using list comprehension
def capitalize_all(t):
return [s.capitalize() for s in t]
###Output
_____no_output_____
###Markdown
List comprehensions can also be used for filtering.
###Code
def only_upper(t):
res = []
for s in t:
if s.isupper():
res.append(s)
return res
# using list comprehension
def only_upper(t):
return [s for s in t if s.isupper()]
###Output
_____no_output_____
###Markdown
**Note**: list comprehensions are harder to debug because you can't put a print statement inside the loop. I suggest that you use them only if the computation is simple enough that you are likely to get it right the first time. And for beginners that means **never**.

Generator expressions
It's easy to create a list using a list comprehension, but the size of the list is limited by the available RAM, and building millions of elements up front can be wasteful. A generator produces its elements lazily, computing each one only when it is requested.
###Code
L = [x * x for x in range(5)]
L
# a generator expression
g = (x * x for x in range(5))
g
next(g)
next(g)
next(g)
next(g)
next(g)
next(g)
###Output
_____no_output_____
###Markdown
Another way to write generator: yield
###Code
def fib(max):
n, a, b = 0, 0, 1
while n < max:
yield b
a, b = b, a + b
n = n + 1
return 'done'
f = fib(4)
f
next(f)
next(f)
next(f)
next(f)
next(f)
###Output
_____no_output_____
###Markdown
`any` and `all`
Python provides a built-in function, `any`, that takes a sequence of boolean values and returns `True` if any of the values are `True`. It works on lists:
###Code
any([False, False, True])
any(letter == 's' for letter in 'babson')
###Output
_____no_output_____
###Markdown
Python provides another built-in function, `all`, that returns `True` if every element of the sequence is `True`.
###Code
all([False, True, True])
all([True, True, True])
all(letter == 'w' for letter in 'www')
###Output
_____no_output_____
###Markdown
Sets
Each element in a `set` is unique.
###Code
s = set([1, 2, 3, 3])
s
s.add(4)
s
s.add(4)
s
###Output
_____no_output_____
###Markdown
`set` works like set in math.
###Code
s1 = set([1, 2, 3])
s2 = set([2, 3, 4])
s1 & s2
s1 | s2
###Output
_____no_output_____
###Markdown
***Exercise 01***
1. Using `set`, rewrite function `subtract` in *analyze_book.py* in chapter 13.
2. Using `set`, rewrite function `avoids` and *uses_only* in chapter 9.

Counters
A `Counter` is like a set, except that if an element appears more than once, the Counter keeps track of how many times it appears.
###Code
from collections import Counter
count = Counter('babson')
count
count['a']
count['c']
###Output
_____no_output_____
###Markdown
Named tuples
Many simple objects are basically collections of related values. For example, the `Point` object contains two numbers, `x` and `y`. When you define a class like this, you usually start with an `init` method and a `str` method:
###Code
class Point:
def __init__(self, x=0, y=0):
self.x = x
self.y = y
def __str__(self):
return '(%g, %g)' % (self.x, self.y)
###Output
_____no_output_____
###Markdown
This is a lot of code to convey a small amount of information. Python provides a more concise way to say the same thing:
###Code
from collections import namedtuple
Point = namedtuple('Point', ['x', 'y'])
Point
###Output
_____no_output_____
###Markdown
By using `namedtuple`, `Point` automatically provides methods like __init__ and __str__ so you don't have to write them.

To create a `Point` object, you use the `Point` class as a function:
###Code
p = Point(1, 2)
p
p.x, p.y
###Output
_____no_output_____
###Markdown
`namedtuple` provides a quick way to define simple classes. The drawback is that simple classes don't always stay simple. You might decide later that you want to add methods to a named tuple. In that case, you could define a new class that inherits from the `namedtuple`:
###Code
class Pointier(Point):
    # add more methods here
    pass
###Output
_____no_output_____
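###Markdown
 For instance, a hypothetical subclass that adds a distance method might look like this (a sketch, not part of the original text):
###Code
import math

# Extend the Point namedtuple defined above with an extra method.
class PointWithDistance(Point):
    def distance_to(self, other):
        return math.hypot(self.x - other.x, self.y - other.y)

PointWithDistance(0, 0).distance_to(PointWithDistance(3, 4))  # 5.0
###Output
_____no_output_____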
###Markdown
Gathering keyword args
We've learned how to write a function that gathers its arguments into a `tuple`:
###Code
def printall(*args):
print(args)
printall(1, 2.0, '3')
###Output
_____no_output_____
###Markdown
But the `*` operator doesn’t gather keyword arguments:
###Code
printall(1, 2.0, third='3')
###Output
_____no_output_____
###Markdown
To gather keyword arguments, you can use the `**` operator:
###Code
def printall(*args, **kwargs):
print(args, kwargs)
printall(1, 2.0, third='3')
###Output
_____no_output_____ |
Learning and Loading Word Embeddings in Keras.ipynb | ###Markdown
Keras Embedding Layer
* Keras offers an Embedding layer that can be used for neural networks on text data. It requires that the input data be integer encoded, so that each word is represented by a unique integer.
* The Embedding layer is defined as the first hidden layer of a network. It must specify 3 arguments:
1. **input_dim** : This is the size of the vocabulary in the text data. For example, if your data is integer encoded to values between 0-10, then the size of the vocabulary would be 11 words.
2. **output_dim** : This is the size of the vector space in which words will be embedded. It defines the size of the output vectors from this layer for each word. For example, it could be 32 or 100 or even larger. Test different values for your problem.
3. **input_length** : This is the length of input sequences, as you would define for any input layer of a Keras model. For example, if all of your input documents are comprised of 1000 words, this would be 1000.

Example of Learning an Embedding
###Code
from keras.preprocessing.text import one_hot
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers.embeddings import Embedding
import numpy as np
# define documents
docs = ["Well done!","Good work","Great effort","nice work","Excellent!","Weak","Poor effort!","not good","poor work","Could have done better."]
# define class labels
labels = [1,1,1,1,1,0,0,0,0,0]
# integer encode the documents
vocab_size = 50
encoded_docs = [one_hot(d,vocab_size) for d in docs]
print(encoded_docs)
# pad documents to a max length of 4 words
max_length = 4
padded_docs = pad_sequences(encoded_docs,maxlen=max_length,padding='post')
print(padded_docs)
# define the model
model = Sequential()
model.add(Embedding(vocab_size,8,input_length=max_length))
model.add(Flatten())
model.add(Dense(1,activation='sigmoid'))
# compile the model
model.compile(optimizer='adam',loss='binary_crossentropy',metrics=['acc'])
# summarize the model
model.summary()
# fit the model
model.fit(np.array(padded_docs),np.array(labels),epochs=50,verbose=0)
# evaluate the model
loss,accuracy = model.evaluate(np.array(padded_docs),np.array(labels),verbose=0)
print('Accuracy: %f' %(accuracy*100))
###Output
[[6, 46], [12, 19], [29, 5], [17, 19], [13], [6], [45, 5], [4, 12], [45, 19], [17, 40, 46, 23]]
[[ 6 46 0 0]
[12 19 0 0]
[29 5 0 0]
[17 19 0 0]
[13 0 0 0]
[ 6 0 0 0]
[45 5 0 0]
[ 4 12 0 0]
[45 19 0 0]
[17 40 46 23]]
Model: "sequential_3"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
embedding_3 (Embedding) (None, 4, 8) 400
_________________________________________________________________
flatten_3 (Flatten) (None, 32) 0
_________________________________________________________________
dense_3 (Dense) (None, 1) 33
=================================================================
Total params: 433
Trainable params: 433
Non-trainable params: 0
_________________________________________________________________
Accuracy: 80.000001
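###Markdown
 As a quick sanity check (a sketch, assuming the fitted `model` above), the learned embedding matrix can be pulled out of the first layer and inspected:
###Code
# The Embedding layer's weights form a (vocab_size, 8) matrix: one 8-dimensional vector per word index.
embedding_weights = model.layers[0].get_weights()[0]
print(embedding_weights.shape)
###Output
_____no_output_____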
###Markdown
Example of Using Pre-Trained Glove Embedding
###Code
from numpy import asarray
from numpy import zeros
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers import Embedding
# define documents
docs = ["Well done!","Good work","Great effort","nice work","Excellent!","Weak","Poor effort!","not good","poor work","Could have done better."]
# define class labels
labels = [1,1,1,1,1,0,0,0,0,0]
# prepare tokenizer
t = Tokenizer()
t.fit_on_texts(docs)
vocab_size = len(t.word_index) + 1
# integer encode the documents
encoded_docs = t.texts_to_sequences(docs)
print(encoded_docs)
# pad documents to a max length of 4 words
max_length = 4
padded_docs = pad_sequences(encoded_docs,maxlen=max_length,padding='post')
print(padded_docs)
# load the whole embedding into memory
embeddings_index = dict()
f = open('glove.6B.100d.txt',mode='rt',encoding='utf-8')
for line in f:
values = line.split()
word = values[0]
coefs = asarray(values[1:],dtype='float32')
embeddings_index[word] = coefs
f.close()
print('Loaded %s word vectors.' % len(embeddings_index))
# create a weight matrix for words in training docs
embedding_matrix = zeros((vocab_size,100))
for word,i in t.word_index.items():
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
embedding_matrix[i] = embedding_vector
# define model
model = Sequential()
e = Embedding(vocab_size,100,weights=[embedding_matrix],input_length=4,trainable=False)
model.add(e)
model.add(Flatten())
model.add(Dense(1,activation='sigmoid'))
# compile the model
model.compile(optimizer='adam',loss='binary_crossentropy',metrics=['acc'])
# summarize the model
model.summary()
# fit the model
model.fit(np.array(padded_docs),np.array(labels),epochs=50,verbose=0)
# evaluate the model
loss,accuracy = model.evaluate(np.array(padded_docs),np.array(labels),verbose=0)
print('Accuracy: %f' %(accuracy*100))
###Output
[[6, 2], [3, 1], [7, 4], [8, 1], [9], [10], [5, 4], [11, 3], [5, 1], [12, 13, 2, 14]]
[[ 6 2 0 0]
[ 3 1 0 0]
[ 7 4 0 0]
[ 8 1 0 0]
[ 9 0 0 0]
[10 0 0 0]
[ 5 4 0 0]
[11 3 0 0]
[ 5 1 0 0]
[12 13 2 14]]
Loaded 0 word vectors.
Model: "sequential_6"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
embedding_6 (Embedding) (None, 4, 100) 1500
_________________________________________________________________
flatten_6 (Flatten) (None, 400) 0
_________________________________________________________________
dense_6 (Dense) (None, 1) 401
=================================================================
Total params: 1,901
Trainable params: 401
Non-trainable params: 1,500
_________________________________________________________________
Accuracy: 50.000000
|
DDN_equality_jax.ipynb | ###Markdown
Bernstein Coefficient Polynomials
###Code
import numpy as np
import scipy.special
import jax.numpy as jnp
from jax import jit, jacfwd, jacrev


def bernstein_coeff_order10_new(n, tmin, tmax, t_actual):
l = tmax - tmin
t = (t_actual - tmin) / l
P0 = scipy.special.binom(n, 0) * ((1 - t) ** (n - 0)) * t ** 0
P1 = scipy.special.binom(n, 1) * ((1 - t) ** (n - 1)) * t ** 1
P2 = scipy.special.binom(n, 2) * ((1 - t) ** (n - 2)) * t ** 2
P3 = scipy.special.binom(n, 3) * ((1 - t) ** (n - 3)) * t ** 3
P4 = scipy.special.binom(n, 4) * ((1 - t) ** (n - 4)) * t ** 4
P5 = scipy.special.binom(n, 5) * ((1 - t) ** (n - 5)) * t ** 5
P6 = scipy.special.binom(n, 6) * ((1 - t) ** (n - 6)) * t ** 6
P7 = scipy.special.binom(n, 7) * ((1 - t) ** (n - 7)) * t ** 7
P8 = scipy.special.binom(n, 8) * ((1 - t) ** (n - 8)) * t ** 8
P9 = scipy.special.binom(n, 9) * ((1 - t) ** (n - 9)) * t ** 9
P10 = scipy.special.binom(n, 10) * ((1 - t) ** (n - 10)) * t ** 10
P0dot = -10.0 * (-t + 1) ** 9
P1dot = -90.0 * t * (-t + 1) ** 8 + 10.0 * (-t + 1) ** 9
P2dot = -360.0 * t ** 2 * (-t + 1) ** 7 + 90.0 * t * (-t + 1) ** 8
P3dot = -840.0 * t ** 3 * (-t + 1) ** 6 + 360.0 * t ** 2 * (-t + 1) ** 7
P4dot = -1260.0 * t ** 4 * (-t + 1) ** 5 + 840.0 * t ** 3 * (-t + 1) ** 6
P5dot = -1260.0 * t ** 5 * (-t + 1) ** 4 + 1260.0 * t ** 4 * (-t + 1) ** 5
P6dot = -840.0 * t ** 6 * (-t + 1) ** 3 + 1260.0 * t ** 5 * (-t + 1) ** 4
P7dot = -360.0 * t ** 7 * (-t + 1) ** 2 + 840.0 * t ** 6 * (-t + 1) ** 3
P8dot = 45.0 * t ** 8 * (2 * t - 2) + 360.0 * t ** 7 * (-t + 1) ** 2
P9dot = -10.0 * t ** 9 + 9 * t ** 8 * (-10.0 * t + 10.0)
P10dot = 10.0 * t ** 9
P0ddot = 90.0 * (-t + 1) ** 8
P1ddot = 720.0 * t * (-t + 1) ** 7 - 180.0 * (-t + 1) ** 8
P2ddot = 2520.0 * t ** 2 * (-t + 1) ** 6 - 1440.0 * t * (-t + 1) ** 7 + 90.0 * (-t + 1) ** 8
P3ddot = 5040.0 * t ** 3 * (-t + 1) ** 5 - 5040.0 * t ** 2 * (-t + 1) ** 6 + 720.0 * t * (-t + 1) ** 7
P4ddot = 6300.0 * t ** 4 * (-t + 1) ** 4 - 10080.0 * t ** 3 * (-t + 1) ** 5 + 2520.0 * t ** 2 * (-t + 1) ** 6
P5ddot = 5040.0 * t ** 5 * (-t + 1) ** 3 - 12600.0 * t ** 4 * (-t + 1) ** 4 + 5040.0 * t ** 3 * (-t + 1) ** 5
P6ddot = 2520.0 * t ** 6 * (-t + 1) ** 2 - 10080.0 * t ** 5 * (-t + 1) ** 3 + 6300.0 * t ** 4 * (-t + 1) ** 4
P7ddot = -360.0 * t ** 7 * (2 * t - 2) - 5040.0 * t ** 6 * (-t + 1) ** 2 + 5040.0 * t ** 5 * (-t + 1) ** 3
P8ddot = 90.0 * t ** 8 + 720.0 * t ** 7 * (2 * t - 2) + 2520.0 * t ** 6 * (-t + 1) ** 2
P9ddot = -180.0 * t ** 8 + 72 * t ** 7 * (-10.0 * t + 10.0)
P10ddot = 90.0 * t ** 8
P = np.hstack((P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10))
Pdot = np.hstack((P0dot, P1dot, P2dot, P3dot, P4dot, P5dot, P6dot, P7dot, P8dot, P9dot, P10dot)) / l
Pddot = np.hstack((P0ddot, P1ddot, P2ddot, P3ddot, P4ddot, P5ddot, P6ddot, P7ddot, P8ddot, P9ddot, P10ddot)) / (l ** 2)
return P, Pdot, Pddot
###Output
_____no_output_____
###Markdown
Initializations
###Code
x_min = -6.0
x_max = 6.0
y_min = -6.0
y_max = 6.0
t_fin = 2.0
num = 25
tot_time = np.linspace(0.0, t_fin, num)
tot_time_copy = tot_time.reshape(num, 1)
P, Pdot, Pddot = bernstein_coeff_order10_new(10, tot_time_copy[0], tot_time_copy[-1], tot_time_copy)
nvar = np.shape(P)[1]
num = np.shape(P)[0]
x_obs_temp = np.hstack((-2.0, -0.79, 3.0, 4.0))
y_obs_temp = np.hstack((-2.0, 1.0, -0.80, 2.0))
num_obs = np.shape(x_obs_temp)[0]
a_obs = 1.0
b_obs = 1.0
x_obs = np.ones((num_obs, num)) * x_obs_temp[:, np.newaxis]
y_obs = np.ones((num_obs, num)) * y_obs_temp[:, np.newaxis]
x_init = -2.87
y_init = 2.96
vx_init = 0.0
ax_init = 0.0
vy_init = 0.0
ay_init = 0.0
x_fin = 1.4
y_fin = 0.2
vx_fin = 0.0
ax_fin = 0.0
vy_fin = 0.0
ay_fin = 0.0
rho_obs = 2.0
rho_eq = 10.0
weight_smoothness = 100
A_eq = np.vstack((P[0], Pdot[0], Pddot[0], P[-1], Pdot[-1], Pddot[-1]))
A_obs = np.tile(P, (num_obs, 1))
Q_smoothness = np.dot(Pddot.T, Pddot)
P_jax = jnp.asarray(P)
A_eq_jax = jnp.asarray(A_eq)
A_obs_jax = jnp.asarray(A_obs)
x_obs_jax = jnp.asarray(x_obs)
y_obs_jax = jnp.asarray(y_obs)
Q_smoothness_jax = jnp.asarray(Q_smoothness)
# bx_eq = np.array([-1.6721, -0.0158, 0.2543, -0.5678, 0.0000, 0.0000])
# by_eq = np.array([2.1997, -1.7899, -0.6161, -0.7362, 0.0000, 0.0000])
bx_eq = np.array([1.2147, -0.8816, 0.1860, 0.0862, 1.1351, 1.0330])
by_eq = np.array([0.0876, 0.9048, 0.0106, -0.3246, 0.2031, 1.6398])
bx_eq, by_eq
###Output
_____no_output_____
###Markdown
Compute Solution
###Code
def compute_sol(rho_obs, rho_eq, weight_smoothness, num_obs, bx_eq, by_eq, P, Pdot, Pddot, x_obs, y_obs, a_obs, b_obs):
maxiter = 300
nvar = np.shape(P)[1]
num = np.shape(P)[0]
cost_smoothness = weight_smoothness * np.dot(Pddot.T, Pddot)
alpha_obs = np.zeros((num_obs, num))
d_obs = np.ones((num_obs, num))
lamda_x = np.zeros(nvar)
lamda_y = np.zeros(nvar)
res_obs = np.ones(maxiter)
res_eq = np.ones(maxiter)
d_min = np.ones(maxiter)
cost = cost_smoothness + rho_obs * np.dot(A_obs.T, A_obs)
cost_mat = np.vstack((np.hstack((cost, A_eq.T)), np.hstack((A_eq, np.zeros((np.shape(A_eq)[0], np.shape(A_eq)[0]))))))
cost_mat_inv = np.linalg.inv(cost_mat)
for i in range(0, maxiter):
temp_x_obs = d_obs * np.cos(alpha_obs) * a_obs
b_obs_x = x_obs.reshape(num * num_obs) + temp_x_obs.reshape(num * num_obs)
temp_y_obs = d_obs * np.sin(alpha_obs) * b_obs
b_obs_y = y_obs.reshape(num * num_obs) + temp_y_obs.reshape(num * num_obs)
lincost_x = - lamda_x - rho_obs * np.dot(A_obs.T, b_obs_x)
lincost_y = - lamda_y - rho_obs * np.dot(A_obs.T, b_obs_y)
sol_x = np.dot(cost_mat_inv, np.hstack((-lincost_x, bx_eq)))
sol_y = np.dot(cost_mat_inv, np.hstack((-lincost_y, by_eq)))
primal_x = sol_x[0:nvar]
dual_x = sol_x[nvar:nvar + 6]
primal_y = sol_y[0:nvar]
dual_y = sol_y[nvar:nvar + 6]
x = np.dot(P, primal_x)
y = np.dot(P, primal_y)
wc_alpha = (x - x_obs)
ws_alpha = (y - y_obs)
alpha_obs = np.arctan2(ws_alpha * a_obs, wc_alpha * b_obs)
c1_d = 1.0 * rho_obs * (a_obs ** 2 * np.cos(alpha_obs) ** 2 + b_obs ** 2 * np.sin(alpha_obs) ** 2)
c2_d = 1.0 * rho_obs * (a_obs * wc_alpha * np.cos(alpha_obs) + b_obs * ws_alpha * np.sin(alpha_obs))
d_temp = c2_d / c1_d
d_obs = np.maximum(np.ones((num_obs, num)), d_temp)
d_min[i] = np.amin(d_temp)
res_x_obs_vec = wc_alpha - a_obs * d_obs * np.cos(alpha_obs)
res_y_obs_vec = ws_alpha - b_obs * d_obs * np.sin(alpha_obs)
lamda_x = lamda_x - rho_obs * np.dot(A_obs.T, res_x_obs_vec.reshape(num_obs * num))
lamda_y = lamda_y - rho_obs * np.dot(A_obs.T, res_y_obs_vec.reshape(num_obs * num))
res_obs[i] = np.linalg.norm(np.hstack((res_x_obs_vec, res_y_obs_vec)))
slack_obs = np.sqrt((d_obs - 1))
return x, y, primal_x, primal_y, dual_x, dual_y, alpha_obs.reshape(num_obs * num), d_obs.reshape(num_obs * num), lamda_x, lamda_y, slack_obs.reshape(num_obs * num)
x, y, primal_x, primal_y, dual_x, dual_y, alpha_obs, d_obs, lamda_x, lamda_y, slack_obs = compute_sol(rho_obs, rho_eq, weight_smoothness, num_obs, bx_eq, by_eq, P, Pdot, Pddot, x_obs, y_obs, a_obs, b_obs)
aug_sol = np.hstack((primal_x, primal_y, alpha_obs, d_obs))
aug_sol[:10]
aug_sol
lamda_x_jax = jnp.asarray(lamda_x)
lamda_y_jax = jnp.asarray(lamda_y)
###Output
_____no_output_____
###Markdown
Cost Function
###Code
def cost_fun(aug_sol_jax, param_sol):
x_init, vx_init, ax_init, x_fin, vx_fin, ax_fin, y_init, vy_init, ay_init, y_fin, vy_fin, ay_fin = param_sol
bx_eq_jax = jnp.array(bx_eq)
by_eq_jax = jnp.array(by_eq)
c_x = aug_sol_jax[0:nvar]
c_y = aug_sol_jax[nvar: 2 * nvar]
num_tot = num_obs * num
alpha_obs = aug_sol_jax[2 * nvar:2*nvar + num_tot]
d_obs = aug_sol_jax[2 * nvar + num_tot:2 * nvar + 2 * num_tot]
cost_smoothness_x = 0.5 * weight_smoothness * jnp.dot(c_x.T, jnp.dot(Q_smoothness_jax, c_x))
cost_smoothness_y = 0.5 * weight_smoothness * jnp.dot(c_y.T, jnp.dot(Q_smoothness_jax, c_y))
temp_x_obs = d_obs * jnp.cos(alpha_obs) * a_obs
b_obs_x = x_obs_jax.reshape(num * num_obs) + temp_x_obs
temp_y_obs = d_obs * jnp.sin(alpha_obs) * b_obs
b_obs_y = y_obs_jax.reshape(num * num_obs) + temp_y_obs
cost_obs_x = 0.5 * rho_obs * (jnp.sum((jnp.dot(A_obs_jax, c_x) - b_obs_x) ** 2))
cost_obs_y = 0.5 * rho_obs * (jnp.sum((jnp.dot(A_obs_jax, c_y) - b_obs_y) ** 2))
cost_slack = 0.5 * rho_obs * jnp.sum(jnp.maximum(jnp.zeros(num_tot), -d_obs + 1))
cost_eq_x = 0.5 * rho_eq * (jnp.sum((jnp.dot(A_eq_jax, c_x) - bx_eq_jax) ** 2))
cost_eq_y = 0.5 * rho_eq * (jnp.sum((jnp.dot(A_eq_jax, c_y) - by_eq_jax) ** 2))
cost_x = cost_smoothness_x + cost_obs_x - jnp.dot(lamda_x_jax.T, c_x)
cost_y = cost_smoothness_y + cost_obs_y - jnp.dot(lamda_y_jax.T, c_y)
eps = 10 ** (-8.0)
cost = cost_x + cost_y + eps * jnp.sum(c_x ** 2) + eps * jnp.sum(c_y ** 2) + eps * jnp.sum(d_obs ** 2) + eps * jnp.sum(alpha_obs ** 2) + cost_slack
return cost
aug_sol_jax = jnp.asarray(aug_sol)
params = jnp.hstack((x_init, vx_init, ax_init, x_fin, vx_fin, ax_fin, y_init, vy_init, ay_init, y_fin, vy_fin, ay_fin))
# cost_fun(aug_sol_jax, params)
# x_init, vx_init, ax_init, x_fin, vx_fin, ax_fin, y_init, vy_init, ay_init, y_fin, vy_fin, ay_fin = params
bx_eq_jax = jnp.array(bx_eq)
by_eq_jax = jnp.array(by_eq)
c_x = aug_sol_jax[0:nvar]
c_y = aug_sol_jax[nvar: 2 * nvar]
num_tot = num_obs * num
alpha_obs = aug_sol_jax[2 * nvar:2*nvar + num_tot]
d_obs = aug_sol_jax[2 * nvar + num_tot:2 * nvar + 2 * num_tot]
cost_smoothness_x = 0.5 * weight_smoothness * jnp.dot(c_x.T, jnp.dot(Q_smoothness_jax, c_x))
cost_smoothness_y = 0.5 * weight_smoothness * jnp.dot(c_y.T, jnp.dot(Q_smoothness_jax, c_y))
temp_x_obs = d_obs * jnp.cos(alpha_obs) * a_obs
b_obs_x = x_obs_jax.reshape(num * num_obs) + temp_x_obs
temp_y_obs = d_obs * jnp.sin(alpha_obs) * b_obs
b_obs_y = y_obs_jax.reshape(num * num_obs) + temp_y_obs
cost_obs_x = 0.5 * rho_obs * (jnp.sum((jnp.dot(A_obs_jax, c_x) - b_obs_x) ** 2))
cost_obs_y = 0.5 * rho_obs * (jnp.sum((jnp.dot(A_obs_jax, c_y) - b_obs_y) ** 2))
cost_slack = 0.5 * rho_obs * jnp.sum(jnp.maximum(jnp.zeros(num_tot), -d_obs + 1))
cost_eq_x = 0.5 * rho_eq * (jnp.sum((jnp.dot(A_eq_jax, c_x) - bx_eq_jax) ** 2))
cost_eq_y = 0.5 * rho_eq * (jnp.sum((jnp.dot(A_eq_jax, c_y) - by_eq_jax) ** 2))
cost_x = cost_smoothness_x + cost_obs_x - jnp.dot(lamda_x_jax.T, c_x)
cost_y = cost_smoothness_y + cost_obs_y - jnp.dot(lamda_y_jax.T, c_y)
eps = 10 ** (-8.0)
cost = cost_x + cost_y + eps * jnp.sum(c_x ** 2) + eps * jnp.sum(c_y ** 2) + eps * jnp.sum(d_obs ** 2) + eps * jnp.sum(alpha_obs ** 2) + cost_slack
c_x
cost_smoothness_x
aug_sol[:10]
###Output
_____no_output_____
###Markdown
Compute argmin derivative
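The cell below differentiates the solution of the inner problem with respect to the problem parameters via the implicit function theorem: if $y^*(x) = \arg\min_y f(x, y)$ and the Hessian is invertible, then
$\frac{\partial y^*}{\partial x} = -\left(\nabla^2_{yy} f\right)^{-1} \nabla^2_{xy} f$,
which is what `dgx = jnp.dot(-F_yy_inv, F_xy)` computes from the two JAX Hessians.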
###Code
hess_inp = jit(jacfwd(jacrev(cost_fun)))
hess_param = jit(jacfwd(jacrev(cost_fun), argnums=1))
aug_sol = np.hstack((primal_x, primal_y, alpha_obs, d_obs))
aug_sol_jax = jnp.asarray(aug_sol)
params = jnp.hstack((x_init, vx_init, ax_init, x_fin, vx_fin, ax_fin, y_init, vy_init, ay_init, y_fin, vy_fin, ay_fin))
F_yy = hess_inp(aug_sol, params)
F_xy = hess_param(aug_sol, params)
F_yy_inv = jnp.linalg.inv(F_yy)
dgx = jnp.dot(-F_yy_inv, F_xy)
aug_sol.shape, params.shape
cost_fun(aug_sol_jax, params)
###Output
_____no_output_____
###Markdown
Testing
###Code
maxiter = 300
nvar = np.shape(P)[1]
num = np.shape(P)[0]
cost_smoothness = weight_smoothness * np.dot(Pddot.T, Pddot)
alpha_obs = np.zeros((num_obs, num))
d_obs = np.ones((num_obs, num))
lamda_x = np.zeros(nvar)
lamda_y = np.zeros(nvar)
res_obs = np.ones(maxiter)
res_eq = np.ones(maxiter)
d_min = np.ones(maxiter)
cost = cost_smoothness + rho_obs * np.dot(A_obs.T, A_obs)
cost_mat = np.vstack((np.hstack((cost, A_eq.T)), np.hstack((A_eq, np.zeros((np.shape(A_eq)[0], np.shape(A_eq)[0]))))))
cost_mat_inv = np.linalg.inv(cost_mat)
cost.shape, A_eq.shape
np.hstack((cost, A_eq.T)).shape
temp_x_obs = d_obs*np.cos(alpha_obs)*a_obs
b_obs_x = x_obs.reshape(num*num_obs)+temp_x_obs.reshape(num*num_obs)
temp_y_obs = d_obs*np.sin(alpha_obs)*b_obs
b_obs_y = y_obs.reshape(num*num_obs)+temp_y_obs.reshape(num*num_obs)
lincost_x = -lamda_x-rho_obs*np.dot(A_obs.T, b_obs_x)
lincost_y = -lamda_y-rho_obs*np.dot(A_obs.T, b_obs_y)
sol_x = np.dot(cost_mat_inv, np.hstack(( -lincost_x, bx_eq )))
sol_y = np.dot(cost_mat_inv, np.hstack(( -lincost_y, by_eq )))
sol_x.shape, primal_x.shape
lincost_x.shape
bx_eq.shape
np.hstack(( -lincost_x, bx_eq )).shape
sol_x, sol_y
###Output
_____no_output_____ |
Data Structures and Algorithms Nanodegree/P4 - Route Planner/project_notebook.ipynb | ###Markdown
Implementing a Route PlannerIn this project you will use A\* search to implement a "Google-maps" style route planning algorithm.
###Code
# Run this cell first!
from helpers import Map, load_map, show_map
from student_code import shortest_path
%load_ext autoreload
%autoreload 2
###Output
The autoreload extension is already loaded. To reload it, use:
%reload_ext autoreload
###Markdown
Map Basics
###Code
map_10 = load_map('map-10.pickle')
show_map(map_10)
###Output
_____no_output_____
###Markdown
The map above (run the code cell if you don't see it) shows a disconnected network of 10 intersections. The two intersections on the left are connected to each other but they are not connected to the rest of the road network. On the graph above, the edge between 2 nodes (intersections) represents a literal straight road, not just an abstract connection of 2 cities.

These `Map` objects have two properties you will want to use to implement A\* search: `intersections` and `roads`.

**Intersections**

The `intersections` are represented as a dictionary. In this example, there are 10 intersections, each identified by an x,y coordinate. The coordinates are listed below. You can hover over each dot in the map above to see the intersection number.
###Code
map_10.intersections
###Output
_____no_output_____
###Markdown
**Roads**

The `roads` property is a list where, if `i` is an intersection, `roads[i]` contains a list of the intersections that intersection `i` connects to.
###Code
# this shows that intersection 0 connects to intersections 7, 6, and 5
map_10.roads[0]
# This shows the full connectivity of the map
map_10.roads
# map_40 is a bigger map than map_10
map_40 = load_map('map-40.pickle')
show_map(map_40)
###Output
_____no_output_____
###Markdown
Advanced Visualizations
The map above shows a network of roads which spans 40 different intersections (labeled 0 through 39). The `show_map` function which generated this map also takes a few optional parameters which might be useful for visualizing the output of the search algorithm you will write.
* `start` - The "start" node for the search algorithm.
* `goal` - The "goal" node.
* `path` - An array of integers which corresponds to a valid sequence of intersection visits on the map.
###Code
# run this code, note the effect of including the optional
# parameters in the function call.
show_map(map_40, start=5, goal=34, path=[5,16,37,12,34])
###Output
_____no_output_____
###Markdown
Writing your algorithm
You should open the file `student_code.py` in another tab and work on your algorithm there. Do that by selecting `File > Open` and then selecting the appropriate file.

The algorithm you write will be responsible for generating a `path` like the one passed into `show_map` above. In fact, when called with the same map, start and goal as above, your algorithm should produce the path `[5, 16, 37, 12, 34]`.

```bash
> shortest_path(map_40, 5, 34)
[5, 16, 37, 12, 34]
```
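For reference only, a minimal A\* sketch is shown below; it is just one possible approach (names like `shortest_path_sketch` are illustrative, not part of the project API), and it assumes the `Map` interface described above (`intersections` mapping each node to an (x, y) pair and `roads` listing each node's neighbours). Your own implementation in `student_code.py` may differ.

```python
import math
from heapq import heappush, heappop

def shortest_path_sketch(M, start, goal):
    # Straight-line (Euclidean) distance is an admissible heuristic here,
    # because every edge is a literal straight road.
    def dist(a, b):
        (x1, y1), (x2, y2) = M.intersections[a], M.intersections[b]
        return math.hypot(x1 - x2, y1 - y2)

    frontier = [(dist(start, goal), start)]  # (estimated total cost, node)
    came_from = {start: None}
    g_cost = {start: 0.0}
    while frontier:
        _, current = heappop(frontier)
        if current == goal:
            path = []
            while current is not None:
                path.append(current)
                current = came_from[current]
            return path[::-1]
        for neighbour in M.roads[current]:
            new_cost = g_cost[current] + dist(current, neighbour)
            if neighbour not in g_cost or new_cost < g_cost[neighbour]:
                g_cost[neighbour] = new_cost
                came_from[neighbour] = current
                heappush(frontier, (new_cost + dist(neighbour, goal), neighbour))
    return None
```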
###Code
path = shortest_path(map_40, 5, 34)
if path == [5, 16, 37, 12, 34]:
print("great! Your code works for these inputs!")
else:
print("something is off, your code produced the following:")
print(path)
###Output
great! Your code works for these inputs!
###Markdown
Testing your Code
If the code below produces no errors, your algorithm is behaving correctly. You are almost ready to submit! Before you submit, go through the following submission checklist:

**Submission Checklist**
1. Does my code pass all tests?
2. Does my code implement `A*` search and not some other search algorithm?
3. Do I use an **admissible heuristic** to direct search efforts towards the goal?
4. Do I use data structures which avoid unnecessarily slow lookups?

When you can answer "yes" to all of these questions, submit by pressing the Submit button in the lower right!
###Code
from test import test
test(shortest_path)
###Output
All tests pass! Congratulations!
|
Folium/ee-api-folium-setup.ipynb | ###Markdown
Earth Engine Python API and Folium Interactive Mapping
This notebook demonstrates how to set up the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) in the [Google Colaboratory](https://colab.sandbox.google.com/notebooks/welcome.ipynb) platform (Colab) and provides several examples for visualizing Earth Engine processed data interactively using the [folium](https://github.com/python-visualization/folium) library. This notebook was adapted from the [Earth Engine Python API example](https://github.com/google/earthengine-api/blob/master/python/examples/ipynb/ee-api-colab-setup.ipynb).

**Contact:** Qiusheng Wu ()

**Table of Contents**
* [Install Earth Engine API and folium](#install-ee-folium)
* [Import Earth Engine API and authenticate](#import-api)
* [Static map](#static-map)
* [Interactive map](#interactive-map)
  * [Add custom basemaps](#add-basemaps)
  * [Define add_ee_layer() function](#add-ee-layer)
  * [Display ee.Image tiles](#display-ee-image)
  * [A complete example](#a-complete-example)
* [Chart visualization](#chart-vis)

Install Earth Engine API and folium
The Earth Engine API and folium library are installed by default in Google Colaboratory. You only need to install these two Python packages if you are running this notebook using your local Python interpreter.
###Code
!pip install earthengine-api
!pip install folium
###Output
_____no_output_____
###Markdown
Import Earth Engine API and authenticate
The Earth Engine API is installed by default in Google Colaboratory so requires only importing and authenticating. These steps must be completed for each new Colab session, if you restart your Colab kernel, or if your Colab virtual machine is recycled due to inactivity.

Import the API
Run the following cell to import the API into your session.
###Code
import ee
###Output
_____no_output_____
###Markdown
Authenticate and initialize
Run the `ee.Authenticate` function to authenticate your access to Earth Engine servers and `ee.Initialize` to initialize it. Upon running the following cell you'll be asked to grant Earth Engine access to your Google account. Follow the instructions printed to the cell.
###Code
## Trigger the authentication flow. You only need to do this once
ee.Authenticate()
# Initialize the library.
ee.Initialize()
###Output
_____no_output_____
###Markdown
Test the API
Test the API by printing the elevation of Mount Everest.
###Code
# Print the elevation of Mount Everest.
dem = ee.Image('USGS/SRTMGL1_003')
xy = ee.Geometry.Point([86.9250, 27.9881])
elev = dem.sample(xy, 30).first().get('elevation').getInfo()
print('Mount Everest elevation (m):', elev)
###Output
Mount Everest elevation (m): 8729
###Markdown
Static map
`ee.Image` objects can be displayed to notebook output cells. The `IPython.display` module contains the `Image` function, which can display the results of a URL representing an image generated from a call to the Earth Engine `getThumbUrl` function. The following cell will display a thumbnail of the global elevation model.
###Code
# Import the Image function from the IPython.display module.
from IPython.display import Image
# Display a thumbnail of global elevation.
Image(url = dem.updateMask(dem.gt(0))
.getThumbUrl({'min': 0, 'max': 4000, 'dimensions': 512,
'palette': ['006633', 'E5FFCC', '662A00', 'D8D8D8', 'F5F5F5']}))
###Output
_____no_output_____
###Markdown
Interactive map
The [`folium`](https://python-visualization.github.io/folium/) library can be used to display `ee.Image` objects on an interactive [Leaflet](https://leafletjs.com/) map. Folium has no default method for handling tiles from Earth Engine, so one must be defined and added to the `folium.Map` module before use.

The following cells provide an example of adding a method for handling Earth Engine tiles and using it to display an elevation model on a Leaflet map.
###Code
# Import the folium library.
import folium
from folium import plugins
###Output
_____no_output_____
###Markdown
Add custom basemaps
###Code
# Add custom basemaps to folium
basemaps = {
'Google Maps': folium.TileLayer(
tiles = 'https://mt1.google.com/vt/lyrs=m&x={x}&y={y}&z={z}',
attr = 'Google',
name = 'Google Maps',
overlay = True,
control = True
),
'Google Satellite': folium.TileLayer(
tiles = 'https://mt1.google.com/vt/lyrs=s&x={x}&y={y}&z={z}',
attr = 'Google',
name = 'Google Satellite',
overlay = True,
control = True
),
'Google Terrain': folium.TileLayer(
tiles = 'https://mt1.google.com/vt/lyrs=p&x={x}&y={y}&z={z}',
attr = 'Google',
name = 'Google Terrain',
overlay = True,
control = True
),
'Google Satellite Hybrid': folium.TileLayer(
tiles = 'https://mt1.google.com/vt/lyrs=y&x={x}&y={y}&z={z}',
attr = 'Google',
name = 'Google Satellite',
overlay = True,
control = True
),
'Esri Satellite': folium.TileLayer(
tiles = 'https://server.arcgisonline.com/ArcGIS/rest/services/World_Imagery/MapServer/tile/{z}/{y}/{x}',
attr = 'Esri',
name = 'Esri Satellite',
overlay = True,
control = True
)
}
###Output
_____no_output_____
###Markdown
Define add_ee_layer() function
###Code
# Define a method for displaying Earth Engine image tiles on a folium map.
def add_ee_layer(self, ee_object, vis_params, name):
try:
# display ee.Image()
if isinstance(ee_object, ee.image.Image):
map_id_dict = ee.Image(ee_object).getMapId(vis_params)
folium.raster_layers.TileLayer(
tiles = map_id_dict['tile_fetcher'].url_format,
attr = 'Google Earth Engine',
name = name,
overlay = True,
control = True
).add_to(self)
# display ee.ImageCollection()
elif isinstance(ee_object, ee.imagecollection.ImageCollection):
ee_object_new = ee_object.mosaic()
map_id_dict = ee.Image(ee_object_new).getMapId(vis_params)
folium.raster_layers.TileLayer(
tiles = map_id_dict['tile_fetcher'].url_format,
attr = 'Google Earth Engine',
name = name,
overlay = True,
control = True
).add_to(self)
# display ee.Geometry()
elif isinstance(ee_object, ee.geometry.Geometry):
folium.GeoJson(
data = ee_object.getInfo(),
name = name,
overlay = True,
control = True
).add_to(self)
# display ee.FeatureCollection()
elif isinstance(ee_object, ee.featurecollection.FeatureCollection):
ee_object_new = ee.Image().paint(ee_object, 0, 2)
map_id_dict = ee.Image(ee_object_new).getMapId(vis_params)
folium.raster_layers.TileLayer(
tiles = map_id_dict['tile_fetcher'].url_format,
attr = 'Google Earth Engine',
name = name,
overlay = True,
control = True
).add_to(self)
except:
print("Could not display {}".format(name))
# Add EE drawing method to folium.
folium.Map.add_ee_layer = add_ee_layer
###Output
_____no_output_____
###Markdown
Display ee.Image tiles
###Code
# Set visualization parameters.
vis_params = {
'min': 0,
'max': 4000,
'palette': ['006633', 'E5FFCC', '662A00', 'D8D8D8', 'F5F5F5']}
# Create a folium map object.
my_map = folium.Map(location=[20, 0], zoom_start=3, height=500)
# Add custom basemaps
basemaps['Google Maps'].add_to(my_map)
basemaps['Google Satellite Hybrid'].add_to(my_map)
# Add the elevation model to the map object.
my_map.add_ee_layer(dem.updateMask(dem.gt(0)), vis_params, 'DEM')
# Add a layer control panel to the map.
my_map.add_child(folium.LayerControl())
# Add fullscreen button
plugins.Fullscreen().add_to(my_map)
# Display the map.
display(my_map)
###Output
_____no_output_____
###Markdown
A complete example
###Code
# Set visualization parameters.
vis_params = {
'min': 0,
'max': 4000,
'palette': ['006633', 'E5FFCC', '662A00', 'D8D8D8', 'F5F5F5']}
# Create a folium map object.
my_map = folium.Map(location=[40.33, -99.42], zoom_start=4, height=500)
# Add custom basemaps
basemaps['Google Maps'].add_to(my_map)
basemaps['Google Satellite Hybrid'].add_to(my_map)
# Add the elevation model to the map object.
my_map.add_ee_layer(dem.updateMask(dem.gt(0)), vis_params, 'DEM')
# Display ee.Image
dataset = ee.Image('JRC/GSW1_1/GlobalSurfaceWater')
occurrence = dataset.select('occurrence');
occurrenceVis = {'min': 0.0, 'max': 100.0, 'palette': ['ffffff', 'ffbbbb', '0000ff']}
my_map.add_ee_layer(occurrence, occurrenceVis, 'JRC Surface Water')
# Display ee.Geometry
holePoly = ee.Geometry.Polygon(coords = [[[-35, -10], [-35, 10], [35, 10], [35, -10], [-35, -10]]],
proj= 'EPSG:4326',
geodesic = True,
maxError= 1.,
evenOdd = False)
my_map.add_ee_layer(holePoly, {}, 'Polygon')
# Display ee.FeatureCollection
fc = ee.FeatureCollection('TIGER/2018/States')
my_map.add_ee_layer(fc, {}, 'US States')
# Add a layer control panel to the map.
my_map.add_child(folium.LayerControl())
plugins.Fullscreen().add_to(my_map)
# Display the map.
display(my_map)
###Output
_____no_output_____
###Markdown
Chart visualization
Some Earth Engine functions produce tabular data that can be plotted by data visualization packages such as `matplotlib`. The following example demonstrates the display of tabular data from Earth Engine as a scatter plot. See [Charting in Colaboratory](https://colab.sandbox.google.com/notebooks/charts.ipynb) for more information.
###Code
# Import the matplotlib.pyplot module.
import matplotlib.pyplot as plt
# Fetch a Landsat image.
img = ee.Image('LANDSAT/LT05/C01/T1_SR/LT05_034033_20000913')
# Select Red and NIR bands, scale them, and sample 500 points.
samp_fc = img.select(['B3','B4']).divide(10000).sample(scale=30, numPixels=500)
# Arrange the sample as a list of lists.
samp_dict = samp_fc.reduceColumns(ee.Reducer.toList().repeat(2), ['B3', 'B4'])
samp_list = ee.List(samp_dict.get('list'))
# Save server-side ee.List as a client-side Python list.
samp_data = samp_list.getInfo()
# Display a scatter plot of Red-NIR sample pairs using matplotlib.
plt.scatter(samp_data[0], samp_data[1], alpha=0.2)
plt.xlabel('Red', fontsize=12)
plt.ylabel('NIR', fontsize=12)
plt.show()
###Output
_____no_output_____ |
report/chapter-3/detection/ascad.ipynb | ###Markdown
Hamming weight extractor
- Extracts the Hamming weight of the state after the SBox in the first round.
- Appends a Hamming weight column to the ASCAD file.
###Code
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from numpy.random import default_rng
from tqdm import tqdm

# Project-specific helpers used below (ASCADData, full_states, hamming_weights,
# prepare_tvla, rho_test, store_sns) are assumed to come from this project's own modules.

ASCAD_RAW = ASCADData.raw()
IXS = np.arange(len(ASCAD_RAW['traces']))
profile_mask = np.ones(len(IXS)).astype(bool)
profile_mask[2::3] = False
rng = default_rng(seed=42)
num_traces = 10000
detect_ixs = IXS[profile_mask][:num_traces]
lim = max(detect_ixs)
RAW_TRACES = ASCAD_RAW['traces']
RAW_META = ASCAD_RAW['metadata']
full_key_ix = 1
plaintext_ix = 0
trace_len = ASCAD_RAW['traces'].shape[1]
traces = np.zeros((num_traces, trace_len), dtype=np.int8)
profile_key, profile_plain = np.zeros((num_traces, 16), dtype=np.uint8), np.zeros((num_traces, 16), dtype=np.uint8)
for i, ascad_i in tqdm(zip(range(num_traces), detect_ixs), total=num_traces):
traces[i] = RAW_TRACES[ascad_i]
profile_key[i] = RAW_META[ascad_i][full_key_ix]
profile_plain[i] = RAW_META[ascad_i][plaintext_ix]
ASCAD_RAW.close()
###Output
100%|██████████| 10000/10000 [00:22<00:00, 451.66it/s]
###Markdown
Calculate full intermediate states from AES
Based on plaintext and key metadata.
###Code
profile_states = full_states(np.array(profile_plain), np.array(profile_key))
profile_hw = hamming_weights(profile_states[:, 0, 0])
profile_hw_masked = hamming_weights(profile_states[:, 0, 2])
sns.lineplot(data={
"Profile hw distribution": np.bincount(profile_hw),
"Attack hw distribution": np.bincount(profile_hw_masked),
})
traces
tvla_x, tvla_y = prepare_tvla(traces, profile_hw)
PVS = rho_test(tvla_x, tvla_y)
STEP = 10
IX = np.arange(len(PVS[0]), step=STEP)
G = sns.lineplot(data=pd.DataFrame({"$p$-values": PVS[0][::STEP]}, index=IX))
START = 45500
END = START + 1977 * 3
HIGHLIGHT = sns.color_palette()[1]
plt.axvspan(START, END, color=HIGHLIGHT, alpha=.5, zorder=100)
sns.lineplot(y=[10 ** -5] * len(IX), x = IX, color="#FF000080", linestyle="--", label="Threshold", zorder=101)
G.set(yscale="log", ylim=(1, 10 ** -150), xlim=(0, max(IX)), xlabel="Sample point index", ylabel="Confidence values ($p$)",
title="PoI selection on the ASCAD dataset, using the $\\rho$-test")
handles, labels = G.get_legend_handles_labels()
patch = patches.Patch(color=HIGHLIGHT, alpha=.5, label='Selection')
handles.append(patch)
plt.legend(handles=handles)
store_sns(G, "poi-ascad-none")
###Output
_____no_output_____ |
bert/tf_tweet_classification_bert.ipynb | ###Markdown
###Code
###Output
_____no_output_____ |
04.1 - Slidings_NaiveBayes-NoSituation.ipynb | ###Markdown
Importing the libraries and dataset. Also taking a look at the dataset.
###Code
import pandas as pd
import numpy as np
import seaborn as sns
slidings = pd.read_csv('Data/slidings_training.csv')  # forward slash avoids an invalid escape sequence in the path
slidings = slidings.drop(columns='Unnamed: 0',axis=1)
slidings.head()
slidings.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 45931 entries, 0 to 45930
Data columns (total 10 columns):
situation 45931 non-null int64
location 45931 non-null int64
risk 45931 non-null int64
victims 45931 non-null int64
deadly_victims 45931 non-null int64
length 45931 non-null int64
num_points 45931 non-null int64
in_place 45931 non-null int64
sliding 45931 non-null int64
Rain 45931 non-null float64
dtypes: float64(1), int64(9)
memory usage: 3.5 MB
###Markdown
Again the same analysis as the Gaussian naive bayes notebook but without the situation feature.
###Code
random_state = 2
from sklearn.naive_bayes import GaussianNB
from sklearn.model_selection import train_test_split
X = slidings.drop(columns={'sliding','situation'},axis=1)
y= slidings['sliding']
model = GaussianNB()
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.2,random_state=random_state)
model.fit(X_train,y_train)
from sklearn.metrics import classification_report,confusion_matrix
predictions = model.predict(X_test)
print(classification_report(y_test,predictions))
cfm = confusion_matrix(y_test,predictions)
sns.heatmap(cfm, cbar=False, annot=True, cmap='Blues', fmt='d')
model.score(X,y)
from sklearn.model_selection import cross_val_score
scores = cross_val_score(model, X, y, cv=10)
scores
print("Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
scores_precision = cross_val_score(model, X, y, cv=10, scoring='precision')
scores_precision
print("Precision: %0.2f (+/- %0.2f)" % (scores_precision.mean(), scores_precision.std() * 2))
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve, auc
fpr, tpr, _ = roc_curve(y_test, predictions)
roc_auc = auc(fpr, tpr)
plt.figure(figsize=(5,10))
plt.xlim([-0.01, 1.00])
plt.ylim([-0.01, 1.01])
plt.plot(fpr, tpr, lw=3, label='LogRegr ROC curve (area = {:0.2f})'.format(roc_auc))
plt.xlabel('False Positive Rate', fontsize=16)
plt.ylabel('True Positive Rate', fontsize=16)
plt.title('ROC curve', fontsize=16)
plt.legend(loc='lower right', fontsize=13)
plt.plot([0, 1], [0, 1], color='navy', lw=3, linestyle='--')
plt.gca().set_aspect('equal')
plt.show()
###Output
_____no_output_____ |
project3/.Trash-0/files/project_3_starter 9.ipynb | ###Markdown
Project 3: Smart Beta Portfolio and Portfolio Optimization

Overview
Smart beta has a broad meaning, but we can say in practice that when we use the universe of stocks from an index, and then apply some weighting scheme other than market cap weighting, it can be considered a type of smart beta fund. By contrast, a purely alpha fund may create a portfolio of specific stocks, not related to an index, or may choose from the global universe of stocks. The other characteristic that makes a smart beta portfolio "beta" is that it gives its investors a diversified broad exposure to a particular market.

Imagine you're a portfolio manager, and wish to try out some different portfolio weighting methods.

One way to design a portfolio is to look at certain accounting measures (fundamentals) that, based on past trends, indicate stocks that produce better results. For instance, you may start with a hypothesis that dividend-issuing stocks tend to perform better than stocks that do not. This may not always be true of all companies; for instance, Apple does not issue dividends, but has had good historical performance. The hypothesis about dividend-paying stocks may go something like this: Companies that regularly issue dividends may also be more prudent in allocating their available cash, and may indicate that they are more conscious of prioritizing shareholder interests. For example, a CEO may decide to reinvest cash into pet projects that produce low returns. Or, the CEO may do some analysis, identify that reinvesting within the company produces lower returns compared to a diversified portfolio, and so decide that shareholders would be better served if they were given the cash (in the form of dividends). So according to this hypothesis, dividends may be both a proxy for how the company is doing (in terms of earnings and cash flow), but also a signal that the company acts in the best interest of its shareholders. Of course, it's important to test whether this works in practice.

You may also have another hypothesis, with which you wish to design a portfolio that can then be made into an ETF. You may find that investors may wish to invest in passive beta funds, but wish to have less risk exposure (less volatility) in their investments. The goal of having a low volatility fund that still produces returns similar to an index may be appealing to investors who have a shorter investment time horizon, and so are more risk averse. So the objective of your proposed portfolio is to design a portfolio that closely tracks an index, while also minimizing the portfolio variance. Also, if this portfolio can match the returns of the index with less volatility, then it has a higher risk-adjusted return (same return, lower volatility).

Smart Beta ETFs can be designed with both of these two general methods (among others): alternative weighting and minimum volatility ETF.

Instructions
Each problem consists of a function to implement and instructions on how to implement the function. The parts of the function that need to be implemented are marked with a `# TODO` comment. After implementing the function, run the cell to test it against the unit tests we've provided. For each problem, we provide one or more unit tests from our `project_tests` package. These unit tests won't tell you if your answer is correct, but will warn you of any major errors. Your code will be checked for the correct solution when you submit it to Udacity.
Packages
When you implement the functions, you'll only need to use the [Pandas](https://pandas.pydata.org/) and [Numpy](http://www.numpy.org/) packages. Don't import any other packages, otherwise the grader will not be able to run your code.

The other packages that we're importing are `helper`, `project_helper`, and `project_tests`. These are custom packages built to help you solve the problems. The `helper` and `project_helper` modules contain utility functions and graph functions. The `project_tests` package contains the unit tests for all the problems.

Install Packages
###Code
import sys
!{sys.executable} -m pip install -r requirements.txt
###Output
_____no_output_____
###Markdown
Load Packages
###Code
import pandas as pd
import numpy as np
import helper
import project_helper
import project_tests
###Output
_____no_output_____
###Markdown
Market Data
The data source we'll be using is the [Wiki End of Day data](https://www.quandl.com/databases/WIKIP) hosted at [Quandl](https://www.quandl.com). This contains data for many stocks, but we'll just be looking at the S&P 500 stocks. We'll also make things a little easier to solve by narrowing our range of time from 2007-06-30 to 2017-09-30.

Set API Key
Set the `quandl_api_key` variable to your Quandl api key. You can find your Quandl api key [here](https://www.quandl.com/account/api).
###Code
# TODO: Add your Quandl API Key
quandl_api_key = ''
###Output
_____no_output_____
###Markdown
Download Data
###Code
import os
snp500_file_path = 'data/tickers_SnP500.txt'
wiki_file_path = 'data/WIKI_PRICES.csv'
start_date, end_date = '2013-07-01', '2017-06-30'
use_columns = ['date', 'ticker', 'adj_close', 'adj_volume', 'ex-dividend']
if not os.path.exists(wiki_file_path):
with open(snp500_file_path) as f:
tickers = f.read().split()
helper.download_quandl_dataset(quandl_api_key, 'WIKI', 'PRICES', wiki_file_path, use_columns, tickers, start_date, end_date)
else:
print('Data already downloaded')
###Output
_____no_output_____
###Markdown
Load Data
###Code
df = pd.read_csv(wiki_file_path)
###Output
_____no_output_____
###Markdown
Create the Universe
We'll be selecting large dollar volume stocks for our stock universe. We're using this universe, since it is highly liquid.
###Code
percent_top_dollar = 0.2
high_volume_symbols = project_helper.large_dollar_volume_stocks(df, 'adj_close', 'adj_volume', percent_top_dollar)
df = df[df['ticker'].isin(high_volume_symbols)]
###Output
_____no_output_____
###Markdown
2-D Matrices
Here we convert df into multiple DataFrames for each OHLC. We could use a multiindex, but that just stacks the columns for each ticker. We want to be able to apply calculations without using groupby each time.
###Code
close = df.reset_index().pivot(index='date', columns='ticker', values='adj_close')
volume = df.reset_index().pivot(index='date', columns='ticker', values='adj_volume')
ex_dividend = df.reset_index().pivot(index='date', columns='ticker', values='ex-dividend')
###Output
_____no_output_____
###Markdown
View Data
To see what one of these 2-d matrices looks like, let's take a look at the closing prices matrix.
###Code
project_helper.print_dataframe(close)
###Output
_____no_output_____
###Markdown
A quick note on combining weighting schemes. Market cap: stock A and B are both 50% weight. Dividend weighted portfolio: stock A 100%, stock B 0%. To combine them: stock_A is 0.5 * 1, stock_B is 0.5 * 0, then rescale these products so they still sum to 1.

Part 1: Smart Beta Portfolio
In Part 1 of this project, you'll build a portfolio using dividend yield to choose the portfolio weights. A portfolio such as this could be incorporated into a smart beta ETF. You'll compare this portfolio to a market cap weighted index to see how well it performs. Note that in practice, you'll probably get the index weights from a data vendor (such as companies that create indices, like MSCI, FTSE, Standard and Poor's), but for this exercise we will simulate a market cap weighted index.

Index Weights
The index we'll be using is based on large dollar volume stocks. Implement `generate_dollar_volume_weights` to generate the weights for this index. For each date, generate the weights based on dollar volume traded for that date. For example, assume the following is close prices and volume data:
```
            Prices
            A         B         ...
2013-07-08  2         2         ...
2013-07-09  5         6         ...
2013-07-10  1         2         ...
2013-07-11  6         5         ...
...         ...       ...       ...

            Volume
            A         B         ...
2013-07-08  100       340       ...
2013-07-09  240       220       ...
2013-07-10  120       500       ...
2013-07-11  10        100       ...
...         ...       ...       ...
```
The weights created from the function `generate_dollar_volume_weights` should be the following:
```
            A         B         ...
2013-07-08  0.126..   0.194..   ...
2013-07-09  0.759..   0.377..   ...
2013-07-10  0.075..   0.285..   ...
2013-07-11  0.037..   0.142..   ...
...         ...       ...       ...
```
###Code
def generate_dollar_volume_weights(close, volume):
"""
Generate dollar volume weights.
Parameters
----------
close : DataFrame
Close price for each ticker and date
volume : str
Volume for each ticker and date
Returns
-------
dollar_volume_weights : DataFrame
The dollar volume weights for each ticker and date
"""
assert close.index.equals(volume.index)
assert close.columns.equals(volume.columns)
#TODO: Implement function
return None
project_tests.test_generate_dollar_volume_weights(generate_dollar_volume_weights)
###Output
_____no_output_____
###Markdown
View Data
Let's generate the index weights using `generate_dollar_volume_weights` and view them using a heatmap.
###Code
index_weights = generate_dollar_volume_weights(close, volume)
project_helper.plot_weights(index_weights, 'Index Weights')
###Output
_____no_output_____
###Markdown
Portfolio Weights
Now that we have the index weights, let's choose the portfolio weights based on dividends.

Implement `calculate_dividend_weights` to return the weights for each stock based on its total dividend yield over time. This is similar to generating the weights for the index, but it's using dividend data instead. For example, assume the following is ex_dividend data:
```
            A     B
2013-07-08  0     0
2013-07-09  0     1
2013-07-10  0.5   0
2013-07-11  0     0
2013-07-12  2     0
...         ...   ...
```
The weights created from the function `calculate_dividend_weights` should be the following:
```
            A         B
2013-07-08  NaN       NaN
2013-07-09  0         1
2013-07-10  0.333..   0.666..
2013-07-11  0.333..   0.666..
2013-07-12  0.714..   0.285..
...         ...       ...
```
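One way to read the example above (a sketch of a possible approach, not necessarily the graded solution): accumulate the dividends per ticker over time, then normalize each date across tickers.

```python
# Sketch only: running total of dividends per ticker, normalized across each row (date).
cumulative_dividends = ex_dividend.cumsum()
dividend_weights_sketch = cumulative_dividends.div(cumulative_dividends.sum(axis=1), axis=0)
```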
###Code
def calculate_dividend_weights(ex_dividend):
"""
Calculate dividend weights.
Parameters
----------
ex_dividend : DataFrame
Ex-dividend for each stock and date
Returns
-------
dividend_weights : DataFrame
Weights for each stock and date
"""
#TODO: Implement function
return None
project_tests.test_calculate_dividend_weights(calculate_dividend_weights)
###Output
_____no_output_____
###Markdown
View Data
Just like the index weights, let's generate the ETF weights and view them using a heatmap.
###Code
etf_weights = calculate_dividend_weights(ex_dividend)
project_helper.plot_weights(etf_weights, 'ETF Weights')
###Output
_____no_output_____
###Markdown
Returns
Implement `generate_returns` to generate returns data for all the stocks and dates from price data. You might notice we're implementing returns and not log returns. Since we're not dealing with volatility, we don't have to use log returns.
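As a reminder, the simple return for each date is the percentage change from the previous close, $r_t = \frac{p_t}{p_{t-1}} - 1$. A minimal sketch of one possible approach:

```python
# Sketch only: simple (non-log) returns from a prices DataFrame.
returns_sketch = prices / prices.shift(1) - 1   # equivalent to prices.pct_change()
```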
###Code
def generate_returns(prices):
"""
Generate returns for ticker and date.
Parameters
----------
prices : DataFrame
Price for each ticker and date
Returns
-------
returns : Dataframe
The returns for each ticker and date
"""
#TODO: Implement function
return None
project_tests.test_generate_returns(generate_returns)
###Output
_____no_output_____
###Markdown
View Data
Let's generate the closing returns using `generate_returns` and view them using a heatmap.
###Code
returns = generate_returns(close)
project_helper.plot_returns(returns, 'Close Returns')
###Output
_____no_output_____
###Markdown
Weighted Returns
With the returns of each stock computed, we can use them to compute the returns for an index or ETF. Implement `generate_weighted_returns` to create weighted returns using the returns and weights.
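Since the returns and weights DataFrames share the same tickers and dates, one possible sketch is a simple element-wise product:

```python
# Sketch only: scale each stock's return by its weight on that date.
weighted_returns_sketch = returns * weights
```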
###Code
def generate_weighted_returns(returns, weights):
"""
Generate weighted returns.
Parameters
----------
returns : DataFrame
Returns for each ticker and date
weights : DataFrame
Weights for each ticker and date
Returns
-------
weighted_returns : DataFrame
Weighted returns for each ticker and date
"""
assert returns.index.equals(weights.index)
assert returns.columns.equals(weights.columns)
#TODO: Implement function
return None
project_tests.test_generate_weighted_returns(generate_weighted_returns)
###Output
_____no_output_____
###Markdown
View Data
Let's generate the ETF and index returns using `generate_weighted_returns` and view them using a heatmap.
###Code
index_weighted_returns = generate_weighted_returns(returns, index_weights)
etf_weighted_returns = generate_weighted_returns(returns, etf_weights)
project_helper.plot_returns(index_weighted_returns, 'Index Returns')
project_helper.plot_returns(etf_weighted_returns, 'ETF Returns')
###Output
_____no_output_____
###Markdown
Cumulative Returns
To compare performance between the ETF and Index, we're going to calculate the tracking error. Before we do that, we first need to calculate the index and ETF cumulative returns. Implement `calculate_cumulative_returns` to calculate the cumulative returns over time given the returns.
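A sketch of one possible approach (assuming the input is a weighted returns DataFrame): sum across tickers to get the portfolio return for each date, then compound those returns over time.

```python
# Sketch only: portfolio return per date, compounded into cumulative returns.
portfolio_returns = weighted_returns.sum(axis=1)
cumulative_returns_sketch = (portfolio_returns + 1).cumprod()
```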
###Code
def calculate_cumulative_returns(returns):
"""
Calculate cumulative returns.
Parameters
----------
returns : DataFrame
Returns for each ticker and date
Returns
-------
cumulative_returns : Pandas Series
Cumulative returns for each date
"""
#TODO: Implement function
return None
project_tests.test_calculate_cumulative_returns(calculate_cumulative_returns)
###Output
_____no_output_____
###Markdown
View Data
Let's generate the ETF and index cumulative returns using `calculate_cumulative_returns` and compare the two.
###Code
index_weighted_cumulative_returns = calculate_cumulative_returns(index_weighted_returns)
etf_weighted_cumulative_returns = calculate_cumulative_returns(etf_weighted_returns)
project_helper.plot_benchmark_returns(index_weighted_cumulative_returns, etf_weighted_cumulative_returns, 'Smart Beta ETF vs Index')
###Output
_____no_output_____
###Markdown
Tracking Error
In order to check the performance of the smart beta portfolio, we can calculate the tracking error against the index. Implement `tracking_error` to return the tracking error between the ETF and index over time. For reference, we'll be using the following tracking error function:
$$ TE = \sqrt{\frac{\sum_{i=1}^{N}(R_{P} - R_{B})^{2}}{N-1}} $$
where the $R_{P}$ variable is the ETF returns and the $R_{B}$ variable is the index returns.
###Code
def tracking_error(index_weighted_cumulative_returns, etf_weighted_cumulative_returns):
"""
Calculate the tracking error.
Parameters
----------
index_weighted_cumulative_returns : Pandas Series
The weighted index Cumulative returns for each date
etf_weighted_cumulative_returns : Pandas Series
The weighted ETF Cumulative returns for each date
Returns
-------
tracking_error : Pandas Series
The tracking error for each date
"""
assert index_weighted_cumulative_returns.index.equals(etf_weighted_cumulative_returns.index)
#TODO: Implement function
return None
project_tests.test_tracking_error(tracking_error)
###Output
_____no_output_____
###Markdown
View Data

Let's generate the tracking error using `tracking_error` and graph it over time.
###Code
smart_beta_tracking_error = tracking_error(index_weighted_cumulative_returns, etf_weighted_cumulative_returns)
project_helper.plot_tracking_error(smart_beta_tracking_error, 'Smart Beta Tracking Error')
###Output
_____no_output_____
###Markdown
Part 2: Portfolio Optimization

Now, let's create a second portfolio. We'll still reuse the market cap weighted index, but this will be independent of the dividend-weighted portfolio that we created in part 1. We want to both minimize the portfolio variance and also closely track a market cap weighted index. In other words, we're trying to minimize the distance between the weights of our portfolio and the weights of the index.

$Minimize \left [ \sigma^2_p + \lambda \sqrt{\sum_{i=1}^{m}(weight_i - indexWeight_i)^2} \right ]$

where $m$ is the number of stocks in the portfolio, and $\lambda$ is a scaling factor that you can choose. Why are we doing this? Funds are measured by how closely they track their benchmark index, so we penalize portfolio weights that drift too far from the index weights.

Covariance

Implement `get_covariance_returns` to calculate the covariance of the `returns`. We'll use this to calculate the portfolio variance. If we have $m$ stock series, the covariance matrix is an $m \times m$ matrix containing the covariance between each pair of stocks. We can use [numpy.cov](https://docs.scipy.org/doc/numpy/reference/generated/numpy.cov.html) to get the covariance. We give it a 2D array in which each row is a stock series, and each column is an observation at the same period of time.

The covariance matrix $\mathbf{P} = \begin{bmatrix}\sigma^2_{1,1} & ... & \sigma^2_{1,m} \\ ... & ... & ...\\\sigma_{m,1} & ... & \sigma^2_{m,m} \\\end{bmatrix}$
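A minimal illustration of the `numpy.cov` call described above (the numbers are made up, and note that this notebook's `returns` DataFrame has one column per ticker, so you may need to transpose it — or use the `rowvar` argument — before calling `np.cov`):

```python
import numpy as np

# Hypothetical returns for 3 stocks over 4 days: one ROW per stock, as np.cov expects by default
stock_returns = np.array([
    [0.01, -0.02, 0.00,  0.03],
    [0.02,  0.01, -0.01, 0.00],
    [0.00,  0.00, 0.02, -0.01],
])

covariance = np.cov(stock_returns)
print(covariance.shape)  # (3, 3): covariance between each pair of stocks
```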
###Code
def get_covariance_returns(returns):
"""
Calculate covariance matrices.
Parameters
----------
returns : DataFrame
Returns for each ticker and date
Returns
-------
returns_covariance : 2 dimensional Ndarray
The covariance of the returns
"""
#TODO: Implement function
return None
project_tests.test_get_covariance_returns(get_covariance_returns)
###Output
_____no_output_____
###Markdown
View Data

Let's look at the covariance generated from `get_covariance_returns`.
###Code
covariance_returns = get_covariance_returns(returns)
covariance_returns = pd.DataFrame(covariance_returns, returns.columns, returns.columns)
covariance_returns_correlation = np.linalg.inv(np.diag(np.sqrt(np.diag(covariance_returns))))
covariance_returns_correlation = pd.DataFrame(
covariance_returns_correlation.dot(covariance_returns).dot(covariance_returns_correlation),
covariance_returns.index,
covariance_returns.columns)
project_helper.plot_covariance_returns_correlation(
covariance_returns_correlation,
'Covariance Returns Correlation Matrix')
###Output
_____no_output_____
###Markdown
portfolio variance

We can write the portfolio variance as $\sigma^2_p = \mathbf{x^T} \mathbf{P} \mathbf{x}$. Recall that $\mathbf{x^T} \mathbf{P} \mathbf{x}$ is called the quadratic form. We can use the cvxpy function `quad_form(x,P)` to get the quadratic form.

Distance from index weights

We want portfolio weights that track the index closely. So we want to minimize the distance between them. Recall from the Pythagorean theorem that you can get the distance between two points in an x,y plane by adding the square of the x and y distances and taking the square root. Extending this to any number of dimensions is called the L2 norm. So: $\sqrt{\sum_{i=1}^{n}(weight_i - indexWeight_i)^2}$, which can also be written as $\left \| \mathbf{x} - \mathbf{index} \right \|_2$. There's a cvxpy function called [norm()](https://www.cvxpy.org/api_reference/cvxpy.atoms.other_atoms.html#norm): `norm(x, p=2, axis=None)`. The default is already set to find an L2 norm, so you would pass in one argument, which is the difference between your portfolio weights and the index weights.

objective function

We want to minimize both the portfolio variance and the distance of the portfolio weights from the index weights. We also want to choose a `scale` constant, which is $\lambda$ in the expression

$\mathbf{x^T} \mathbf{P} \mathbf{x} + \lambda \left \| \mathbf{x} - \mathbf{index} \right \|_2$

This lets us choose how much priority we give to minimizing the difference from the index, relative to minimizing the variance of the portfolio. If you choose a higher value for `scale` ($\lambda$), you give more priority to tracking the index closely, relative to minimizing the portfolio variance. We can find the objective function using cvxpy `objective = cvx.Minimize()`. Can you guess what to pass into this function?

constraints

We can also define our constraints in a list. For example, you'd want the weights to sum to one. So $\sum_{i=1}^{n}x_i = 1$. You may also need to go long only, which means no shorting, so no negative weights. So $x_i \geq 0$ for all $i$. You could save a variable as `[x >= 0, sum(x) == 1]`, where x was created using `cvx.Variable()`.

optimization

So now that we have our objective function and constraints, we can solve for the values of $\mathbf{x}$. cvxpy has the constructor `Problem(objective, constraints)`, which returns a `Problem` object. The `Problem` object has a function solve(), which returns the minimum of the solution. In this case, this is the minimum variance of the portfolio. It also updates the vector $\mathbf{x}$. We can check out the values of $\mathbf{x}$ that gave the minimum portfolio variance by using `x.value`
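To make those cvxpy pieces concrete, here is a tiny self-contained sketch with a made-up 2-asset covariance matrix and index weight vector (it only illustrates the API calls named above; the graded function below defines the real inputs):

```python
import cvxpy as cvx
import numpy as np

# Made-up inputs: a 2-asset covariance matrix, index weights, and a scale (lambda)
P = np.array([[0.10, 0.01],
              [0.01, 0.05]])
index = np.array([0.6, 0.4])
scale = 2.0

x = cvx.Variable(2)
objective = cvx.Minimize(cvx.quad_form(x, P) + scale * cvx.norm(x - index))
constraints = [x >= 0, cvx.sum(x) == 1]

problem = cvx.Problem(objective, constraints)
problem.solve()     # returns the minimum objective value
print(x.value)      # the optimal weights
```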
###Code
import cvxpy as cvx
def get_optimal_weights(covariance_returns, index_weights, scale=2.0):
"""
Find the optimal weights.
Parameters
----------
covariance_returns : 2 dimensional Ndarray
The covariance of the returns
index_weights : Pandas Series
Index weights for all tickers at a period in time
scale : int
        The penalty factor for weights that deviate from the index
Returns
-------
x : 1 dimensional Ndarray
The solution for x
"""
assert len(covariance_returns.shape) == 2
assert len(index_weights.shape) == 1
assert covariance_returns.shape[0] == covariance_returns.shape[1] == index_weights.shape[0]
#TODO: Implement function
return None
project_tests.test_get_optimal_weights(get_optimal_weights)
###Output
_____no_output_____
###Markdown
Optimized Portfolio

Using the `get_optimal_weights` function, let's generate the optimal ETF weights without rebalancing. We can do this by feeding in the covariance of the entire history of data. We also need to feed in a set of index weights. We'll go with the average weights of the index over time.
###Code
# The average index weights at each point in time
median_index_weights = (index_weights.cumsum().T / range(1, len(index_weights)+1)).T
raw_optimal_single_rebalance_etf_weights = get_optimal_weights(covariance_returns.values, median_index_weights.iloc[-1])
optimal_single_rebalance_etf_weights = pd.DataFrame(
np.tile(raw_optimal_single_rebalance_etf_weights, (len(returns.index), 1)),
returns.index,
returns.columns)
###Output
_____no_output_____
###Markdown
With our ETF weights built, let's compare the ETF to the index. Run the next cell to calculate the ETF returns and compare them to the index returns.
###Code
optim_etf_returns = generate_weighted_returns(returns, optimal_single_rebalance_etf_weights)
optim_etf_cumulative_returns = calculate_cumulative_returns(optim_etf_returns)
project_helper.plot_benchmark_returns(index_weighted_cumulative_returns, optim_etf_cumulative_returns, 'Optimized ETF vs Index')
optim_etf_tracking_error = tracking_error(index_weighted_cumulative_returns, optim_etf_cumulative_returns)
project_helper.plot_tracking_error(optim_etf_tracking_error, 'Optimized ETF Tracking Error')
###Output
_____no_output_____
###Markdown
Rebalance Portfolio Over Time

The single optimized ETF portfolio used the same weights for the entire history. These might not be the optimal weights for the entire period. Let's rebalance the portfolio over the same period instead of using the same weights. Implement `rebalance_portfolio` to rebalance a portfolio. Rebalance the portfolio every n days, where n is given as `shift_size`. When rebalancing, you should look back a certain number of days of data in the past, denoted as `chunk_size`. Using this data, compute the optimal weights using `get_optimal_weights` and `get_covariance_returns`.
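The loop being described might look roughly like this outline (illustrative only — variable names like `window_returns` are mine, and it relies on `get_covariance_returns` and `get_optimal_weights` being implemented above):

```python
# Rough outline of a rolling rebalance (not the graded implementation)
def rebalance_outline(returns, median_index_weights, shift_size, chunk_size):
    all_weights = []
    for end in range(chunk_size, len(returns), shift_size):
        window_returns = returns.iloc[end - chunk_size:end]        # look back chunk_size days
        window_index = median_index_weights.iloc[end - 1]          # index weights at the rebalance date
        covariance = get_covariance_returns(window_returns)
        all_weights.append(get_optimal_weights(covariance, window_index))
    return all_weights
```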
###Code
def rebalance_portfolio(returns, median_index_weights, shift_size, chunk_size):
"""
Get weights for each rebalancing of the portfolio.
Parameters
----------
returns : DataFrame
Returns for each ticker and date
median_index_weights : DataFrame
Median index weight for each ticker and date
shift_size : int
The number of days between each rebalance
chunk_size : int
The number of days to look in the past for rebalancing
Returns
-------
all_rebalance_weights : list of Ndarrays
The ETF weights for each point they are rebalanced
"""
assert returns.index.equals(median_index_weights.index)
assert returns.columns.equals(median_index_weights.columns)
assert shift_size > 0
assert chunk_size >= 0
#TODO: Implement function
return None
project_tests.test_rebalance_portfolio(rebalance_portfolio)
###Output
_____no_output_____
###Markdown
Run the following cell to rebalance the portfolio using `rebalance_portfolio`.
###Code
chunk_size = 250
shift_size = 5
all_rebalance_weights = rebalance_portfolio(returns, median_index_weights, shift_size, chunk_size)
###Output
_____no_output_____
###Markdown
Portfolio Turnover

With the portfolio rebalanced, we need a metric to measure the cost of rebalancing the portfolio. Implement `get_portfolio_turnover` to calculate the annual portfolio turnover. You can calculate this by multiplying the average turnover per rebalance by the number of rebalances in a year.
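One common way to express annual turnover (a sketch that assumes turnover per rebalance is the summed absolute change in weights — check the project's exact definition against the unit test before relying on it):

```python
import numpy as np

def annual_turnover_sketch(all_rebalance_weights, shift_size, n_trading_days_in_year=252):
    # Total absolute weight change between consecutive rebalances
    changes = [np.abs(np.asarray(curr) - np.asarray(prev)).sum()
               for prev, curr in zip(all_rebalance_weights[:-1], all_rebalance_weights[1:])]
    rebalances_per_year = n_trading_days_in_year / shift_size
    return np.mean(changes) * rebalances_per_year
```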
###Code
def get_portfolio_turnover(all_rebalance_weights, shift_size, rebalance_count, n_trading_days_in_year=252):
"""
    Calculate portfolio turnover.
Parameters
----------
all_rebalance_weights : list of Ndarrays
The ETF weights for each point they are rebalanced
shift_size : int
The number of days between each rebalance
rebalance_count : int
Number of times the portfolio was rebalanced
n_trading_days_in_year: int
Number of trading days in a year
Returns
-------
portfolio_turnover : float
The portfolio turnover
"""
assert shift_size > 0
assert rebalance_count > 0
#TODO: Implement function
return None
project_tests.test_get_portfolio_turnover(get_portfolio_turnover)
###Output
_____no_output_____
###Markdown
Run the following cell to get the portfolio turnover from `get_portfolio_turnover`.
###Code
print(get_portfolio_turnover(all_rebalance_weights, shift_size, returns.shape[1]))
###Output
_____no_output_____ |
SupportingFigure.ipynb | ###Markdown
Constants
###Code
F = 96485 # C / mol
ne_H2 = 2 # mol e / mol H
m_H2 = 2 # g/mol
g_to_kg = 1000
G0 = 237000 # J / mol
H0 = 285000 # J / mol
S0 = 163 # J / mol-K
V_to_eff = 26.8 # conversion factor for hydrogen
### Model assumptions = = = =
# System function
replace_rate = 7
MINCELLJ = 1 #A/cm^2
#Operating
ELECTRIC = 0.01# $/kWh
CAPACITY = 0.4
#Durability
voltage_drift = 1E-6 # V/h
stack_replacement = 0.15 # fraction
# Finances
IRR = 0.10 # fractional rate
INFLATION = 0.019 # fractional rate
###Output
_____no_output_____
###Markdown
Cell and Stack Design
###Code
## Balance of plant
# Reference: https://www.nrel.gov/hydrogen/assets/docs/current-central-pem-electrolysis-v3-2018.xlsm
## Stack model
hydrogen_prod = 50000/24 #kg/h ... 50 tonnes per day divided by 24 hours
max_year = 20
###Output
_____no_output_____
###Markdown
Cash flow set up
###Code
def system_model(stack_cost, min_cell_voltage, cell_J=MINCELLJ, replace_rate=replace_rate):
#Capital costs
#====Secondary input====
bop_electrical = 5 #kWh/kg
elec_bop_cost = 82 #$/kW
mech_bop_cost = 76 #$/(kg/day)
cell_prod = cell_J / (ne_H2*F) * (3600 / g_to_kg * m_H2) #kg/h/cm^2
total_cell_area = hydrogen_prod/cell_prod # cm^2
maximum_power = (min_cell_voltage + voltage_drift*8760*stack_replacement) * cell_J * total_cell_area / 1000 # kW
#====Model inputs
system_cost = stack_cost*maximum_power + elec_bop_cost*maximum_power + mech_bop_cost*hydrogen_prod*24
#====
return system_cost
###Output
_____no_output_____
###Markdown
Calculate LCOH
###Code
def NPV_calc(h2_price, system_cost, stack_cost, min_cell_voltage, show_results=False):
#Scheduled cash flow
CashFlow = pd.DataFrame({'Years':[],'Stack Life':[],'Inflation Year':[],
'Stack Eff':[], 'Hydrogen Sales':[],'Replacement Costs':[],'Operating Costs':[],
'Net Cash Flow':[],'Discounted Flow':[]})
## Initialize variables
year = [1]
stacklife = [0]
inflationyear = [1]
stackeff = [0]
hydrogensales = [0]
replacement_costs = [0]
operating_costs = [0]
net_cash = [-system_cost]
discounted_cash = [net_cash[-1]/(1+IRR)**year[-1]]
for x in range(2,max_year+2):
year.append(x)
inflationyear.append((1+INFLATION)**x)
#Hydrogen sales
hydrogensales.append(h2_price*hydrogen_prod*8760*CAPACITY) # [$/kg] * [kg/h] * [h/year] * percent
hydrogensales[-1] = hydrogensales[-1]*inflationyear[-1]
#Stacklife counting
if year[-1] == (max_year+1): # Don't replace stack in the last year of life
stacklife.append(stacklife[-1]+1)
elif stacklife[-1] < replace_rate:
stacklife.append(stacklife[-1]+1)
else:
stacklife.append(1)
        #Maintenance costs are 0.5% of system cost per year + stack replacement
replacement_costs.append(stack_replacement*system_cost*(stacklife[-1]==replace_rate) + 0.005*system_cost)
replacement_costs[-1] = replacement_costs[-1]*inflationyear[-1]
#Stack efficiency is the average of the beginning and end of year efficiency
try:
stackeff.append(V_to_eff*(min_cell_voltage + voltage_drift*8760*CAPACITY*(stacklife[-1]+stacklife[-1]-1) / 2) )
except:
stackeff.append(V_to_eff*(MINCELLV + voltage_drift*8760*CAPACITY*(stacklife[-1]+stacklife[-1]-1) / 2) )
#Operating costs are assumed to be dominated by electricity prices
operating_costs.append(ELECTRIC*stackeff[-1]*hydrogen_prod*8760*CAPACITY) # [$/kWh] * [kWh/kg] * [kg/h] * [h/year] * percent
operating_costs[-1] = operating_costs[-1]*inflationyear[-1]
#Net cash flow = Sales - replacement - operating costs
net_cash.append(hydrogensales[-1] - operating_costs[-1] - replacement_costs[-1])
#Discounted cash flow = net cash flow / (1+IRR)^year
discounted_cash.append(net_cash[-1]/(1+IRR)**year[-1])
CashFlow['Years'] = year
CashFlow['Stack Life'] = stacklife
CashFlow['Inflation Year'] = inflationyear
CashFlow['Stack Eff'] = stackeff
CashFlow['Hydrogen Sales'] = hydrogensales
CashFlow['Replacement Costs'] = replacement_costs
CashFlow['Operating Costs'] = operating_costs
CashFlow['Net Cash Flow'] = net_cash
CashFlow['Discounted Flow'] = discounted_cash
if show_results:
return CashFlow
else:
return sum(CashFlow['Discounted Flow'])
def LCOH(stack_cost,cell_V):
# System function
system_cost = system_model(stack_cost=stack_cost, min_cell_voltage=cell_V)
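    # NPV_calc is affine in the hydrogen price, so evaluating it at two prices and
    # interpolating to NPV = 0 gives the break-even price, i.e. the LCOH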
low_price = 0.10
high_price = 20
low_val = NPV_calc(low_price, min_cell_voltage=cell_V,system_cost=system_cost, stack_cost=stack_cost)
high_val = NPV_calc(high_price, min_cell_voltage=cell_V,system_cost=system_cost, stack_cost=stack_cost);
slope = (high_price - low_price)/(high_val - low_val)
return high_price - high_val*slope
###Output
_____no_output_____
###Markdown
Iterate across LCOH vs model assumptions

Iteratively set up the model and build the NPV function before running the LCOH function to calculate LCOH.
###Code
## Set up a mesh grid for desired variables
Es = np.linspace(1,2.5,20)
Stacks = np.logspace(1,3.7,20)
XX,YY = np.meshgrid(Es,Stacks) # syntax is columns, rows
rs, cs = np.shape(XX)
Z = np.zeros((rs,cs))
for row in range(0,len(Stacks)):
for col in range(0,len(Es)):
cell_V = XX[row,col]
stack_cost = YY[row,col]
Z[row,col] = LCOH(stack_cost=stack_cost, cell_V=cell_V)
fig, ax = plt.subplots(figsize=(2.5,2.5))
manual_lines = [0.5, 0.75, 1, 2, 3, 5]
ax.plot(1.9, 526,'*',color='salmon') # Base case
ax.text(1.05,255,'Base, \$0.01 kWh$^{-1}$',color='salmon',rotation=-15) # Base case
ax.plot(1.75, 200,'^',color='skyblue') # Expensive efficient
ax.plot(2.4, 50,'o',color='gold') # Cheap inefficient
CS = ax.contour(XX, YY, Z, manual_lines, cmap='plasma', vmin=0, vmax=10)
ax.set_yscale('log')
ax.set_xlabel('Voltage [V]')
ax.set_ylabel('Stack Cost [\$/kW]')
ax.clabel(CS, inline=True, colors='k', fmt='$ %1.2f')
plt.gcf().set_dpi(200)
###Output
_____no_output_____ |
python/vespa/docs/sphinx/source/evaluation.ipynb | ###Markdown
Evaluate query models

> Define metrics and evaluate query models

Example setup

Connect to the application and define a query model.
###Code
from vespa.application import Vespa
from vespa.query import Query, RankProfile, OR
app = Vespa(url = "https://api.cord19.vespa.ai")
query_model = Query(
match_phase = OR(),
rank_profile = RankProfile(name="bm25", list_features=True))
###Output
_____no_output_____
###Markdown
Define some labelled data.
###Code
labelled_data = [
{
"query_id": 0,
"query": "Intrauterine virus infections and congenital heart disease",
"relevant_docs": [{"id": 0, "score": 1}, {"id": 3, "score": 1}]
},
{
"query_id": 1,
"query": "Clinical and immunologic studies in identical twins discordant for systemic lupus erythematosus",
"relevant_docs": [{"id": 1, "score": 1}, {"id": 5, "score": 1}]
}
]
###Output
_____no_output_____
###Markdown
Define metrics
###Code
from vespa.evaluation import MatchRatio, Recall, ReciprocalRank
eval_metrics = [MatchRatio(), Recall(at=10), ReciprocalRank(at=10)]
###Output
_____no_output_____
###Markdown
Evaluate in batch
###Code
evaluation = app.evaluate(
labelled_data = labelled_data,
eval_metrics = eval_metrics,
query_model = query_model,
id_field = "id",
)
evaluation
###Output
_____no_output_____
###Markdown
Evaluate specific query

> You can have finer control with the `evaluate_query` method.
###Code
from pandas import concat, DataFrame
evaluation = []
for query_data in labelled_data:
query_evaluation = app.evaluate_query(
eval_metrics = eval_metrics,
query_model = query_model,
query_id = query_data["query_id"],
query = query_data["query"],
id_field = "id",
relevant_docs = query_data["relevant_docs"],
default_score = 0
)
evaluation.append(query_evaluation)
evaluation = DataFrame.from_records(evaluation)
evaluation
###Output
_____no_output_____ |
python_tutorial/Exercise_ Strings and Dictionaries.ipynb | ###Markdown
**[Python Home Page](https://www.kaggle.com/learn/python)**

---

Try It Yourself

You are almost done with the course. Nice job. Fortunately, we have a couple more interesting problems for you before you go. As always, run the setup code below before working on the questions.
###Code
from learntools.core import binder; binder.bind(globals())
from learntools.python.ex6 import *
print('Setup complete.')
###Output
_____no_output_____
###Markdown
Exercises

0.

Let's start with a string lightning round to warm up. What are the lengths of the strings below? For each of the five strings below, predict what `len()` would return when passed that string. Use the variable `length` to record your answer, then run the cell to check whether you were right.
###Code
a = ""
length=len(a)
q0.a.check()
b = "it's ok"
length = len(b)
q0.b.check()
c = 'it\'s ok'
length = len(c)
q0.c.check()
d = """hey"""
length = len(d)
q0.d.check()
e = '\n'
length = len(e)
q0.e.check()
###Output
_____no_output_____
###Markdown
1.

There is a saying that "Data scientists spend 80% of their time cleaning data, and 20% of their time complaining about cleaning data." Let's see if you can write a function to help clean US zip code data. Given a string, it should return whether or not that string represents a valid zip code. For our purposes, a valid zip code is any string consisting of exactly 5 digits.

HINT: `str` has a method that will be useful here. Use `help(str)` to review a list of string methods.
###Code
def is_valid_zip(zip_str):
"""Returns whether the input string is a valid (5 digit) zip code
"""
return len(zip_str)==5 and zip_str.isdigit()
pass
# Check your answer
q1.check()
#q1.hint()
#q1.solution()
###Output
_____no_output_____
###Markdown
2.

A researcher has gathered thousands of news articles. But she wants to focus her attention on articles including a specific word. Complete the function below to help her filter her list of articles. Your function should meet the following criteria:

- Do not include documents where the keyword string shows up only as a part of a larger word. For example, if she were looking for the keyword “closed”, you would not include the string “enclosed.”
- She does not want you to distinguish upper case from lower case letters. So the phrase “Closed the case.” would be included when the keyword is “closed”.
- Do not let periods or commas affect what is matched. “It is closed.” would be included when the keyword is “closed”. But you can assume there are no other types of punctuation.
###Code
def word_search(documents, keyword):
indices = []
# Iterate through the indices (i) and elements (doc) of documents
for i, doc in enumerate(documents):
# Split the string doc into a list of words (according to whitespace)
tokens = doc.split()
# Make a transformed list where we 'normalize' each word to facilitate matching.
# Periods and commas are removed from the end of each word, and it's set to all lowercase.
normalized = [token.rstrip('.,').lower() for token in tokens]
# Is there a match? If so, update the list of matching indices.
if keyword.lower() in normalized:
indices.append(i)
return indices
pass
# Check your answer
q2.check()
#q2.hint()
#q2.solution()
###Output
_____no_output_____
###Markdown
3.

Now the researcher wants to supply multiple keywords to search for. Complete the function below to help her. (You're encouraged to use the `word_search` function you just wrote when implementing this function. Reusing code in this way makes your programs more robust and readable - and it saves typing!)
###Code
def multi_word_search(documents, keywords):
keyword_to_indices = {}
for keyword in keywords:
keyword_to_indices[keyword] = word_search(documents, keyword)
return keyword_to_indices
pass
# Check your answer
q3.check()
#q3.solution()
###Output
_____no_output_____ |
Neural-networks/NNClas_Wine.ipynb | ###Markdown
A TUTORIAL ON NEURAL NETWORK CLASSIFICATION USING KERAS AND TENSORFLOW

by Sebastian T. Glavind, August, 2020

Introduction

In this tutorial, we will consider how to define, train, and predict with a simple feed-forward neural network model using keras and tensorflow. First, we will consider how to use neural networks for multi-class classification. Second, we will see how to choose the hyperparameters using random search cross-validation. Note that random search is found to be superior to grid search when the model contains many hyperparameters. The interested reader is referred to the prominent textbook of Goodfellow et al. (2016) for an introduction to neural networks, and Geron (2019) for an excellent guide on their implementation.

We will consider a small data set in the tutorial, i.e. the Wine recognition data set, so that training can be performed on a standard computer; regularization thus becomes extra important! In this regard, we will consider drop-out regularization.

***
I. Goodfellow, Y. Bengio, and A. Courville, Deep Learning. MIT Press, 2016, http://www.deeplearningbook.org.
A. Geron, Hands-on machine learning with Scikit-Learn, Keras, and TensorFlow: Concepts, tools, and techniques to build intelligent systems. O’Reilly Media, 2019.
***

Prelude
###Code
import numpy as np
import scipy.stats as ss
import matplotlib.pyplot as plt
from sklearn.datasets import load_wine
import sklearn.preprocessing
import sklearn.model_selection
import sklearn.compose
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
import pickle
%matplotlib inline
# tensorflow 2.1
import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras.layers import Input, InputLayer, Dense, Flatten, Dropout, BatchNormalization
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import CSVLogger, ModelCheckpoint, EarlyStopping, ReduceLROnPlateau
from tensorflow.keras.wrappers.scikit_learn import KerasRegressor
###Output
_____no_output_____
###Markdown
Multi-class classification with a neural network

Data set

In this tutorial, we will consider the Wine recognition data set, see below, which ships with `scikit-learn`:
###Code
WineData = load_wine()
print(WineData['DESCR'])
###Output
.. _wine_dataset:
Wine recognition dataset
------------------------
**Data Set Characteristics:**
:Number of Instances: 178 (50 in each of three classes)
:Number of Attributes: 13 numeric, predictive attributes and the class
:Attribute Information:
- Alcohol
- Malic acid
- Ash
- Alcalinity of ash
- Magnesium
- Total phenols
- Flavanoids
- Nonflavanoid phenols
- Proanthocyanins
- Color intensity
- Hue
- OD280/OD315 of diluted wines
- Proline
- class:
- class_0
- class_1
- class_2
:Summary Statistics:
============================= ==== ===== ======= =====
Min Max Mean SD
============================= ==== ===== ======= =====
Alcohol: 11.0 14.8 13.0 0.8
Malic Acid: 0.74 5.80 2.34 1.12
Ash: 1.36 3.23 2.36 0.27
Alcalinity of Ash: 10.6 30.0 19.5 3.3
Magnesium: 70.0 162.0 99.7 14.3
Total Phenols: 0.98 3.88 2.29 0.63
Flavanoids: 0.34 5.08 2.03 1.00
Nonflavanoid Phenols: 0.13 0.66 0.36 0.12
Proanthocyanins: 0.41 3.58 1.59 0.57
Colour Intensity: 1.3 13.0 5.1 2.3
Hue: 0.48 1.71 0.96 0.23
OD280/OD315 of diluted wines: 1.27 4.00 2.61 0.71
Proline: 278 1680 746 315
============================= ==== ===== ======= =====
:Missing Attribute Values: None
:Class Distribution: class_0 (59), class_1 (71), class_2 (48)
:Creator: R.A. Fisher
:Donor: Michael Marshall (MARSHALL%[email protected])
:Date: July, 1988
This is a copy of UCI ML Wine recognition datasets.
https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data
The data is the results of a chemical analysis of wines grown in the same
region in Italy by three different cultivators. There are thirteen different
measurements taken for different constituents found in the three types of
wine.
Original Owners:
Forina, M. et al, PARVUS -
An Extendible Package for Data Exploration, Classification and Correlation.
Institute of Pharmaceutical and Food Analysis and Technologies,
Via Brigata Salerno, 16147 Genoa, Italy.
Citation:
Lichman, M. (2013). UCI Machine Learning Repository
[https://archive.ics.uci.edu/ml]. Irvine, CA: University of California,
School of Information and Computer Science.
.. topic:: References
(1) S. Aeberhard, D. Coomans and O. de Vel,
Comparison of Classifiers in High Dimensional Settings,
Tech. Rep. no. 92-02, (1992), Dept. of Computer Science and Dept. of
Mathematics and Statistics, James Cook University of North Queensland.
(Also submitted to Technometrics).
The data was used with many others for comparing various
classifiers. The classes are separable, though only RDA
has achieved 100% correct classification.
(RDA : 100%, QDA 99.4%, LDA 98.9%, 1NN 96.1% (z-transformed data))
(All results using the leave-one-out technique)
(2) S. Aeberhard, D. Coomans and O. de Vel,
"THE CLASSIFICATION PERFORMANCE OF RDA"
Tech. Rep. no. 92-01, (1992), Dept. of Computer Science and Dept. of
Mathematics and Statistics, James Cook University of North Queensland.
(Also submitted to Journal of Chemometrics).
###Markdown
Collect raw data
###Code
Xraw, yraw = WineData['data'], WineData['target']
nTarget = np.array([sum(yraw==0), sum(yraw==1), sum(yraw==2)])
print('Samples in each class', nTarget)
###Output
Samples in each class [59 71 48]
###Markdown
Simple random over-sampling (Bootstrapping) to manage class-imbalance
###Code
Xboost = list()
yboost = list()
for i in range(len(nTarget)):
index_i = np.where( yraw==i )[0]
X_i_boost = Xraw[index_i,:]
y_i_boost = yraw[index_i]
if nTarget[i] < max(nTarget):
index_i_boost = sklearn.utils.resample(index_i, replace=True, n_samples=max(nTarget)-nTarget[i],
random_state=123)
X_i_boost = np.vstack([ X_i_boost, Xraw[index_i_boost,:] ])
y_i_boost = np.concatenate([ y_i_boost, yraw[index_i_boost] ])
Xboost.append(X_i_boost)
yboost.append(y_i_boost)
Xboost = np.vstack(Xboost)
yboost = np.concatenate(yboost)
###Output
_____no_output_____
###Markdown
Define train and test data
###Code
Xtrain, Xtest, ytrain, ytest = sklearn.model_selection.train_test_split(Xboost, yboost, stratify=yboost,
train_size=.8, shuffle=True,
random_state=1)
###Output
_____no_output_____
###Markdown
Scaling of inputs according to scale of training data
###Code
# Standardize features
scaler = sklearn.preprocessing.StandardScaler()
scaler.fit(Xtrain)
sXtrain = scaler.transform(Xtrain)
sXtest = scaler.transform(Xtest)
###Output
_____no_output_____
###Markdown
NN model

Model definition

In this section, we will consider a neural network with a single hidden layer of 16 units (see the model summary below) and regularize the network training using drop-out. Other kinds of regularization are e.g. early stopping and batch normalization (commented out below); see e.g. Goodfellow et al. (2016) for a reference.

***
I. Goodfellow, Y. Bengio, and A. Courville, Deep Learning. MIT Press, 2016, http://www.deeplearningbook.org.
***
###Code
NN_model = keras.models.Sequential()
# The Input Layer :
NN_model.add(Input(shape=(sXtrain.shape[1])))
# NN_model.add(Flatten(input_shape=[Xtrain.shape[1]]))
# NN_model.add(InputLayer(input_shape=Xtrain.shape[1]))
NN_model.add(Dropout(.2))
# NN_model.add(BatchNormalization())
# The Hidden Layers :
for layer in range(1):
NN_model.add(Dense(16, kernel_initializer='he_normal',activation='relu'))
NN_model.add(Dropout(.5))
# NN_model.add(BatchNormalization())
# The Output Layer :
NN_model.add(Dense(3, kernel_initializer='he_normal',activation='softmax'))
# Compile the network :
NN_model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
NN_model.summary()
###Output
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
dropout (Dropout) (None, 13) 0
_________________________________________________________________
dense (Dense) (None, 16) 224
_________________________________________________________________
dropout_1 (Dropout) (None, 16) 0
_________________________________________________________________
dense_1 (Dense) (None, 3) 51
=================================================================
Total params: 275
Trainable params: 275
Non-trainable params: 0
_________________________________________________________________
###Markdown
Model training
###Code
# checkpoint_name = 'Weights-{epoch:03d}--{val_loss:.5f}.hdf5'
# checkpoint = ModelCheckpoint(checkpoint_name, monitor='val_loss', verbose = 1, save_best_only = True, mode ='auto')
# callbacks_list = [checkpoint]
# mcp = ModelCheckpoint("NNRreg_singleOut.hd5", save_best_only=True, period=5)
# csv = CSVLogger("NNRreg_singleOut.csv")
# es = EarlyStopping(patience=10, verbose=1, restore_best_weights=True)
# lr = ReduceLROnPlateau(patience=10, verbose=1)
callbacks_list = []
tf.random.set_seed(123)
history = NN_model.fit(sXtrain, ytrain, epochs=200, batch_size=8, callbacks=callbacks_list)
# print(history.history.keys())
# "Loss"
plt.figure()
plt.plot(history.history['loss'])
plt.plot(history.history['accuracy'])
plt.title('Training loss and metrics')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.axis([0, 200, 0, 1.25])
plt.legend(['Cross-entropy', 'Accuracy'], loc='upper right');
###Output
_____no_output_____
###Markdown
Training error
###Code
NN_model.evaluate(sXtrain, ytrain)
print('\n Assessment of error: \n')
pred_train_prob = NN_model.predict(sXtrain)
pred_train = np.argmax(pred_train_prob, axis=1)
print(sklearn.metrics.classification_report(ytrain,pred_train))
print('Confusion:')
print(sklearn.metrics.confusion_matrix(ytrain,pred_train))
print('Predictions (prob):')
print(np.round(pred_train_prob[1:10,:],3))
###Output
170/170 [==============================] - 0s 451us/sample - loss: 0.0121 - accuracy: 1.0000
Assessment of error:
precision recall f1-score support
0 1.00 1.00 1.00 57
1 1.00 1.00 1.00 56
2 1.00 1.00 1.00 57
accuracy 1.00 170
macro avg 1.00 1.00 1.00 170
weighted avg 1.00 1.00 1.00 170
Confusion:
[[57 0 0]
[ 0 56 0]
[ 0 0 57]]
Predictions (prob):
[[0. 0. 1. ]
[0. 0.001 0.999]
[0.005 0.995 0. ]
[1. 0. 0. ]
[1. 0. 0. ]
[0. 1. 0. ]
[0.001 0.981 0.019]
[0. 1. 0. ]
[1. 0. 0. ]]
###Markdown
Test error
###Code
NN_model.evaluate(sXtest, ytest);
print('\n Assessment of error: \n')
pred_test_prob = NN_model.predict(sXtest)
pred_test = np.argmax(pred_test_prob, axis=1)
print(sklearn.metrics.classification_report(ytest,pred_test))
print('Confusion:')
print(sklearn.metrics.confusion_matrix(ytest,pred_test))
print('Predictions (prob):')
print(np.round(pred_test_prob[1:10,:],3))
###Output
43/43 [==============================] - 0s 147us/sample - loss: 0.0222 - accuracy: 0.9767
Assessment of error:
precision recall f1-score support
0 1.00 0.93 0.96 14
1 0.94 1.00 0.97 15
2 1.00 1.00 1.00 14
accuracy 0.98 43
macro avg 0.98 0.98 0.98 43
weighted avg 0.98 0.98 0.98 43
Confusion:
[[13 1 0]
[ 0 15 0]
[ 0 0 14]]
Predictions (prob):
[[0. 0. 1. ]
[0. 0.001 0.999]
[0. 1. 0. ]
[0. 1. 0. ]
[1. 0. 0. ]
[1. 0. 0. ]
[1. 0. 0. ]
[1. 0. 0. ]
[1. 0. 0. ]]
###Markdown
Hyperparameter optimization

It gets tedious to find an "optimal" combination of the hyperparameters by hand, so in this section, we will explore how we can automate this process.

Random search

In this section, we will use the random search implementation `RandomizedSearchCV` of the `sklearn` package to select a setting for the model hyperparameters, which we will take to be the number of hidden layers and neurons, as well as the drop-out rate of the hidden layers.
###Code
from sklearn.pipeline import Pipeline
from sklearn.model_selection import RandomizedSearchCV
###Output
_____no_output_____
###Markdown
First, we define a model builder function for `RandomizedSearchCV`;
###Code
def model_opt(n_hidden=1, n_units=16, r_dropOut_hidden=.5, input_shape=[13]):
model = keras.models.Sequential()
# The Input Layer :
model.add(InputLayer(input_shape=input_shape))
model.add(Dropout(.2))
# NN_model.add(BatchNormalization())
# The Hidden Layers :
for layer in range(n_hidden):
model.add(Dense(n_units, kernel_initializer='he_normal',activation='relu'))
model.add(Dropout(r_dropOut_hidden))
# NN_model.add(BatchNormalization())
# The Output Layer :
model.add(Dense(3, kernel_initializer='he_normal',activation='softmax'))
# Compile the network :
model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
return(model)
###Output
_____no_output_____
###Markdown
Second, we wrap the model for use with `sklearn` using the `KerasClassifier` function, define the search space, and perform the optimization. Note that this is a little time consuming even for this small example, and thus the optimization is pre-run, saved and loaded below (uncomment the content of the following two cells to re-run the optimization).
###Code
# estimators = []
# estimators.append(( 'scaler', sklearn.preprocessing.StandardScaler() ))
# estimators.append(( 'mlp', keras.wrappers.scikit_learn.KerasClassifier(model_opt) ))
# pipeline = Pipeline(estimators)
# hyper_param = {'mlp__n_hidden': (1,2,3),
# 'mlp__n_units': (8, 16, 32, 64, 128),
# 'mlp__r_dropOut_hidden': (.2, .3, .4, .5)
# }
# rsCV = RandomizedSearchCV(pipeline, hyper_param, n_iter=10, cv=5, refit=True, random_state=42)
# rsCV.fit(Xtrain, ytrain, mlp__epochs=200, mlp__batch_size=8, mlp__callbacks=callbacks_list);
# print(rsCV.best_params_)
# optScaler = rsCV.best_estimator_['scaler'] # collect input scaler
# optModel = rsCV.best_estimator_['mlp'].model # collect model
# optModel.save('NNClas_optModel.h5') # creates a HDF5 file 'NNClas_optModel.h5'
# pickle.dump(optScaler, open('NNClas_scaler.pkl', 'wb'))
optScaler = pickle.load(open('NNClas_scaler.pkl', 'rb'))
optModel = keras.models.load_model('NNClas_optModel.h5')
optModel.summary()
###Output
Model: "sequential_264"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
dropout_820 (Dropout) (None, 13) 0
_________________________________________________________________
dense_832 (Dense) (None, 16) 224
_________________________________________________________________
dropout_821 (Dropout) (None, 16) 0
_________________________________________________________________
dense_833 (Dense) (None, 16) 272
_________________________________________________________________
dropout_822 (Dropout) (None, 16) 0
_________________________________________________________________
dense_834 (Dense) (None, 16) 272
_________________________________________________________________
dropout_823 (Dropout) (None, 16) 0
_________________________________________________________________
dense_835 (Dense) (None, 3) 51
=================================================================
Total params: 819
Trainable params: 819
Non-trainable params: 0
_________________________________________________________________
###Markdown
Training error
###Code
sXtrain_opt = optScaler.transform(Xtrain) # same as sXtrain (scaler trained on full training set in both cases)
optModel.evaluate(sXtrain_opt, ytrain)
print('\n Assessment of error: \n')
pred_train_prob_opt = optModel.predict(sXtrain_opt)
pred_train_opt = np.argmax(pred_train_prob_opt, axis=1)
print(sklearn.metrics.classification_report(ytrain,pred_train_opt))
print('Confusion:')
print(sklearn.metrics.confusion_matrix(ytrain,pred_train_opt))
print('Predictions (prob):')
print(np.round(pred_train_prob_opt[1:10,:],3))
###Output
170/170 [==============================] - 0s 462us/sample - loss: 0.0091 - accuracy: 1.0000
Assessment of error:
precision recall f1-score support
0 1.00 1.00 1.00 57
1 1.00 1.00 1.00 56
2 1.00 1.00 1.00 57
accuracy 1.00 170
macro avg 1.00 1.00 1.00 170
weighted avg 1.00 1.00 1.00 170
Confusion:
[[57 0 0]
[ 0 56 0]
[ 0 0 57]]
Predictions (prob):
[[0. 0. 1. ]
[0. 0. 1. ]
[0. 1. 0. ]
[1. 0. 0. ]
[1. 0. 0. ]
[0. 1. 0. ]
[0.012 0.974 0.014]
[0. 1. 0. ]
[1. 0. 0. ]]
###Markdown
Test error
###Code
sXtest_opt = optScaler.transform(Xtest) # same as sXtest (scaler trained on full training set in both cases)
optModel.evaluate(sXtest_opt, ytest)
print('\n Assessment of error: \n')
pred_test_prob_opt = optModel.predict(sXtest_opt)
pred_test_opt = np.argmax(pred_test_prob_opt, axis=1)
print(sklearn.metrics.classification_report(ytest, pred_test_opt))
print('Confusion:')
print(sklearn.metrics.confusion_matrix(ytest ,pred_test_opt))
print('Predictions (prob):')
print(np.round(pred_test_prob_opt[1:10,:],3))
###Output
43/43 [==============================] - 0s 113us/sample - loss: 0.0108 - accuracy: 1.0000
Assessment of error:
precision recall f1-score support
0 1.00 1.00 1.00 14
1 1.00 1.00 1.00 15
2 1.00 1.00 1.00 14
accuracy 1.00 43
macro avg 1.00 1.00 1.00 43
weighted avg 1.00 1.00 1.00 43
Confusion:
[[14 0 0]
[ 0 15 0]
[ 0 0 14]]
Predictions (prob):
[[0. 0. 1.]
[0. 0. 1.]
[0. 1. 0.]
[0. 1. 0.]
[1. 0. 0.]
[1. 0. 0.]
[1. 0. 0.]
[1. 0. 0.]
[1. 0. 0.]]
|
Euler 024 - Lexicographic permutations.ipynb | ###Markdown
Euler Problem 24
================

A permutation is an ordered arrangement of objects. For example, 3124 is one possible permutation of the digits 1, 2, 3 and 4. If all of the permutations are listed numerically or alphabetically, we call it lexicographic order. The lexicographic permutations of 0, 1 and 2 are:

    012 021 102 120 201 210

What is the millionth lexicographic permutation of the digits 0, 1, 2, 3, 4, 5, 6, 7, 8 and 9?
###Code
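# Strategy: write the zero-indexed target position in the factorial number system;
# each factoradic digit then picks which of the remaining digits comes next.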
factorials = [1]
prod = 1
for k in range(1, 10):
prod *= k
factorials.append(prod)
factorials.reverse()
def base_factorial(n):
seq = []
for f in factorials:
seq.append(n//f)
n %= f
return seq
def get_perm(n):
L = list(range(10))
perm = ''
for d in base_factorial(n):
perm += str(L[d])
del L[d]
return perm
print(get_perm(10**6 - 1))
###Output
2783915460
|
notebooks/kwdlc_ner_tutorial.ipynb | ###Markdown
1. Introduction

Using the Kyoto University Web Document Leads Corpus (KWDLC), we train, evaluate, and run predictions with a named entity recognition model for Japanese. If you want to use a GPU, switch it on via **Runtime → Change runtime type → GPU**.

2. Preparation
- Install the Python libraries
- Create a working directory
###Code
!pip install bs4
!pip install nagisa
!pip install seqeval
!pip install flair
!mkdir data
###Output
_____no_output_____
###Markdown
3. Preprocessing the Kyoto University Web Document Leads Corpus
- Download the corpus from GitHub
- Convert it into space-separated datasets for training with nagisa and FLAIR
- Split it into training/development/evaluation datasets
###Code
!git clone https://github.com/ku-nlp/KWDLC
import os
import glob
import random
import bs4
import nagisa
def load_kwdlc(dir_path):
files = glob.glob(dir_path+"/*/*", recursive=True)
data = []
words = []
postgas = []
position2ne = {}
for fn in files:
with open(fn, "r") as f:
for line in f:
line = line.strip()
first_char = line[0]
if first_char == "+":
soup = bs4.BeautifulSoup(line, "html.parser")
num_tags = len(soup.contents)
for i in range(num_tags):
if str(type(soup.contents[i])) == "<class 'bs4.element.Tag'>":
ne_tag_tokens = str(soup.contents[i]).split(":")
is_ne = ne_tag_tokens[0][1:]
if is_ne == "ne":
netype = ne_tag_tokens[1]
target = ne_tag_tokens[2].split(">")[0]
position2ne[len(words)] = [target, netype]
elif first_char == "#" or first_char == "*":
None
elif line == "EOS":
# process
if len(position2ne) > 0:
positions = position2ne.keys()
for position in positions:
target = position2ne[position][0]
netype = position2ne[position][1]
data.append([words, postgas, position2ne])
# reset
words = []
postgas = []
position2ne = {}
else:
tokens = line.split()
surface = tokens[0]
words.append(surface)
postag = "_".join(tokens[3:4])
postgas.append(postag)
return data, position2ne
def write_kwdlc_as_single_file(filename, data, position2ne):
with open(filename, "w") as f:
for line in data:
words, postgas, position2ne = line
nes = [v[0] for k, v in sorted(position2ne.items(), key=lambda x:x[0])]
nes = list(reversed(nes))
tags = [v[1] for k, v in sorted(position2ne.items(), key=lambda x:x[0])]
tags = list(reversed(tags))
if len(nes) == 0:
None
else:
ne_tags = []
ne = nes.pop()
tag = tags.pop()
ne_target_char = ne[0]
partical = []
for word in words:
first_char = word[0]
if first_char == ne_target_char:
if word in ne:
partical.append(word)
if "".join(partical) == ne:
for i, word in enumerate(partical):
if i == 0:
ne_tags.append("B-"+tag)
elif i == len(partical) - 1:
ne_tags.append("E-"+tag)
else:
ne_tags.append("M-"+tag)
if len(nes) > 0:
ne = nes.pop()
tag = tags.pop()
ne_target_char = ne[0]
partical = []
else:
ne_target_char = ne[len("".join(partical))]
else:
partical = []
ne_tags.append("O")
else:
partical = []
ne_tags.append("O")
for word, postag, ne_tag in zip(words, postgas, ne_tags):
f.write(" ".join([word, ne_tag])+"\n")
f.write("\n")
def write_file(filename, X, Y):
with open(filename, "w") as f:
for x, y in zip(X, Y):
for word, tag in zip(x, y):
f.write(" ".join([word, tag])+"\n")
f.write("\n")
# load KNP files
dir_path = "KWDLC"
dir_path = os.path.join(dir_path, "knp")
data, position2ne = load_kwdlc(dir_path)
# write a file
fn_out = "data/kwdlc.txt"
write_kwdlc_as_single_file(fn_out, data, position2ne)
# divide kwdlc.txt into trainset, devset, testset
random.seed(1234)
fn_in = "data/kwdlc.txt"
fn_out_train = "data/kwdlc.train"
fn_out_dev = "data/kwdlc.dev"
fn_out_test = "data/kwdlc.test"
train_data = 0.9
dev_data = 0.05
test_data = 0.05
X, Y = nagisa.utils.load_file(fn_in, delimiter=' ', newline='')
indice = [i for i in range(len(X))]
random.shuffle(indice)
num_train = int(train_data * len(indice))
num_dev = int(dev_data * len(indice))
num_test = int(test_data * len(indice))
train_X = [X[i] for i in indice[:num_train]]
train_Y = [Y[i] for i in indice[:num_train]]
write_file(fn_out_train, train_X, train_Y)
dev_X = [X[i] for i in indice[num_train:num_train+num_dev]]
dev_Y = [Y[i] for i in indice[num_train:num_train+num_dev]]
write_file(fn_out_dev, dev_X, dev_Y)
test_X = [X[i] for i in indice[num_train+num_dev:num_train+num_dev+num_test]]
test_Y = [Y[i] for i in indice[num_train+num_dev:num_train+num_dev+num_test]]
write_file(fn_out_test, test_X, test_Y)
###Output
_____no_output_____
###Markdown
4. Training the named entity recognition model (nagisa)
###Code
nagisa.fit(
train_file="data/kwdlc.train",
dev_file="data/kwdlc.dev",
test_file="data/kwdlc.test",
model_name="data/kwdlc_ner_model",
delimiter=' ', # delimiter="\t"
newline='', # newline='EOS'
)
###Output
_____no_output_____
###Markdown
5.固有表現抽出モデルの評価 (nagisa)
###Code
from seqeval.metrics import classification_report
ner_tagger = nagisa.Tagger(
vocabs='data/kwdlc_ner_model.vocabs',
params='data/kwdlc_ner_model.params',
hp='data/kwdlc_ner_model.hp'
)
fn_in_test = "data/kwdlc.test"
test_X, test_Y = nagisa.utils.load_file(fn_in_test, delimiter=' ', newline='')
true_Y = []
pred_Y = []
for x, true_y in zip(test_X, test_Y):
pred_y = ner_tagger.decode(x)
true_Y += true_y
pred_Y += pred_y
report = classification_report(true_Y, pred_Y)
print(report)
###Output
precision recall f1-score support
artifact 0.35 0.37 0.36 46
date 0.82 0.91 0.86 86
time 0.62 0.50 0.56 10
location 0.70 0.75 0.73 132
organization 0.47 0.46 0.47 54
person 0.49 0.60 0.54 58
optional 0.20 0.13 0.16 15
money 0.38 1.00 0.55 3
percent 0.67 0.67 0.67 3
micro avg 0.61 0.65 0.63 407
macro avg 0.60 0.65 0.63 407
###Markdown
6. Prediction with the named entity recognition model (nagisa)
###Code
ner_tagger = nagisa.Tagger(
vocabs="data/kwdlc_ner_model.vocabs",
params="data/kwdlc_ner_model.params",
hp="data/kwdlc_ner_model.hp"
)
text = "FacebookのAIラボ所長でもあるヤン・ルカン博士"
tokens = ner_tagger.tagging(text)
print(tokens)
###Output
Facebook/O の/O AI/O ラボ/E-person 所長/O で/O も/O ある/O ヤン/B-person ・/M-person ルカン/E-person 博士/O
###Markdown
7. Training the named entity recognition model (FLAIR)
###Code
from flair.data import Sentence
from flair.models import SequenceTagger
from flair.trainers import ModelTrainer
from flair.datasets import ColumnCorpus
from flair.embeddings import FlairEmbeddings
from flair.embeddings import StackedEmbeddings
# preprocess
columns = {0: 'text', 1: 'ner'}
data_folder = '.'
corpus = ColumnCorpus(
data_folder,
columns,
train_file='data/kwdlc.train',
dev_file="data/kwdlc.dev",
test_file="data/kwdlc.test"
)
tag_type = 'ner'
tag_dictionary = corpus.make_tag_dictionary(tag_type=tag_type)
# construct a flair model
embedding_types = [
FlairEmbeddings('ja-forward'),
FlairEmbeddings('ja-backward'),
]
embeddings = StackedEmbeddings(embeddings=embedding_types)
tagger = SequenceTagger(
hidden_size=256,
embeddings=embeddings,
tag_dictionary=tag_dictionary,
tag_type=tag_type,
use_crf=True
)
# start training
trainer = ModelTrainer(tagger, corpus)
trainer.train(
'resources/taggers/example-ner',
learning_rate=0.1,
mini_batch_size=32,
max_epochs=10
)
###Output
_____no_output_____
###Markdown
8. Evaluating the named entity recognition model (FLAIR)
###Code
from flair.data import Sentence
from flair.models import SequenceTagger
from seqeval.metrics import classification_report
model = SequenceTagger.load('resources/taggers/example-ner/final-model.pt')
fn_in_test = "data/kwdlc.test"
test_X, test_Y = nagisa.utils.load_file(fn_in_test, delimiter=' ', newline="")
true_Y = []
pred_Y = []
for x, true_y in zip(test_X, test_Y):
text = " ".join(x)
sentence = Sentence(text)
model.predict(sentence)
tagged_text = sentence.to_tagged_string()
tokens = tagged_text.split()
words = []
tags = []
for token in tokens:
first_char = token[0]
last_char = token[-1]
if first_char == "<" and last_char == ">":
tag = token[1:-1]
tags[-1] = tag
else:
words.append(token)
tags.append("O")
pred_y = tags
true_Y += true_y
pred_Y += pred_y
report = classification_report(true_Y, pred_Y)
print(report)
###Output
2019-11-07 00:30:16,051 loading file resources/taggers/example-ner/final-model.pt
precision recall f1-score support
artifact 0.64 0.61 0.62 46
date 0.87 0.94 0.91 86
time 0.44 0.70 0.54 10
location 0.89 0.87 0.88 132
organization 0.71 0.59 0.65 54
person 0.85 0.79 0.82 58
optional 0.36 0.27 0.31 15
money 0.75 1.00 0.86 3
percent 0.25 0.33 0.29 3
micro avg 0.79 0.78 0.79 407
macro avg 0.79 0.78 0.78 407
###Markdown
9. Prediction with the named entity recognition model (FLAIR)
###Code
model = SequenceTagger.load('resources/taggers/example-ner/final-model.pt')
text = "Facebook の AI ラボ 所長 でも ある ヤン ・ ルカン 博士"
sentence = Sentence(text)
model.predict(sentence)
print(sentence.to_tagged_string())
###Output
2019-11-07 00:30:56,475 loading file resources/taggers/example-ner/final-model.pt
Facebook の AI ラボ 所長 でも ある ヤン <B-person> ・ <M-person> ルカン <E-person> 博士
|
Programmeerelementen/Structuren/0200_Herhalingsstructuur.ipynb | ###Markdown
REPETITION STRUCTURE

In this notebook you will get to know the repetition structure. Sometimes you iterate over a list or a string. Sometimes you want to repeat the same instruction a certain number of times. To avoid having to enter the same instruction or algorithm several times, you use a loop. You can bring repetition into your script with either a for loop or a while loop.

1. For loop

The for loop is used for a *bounded repetition*: an instruction is executed a certain number of times. The number of times the instruction is executed is either stated explicitly or bounded by, for example, the number of elements in a list.

Example 1.1

The following script contains two lists: a list `wiskundigen` (mathematicians), in which the women have been forgotten, and a list `wiskundige_vrouwen` with female mathematicians. The script automatically adds all the female mathematicians to the list `wiskundigen`. The loop is bounded by the number of elements in the list `wiskundige_vrouwen`. Run the Python script. Do you understand it?
###Code
# bounded repetition
wiskundigen = ["Fermat", "Gauss", "Euler", "Fibonacci", "Galois"]
wiskundige_vrouwen = ["Noether", "Nightingale", "Lovelace"]
for vrouw in wiskundige_vrouwen:
wiskundigen.append(vrouw)
print(wiskundigen)
###Output
_____no_output_____
###Markdown
Exercise 1.1
- What would the output be if the code had said `for man in wiskundige_vrouwen:` instead? Answer:
- Adapt the code so that the output is `['Noether', 'Nightingale', 'Lovelace', 'Fermat', 'Gauss', 'Euler', 'Fibonacci', 'Galois']`.

You have probably already noticed that Python uses no `;` or `{}`. Python does use `:`. Very characteristic is the use of *indentation*: in certain places the code is indented by four spaces. That is how the code inside the for loop is 'grouped'. The variable `vrouw` refers to an element in the list `wiskundige_vrouwen`; this variable is not declared in advance. The instructions of a loop that must be executed follow the `:` and are grouped by indenting. This is called indentation. Where the indentation stops, the loop has completed one pass.

Example 1.2

You can also use a *bounded repetition* in which you explicitly state how many times the instruction is executed. The following code cell uses a for loop that is executed 20 times. Run the following code.
###Code
# bounded repetition
getal = 10
print("getal bij de start:", getal)
for i in range(20): # loop runs 20 times; i takes the values 0 through 19
print("i:", i)
    getal = getal + i # shorthand for: getal += i
print("\tgetal:", getal) # \t doet inspringen
###Output
_____no_output_____
###Markdown
`range(20)` generates the consecutive natural numbers from 0 up to and including 19.

Example 1.3

Also run the following code.
###Code
# bounded repetition
even = [0, 2, 4, 6, 8]
# extend this list with the next six even numbers
for i in range(6): # loop runs 6 times; i takes the values 0, 1, 2, 3, 4 and 5
print("i:", i)
getal = 8 + 2 * (i+1)
print("\tgetal:", getal)
even.append(getal)
print("\tde lijst even is nu:", even)
###Output
_____no_output_____
###Markdown
Exercise 1.2

Write a script that prints 7 multiples of 56. The script must start from the given list and extend that list.
###Code
lijst = [56, 112]
# completed script
###Output
_____no_output_____
###Markdown
2. While loop

Besides the for loop, you can also use a while loop as a repetition structure. With the while loop, the number of times the loop is executed is determined by a condition. In that case you are working with a *conditional repetition*.
###Code
# conditional repetition
lijst = ["A", "B", "C", "D", "E", "F", "G", "H", "I"]
getal = 0
# print the first 6 letters of the list
while getal < 6:
print("getal =", getal, "\nletter =" , lijst[getal]) # \n doet overgaan naar nieuwe lijn
getal +=1
###Output
_____no_output_____ |
cnn/results_analysis.ipynb | ###Markdown
Reading metrics data
###Code
raw_data = pd.read_csv("results/metrics.csv")
raw_data = raw_data.drop([i for i in range(12)])
raw_data = raw_data.drop([15])
raw_data
class Metrics:
def __init__(self, df):
self.info = df.describe()
self.avg_accuracy = self.info.accuracy[1]
self.std_accuracy = self.info.accuracy[2]
self.avg_balanced_accuracy = self.info.balanced_accuracy[1]
self.std_balanced_accuracy = self.info.balanced_accuracy[2]
self.avg_recall = self.info.recall[1]
self.std_recall = self.info.recall[2]
self.avg_precision = self.info.precision[1]
self.std_precision = self.info.precision[2]
    def accuracy_info(self):
        return self.avg_accuracy, self.std_accuracy
    def baccuracy_info(self):
        return self.avg_balanced_accuracy, self.std_balanced_accuracy
    def recall_info(self):
        return self.avg_recall, self.std_recall
    def precision_info(self):
        return self.avg_precision, self.std_precision
class OptimInfo:
def __init__(self, df, optim_name):
self.optim = optim_name
self.data = df[df.optim.str.contains(self.optim)]
self.metrics = Metrics(self.data)
    def avg_metrics(self):
        return self.metrics.accuracy_info(), self.metrics.baccuracy_info(), self.metrics.recall_info(), self.metrics.precision_info()
    def accuracy_info(self):
        return self.metrics.accuracy_info()
class ModelTestsInfos:
def __init__(self, df, model_name):
self.model = model_name
self.data = df[df.model.str.contains(self.model)]
self.sgd_optim = OptimInfo(self.data, "SGD")
self.adam_optim = OptimInfo(self.data, "Adam")
    def get_sgd_avg_metrics(self):
        return self.sgd_optim.avg_metrics()
resnet50 = ModelTestsInfos(raw_data, "resnet50")
vgg16 = ModelTestsInfos(raw_data, "vgg16")
mobilenetv2 = ModelTestsInfos(raw_data, "mobilenetv2")
densenet121 = ModelTestsInfos(raw_data, "densenet121")
nets = [resnet50, vgg16, mobilenetv2, densenet121]
print('*'*20)
avg_accuracy = []
avg_baccuracy = []
avg_recall = []
avg_precision = []
name = []
std_accuracy = []
std_baccuracy = []
std_recall = []
std_precision = []
optim = ["Adam", "SGD","Adam", "SGD","Adam", "SGD","Adam", "SGD"]
for net in nets:
aux = []
print(f'CNN: {net.model}')
# name.append(net.model)
name.append(net.model)
avg_accuracy.append(net.adam_optim.metrics.avg_accuracy)
avg_accuracy.append(net.sgd_optim.metrics.avg_accuracy)
std_accuracy.append(net.adam_optim.metrics.std_accuracy)
std_accuracy.append(net.sgd_optim.metrics.std_accuracy)
avg_baccuracy.append(net.adam_optim.metrics.avg_balanced_accuracy)
avg_baccuracy.append(net.sgd_optim.metrics.avg_balanced_accuracy)
std_baccuracy.append(net.adam_optim.metrics.std_balanced_accuracy)
std_baccuracy.append(net.sgd_optim.metrics.std_balanced_accuracy)
avg_recall.append(net.adam_optim.metrics.avg_recall)
avg_recall.append(net.sgd_optim.metrics.avg_recall)
std_recall.append(net.adam_optim.metrics.std_recall)
std_recall.append(net.sgd_optim.metrics.std_recall)
avg_precision.append(net.adam_optim.metrics.avg_precision)
avg_precision.append(net.sgd_optim.metrics.avg_precision)
std_precision.append(net.adam_optim.metrics.std_precision)
std_precision.append(net.sgd_optim.metrics.std_precision)
for i, net in enumerate(nets):
print(f"CNN {net.model}")
print("-")
print(f'Accuracy Adam: {avg_accuracy[2*i]:.5} +- {std_accuracy[2*i]:2.4}')
print(f'Accuracy SGD: {avg_accuracy[2*i + 1]:.5} +- {std_accuracy[2*i + 1]:2.4} ')
print("-")
print(f'Balanced Accuracy Adam: {avg_baccuracy[2*i]:.5} +- {std_baccuracy[2*i]:2.4}')
print(f'Balanced Accuracy SGD: {avg_baccuracy[2*i + 1]:.5} +- {std_baccuracy[2*i + 1]:2.4} ')
print("-")
print(f'Precision Adam: {avg_precision[2*i] * 100:.5} +- {std_precision[2*i] * 100:2.4}')
print(f'Precision SGD: {avg_precision[2*i + 1] * 100:.5} +- {std_precision[2*i + 1] *100:2.4} ')
print("-")
print(f'Recall Adam: {avg_recall[2*i]*100:.5} +- {std_recall[2*i]*100:2.3}')
print(f'Recall SGD: {avg_recall[2*i + 1]*100:.5} +- {std_recall[2*i + 1]*100:2.3} ')
print("-")
print("*"*10, end='\n\n')
avg_recall
std_recall
avg_precision
std_precision
name
def plot_graph(avg, std, ylabel, title, ylim0, ylim1, step, path):
fig, ax = plt.subplots(figsize=(13,10))
width = 0.3
rects1 = ax.bar(np.arange(4),
[avg[0], avg[2], avg[4], avg[6]],
yerr=[std[0], std[2], std[4], std[6]],
align='center', alpha=0.5, color = 'b', ecolor='black', capsize=10, width=width)
rects2 = ax.bar(np.arange(4) + width,
[avg[1], avg[3], avg[5], avg[7]],
yerr=[std[1], std[3], std[5], std[7]],
align='center', alpha=0.5, color = 'g', ecolor='black', capsize=10, width=width)
ax.set_xticks(np.arange(4))
ax.set_yticks(np.arange(ylim0, ylim1, step=step))
ax.set_xticks(np.arange(4) + width/2)
plt.ylim([ylim0, ylim1])
plt.legend(['Adam', 'SGD'], fontsize=12, title="Otimizador")
ax.set_ylabel(ylabel, fontsize=18)
ax.set_xticklabels(name, fontsize=18)
ax.set_title(title, fontsize=20)
ax.yaxis.grid(True)
plt.savefig(path, dpi=300,bbox_inches='tight')
plot_graph(avg_accuracy,
std_accuracy,
"Acurácia",
"Acurácia Média com Desvio Padrão por Combinação",
0, 100.1,
5,
"results/average/accuracy.png")
plot_graph(avg_baccuracy, std_baccuracy,
"Acurácia Balanceada",
"Acurácia Balanceada Média com Desvio Padrão por Combinação",
0, 100.1,
5,
"results/average/balanced_accuracy.png")
plot_graph(avg_recall, std_recall,
"Revocação",
"Revocação Média com Desvio Padrão por Combinação",
0, 1.001,
0.05,
"results/average/recall.png")
plot_graph(avg_precision, std_precision,
"Precisão",
"Precisão Média com Desvio Padrão por Combinação",
0, 1.001,
0.05,
"results/average/precision.png")
###Output
_____no_output_____ |
Statistical_Machine_Learning/workshop/workshop01-notebook.ipynb | ###Markdown
COMP90051 Workshop 1 Welcome to Jupyter Notebook—an interactive environment that mixes code, visualisations and text. Jupyter Notebook supports many programming languages (called "kernels" in the Jupyter lingo). In this course, we'll mainly be using Python 3 due to its popularity in the machine learning/data science communities. Information about the kernel is displayed in the top right of the UI. Cells Notebooks are made up of cells: *markdown cells* and *code cells*. This cell is an example of a markdown cell. Markdown cells can contain text, tables, images, equations, etc. (see the Markdown guide under the _Help_ menu for more info). You can edit a markdown cell by double-clicking on it. To evaluate the cell, press the run button in the toolbar or hit `Shift`+`Enter`. Try it below! --- **Edit me** --- Next are some code cells. You can evaluate them individually, using the run button or by hitting `Shift`+`Enter`. Often, you'll want to run all cells in the notebook, or below a certain point. The functions for doing this are in the _Cell_ menu.
###Code
message = "Hello world!"
print(message)
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
x = np.arange(0.0, 2.0, 0.01)
y = (x - 1)**2
plt.plot(x,y)
plt.ylabel("y")
plt.xlabel("x")
plt.title("Parabola")
plt.show()
###Output
_____no_output_____
###Markdown
Interrupting/restarting the kernel Code is run in the kernel process. You can interrupt the kernel by pressing the stop button in the toolbar. Try it out below.
###Code
import time
time.sleep(10)
###Output
_____no_output_____ |
locale/examples/01-filter/sampling_functions_2d.ipynb | ###Markdown
Sample Function: Perlin Noise in 2D. Here we use `pyvista.core.imaging.sample_function` to sample Perlin noise over a region to generate random terrain. Perlin noise is a type of gradient noise often used by visual effects artists to increase the appearance of realism in computer graphics. Source: the development of Perlin noise has allowed computer graphics artists to better represent the complexity of natural phenomena in visual effects for the motion picture industry.
###Code
import pyvista as pv
###Output
_____no_output_____
###Markdown
Generate Perlin Noise over a StructuredGrid. Feel free to change the values of `freq` to change the shape of the "mountains". For example, lowering the frequency will make the terrain seem more like hills rather than mountains.
###Code
freq = [0.689, 0.562, 0.683]
noise = pv.perlin_noise(1, freq, (0, 0, 0))
sampled = pv.sample_function(noise,
bounds=(-10, 10, -10, 10, -10, 10),
dim=(500, 500, 1))
###Output
_____no_output_____
###Markdown
Warp by scalar. Here we warp by scalar to give the terrain some height based on the value of the Perlin noise. This is necessary to give the terrain its shape.
###Code
mesh = sampled.warp_by_scalar('scalars')
mesh = mesh.extract_surface()
# clean and smooth a little to reduce Perlin noise artifacts
mesh = mesh.smooth(n_iter=100, inplace=False, relaxation_factor=1)
# This makes the "water" level look flat.
z = mesh.points[:, 2]
diff = z.max() - z.min()
# water level at 70% (change this to change the water level)
water_percent = 0.7
water_level = z.max() - water_percent*diff
mesh.points[z < water_level, 2] = water_level
###Output
_____no_output_____
###Markdown
Show the terrain as a contour plot
###Code
# make the water blue
rng = z.max() - z.min()
clim = (z.max() - rng*1.65, z.max())
pl = pv.Plotter()
pl.add_mesh(mesh, scalars=z,
cmap='gist_earth', n_colors=10, show_scalar_bar=False,
smooth_shading=True, clim=clim)
pl.show()
###Output
_____no_output_____
###Markdown
Show the terrain with custom lighting and shadows
###Code
pl = pv.Plotter(lighting=None)
pl.add_light(pv.Light((3, 1, 0.5), show_actor=True, positional=True,
cone_angle=90, intensity=1.2))
pl.add_mesh(mesh, cmap='gist_earth', show_scalar_bar=False,
smooth_shading=True, clim=clim)
pl.enable_shadows = True
pl.show()
###Output
_____no_output_____ |
visualize_nonlinear_assoc.ipynb | ###Markdown
Case without a teacher (target) signal
###Code
np.random.seed(seed=0)
network = Network(force_self_prediction=True)
target_network = TargetNetwork()
network.load("saved")
target_network.load("saved")
dt = 0.1
lp_filter = LowPassFilter(dt, 3)
inputs = []
targets = []
outputs = []
for i in range(30):
input_values, target_values = target_network.get_training_pair()
for j in range(1000):
filtered_input_values = lp_filter.process(input_values)
inputs.append(filtered_input_values)
network.set_input_firing_rate(filtered_input_values)
network.update(dt)
targets.append(target_values)
outputs.append(network.layers[2].get_p_activation())
inputs = np.array(inputs)
targets = np.array(targets)
outputs = np.array(outputs)
inputs.shape
check_index = 1
plt.plot(outputs[:,check_index], label="output firing rate")
plt.plot(targets[:,check_index], label="target firing rate")
plt.legend()
plt.show()
plt.plot(inputs[:,2], label="input firing rate")
plt.legend()
plt.show()
data_file_path = "saved/layer1.npz"
data = np.load(data_file_path)
w_pp_bu_1 = data["w_pp_bu"]
w_pp_td_1 = data["w_pp_td"]
w_ip_1 = data["w_ip"] # (10,20)
w_pi_1 = data["w_pi"] # (20,10)
sns.heatmap(w_pp_bu_1) # trained (learning target)
plt.show()
sns.heatmap(w_pp_td_1) # fixed
plt.show()
sns.heatmap(w_ip_1) # trained (learning target)
plt.show()
sns.heatmap(w_pi_1) # fixed?
plt.show()
sns.heatmap(-w_pp_td_1) # fixed; confirm that it matches the negative of w_ip
plt.show()
data_file_path = "saved/layer0.npz"
data = np.load(data_file_path)
w_pp_bu_0 = data["w_pp_bu"]
w_pp_td_0 = data["w_pp_td"]
sns.heatmap(w_pp_bu_0) # trained (learning target)
plt.show()
sns.heatmap(w_pp_td_0) # fixed
plt.show()
###Output
_____no_output_____
###Markdown
Checking the case where a teacher (target) signal is applied
###Code
np.random.seed(seed=0)
network = Network(force_self_prediction=True)
target_network = TargetNetwork()
network.load("saved")
target_network.load("saved")
dt = 0.1
lp_filter = LowPassFilter(dt, 3)
train_inputs = []
train_targets = []
train_outputs = []
for i in range(30):
input_values, target_values = target_network.get_training_pair()
for j in range(1000):
filtered_input_values = lp_filter.process(input_values)
train_inputs.append(filtered_input_values)
network.set_target_firing_rate(target_values)
network.set_input_firing_rate(filtered_input_values)
network.update(dt)
train_targets.append(target_values)
train_outputs.append(network.layers[2].get_p_activation())
train_inputs = np.array(train_inputs)
train_targets = np.array(train_targets)
train_outputs = np.array(train_outputs)
check_index = 1
plt.plot(train_outputs[:,check_index], label="output firing rate")
plt.plot(train_targets[:,check_index], label="target firing rate")
plt.legend()
plt.show()
###Output
_____no_output_____ |
Heart Disease Prediction using Machine Learning.ipynb | ###Markdown
Heart Disease Prediction
###Code
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from matplotlib import rcParams
from matplotlib.cm import rainbow
%matplotlib inline
import warnings
warnings.filterwarnings('ignore')
from sklearn.neighbors import KNeighborsClassifier
df = pd.read_csv('heart_disease_dataset.csv')
df.info()
df.describe()
###Output
_____no_output_____
###Markdown
Feature Selection
###Code
import seaborn as sns
#obtain the correlation of each feature in dataset
corrmat = df.corr()
top_corr_features = corrmat.index
plt.figure(figsize=(20,20))
#plot heat map
sns.heatmap(df[top_corr_features].corr(),annot=True,cmap='RdYlGn')
plt.show()
df.hist()
plt.show()
sns.set_style('whitegrid')
sns.countplot(x='target',data=df,palette='RdBu_r')
plt.show()
###Output
_____no_output_____
###Markdown
Data Processing
###Code
# In data processing, the categorical values are converted to dummy variables and all the values are scaled before training the machine learning models.
dataset = pd.get_dummies(df,columns = ['sex' , 'cp', 'fbs', 'restecg', 'exang', 'slope', 'ca', 'thal'])
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
standardScaler = StandardScaler()
columns_to_scale = ['age', 'trestbps', 'chol', 'thalach', 'oldpeak']
dataset[columns_to_scale] = standardScaler.fit_transform(dataset[columns_to_scale])
dataset.head()
y=dataset['target']
x=dataset.drop(['target'],axis=1)
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size = 0.33, random_state = 0)
knn_scores = []
for k in range(1,25):
knn_classifier = KNeighborsClassifier(n_neighbors = k)
knn_classifier.fit(X_train, y_train)
knn_scores.append(knn_classifier.score(X_test, y_test))
plt.plot([k for k in range(1,25)],knn_scores,color='blue')
for i in range(1,25):
plt.text(i, knn_scores[i-1], (i, knn_scores[i-1]))
plt.xticks([i for i in range(1, 25)])
plt.xlabel('Number of Neighbors (K)',color='Red',weight='bold',fontsize='12')
plt.ylabel('Scores',color='Red',weight='bold',fontsize='12')
plt.title('K Neighbors Classifier scores for different K values',color='Red',weight='bold',fontsize='12')
plt.show()
plt.rcParams["font.weight"]= "bold"
plt.rcParams["axes.labelweight"] = "bold"
from sklearn.model_selection import cross_val_score
knn_classifier = KNeighborsClassifier(n_neighbors = 12)
score=cross_val_score(knn_classifier,x,y,cv=10)
score.mean()
###Output
_____no_output_____ |
w1/w1-14 Time value of money A.ipynb | ###Markdown
If a 5-year government bond with a 10% coupon rate, paying interest every 6 months, has a face value of 1,000 won, what is the current price of this bond? (Assume a required yield of 15%.) r=10%, k=2, n=5, FV=1000, YTM=15% > PV?
###Code
fv = 1000
ytm = 0.15
r = 0.1
k = 2
n = 5
###Output
_____no_output_____
###Markdown
$$\frac{1000}{(1 + \frac{0.15}{2})^{10}} + \frac{1000 \times \frac{0.1}{2}}{(1 + \frac{0.15}{2})^{1}} + \frac{1000 \times \frac{0.1}{2}}{(1 + \frac{0.15}{2})^{2}} + \cdots + \frac{1000 \times \frac{0.1}{2}}{(1 + \frac{0.15}{2})^{10}}$$
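Before coding the sum term by term, note that the coupon leg is an ordinary annuity, so the same price can be computed in closed form. The sketch below is only a cross-check (not part of the original lecture code) and assumes the `fv`, `ytm`, `r`, `k`, `n` variables defined in the cell above.

```python
# Closed-form cross-check (assumes fv, ytm, r, k, n from the cell above)
y = ytm / k                    # per-period yield (0.075)
c = fv * r / k                 # per-period coupon (50)
n_periods = k * n              # number of coupon payments (10)

pv_coupons = c * (1 - (1 + y) ** -n_periods) / y   # annuity present value
pv_face = fv * (1 + y) ** -n_periods               # discounted face value
print(pv_coupons + pv_face)                        # ~828.4, matching the loop-based result
```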
###Code
pv1 = 1000 / (1+ytm/k)**(k*n)
pv1
pv2 = 0
for i in range(1, k*n+1, 1): # range(start, stop, step) - note that Python excludes the stop value, so we add 1 to use k*n + 1
tmp = (fv*r/2) / (1+ytm/k)**i
print(i, ':', tmp)
pv2 = pv2 + tmp
pv2
pv = pv1 + pv2
pv
###Output
_____no_output_____
###Markdown
Write a function that computes the bond price.
###Code
def bond_price(fv, ytm, r, k, n):
pv1 = fv / (1+ytm/k)**(k*n)
pv2 = 0
for i in range(1, k*n+1, 1):
tmp = (fv*r/2) / (1+ytm/k)**i
print(i, ':', tmp)
pv2 = pv2 + tmp
pv = pv1 + pv2
return(pv)
fv = 1000
ytm = 0.15
r = 0.1
k = 2
n = 5
bond_price(fv, ytm, r, k, n)
###Output
_____no_output_____ |
Supervised Machine Learning Algorithms/MLP_FINAL.ipynb | ###Markdown
Heating system prediction
###Code
history = model.fit(X_train, Y_train,validation_split = 0.1, epochs = 30) #storing in the history variable to plot it after (loss and accuracy)
model.evaluate(X_test,Y_test)
# Plot training & validation accuracy values
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()
# Plot training & validation loss values
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()
X_predict = model.predict(X_test)
X_predict = X_predict.flatten()
#Y_test = Y_test.values
test = Y_test.values
test
X_predict
#rounding the prediction in order to classify better: the model gives e.g. 0.99, which is close to 1; this is the reason
#the model reports only roughly 50% accuracy on the unrounded outputs
X_predict_round = np.around(X_predict, decimals=1)
test = np.around(test, decimals=1)
test
from sklearn.metrics import confusion_matrix
y_test = test * 10 #multiply to 10 to avoid the decimals (the confusion matrix get integers)
pred = X_predict_round*10 #multiply to 10 to avoid the decimals (the confusion matrix get integers)
labels = [0, 5, 10] #the 3 labels we want to find (remember these are the normalized data * 10)
cm = confusion_matrix(y_test, pred, labels)
print(cm)
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(cm)
plt.title('Confusion matrix of the classifier')
fig.colorbar(cax)
ax.set_xticklabels([''] + ['E', 'WP', 'Zon'])
ax.set_yticklabels([''] + ['E', 'WP', 'Zon'])
plt.xlabel('Predicted')
plt.ylabel('True')
plt.show()
print()
from sklearn.metrics import classification_report
print(classification_report(y_test,pred))
#model.evaluate(test, y_predict)
indices = Y_test.index
house_to_pred = 0
print('real: ' + str(test[house_to_pred]))
print('predicted: ' + str(X_predict[house_to_pred]))
print('predicted rounded: ' + str(X_predict_round[house_to_pred]))
print('\nhouse no. ' + str(indices[house_to_pred]))
house_no = indices[house_to_pred]
#indice = Y_test.index[15]
df.iloc[house_no,:]
###Output
real: 0.5
predicted: 0.4334466
predicted rounded: 0.4
house no. 727072
###Markdown
People prediction
###Code
del_people = df[['delivery', 'consumption','heating_sys', 'solar_panels']] #storing only the columns that I want to use for training (all of them except the target variable)
del_people
del_people['solar_panels'] = pd.to_numeric(del_people['solar_panels']) #converting to numeric, it was an object
del_people['heating_sys'] = pd.to_numeric(del_people['heating_sys']) #converting to numeric, it was an object
x = del_people.values #returns a numpy array
min_max_scaler = preprocessing.MinMaxScaler()
x_scaled = min_max_scaler.fit_transform(x)
df_norm = pd.DataFrame(x_scaled)
df_norm
total = df_norm.values #array converting from the normalized dataframe
df['people'] = pd.to_numeric(df['people']) #converting to numeric, it was an object
output = df['people'] #storing the target variable column
output_norm = (output-np.min(output))/(np.max(output)-np.min(output)) #target variable normalitation
output_norm.unique() #4 groups of people
X_train, X_test, Y_train, Y_test = train_test_split(total, output_norm, test_size=0.3) #splitting off 30% of the data for testing
history = model.fit(X_train, Y_train,validation_split = 0.1, epochs = 20) #storing in the history variable to plot it after (loss and accuracy)
model.evaluate(X_test,Y_test)
# Plot training & validation accuracy values
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()
# Plot training & validation loss values
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()
X_predict = model.predict(X_test)
X_predict = X_predict.flatten()
#Y_test = Y_test.values
test = Y_test.values
test
#rounding the prediction in order to classify better: the model gives e.g. 0.99, which is close to 1; this is the reason
#the model reports only roughly 50% accuracy on the unrounded outputs
X_predict_round = np.around(X_predict, decimals=1)
test = np.around(test, decimals=1)
X_predict
from sklearn.metrics import confusion_matrix
y_test = test * 10 #multiply to 10 to avoid the decimals (the confusion matrix get integers)
pred = X_predict_round*10 #multiply to 10 to avoid the decimals (the confusion matrix get integers)
labels = [0, 3.3, 5, 10] #the 4 labels we want to find (remember these are the normalized data * 10)
cm = confusion_matrix(y_test, pred, labels)
print(cm)
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(cm)
plt.title('Confusion matrix of the classifier')
fig.colorbar(cax)
ax.set_xticklabels([''] + ['1', '2', '3', '4'])
ax.set_yticklabels([''] + ['1', '2', '3', '4'])
plt.xlabel('Predicted')
plt.ylabel('True')
plt.show()
print()
from sklearn.metrics import classification_report
print(classification_report(y_test,pred))
###Output
/opt/jupyterhub/anaconda/lib/python3.6/site-packages/sklearn/metrics/classification.py:1439: UndefinedMetricWarning: Recall and F-score are ill-defined and being set to 0.0 in labels with no true samples.
'recall', 'true', average, warn_for)
###Markdown
Solar panels prediction
###Code
df_deli = df[(df[['delivery']] != 0.0).any(1)] #deleting the rows with 0 delivery
df_deli
del_sol = df_deli[['delivery', 'consumption']] #storing only the columns that I want to use for training (all of them except the target variable)
del_sol
#del_sol['people'] = pd.to_numeric(del_sol['people']) #converting to numeric, it was an object
#del_sol['heating_sys'] = pd.to_numeric(del_sol['heating_sys']) #converting to numeric, it was an object
x = del_sol.values #returns a numpy array
min_max_scaler = preprocessing.MinMaxScaler()
x_scaled = min_max_scaler.fit_transform(x)
df_norm = pd.DataFrame(x_scaled)
df_norm
total = df_norm.values #array converting from the normalized dataframe
df_deli['solar_panels'] = pd.to_numeric(df_deli['solar_panels']) #converting to numeric, it was an object
output = df_deli['solar_panels'] #storing the target variable column
print(np.unique(output))
output_norm = (output-np.min(output))/(np.max(output)-np.min(output)) #target variable normalitation
output_norm.unique() #4 groups of people
X_train, X_test, Y_train, Y_test = train_test_split(total, output_norm, test_size=0.3) #splitting off 30% of the data for testing
#reshaping the model because i only use 2 input neurons here
model = Sequential()
model.add(Dense(units = 10, input_dim = 2, activation = 'relu'))
model.add(Dense(1, activation = 'sigmoid')) #3 different groups ##sigmoid
lr= 0.1
model.compile(loss = 'mean_squared_error', optimizer = keras.optimizers.Adam(lr), metrics = ['accuracy'])
history = model.fit(X_train, Y_train,validation_split = 0.1, epochs = 5) #storing in the history variable to plot it after (loss and accuracy)
model.evaluate(X_test,Y_test)
# Plot training & validation accuracy values
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()
# Plot training & validation loss values
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()
X_predict = model.predict(X_test)
X_predict = X_predict.flatten()
#Y_test = Y_test.values
test = Y_test.values
#rounding the prediction in order to classify better: the model gives e.g. 0.99, which is close to 1; this is the reason
#the model reports only roughly 50% accuracy on the unrounded outputs
X_predict_round = np.around(X_predict, decimals=1)
test = np.around(test, decimals=1)
from sklearn.metrics import confusion_matrix
y_test = test * 10 #multiply to 10 to avoid the decimals (the confusion matrix get integers)
pred = X_predict_round*10 #multiply to 10 to avoid the decimals (the confusion matrix get integers)
labels = [8,9,10,11,12,13,14,15,17] #the labels we want to find (remember these are the normalized data * 10)
cm = confusion_matrix(y_test, pred, labels)
print(cm)
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(cm)
plt.title('Confusion matrix of the classifier')
fig.colorbar(cax)
ax.set_xticklabels([''] + ['8','9','10','11','12','13','14','15','17'])
ax.set_yticklabels([''] + ['8','9','10','11','12','13','14','15','17'])
plt.xlabel('Predicted')
plt.ylabel('True')
plt.show()
print()
from sklearn.metrics import classification_report
print(classification_report(y_test,pred))
###Output
precision recall f1-score support
0.0 0.00 0.00 0.00 4340
1.0 0.00 0.00 0.00 26323
2.0 0.00 0.00 0.00 17402
3.0 0.00 0.00 0.00 26159
4.0 0.11 0.84 0.19 17586
5.0 0.00 0.00 0.00 0
6.0 0.25 0.07 0.11 19636
7.0 0.21 0.01 0.01 30338
8.0 0.03 0.00 0.00 11349
9.0 0.00 0.00 0.00 0
10.0 0.00 0.00 0.00 6296
accuracy 0.10 159429
macro avg 0.05 0.08 0.03 159429
weighted avg 0.08 0.10 0.04 159429
|
notebooks/certification_deepz.ipynb | ###Markdown
Certification of Robustness using Zonotopes with DeepZ In this notebook we will demonstrate the usage of certification using zonotopes within ART. With deterministic certification methods such as DeepZ we can obtain a guarantee on whether a datapoint could have its class changed under a given bound. This method was originally proposed in: https://papers.nips.cc/paper/2018/file/f2f446980d8e971ef3da97af089481c3-Paper.pdf The zonotope abstraction used here is defined by: \begin{equation} \hat{x} = \eta_0 + \sum_{i=1}^{N} \eta_i \epsilon_i \end{equation} where $\eta_0$ is the central vector, $\epsilon_i$ are noise symbols, and $\eta_i$ are coefficients representing deviations around $\eta_0$. We can illustrate a 2D toy example of this below, in which the initial datapoint has two features with a central vector of [0.25, 0.25], and these features both have noise terms of [0.25, 0.25]. We push this zonotope through the neural network and show its intermediate shapes: ![zonotope_picture.png.png](attachment:zonotope_picture.png.png) We can see that the zonotope changes shape as it is passed through the neural network. When passing through a ReLU it gains another term (going from 2 sets of parallel lines to 3). We can then check if the final zonotope crosses any decision boundaries and determine whether a point is certified. Let's see how to use this method in ART!
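To make the abstraction concrete, here is a small NumPy sketch (illustrative only, not ART code) showing how an affine layer $Wx + b$ acts on the toy zonotope from the figure: the centre is pushed through the layer, every generator $\eta_i$ is multiplied by $W$, and interval bounds follow from the sum of absolute generator values. The weights and bias below are made-up numbers.

```python
import numpy as np

# toy zonotope from the figure: centre [0.25, 0.25], one generator per input feature
center = np.array([0.25, 0.25])
generators = np.array([[0.25, 0.00],   # eta_1
                       [0.00, 0.25]])  # eta_2

# hypothetical affine layer W x + b (values are illustrative)
W = np.array([[1.0, -0.5],
              [0.5,  1.0]])
b = np.array([0.1, -0.1])

new_center = W @ center + b          # centre is mapped exactly
new_generators = generators @ W.T    # each eta_i becomes W @ eta_i
radius = np.abs(new_generators).sum(axis=0)   # sum_i |eta_i| per output dimension

print("lower bounds:", new_center - radius)
print("upper bounds:", new_center + radius)
```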
###Code
import torch
import torch.optim as optim
import numpy as np
from torch import nn
from sklearn.utils import shuffle
from art.estimators.certification import deep_z
from art.utils import load_mnist, preprocess, to_categorical
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# We make an example pytorch classifier
class MNISTModel(nn.Module):
def __init__(self):
super(MNISTModel, self).__init__()
self.conv1 = nn.Conv2d(in_channels=1,
out_channels=32,
kernel_size=(4, 4),
stride=(2, 2),
dilation=(1, 1),
padding=(0, 0))
self.conv2 = nn.Conv2d(in_channels=32,
out_channels=32,
kernel_size=(4, 4),
stride=(2, 2),
dilation=(1, 1),
padding=(0, 0))
self.fc1 = nn.Linear(in_features=800,
out_features=10)
self.relu = nn.ReLU()
def forward(self, x):
if isinstance(x, np.ndarray):
x = torch.from_numpy(x).float().to(device)
x = self.relu(self.conv1(x))
x = self.relu(self.conv2(x))
x = torch.flatten(x, 1)
x = self.fc1(x)
return x
model = MNISTModel()
opt = optim.Adam(model.parameters(), lr=1e-4)
criterion = nn.CrossEntropyLoss()
(x_train, y_train), (x_test, y_test), min_, max_ = load_mnist()
x_test = np.squeeze(x_test)
x_test = np.expand_dims(x_test, axis=1)
y_test = np.argmax(y_test, axis=1)
x_train = np.squeeze(x_train)
x_train = np.expand_dims(x_train, axis=1)
y_train = np.argmax(y_train, axis=1)
# train the model normally
def standard_train(model, opt, criterion, x, y, bsize=32, epochs=5):
num_of_batches = int(len(x) / bsize)
for epoch in range(epochs):
x, y = shuffle(x, y)
loss_list = []
for bnum in range(num_of_batches):
x_batch = np.copy(x[bnum * bsize:(bnum + 1) * bsize])
y_batch = np.copy(y[bnum * bsize:(bnum + 1) * bsize])
x_batch = torch.from_numpy(x_batch).float().to(device)
y_batch = torch.from_numpy(y_batch).type(torch.LongTensor).to(device)
# zero the parameter gradients
opt.zero_grad()
outputs = model(x_batch)
loss = criterion(outputs, y_batch)
loss_list.append(loss.data)
loss.backward()
opt.step()
print('End of epoch {} loss {}'.format(epoch, np.mean(loss_list)))
return model
model = standard_train(model=model,
opt=opt,
criterion=criterion,
x=x_train,
y=y_train)
# lets now get the predicions for the MNIST test set and see how well our model is doing.
with torch.no_grad():
test_preds = model(torch.from_numpy(x_test).float().to(device))
test_preds = np.argmax(test_preds.cpu().detach().numpy(), axis=1)
print('Test acc: ', np.mean(test_preds == y_test) * 100)
# But how robust are these predictions?
# We can now examine this neural network's certified robustness.
# We pass it into PytorchDeepZ. We will get a print out showing which
# neural network layers have been registered. There will also be a
# warning to tell us that PytorchDeepZ currently infers a reshape when
# a neural network goes from using convolutional to dense layers.
# This will cover the majority of use cases, however, if not then the
# certification layers in art.estimators.certification.deepz.deep_z.py
# can be used to directly build a certified model structure.
zonotope_model = deep_z.PytorchDeepZ(model=model,
clip_values=(0, 1),
loss=nn.CrossEntropyLoss(),
input_shape=(1, 28, 28),
nb_classes=10)
# Lets now see how robust our model is!
# First we need to define what bound we need to check.
# Here let's check for L infinity robustness with small bound of 0.05
bound = 0.05
num_certified = 0
num_correct = 0
# lets now loop over the data to check its certified robustness:
# we need to consider a single sample at a time as due to memory and compute footprints batching is not supported.
# In this demo we will look at the first 50 samples of the MNIST test data.
original_x = np.copy(x_test)
for i, (sample, pred, label) in enumerate(zip(x_test[:50], test_preds[:50], y_test[:50])):
# we make the matrix representing the allowable perturbations.
# we have 28*28 features and each one can be manipulated independently requiring a different row.
# hence a 784*784 matrix.
eps_bound = np.eye(784) * bound
# we then need to adjust the raw data with the eps bounds to take into account
# the allowable range of 0 - 1 for pixel data.
# We provide a simple function to do this preprocessing for image data.
# However if your use case is not supported then a custom pre-processor function will need to be written.
sample, eps_bound = zonotope_model.pre_process(cent=sample,
eps=eps_bound)
sample = np.expand_dims(sample, axis=0)
# We pass the data sample and the eps bound to the certifier along with the prediction that was made
# for the datapoint.
# A boolean is returned signifying if it can have its class changed under the given bound.
is_certified = zonotope_model.certify(cent=sample,
eps=eps_bound,
prediction=pred)
if pred == label:
num_correct +=1
if is_certified:
num_certified +=1
print('Classified Correct {}/{} and also certified {}/{}'.format(num_correct, i+1, num_certified, i+1))
# we can then compare this to the empirical PGD performance
from art.estimators.classification import PyTorchClassifier
from art.attacks.evasion.projected_gradient_descent.projected_gradient_descent import ProjectedGradientDescent
classifier = PyTorchClassifier(
model=model,
clip_values=(0.0, 1.0),
loss=criterion,
optimizer=opt,
input_shape=(1, 28, 28),
nb_classes=10,
)
attack = ProjectedGradientDescent(classifier, eps=0.05, eps_step=0.01, verbose=False)
x_train_adv = attack.generate(x_test[:50].astype('float32'))
y_adv_pred = classifier.predict(torch.from_numpy(x_train_adv).float().to(device))
y_adv_pred = np.argmax(y_adv_pred, axis=1)
print('Test acc: ', np.mean(y_adv_pred == y_test[:50]) * 100)
###Output
Test acc: 92.0
|
CentralFrequencyDumb.ipynb | ###Markdown
Burst Center Frequency. Finds the center frequency of a burst by integrating over time and estimating the center of the resulting spectrum (this 'dumb' version uses an intensity-weighted centroid rather than a Gaussian fit).
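If a Gaussian fit is preferred over the simple centroid used below, a sketch with the already-imported `curve_fit` could look like this (illustrative only; `freqs` and `spectrum` stand for the frequency axis and the time-integrated spectrum produced inside `findcenter`):

```python
import numpy as np
from scipy.optimize import curve_fit

def gaussian(x, amp, mu, sigma):
    return amp * np.exp(-0.5 * ((x - mu) / sigma) ** 2)

def center_from_gaussian(freqs, spectrum):
    """Fit a Gaussian to a 1-D spectrum and return the fitted centre frequency."""
    centroid = np.sum(freqs * spectrum) / np.sum(spectrum)        # centroid as the initial guess
    p0 = [spectrum.max(), centroid, (freqs[-1] - freqs[0]) / 10]  # amp, mu, sigma guesses
    popt, _ = curve_fit(gaussian, freqs, spectrum, p0=p0)
    return popt[1]  # mu, the fitted centre
```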
###Code
from __future__ import division
import math
import os
import sys
import time
import numpy as np
import scipy.stats
from scipy.optimize import curve_fit
from math import log10
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from scipy import pi as nombrepi
from scipy import signal, ndimage
from tqdm import tqdm
from matplotlib import colors as mcolors
import functools
print = functools.partial(print, flush=True) # print doesn't happen til script ends so force it to flush... windows thing?
import pandas as pd
bursts = pd.read_csv('bursts.csv')
bursts
bursts.head(16)
def findcenter(burst):
burstnum = burst[0][:2].strip('0') if burst[0][:2][0] == '0' else burst[0][:2]
print('Burst #{}'.format(burstnum))
folder = 'data'
filename = burst.filename
edge = burst.edge
junk, nchan, nbin, I, Q, U, V = np.loadtxt('{}/{}'.format(folder, filename), delimiter=' ', unpack=True)
Q, U, V = None, None, None
n = len(junk)
print("Data loaded")
binmax = int(nbin[n-1])+1
frequencymax = (int(nchan[n-1])+1)
intensity = np.zeros((frequencymax, binmax))
X = np.zeros(binmax)
Y = np.zeros(frequencymax)
# what are these?
tmin = 500
tmax = 1500
#### 1. remove noise
intensitynoise1 = np.zeros(tmin-1)
intensitynoise2 = np.zeros(binmax-tmax)
for i in tqdm(range(frequencymax-50,51,-1), desc='noise removal', disable=False):
Y[i-1] = 4.15 + (i-1) * 1.5625 # ?
for j in range(1,tmin) :
intensitynoise1[j-1] = (I[j-1 + binmax*(frequencymax-i)])/(tmin-1)
for j in range(tmax+1,binmax+1) :
intensitynoise2[j-1-tmax] = (I[j-1 + binmax*(frequencymax-i)])/(binmax-tmax)
a = sum(intensitynoise1)
b = sum(intensitynoise2)
for j in range(1,binmax+1) :
X[j-1] = j-1
intensity[i-1,j-1] = I[j-1 + binmax*(frequencymax-i)] - (a+b)/2
burstwindow = intensity[:,edge:edge+frequencymax]
plt.title('Burst #{}'.format(burstnum))
freqspectrum = pd.DataFrame(burstwindow[:,:].sum(axis=1)[:, None])
data = freqspectrum[50:463][0]
x = data.keys()
xo = sum(x*data)/sum(data)
return xo # return the central frequency
bursts['center'] = bursts.head(16).apply(findcenter, axis=1)
bursts['center']
###Output
_____no_output_____ |
Others/lab-11_4_custom_dataset_4.ipynb | ###Markdown
Deep Learning for Everyone Season 2 - PyTorch *** How do you use a Custom Dataset? (4) *** This time we will build a Neural Network. Shall we quickly go back over what we covered in the previous chapters? We define the Neural Network we want to train through a class: class "name of your Neural Network"(nn.Module): def __init__(self): super("name of your Neural Network", self).__init__() ~~~~~~~~~~~~~~~~ def forward(self, inputs): ~~~~~~~~~~~~~~~~ Do you remember declaring it in the form above? Since we decided to use Convolution layers, let's look at the convolution operation. Quickly open a command window (a terminal on Linux or Mac), run `import torch.nn as nn`, and type the `dir(nn)` command. Did you see the huge number of things that came out? `dir` is a built-in Python feature that shows the functions and values belonging to whatever is inside the parentheses. It is a very handy way to find out what the functions you need can do! (If you didn't know about it, isn't that a great tip?) You can use anything listed there. We are going to build LeNet-5, the simplest of the CNN architectures, so shall we get started?
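For instance (a small illustrative snippet, not from the original lecture), the output of `dir(nn)` can be filtered to list only the convolution-related classes:

```python
import torch.nn as nn

# keep only the attributes of torch.nn whose name mentions "Conv"
conv_layers = [name for name in dir(nn) if 'Conv' in name]
print(conv_layers)  # e.g. ['Conv1d', 'Conv2d', 'Conv3d', 'ConvTranspose2d', ...]
```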
###Code
from torch.utils.data import DataLoader
import torchvision
from torchvision import transforms
import torch.nn as nn
import torch.nn.functional as F
data=0
class NN(nn.Module):
def __init__(self):
super(NN,self).__init__()
self.conv1 = nn.Conv2d(3,6,5)
self.conv2 = nn.Conv2d(6,16,5)
self.pool=nn.MaxPool2d(2)
self.fc1 = nn.Linear(16*13*29,120)
self.fc2 = nn.Linear(120,2)
def forward(self,x):
x=F.relu(self.conv1(x))
x=self.pool(x)
x=F.relu(self.conv2(x))
x=self.pool(x)
x=x.view(x.shape[0],-1)
x=F.relu(self.fc1(x))
x=self.fc2(x)
return x
trans = transforms.Compose([
transforms.ToTensor()
])
train_data=torchvision.datasets.ImageFolder(root='./train_data',transform=trans)
trainloader=DataLoader(dataset=train_data,batch_size=4,shuffle=True,num_workers=4)
net = NN()
for num, data in enumerate(trainloader):
print(data[0].shape)
out = net(data[0])
print(out.shape)
break
###Output
_____no_output_____ |
Test_case.ipynb | ###Markdown
Test of the neural network for the San Pedro River Basin This Notebook runs the neural network on some test cases and compares the predictions with MODFLOW calculations in the attached data 'Well_data_examps' In Google Colab
###Code
!pip install scikit-fmm
!pip install cloudpickle==1.6.0
# Import
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
from time import time
import matplotlib.pyplot as plt
import tensorflow as tf
import joblib
import scipy.ndimage
import scipy.signal
import skfmm
import h5py
from urllib.request import urlretrieve as urlretrieve
###Output
_____no_output_____
###Markdown
Here are some functions that we use to create input data for the neural network. The loaded text files contain data on stream locations and hydraulic conductivities extracted from the MODFLOW model.
###Code
#distance to stream
def dist():
if os.path.exists('str.txt'):
data_str = np.loadtxt('str.txt')
data_hk = np.loadtxt('hyd_kon')
if not os.path.exists('str.txt'):
url_str = 'https://github.com/MathiasBusk/HYDROsim-paper/raw/main/str.txt'
Path_str = tf.keras.utils.get_file('str.txt', url_str)
data_str = np.loadtxt(Path_str)
url_hk = 'https://github.com/MathiasBusk/HYDROsim-paper/blob/main/hyd_kon?raw=true'
Path_hk = tf.keras.utils.get_file('hyd_kon', url_hk)
data_hk = np.loadtxt(Path_hk)
row = (data_str[:,1])
col = (data_str[:,2])
row = row.astype(int)
col = col.astype(int)
data_hks = data_hk[::-1]
hyk = np.flipud((data_hks))
xmax = 80000
ymax = 110000
X, Y = np.meshgrid(np.linspace(0,xmax,320), np.linspace(ymax,0,440))
phi = -1* np.ones_like(X)
phi[row,col] = 1
d = skfmm.distance(phi,dx=250)
d = -d
return d
#distance to well
def well_dist(row,col):
xmax = 80000
ymax = 110000
X, Y = np.meshgrid(np.linspace(0,xmax,320), np.linspace(ymax,0,440))
phi = -1* np.ones_like(X)
phi[row,col] = 1
d_well = skfmm.distance(phi,dx=250)
d_well = -d_well
return d_well
#travel time from to well
def travel_time(row,col,h0):
if os.path.exists('hyd_kon'):
data_hk = np.loadtxt('hyd_kon')
if not os.path.exists('hyd_kon'):
url_hk = 'https://github.com/MathiasBusk/HYDROsim-paper/blob/main/hyd_kon?raw=true'
Path_hk = tf.keras.utils.get_file('hyd_kon', url_hk)
data_hk = np.loadtxt(Path_hk)
result = np.where(h0 == -999)
result = np.array(result)
data_hks = data_hk[::-1]
hyk = np.flipud((data_hks))
hyk +=0.1
hk_smooth = scipy.ndimage.filters.gaussian_filter(hyk,(6.5,6.5))
hk_smooth[result[0,:],result[1,:]] = -2
hyk = np.flipud((data_hks))
xmax = 80000
ymax = 110000
X, Y = np.meshgrid(np.linspace(0,320,320), np.linspace(440,0,440))
#X = X[::-1]
#Y = Y[::-1]
phi = np.ones_like(hyk)*hyk
phi[phi == 0] = 0
phi[phi != 0] = -1
phi[row,col] = 1
speed=np.ones_like(phi)*hk_smooth
t = skfmm.travel_time(phi, speed,dx=250)
return t
#Generates pandas dataframe with input data
def data_gen(i,j,data0):
if os.path.exists('hyd_kon'):
data_hk = np.loadtxt('hyd_kon')
d_boundary = np.load('d_boundary.npy')
row_nr = np.load('row_nr.npy')
col_nr = np.load('col_nr.npy')
if not os.path.exists('hyd_kon'):
url_hk = 'https://github.com/MathiasBusk/HYDROsim-paper/blob/main/hyd_kon?raw=true'
url_bound = 'https://github.com/MathiasBusk/HYDROsim-paper/blob/main/d_boundary.npy?raw=true'
url_row = 'https://github.com/MathiasBusk/HYDROsim-paper/blob/main/row_nr.npy?raw=true'
url_col = 'https://github.com/MathiasBusk/HYDROsim-paper/blob/main/col_nr.npy?raw=true'
Path_hk = tf.keras.utils.get_file('hyd_kon', url_hk)
Path_bound = tf.keras.utils.get_file('d_boundary.npy', url_bound)
Path_row = tf.keras.utils.get_file('row_nr.npy', url_row)
Path_col = tf.keras.utils.get_file('col_nr.npy', url_col)
data_hk = np.loadtxt(Path_hk)
d_boundary = np.load(Path_bound)
row_nr = np.load(Path_row)
col_nr = np.load(Path_col)
#d_boundary = np.load('d_boundary.npy')
#row_nr = np.load('row_nr.npy')
#col_nr = np.load('col_nr.npy')
#data_hk = np.loadtxt('hyd_kon')
data_hks = data_hk[::-1]
hyk = np.flipud((data_hks))
t = travel_time(i,j,data0)
d_well = well_dist(i,j)
d = dist()
#head_diff = np.reshape(head_differ,(320*440))
dists = np.reshape(d,(320*440))
dist_b = np.reshape(d_boundary,(320*440))
time = np.reshape(t,(320*440))
dist_well = np.reshape(d_well,(320*440))
head0 = np.reshape(data0,(320*440))
hykk = np.reshape(hyk,(320*440))
hykk_l = np.reshape(np.log10(hyk),(320*440))
row = np.reshape(row_nr,(320*440))
col = np.reshape(col_nr,(320*440))
data_set_t= pd.DataFrame(head0)
data_set_t.columns = ["head"]
#data_set_t['head']=head0
data_set_t['dist']=dists
data_set_t['time']=time
data_set_t['dist_well']=dist_well
data_set_t['h_cond']=hykk
data_set_t['h_cond_log']=hykk_l
data_set_t['row']=row
data_set_t['col']=col
data_set_t['dist_boundary']=dist_b
data_set_t = data_set_t[(data_set_t[['head']] != -999).all(axis=1)]
data_set_t = data_set_t[(data_set_t[['time']] != 0).all(axis=1)]
data_set_t = data_set_t[(data_set_t[['dist_boundary']] > 750).all(axis=1)]
return data_set_t
def data_scatter(i,j,data0,head_differ):
if os.path.exists('d_boundary.npy'):
d_boundary = np.load('d_boundary.npy')
if not os.path.exists('d_boundary.npy'):
url_bound = 'https://github.com/MathiasBusk/HYDROsim-paper/blob/main/d_boundary.npy?raw=true'
Path_bound = tf.keras.utils.get_file('d_boundary.npy', url_bound)
d_boundary = np.load(Path_bound)
#d_boundary = np.load('d_boundary.npy')
t = travel_time(i,j,data0)
head_diff = np.reshape(head_differ,(320*440))
dist_b = np.reshape(d_boundary,(320*440))
time = np.reshape(t,(320*440))
head0 = np.reshape(data0,(320*440))
data_set_t= pd.DataFrame(head_diff)
data_set_t.columns = ["head_differ"]
data_set_t['head']=head0
data_set_t['time']=time
data_set_t['dist_boundary']=dist_b
data_set_t = data_set_t[(data_set_t[['head']] != -999).all(axis=1)]
data_set_t = data_set_t[(data_set_t[['time']] != 0).all(axis=1)]
data_set_t = data_set_t[(data_set_t[['dist_boundary']] > 750).all(axis=1)]
return data_set_t
###Output
_____no_output_____
###Markdown
Load data on hydraulic head changes from well simulations in MODFLOW
###Code
Path = os.path.join("Well_data_examps")
if os.path.exists(Path):
files = []
# r=root, d=directories, f = files
for r, d, f in os.walk(Path):
for file in f:
if '.npy' in file:
files.append(os.path.join(r, file))
row_len = 440
col_len = 320
data = np.empty((len(files),row_len,col_len))
for i in range(len(files)):
dats = np.load(files[i])
data[i,:,:] = dats[3,:,:]
data0 = np.load(os.path.join("Well_data_examps/no_pump",'head_no_pump.npy'))
if not os.path.exists(Path):
url1 = 'https://github.com/MathiasBusk/HYDROsim-paper/blob/main/Well_data_examps/head_137.0_182.0_rate200.npy?raw=true'
url2 = 'https://github.com/MathiasBusk/HYDROsim-paper/blob/main/Well_data_examps/head_157.0_222.0_rate200.npy?raw=true'
url3 = 'https://github.com/MathiasBusk/HYDROsim-paper/blob/main/Well_data_examps/head_180.0_174.0_rate200.npy?raw=true'
url4 = 'https://github.com/MathiasBusk/HYDROsim-paper/blob/main/Well_data_examps/head_212.0_197.0_rate200.npy?raw=true'
url5 = 'https://github.com/MathiasBusk/HYDROsim-paper/blob/main/Well_data_examps/head_255.0_221.0_rate200.npy?raw=true'
url = [url1, url2, url3, url4, url5]
row_len = 440
col_len = 320
data = np.empty((len(url),row_len,col_len))
for i in range(len(url)):
print(url[i])
Path = tf.keras.utils.get_file(url[i][73:101], url[i])
dats = np.load(Path)
data[i,:,:] = dats[3,:,:]
url0 = 'https://github.com/MathiasBusk/HYDROsim-paper/blob/main/Well_data_examps/no_pump/head_no_pump.npy?raw=true'
Path = tf.keras.utils.get_file('no_pump.npy', url0)
data0 = np.load(Path)
###Output
https://github.com/MathiasBusk/HYDROsim-paper/blob/main/Well_data_examps/head_137.0_182.0_rate200.npy?raw=true
https://github.com/MathiasBusk/HYDROsim-paper/blob/main/Well_data_examps/head_157.0_222.0_rate200.npy?raw=true
https://github.com/MathiasBusk/HYDROsim-paper/blob/main/Well_data_examps/head_180.0_174.0_rate200.npy?raw=true
https://github.com/MathiasBusk/HYDROsim-paper/blob/main/Well_data_examps/head_212.0_197.0_rate200.npy?raw=true
https://github.com/MathiasBusk/HYDROsim-paper/blob/main/Well_data_examps/head_255.0_221.0_rate200.npy?raw=true
###Markdown
The well locations [row, column] for each simulation are noted below
###Code
examps =np.array([[137,182], [157,222], [180, 174], [212,197], [255,221]])
###Output
_____no_output_____
###Markdown
Load the pre-trained network
###Code
from keras.models import load_model
from tensorflow import keras
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Dropout
import tensorflow_probability as tfp
tfd = tfp.distributions
if os.path.exists('my_model.h5'):
model = tf.keras.models.load_model('my_model.h5',compile=False)
sc=joblib.load('std_scaler.bin')
if not os.path.exists('my_model.h5'):
url = 'https://github.com/MathiasBusk/HYDROsim-paper/blob/main/my_model.h5?raw=true'
url_scaler = 'https://github.com/MathiasBusk/HYDROsim-paper/raw/main/std_scaler.bin'
Path = tf.keras.utils.get_file('my_model.h5', url)
Path_scaler = tf.keras.utils.get_file('std_scaler.bin', url_scaler)
model = tf.keras.models.load_model(Path,compile=False)
sc = joblib.load(Path_scaler)
model.summary()
###Output
Using TensorFlow backend.
###Markdown
The 'data_select' variable determines what simulation scenario is run - check the 'examps' array for row and column number.
###Code
data_select = 0 #0,1,2,3 or 4
row = examps[data_select,0]
col = examps[data_select,1]
###Output
_____no_output_____
###Markdown
Input data for the network is generated. The input parameters are scaled with a standard scaler and the network is applied to predict hydraulic head changes from the given inputs. The outputs are the mean hydraulic head change and the standard deviation of that prediction.
###Code
data_sets = data_gen(row,col,data0[3,:,:])
data_scaled = sc.transform(data_sets.iloc[:,0:6])
t_1 = time()
y_hat = model(data_scaled)
mean = y_hat.mean()
stddev = y_hat.stddev()
t_2 = time()
print(f'Prediction time {np.round(t_2-t_1,3)} s')
###Output
Prediction time 0.041 s
###Markdown
The predictions are compared to MODFLOW values in the following subplot
###Code
data_ML = np.empty((row_len,col_len))
data_ML.fill(np.nan)
data_set = np.array(data_sets.iloc[:,:])
rows = data_set[:,-3].astype(int)
cols = data_set[:,-2].astype(int)
data_ML[rows,cols]=mean[:,0]
data_st = np.empty((row_len,col_len))
data_st.fill(np.nan)
data_st[rows,cols]=stddev[:,0]
fig, (ax3, ax1, ax2) = plt.subplots(1,3)
fig.set_figheight(10)
fig.set_figwidth(30)
c1 = ax1.imshow(data_ML, vmin=0, vmax=1, cmap='hot_r')
ax1.set(xlabel='column', ylabel='row',title='Neural network prediction')
fig.colorbar(c1,ax = ax1)
c2 = ax2.imshow(data_st,cmap='hot_r',vmin=0,vmax=.5)
ax2.set(xlabel='column',title='NN Standard deviation')
fig.colorbar(c2,ax = ax2)
c3 = ax3.imshow(data0[3,:,:]-data[data_select,:,:],cmap='hot_r',vmin=0,vmax=1)
ax3.set(xlabel='column',title='MODFLOW prediction')
fig.colorbar(c3,ax = ax3)
###Output
_____no_output_____
###Markdown
The following scatterplot compares the predictions on a 1-1 scale. x-axis shows MODFLOW head change values. y-axis shows predicted head change values from the neural network along with a 95 % confidence interval.
###Code
y_testi = data_scatter(row,col,data0[3,:,:],(data0[3,:,:]-data[data_select,:,:]))
y_testi = y_testi.iloc[:,0]
err = 1.96*stddev
plt.figure(figsize=(30,10))
plt.errorbar(y_testi,mean,yerr=err[:,0],fmt='.', color='b', label='95 % conf')
plt.plot(y_testi,mean,'ro', label='Scatter point')
plt.plot([y_testi.min(), y_testi.max()], [y_testi.min(), y_testi.max()], 'k--', lw=4)
#plt.ylim([0,0.5])
#plt.xlim([0,0.5])
plt.ylabel('Predicted')
plt.xlabel('Measured (Flopy)')
plt.title('Predicted values with 95% conf')
plt.legend(loc='lower right')
plt.show()
###Output
_____no_output_____ |
23 - Python for Finance/2_Calculating and Comparing Rates of Return in Python/5_Calculating a Security's Rate of Return in Python - Simple Returns - Part II (3:28)/Simple Returns - Part II - Lecture_CSV.ipynb | ###Markdown
Simple Rate of Return $$\frac{P_1 - P_0}{P_0} = \frac{P_1}{P_0} - 1$$
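A side note (not part of the original lecture): pandas can compute the same simple return in a single call with `pct_change()`, which is equivalent to the `shift(1)` formula used in the next cell.

```python
# assumes the PG dataframe with an 'Adj Close' column loaded earlier in the notebook
simple_returns = PG['Adj Close'].pct_change()   # (P_t / P_{t-1}) - 1
print(simple_returns.head())
```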
###Code
PG['simple_return'] = (PG['Adj Close'] / PG['Adj Close'].shift(1)) - 1
print(PG['simple_return'])
PG['simple_return'].plot(figsize=(8, 5))
plt.show()
avg_returns_d = PG['simple_return'].mean()
avg_returns_d
avg_returns_a = PG['simple_return'].mean() * 250
avg_returns_a
print(str(round(avg_returns_a, 5) * 100) + ' %')
###Output
10.482 %
|
03_DataPreprocessing/04_Country/CountryMissingData.ipynb | ###Markdown
We have 536 missing values (about 15% of our data)
###Code
df['country'].value_counts().plot(kind='bar')
plt.show()
nac=df[(df['country']=='n/a') & ((df['uniAccepted']!='n/a') | (df['uniSelected']!='n/a'))][['uniAccepted','uniSelected','country']].copy()
nac.head(3)
# ['unknown','no one','\-+','not yet','not decided yet','i have not decided yet','waiting','still working on it']
###Output
_____no_output_____
###Markdown
People with GRE scores are mostly admitted to US universities
###Code
noCountryWithGre=df[(df['country']=='n/a')&(df['uniSelected']!='n/a')&((df['engExamQuan']!='n/a')|(df['engExamAnlt']!='n/a')|(df['engExamVrbl']!='n/a'))][['country','uniSelected',"engExamQuan","engExamAnlt","engExamVrbl"]]
noCountryWithGre.count()[1]
# for i in noCountryWithGre.index:
# print(i)
# print(df.get_value(i,'apUni'))
# a = (raw_input()).strip()
# if (a=='STOP'): break;
# df.set_value(i,'country',a)
# noCountryWithGre=df[(df['country']=='n/a')]
# noCountryWithGre.count()[0]
df.to_json('Fix1.json',date_format='utf8')
###Output
_____no_output_____ |
Final_Project_flores/Final Project.ipynb | ###Markdown
Finds the minimum of the function on the domain [a, b]
###Code
# imports used throughout this notebook (numpy, random, scipy.integrate, matplotlib)
import numpy as np
import random
from scipy import integrate
import matplotlib.pyplot as plt
def minimum(func,a,b):
x_values=np.linspace(a,b,100000)
y_values=func(x_values)
min_num=min(y_values)
return min_num
###Output
_____no_output_____
###Markdown
This function finds the maximum value on the domain [a, b]
###Code
def maximum(func,a,b):
x_values=np.linspace(a,b,100000)
y_values=func(x_values)
max_num=max(y_values)
return max_num
###Output
_____no_output_____
###Markdown
This function takes in the function we are 'integrating', the integration bounds, the minimum and maximum of the function on the interval, and the number of iterations. It then repeatedly selects a random point and determines whether it lies below or above the function. Finally, it computes the total bounding-box area and, from the fraction of points below the curve, the area under the curve.
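For reference, the same hit-or-miss estimator can be written without an explicit Python loop. The vectorized sketch below is an alternative (not the version used in this project) and assumes `func` accepts NumPy arrays, as `np.cos` does.

```python
import numpy as np

def integration_vectorized(func, a, b, min_num, max_num, n_samples):
    # draw all random points at once instead of one per loop iteration
    x = np.random.uniform(a, b, n_samples)
    y = np.random.uniform(min_num, max_num, n_samples)
    frac_below = np.mean(y < func(x))                 # fraction of points under the curve
    return frac_below * (max_num - min_num) * (b - a)
```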
###Code
def integration(func,a,b,min_num,max_num,i_count):
#func-function we are integrating
#a-lower bound
#b-upper bound
#max_num-max number on integral
#i_count-numb of iterations
i=0.0
points_below=0
while(i<i_count):
#getting a random point within the grid
x=random.uniform(a,b)
y=random.uniform(min_num,max_num)
#if y is less than max on interval,
#then it must be under the curve(and thus in area)
#and it needs to be accounted for
if (y<func(x)):
points_below= points_below + 1
i=i+1
area_max=(max_num-min_num)*(b-a)
area_under=(points_below/i)*area_max
return area_under
###Output
_____no_output_____
###Markdown
Finds the relative error between the Monte Carlo estimate and the definite integral computed by scipy's `integrate.quad`
###Code
def error(func,a,b,estim):
#func-function we are integrating
#a-lower bound
#b-upper bound
#estim-Monte Carlo estimate of the integral
answer, quad_uncertainty = integrate.quad(func,a,b) # quad returns (value, estimated absolute error)
err = np.fabs(answer-estim)/answer # relative error of the Monte Carlo estimate
return err, quad_uncertainty
###Output
_____no_output_____
###Markdown
Tests functions
###Code
a=0.0
b=1.75
max_numb=maximum(function,a,b)
min_numb=minimum(function,a,b)
iterations=100000
estimate=integration(function,a,b,min_numb,max_numb,iterations)
print(estimate)
tolerance=error(function,a,b,estimate)
print ("The tolerance for this function is the first number, with the second number is the certainty of the tolerance ", tolerance)
points=250
max_numb=maximum(function,a,b)
min_numb=minimum(function,a,b)
x=np.linspace(0.0,1.75,100)
y=function(x)
plt.plot(x,y, label="cos(x)")
plt.xlabel('x')
plt.ylabel('f(x)')
plt.xlim(0.0,1.75)
plt.ylim(min_numb,max_numb)
label="points below curve",
label="points above curve"
i=0.0
while(i<points):
#getting a random point within grade
x_coord=random.uniform(a,b)
y_coord=random.uniform(min_numb,max_numb)
if (y_coord<function(x_coord)):
plt.plot(x_coord,y_coord,marker="o",color='g')
elif(y_coord>function(x_coord)):
plt.plot(x_coord,y_coord,marker="o",color='r')
i=i+1
plt.legend()
plt.show()
###Output
_____no_output_____ |
src/jupyter-notebooks/Stationarity.ipynb | ###Markdown
Table of ContentsIntroductionMotivationAnalaysisAnalysis of stationarity of security pricesTODO IntroductionThe following notebook will look at stationarity as a phenomenon and how it relates to analysis of different systems.
###Code
import numpy as np # linear algebra
import pandas as pd # data analysis/manipulation
import random
from datetime import datetime, timedelta
import matplotlib.pyplot as plt # plotting
# set random seed so you get same results with each run
np.random.seed(1)
###Output
_____no_output_____
###Markdown
MotivationIn life we come across many different variables that evolve over time. Whilst there are many different systems and processes that can be observed, a natural way to track these would be to investigate the evolution with time- giving rise to time series data.In this notebook I aim to explore time series data (using Python code), specifically the statistical properties of a time series - in particular looking at any bias/drift inherent in the dataset. Many datasets have trends/cycles/random walks (or all three), however in order to carry out statistical analysis one must remove these trends. AnalaysisBelow, see a plot of two different time series. The first is a plot of random number See below a plot of two different time series- the **top plot** is 3000 numbers randomly selected from a normal distribution (average value 0, standard deviation of 1), whilst the **bottom plot** is the addition of these random numbers together.
###Code
dates = [datetime.today() + timedelta(days=x) for x in range(3000)]
points = pd.Series(np.random.randn(3000), index=dates)
total = points.cumsum()
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(10,7))
fig.subplots_adjust(hspace=0.5) # make a little extra space between the subplots
ax1.set_xlabel("Year")
ax1.set_ylabel("Change")
ax1.plot(points, color="blue", linewidth=0.5)
ax2.set_xlabel("Year")
ax2.set_ylabel("Price")
ax2.plot(total, color="red")
###Output
_____no_output_____
###Markdown
As you can see from the plots above, the *change* variable fluctuates wildly over time, whilst the *price* variable also fluctuates, but there is a clear positive drift - the price increases as time goes on. If someone asked you the average for the *change* and *price* (see above plots) for the two year periods 2020-2022 and 2025-2027, the answer for the former might be quite similar, whilst the latter would have different answers (the average for the price looks like it has increased). See two further plots below to investigate this.
###Code
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(10,7))
# make a little extra space between the subplots
fig.subplots_adjust(hspace=0.5)
ax1.set_xlabel("Year")
ax1.set_ylabel("Rolling (30-day) average change")
ax1.plot(points.rolling(30).mean(), color="blue", linewidth= 0.5)
ax2.set_xlabel("Year")
ax2.set_ylabel("Rolling (30-day) average price")
ax2.plot(total.rolling(30).mean(), color="red")
###Output
_____no_output_____
###Markdown
Therefore, we can see that the means evolve over time in different ways- the first plot appears to be quite random in terms of the average value for *change*, whilst the second, average *price* is path-dependent, it follows a trend.In the field of financial securities, for example, we often see the evolution of prices more similar to the **bottom plot**. Therefore, many people are motivated to study the trends of the past to find out which securities they should invest in to make a good return on their investment, and which securities should be avoided.My natural intuition when given a security time series (such as the **bottom plot**) is to look at the statistical properties of the data. Python has some nice in-built capabilities to decompose data into the Trend, Seasonality and Residual, known as **Holt's Linear Trend Method**. Below I implement this in Python.
###Code
import statsmodels.api as sm_api
from statsmodels.tsa.seasonal import seasonal_decompose
# below function from https://stackoverflow.com/questions/45184055/how-to-plot-multiple-seasonal-decompose-plots-in-one-figure
def plotseasonal(res, axes, color):
res.observed.plot(ax=axes[0], legend=False, color=color)
axes[0].set_ylabel('Observed')
res.trend.plot(ax=axes[1], legend=False, color=color)
axes[1].set_ylabel('Trend')
res.seasonal.plot(ax=axes[2], legend=False, color=color)
axes[2].set_ylabel('Seasonal')
res.resid.plot(ax=axes[3], legend=False, color=color)
axes[3].set_ylabel('Residual')
subs_change = seasonal_decompose(points, freq=365)
subs_price = seasonal_decompose(total, freq=365)
fig, axes = plt.subplots(ncols=2, nrows=4, sharex=True, figsize=(15,12))
plotseasonal(subs_change, axes[:,0], color="blue")
plotseasonal(subs_price, axes[:,1], color="red")
###Output
_____no_output_____ |
content/notebooks/2016-01-22-string-formatting.ipynb | ###Markdown
Sometimes in order to run some programming software we need to prepare an input description file which specifies the model setup (e.g., chemical mechanism, integration method, desired type of results, etc). If we are planning to run the model several times with different, for example, initial conditions, constructing such a file using a script could be beneficial. During our last meeting we discussed how to assemble such a file with Python, and here is what we did. Let's assume that we need to construct a file containing the following information: ```{!-*- F90 -*-}INITVALUESO3 =7.50e+11;CH4 =4.55e+13;CO =2.55e+12;INLINE F90_INITTSTART = 0*3600TEND = TSTART + 7*3600DT = 3600TEMP = 298ENDINLINE``` The first line of this file is a header. Since it is not going to change in our model runs, we can store it in a separate file:
###Code
header_text = '{!-*- F90 -*-}'
with open('header.inp','w') as header_file:
header_file.write(header_text)
###Output
_____no_output_____
###Markdown
The next four lines define the INITVALUES section of the file, where the initial concentrations (actually, number densities) of chemical compounds of interest are set. If we want to change only numerical values in this section, it is logical to create a text template which would take into account the syntax of the target software and include some sort of 'gaps' to fill in with our initial values. One way of achieving that is to define a function that creates a multiline string and has a number of arguments determining the initial concentrations of our chemical species:
###Code
def gen_concs_string(O3=7.50e+11, CH4=4.55e+13, CO=2.55e+12):
concs_string = \
"""
#INITVALUES
O3\t={O3_nd:.2e};
CH4\t={CH4_nd:.2e};
CO\t={CO_nd:.2e};"""\
.format(O3_nd=O3, CH4_nd=CH4, CO_nd=CO)
return concs_string
###Output
_____no_output_____
###Markdown
For convenience we can even set default values for each of the arguments (e.g., here the default ozone initial concentration is $7.5\times 10^{11}$ molecules per $cm^{3}$). By the way, we have just used a new style of string formatting in Python! An old way of doing the same would include a '%' sign in front of the function arguments inside the string expression and a line of code starting with '%' afterwards, like this ```"""INITVALUESO3\t=%(O3_nd).2e;CH4\t=%(CH4_nd).2e;CO\t=%(CO_nd).2e;"""\%{"O3":O3_nd, "CH4":CH4_nd, "CO_nd":CO_nd}``` but using the new style makes your code more readable. For more examples on differences between old and new styles of string formatting in Python follow this link: [PyFormat](https://pyformat.info/). Well, let's reinforce our knowledge and apply the new style of string formatting to reproduce the last section of the input file, which specifies model integration time and temperature:
###Code
def gen_time_str(tstart, nhours, timestep, temp):
time_string = \
"""
#INLINE F90_INIT
TSTART = {tstart}*{timestep}
TEND = TSTART + {nhours}*3600
DT = {timestep}
TEMP = {temp}
#ENDINLINE"""\
.format(tstart=tstart, nhours=nhours, timestep=timestep, temp=temp)
return time_string
###Output
_____no_output_____
###Markdown
And finally let's assemble our input description file:
###Code
# Read header
with open('header.inp','r') as header_file:
header_text = header_file.read()
# Use default inital concentrations and set model integration time and temperature
concstr = gen_concs_string()
timestr = gen_time_str(0,7,3600,298)
# Combine all strings together
full_str = header_text + concstr + timestr
# Create function that writes a string to file
def write_str_to_file(string, filename='model.def'):
with open(filename,'w') as def_file:
def_file.write(string)
# Call this function with your string
write_str_to_file(full_str)
###Output
_____no_output_____
###Markdown
Creating a file mask There are plenty of other ways to use the new string formatting in Python to your advantage. For example, you could use it to create file names in a loop:
###Code
file_mask = '+{one}hours_{two}UTC.jpg'
for i, j in zip((1,2,3), (4,5,6)):
print(file_mask.format(one=i, two=j))
HTML(html)
###Output
_____no_output_____ |
sql_magic API.ipynb | ###Markdown
sql_magic API Load extension
###Code
%load_ext sql_magic
###Output
_____no_output_____
###Markdown
Use sql_magic with a psycopg2 connection
###Code
import pandas.io.sql as psql
import psycopg2
connect_credentials = {'database': 'postgres',
'host': 'localhost',
'password': '',
'user': 'postgres'}
# connect to postgres connection object
conn = psycopg2.connect(**connect_credentials)
conn.autocommit = True
schema_name = 'template'
psql.execute('SET search_path TO {}'.format(schema_name), conn)
%config SQL.conn_name='conn'
%%read_sql
SELECT version()
###Output
Query started at 06:46:56 PM EDT; Query executed in 0.00 m
###Markdown
Assign result to pandas dataframe and plot
###Code
%%read_sql df
SELECT generate_series(1,10,1) s
%matplotlib inline
df.plot()
###Output
_____no_output_____
###Markdown
Utilize Python variables in SQL query
###Code
lower, upper = 9,15
%%read_sql df
SELECT *
FROM (SELECT generate_series(1,20,2) s) f
WHERE s BETWEEN {lower} and {upper}
###Output
Query started at 06:46:56 PM EDT; Query executed in 0.00 m
###Markdown
Connect to Spark engine
###Code
%config SQL.conn_name='spark'
%%read_sql
SELECT 1
###Output
Query started at 06:46:56 PM EDT; Query executed in 0.04 m
###Markdown
Connect to postgres simultaneously
###Code
%%read_sql -c conn
SELECT version()
###Output
Query started at 06:46:58 PM EDT; Query executed in 0.00 m
###Markdown
Use sql_magic with a SQLAlchemy Engine
###Code
# sqllite conn
from sqlalchemy import create_engine
from sqlite3 import dbapi2 as sqlite
sqllite_engine = create_engine('sqlite+pysqlite:///test.db', module=sqlite)
%config SQL.conn_name='sqllite_engine'
%%read_sql
SELECT sqlite_version();
###Output
Query started at 06:46:59 PM EDT; Query executed in 0.00 m
###Markdown
Miscellaneous results
###Code
%%read_sql
DROP TABLE IF EXISTS example_table;
CREATE TEMP TABLE example_table
AS
SELECT 1;
%%read_sql
SELECT 1
###Output
Query started at 06:46:59 PM EDT; Query executed in 0.00 m
###Markdown
Asynchronous calls
Queries can be run in async mode using the --async (or -a) flag. Displaying results from async calls is disabled by default.
###Code
%%read_sql df -a
SELECT 'long query here'
###Output
Query started at 06:46:59 PM EDT; Query executed in 0.00 m
###Markdown
Configuration Flags
Notifications and auto-display can be temporarily disabled with flags:

    positional arguments:
      table_name

    optional arguments:
      -h, --help     show this help message and exit
      -n, --notify   Toggle option for notifying query result
      -a, --async    Run query in seperate thread. Please be cautious when assigning result to a variable
      -d, --display  Toggle option for outputing query result
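For instance, the notification toggle can be used on its own; the query below is purely illustrative:

```
%%read_sql -n
SELECT 'query with the completion notification toggled off'
```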
###Code
%%read_sql
SELECT 1
%%read_sql -d
SELECT 1
###Output
Query started at 06:46:59 PM EDT; Query executed in 0.00 m
###Markdown
Defaults
Notifications and displaying results are enabled by default, but can be turned off with %config magic
###Code
# alerts and display are automatically enabled
%config SQL
%config SQL.output_result = False
%%read_sql
SELECT 1
###Output
Query started at 06:46:59 PM EDT; Query executed in 0.00 m |
Mini-Project-3/aiddata-preprocessing-project-3-v5.ipynb | ###Markdown
Read csv data into a pandas dataframe
###Code
aid = pd.read_csv('aiddata-countries-only.csv', delimiter=',')
aid
aid.count()
aid.columns
###Output
_____no_output_____
###Markdown
Group by donor and recipient respectively
###Code
# donor = aid.groupby('donor').sum()[['donor','commitment_amount_usd_constant']]
# recipient = aid.groupby('recipient').sum()[['commitment_amount_usd_constant']]
aid['donated_amount'] = aid['commitment_amount_usd_constant'].groupby(aid['donor']).transform('sum')
aid['recieved_amount'] = aid['commitment_amount_usd_constant'].groupby(aid['recipient']).transform('sum')
# donor = aid[['donor','donor_amount']]
# recipient = aid[['recipient','recipient_amount']]
# donor.drop_duplicates()
# recipient.drop_duplicates()
# res = pd.merge(donor,recipient, left_on='donor', right_on='recipient')
# res.drop_duplicates()
###Output
_____no_output_____
###Markdown
Mini Project 2: Visualization 1
###Code
temp = aid[['year','donor','recipient','commitment_amount_usd_constant','donated_amount','recieved_amount']]
donor = temp.groupby(['year','donor'])[['commitment_amount_usd_constant']].sum()
recipient = temp.groupby(['year','recipient']).sum()[['commitment_amount_usd_constant']]
donor = donor.rename(index=str, columns={"commitment_amount_usd_constant": "donated_amount"}).drop_duplicates()
donor
temd = pd.read_csv('don.csv', delimiter=',')
temd = temd.pivot(index='year', columns='donor', values='donated_amount')
temd = temd.fillna(0)
# temd.to_csv(r'tem-don.csv')
temd
recipient = recipient.rename(index=str, columns={"commitment_amount_usd_constant": "received_amount"}).drop_duplicates()
recipient
tem = pd.read_csv('rec.csv', delimiter=',')
tem = tem.pivot(index='year', columns='recipient', values='received_amount')
tem = tem.fillna(0)
# tem.to_csv(r'tem-rec.csv')
tem
#recipient.pivot(index='year', columns='recipient', values='received_amount')
recipient.columns
###Output
_____no_output_____
###Markdown
Visualization 2 Get top 10 purposes
###Code
purp = aid[['year','commitment_amount_usd_constant','coalesced_purpose_name']]
purp = purp.drop_duplicates()
purpamt = purp.groupby(['coalesced_purpose_name']).sum()[['commitment_amount_usd_constant']]
purpamt = purpamt.sort_values(by='commitment_amount_usd_constant', ascending=False)
purpamt.reset_index()
purpamt['coalesced_purpose_name'] = purpamt.index
purpamt = purpamt.head(10)
purpamt
purposes = purpamt[['coalesced_purpose_name']]
# purposes
purplist = purposes.values.tolist()
purplist
purps = purp.groupby(['year','coalesced_purpose_name']).sum()[['commitment_amount_usd_constant']]
# purps.to_csv(r'purp.csv')
purps
toppurp = pd.read_csv('purp.csv', delimiter=',')
toppurp = toppurp.pivot(index='year', columns='coalesced_purpose_name', values='commitment_amount_usd_constant')
toppurp = toppurp.fillna(0)
toppurp = toppurp[['Air transport','Rail transport','Industrial development','RESCHEDULING AND REFINANCING','Power generation/non-renewable sources','Import support (capital goods)','Social/ welfare services','Power generation/renewable sources','Telecommunications','Mineral/Metal prospection and exploration']]
toppurp.to_csv(r'top-purposes.csv')
###Output
_____no_output_____
###Markdown
Mini-Project-2: Visualization 3
###Code
tempdata = aid[['year','donor','recipient','commitment_amount_usd_constant']]
# donordata = tempdata.groupby(['year','donor'])['commitment_amount_usd_constant'].sum()
# donordata
tempdata = tempdata.loc[tempdata['donor'] == 'New Zealand']
tempdata = tempdata.sort_values(by='year')
tempdata = tempdata.drop(columns=['donor'])
tempdata = tempdata.groupby(['year','recipient'])['commitment_amount_usd_constant'].sum()
tempdata
#resdata.to_csv(r'kiwi-donations.csv')
res = pd.read_csv('kiwi-donations.csv', delimiter=',')
res = res.pivot(index='year', columns='recipient',values='amount')
res = res.fillna(0)
# res.to_csv(r'kiwi-don.csv')
res
purpose = aid[['recipient','coalesced_purpose_name']]
purpose = purpose.sort_values(by=['recipient'])
purpose = purpose.groupby(purpose.columns.tolist()).size().reset_index().\
rename(columns={0:'count'})
purpose
temp = temp.sort_values(by=['year'])
temp
t = temp.groupby(['donor','recipient']).sum()[['commitment_amount_usd_constant']]
t
#temp.groupby(['year','recipient']).sum()[['commitment_amount_usd_constant']]
coordinates = pd.merge(purpose, coords, left_on='recipient', right_on='country')
coordinates = coordinates[['recipient','coalesced_purpose_name', 'Latitude', 'Longitude', 'count']]
coordinates
coords = pd.read_csv('cleandata.csv', delimiter=',')
coords
coordinates = pd.merge(aid, coords, left_on='recipient', right_on='country')
coordinates = coordinates[['recipient','coalesced_purpose_name', 'Latitude', 'Longitude', 'count']]
array = ['']
coordinates.pivot(index='recipient', columns='coalesced_purpose_name', values='count')
coordinates.to_csv(r'coordinates.csv')
###Output
_____no_output_____
###Markdown
Create donor and recipient dataframes
###Code
donor = aid[['donor','donated_amount']]
donor.drop_duplicates()
recipient = aid[['recipient','recieved_amount']]
recipient.drop_duplicates()
###Output
_____no_output_____
###Markdown
Merge two dataframes
###Code
res = pd.merge(donor,recipient, left_on='donor', right_on='recipient')
res.drop_duplicates()
###Output
_____no_output_____
###Markdown
Dump donor and recipient dataframes to csv
###Code
res.to_csv(r'aid.csv')
res.drop(columns='recipient').drop_duplicates()
result = res.rename(index=str, columns={"donor": "Country"}).drop_duplicates()
result.drop(columns='recipient')
result.rename(index=str, columns={"donor": "Country"}).drop(columns='recipient')
###Output
_____no_output_____
###Markdown
Convert csv to json
###Code
# result.to_json(r'res.json',orient='records')
# result = result.drop(columns='recipient').drop_duplicates()
# result contains three columns - country, donated_amount, recieved_amount
result.columns
result['Countryid'] = result.index
result.to_csv(r'aid-donor-recipient.csv')
###Output
_____no_output_____
###Markdown
Normalize the dataframe
###Code
# Create x, where x the 'scores' column's values as floats
x = result[['donated_amount']].values.astype(float)
# Create a minimum and maximum processor object
min_max_scaler = preprocessing.MinMaxScaler()
# Create an object to transform the data to fit minmax processor
x_scaled = min_max_scaler.fit_transform(x)
result['normalized_donated_amount'] = x_scaled
result['donated_amount'].mean()
# Create y, where y the 'scores' column's values as floats
y = result[['recieved_amount']].values.astype(float)
# Create a minimum and maximum processor object
min_max_scaler = preprocessing.MinMaxScaler()
# Create an object to transform the data to fit minmax processor
y_scaled = min_max_scaler.fit_transform(y)
result['normalized_recieved_amount'] = y_scaled
result
result.to_csv(r'aid-normalized.csv')
result['normalized_donated_amount']
###Output
_____no_output_____
###Markdown
Dump dataframe to resjson
###Code
resjson = result.to_json()
with open('results.json', 'w') as file:
json.dump(resjson, file)
###Output
_____no_output_____
###Markdown
Mini-Project 3: Visualization 3 Part 1: Donor-Recipient relationship with Heatmaps or Adjacency matrix Donated amount
###Code
donor = aid[['donor','recipient','commitment_amount_usd_constant']]
donor['donated_amount'] = donor['commitment_amount_usd_constant'].groupby(donor['donor']).transform('sum')
donor = donor[['donor', 'recipient', 'donated_amount']]
# Sort data in ascending order
donor = donor.sort_values(by=['donated_amount'], ascending=True)
donor.to_csv(r'donor-recipient.csv')
donor
###Output
/anaconda3/lib/python3.6/site-packages/ipykernel_launcher.py:3: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
This is separate from the ipykernel package so we can avoid doing imports until
###Markdown
Recieved Amount
###Code
reciever = aid[['donor','recipient','commitment_amount_usd_constant']]
reciever['recieved_amount'] = reciever['commitment_amount_usd_constant'].groupby(reciever['recipient']).transform('sum')
reciever = reciever[['donor','recipient','recieved_amount']]
# Sort data in ascending order
reciever = reciever.sort_values(by=['recieved_amount'], ascending=True)
reciever['recieved_amount'].max()
# reciever.to_csv(r'recipient-donor.csv')
# reciever
###Output
/anaconda3/lib/python3.6/site-packages/ipykernel_launcher.py:3: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
This is separate from the ipykernel package so we can avoid doing imports until
###Markdown
Part 3: Arc Diagram, Chord diagram or Edge bundling Get top 5 purposes
###Code
purp5 = aid[['year','commitment_amount_usd_constant','coalesced_purpose_name']]
purp5 = purp5.drop_duplicates()
purpamt5 = purp5.groupby(['coalesced_purpose_name']).sum()[['commitment_amount_usd_constant']]
purpamt5 = purpamt5.sort_values(by='commitment_amount_usd_constant', ascending=False)
purpamt5.reset_index()
purpamt5['coalesced_purpose_name'] = purpamt5.index
purpamt5 = purpamt5.head(5)
purpamt5
pur5 = aid[['donor','recipient','coalesced_purpose_name']]
# Select top 5 purposes
qur5 = pur5.loc[pur5['coalesced_purpose_name'].isin(['Rail transport','Air transport','Industrial development','RESCHEDULING AND REFINANCING','Power generation/non-renewable sources'])]
# Group by purposes
qur5 = qur5.groupby(['coalesced_purpose_name','donor','recipient']).count()
qur5.to_csv(r'purpose-relations.csv')
qur5
# pur5 = pur5.groupby(['donor','recipient']).count()[['coalesced_purpose_name']]
# # pur5.to_csv(r'pur5-count.csv')
# pur5
ppur5 = aid[['donor','recipient','coalesced_purpose_name']]
# Select Air transport purpose
# qqur5 = ppur5.loc[pur5['coalesced_purpose_name'].isin(['Rail transport','Air transport','Industrial development','RESCHEDULING AND REFINANCING','Power generation/non-renewable sources'])]
qqur5 = ppur5.loc[ppur5['coalesced_purpose_name'].isin(['Industrial development'])]
# Group by purposes
qqur5 = qqur5.groupby(['coalesced_purpose_name','donor','recipient']).count()
qqur5.to_csv(r'purpose-relations-industrial-development.csv')
qqur5
pu5 = pd.read_csv('purpose-relations-industrial-development.csv')
pu5.rename(columns={"donor":"source","recipient":"target","coalesced_purpose_name":"value"}, inplace=True)
#pu5=pu5.sort_values(by=['value'])
pu5
###Output
_____no_output_____
###Markdown
List without unique source values
###Code
temp_pu5 = list(pu5.apply(lambda row: {"source": row['source'], "target": row['target'], "value": row['value']}, axis=1))
temp_pu5
###Output
_____no_output_____
###Markdown
Get unique source values
###Code
unique_pu5 = pd.Index(pu5['source']
.append(pu5['target'])
.reset_index(drop=True).unique())
unique_pu5
###Output
_____no_output_____
###Markdown
Create grouped dictionary
###Code
group_dict = {}
counter = 0
for src in unique_pu5:
counter += 1
group_dict[src] = counter
group_dict
###Output
_____no_output_____
###Markdown
Create links_list
###Code
links_list = []
for link in temp_pu5:
record = {"source":unique_pu5.get_loc(link['source']),
"target": unique_pu5.get_loc(link['target']), "value":link['value']}
links_list.append(record)
links_list
###Output
_____no_output_____
###Markdown
Create Nodes list
###Code
nodes_list = []
for src in unique_pu5:
nodes_list.append({"group": group_dict.get(src), "name":src})
nodes_list
###Output
_____no_output_____
###Markdown
Create JSON file
###Code
json_prep = {"nodes":nodes_list, "links":links_list}
json_dump = json.dumps(json_prep, indent=1, sort_keys=True)
print(json_dump)
###Output
{
"links": [
{
"source": 0,
"target": 23,
"value": "Industrial development"
},
{
"source": 0,
"target": 24,
"value": "Industrial development"
},
{
"source": 0,
"target": 25,
"value": "Industrial development"
},
{
"source": 1,
"target": 26,
"value": "Industrial development"
},
{
"source": 1,
"target": 27,
"value": "Industrial development"
},
{
"source": 1,
"target": 23,
"value": "Industrial development"
},
{
"source": 1,
"target": 28,
"value": "Industrial development"
},
{
"source": 1,
"target": 29,
"value": "Industrial development"
},
{
"source": 1,
"target": 25,
"value": "Industrial development"
},
{
"source": 1,
"target": 30,
"value": "Industrial development"
},
{
"source": 2,
"target": 26,
"value": "Industrial development"
},
{
"source": 2,
"target": 31,
"value": "Industrial development"
},
{
"source": 2,
"target": 32,
"value": "Industrial development"
},
{
"source": 2,
"target": 23,
"value": "Industrial development"
},
{
"source": 2,
"target": 24,
"value": "Industrial development"
},
{
"source": 2,
"target": 25,
"value": "Industrial development"
},
{
"source": 3,
"target": 26,
"value": "Industrial development"
},
{
"source": 3,
"target": 31,
"value": "Industrial development"
},
{
"source": 3,
"target": 32,
"value": "Industrial development"
},
{
"source": 3,
"target": 4,
"value": "Industrial development"
},
{
"source": 3,
"target": 33,
"value": "Industrial development"
},
{
"source": 3,
"target": 34,
"value": "Industrial development"
},
{
"source": 3,
"target": 23,
"value": "Industrial development"
},
{
"source": 3,
"target": 35,
"value": "Industrial development"
},
{
"source": 3,
"target": 28,
"value": "Industrial development"
},
{
"source": 3,
"target": 36,
"value": "Industrial development"
},
{
"source": 3,
"target": 37,
"value": "Industrial development"
},
{
"source": 3,
"target": 24,
"value": "Industrial development"
},
{
"source": 3,
"target": 25,
"value": "Industrial development"
},
{
"source": 4,
"target": 33,
"value": "Industrial development"
},
{
"source": 4,
"target": 23,
"value": "Industrial development"
},
{
"source": 4,
"target": 38,
"value": "Industrial development"
},
{
"source": 4,
"target": 39,
"value": "Industrial development"
},
{
"source": 5,
"target": 32,
"value": "Industrial development"
},
{
"source": 5,
"target": 23,
"value": "Industrial development"
},
{
"source": 5,
"target": 24,
"value": "Industrial development"
},
{
"source": 5,
"target": 25,
"value": "Industrial development"
},
{
"source": 6,
"target": 4,
"value": "Industrial development"
},
{
"source": 6,
"target": 33,
"value": "Industrial development"
},
{
"source": 6,
"target": 34,
"value": "Industrial development"
},
{
"source": 6,
"target": 23,
"value": "Industrial development"
},
{
"source": 6,
"target": 35,
"value": "Industrial development"
},
{
"source": 6,
"target": 24,
"value": "Industrial development"
},
{
"source": 6,
"target": 25,
"value": "Industrial development"
},
{
"source": 7,
"target": 31,
"value": "Industrial development"
},
{
"source": 7,
"target": 32,
"value": "Industrial development"
},
{
"source": 7,
"target": 27,
"value": "Industrial development"
},
{
"source": 7,
"target": 4,
"value": "Industrial development"
},
{
"source": 7,
"target": 23,
"value": "Industrial development"
},
{
"source": 7,
"target": 12,
"value": "Industrial development"
},
{
"source": 7,
"target": 38,
"value": "Industrial development"
},
{
"source": 7,
"target": 39,
"value": "Industrial development"
},
{
"source": 7,
"target": 35,
"value": "Industrial development"
},
{
"source": 7,
"target": 28,
"value": "Industrial development"
},
{
"source": 7,
"target": 25,
"value": "Industrial development"
},
{
"source": 8,
"target": 26,
"value": "Industrial development"
},
{
"source": 8,
"target": 31,
"value": "Industrial development"
},
{
"source": 8,
"target": 32,
"value": "Industrial development"
},
{
"source": 8,
"target": 33,
"value": "Industrial development"
},
{
"source": 8,
"target": 23,
"value": "Industrial development"
},
{
"source": 8,
"target": 12,
"value": "Industrial development"
},
{
"source": 8,
"target": 37,
"value": "Industrial development"
},
{
"source": 8,
"target": 24,
"value": "Industrial development"
},
{
"source": 8,
"target": 25,
"value": "Industrial development"
},
{
"source": 9,
"target": 26,
"value": "Industrial development"
},
{
"source": 9,
"target": 24,
"value": "Industrial development"
},
{
"source": 10,
"target": 26,
"value": "Industrial development"
},
{
"source": 10,
"target": 31,
"value": "Industrial development"
},
{
"source": 10,
"target": 32,
"value": "Industrial development"
},
{
"source": 10,
"target": 34,
"value": "Industrial development"
},
{
"source": 10,
"target": 23,
"value": "Industrial development"
},
{
"source": 10,
"target": 35,
"value": "Industrial development"
},
{
"source": 10,
"target": 28,
"value": "Industrial development"
},
{
"source": 10,
"target": 36,
"value": "Industrial development"
},
{
"source": 11,
"target": 26,
"value": "Industrial development"
},
{
"source": 11,
"target": 31,
"value": "Industrial development"
},
{
"source": 11,
"target": 32,
"value": "Industrial development"
},
{
"source": 11,
"target": 4,
"value": "Industrial development"
},
{
"source": 11,
"target": 23,
"value": "Industrial development"
},
{
"source": 11,
"target": 12,
"value": "Industrial development"
},
{
"source": 11,
"target": 28,
"value": "Industrial development"
},
{
"source": 11,
"target": 29,
"value": "Industrial development"
},
{
"source": 11,
"target": 24,
"value": "Industrial development"
},
{
"source": 11,
"target": 40,
"value": "Industrial development"
},
{
"source": 11,
"target": 25,
"value": "Industrial development"
},
{
"source": 11,
"target": 30,
"value": "Industrial development"
},
{
"source": 12,
"target": 26,
"value": "Industrial development"
},
{
"source": 12,
"target": 32,
"value": "Industrial development"
},
{
"source": 12,
"target": 25,
"value": "Industrial development"
},
{
"source": 13,
"target": 23,
"value": "Industrial development"
},
{
"source": 14,
"target": 39,
"value": "Industrial development"
},
{
"source": 15,
"target": 32,
"value": "Industrial development"
},
{
"source": 15,
"target": 23,
"value": "Industrial development"
},
{
"source": 15,
"target": 24,
"value": "Industrial development"
},
{
"source": 15,
"target": 25,
"value": "Industrial development"
},
{
"source": 16,
"target": 26,
"value": "Industrial development"
},
{
"source": 16,
"target": 31,
"value": "Industrial development"
},
{
"source": 16,
"target": 33,
"value": "Industrial development"
},
{
"source": 16,
"target": 23,
"value": "Industrial development"
},
{
"source": 16,
"target": 24,
"value": "Industrial development"
},
{
"source": 16,
"target": 25,
"value": "Industrial development"
},
{
"source": 17,
"target": 26,
"value": "Industrial development"
},
{
"source": 18,
"target": 26,
"value": "Industrial development"
},
{
"source": 18,
"target": 31,
"value": "Industrial development"
},
{
"source": 18,
"target": 32,
"value": "Industrial development"
},
{
"source": 18,
"target": 4,
"value": "Industrial development"
},
{
"source": 18,
"target": 33,
"value": "Industrial development"
},
{
"source": 18,
"target": 23,
"value": "Industrial development"
},
{
"source": 18,
"target": 39,
"value": "Industrial development"
},
{
"source": 18,
"target": 28,
"value": "Industrial development"
},
{
"source": 18,
"target": 36,
"value": "Industrial development"
},
{
"source": 18,
"target": 25,
"value": "Industrial development"
},
{
"source": 19,
"target": 31,
"value": "Industrial development"
},
{
"source": 19,
"target": 4,
"value": "Industrial development"
},
{
"source": 19,
"target": 33,
"value": "Industrial development"
},
{
"source": 19,
"target": 23,
"value": "Industrial development"
},
{
"source": 19,
"target": 38,
"value": "Industrial development"
},
{
"source": 19,
"target": 39,
"value": "Industrial development"
},
{
"source": 19,
"target": 35,
"value": "Industrial development"
},
{
"source": 19,
"target": 36,
"value": "Industrial development"
},
{
"source": 19,
"target": 24,
"value": "Industrial development"
},
{
"source": 19,
"target": 25,
"value": "Industrial development"
},
{
"source": 20,
"target": 26,
"value": "Industrial development"
},
{
"source": 20,
"target": 31,
"value": "Industrial development"
},
{
"source": 20,
"target": 32,
"value": "Industrial development"
},
{
"source": 20,
"target": 23,
"value": "Industrial development"
},
{
"source": 20,
"target": 24,
"value": "Industrial development"
},
{
"source": 21,
"target": 26,
"value": "Industrial development"
},
{
"source": 21,
"target": 32,
"value": "Industrial development"
},
{
"source": 21,
"target": 4,
"value": "Industrial development"
},
{
"source": 21,
"target": 23,
"value": "Industrial development"
},
{
"source": 21,
"target": 35,
"value": "Industrial development"
},
{
"source": 21,
"target": 36,
"value": "Industrial development"
},
{
"source": 21,
"target": 24,
"value": "Industrial development"
},
{
"source": 21,
"target": 25,
"value": "Industrial development"
},
{
"source": 22,
"target": 0,
"value": "Industrial development"
},
{
"source": 22,
"target": 26,
"value": "Industrial development"
},
{
"source": 22,
"target": 3,
"value": "Industrial development"
},
{
"source": 22,
"target": 31,
"value": "Industrial development"
},
{
"source": 22,
"target": 32,
"value": "Industrial development"
},
{
"source": 22,
"target": 4,
"value": "Industrial development"
},
{
"source": 22,
"target": 33,
"value": "Industrial development"
},
{
"source": 22,
"target": 41,
"value": "Industrial development"
},
{
"source": 22,
"target": 34,
"value": "Industrial development"
},
{
"source": 22,
"target": 23,
"value": "Industrial development"
},
{
"source": 22,
"target": 9,
"value": "Industrial development"
},
{
"source": 22,
"target": 12,
"value": "Industrial development"
},
{
"source": 22,
"target": 13,
"value": "Industrial development"
},
{
"source": 22,
"target": 38,
"value": "Industrial development"
},
{
"source": 22,
"target": 39,
"value": "Industrial development"
},
{
"source": 22,
"target": 42,
"value": "Industrial development"
},
{
"source": 22,
"target": 35,
"value": "Industrial development"
},
{
"source": 22,
"target": 17,
"value": "Industrial development"
},
{
"source": 22,
"target": 43,
"value": "Industrial development"
},
{
"source": 22,
"target": 28,
"value": "Industrial development"
},
{
"source": 22,
"target": 29,
"value": "Industrial development"
},
{
"source": 22,
"target": 24,
"value": "Industrial development"
},
{
"source": 22,
"target": 18,
"value": "Industrial development"
},
{
"source": 22,
"target": 19,
"value": "Industrial development"
},
{
"source": 22,
"target": 40,
"value": "Industrial development"
},
{
"source": 22,
"target": 25,
"value": "Industrial development"
}
],
"nodes": [
{
"group": 1,
"name": "Australia"
},
{
"group": 2,
"name": "Austria"
},
{
"group": 3,
"name": "Belgium"
},
{
"group": 4,
"name": "Canada"
},
{
"group": 5,
"name": "Czech Republic"
},
{
"group": 6,
"name": "Denmark"
},
{
"group": 7,
"name": "Finland"
},
{
"group": 8,
"name": "France"
},
{
"group": 9,
"name": "Germany"
},
{
"group": 10,
"name": "Ireland"
},
{
"group": 11,
"name": "Italy"
},
{
"group": 12,
"name": "Japan"
},
{
"group": 13,
"name": "Korea"
},
{
"group": 14,
"name": "Kuwait"
},
{
"group": 15,
"name": "Liechtenstein"
},
{
"group": 16,
"name": "Netherlands"
},
{
"group": 17,
"name": "Norway"
},
{
"group": 18,
"name": "Portugal"
},
{
"group": 19,
"name": "Spain"
},
{
"group": 20,
"name": "Sweden"
},
{
"group": 21,
"name": "Switzerland"
},
{
"group": 22,
"name": "United Kingdom"
},
{
"group": 23,
"name": "United States"
},
{
"group": 24,
"name": "India"
},
{
"group": 25,
"name": "South Africa"
},
{
"group": 26,
"name": "Thailand"
},
{
"group": 27,
"name": "Brazil"
},
{
"group": 28,
"name": "Cyprus"
},
{
"group": 29,
"name": "Romania"
},
{
"group": 30,
"name": "Saudi Arabia"
},
{
"group": 31,
"name": "United Arab Emirates"
},
{
"group": 32,
"name": "Chile"
},
{
"group": 33,
"name": "Colombia"
},
{
"group": 34,
"name": "Estonia"
},
{
"group": 35,
"name": "Hungary"
},
{
"group": 36,
"name": "Poland"
},
{
"group": 37,
"name": "Slovak Republic"
},
{
"group": 38,
"name": "Slovenia"
},
{
"group": 39,
"name": "Latvia"
},
{
"group": 40,
"name": "Lithuania"
},
{
"group": 41,
"name": "Taiwan"
},
{
"group": 42,
"name": "Greece"
},
{
"group": 43,
"name": "New Zealand"
},
{
"group": 44,
"name": "Qatar"
}
]
}
###Markdown
Dump into JSON file
###Code
filename_out = 'industrail-development-purpose.json'
json_out = open(filename_out,'w')
json_out.write(json_dump)
json_out.close()
purposes5 = purpamt5[['coalesced_purpose_name']]
# purposes
purplist5 = purposes5.values.tolist()
purplist5
kiwi = pd.read_csv('kiwi-donations.csv', delimiter=',')
kiwi
kiwi['amount'].max()
p = pd.read_csv('purp.csv')
p
p['commitment_amount_usd_constant'].max()
# p = p.loc[p['coalesced_purpose_name'] == [['Air transport','Rail transport','Industrial development','RESCHEDULING AND REFINANCING','Power generation/non-renewable sources','Import support (capital goods)','Social/ welfare services','Power generation/renewable sources','Telecommunications','Mineral/Metal prospection and exploration']]]
#q = p.loc[p['coalesced_purpose_name'] == 'Rail transport']
q = p.loc[p['coalesced_purpose_name'].isin(['Rail transport','Air transport','Industrial development','RESCHEDULING AND REFINANCING','Power generation/non-renewable sources','Import support (capital goods)','Social/ welfare services','Power generation/renewable sources','Telecommunications','Mineral/Metal prospection and exploration'])]
q
q.to_csv(r'purposes-10.csv')
q = p.loc[p['coalesced_purpose_name'].isin(['Rail transport','Air transport','Industrial development','RESCHEDULING AND REFINANCING','Power generation/non-renewable sources'])]
q
###Output
_____no_output_____ |
demo/Dropbox Integration.ipynb | ###Markdown
Dropbox Integration

* You may link your Dropbox to save your own copy of videos.
* This integration is write-only to a single App directory (we can't see your Dropbox!)
* Integration is performed by following the link in your profile in-app
###Code
import pycollector.video
v = pycollector.video.Video(mp4file='/Users/jebyrne/Dropbox (Personal)/Apps/Visym Collector/DC65FA28-1E6D-44D0-AED3-AA541EA6696E.mp4',
jsonfile='/Users/jebyrne/Dropbox (Personal)/Apps/Visym Collector/DC65FA28-1E6D-44D0-AED3-AA541EA6696E_20210514_170931.json')
print(v)
v.annotate().frame(30).show()
###Output
[vipy.video.annotate]: Annotating video ...
|
silx/processing/marchingcubes/marchingCubesExercise.ipynb | ###Markdown
Isolevel in one cube: Exercise: display the following mesh using the given plotMesh function and the marchingcubes module functions

![larger-iso-surface-using-silx-plot3d](img/simpleCube.png)

Help: for the given iso values at the vertices, the vertex indices are:

```
      dim 0 (Z)
       ^
       |
     4 +------+ 5
      /|     /|
     / |    / |
  6 +------+ 7|
    |  |   |  |
    |0 +---|--+ 1  -> dim 2 (X)
    | /    | /
    |/     |/
  2 +------+ 3
   /
  dim 1 (Y)
```

so for the following setup:

```
      dim 0 (Z)
       ^
       |
     1 +------+ 1
      /|     /|
     / |    / |
  0 +------+ 0|
    |  |   |  |
    |1 +---|--+ 1  -> dim 2 (X)
    | /    | /
    |/     |/
  0 +------+ 0
   /
  dim 1 (Y)
```

the cube is defined by:

```
cube = numpy.array(
    (((1., 1.), (0., 0.)),
     ((1., 1.), (0., 0.))), dtype=numpy.float32)
```
###Code
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def plotMesh(_vertices):
import numpy
from matplotlib.collections import PolyCollection
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# draw ref
ax.plot((0, 1),(0, 0),(0, 0))
ax.plot((0, 0),(0, 1),(0, 0))
ax.plot((0, 0),(0, 0),(0, 1))
# draw the points created
x=[]
y=[]
z=[]
for vertice in _vertices:
x.append(vertice[2])
y.append(vertice[1])
z.append(vertice[0])
x=numpy.array(x)
y=numpy.array(y)
z=numpy.array(z)
x, y = numpy.meshgrid(x, y)
ax.plot_surface(x, y, z, color='orange')
ax.set_xlabel('dim2 (x)')
ax.set_ylabel('dim1 (y)')
ax.set_zlabel('dim0 (z)')
plt.show()
...
###Output
_____no_output_____ |
data/week_1_finalised/geospatial.ipynb | ###Markdown
Week 1 Notebook 2: Geospatial Visualisation, Further Exploration of Dataframes and Bar Charts

Our dataset
This dataset is based on citizens' reports to FixMyStreet.com regarding problems with pavements or potholes. For each report a category is assigned, the longitude and the latitude are provided, as well as some datazone information (name of the datazone and code).

Source: https://www.fixmystreet.com/

Aims
1. Reinforce some of the concepts covered earlier
2. Introduce and learn how to apply further dataframe manipulation techniques
3. Learn how to use gmplot to create geospatial visualisations
4. Learn about bar charts
###Code
import pandas as pd
import gmplot #library we will use as part of the geospatial plot
import numpy as np
import matplotlib.pyplot as plt
from IPython.core.display import display, HTML
from IPython.display import IFrame
###Output
_____no_output_____
###Markdown
Recall: Reading from a csv
In the previous notebook, we learned how to read from a csv file and create a dataframe.
###Code
dataframe = pd.read_csv("fix_myStreetGlasgow.csv", sep=',')
###Output
_____no_output_____
###Markdown
Your turn
1. What are the columns of the dataframe?
2. Get the last 10 elements of the dataframe
3. What are the minimum and maximum latitude and longitude?
###Code
#print the columns of the dataframe
print (dataframe.ATTRIBUTE)
#last 10 elements
print (dataframe.FUNCTION)
#min and max longitude/latitude
print (str(dataframe['COLUMN_NAME'].FUNCTION) + ' is the min')
print (str(dataframe['COLUMN_NAME'].FUNCTION) + ' is the max')
###Output
_____no_output_____
###Markdown
Expected Output

columns: Index(['category', 'latitude', 'longitude', 'datazone', 'name'], dtype='object')

last 10 elements:

|category | latitude|longitude|datazone|name|
|:-------|:--------|:--------|:--------|:---|
|Potholes| 55.81172| -4.33927 | S01009782| Nitshill - 08 |
|Pavements/footpaths| 55.84243| -4.27378| S01009881 | Pollokshields East - 01|
| Potholes | 55.87842 | -4.27847| S01010307| Firhill - 07|
|Potholes| 55.86728 | -4.23603| S01010262| City Centre East - 04 |
|Potholes| 55.88295 | -4.30497 | S01010412 |Kelvinside and Jordanhill - 05 |
|Pavements/footpaths | 55.85108 | -4.32594 | S01009851 |Craigton - 04 |
|Pavements/footpaths | 55.84770 | -4.22407 |S01010053 |Parkhead West and Barrowfield - 05 |
|Pavements/footpaths | 55.83499 | -4.36386 | S01009804 |Pollok North and East - 06 |
|Pavements/footpaths | 55.87559 | -4.13677 | S01010117 | Garthamlock, Auchinlea and Gartloch - 05 |
|Pavements/footpaths | 55.83467 | -4.26067 | S01009888 | Govanhill West - 02|

Min and max latitude:
Max latitude: 55.926809999999996
Min latitude: 55.784009999999995

Visualising Our Data
We have a list of pothole and pavement problems, and it would be quite useful to see where they are and which areas it would be a good idea to avoid because of their bad road conditions. Here you will be introduced to using gmplot to visualise geographical data. You provide gmplot with longitudes and latitudes; then, based on Google Maps, gmplot plots the coordinates and stores an html file with a geographical heatmap in your workspace.

What's a heatmap? Heatmaps use colour-coding to present different values. In our particular case, if we see a lot of red on the map, that means there are many road problems in that particular area of Glasgow. Heatmaps can be applied to a wide range of domains. For example, they are often used when we want to analyse which parts of a web page people pay most attention to. In molecular biology, heatmaps are used to visualise levels of gene expression across samples. When creating a heatmap it's important to be aware of the colour scheme used. (find a useful resource to include here //TODO)
###Code
# take the latitudes and longitudes
latitudes = dataframe["latitude"]
longitudes = dataframe["longitude"]
# Creating the location we would like to initialize the focus on.
# Parameters: Latitude, Longitude, Zoom
gmap = gmplot.GoogleMapPlotter(55.8721,-4.2882,10)
# Overlay our datapoints onto the map
gmap.heatmap(latitudes, longitudes)
# Generate the heatmap into an HTML file
gmap.draw("Glasgow_heatmap.html")
IFrame("Glasgow_heatmap.html", 700,350)
###Output
_____no_output_____
###Markdown
Filtering
While it's useful to see areas with a high concentration of road problems, it would also be useful to see only the potholes. We should be able to achieve this with a bit of filtering.

NOTE: While there is a method called filter in pandas, we are not going to use it as part of this exercise, as it only allows us to filter on the specified index.

In our category column we have 2 types of problems: one is potholes and the second one is pavements/footpaths. Filtering will give us the following:

![alt text](filtering.jpg)

As you can see, we are only left with the entries for which the category is potholes.

dataframe['category']=='Potholes': the result we get is a column of the dataframe with true/false values signifying whether each entry is a pothole or not.

dataPotholes = dataframe[dataframe['category'] == 'Potholes']: pandas returns the entries for which the category is Potholes.
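To see the mechanics in isolation, here is a tiny self-contained illustration with a made-up dataframe (the names and values are invented purely for demonstration):

```python
import pandas as pd

toy = pd.DataFrame({'category': ['Potholes', 'Pavements/footpaths', 'Potholes'],
                    'latitude': [55.81, 55.84, 55.87]})
mask = toy['category'] == 'Potholes'   # Boolean Series: True, False, True
print(toy[mask])                       # keeps only the rows where the mask is True
```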
###Code
dataPotholes = dataframe[dataframe['category'] == 'Potholes']
###Output
_____no_output_____
###Markdown
Your turn
Now, after we've obtained a dataframe containing only potholes, you can generate a geographical heatmap for that.
###Code
# take the latitudes and longitudes
latitudes =
longitudes =
# Creating the location we would like to initialize the focus on.
# Parameters: Latitude, Longitude, Zoom
gmap = gmplot.GoogleMapPlotter(55.8721,-4.2882,10)
# Overlay our datapoints onto the map
gmap.heatmap(latitudes, longitudes)
# Generate the heatmap into an HTML file
gmap.draw(FILENAME.HTML)
###Output
_____no_output_____
###Markdown
Refining our analysis
It was great to see our potholes visualised, but sometimes we want to have our data quantified. Currently, we have a single record per pothole per region, but it would be nice to get a summary of the problems per datazone. Essentially, what we want to achieve is having the results in the following form:

| name | potholes |
|:-------|:--------|
| Alexandra Parade - 03 | 3 |
| Anderston - 01 | 7 |
| ... | ... |

The first step is familiar, as that's what we did earlier when we wanted to retrieve only the potholes.
###Code
dataF = dataframe[dataframe['category'] == 'Potholes']
###Output
_____no_output_____
###Markdown
Now, when we have only pothole entries, we want to aggregate them so that we can obtain the desired output. These are the steps we are going to go through in our processing pipeline. The first step will be aggregating the potholes based on the datazone name.

![alt text](aggregations.jpg)

The result is essentially groups of pothole entries.
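If you want to see what grouping and counting does on a small made-up example first (the values below are invented for illustration):

```python
toy = pd.DataFrame({'name': ['Anderston - 01', 'Anderston - 01', 'Hillhead - 02'],
                    'category': ['Potholes', 'Potholes', 'Potholes']})
counts = toy.groupby('name', as_index=False)['category'].count()
print(counts)   # Anderston - 01 -> 2, Hillhead - 02 -> 1
```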
###Code
#once we have only the potholes, we want to aggregate the results based on the name of the datazone and count the total number of potholes
dataF = dataF.groupby('name', as_index=False)
#for name in dataF:
# print (name)
#So far: only potholes in our data, grouped-by the datazone where they were reported.
#Next goal: count the potholes/pavement issues in the same zone
dataF = dataF['category'].count()
#Now when we have that, we would like to store it in a new dataframe
dataPo = pd.DataFrame(data=dataF).rename(index=str, columns={"name": "Datazone", "category": "Potholes"})
print (dataPo.head(10))
###Output
Datazone Potholes
0 Alexandra Parade - 03 3
1 Anderston - 01 7
2 Anderston - 02 9
3 Anderston - 03 1
4 Anderston - 04 2
5 Anderston - 05 10
6 Anderston - 06 6
7 Anniesland East - 01 4
8 Anniesland East - 02 9
9 Anniesland East - 03 3
###Markdown
Your turn
Now that you know how filtering works, please complete the following exercises:
1. What are the datazones with more than 13 potholes?
2. What's the maximum number of potholes recorded?
###Code
#dataPo13 dataframe to store the entries with more than 13 potholes. You may find > helpful.Recall how we applied filtering condition earlier.
#HINT: the dataframe we base our analysis on is dataPo13
dataPo13 = DATAFRAME_NAME[DATAFRAME_NAME['Potholes']>13]
print (dataPo13)
#based on dataPo dataframe identify max number of potholes recorded
maxPotholes = dataPo[column_name].FUNCTION
###Output
_____no_output_____
###Markdown
Your turn
Based on the earlier example, create a dataframe, called **dataPav**, that contains the datazones and the count of pavement/footpath issues. Follow the steps outlined below if you are stuck:
1. Filter the data based on whether the value of 'category' is 'Pavements/footpaths'
2. Group it by 'name' and count the total occurrences per datazone
3. Create a new dataframe, called **dataPav**, with columns: 'Datazone' and 'Pavements'
###Code
#returns only the rows for which the category is pavements. HINT: Check whether you got the correct field name
dataPav = dataframe[dataframe['category'] == "Pavements/footpaths"]
#check the size of your dataframe; your result should be 2185 entries
print (dataPav.size)
#same as above
dataPav = dataPav.groupby('name', as_index=False)['category'].count()
#create a new dataframe with renamed columns
dataPav = pd.DataFrame(data=dataPav).rename( columns={"name": "Datazone", "category": "Pavements"})
print (dataPav.head(10))
###Output
Datazone Pavements
0 Anderston - 01 1
1 Anderston - 05 1
2 Anderston - 06 2
3 Anniesland East - 02 2
4 Anniesland East - 05 2
5 Anniesland West - 03 1
6 Anniesland West - 04 2
7 Anniesland West - 06 1
8 Anniesland West - 08 2
9 Baillieston East - 01 1
###Markdown
Merging dataframes
Assuming your code for the previous task works, you should now have 2 dataframes:
1. dataPo: containing 2 columns, the name of the datazone and the count of potholes for that datazone
2. dataPav: containing 2 columns, the name of the datazone and the count of reported pavement problems

Ideally, what we want to do is to have one dataframe that combines the data from dataPo and dataPav. Since the column 'Datazone' is common to the two dataframes, we want to use it to join them together.

![alt text](merge.jpg)
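Here is a tiny made-up example of what such an outer merge does (the datazone names and counts are invented):

```python
left = pd.DataFrame({'Datazone': ['A', 'B'], 'Potholes': [3, 7]})
right = pd.DataFrame({'Datazone': ['B', 'C'], 'Pavements': [1, 2]})
print(pd.merge(left, right, how='outer'))   # 'A' gets NaN Pavements, 'C' gets NaN Potholes
```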
###Code
#concatenating the two dataframes
result = pd.merge(dataPo,dataPav, how='outer')
print (result.head(10))
###Output
Datazone Potholes Pavements
0 Alexandra Parade - 03 3.0 NaN
1 Anderston - 01 7.0 1.0
2 Anderston - 02 9.0 NaN
3 Anderston - 03 1.0 NaN
4 Anderston - 04 2.0 NaN
5 Anderston - 05 10.0 1.0
6 Anderston - 06 6.0 2.0
7 Anniesland East - 01 4.0 NaN
8 Anniesland East - 02 9.0 2.0
9 Anniesland East - 03 3.0 NaN
###Markdown
Expected Output

| Datazone | Potholes | Pavements |
|:-------|:--------|:--------|
| Alexandra Parade - 03 | 3.0 | NaN |
| Anderston - 01 | 7.0 | 1.0 |
| Anderston - 02 | 9.0 | NaN |
| Anderston - 03 | 1.0 | NaN |
| Anderston - 04 | 2.0 | NaN |
| Anderston - 05 | 10.0 | 1.0 |
| Anderston - 06 | 6.0 | 2.0 |
| Anniesland East - 01 | 4.0 | NaN |
| Anniesland East - 02 | 9.0 | 2.0 |
| Anniesland East - 03 | 3.0 | NaN |

Side Note
What happens when you print the resulting dataframe? Sometimes the count value is **NaN**. Why? Because no problems have been reported for this area. Maybe there are no road problems for those datazones, or maybe they just haven't been reported. In a few words, we don't know, and therefore we have **missing data**.

There are different techniques for dealing with missing data and they will be discussed later, but for now we choose to replace all missing values with 0, as we can make the assumption that there are no road problems.
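As a quick sketch of two common alternatives to filling with 0 (shown only for illustration; the notebook keeps the fillna(0) approach below):

```python
# both calls return new dataframes, nothing is changed in place
result.dropna()                                           # drop every row that has a missing value
result.fillna(result[['Potholes', 'Pavements']].mean())   # fill with each column's mean instead
```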
###Code
result = result.fillna(0)
print (result.head(10))
###Output
Datazone Potholes Pavements
0 Alexandra Parade - 03 3.0 0.0
1 Anderston - 01 7.0 1.0
2 Anderston - 02 9.0 0.0
3 Anderston - 03 1.0 0.0
4 Anderston - 04 2.0 0.0
5 Anderston - 05 10.0 1.0
6 Anderston - 06 6.0 2.0
7 Anniesland East - 01 4.0 0.0
8 Anniesland East - 02 9.0 2.0
9 Anniesland East - 03 3.0 0.0
###Markdown
Expected Output

| Datazone | Potholes | Pavements |
|:-------|:--------|:--------|
| Alexandra Parade - 03 | 3.0 | 0.0 |
| Anderston - 01 | 7.0 | 1.0 |
| Anderston - 02 | 9.0 | 0.0 |
| Anderston - 03 | 1.0 | 0.0 |
| Anderston - 04 | 2.0 | 0.0 |
| Anderston - 05 | 10.0 | 1.0 |
| Anderston - 06 | 6.0 | 2.0 |
| Anniesland East - 01 | 4.0 | 0.0 |
| Anniesland East - 02 | 9.0 | 2.0 |
| Anniesland East - 03 | 3.0 | 0.0 |

Visualising our comparison
We have a dataframe containing each datazone and the corresponding number of road issues. However, a picture is worth a thousand words, and we will achieve a much better understanding of our data if we can see how the number of reported problems varies. Bar charts are used to compare categories, and the height of the bars is proportional to the values they represent.

Your turn
Before diving into creating a bar chart, let's get some numbers from our data.
1. What's the max number of potholes recorded?
2. What's the max number of pavement issues?
###Code
#calculate the max number of potholes
maxPotholes =
print ("Max potholes: "+ str(maxPotholes))
maxPave =
print ("Max pavement issues: "+ str(maxPave))
###Output
_____no_output_____
###Markdown
Expected output
Max potholes: 60.0
Max pavement issues: 13.0

Creating a bar chart
While it's important to fix all potholes, we want to prioritise datazones with more severe conditions, so we are going to pick a threshold number and only consider datazones with a recorded pothole count above this threshold. For the purposes of this example, we are going to pick 20 as our threshold.
###Code
#refines the dataframe we are wanting to plot, potholes count should be more than 20
resultPotholes = result[result['Potholes']>20]
#plots the datazones and the count of potholes
plot = plt.bar(resultPotholes['Datazone'],resultPotholes['Potholes'], align='center')
#rotates the x axis ticks, so they don't overlap
plt.xticks(rotation=90)
plt.show()
###Output
_____no_output_____
###Markdown
Good practices require us to add labels and title to our chart.
###Code
#refines the dataframe we are wanting to plot, potholes count should be more than 20
resultPotholes = result[result['Potholes']>20]
#plots the datazones and the count of potholes
plot = plt.bar(resultPotholes['Datazone'],resultPotholes['Potholes'], align='center')
#rotates the x axis ticks, so they don't overlap
plt.xticks(rotation=90)
plt.xlabel('Datazones', fontsize=13)
plt.title("Datazones with more than 20 potholes")
plt.ylabel('Number of problems reported',fontsize=13)
plt.show()
###Output
_____no_output_____
###Markdown
By adding labels and a title, our chart is already more informative. However, our datazones seem to be sorted alphabetically by datazone name. It would be better if we sorted by the number of potholes in descending order.
###Code
#datazones with more than 20 potoholes ordered in a descending order
resultPotholes = result[result['Potholes']>20].sort_values(by='Potholes', ascending = False)
plt.figure(figsize=(9,7))
#creating the bar chart
plot = plt.bar(resultPotholes['Datazone'],resultPotholes['Potholes'], align='center')
#code to set labels and titles
plt.xlabel('Datazones', fontsize=13)
plt.title("Datazones with more than 20 potholes")
plt.xticks(rotation=90)
plt.ylabel('Number of problems reported',fontsize=13)
plt.show()
###Output
_____no_output_____
###Markdown
Your turn
Create a similar bar chart for pavement issues
###Code
resultPavements = result[condition].sort_values(by=COLUMN_NAME, ascending = False)
plt.figure(figsize=(9,7))
plot = plt.bar(x_VALUES,y_VALUES)
plt.xlabel('Datazones', fontsize=13)
plt.title(TITLE_HERE)
plt.xticks(rotation=90)
plt.ylabel('Number of problems reported',fontsize=13)
plt.show()
###Output
_____no_output_____ |
python/jupyter-notebooks/Exploring Localness_Blogpost.ipynb | ###Markdown
Exploring Localness of OSM Data
An Analysis using the OSHDB and OHSOME

"Local Knowledge" constitutes the exceptional value of Volunteered Geographical Information and is thus also considered an important indicator of data quality. We are interested in how much local information is captured in [OpenStreetMap](https://www.openstreetmap.org) data. In this blog post we explore the temporal evolution of mapping in OSM and the information stored in its database, by taking an explorative look at four different cities in Germany, Nepal and the Philippines: Heidelberg, Kathmandu, Pokhara and Manila.

Heidelberg is generally considered to be well mapped and has experienced mapping activity for over a decade now. Mapping in Kathmandu has been impacted heavily by data created for disaster response in the aftermath of the [2015 earthquake](https://www.hotosm.org/projects/nepal_2015_earthquake_response) disaster in Nepal. This resulted in a significant increase in activity from mappers around the world. As a comparison, we will also take a look at Pokhara. Pokhara is Nepal's second largest city, lies approximately 200 km west of Kathmandu and belongs to the more rural part of Nepal. Manila is the capital and the economic and cultural center of the Philippines.

The image below shows a potential classification of OSM data in regard to the information types it may contain. While building and road network completeness are of interest for levels 0-1 (mostly based on remote mapping), the further levels 2-4 are considered to be sourced from local knowledge.

![image info ](./data/TwitterDevelopment.jpg)
[Source: @RebeccaFirthy (twitter)](https://twitter.com/RebeccaFirthy/status/1202952838722342912)

In the following we will compare different aspects of the development of OSM data, including
* completeness of the road network and buildings (level 0-1)
* exploratory analysis of local information for facilities and POIs (level 3)
* overall information richness (level 4)

Examining this evolution should give us insights on how long it takes volunteers to provide local information and how far the process is at the different locations. In order to perform this analysis the [OHSOME API](https://heigit.org/big-spatial-data-analytics-en/ohsome/) was utilized to access the OSM full history data. It provides different endpoints to extract and aggregate data about the objects, users and single contributions.

Level 0-1: Roads and Buildings

Road network
Mapping in OSM often started with roads and buildings, which can be traced from satellite imagery. In total, over 1.75 million kilometers of highway were mapped in Kathmandu. The graph clearly shows the impact of the 2015 earthquake: the road network increased by approximately 15% directly after the disaster and by 30% until today. Pokhara's increase since the earthquake is even bigger, doubling its mapped length of highway objects since then. Two spikes are especially noticeable, one in the direct aftermath of the earthquake and one in the year 2016. The development of Heidelberg's road network length, in contrast, was more constant, with a small growth rate over the last decade. Manila's road network shows signs of still being in development, with its length having increased by approximately 7% over the last year.
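As mentioned above, the numbers in this post come from the OHSOME API. For illustration, below is a minimal sketch of a direct request to the ohsome element-count aggregation endpoint using the `requests` library; the endpoint layout and response structure follow the public ohsome API v1 as far as I am aware, so treat them as assumptions and check the official documentation before relying on them:

```python
import requests

# Assumed public ohsome API v1 endpoint for element counts
OHSOME_URL = "https://api.ohsome.org/v1/elements/count"

params = {
    "bboxes": "8.6581,49.3836,8.7225,49.4363",   # Heidelberg, same bbox as used below
    "time": "2009-11-01/2020-06-01/P1M",         # monthly snapshots, same as TIME_MONTHLY
    "filter": "building=*",                      # OSM filter, as in the FILTER list below
}

response = requests.post(OHSOME_URL, data=params)
for snapshot in response.json()["result"]:       # assumed response layout
    print(snapshot["timestamp"], snapshot["value"])
```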
###Code
import src.TagSteps as plots
#define globals for plotting
BBOXes = {
"Heidelberg" : "8.6581,49.3836,8.7225,49.4363",
"Pokhara":"83.9142,28.1693,84.0775,28.2687",
"Kathmandu" : "85.26810609,27.66794937,85.3755574,27.75133958",
"Manila":"120.94169186,14.55699989, 121.0261672,14.63900265"}
TIME_MONTHLY = "2009-11-01/2020-06-01/P1M"
FILTER = ["building=*","highway=*","amenity=* and name=*"]
plots.geometry(BBOXes,TIME_MONTHLY)
###Output
_____no_output_____
###Markdown
Buildings
The mapping of buildings, in comparison to the length of the road network, showed a slightly delayed temporal development. After the initial mapping of buildings in 2011, only very few buildings were mapped in Pokhara until about one year after the earthquake. Only then did the building data experience a rapid growth over a few months in the second quarter of 2016. Kathmandu showed three main growth events: one at the end of 2012, marking the first noticeable amount of contributions, one directly following the earthquake in 2015, and another smaller one in 2019. Manila showed a steady data evolution, apart from one spike in the year 2019, indicating that, like the road network, the buildings are still not mapped completely.

Level 3: Temporal Evolution of Facilities and POIs

Facilities
The third level is characterized by containing information about facilities. Here we will take a look at the temporal development of educational facilities, access to drinking water, healthcare facilities and information about the road network (in this case bridges and tunnels). The plot shows the total count of objects belonging to the groups named above.

While Heidelberg showed a more or less constant behavior, the 3 other cities showed more irregular patterns. Manila experienced a steady increase in the last 10 years, which only further accelerated in the last one to two years. This indicates a still ongoing process of facilities and infrastructure being mapped. Kathmandu's graph shows a significant increase between 2012 and 2013 and has since experienced irregular phases of growth, slowly leveling out recently. Notably, the 2015 earthquake response mapping didn't have a significant impact on this. Pokhara's development started slowly and grew between 2016 and 2018, before leveling out afterwards.
###Code
plots.level3(BBOXes,TIME_MONTHLY)
###Output
_____no_output_____
###Markdown
Points of Interest
As points of interest (POIs) we consider objects containing the tags `name` and `amenity`. The amount of those can be of interest to understand how much information there is besides geometries, facilities and critical infrastructure. A comparison to the development of roads and buildings indicates that the mapping of POIs followed after the mapping of essential map features like buildings and roads. In particular, the development for Kathmandu and Pokhara, which experienced short periods of highway and road mapping, showed a delayed evolution in regard to POIs. This might indicate that mapping buildings and roads, and mapping POIs, are two separate processes. Manila and Heidelberg show at least some form of correlation between the mapping of buildings and roads and the mapping of POIs. For these cities this might indicate a simultaneous process.
###Code
plots.pointsOfIntrest(BBOXes,TIME_MONTHLY)
###Output
_____no_output_____
###Markdown
Level 4: Overall Information Richness

Temporal Evolution for Buildings and Roads
Following the scheme above, the main characteristic of the fourth level is a high amount of stored information in the objects on the map. We will take a look at the number of additional tags per object. We can clearly see that for roads and buildings the amount of additional tag information in Heidelberg is very high. For instance, more than 50% of the buildings contain five or more tags. Manila, Kathmandu and Pokhara have a significantly lower portion of buildings and streets containing further information. An exception is the road network of Manila, which shows results comparable to the ones in Heidelberg.
###Code
x = plots.plotTagcompletness(BBOXes,FILTER,TIME_MONTHLY,[3,5,10]) #this step might take a few minutes
###Output
finished 2 out of 3 filter for 4 out of 4 locations | 16998 / 17116 Objectsts
###Markdown
Spatial Distribution for Buildings and Roads
Exploring the spatio-temporal domain using the leaflet map below shows that Manila and Heidelberg both had alternating patterns of activity over a longer stretch of time. Pokhara and Kathmandu instead showed region-wide, more extensive activity over short periods. This suggests that Heidelberg and Manila both had a variety of spatially separated processes, while Kathmandu and Pokhara were affected by events covering the whole cities.
###Code
from src.map import ChoroplethMap
BBOXes = {
"Heidelberg" : "8.6581,49.3836,8.7225,49.4363",
"Pokhara":"83.9142,28.1693,84.0775,28.2687",
"Kathmandu" : "85.26810609,27.66794937,85.3755574,27.75133958",
"Manila":"120.94169186,14.55699989, 121.0261672,14.63900265"}
TIME_MONTHLY = "2009-11-01/2020-06-01/P1M"
FILTER = ["building=*","highway=*","amenity=* and name=*"]
path_grid = r"./data/map_stored.json"
path_style = r"./data/style_stored.json"
m = ChoroplethMap()
m.loadJSON(path_style,path_grid,BBOXes)
m.renderMap()
m.getMap()
###Output
_____no_output_____
###Markdown
Conclusion
Kathmandu and Pokhara showed clear differences to Heidelberg, both in the temporal development of buildings and roads and in the amount of tags. This indicates that a lower amount of local knowledge is contained in the data of the two cities. Manila showed at least some resemblance to the development of Heidelberg, but still contains less information overall, and its buildings and road network are still undergoing constant mapping.

In case you are interested in learning more about OHSOME, take a look at the [How to become OHSOME](http://k1z.blog.uni-heidelberg.de/?s=%22become+ohsome%22) series, or take a look at the literature below. In case you want to take a look at a region of your choice, just add your bounding box in the cell below and rerun the cells.

Links
[OSHDB](https://github.com/GIScience/oshdb) and [ohsome API](https://github.com/GIScience/ohsome-api) git repositories
[Humanitarian OSM Stats](https://humstats.heigit.org/index.html?org=all) Global statistics for Humanitarian OpenStreetMap Team projects
[ohsome HeX - OpenStreetMap History Explorer](http://k1z.blog.uni-heidelberg.de/2019/02/18/openstreetmap-history-explorer-ohsomehex/)

Literature
Raifer, M., Troilo, R., Kowatsch, F., Auer, M., Loos, L., Marx, S., Przybill, K., Fendrich, S., Mocnik, F.-B. & Zipf, A. (2019): OSHDB: a framework for spatio-temporal analysis of OpenStreetMap history data. Open Geospatial Data, Software and Standards 2019 4:3. https://doi.org/10.1186/s40965-019-0061-3
Auer, M.; Eckle, M.; Fendrich, S.; Griesbaum, L.; Kowatsch, F.; Marx, S.; Raifer, M.; Schott, M.; Troilo, R.; Zipf, A. (2018): Towards Using the Potential of OpenStreetMap History for Disaster Activation Monitoring. ISCRAM 2018. Rochester, NY, US.
Ludwig, C.; Zipf, A. (2019): Exploring regional differences in the representation of urban green spaces in OpenStreetMap. Proceedings of the GeoCultGIS - Geographic and Cultural Aspects of Geo-Information: Issues and Solutions, Limassol (Cyprus)
###Code
import src.TagSteps as plots
from src.map import ChoroplethMap
BBOXes = {
"Heidelberg" : "8.6581,49.3836,8.7225,49.4363",
"Pokhara":"83.9142,28.1693,84.0775,28.2687",
"Kathmandu" : "85.26810609,27.66794937,85.3755574,27.75133958",
"Manila":"120.94169186,14.55699989, 121.0261672,14.63900265"}
TIME_MONTHLY = "2009-11-01/2020-06-01/P1M"
FILTER = ["building=*","highway=*","amenity=* and name=*"]
path_grid = r"./data/map_stored.json"
path_style = r"./data/style_stored.json"
m = ChoroplethMap()
m.getTimeChoroplethMap(BBOXes,FILTER,TIME_MONTHLY,size=1,tags=False) #tags=True will increase computiation time significantly
m.renderMap()
leaflet_m = m.getMap()
leaflet_m
###Output
_____no_output_____ |
Jupyter Notebooks/Data_Manager.ipynb | ###Markdown
@author: Marcos Tulio Fermin Lopez
###Code
import json
import os.path
###Output
_____no_output_____
###Markdown
This module contains all functions used to retrieve and save the desired data in a .json file.
###Code
def get_data():
# if file exists on disk send it back else send dummy dat to prevent crash
if os.path.exists('simulation_data.json'):
with open('simulation_data.json', 'r+') as f: # read
data = json.load(f)
return data
else:
print('\nThere is so simulation data present on the drive! -- returning dummy data to prevent crash')
trafficDataDict = { # dummy data needed to make the file a template
"PIR": {
"carsServiced": "0",
"simulationTime": "0",
"AWT": "0"
},
"Camera": {
"carsServiced": "0",
"simulationTime": "0",
"AWT": "0"
},
"Antenna": {
"carsServiced": "0",
"EastToWest": "0",
"NorthToSouth": "0",
"AWT": "0"
},
}
return trafficDataDict
def make_data_file():
trafficDataDict = { # dummy data needed to make the file a template
"PIR": {
"carsServiced": "nil",
"simulationTime": "nil",
"AWT": "nil"
},
"Camera": {
"carsServiced": "nil",
"simulationTime": "nil",
"AWT": "nil"
},
"Antenna": {
"carsServiced": "nil",
"EastToWest": "nil",
"NorthToSouth": "nil",
"AWT": "nil",
},
}
# the json file where the output must be stored
out_file = open("simulation_data.json", "w")
json.dump(trafficDataDict, out_file, indent=5)
print('File made')
out_file.close()
def safely_check_if_file_exists():
if os.path.exists('simulation_data.json'):
pass
else:
print('Simulation data file is not present\nMaking file . . .')
make_data_file()
def show_All_Data():
file = open('simulation_data.json', 'r+')
# returns JSON object as
# a dictionary
data = json.load(file)
# Iterating through the json
# list
for i in data:
print(i, data[i])
def save_Antenna(NorthToSouthCars, EastToWestCars, AWT):
safely_check_if_file_exists()
data = get_data()
with open('simulation_data.json', 'w') as f: # write
data['Antenna']['EastToWest'] = EastToWestCars # left and right
data['Antenna']['NorthToSouth'] = NorthToSouthCars # top and bottom
data['Antenna']['AWT'] = AWT
data['Antenna']['carsServiced'] = (EastToWestCars+NorthToSouthCars)
json.dump(data, f, indent=5)
def save_PIR(carsSeviced, simulationTime, AWT):
safely_check_if_file_exists()
data = get_data()
with open('simulation_data.json', 'w') as f: # write
data['PIR']['carsServiced'] = carsSeviced
data['PIR']['simulationTime'] = simulationTime
data['PIR']['AWT'] = AWT
json.dump(data, f, indent=5)
def save_Camera(carsSeviced, simulationTime, AWT):
safely_check_if_file_exists()
data = get_data()
with open('simulation_data.json', 'w') as f: # write
data['Camera']['carsServiced'] = carsSeviced
data['Camera']['simulationTime'] = simulationTime
data['Camera']['AWT'] = AWT
json.dump(data, f, indent=5)
if __name__ == '__main__':
# save_PIR('1', '1')
# save_Camera('2', '2')
# show_All_Data()
# safely_check_if_file_exists()
pass
###Output
_____no_output_____ |
2015_Fall/MATH-578B/Homework1/Homework1.ipynb | ###Markdown
Problem 1 ![mc](mc.png)The transition matrix is given by:$$\begin{bmatrix}1-\alpha & \alpha\\\beta & 1-\beta\end{bmatrix}$$ Part (a)$\eta = \min\{n>0 : X_n=1\}$ given $X_0=0$*To Prove* $\eta \sim Geom(\alpha)$$P(\eta = n) = P(X_0=0,X_1=0, \dots, X_{n-1}=0, X_n=1)$Using the Markov property this can be written as:$$P(\eta = n) = P(X_0=0)P(X_1=0|X_0=0)P(X_2=0|X_1=0)P(X_3=0|X_2=0) \dots P(X_{n-1}=0|X_{n-2}=0)P(X_{n}=1|X_{n-1}=0)$$And being time-homogeneous, this simplifies to:$$P(\eta = n) = P(X_0=0)\big(P(X_1=0|X_0=0)\big)^{n-1}\times P(X_1=1|X_0=0)$$$\implies$ $$P(\eta = n) = P(X_0=0)(1-\alpha)^{n-1}\alpha = (1-\alpha)^{n-1}\alpha$$And hence $\eta \sim Geom(\alpha)$ Part (b)Spectral decomposition of $P$ and value for $P(X_n=1|X_0=0)$Spectral decomposition of $P$:$$\det\begin{bmatrix}1-\alpha-\lambda & \alpha\\\beta & 1-\beta-\lambda\end{bmatrix} = 0$$$$\lambda^2 +(\alpha + \beta-2) \lambda + (1-\alpha -\beta) = 0$$Thus, $\lambda_1 = 1$ and $\lambda_2 = 1-\alpha-\beta$Eigenvectors are given by:$v_1^T = \big( x_1\ x_1 \big)\ \forall\ x_1 \in R$and for $\lambda_2$ , $v_2 = \big( x_1\ \frac{-\beta x_1}{\alpha} \big)$Now using the Markov property: $P(X_n=1|X_0=0) = (P^n)_{01}$Now, $P^n = VD^nV^{-1}$where:$$V = \begin{bmatrix}1 & 1\\1 & \frac{-\beta}{\alpha}\end{bmatrix}$$and $$D = \begin{bmatrix}1 & 0 \\0 & (1-\alpha-\beta)\end{bmatrix}$$$$V^{-1} = \frac{-1}{\frac{\beta}{\alpha}+1}\begin{bmatrix}-\frac{\beta}{\alpha} & -1 \\-1 & 1\end{bmatrix}$$Thus,$$P^n = \begin{bmatrix}1 & 1\\1 & \frac{-\beta}{\alpha}\end{bmatrix} \times \begin{bmatrix}1 & 0 \\0 & (1-\alpha-\beta)^n\end{bmatrix} \times \frac{-1}{\frac{\beta}{\alpha}+1}\begin{bmatrix}-\frac{\beta}{\alpha} & -1 \\-1 & 1\end{bmatrix}$$$$P^n = \frac{1}{\alpha+\beta} \begin{bmatrix}\beta + \alpha(1-\alpha-\beta)^n & \alpha-\alpha(1-\alpha-\beta)^n\\\beta - \beta(1-\alpha-\beta)^n & \alpha + \beta(1-\alpha-\beta)^n\end{bmatrix}$$ Part (c)When $\alpha+\beta=1$, the eigenvalues are $\lambda_1=1$ and $\lambda_2=0$ and hence$$P^n = \begin{bmatrix}\beta & \alpha \\\beta & \alpha\end{bmatrix}$$*Check:*Also consider the following identity: $P^{n+1}=PP^n$then:$$\begin{bmatrix}p_{00}^{n+1} & p_{01}^{n+1}\\p_{10}^{n+1} & p_{11}^{n+1}\\\end{bmatrix} = \begin{bmatrix} p_{00}^n & p_{01}^n\\p_{10}^n & p_{11}^n\end{bmatrix} \times \begin{bmatrix}1-\alpha & \alpha\\\beta & 1-\beta\end{bmatrix}$$$\implies$$$\begin{align}p_{11}^{n+1} &= p_{10}^n(\alpha) + p_{11}^n(1-\beta)\\ &= (1-p_{11}^n)(\alpha) +(p_{11}^n)(1-\beta)\\ &= \alpha + (1-\alpha-\beta)p_{11}^n\end{align}$$Consider the recurrence:$$x_{n+1} = \alpha+(1-\alpha-\beta)x_n$$Constant solution $x_n=x_{n+1}=x$ is given by: $x=\frac{\alpha}{\alpha+\beta}$Now let $y_n = x_n-x=x_n-\frac{\alpha}{\alpha+\beta}$ then,$y_{n+1} = (1-\alpha-\beta)y_n$ and hence $y_n=(1-\alpha-\beta)^n y_0$Thus,$$p_{11}^{n} = (1-\alpha-\beta)^np_{11}^0 +\frac{\alpha}{\alpha+\beta}$$Given $P_{00}=\frac{\beta}{\alpha+\beta}$ and $\alpha+\beta=1$ and hence:$p_{11}^n = \frac{\alpha}{\alpha+\beta} = \alpha$and hence, $p_{10}^n = \beta$Similarly,$p_{00}^n = \beta$ and $p_{01}^n = \alpha$ Problem 2$P(X_1=0) = \frac{\beta}{\alpha+\beta}$ and hence $P(X_1=1) = \frac{\alpha}{\alpha+\beta}$$X=X_1X_2\dots X_n$ and $Y=Y_1Y_2\dots Y_n$ represents the reverse string $Y_k=X_{n-k+1}$ Part (a)Given string of digits: $a_1,a_2,a_3 \dots a_n $ to find: $P(Y_1=a_1,Y_2=a_2,Y_3=a_3\dots Y_n=a_n)$$$\begin{align}P(Y_1=a_1,Y_2=a_2,Y_3=a_3\dots Y_n=a_n) &= P(X_1=a_n,X_2=a_{n-1}, \dots X_n=a_1) \\&= P(X_1=a_n)P(X_2=a_{n-1}|X_1=a_n)P(X_3=a_{n-2}|X_2=a_{n-1})\dots P(X_n=a_1|X_{n-1}=a_2) \\&= 
P(X_1=a_n)(P_{a_n a_{n-1}})(P_{a_{n-1} a_{n-2}}) \dots (P_{a_2 a_1})\end{align}$$The problem asked about not using spectral decomposition, but I was not sure how spectral decomposition would have come in handy if the states $a_i$ are not specified explicitly. Part (b)$$z=\begin{cases}X & if \theta = H\\Y & otherwise\end{cases}$$Given a function $f$ such that $f :\{0,1\}^n \longrightarrow \{H,T\}$To show: $P(f(Z)=\theta)=0.5$$P(\theta=H) = P(\theta=T) = 0.5$Given Z, guess $\theta$: $P(\theta=H|Z=X) = \frac{P(\theta=H, Z=X)}{P(Z=X)}$$f(Z)$ has only two possible values: $H$ and $T$, and hence assuming the guess function is unbiased:$P(f(Z)=H) = P(f(Z)=T)=0.5$ Problem 3$$\tau = \min\{ n \geq 0: X_n=\dagger\}$$$$E[\tau] = E_a\big[E[\tau|X_0=a]\big]\ where\ a \in \{\phi, \alpha, \beta, \alpha+\beta, pol, \dagger\}$$Let $S=\{\phi, \alpha, \beta, \alpha+\beta, pol, \dagger\}$Consider for $a\neq \dagger$:$$h(a) = E[\tau|X_0=a] = \sum_{s \in S}P_{as} \times \big(1 + E[\tau|X_0=s]\big) $$$\implies$ $$h(a) = ((I-P_{-})^{-1}\mathbf{1})_a$$where $P_{-}$ represents the matrix with the row and column representing $X_i=\dagger$ removed.
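Before moving on to the code, a quick numerical sanity check of the closed form for $P^n$ from Problem 1, with example values $\alpha=0.3$, $\beta=0.5$ chosen only for illustration:
```python
# Compare the derived closed form for P^n with a direct matrix power.
import numpy as np

alpha, beta = 0.3, 0.5
P = np.array([[1 - alpha, alpha],
              [beta, 1 - beta]])
n = 7
lam = 1 - alpha - beta
closed_form = np.array([[beta + alpha * lam**n, alpha - alpha * lam**n],
                        [beta - beta * lam**n,  alpha + beta * lam**n]]) / (alpha + beta)
print(np.allclose(np.linalg.matrix_power(P, n), closed_form))  # True

# P(eta = n) = (1 - alpha)**(n - 1) * alpha should sum to 1 over n >= 1.
print(sum((1 - alpha)**(j - 1) * alpha for j in range(1, 500)))  # ~1.0
```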
###Code
%matplotlib inline
from __future__ import division
import numpy as np
from numpy import linalg as LA
k_a=0.2
k_b=0.2
k_p = 0.5
P = np.matrix([[1-k_a-k_b, k_a ,k_b, 0, 0, 0],
[k_a, 1-k_a-k_b, 0, k_b, 0, 0],
[k_b, 0, 1-k_a-k_b, k_a, 0, 0],
[0, k_b, k_a, 1-k_a-k_b-k_p, k_p, 0],
[0, 0, 0, 0, 0, 1],
[0, 0, 0, 1, 0, 0]])
q = [[k_a-k_b,k_a,k_b,0,0],
[k_a,k_a+k_b,0,k_b,0],
[k_b,0,k_a+k_b,0,0],
[0,k_b,k_a,k_a+k_b+k_p,k_p],
[0,0,0,0,0]]
qq = np.array(q)
print(P)
states = ['phi', 'alpha', 'beta', 'ab', 'pol', 'd']
import networkx as nx
G=nx.from_numpy_matrix(P,create_using=nx.MultiDiGraph())
G.edges(data=True)
#nx.draw_graphviz(G)# labels=states)
nx.write_dot(G,'G.dot')
!neato -T png G.dot > multi.png
###Output
_____no_output_____
###Markdown
![alt](multi.png) The markov chain seems to be irreducibleOne way to obtain the stationary state is to look at the eigen vectors correspendoing to the eigen value of 1. However, the eigen vectors come out to be imaginary. This seemed to be an issue wwith the solver so I relied on solving the system of equation: $\pi = P\pi$
###Code
w, v = LA.eig(P)
for i in range(0,6):
print 'Eigen value: {}\n Eigen vector: {}\n'.format(w[i],v[:,i])
## Solve for (I-Q)^{-1}
iq = np.linalg.inv(np.eye(5)-qq)
iq_phi = iq[0,0]
iq_alpha = iq[1,1]
iq_beta = iq[2,2]
iq_alphabeta = iq[3,3]
iq_pol = iq[4,4]
###Output
_____no_output_____
###Markdown
EDIT: I made a correction to solve for the corrected $\pi$, by accounting for $P^T$ and not $P$
###Code
A = np.eye(6)-P.T
A[-1,:] = [1,1,1,1,1,1]
B = [0,0,0,0,0,1]
X=np.linalg.solve(A,B)
print(X)
###Output
[ 0.2 0.2 0.2 0.2 0.1 0.1]
###Markdown
Stationary state is given by $\pi = (0.1667, 0.1667, 0.1667, 0.1667, 0.1667, 0.1667)$. The mean number of visits per unit time to $\dagger$ is $\frac{1}{\pi_6} = 6$. However, strangely, this does not satisfy $\pi=P\pi$. I was not able to figure out where I went wrong. EDIT: I made a correction to solve for the corrected $\pi$, by accounting for $P^T$ and not $P$, so this no longer holds
###Code
#EDIT: I made correction to solve for corrected $\pi$, by acounting for $P^T$ and not $P$
print('\pi*P={}\n'.format(X*P))
print('But \pi={}'.format(X))
###Output
\pi*P=[[ 0.2 0.2 0.2 0.2 0.1 0.1]]
But \pi=[ 0.2 0.2 0.2 0.2 0.1 0.1]
###Markdown
Simulating the chain:General strategy: Generate a random number $\longrightarrow$ Select a state $\longrightarrow$ Jump to state $\longrightarrow$ Repeat
###Code
## phi
np.random.seed(1)
PP = {}
PP['phi']= [1-k_a-k_b, k_a ,k_b, 0, 0, 0]
PP['alpha'] = [k_a, 1-k_a-k_b, 0, k_b, 0, 0]
PP['beta'] = [k_b, 0, 1-k_a-k_b, k_a, 0, 0]
PP['ab']= [0, k_b, k_a, 1-k_a-k_b-k_p, k_p, 0]
PP['pol']= [0, 0, 0, 0, 0, 1]
PP['d']= [0, 0, 0, 1, 0, 0]
##For $h(\phi)$
x0='phi'
x='phi'
def h(x):
s=0
new_state=x
for i in range(1,1000):
old_state=new_state
probs = PP[old_state]
z=np.random.choice(6, 1, p=probs)
new_state = states[z[0]]
#print('{} --> {}'.format(old_state, new_state))
s+=z[0]
return s/1000
###Output
_____no_output_____
###Markdown
Part (a,b,c)
###Code
print(r'$h(\phi)$: From simulation: {}; From calculation: {}'.format(h('phi'),iq_phi))
print(r'$h(\alpha)$: From simulation: {}; From calculation: {}'.format(h('alpha'),iq_alpha))
print(r'$h(\beta)$: From simulation: {}; From calculation: {}'.format(h('beta'),iq_beta))
print(r'$h(\alpha+\beta)$: From simulation: {}; From calculation: {}'.format(h('ab'),iq_alphabeta))
print(r'$h(\pol)$: From simulation: {}; From calculation: {}'.format(h('pol'),iq_pol))
old_state = [0.1,0.2,0.3,0.4,0,0]
def perturb(old_state):
new_state = old_state*P
return new_state
new_state = [0,0,0,0,0,1]
while not np.allclose(old_state, new_state):
old_state, new_state = new_state, perturb(old_state)
print old_state
# EDIT: I made correction to solve for corrected $\pi$, by acounting for $P^T$ and not $P$
print('From calculation(which is NO LONGER wrong!), stationary distribution:{}'.format(X))
print('From simulation, stationary distribution: {}'.format(old_state))
###Output
From calculation(which is NO LONGER wrong!), stationary distribution:[ 0.2 0.2 0.2 0.2 0.1 0.1]
From simulation, stationary distribution: [[ 0.19999998 0.20000004 0.20000004 0.20000002 0.1000002 0.09999973]]
|
January/Week1/4.ipynb | ###Markdown
Validate Balanced Parentheses[The Original Question](https://mp.weixin.qq.com/s/PW7Lqurlde-xTyatiJUlGQ) QuestionGiven a string containing just the characters `(`, `)`, `{`, `}`, `[` and `]`, determine if the input string is valid. An input string is valid if:- Open brackets are closed by the same type of brackets.- Open brackets are closed in the correct order.- An empty string.
###Code
def validation(brackets: str) -> bool:
# Define a dictionary to attach the bracket pairs.
pairs = {
'}': '{',
']': '[',
')': '('
}
# Create a stack to record unclosed brackets.
stack = list()
# Traverse all characters in the string.
for k in brackets:
# If the current characters closes the nearest bracket,
# remove it from the stack, representing it is closed.
if len(stack) > 0 and k in pairs.keys() and pairs[k] == stack[-1]:
stack.pop()
pass
# Otherwise, push the current characters into the stack,
# and mark them unclosed.
else:
stack.append(k)
pass
pass
# If input string is valid,
# there's no unclosed characters remained in the stack.
return len(stack) == 0
print(validation(r''))
print(validation(r'([{}])'))
print(validation(r'(){}[]'))
print(validation(r'{](}[)'))
print(validation(r')]}'))
###Output
True
True
True
False
False
|
Intermediate Python for Data Science/Logic- Control Flow and Filtering/01-Equality.ipynb | ###Markdown
To check if two Python values, or variables, are equal you can use ==. To check for inequality, you need !=. As a refresher, have a look at the following examples that all result in True. Feel free to try them out in the IPython Shell.
>2 == (1 + 1)
>"intermediate" != "python"
>True != False
>"Python" != "python"
When you write these comparisons in a script, you will need to wrap a print() function around them to see the output. In the editor on the right, write code to see if True equals False.
###Code
# Comparison of booleans
print(True==False)
###Output
False
###Markdown
Write Python code to check if -5 * 15 is not equal to 75
###Code
# Comparison of integers
print(-5*15!=75)
###Output
True
###Markdown
Ask Python whether the strings "pyscript" and "PyScript" are equal.
###Code
# Comparison of strings
print("pyscript"=="PyScript")
###Output
False
###Markdown
What happens if you compare booleans and integers? Write code to see if True and 1 are equal.
###Code
# Compare a boolean with an integer
print(True==1)
###Output
True
|
NoteBooks/Curso de WebScraping/Unificado/web-scraping-master/Clases/Módulo 4_ APIs/M4C4 - Obteniendo la discografía (Script).ipynb | ###Markdown
Module 4: APIs SpotifyIn this module we will use APIs to obtain information about artists, albums and tracks available on Spotify. But first... what is an **API**?An API is an interface for programming applications (*Application Programming Interface*). That is, it is a set of functions, methods, rules and definitions that allow us to develop applications (in this case a scraper) that communicate with Spotify's servers. APIs are designed and developed by the companies that have an interest in having applications (public or private) built on top of their services. Spotify has public, well-documented APIs that we will be using throughout this project. RESTA term you will surely come across when looking for information online is **REST** or *RESTful*. It stands for *representational state transfer*, and if an API is REST or RESTful it means that it respects certain architectural principles, such as a client/server communication protocol (which will be HTTP) and, among other things, a set of defined operations that we know as **methods**. We have already been using the GET method to make requests to web servers. DocumentationAs mentioned before, APIs are designed by the same companies that have an interest in having applications (public or private) built that consume their services or information. That is why the way to use an API varies depending on the service we want to consume. Using Spotify's APIs is not the same as using Twitter's APIs. For this reason it is very important to read the available documentation, usually found in the developers section of each site. Here is the [link to Spotify's documentation](https://developer.spotify.com/documentation/) JSONJSON stands for *JavaScript Object Notation* and is a format for describing objects that became so widely used that it is now considered language-independent. In fact, we will use it in this project even though we are working in Python, because it is the format in which we will receive the responses to the requests we make through the APIs. For us, it will be little more than a dictionary with a few particularities that we will see throughout the course. Useful links for the class:- [Spotify documentation - Artists](https://developer.spotify.com/documentation/web-api/reference/artists/)- [Iron Maiden on Spotify](https://open.spotify.com/artist/6mdiAmATAx73kdxrNrnlao)
###Code
import requests
id_im = '6mdiAmATAx73kdxrNrnlao'
url_base = 'https://api.spotify.com/v1'
ep_artist = '/artists/{artist_id}'
url_base+ep_artist.format(artist_id=id_im)
r = requests.get(url_base+ep_artist.format(artist_id=id_im))
r.status_code
r.json()
token_url = 'https://accounts.spotify.com/api/token'
params = {'grant_type': 'client_credentials'}
headers = {'Authorization': 'Basic NDRiN2IzNmVjMTQ1NDY3ZjlhOWVlYWY3ZTQxN2NmOGI6N2I0YWE3YTBlZjQ4NDQwNDhhYjFkMjI0MzBhMWViMWY='}
r = requests.post(token_url, data=params, headers=headers)
r.status_code
r.json()
token = r.json()['access_token']
token
header = {"Authorization": "Bearer {}".format(token)}
r = requests.get(url_base+ep_artist.format(artist_id=id_im), headers=header)
r.status_code
r.json()
url_busqueda = 'https://api.spotify.com/v1/search'
search_params = {'q': "Iron+Maiden", 'type':'artist', 'market':'AR'}
busqueda = requests.get(url_busqueda, headers=header, params=search_params)
busqueda.status_code
busqueda.json()
import pandas as pd
df = pd.DataFrame(busqueda.json()['artists']['items'])
df.head()
df.sort_values(by='popularity', ascending=False).iloc[0]['id']
###Output
_____no_output_____
###Markdown
Class 20
###Code
import base64
def get_token(client_id, client_secret):
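    # Encode "client_id:client_secret" as base64 and exchange it for a bearer token
    # via Spotify's client-credentials flow (POST to the token endpoint below).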
encoded = base64.b64encode(bytes(client_id+':'+client_secret, 'utf-8'))
params = {'grant_type':'client_credentials'}
header={'Authorization': 'Basic ' + str(encoded, 'utf-8')}
r = requests.post('https://accounts.spotify.com/api/token', headers=header, data=params)
if r.status_code != 200:
print('Error en la request.', r.json())
return None
print('Token válido por {} segundos.'.format(r.json()['expires_in']))
return r.json()['access_token']
client_id = '44b7b36ec145467f9a9eeaf7e417cf8b'
client_secret = '7b4aa7a0ef4844048ab1d22430a1eb1f'
token = get_token(client_id, client_secret)
header = {"Authorization": "Bearer {}".format(token)}
id_im
artist_im = requests.get(url_base+ep_artist.format(artist_id=id_im), headers=header)
artist_im.status_code
artist_im.json()
params = {'country': 'AR'}
albums_im = requests.get(url_base+ep_artist.format(artist_id=id_im)+'/albums', headers=header, params=params)
albums_im.status_code
albums_im.json()['items']
[(album['id'], album['name']) for album in albums_im.json()['items']]
bnw_id = '1hDF0QPIHVTnSJtxyQVguB'
###Output
_____no_output_____
###Markdown
Let's go to the albums documentation
###Code
album_ep = '/albums/{album_id}'
album_params = {'market':'AR'}
bnw = requests.get(url_base+album_ep.format(album_id=bnw_id)+'/tracks', headers=header, params=album_params)
bnw
bnw.json()
bnw.json()['items']
[(track['id'], track['name']) for track in bnw.json()['items']]
###Output
_____no_output_____ |
scratch/school-closures.ipynb | ###Markdown
Coronavirus School Report CardsMock-up a demo case that could be scaled to LAUSD.Pare back columns to bare bones for what's needed to do the school report cards.Add rules for how NYC is quarantining / investigating cases as tests come back positive.
###Code
import pandas as pd
from IPython.display import Markdown
df = pd.read_parquet('../scratch/master_student_df.parquet')
test_results = pd.read_parquet('../scratch/tested_positive.parquet')
DATES = list(test_results.date.unique())
DATES
display(Markdown(
f"In this simple example, there are {df.school.nunique()} schools, "
f"{df.pod.nunique()} unique classrooms / pods within these schools, "
f"and {df.student_id.nunique()} students total."
f"We will demo what getting results for {DATES[0]} and {DATES[1]}, "
"and produce coronavirus school report cards for each day."
))
###Output
_____no_output_____
###Markdown
Apply quarantine rules / school protocols* When one student is infected in a pod, the whole pod QTs for 14 days* If there is a second case in the pod, the whole school is investigated for possible QT and closure* If there is a second case outside the pod, the pod QTs for 14 days and the school closes for 1 day for investigation* If the link between cases cannot be determined, close the entire school, everyone QTs for 14 days* If the link is outside the school, then the affected pods QT, but the school can reopen* If the link can be determined and the 2 cases are not linked, then the affected pods QT, but the school can reopen
###Code
def unique_infections(df, aggregation_level):
new_col = f"num_{aggregation_level}_infections"
summary = (df.groupby(aggregation_level)["infection_id"].nunique()
.to_frame().reset_index()
.rename(columns = {"infection_id": new_col})
)
"""
Can't think of a good way to figure out QT start/end for the pod
if it's assumed there will be multiple QT periods in a school year
Maybe a subset at the beginning of this, or keep track of which day test results are,
and data resets after day 30
Or, students are tested by cohorts, so naturally, test results naturally account for this.
This assumes that contact tracers are able to group each outbreak by infection_id.
Worst case scenario is that each unique test result is unique infection_id...blowing up num_pod_infections up.
"""
return summary
def assign_pod_QT(row):
if row.num_pod_infections >= 1:
return 1
else:
return 0
def apply_positives(student_df, tested_positive):
# 1:m merge beacuse there could be multiple sources of infections that affect same student
df = pd.merge(student_df, tested_positive, on = "student_id", how = "left", validate = "1:m")
# If you were not infected yesterday but infected today, we want to count you as infected.
# If you were infected yesterday, and not today, we still want to count you as infected.
# Look across these 2 rows and take the max, so when we aggregate later, it's a cumulative today.
df2 = (df.assign(
student_infected = (df[["student_infected", "infected"]].max(axis=1)
.fillna(0).astype(int)
)
)
)
# See how many unique infections occur within a pod
pod_infections = unique_infections(df2, "pod")
# See how many unique infections occur within a school
school_infections = unique_infections(df2, "school")
# Merge onto student-level df
# (Eventually, think about a student-virus level df, if students are assumed to be infected multiple times in school year)
m1 = pd.merge(df2, pod_infections, on = "pod", how = "left", validate = "m:1")
m2 = pd.merge(m1, school_infections, on = "school", how = "left", validate = "m:1")
# Assign pod QT
m3 = m2.assign(
pod_QT_today = m2.apply(assign_pod_QT, axis=1)
)
# Similarly, flag the pod as already being in QT
# This needs to be revised to include a 14-day window....otherwise it's on forever
m3 = (m3.assign(
pod_QT = m3[["pod_QT", "pod_QT_today"]].max(axis=1).fillna(0).astype(int)
).drop(columns = "pod_QT_today")
)
return m3
def assign_school_QT(df):
df2 = df[["school", "infection_id", "pod"]].drop_duplicates().reset_index()
# If same infection_id affects multiple pods, close school
pods_affected = (df2[df2.infection_id.notna()]
.groupby(["school", "infection_id"])
.agg({"pod": "nunique"})
.rename(columns = {"pod": "num_pods_affected"})
.reset_index()
)
# Schools can be forced into QT when multiple pods affected and no link established
# If link is established and it's outside school, only affected pods QT
pods_affected = (pods_affected.assign(
school_QT = pods_affected.apply(lambda x: 1 if x.num_pods_affected > 1 else 0, axis=1)
)[["school", "school_QT"]]
)
# Schools can already be in QT mode when more positive tests come in, so take max
pods_affected = pods_affected.assign(
school_QT = pods_affected.groupby("school")["school_QT"].transform("max")
).drop_duplicates()
return pods_affected
# Use this as high-level function to call other functions defined above
def apply_quarantine_rules(df, tested_positive):
df2 = apply_positives(df, tested_positive)
school_QT_df = assign_school_QT(df2)
df2 = pd.merge(df2.drop(columns = "school_QT"),
school_QT_df,
on = "school", how = "left", validate = "m:1")
keep = ["student_id", "student",
"classroom", "school", "pod",
"student_infected",
"pod_QT", "school_QT"
]
df3 = (df2[keep]
.assign(
student_infected = (df2.groupby("student_id")["student_infected"].transform("max")
.fillna(0).astype(int)
),
pod_QT = df2.pod_QT.fillna(0).astype(int),
school_QT = df2.school_QT.fillna(0).astype(int),
date = tested_positive.date.iloc[0]
)
)
return df3
# Join in grade information or demographic information if school wants further analysis
# These summary stats will just be by pod/school for now
def summary_report_card(df, aggregation_level):
students_confirmed_positive = (df.groupby(aggregation_level)
.agg({
"date": "min",
"student_infected": "sum",
"student_id": "nunique"
})
.reset_index()
.rename(columns = {
"student_infected": f"students_confirmed_positive",
"student_id": f"students_QT"
})
)
if aggregation_level == "school":
pods_affected = (df.groupby(aggregation_level)
.agg({"pod": "nunique"})
.reset_index()
.rename(columns = {"pod": f"{aggregation_level}_pods_QT"})
)
# Merge on this extra info about number of pods affected for schools
students_confirmed_positive = pd.merge(students_confirmed_positive, pods_affected,
on = "school", how = "left", validate = "1:1"
)
# Can define column order
return students_confirmed_positive
test_results1 = test_results[test_results.date == DATES[0]]
test_results2 = test_results[test_results.date == DATES[1]]
start_cols = ['student', 'classroom', 'school', 'student_id', 'pod',
'student_infected', 'pod_QT', 'school_QT']
day1 = apply_quarantine_rules(df[start_cols], test_results1)
pod_summary1 = summary_report_card(day1, "pod")
school_summary1 = summary_report_card(day1, "school")
display(pod_summary1)
display(school_summary1)
day2 = apply_quarantine_rules(day1[start_cols], test_results2)
pod_summary2 = summary_report_card(day2, "pod")
school_summary2 = summary_report_card(day2, "school")
display(pod_summary2)
display(school_summary2)
###Output
_____no_output_____ |
beginners/01_0.Formule1_Data_Collection.ipynb | ###Markdown
Formule 1 Data Collection from ERGAST Web
###Code
import os
import pandas as pd
import numpy as np
from pprint import pprint
import requests
# I will use this function later to calculate points and wins prior to the race
def lookup (df, team, points):
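    # Build a season+name+round key for the current round and for the previous round,
    # then self-merge so each row also carries the cumulative points/wins *before* that race.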
df['lookup1'] = df.season.astype(str) + df[team] + df['round'].astype(str)
df['lookup2'] = df.season.astype(str) + df[team] + (df['round']-1).astype(str)
new_df = df.merge(df[['lookup1', points]], how = 'left', left_on='lookup2',right_on='lookup1')
new_df.drop(['lookup1_x', 'lookup2', 'lookup1_y'], axis = 1, inplace = True)
new_df.rename(columns = {points+'_x': points+'_after_race', points+'_y': points}, inplace = True)
new_df[points].fillna(0, inplace = True)
return new_df
###Output
_____no_output_____
###Markdown
Races
###Code
races = {'season': [],
'round': [],
'circuit_id': [],
'lat': [],
'long': [],
'country': [],
'date': [],
'url': []}
for year in list(range(1950,2020)):
url = 'https://ergast.com/api/f1/{}.json'
r = requests.get(url.format(year))
json = r.json()
for item in json['MRData']['RaceTable']['Races']:
try:
races['season'].append(int(item['season']))
except:
races['season'].append(None)
try:
races['round'].append(int(item['round']))
except:
races['round'].append(None)
try:
races['circuit_id'].append(item['Circuit']['circuitId'])
except:
races['circuit_id'].append(None)
try:
races['lat'].append(float(item['Circuit']['Location']['lat']))
except:
races['lat'].append(None)
try:
races['long'].append(float(item['Circuit']['Location']['long']))
except:
races['long'].append(None)
try:
races['country'].append(item['Circuit']['Location']['country'])
except:
races['country'].append(None)
try:
races['date'].append(item['date'])
except:
races['date'].append(None)
try:
races['url'].append(item['url'])
except:
races['url'].append(None)
races = pd.DataFrame(races)
print(races.shape)
races.head()
races.tail()
if not os.path.exists('./data'):
os.mkdir('./data')
races.to_csv('./data/races.csv', index = False)
###Output
_____no_output_____
###Markdown
Rounds
###Code
race = pd.read_csv('./data/races.csv')
rounds = []
for year in np.array(race.season.unique()):
rounds.append([year, list(race[race.season == year]['round'])])
rounds[:5]
###Output
_____no_output_____
###Markdown
Results
###Code
results = {'season': [],
'round':[],
'circuit_id':[],
'driver': [],
'date_of_birth': [],
'nationality': [],
'constructor': [],
'grid': [],
'time': [],
'status': [],
'points': [],
'podium': [],
'url': []}
for n in list(range(len(rounds))):
for i in rounds[n][1]:
url = 'http://ergast.com/api/f1/{}/{}/results.json'
r = requests.get(url.format(rounds[n][0], i))
json = r.json()
for item in json['MRData']['RaceTable']['Races'][0]['Results']:
try:
results['season'].append(int(json['MRData']['RaceTable']['Races'][0]['season']))
except:
results['season'].append(None)
try:
results['round'].append(int(json['MRData']['RaceTable']['Races'][0]['round']))
except:
results['round'].append(None)
try:
results['circuit_id'].append(json['MRData']['RaceTable']['Races'][0]['Circuit']['circuitId'])
except:
results['circuit_id'].append(None)
try:
results['driver'].append(item['Driver']['driverId'])
except:
results['driver'].append(None)
try:
results['date_of_birth'].append(item['Driver']['dateOfBirth'])
except:
results['date_of_birth'].append(None)
try:
results['nationality'].append(item['Driver']['nationality'])
except:
results['nationality'].append(None)
try:
results['constructor'].append(item['Constructor']['constructorId'])
except:
results['constructor'].append(None)
try:
results['grid'].append(int(item['grid']))
except:
results['grid'].append(None)
try:
results['time'].append(int(item['Time']['millis']))
except:
results['time'].append(None)
try:
results['status'].append(item['status'])
except:
results['status'].append(None)
try:
results['points'].append(int(item['points']))
except:
results['points'].append(None)
try:
results['podium'].append(int(item['position']))
except:
results['podium'].append(None)
try:
results['url'].append(json['MRData']['RaceTable']['Races'][0]['url'])
except:
results['url'].append(None)
results = pd.DataFrame(results)
print(results.shape)
results.head()
results.tail()
results.to_csv('./data/results.csv', index = False)
###Output
_____no_output_____
###Markdown
Driver Standings
###Code
driver_standings = {'season': [],
'round':[],
'driver': [],
'driver_points': [],
'driver_wins': [],
'driver_standings_pos': []}
for n in list(range(len(rounds))):
for i in rounds[n][1]:
url = 'https://ergast.com/api/f1/{}/{}/driverStandings.json'
r = requests.get(url.format(rounds[n][0], i))
json = r.json()
for item in json['MRData']['StandingsTable']['StandingsLists'][0]['DriverStandings']:
try:
driver_standings['season'].append(int(json['MRData']['StandingsTable']['StandingsLists'][0]['season']))
except:
driver_standings['season'].append(None)
try:
driver_standings['round'].append(int(json['MRData']['StandingsTable']['StandingsLists'][0]['round']))
except:
driver_standings['round'].append(None)
try:
driver_standings['driver'].append(item['Driver']['driverId'])
except:
driver_standings['driver'].append(None)
try:
driver_standings['driver_points'].append(int(item['points']))
except:
driver_standings['driver_points'].append(None)
try:
driver_standings['driver_wins'].append(int(item['wins']))
except:
driver_standings['driver_wins'].append(None)
try:
driver_standings['driver_standings_pos'].append(int(item['position']))
except:
driver_standings['driver_standings_pos'].append(None)
driver_standings = pd.DataFrame(driver_standings)
print(driver_standings.shape)
driver_standings = lookup(driver_standings, 'driver', 'driver_points')
driver_standings = lookup(driver_standings, 'driver', 'driver_wins')
driver_standings = lookup(driver_standings, 'driver', 'driver_standings_pos')
driver_standings.head()
driver_standings.tail()
driver_standings.to_csv('./data/driver_standings.csv', index = False)
###Output
_____no_output_____
###Markdown
Constructor Standings
###Code
constructor_rounds = rounds[8:]
constructor_standings = {'season': [],
'round':[],
'constructor': [],
'constructor_points': [],
'constructor_wins': [],
'constructor_standings_pos': []}
for n in list(range(len(constructor_rounds))):
for i in constructor_rounds[n][1]:
url = 'https://ergast.com/api/f1/{}/{}/constructorStandings.json'
r = requests.get(url.format(constructor_rounds[n][0], i))
json = r.json()
for item in json['MRData']['StandingsTable']['StandingsLists'][0]['ConstructorStandings']:
try:
constructor_standings['season'].append(int(json['MRData']['StandingsTable']['StandingsLists'][0]['season']))
except:
constructor_standings['season'].append(None)
try:
constructor_standings['round'].append(int(json['MRData']['StandingsTable']['StandingsLists'][0]['round']))
except:
constructor_standings['round'].append(None)
try:
constructor_standings['constructor'].append(item['Constructor']['constructorId'])
except:
constructor_standings['constructor'].append(None)
try:
constructor_standings['constructor_points'].append(int(item['points']))
except:
constructor_standings['constructor_points'].append(None)
try:
constructor_standings['constructor_wins'].append(int(item['wins']))
except:
constructor_standings['constructor_wins'].append(None)
try:
constructor_standings['constructor_standings_pos'].append(int(item['position']))
except:
constructor_standings['constructor_standings_pos'].append(None)
constructor_standings = pd.DataFrame(constructor_standings)
print(constructor_standings.shape)
constructor_standings = lookup(constructor_standings, 'constructor', 'constructor_points')
constructor_standings = lookup(constructor_standings, 'constructor', 'constructor_wins')
constructor_standings = lookup(constructor_standings, 'constructor', 'constructor_standings_pos')
constructor_standings.head()
constructor_standings.tail()
constructor_standings.to_csv('./data/constructor_standings.csv', index = False)
###Output
_____no_output_____ |
notebooks/series_fit.ipynb | ###Markdown
Training a time series modelFrom 3 points of financial indicators we try to predict the fourth point one year ahead. Then we look at how the change in the financial indicators affected the assignment of the default label.For example, the Prophet model takes as input a dataframe with 2 columns: the date and the value of the indicator. Thus, our model is trained on only one company and can predict the value of some parameter one year ahead. This way, for each company, without a training sample and without generalizing across other companies, we can determine the next value of this parameter and then compute the default label.
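A minimal illustration of this setup (with made-up indicator values, purely for the sake of the example) looks like this:
```python
# Three yearly observations of one indicator for one company -> forecast the fourth year.
import pandas as pd
from fbprophet import Prophet

history = pd.DataFrame({'ds': ['2016-01-01', '2017-01-01', '2018-01-01'],
                        'y': [120.0, 135.0, 150.0]})   # hypothetical values
model = Prophet()
model.fit(history)
future = model.make_future_dataframe(periods=1, freq='Y')  # add one more year
forecast = model.predict(future)
print(forecast[['ds', 'yhat']].tail(1))                    # the predicted fourth point
```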
###Code
import os
from google.colab import drive
drive.mount('/content/drive')
os.chdir('/content/drive/Shared drives/Кредитные риски')
import pandas as pd
import numpy as np
import pickle
from sklearn.metrics import accuracy_score, confusion_matrix
from fbprophet import Prophet
from tqdm import tqdm_notebook
DIR_IN = 'Датасеты/revision_007/'
###Output
_____no_output_____
###Markdown
Reading the data (only some of the codes needed to compute the default)
###Code
df_train = pd.read_pickle(f'{DIR_IN}companies_ready_train.pkl')
y_train = df_train['target']
x_train = df_train.drop(columns=['target'])
df_test = pd.read_pickle(f'{DIR_IN}companies_ready_test.pkl')
y_test = df_test['target']
x_test = df_test.drop(columns=['target'])
df_prod = pd.read_pickle(f'{DIR_IN}companies_ready_prod.pkl')
y_prod = df_prod['target']
x_prod = df_prod.drop(columns=['target'])
###Output
_____no_output_____
###Markdown
Training a model for each companyDefault calculation for each company:```code_12003 != 0 and code_15003 != 0 and code_12003 / code_15003 < 0.5code_16004 / code_16003 > 2code_12303 != 0 and code_12304 != 0 and (code_12303 / code_16003) / (code_12004 / code_16004) > 1code_12103 != 0 and code_12104 != 0 and (code_12103 / code_12104 > 3 or code_12104 / code_12103 > 3)```
###Code
code = ['1200', '1210', '1500', '1230', '1600']
def is_default(x, y):
'''
    Computes the company's default label
    False (1-2 points) - ok
    True (3-5 points) - default
'''
cond_a = ((y['1200'] != 0) & (y['1500'] != 0) & (y['1200']/y['1500'] < 0.5)).astype(np.uint8)
cond_b = (x['year_0_1600']/y['1600'] > 2).astype(np.uint8)
cond_c = ((y['1230'] != 0) & (x['year_0_1230'] != 0) & ((y['1230']/y['1600'])/(x['year_0_1200']/x['year_0_1600']) > 1)).astype(np.uint8)
cond_d = ((y['1210'] != 0) & (x['year_0_1210'] !=0 ) & ((y['1210']/x['year_0_1210'] > 3) | (x['year_0_1210']/y['1210'] > 3))).astype(np.uint8)
return (cond_a + cond_b + cond_c + cond_d + 1) > 2
y_pred = []
for index, row in tqdm_notebook(x_test[:1000].iterrows(), total=1000):
answer = {}
for c in code:
data = row.loc[[f'year_-2_{c}', f'year_-1_{c}', f'year_0_{c}']]
df = pd.DataFrame({'ds': ['2016-01-01', '2017-01-01', '2018-01-01'], 'y': data})
prph = Prophet()
prph.fit(df)
future = prph.make_future_dataframe(periods=1, freq='Y')
forecast = prph.predict(future)
answer[c] = forecast.iloc[3, 1]
y = pd.DataFrame([answer])
y_pred.append(is_default(row, y).astype(np.uint8).values[0])
with open(f'{DIR_IN}/prophet_predict.pkl', 'wb') as f:
pickle.dump(np.array(y_pred), f)
accuracy_score(y_pred, y_test[:1000])
confusion_matrix(y_pred, y_test[:1000])
sum(y_test[:1000])
###Output
_____no_output_____
###Markdown
The time series model correctly identified only 16 defaulted companies out of 152 defaulted ones. At the same time, it assigned 208 good companies to the default class even though they were not defaults.Such a model also does not output probabilities of belonging to the default class, which is another of its drawbacks.
###Code
###Output
_____no_output_____ |
analysis/WesleyBurchnall/milestone1.ipynb | ###Markdown
UBCO Data 301 Group 11 Wesley Burchnall Milestone 1 Submission
###Code
import pandas as pd
abs = pd.read_excel("../../data/raw/Seasonally Adjusted.xlsx")
abs.head()
abs.tail()
###Output
_____no_output_____ |
multi.ipynb | ###Markdown
Plotly Python - Plotly multi line chart| Plotly Python data visualization
###Code
%matplotlib inline
import plotly.offline as pyo
import plotly.graph_objects as go
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import pandas as pd
plt.style.use('ggplot')
bitcoin = pd.read_csv('data/bitcoin-usd.csv', parse_dates=['date'], index_col='date')
bitcoin.head()
bitcoin.index.strftime('%Y')
get_year = lambda date: date.strftime('%Y')
get_year(bitcoin.index)
bitcoin['year'] = get_year(bitcoin.index)
bitcoin.head()
bit_ag = pd.pivot_table(bitcoin, values=['open', 'high', 'volume'], index="year")
bit_ag.head()
fac0 = go.Scatter(
x = bit_ag.index,
y = bit_ag['high'],
mode = 'lines',
name='High'
)
fac1 = go.Scatter(
x = bit_ag.index,
y = bit_ag['open'],
mode = 'lines',
name='Open'
)
fac2 = go.Scatter(
x = bit_ag.index,
y = bit_ag['volume'],
mode = 'lines',
name='Volume'
)
data = [fac0, fac1]
layout = go.Layout(title='La moyenne des differentes colonnes')
fig = go.Figure(data=data, layout=layout)
pyo.plot(fig)
###Output
_____no_output_____ |
02_continuous_control/Continuous_Control_DDPG.ipynb | ###Markdown
Continuous Control---In this notebook, you will learn how to use the Unity ML-Agents environment for the second project of the [Deep Reinforcement Learning Nanodegree](https://www.udacity.com/course/deep-reinforcement-learning-nanodegree--nd893) program. 1. Start the EnvironmentWe begin by importing the necessary packages. If the code cell below returns an error, please revisit the project instructions to double-check that you have installed [Unity ML-Agents](https://github.com/Unity-Technologies/ml-agents/blob/master/docs/Installation.md) and [NumPy](http://www.numpy.org/).
###Code
%load_ext autoreload
%autoreload 2
from unityagents import UnityEnvironment
import numpy as np
from collections import deque
import matplotlib.pyplot as plt
%matplotlib inline
import torch
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
###Output
_____no_output_____
###Markdown
Next, we will start the environment! **_Before running the code cell below_**, change the `file_name` parameter to match the location of the Unity environment that you downloaded.- **Mac**: `"path/to/Reacher.app"`- **Windows** (x86): `"path/to/Reacher_Windows_x86/Reacher.exe"`- **Windows** (x86_64): `"path/to/Reacher_Windows_x86_64/Reacher.exe"`- **Linux** (x86): `"path/to/Reacher_Linux/Reacher.x86"`- **Linux** (x86_64): `"path/to/Reacher_Linux/Reacher.x86_64"`- **Linux** (x86, headless): `"path/to/Reacher_Linux_NoVis/Reacher.x86"`- **Linux** (x86_64, headless): `"path/to/Reacher_Linux_NoVis/Reacher.x86_64"`For instance, if you are using a Mac, then you downloaded `Reacher.app`. If this file is in the same folder as the notebook, then the line below should appear as follows:```env = UnityEnvironment(file_name="Reacher.app")```
###Code
env = UnityEnvironment(file_name='ReacherSingle_Windows_x86_64/Reacher.exe')
###Output
INFO:unityagents:
'Academy' started successfully!
Unity Academy name: Academy
Number of Brains: 1
Number of External Brains : 1
Lesson number : 0
Reset Parameters :
goal_speed -> 1.0
goal_size -> 5.0
Unity brain name: ReacherBrain
Number of Visual Observations (per agent): 0
Vector Observation space type: continuous
Vector Observation space size (per agent): 33
Number of stacked Vector Observation: 1
Vector Action space type: continuous
Vector Action space size (per agent): 4
Vector Action descriptions: , , ,
###Markdown
Environments contain **_brains_** which are responsible for deciding the actions of their associated agents. Here we check for the first brain available, and set it as the default brain we will be controlling from Python.
###Code
# get the default brain
brain_name = env.brain_names[0]
brain = env.brains[brain_name]
###Output
_____no_output_____
###Markdown
2. Examine the State and Action SpacesIn this environment, a double-jointed arm can move to target locations. A reward of `+0.1` is provided for each step that the agent's hand is in the goal location. Thus, the goal of your agent is to maintain its position at the target location for as many time steps as possible.The observation space consists of `33` variables corresponding to position, rotation, velocity, and angular velocities of the arm. Each action is a vector with four numbers, corresponding to torque applicable to two joints. Every entry in the action vector must be a number between `-1` and `1`.Run the code cell below to print some information about the environment.
###Code
# reset the environment
env_info = env.reset(train_mode=True)[brain_name]
# number of agents
num_agents = len(env_info.agents)
print('Number of agents:', num_agents)
# size of each action
action_size = brain.vector_action_space_size
print('Size of each action:', action_size)
# examine the state space
states = env_info.vector_observations
state_size = states.shape[1]
print('There are {} agents. Each observes a state with length: {}'.format(states.shape[0], state_size))
###Output
Number of agents: 1
Size of each action: 4
There are 1 agents. Each observes a state with length: 33
###Markdown
4. Training Reset the environment and retrieve information
###Code
# Reset the environment and retrieve information
env_info = env.reset(train_mode=True)[brain_name]
num_agents = len(env_info.agents)
action_size = brain.vector_action_space_size
states = env_info.vector_observations
state_size = states.shape[1]
###Output
_____no_output_____
###Markdown
Define workflow functions
###Code
def GameUpdate(states, actions):
env_info = env.step(actions)[brain_name] # send the action to the environment
num_agents = len(env_info.agents)
next_states = env_info.vector_observations # get the next state
rewards = env_info.rewards # get the reward
dones = env_info.local_done # see if episode has finished
return [(states[i], actions[i], rewards[i], next_states[i], dones[i]) for i in range(num_agents)]
def DisplayScore(scores, running_average=100, title=""):
N = len(scores)
avg = [sum(scores[i:i+running_average])/running_average for i in range(N - running_average)]
plt.figure(figsize=(15,5))
plt.plot(np.arange(N),scores)
plt.plot(np.arange(running_average, N), avg)
plt.xlabel("Episode")
plt.ylabel("Score")
plt.suptitle(title)
plt.show()
###Output
_____no_output_____
###Markdown
Define Training
###Code
from Source.Agents import DDPG_Agent
from Source.Network import DDPG_Actor, DDPG_Critic
from Source.Buffers import Buffer
def DDPG(n_episodes=500, max_t=1000, print_every=100, learn_every=20, score_window=100, success_score=30):
scores_window = deque(maxlen=score_window)
scores = []
# Create Actor and Critic Networks
actor_network = DDPG_Actor( state_size, action_size, fc1_units=400, fc2_units=200, device=device, learning_rate=1e-3)
critic_network = DDPG_Critic(state_size, action_size, fc1_units=400, fc2_units=200, weight_decay=0, device=device, learning_rate=1e-3)
# Define Buffer
buffer = Buffer(buffer_size=1e6, batch_size=1024, seed=1234)
# Define the agent
agents = [DDPG_Agent(state_size = state_size,
action_size = action_size,
actor_network = actor_network,
critic_network = critic_network,
replay_buffer = buffer,
device = device) for _ in range(num_agents)]
for episode in range(1, n_episodes+1):
env_info = env.reset(train_mode=True)[brain_name]
states = env_info.vector_observations
ep_scores = [0] * num_agents
for agent in agents:
agent.episode_start()
# ==== START EPISODE ====
for t in range(max_t):
# Retrieve action for every agent
actions = [agent.act(state) for agent, state in zip(agents, states)]
# Compute one game frame
results = GameUpdate(states, actions)
# Compute a step for every agent
i = 0
for agent, data in zip(agents,results):
state, action, reward, next_state, done = data
agent.step(state, action, reward, next_state, done)
ep_scores[i] += reward
i += 1
# Learn when appropiate
if t % learn_every == 0:
for agent in agents:
agent.learn()
# Check if the episode has ended
if all([result[-1] for result in results]):
break
# State transition
states = [next_state for state, action, reward, next_state, done in results]
# ==== END EPISODE ====
for agent in agents:
agent.episode_end()
# Update Scores
mean_score = np.mean(ep_scores)
scores.append(mean_score)
scores_window.append(mean_score)
mean_window = np.mean(scores_window) if len(scores_window) > 0 else 0
# Checkpoint Networks
agent.save("Models", "DDPG")
# Verbose Information
if episode % print_every == 0:
print('\rEpisode {}\tAverage Score: {:.2f} '.format(episode, mean_window))
else:
print('\rEpisode {}\tAverage Score: {:.2f} | Last: {:.2f}'.format(episode, mean_window, mean_score), end="")
if mean_window > success_score:
print("\r[Solved] Episode {}\tAverage Score: {:.2f} ".format(episode, mean_window))
break
return scores
###Output
_____no_output_____
###Markdown
Train and display results
###Code
scores = DDPG(n_episodes=1000)
DisplayScore(scores)
###Output
Episode 100 Average Score: 0.72
Episode 200 Average Score: 2.12
Episode 300 Average Score: 5.27
Episode 400 Average Score: 11.24
Episode 500 Average Score: 17.36
Episode 600 Average Score: 24.57
Episode 700 Average Score: 29.07
[Solved] Episode 730 Average Score: 30.0534.89
###Markdown
*Note: The average score in episode 730 should be 30.05. The additional characters appear because it did not fully erase the previously printed line.* 5. Close Environment
###Code
env.close()
###Output
_____no_output_____ |
Combinatorics - Basics & Code.ipynb | ###Markdown
*Total work in progress; more than anything an early attempt at sorting it all out by myself. Code and derivations are my own and not necessarily bug-free (though about 80% of the equations here are standard results that are easy to find).* Combinatorics *If there are m ways to do one thing, and n ways to do another, then there are mn ways of doing both.* - Deepak ChopraI am using the language convention that 'collections' are unordered and 'sequences' are ordered. Principles Product PrincipleIf we have a partition of a set S into m blocks, each of size n, then S has size mn. Quotient PrincipleIf we partition a set of size p into q blocks, each of size r, then q = p/r. Sum PrincipleIf we have a partition of a set S, then the size of S is the sum of the sizes of the blocks of the partition. Bijection PrincipleTwo sets have the same size if and only if there is a bijection between them. Combinatorics Code- factorial- binomial coefficient- subsets of size n (choices of n out of S)- subsets of size n+m where n and m are drawn from different, possibly overlapping sets- multinomial- permutations- unordered multiplicities for all n-sized multisets with m classes (no permutations of multiplicities)- multiplicities for all n-sized multisets with m classes
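A tiny worked illustration of the product and quotient principles (toy values, using the standard library rather than the functions defined below):
```python
# Product principle: 4 choices for the first slot times 3 for the second -> 12 ordered pairs.
# Quotient principle: the 12 ordered pairs partition into blocks of size 2! -> 12 / 2 = 6 unordered pairs.
import itertools

S = ['a', 'b', 'c', 'd']
ordered_pairs = list(itertools.permutations(S, 2))
unordered_pairs = list(itertools.combinations(S, 2))
print(len(ordered_pairs), len(unordered_pairs))  # 12 6
```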
###Code
import numpy as np
def factorial(x):
if x == 0:
return 1
else:
res = 1
for i in range(1,x+1):
res *= i
return int(res)
def binomial(n,k):
"""
binomial coefficients. convention is that it's 0 if k>n
"""
if k <= n:
return int(factorial(n)/(factorial(k)*factorial(n-k)))
else:
return 0
def multinomial(n,k):
"""
multinomial coefficient. k is multiindex. Returns error if sum(k) != n
"""
assert np.sum(k) == n
return int(factorial(n)/np.product([factorial(x) for x in k]))
def unordered_multiplicities(n,m,k0_max=None):
"""
Generator that yields all unordered multiplicities for multisets with n objects of m classes.
The convention is that they are sorted:
n_1 >= n_2 >= n_3 >= ... >= n_m
Example: (n=3,m=3) returns [3,0,0],[2,1,0],[1,1,1]
I don't know what the proper math term for this is: If a set of n elements is partitioned into m subsets,
    then these are the possible relative sizes that the partition's blocks can have.
    In a multiset of size n, so {1,1,2} = [2,0] is different from {1,2,2} = [0,2]. The multiplicities are ordered,
    and so [0,2] and [2,0] are counted twice.
    The "unordered multiplicities" here treat [2,0] and [0,2] as identical.
    As of now, I don't know how many there should be, but I think that the solution probably lies
in creating a bijection to lattice paths. For the case m = n, I think the count might be the Catalan number C_n.
I haven't checked.
EDIT: These are called partitions and I am using the 'decreasing list' representation of partitions
"""
if m == 0:
yield []
else:
# define range of largest entry
if not k0_max:
k0_max = n
k0_min = int(np.ceil(n/m))
# iterate over largest entry, add combinations of remaining entries recursively
for k0 in range(k0_min,k0_max+1):
# the remaining entries are distributed as n-k0 objects over m-1 buckets, including constraint on maximum amount
k_rem = unordered_multiplicities(n-k0,m-1,k0_max=np.min([n-k0,k0]))
for k_n in k_rem:
res = [k0] + k_n
assert np.sum(res) == n
yield res
def permutations(x):
"""
generator that yields all permutations of an array x
will be a total of factorial(len(x)) if all elements of x are distinguishable
(you can just use itertools library)
"""
pivots = set()
if len(x) == 0 or len(x) == 1:
yield x
elif len(x) == 2:
if x[0] == x[1]:
yield x
else:
res = [x,x[::-1]]
for i in range(2):
yield res[i]
else:
for i in range(len(x)):
p = x[i]
if p in pivots:
pass
else:
pivots.add(p)
y = permutations(x[:i]+x[i+1:])
for remainder in y:
yield [p] + remainder
def subsets(n,S):
"""
Return all choices n out of S
assumes all elements of the set are distinguishable
"""
S = list(S)
assert len(S) >= n
if n == 0:
yield []
else:
for i in range(len(S)-n+1):
p = [S[i]]
rem = subsets(n-1,S[i+1:])
for r in rem:
yield p+r
def choose(n,m,A,B):
"""
Returns all unordered collections where n are chosen from A and m are chosen from B, where A & B may overlap.
"""
C = [x for x in A if x in set(B)]
A_ = [x for x in A if x not in B]
B_ = [x for x in B if x not in A]
a_ = len(A_)
b_ = len(B_)
c = len(C)
for i in range(np.max([n-a_,0]),np.min([c,n])+1):
for j in range(np.max([m-b_,0]),np.min([c-i,m])+1):
c_C = subsets(i+j,C)
for cc in c_C: # iterate over picks from intersection
a_A = subsets(n-i,A_)
for aa in a_A: # iterate over picks from A
b_B = subsets(m-j,B_)
for bb in b_B: # iterate over picks from B
yield aa+bb+cc
def multisets(n,m):
"""
All multisets of n objects of m classes. All possible ways for n objects divided into m distinguishable classes.
ex.: [1,0,0],[0,1,0],[0,0,1]
Will be a total of (n multichoose m)
This could be huge.
"""
distributions = unordered_multiplicities(n,m)
for distribution in distributions:
mindexes = permutations(distribution)
for mindex in mindexes:
yield mindex
###Output
_____no_output_____
###Markdown
Subsets and Permutations k-Element PermutationsA permutation is an ordered sequence of distinguishable elements drawn from some set without replacement. Let $[S]$ be a set of $S$ distinguishable elements. A $k$-element permutation can be thought of as a injective function from $[k]=\{1,2,3,...,k\}$ to $[S]$. There are $S^{\underline{k}} = \frac{|S|!}{(|S|-k)!}$ different $k$-element permutations. The notation $S^\underline{k} = S(S-1)(S-2)...(S-k+1)$ is the "falling factorial". Number of Subsets of Set with Size nThe number of subsets of a set $S$ with size $n$ is $2^n$. One way to calculate that:\begin{equation}n_{subsets} = \sum^n_{i=0} (\mathrm{ways\ of\ picking\ i\ out\ of\ n}) = \sum^n_{i=0}\left(\begin{array}{c}n\\i\end{array}\right) = 2^n\end{equation}There is a better way, though. A subset can be thought of as assigning a label to each element of $S$: it's in or out. That means that any possible subset cen be described by an $n$-element binary string and vice versa -- the subsets and the $n$-element strings can be linked by bijection. The number of subsets is therefore the number of $n$-element binary strings, which is:\begin{equation}n_{subsets} = 2^n\end{equation} Number of ways to Divide a Set of size n into m PartitionsEqually well, one might ask: how many ways are there to partition $S$ into $k$ subsets? In that case there are $k$ labels, so there are $k^n$ ways. That means:\begin{equation}\sum_{\begin{array}{c}k_1,k_2,...,k_m\\ \sum_{k_i}=n\end{array}}\left(\begin{array}{c}n\\k_1 k_2 ... k_m \end{array}\right) = m^n\end{equation}Which is: (ways of taking $k_1$ out of $n$) x (ways of taking $k_2$ out of ($n-k_1$)) x (ways of ...). Progressively dividing the set into subsets, and adding up all ways of making the division. Sequences and Collections Sequences sampled with ReplacementLet [A] be a set of size $A$.\begin{equation}\begin{array}{c}S = \left\{ (a_1,a_2,...,a_n) : a_1,a_2,...,a_n\in [A] \right\}\\|S| = A^n\end{array}\end{equation}The creation of n-tuples $(a_1,a_2,...,a_n)$ from elements of $A$ can be thought of in terms of functions that map each element of the tuple ($a_i$ for any choice of $i\in (1,n)$) to an element of $A$. For ordered tuples, these functions are always injective.*Example: bit strings of length $3$: [0,0,0],[0,0,1],[0,1,1],[1,0,1],...* Collections sampled with Replacement (Multisets)Let [A] a set of size A.\begin{equation}S = \left\{ \{a_1,a_2,...,a_n\} : a_1,a_2,...,a_n \in [A] \right\}\end{equation}This can be thought of as picking a total of $n$ objects from a choice of $k$ classes. There are three entries, that can correspond to up to $\min(3,A)$ different elements of $[A]$.For $n=3$, the options are: they are all the same, two are the same, or they're all different. There are $A$ ways for them to be all the same, $2*A*(A-1)$ ways for two of them to be the same (the factor 2 is because $(a,a,b) \neq (b,b,a)$) and $\left(\begin{array}{c}A\\3\end{array}\right)$ ways for them to all be different. Instead of expressing the elements of S in terms of the unordered multiset $\{a,b,c\}$, we can write them in terms of an ordered sequence that contains the multiplicity of the elements of the multisets. That is, if $[A] = \{1,2,3,4\}$, we write $\{1,1,2\} \in S$ as $[2_1|1_2|0_3|0_4]$. This is a bijection between elements of $S$ and the elements of $N = \{[i_1,i_2,...,i_k]: i_1,i_2,...,i_k \in \mathbb{N}_0^+ , \sum_{i_j} i_j = n\}$ (all length $k$ sequences of integers $[i_1,i_2,...,i_k]$ so that $\sum_{i_j} i_j = n$). 
 The bijection implies that $S$ and $N$ have the same size. The number of elements in $N$ is the Bose-Einstein Coefficient that is derived in the ``Bose Einstein Coefficients - Stars and Bars`` notebook. Therefore:\begin{equation}|S| = \left(\begin{array}{c}n+k-1\\n\end{array}\right)\end{equation}This is also known as *multichoose*. It is the number of $n$-element multisets on $k$ symbols.*Example: You are 8 years old and you get to pick n pieces of candy from a candy store that has k different types of candy.**Example: Ways for n bosons to be distributed over k degenerate states.**Example: The number of ways to draw n elements from k equally likely classes, when the order does not matter.**Example: The number of ways to distribute n identical items over n labeled boxes. (In this case k=n)* *Example: In a universe of k stocks, how many portfolios are possible that consist of n stocks?* Sequences sampled without ReplacementAssume $a,b,c$ are all drawn from a single set $[A]$ without replacement. Then $|S| = A(A-1)(A-2)$; in general, for $k$ draws from a set of size $n$, $|S| = \frac{n!}{(n-k)!} = n^\underline{k}$. *Example: Possible ways of assigning first, second and third place in a competition.* Collections sampled without ReplacementThis means selecting $k$ out of $n$:\begin{equation}|S| = \frac{n^\underline{k}}{k!} = \left(\begin{array}{c}n\\k\end{array}\right)\end{equation}Which is the *binomial coefficient*. That's the same as taking the number of ordered $k$-tuples and dividing by the number of internal orderings $k!$.*Example: Possible ways that k fermions might be distributed over n degenerate states.**Example: Possible groups of k people that can be formed out of a pool of n.* Collections sampled from several Disjoint Sets with or without ReplacementLet $[A]$, $[B]$ and $[C]$ be disjoint sets of size $A$, $B$ and $C$, respectively. \begin{equation}\begin{array}{c}S = \left\{ \{a,b,c\} : a\in [A], b\in [B], c\in[C] \right\}\\|S| = ABC\end{array}\end{equation}Each entry of the tuple is drawn from its own disjoint set, so two distinct elements of $S$ can never be permutations of each other and different orderings do not lead to overcounting.*Example: Choose one drink $a$, one sandwich $b$ and one free T-shirt $c$. How many options are there?* Collections from several Overlapping Sets with ReplacementLet $[A]$ and $[B]$ be two sets of size $A$ and $B$ respectively, where $[A]$ and $[B]$ may or may not overlap. Let:\begin{equation}S = \{\{a_1,a_2,...,a_n,b_1,b_2,...,b_m\}: a\in[A],b\in[B]\}\end{equation}Then, I *think*, but haven't checked:\begin{equation}|S| = \left( \begin{array}{c}n+A-1\\n \end{array}\right)\left( \begin{array}{c}m+B-1\\m \end{array}\right)\end{equation} Collections from several Overlapping Sets without ReplacementLet $[A]$ and $[B]$ be two sets of size $A$ and $B$ respectively, so that $[A] \neq [B]$ but $[A]\cap[B]=[C]$, with $C$ the size of $[C]$. 
 The set $S$ consists of all collections of $n$ items from $[A]$ and $m$ items from $[B]$.Let:\begin{equation}S = \{\{a_1,a_2,...,a_n,b_1,b_2,...,b_m\}: a\in[A],b\in[B]\}\end{equation}Then the size of $S$ is:\begin{equation}|S| = \sum_{\max(n-A+C,0)\leq i \leq \min(C,n)\\\max(m-B+C,0)\leq j \leq \min(C,m) \\i+j\leq C} \left(\begin{array}{c}C\\i+j\end{array}\right)\left(\begin{array}{c}A-C\\n-i\end{array}\right)\left(\begin{array}{c}B-C\\m-j\end{array}\right)\end{equation}This can be thought of as choosing unordered collections from the 3 disjoint sets $[C]$, $[A]\setminus[C]$ and $[B]\setminus[C]$ and summing over the ways in which the $[A]$- and $[B]$-assigned elements can be drawn from $[C]$. TO DO: Generalize this to more than two sets. The key will be to reduce the problem to sampling from disjoint sets again. *Example: You want to travel to 6 countries, of which at least 3 are Spanish speaking and at least 3 are in Europe.* ExperimentsTesting almost everything.
###Code
# Permutations
print("Permutations:")
for n in range(10):
x = list(range(n))
print('objects: %i\t permutations: %i %i' % (n,len(list(permutations(x))),factorial(n)))
#Subsets
nn = [2,5,10]
mm = [2,5,10]
print("\nTotal number of subsets of an n-sized set")
for n in nn:
n_subsets = 0
for i in range(n+1):
n_subsets += binomial(n,i) # choose i out of n
print("n: %i\t %i %i" % (n,n_subsets,2**n))
#Number of Partitionings into m Partitions
print("\nTotal number of partitionings of an n-sized set into m")
for n in nn:
for m in mm:
n_partitions = 0
for mindex in multisets(n,m):
n_partitions += multinomial(n,mindex)
print("n: %i m: %i\t %i %i" % (n,m,n_partitions,m**n))
print("\nSubsets Generator:")
S = 'A,B,C,D,E'.split(',')
for n in range(6):
print('choose %i out of %i\t size: %i %i' % (n,len(S),len(list(subsets(n,S))),binomial(len(S),n)))
print([''.join(x) for x in subsets(n,S)])
# Class balances when choosing n from k classes
print("\nMultiplicities:")
print("Class balances when choosing n from k classes")
m = 4
for n in range(10):
x = list(range(n))
print('objects: %i classes: %i\t size: %i %i' % (n,m,len(list(unordered_multiplicities(n,m))),0))
print(list(unordered_multiplicities(n,m)))
# Collections of n objects from k classes with replacement
print("\nMultisets:")
print("(Collections of n objects from k classes with replacement)")
m = 4
for n in range(7):
x = list(range(n))
print('objects: %i classes: %i\t size: %i %i' % (n,m,len(list(multisets(n,m))),
factorial(n+m-1)/(factorial(n)*factorial(m-1))))
print(list(multisets(n,m)))
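# Cross-check (added, standard library only): the multichoose counts above should match the number
# of unordered draws produced by itertools.combinations_with_replacement for the same n and m.
from itertools import combinations_with_replacement
for n in range(7):
    assert len(list(combinations_with_replacement(range(m), n))) == len(list(multisets(n, m)))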
A = list('1234')
ordered_pairs = np.vstack(np.vstack(np.vstack([[[[(a,b,c) for a in A ] for b in A]] for c in A])))
print('\nOrdered Sequences with Replacement %i %i' % (len(ordered_pairs),len(A)**3))
print([''.join(x) for x in ordered_pairs])
unordered_pairs = {tuple(sorted(pair)) for pair in ordered_pairs}
print('\nUnordered Collections with Replacement %i %i' % (len(unordered_pairs),factorial(3+len(A)-1)/(factorial(3)*factorial(len(A)-1))))
print([''.join(x) for x in sorted(list(unordered_pairs))])
unordered_pairs_without_replacement = []
for i in range(len(A)):
for j in range(i+1,len(A)):
for k in range(j+1,len(A)):
unordered_pairs_without_replacement+=[(A[i],A[j],A[k])]
print('\nUnordered Collections without Replacement %i %i' % (len(unordered_pairs_without_replacement),
factorial(len(A))/(factorial(3)*factorial(len(A)-3))))
print([''.join(x) for x in sorted(unordered_pairs_without_replacement)])
def count_choices(n,m,A,B):
"""
Count choices for drawing n from A and m from B without replacement, where A and B may overlap.
"""
C = [x for x in A if x in B]
a = len(A)
b = len(B)
c = len(C)
i_min = np.max([n-a+c,0])
j_min = np.max([m-b+c,0])
s = 0
for i in range(i_min,np.min([c,n])+1):
for j in range(j_min,np.min([c-i,m])+1):
s += binomial(c,i+j)*binomial(a-c,n-i)*binomial(b-c,m-j)
return s
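# Brute-force cross-check (added, standard library only): enumerate disjoint draws of n items from A
# and m items from B directly and count the distinct combined collections. Only feasible for small
# sets, but it should agree with count_choices above.
from itertools import combinations
def count_choices_bruteforce(n, m, A, B):
    found = set()
    for a_pick in combinations(A, n):
        for b_pick in combinations(B, m):
            if set(a_pick).isdisjoint(b_pick):  # each element may be drawn at most once in total
                found.add(frozenset(a_pick + b_pick))
    return len(found)
assert count_choices_bruteforce(1, 1, ['Spain','Mexico','Cuba'], ['Spain','France','Germany']) == 8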
A = 'Spain,Mexico,Cuba'.split(',') # spanish speaking countries
B = 'Spain,France,Germany'.split(',') # countries in Europe
print("\nChoose from Overlapping Sets")
for n in range(len(A)):
for m in range(len(B)):
S = list(choose(n,m,A,B))
print("Spanish speaking: %i European: %i \t %i %i" % (n,m,len(S),count_choices(n,m,A,B)))
print(['-'.join(x) for x in S])
###Output
Permutations:
objects: 0 permutations: 1 1
objects: 1 permutations: 1 1
objects: 2 permutations: 2 2
objects: 3 permutations: 6 6
objects: 4 permutations: 24 24
objects: 5 permutations: 120 120
objects: 6 permutations: 720 720
objects: 7 permutations: 5040 5040
objects: 8 permutations: 40320 40320
objects: 9 permutations: 362880 362880
Total number of subsets of an n-sized set
n: 2 4 4
n: 5 32 32
n: 10 1024 1024
Total number of partitionings of an n-sized set into m
n: 2 m: 2 4 4
n: 2 m: 5 25 25
n: 2 m: 10 100 100
n: 5 m: 2 32 32
n: 5 m: 5 3125 3125
n: 5 m: 10 100000 100000
n: 10 m: 2 1024 1024
n: 10 m: 5 9765625 9765625
n: 10 m: 10 10000000000 10000000000
Subsets Generator:
choose 0 out of 5 size: 1 1
['']
choose 1 out of 5 size: 5 5
['A', 'B', 'C', 'D', 'E']
choose 2 out of 5 size: 10 10
['AB', 'AC', 'AD', 'AE', 'BC', 'BD', 'BE', 'CD', 'CE', 'DE']
choose 3 out of 5 size: 10 10
['ABC', 'ABD', 'ABE', 'ACD', 'ACE', 'ADE', 'BCD', 'BCE', 'BDE', 'CDE']
choose 4 out of 5 size: 5 5
['ABCD', 'ABCE', 'ABDE', 'ACDE', 'BCDE']
choose 5 out of 5 size: 1 1
['ABCDE']
Multiplicities:
Class balances when choosing n from k classes
objects: 0 classes: 4 size: 1 0
[[0, 0, 0, 0]]
objects: 1 classes: 4 size: 1 0
[[1, 0, 0, 0]]
objects: 2 classes: 4 size: 2 0
[[1, 1, 0, 0], [2, 0, 0, 0]]
objects: 3 classes: 4 size: 3 0
[[1, 1, 1, 0], [2, 1, 0, 0], [3, 0, 0, 0]]
objects: 4 classes: 4 size: 5 0
[[1, 1, 1, 1], [2, 1, 1, 0], [2, 2, 0, 0], [3, 1, 0, 0], [4, 0, 0, 0]]
objects: 5 classes: 4 size: 6 0
[[2, 1, 1, 1], [2, 2, 1, 0], [3, 1, 1, 0], [3, 2, 0, 0], [4, 1, 0, 0], [5, 0, 0, 0]]
objects: 6 classes: 4 size: 9 0
[[2, 2, 1, 1], [2, 2, 2, 0], [3, 1, 1, 1], [3, 2, 1, 0], [3, 3, 0, 0], [4, 1, 1, 0], [4, 2, 0, 0], [5, 1, 0, 0], [6, 0, 0, 0]]
objects: 7 classes: 4 size: 11 0
[[2, 2, 2, 1], [3, 2, 1, 1], [3, 2, 2, 0], [3, 3, 1, 0], [4, 1, 1, 1], [4, 2, 1, 0], [4, 3, 0, 0], [5, 1, 1, 0], [5, 2, 0, 0], [6, 1, 0, 0], [7, 0, 0, 0]]
objects: 8 classes: 4 size: 15 0
[[2, 2, 2, 2], [3, 2, 2, 1], [3, 3, 1, 1], [3, 3, 2, 0], [4, 2, 1, 1], [4, 2, 2, 0], [4, 3, 1, 0], [4, 4, 0, 0], [5, 1, 1, 1], [5, 2, 1, 0], [5, 3, 0, 0], [6, 1, 1, 0], [6, 2, 0, 0], [7, 1, 0, 0], [8, 0, 0, 0]]
objects: 9 classes: 4 size: 18 0
[[3, 2, 2, 2], [3, 3, 2, 1], [3, 3, 3, 0], [4, 2, 2, 1], [4, 3, 1, 1], [4, 3, 2, 0], [4, 4, 1, 0], [5, 2, 1, 1], [5, 2, 2, 0], [5, 3, 1, 0], [5, 4, 0, 0], [6, 1, 1, 1], [6, 2, 1, 0], [6, 3, 0, 0], [7, 1, 1, 0], [7, 2, 0, 0], [8, 1, 0, 0], [9, 0, 0, 0]]
Multisets:
(Collections of n objects from k classes with replacement)
objects: 0 classes: 4 size: 1 1
[[0, 0, 0, 0]]
objects: 1 classes: 4 size: 4 4
[[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]
objects: 2 classes: 4 size: 10 10
[[1, 1, 0, 0], [1, 0, 1, 0], [1, 0, 0, 1], [0, 1, 1, 0], [0, 1, 0, 1], [0, 0, 1, 1], [2, 0, 0, 0], [0, 2, 0, 0], [0, 0, 2, 0], [0, 0, 0, 2]]
objects: 3 classes: 4 size: 20 20
[[1, 1, 1, 0], [1, 1, 0, 1], [1, 0, 1, 1], [0, 1, 1, 1], [2, 1, 0, 0], [2, 0, 1, 0], [2, 0, 0, 1], [1, 2, 0, 0], [1, 0, 2, 0], [1, 0, 0, 2], [0, 2, 1, 0], [0, 2, 0, 1], [0, 1, 2, 0], [0, 1, 0, 2], [0, 0, 2, 1], [0, 0, 1, 2], [3, 0, 0, 0], [0, 3, 0, 0], [0, 0, 3, 0], [0, 0, 0, 3]]
objects: 4 classes: 4 size: 35 35
[[1, 1, 1, 1], [2, 1, 1, 0], [2, 1, 0, 1], [2, 0, 1, 1], [1, 2, 1, 0], [1, 2, 0, 1], [1, 1, 2, 0], [1, 1, 0, 2], [1, 0, 2, 1], [1, 0, 1, 2], [0, 2, 1, 1], [0, 1, 2, 1], [0, 1, 1, 2], [2, 2, 0, 0], [2, 0, 2, 0], [2, 0, 0, 2], [0, 2, 2, 0], [0, 2, 0, 2], [0, 0, 2, 2], [3, 1, 0, 0], [3, 0, 1, 0], [3, 0, 0, 1], [1, 3, 0, 0], [1, 0, 3, 0], [1, 0, 0, 3], [0, 3, 1, 0], [0, 3, 0, 1], [0, 1, 3, 0], [0, 1, 0, 3], [0, 0, 3, 1], [0, 0, 1, 3], [4, 0, 0, 0], [0, 4, 0, 0], [0, 0, 4, 0], [0, 0, 0, 4]]
objects: 5 classes: 4 size: 56 56
[[2, 1, 1, 1], [1, 2, 1, 1], [1, 1, 2, 1], [1, 1, 1, 2], [2, 2, 1, 0], [2, 2, 0, 1], [2, 1, 2, 0], [2, 1, 0, 2], [2, 0, 2, 1], [2, 0, 1, 2], [1, 2, 2, 0], [1, 2, 0, 2], [1, 0, 2, 2], [0, 2, 2, 1], [0, 2, 1, 2], [0, 1, 2, 2], [3, 1, 1, 0], [3, 1, 0, 1], [3, 0, 1, 1], [1, 3, 1, 0], [1, 3, 0, 1], [1, 1, 3, 0], [1, 1, 0, 3], [1, 0, 3, 1], [1, 0, 1, 3], [0, 3, 1, 1], [0, 1, 3, 1], [0, 1, 1, 3], [3, 2, 0, 0], [3, 0, 2, 0], [3, 0, 0, 2], [2, 3, 0, 0], [2, 0, 3, 0], [2, 0, 0, 3], [0, 3, 2, 0], [0, 3, 0, 2], [0, 2, 3, 0], [0, 2, 0, 3], [0, 0, 3, 2], [0, 0, 2, 3], [4, 1, 0, 0], [4, 0, 1, 0], [4, 0, 0, 1], [1, 4, 0, 0], [1, 0, 4, 0], [1, 0, 0, 4], [0, 4, 1, 0], [0, 4, 0, 1], [0, 1, 4, 0], [0, 1, 0, 4], [0, 0, 4, 1], [0, 0, 1, 4], [5, 0, 0, 0], [0, 5, 0, 0], [0, 0, 5, 0], [0, 0, 0, 5]]
objects: 6 classes: 4 size: 84 84
[[2, 2, 1, 1], [2, 1, 2, 1], [2, 1, 1, 2], [1, 2, 2, 1], [1, 2, 1, 2], [1, 1, 2, 2], [2, 2, 2, 0], [2, 2, 0, 2], [2, 0, 2, 2], [0, 2, 2, 2], [3, 1, 1, 1], [1, 3, 1, 1], [1, 1, 3, 1], [1, 1, 1, 3], [3, 2, 1, 0], [3, 2, 0, 1], [3, 1, 2, 0], [3, 1, 0, 2], [3, 0, 2, 1], [3, 0, 1, 2], [2, 3, 1, 0], [2, 3, 0, 1], [2, 1, 3, 0], [2, 1, 0, 3], [2, 0, 3, 1], [2, 0, 1, 3], [1, 3, 2, 0], [1, 3, 0, 2], [1, 2, 3, 0], [1, 2, 0, 3], [1, 0, 3, 2], [1, 0, 2, 3], [0, 3, 2, 1], [0, 3, 1, 2], [0, 2, 3, 1], [0, 2, 1, 3], [0, 1, 3, 2], [0, 1, 2, 3], [3, 3, 0, 0], [3, 0, 3, 0], [3, 0, 0, 3], [0, 3, 3, 0], [0, 3, 0, 3], [0, 0, 3, 3], [4, 1, 1, 0], [4, 1, 0, 1], [4, 0, 1, 1], [1, 4, 1, 0], [1, 4, 0, 1], [1, 1, 4, 0], [1, 1, 0, 4], [1, 0, 4, 1], [1, 0, 1, 4], [0, 4, 1, 1], [0, 1, 4, 1], [0, 1, 1, 4], [4, 2, 0, 0], [4, 0, 2, 0], [4, 0, 0, 2], [2, 4, 0, 0], [2, 0, 4, 0], [2, 0, 0, 4], [0, 4, 2, 0], [0, 4, 0, 2], [0, 2, 4, 0], [0, 2, 0, 4], [0, 0, 4, 2], [0, 0, 2, 4], [5, 1, 0, 0], [5, 0, 1, 0], [5, 0, 0, 1], [1, 5, 0, 0], [1, 0, 5, 0], [1, 0, 0, 5], [0, 5, 1, 0], [0, 5, 0, 1], [0, 1, 5, 0], [0, 1, 0, 5], [0, 0, 5, 1], [0, 0, 1, 5], [6, 0, 0, 0], [0, 6, 0, 0], [0, 0, 6, 0], [0, 0, 0, 6]]
Ordered Sequences with Replacement 64 64
['111', '211', '311', '411', '121', '221', '321', '421', '131', '231', '331', '431', '141', '241', '341', '441', '112', '212', '312', '412', '122', '222', '322', '422', '132', '232', '332', '432', '142', '242', '342', '442', '113', '213', '313', '413', '123', '223', '323', '423', '133', '233', '333', '433', '143', '243', '343', '443', '114', '214', '314', '414', '124', '224', '324', '424', '134', '234', '334', '434', '144', '244', '344', '444']
Unordered Collections with Replacement 20 20
['111', '112', '113', '114', '122', '123', '124', '133', '134', '144', '222', '223', '224', '233', '234', '244', '333', '334', '344', '444']
Unordered Collections without Replacement 4 4
['123', '124', '134', '234']
Choose from Overlapping Sets
Spanish speaking: 0 European: 0 1 1
['']
Spanish speaking: 0 European: 1 3 3
['France', 'Germany', 'Spain']
Spanish speaking: 0 European: 2 3 3
['France-Germany', 'France-Spain', 'Germany-Spain']
Spanish speaking: 1 European: 0 3 3
['Mexico', 'Cuba', 'Spain']
Spanish speaking: 1 European: 1 8 8
['Mexico-France', 'Mexico-Germany', 'Cuba-France', 'Cuba-Germany', 'Mexico-Spain', 'Cuba-Spain', 'France-Spain', 'Germany-Spain']
Spanish speaking: 1 European: 2 7 7
['Mexico-France-Germany', 'Cuba-France-Germany', 'Mexico-France-Spain', 'Mexico-Germany-Spain', 'Cuba-France-Spain', 'Cuba-Germany-Spain', 'France-Germany-Spain']
Spanish speaking: 2 European: 0 3 3
['Mexico-Cuba', 'Mexico-Spain', 'Cuba-Spain']
Spanish speaking: 2 European: 1 7 7
['Mexico-Cuba-France', 'Mexico-Cuba-Germany', 'Mexico-Cuba-Spain', 'Mexico-France-Spain', 'Mexico-Germany-Spain', 'Cuba-France-Spain', 'Cuba-Germany-Spain']
Spanish speaking: 2 European: 2 5 5
['Mexico-Cuba-France-Germany', 'Mexico-Cuba-France-Spain', 'Mexico-Cuba-Germany-Spain', 'Mexico-France-Germany-Spain', 'Cuba-France-Germany-Spain']
|
PredictionModels/LSTMonOHLC25.ipynb | ###Markdown
Imports
###Code
import numpy as np
from sklearn import metrics
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_absolute_error , mean_squared_error
import tensorflow as tf
from tensorflow.keras.layers import LSTM , Dropout , Dense
from tensorflow.keras.models import Sequential
import pandas as pd
import matplotlib.pyplot as plt
df = pd.read_csv('../../data/Equity/NSE50.csv')
df['Date'] = pd.to_datetime(df['Date'])
df.index = df['Date']
df.drop(df[df['High']==df['Low']].index , axis=0 , inplace=True)
df.head()
df.drop(['Ticker' , 'Date' ,'Time', 'Volume' , 'OI '] , axis=1 , inplace=True)
data = df.copy()
# Training and testing data
train_size = int(len(data) *0.80)
train = data[:train_size]
test = data[train_size :]
scaler = MinMaxScaler(feature_range=(0,1))
scaled_close = scaler.fit_transform(np.array(train['Close']).reshape(-1,1))
scaled_open = scaler.transform(np.array(train['Open']).reshape(-1,1))
scaled_high = scaler.transform(np.array(train['High']).reshape(-1,1))
scaled_low = scaler.transform(np.array(train['Low']).reshape(-1,1))
open_list = []
close_list = []
high_list = []
low_list = []
for i in range(len(scaled_open)):
open_list.append(scaled_open[i])
high_list.append(scaled_high[i])
low_list.append(scaled_low[i])
close_list.append( scaled_close[i])
open_arr = np.array(open_list).reshape(-1,1)
high_arr = np.array(high_list).reshape(-1,1)
low_arr = np.array(low_list).reshape(-1,1)
close_arr = np.array(close_list).reshape(-1,1)
scaled_data = pd.DataFrame(open_arr , columns=['Open'])
scaled_data['High'] = high_arr
scaled_data['Low'] = low_arr
scaled_data['Close'] = close_arr
scaled_train = np.array(scaled_data)
X_train = []
y_train = []
for x in range(60,len(scaled_train)-1):
temp = []
for i in scaled_train[x-60 :x,]:
temp.append(i[0] )
temp.append(i[1])
temp.append( i[2])
temp.append(i[3])
X_train.append(temp)
y_train.append(scaled_train[x+1][3])
X_train = np.array(X_train)
y_train = np.array(y_train)
X_train[1] , y_train[0]
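# Hedged alternative (added, illustrative): the same 60-step windows over the four OHLC columns can be
# built with NumPy slicing instead of the nested loops above; `make_windows` is an assumed helper name
# and is not used elsewhere in this notebook.
def make_windows(frames, lookback=60):
    X, y = [], []
    for t in range(lookback, len(frames) - 1):
        X.append(frames[t - lookback:t].reshape(-1))  # 60 rows x 4 columns -> 240 flattened values
        y.append(frames[t + 1][3])                    # next-step Close (column index 3)
    return np.array(X), np.array(y)
# e.g. X_train_alt, y_train_alt = make_windows(scaled_train) should reproduce X_train / y_train above.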
test_scaled_close = scaler.transform(np.array(test['Close']).reshape(-1,1))
test_scaled_open = scaler.transform(np.array(test['Open']).reshape(-1,1))
test_scaled_high = scaler.transform(np.array(test['High']).reshape(-1,1))
test_scaled_low = scaler.transform(np.array(test['Low']).reshape(-1,1))
open_list = []
close_list = []
high_list = []
low_list = []
for i in range(len(test_scaled_open)):
open_list.append(test_scaled_open[i])
high_list.append(test_scaled_high[i])
low_list.append(test_scaled_low[i])
close_list.append( test_scaled_close[i])
open_arr = np.array(open_list).reshape(-1,1)
high_arr = np.array(high_list).reshape(-1,1)
low_arr = np.array(low_list).reshape(-1,1)
close_arr = np.array(close_list).reshape(-1,1)
scaled_data = pd.DataFrame(open_arr , columns=['Open'])
scaled_data['High'] = high_arr
scaled_data['Low'] = low_arr
scaled_data['Close'] = close_arr
scaled_test = np.array(scaled_data)
X_test = []
y_test = []
for x in range(60,len(scaled_test)-1):
temp = []
for i in scaled_test[x-60 :x,]:
temp.append(i[0] )
temp.append(i[1])
temp.append( i[2])
temp.append(i[3])
X_test.append(temp)
y_test.append(scaled_test[x+1][3])
X_test = np.array(X_test)
y_test = np.array(y_test)
lstm = Sequential()
lstm.add(LSTM(250 ,return_sequences=True , input_shape=(240,1)))
lstm.add(LSTM(250 , return_sequences = True))
lstm.add(LSTM(250))
lstm.add(Dense(1))
lstm.compile(loss='mean_squared_error' , optimizer='adam' , metrics=['accuracy','mse'])
lstm.summary()
X_train = X_train.reshape((-1,240 , 1))
y_train = y_train.reshape(-1,1)
X_test = X_test.reshape((-1,240,1))
y_test = y_test.reshape(-1,1)
lstm.fit(X_train ,y_train , validation_data=(X_test,y_test) ,epochs=100 , batch_size=64 ,verbose=1 )
training_predictions = lstm.predict(X_train)
print('Mean Squared Error : {}'.format(mean_squared_error(scaler.inverse_transform(np.array(y_train).reshape(-1,1)) ,scaler.inverse_transform(training_predictions) )))
print('Mean Absolute Error : {}'.format(mean_absolute_error(scaler.inverse_transform(np.array(y_train).reshape(-1,1)) ,scaler.inverse_transform(training_predictions))))
plt.figure(figsize=(20,10))
plt.plot(scaler.inverse_transform(training_predictions) , label='Training Fitting')
plt.plot(scaler.inverse_transform(np.array(y_train).reshape(-1,1)) ,label='Original Values')
plt.legend()
plt.show()
preds = lstm.predict(X_test)
print('Mean Squared Error : {}'.format(mean_squared_error(scaler.inverse_transform(np.array(y_test).reshape(-1,1)) ,scaler.inverse_transform(preds) )))
print('Mean Absolute Error : {}'.format(mean_absolute_error(scaler.inverse_transform(np.array(y_test).reshape(-1,1)) ,scaler.inverse_transform(preds))))
plt.figure(figsize=(20,10))
plt.plot(scaler.inverse_transform(preds) , label='Testing Prediction')
plt.plot(scaler.inverse_transform(np.array(y_test).reshape(-1,1)) ,label='Test Original')
plt.legend()
plt.show()
lstm.save_weights('./models/lstm240.weights')
lstm.save('./models/lstm240.h5')
###Output
_____no_output_____ |
HA2_Anastasia_Vladimirovna_Markova_195.ipynb | ###Markdown
Task 1.1.1 Create a dictionary with the countries as the keys and information on the population (in millions) as the values. The order is preserved (the first value in ```population_mln``` is for the first country in the list ```countries```, the second value - for the second country, etc.). 1.2 Add the new values in this dictionary: Germany - 83, France - 67, China - 140.81. 1.3 Oops! We made a mistake for China. It's 1408.1 million. Replace the existing value with the correct one.
###Code
countries = ['Italy', 'Russia', 'Austria', 'Australia', 'Cyprus', 'USA', 'Burkina Faso', 'Spain', 'Greece']
population_mln = [60.3, 144.4, 8.6, 25.4, 0.86, 328, 20.3, 47, 10.7]
#1.1
population = {countries[i]: population_mln[i] for i in range(len(countries))}  # renamed from `dict` to avoid shadowing the built-in type
#1.2
population['Germany'] = 83
population['France'] = 67
population['China'] = 140.81
#1.3
population['China'] = 1408.1
print(population)
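# equivalently: population = dict(zip(countries, population_mln)) builds the same mapping in one call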
###Output
{'Italy': 60.3, 'Russia': 144.4, 'Austria': 8.6, 'Australia': 25.4, 'Cyprus': 0.86, 'USA': 328, 'Burkina Faso': 20.3, 'Spain': 47, 'Greece': 10.7, 'Germany': 83, 'France': 67, 'China': 1408.1}
###Markdown
Task 2. Write code to return the second maximum value in the list. This means that it is less than the maximum value, but greater than others.
###Code
array = [59, 7, 9, 87, 1, 0, 2, 20, 62, 120, 121, 98]
array.sort()
# this works because the values here are distinct; with repeated maxima, use sorted(set(array))[-2] instead
print(array[-2])
###Output
120
###Markdown
Task 3 Demonstrate at least 2 ways to check if a given string is a palindrome. Your code should return True if this string is a palindrome and False otherwise. A palindrome is a word, number, phrase, string, or other sequence of characters which reads the same backward as forward. Examples: 'madam', 'kayak', 'aaabbbbaaa'.
###Code
candidate = 'aAaacbbcaaAa'
#1
candidate_reverse = candidate[::-1]
print (candidate == candidate_reverse)
#2
candidate_reverse = reversed(candidate)
if list(candidate) == list (candidate_reverse):
print (True)
else:
print (False)
###Output
True
True
###Markdown
 Task 4.4.1 You have downloaded the introductory excerpt from the book Political Order and Political Decay by Dr Francis Fukuyama, but the text was downloaded in parts and with special characters. On every line where these characters appear, remove them (namely: `<page1>`, `<page2>`, `<page3>`, `#h1`, `#h2`, `#h3`). 4.2 Combine all parts of the text into one, so that the output is one text in string format (without extra square brackets left over from the list as a data structure. For instance, the beginning should be "PART ONE. The State. WHAT IS POLITICAL ..." instead of "[PART ONE. The State.] [WHAT IS POLITICAL ..."). The text must be connected in the correct sequence: chapter_title, subtitle, paragraph_1, paragraph_2, paragraph_3. Print your final text as the answer for this task.
###Code
paragraph_3 = ["The rule of law has many possible definitions, including simple law and order, property rights",
"and contract enforcement, or the modern Western understanding of human rights, which includes",
"equal rights for women and racial and ethnic minorities.1 <page3>The definition of the rule of law",
"I am using in this book is not tied to a specific substantive understanding of law. Rather, I",
"define it as a set of rules of behavior, reflecting a broad consensus within the society,",
" that is binding on even the most powerful political actors in the society, whether kings,",
" presidents, or prime ministers. "]
paragraph_3 = ''.join (paragraph_3)
paragraph_3 = paragraph_3.replace('<page3>', '')
chapter_title = ["PART", " ONE. #h1", "The ", "State.#h2 "]
chapter_title = ''.join (chapter_title)
chapter_title = chapter_title.replace('#h1', '')
chapter_title = chapter_title.replace('#h2', '')
paragraph_2 = ["In the first volume of this book, I argued that there were three basic ",
"categories of institutions that constituted a political order: the state, ",
"rule of law, and mechanisms of accountability. The state is ",
"a hierarchical, centralized organization that holds a monopoly on legitimate",
"force over a defined territory. <page2>In addition to characteristics like complexity ",
"and adaptability, states can be more or less impersonal: early states were ",
"indistinguishable from the ruler’s household and were described as “patrimonial” because",
"they favored and worked through the ruler’s family and friends. Modern, more highly developed",
"states, by contrast, make a distinction between the private interest of the rulers and the public",
"interest of the whole community. They strive to treat citizens on a more impersonal basis,",
" applying laws, recruiting officials, and undertaking policies without favoritism. "]
paragraph_2 = ''.join (paragraph_2)
paragraph_2 = paragraph_2.replace('<page2>', '')
subtitle = ["#h3WHAT IS POLITICAL DEVELOPMENT?"]
subtitle = ''.join (subtitle)
subtitle = subtitle.replace('#h3', '')
paragraph_1 = ["Political development and its three components: the state, rule of law,",
" and accountability; why all societies are subject to political decay;",
" the plan for the book; why <page1>it is good to have a balanced political system.",
"Political development is change over time in political institutions.",
"This is different from shifts in politics or policies: prime ministers,",
" presidents, and legislators may come and go, laws may be modified, ",
"but it is the underlying rules by which societies organize themselves",
" that define a political order. "]
paragraph_1 =''.join (paragraph_1)
paragraph_1 = paragraph_1.replace('<page1>', '')
#final text
final = [chapter_title, subtitle, paragraph_1, paragraph_2, paragraph_3]
final ='\n'.join (final)
print (final)
###Output
PART ONE. The State.
WHAT IS POLITICAL DEVELOPMENT?
Political development and its three components: the state, rule of law, and accountability; why all societies are subject to political decay; the plan for the book; why it is good to have a balanced political system.Political development is change over time in political institutions.This is different from shifts in politics or policies: prime ministers, presidents, and legislators may come and go, laws may be modified, but it is the underlying rules by which societies organize themselves that define a political order.
In the first volume of this book, I argued that there were three basic categories of institutions that constituted a political order: the state, rule of law, and mechanisms of accountability. The state is a hierarchical, centralized organization that holds a monopoly on legitimateforce over a defined territory. In addition to characteristics like complexity and adaptability, states can be more or less impersonal: early states were indistinguishable from the ruler’s household and were described as “patrimonial” becausethey favored and worked through the ruler’s family and friends. Modern, more highly developedstates, by contrast, make a distinction between the private interest of the rulers and the publicinterest of the whole community. They strive to treat citizens on a more impersonal basis, applying laws, recruiting officials, and undertaking policies without favoritism.
The rule of law has many possible definitions, including simple law and order, property rightsand contract enforcement, or the modern Western understanding of human rights, which includesequal rights for women and racial and ethnic minorities.1 The definition of the rule of lawI am using in this book is not tied to a specific substantive understanding of law. Rather, Idefine it as a set of rules of behavior, reflecting a broad consensus within the society, that is binding on even the most powerful political actors in the society, whether kings, presidents, or prime ministers.
###Markdown
Task 55.1. Generate a sequence of numbers from 1 to 1000 5.2. Read more about if conditions. https://docs.python.org/3/tutorial/controlflow.html "Look at our tasks for the practical class once again, we used ```if``` in list comprehensions and in a task with a feedback classification (week4). 5.3. Create an empty list. For each number check the condition: if this number is divided by 3 (without the remainder), add a word 'Three' in a new list, if it is divided by 5, add a word 'Five', if it is divided by both 3 and 5 - add 'ThreeFive'. Else (if all these conditions are not satisfied), simply add this number to a new list. The beginning of this sequence should be: [1, 2, 'Three', 4, 'Five', 'Three', 7, 8, 'Three', 'Five', 11, 'Three', 13, 14, 'ThreeFive', ...]
###Code
#5.1
list_1 = range(1, 1001)
#5.3
empty_list = []
for i in range(1, 1001):
    if (i % 3 == 0) and (i % 5 == 0):
        empty_list.append("ThreeFive")
    elif (i % 5 == 0):
        empty_list.append("Five")
    elif (i % 3 == 0):
        empty_list.append("Three")
    else:
        empty_list.append(i)
    print(empty_list[-1])
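# Compact alternative with a list comprehension (builds the same list as the loop above):
fizzbuzz = ["ThreeFive" if i % 15 == 0 else "Five" if i % 5 == 0 else "Three" if i % 3 == 0 else i for i in range(1, 1001)]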
###Output
1
2
Three
4
Five
Three
7
8
Three
Five
11
Three
13
14
ThreeFive
16
17
Three
19
Five
Three
22
23
Three
Five
26
Three
28
29
ThreeFive
31
32
Three
34
Five
Three
37
38
Three
Five
41
Three
43
44
ThreeFive
46
47
Three
49
Five
Three
52
53
Three
Five
56
Three
58
59
ThreeFive
61
62
Three
64
Five
Three
67
68
Three
Five
71
Three
73
74
ThreeFive
76
77
Three
79
Five
Three
82
83
Three
Five
86
Three
88
89
ThreeFive
91
92
Three
94
Five
Three
97
98
Three
Five
101
Three
103
104
ThreeFive
106
107
Three
109
Five
Three
112
113
Three
Five
116
Three
118
119
ThreeFive
121
122
Three
124
Five
Three
127
128
Three
Five
131
Three
133
134
ThreeFive
136
137
Three
139
Five
Three
142
143
Three
Five
146
Three
148
149
ThreeFive
151
152
Three
154
Five
Three
157
158
Three
Five
161
Three
163
164
ThreeFive
166
167
Three
169
Five
Three
172
173
Three
Five
176
Three
178
179
ThreeFive
181
182
Three
184
Five
Three
187
188
Three
Five
191
Three
193
194
ThreeFive
196
197
Three
199
Five
Three
202
203
Three
Five
206
Three
208
209
ThreeFive
211
212
Three
214
Five
Three
217
218
Three
Five
221
Three
223
224
ThreeFive
226
227
Three
229
Five
Three
232
233
Three
Five
236
Three
238
239
ThreeFive
241
242
Three
244
Five
Three
247
248
Three
Five
251
Three
253
254
ThreeFive
256
257
Three
259
Five
Three
262
263
Three
Five
266
Three
268
269
ThreeFive
271
272
Three
274
Five
Three
277
278
Three
Five
281
Three
283
284
ThreeFive
286
287
Three
289
Five
Three
292
293
Three
Five
296
Three
298
299
ThreeFive
301
302
Three
304
Five
Three
307
308
Three
Five
311
Three
313
314
ThreeFive
316
317
Three
319
Five
Three
322
323
Three
Five
326
Three
328
329
ThreeFive
331
332
Three
334
Five
Three
337
338
Three
Five
341
Three
343
344
ThreeFive
346
347
Three
349
Five
Three
352
353
Three
Five
356
Three
358
359
ThreeFive
361
362
Three
364
Five
Three
367
368
Three
Five
371
Three
373
374
ThreeFive
376
377
Three
379
Five
Three
382
383
Three
Five
386
Three
388
389
ThreeFive
391
392
Three
394
Five
Three
397
398
Three
Five
401
Three
403
404
ThreeFive
406
407
Three
409
Five
Three
412
413
Three
Five
416
Three
418
419
ThreeFive
421
422
Three
424
Five
Three
427
428
Three
Five
431
Three
433
434
ThreeFive
436
437
Three
439
Five
Three
442
443
Three
Five
446
Three
448
449
ThreeFive
451
452
Three
454
Five
Three
457
458
Three
Five
461
Three
463
464
ThreeFive
466
467
Three
469
Five
Three
472
473
Three
Five
476
Three
478
479
ThreeFive
481
482
Three
484
Five
Three
487
488
Three
Five
491
Three
493
494
ThreeFive
496
497
Three
499
Five
Three
502
503
Three
Five
506
Three
508
509
ThreeFive
511
512
Three
514
Five
Three
517
518
Three
Five
521
Three
523
524
ThreeFive
526
527
Three
529
Five
Three
532
533
Three
Five
536
Three
538
539
ThreeFive
541
542
Three
544
Five
Three
547
548
Three
Five
551
Three
553
554
ThreeFive
556
557
Three
559
Five
Three
562
563
Three
Five
566
Three
568
569
ThreeFive
571
572
Three
574
Five
Three
577
578
Three
Five
581
Three
583
584
ThreeFive
586
587
Three
589
Five
Three
592
593
Three
Five
596
Three
598
599
ThreeFive
601
602
Three
604
Five
Three
607
608
Three
Five
611
Three
613
614
ThreeFive
616
617
Three
619
Five
Three
622
623
Three
Five
626
Three
628
629
ThreeFive
631
632
Three
634
Five
Three
637
638
Three
Five
641
Three
643
644
ThreeFive
646
647
Three
649
Five
Three
652
653
Three
Five
656
Three
658
659
ThreeFive
661
662
Three
664
Five
Three
667
668
Three
Five
671
Three
673
674
ThreeFive
676
677
Three
679
Five
Three
682
683
Three
Five
686
Three
688
689
ThreeFive
691
692
Three
694
Five
Three
697
698
Three
Five
701
Three
703
704
ThreeFive
706
707
Three
709
Five
Three
712
713
Three
Five
716
Three
718
719
ThreeFive
721
722
Three
724
Five
Three
727
728
Three
Five
731
Three
733
734
ThreeFive
736
737
Three
739
Five
Three
742
743
Three
Five
746
Three
748
749
ThreeFive
751
752
Three
754
Five
Three
757
758
Three
Five
761
Three
763
764
ThreeFive
766
767
Three
769
Five
Three
772
773
Three
Five
776
Three
778
779
ThreeFive
781
782
Three
784
Five
Three
787
788
Three
Five
791
Three
793
794
ThreeFive
796
797
Three
799
Five
Three
802
803
Three
Five
806
Three
808
809
ThreeFive
811
812
Three
814
Five
Three
817
818
Three
Five
821
Three
823
824
ThreeFive
826
827
Three
829
Five
Three
832
833
Three
Five
836
Three
838
839
ThreeFive
841
842
Three
844
Five
Three
847
848
Three
Five
851
Three
853
854
ThreeFive
856
857
Three
859
Five
Three
862
863
Three
Five
866
Three
868
869
ThreeFive
871
872
Three
874
Five
Three
877
878
Three
Five
881
Three
883
884
ThreeFive
886
887
Three
889
Five
Three
892
893
Three
Five
896
Three
898
899
ThreeFive
901
902
Three
904
Five
Three
907
908
Three
Five
911
Three
913
914
ThreeFive
916
917
Three
919
Five
Three
922
923
Three
Five
926
Three
928
929
ThreeFive
931
932
Three
934
Five
Three
937
938
Three
Five
941
Three
943
944
ThreeFive
946
947
Three
949
Five
Three
952
953
Three
Five
956
Three
958
959
ThreeFive
961
962
Three
964
Five
Three
967
968
Three
Five
971
Three
973
974
ThreeFive
976
977
Three
979
Five
Three
982
983
Three
Five
986
Three
988
989
ThreeFive
991
992
Three
994
Five
Three
997
998
Three
Five
|
birthday_remaining.ipynb | ###Markdown
###Code
from datetime import datetime
import time
def get_user_birthday():
date_str = input("Enter your birth date in DD/MM/YYYY: ")
try:
birthday = datetime.strptime(date_str, "%d/%m/%Y")
    except (TypeError, ValueError):
        # fallback parse; note `datetime` was imported directly above, so the class is called without a module prefix
        birthday = datetime(*(time.strptime(date_str, "%d/%m/%Y")[0:6]))
return birthday
def days_remaining(birth_date):
now = datetime.now()
current_year = datetime(now.year, birth_date.month, birth_date.day)
days = (current_year - now).days
if days < 0:
next_year = datetime(now.year+1, birth_date.month, birth_date.day)
days = (next_year - now).days
return days
birthday = get_user_birthday()
next_birthday = days_remaining(birthday)
print("Your birthday is coming in: ", next_birthday, " days")
###Output
Enter your birth date in DD/MM/YYYY: 27/09/9999
Your birthday is coming in: 199 days
|
docs/zoo/tutorials.ipynb | ###Markdown
 Larq Zoo TutorialThis tutorial demonstrates how to load pretrained models from Larq Zoo. These models can be used for prediction, feature extraction, and fine-tuning.
###Code
pip install larq larq-zoo
import tensorflow as tf
import tensorflow_datasets as tfds
import numpy as np
import larq_zoo as lqz
from urllib.request import urlopen
from PIL import Image
###Output
_____no_output_____
###Markdown
Download and prepare a sample imageIn the following we will use a sample image from the [ImageNet](http://image-net.org/) dataset:
###Code
img_path = "https://raw.githubusercontent.com/larq/zoo/master/tests/fixtures/elephant.jpg"
with urlopen(img_path) as f:
img = Image.open(f).resize((224, 224))
x = tf.keras.preprocessing.image.img_to_array(img)
x = lqz.preprocess_input(x)
x = np.expand_dims(x, axis=0)
###Output
_____no_output_____
###Markdown
Classify ImageNet classes with QuickNetWe will first load the QuickNet architecture with pretrained weights and predict the image class.
###Code
model = lqz.sota.QuickNet(weights="imagenet")
preds = model.predict(x)
lqz.decode_predictions(preds, top=5)[0]
###Output
_____no_output_____
###Markdown
Extract features with QuickNetLarq Zoo models can also be used to extract features that can be used as input to a second model.
###Code
tf.keras.backend.clear_session()
model = lqz.sota.QuickNet(weights="imagenet", include_top=False)
features = model.predict(x)
print("Feature shape:", features.shape)
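# Hedged sketch (not part of the original tutorial): the headless model above can also serve as a
# backbone for fine-tuning by stacking a small classification head on top. The 10-class head and the
# optimizer/loss choices below are illustrative assumptions, not Larq Zoo recommendations.
head = tf.keras.Sequential([
    tf.keras.layers.GlobalAveragePooling2D(),
    tf.keras.layers.Dense(10, activation="softmax"),
])
fine_tune_model = tf.keras.Model(inputs=model.input, outputs=head(model.output))
fine_tune_model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])
# fine_tune_model.fit(your_labeled_dataset) would then train the new head on your own data.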
###Output
Feature shape: (1, 7, 7, 512)
###Markdown
Extract features from an arbitrary intermediate layerFeatures can also be extracted from arbitrary intermediate layer with just a few lines of code.
###Code
avg_pool_layer = model.get_layer("add_7")
avg_pool_model = tf.keras.models.Model(
inputs=model.input, outputs=avg_pool_layer.output)
avg_pool_features = avg_pool_model.predict(x)
print("add_7 feature shape:", avg_pool_features.shape)
###Output
add_7 feature shape: (1, 14, 14, 256)
###Markdown
 Build QuickNet over a custom input TensorThe model can also be used with an input Tensor that might also be the output of a different Keras model or layer.
###Code
input_tensor = tf.keras.layers.Input(shape=(224, 224, 3))
model = lqz.sota.QuickNet(input_tensor=input_tensor, weights="imagenet")
###Output
_____no_output_____
###Markdown
 Evaluate QuickNet with TensorFlow DatasetsTo re-run the evaluation on the entire [ImageNet](http://image-net.org/) validation dataset, [Tensorflow Datasets](https://www.tensorflow.org/datasets/) can be used.Note that running this example will require [**manually downloading**](https://www.tensorflow.org/datasets/catalog/imagenet2012) the entire dataset and might take a very long time to complete.
###Code
def preprocess(data):
img = lqz.preprocess_input(data["image"])
label = tf.one_hot(data["label"], 1000)
return img, label
dataset = (
tfds.load("imagenet2012:5.0.0", split=tfds.Split.VALIDATION)
.map(preprocess, num_parallel_calls=tf.data.experimental.AUTOTUNE)
.batch(128)
.prefetch(1)
)
model = lqz.sota.QuickNet()
model.compile(
optimizer="sgd",
loss="categorical_crossentropy",
metrics=["categorical_accuracy", "top_k_categorical_accuracy"],
)
model.evaluate(dataset)
###Output
_____no_output_____ |
Fashion-MNIST CNN.ipynb | ###Markdown
 A general approach to building a convolutional neural network using the Fashion-MNIST dataset Packages required: 1. pytorch 2. torchvision 3. matplotlib 4. numpy
###Code
import torch
import torch.nn as nn
import torchvision.transforms as transforms
import torchvision.datasets as dsets
from torch.autograd import Variable
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
Building the preliminary training and test datasets
###Code
train_dataset=dsets.FashionMNIST(root='./data',train=True,transform=transforms.ToTensor(),download=True)
test_dataset=dsets.FashionMNIST(root='./data',train=False,transform=transforms.ToTensor())
###Output
_____no_output_____
###Markdown
The size of the entire training dataset
###Code
train_dataset.train_data.size()
###Output
_____no_output_____
###Markdown
 Printing an example of the 28x28 image and its label
###Code
print("Label: {}".format(train_dataset.train_labels[0]))
plt.imshow(train_dataset.train_data[0])
###Output
Label: 9
###Markdown
Starting to build the neural net
###Code
# We use a batch size of 100 i.e 100 images are processed in a single iteration
batch_size=100
# Total no. of iterations will be 3000
n_iters=3000
num_epochs=int(n_iters/(len(train_dataset)/batch_size)) #Total 5 epochs overall
###Output
_____no_output_____
###Markdown
Converting the datasets to an iterable format
###Code
train_loader=torch.utils.data.DataLoader(dataset=train_dataset,batch_size=batch_size,shuffle=True)
test_loader=torch.utils.data.DataLoader(dataset=test_dataset,batch_size=batch_size,shuffle=False)
###Output
_____no_output_____
###Markdown
Building the model class
###Code
class CNNModel(nn.Module):
def __init__(self):
super(CNNModel,self).__init__()
#Convolution 1
self.cnn1=nn.Conv2d(in_channels=1,out_channels=16,kernel_size=5,stride=1,padding=2) #Using SAME padding
self.relu1=nn.ReLU()
#Maxpool 1
self.maxpool1=nn.MaxPool2d(kernel_size=2)
#Convolution 2
self.cnn2=nn.Conv2d(in_channels=16,out_channels=32,kernel_size=5,stride=1,padding=2)
self.relu2=nn.ReLU()
#Maxpool 2
self.maxpool2=nn.MaxPool2d(kernel_size=2)
self.fc1=nn.Linear(32*7*7,10) #Input size:32kernels*7x7 image, Output:10
def forward(self,x):
out=self.cnn1(x)
out=self.relu1(out)
out=self.maxpool1(out)
out=self.cnn2(out)
out=self.relu2(out)
out=self.maxpool2(out)
#Resizing
out=out.view(out.size(0),-1)
out=self.fc1(out)
return out
###Output
_____no_output_____
###Markdown
Instantiating model class
###Code
model=CNNModel()
###Output
_____no_output_____
###Markdown
Instantiating loss functionWe use the CrossEntropyLoss for our CNN
###Code
criterion=nn.CrossEntropyLoss()
learning_rate=0.01
optimizer=torch.optim.SGD(model.parameters() ,lr=learning_rate)
###Output
_____no_output_____
###Markdown
Training our CNN
###Code
iter=0
for epoch in range(num_epochs):
for i,(images,labels) in enumerate(train_loader):
images=Variable(images)
labels=Variable(labels)
optimizer.zero_grad()
outputs=model(images)
loss=criterion(outputs,labels)
loss.backward()
optimizer.step()
iter+=1
if ((iter%500)==0):
correct=0
total=0
for images,labels in test_loader:
images=Variable(images)
outputs=model(images)
_,pred=torch.max(outputs.data,1)
total+=labels.size(0)
                correct+=(pred==labels).sum().item()
            accuracy=correct*100/total
            print("Iteration {}: Loss {}, Accuracy {}".format(iter,loss.item(),accuracy))
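# Note (added): for a stricter evaluation one would usually wrap the test loop above in
# `with torch.no_grad():` and switch between `model.eval()` and `model.train()`, so that
# gradient tracking and any dropout/batch-norm behaviour do not affect the accuracy estimate.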
###Output
_____no_output_____ |
scraping_notebooks/pinyins_splitting_function.ipynb | ###Markdown
 We can use this function on each song. For the transliteration of each song, we split the connected pinyins into individual ones; then, for each Chinese line (Chinese characters, not pinyins), we count its length and append the corresponding number of pinyins to each cell of the transliteration. This fixes the mismatching problem and improves the accuracy of our model.
###Code
# First we have to define the dictionary of all the Chinese pinyin, will do this later
# The number of total pinyins in Mandarin is 407
dictionary = {"jin", "tian", "wo", "zai", "mai", "dang", "lao"}
def split(string, dictionary):
'''
This function splits individual connected pinyin into splitted ones.
'''
pinyin = ""
pinyins = []
remainder = ""
remainder2 = ""
for i in range(len(string)):
pinyin += string[i]
if pinyin in dictionary:
pinyins.append(pinyin)
remainder = string[len(pinyin):]
pinyin = ""
if len(remainder) > 1:
for i in range(len(remainder)):
pinyin += remainder[i]
if pinyin in dictionary:
pinyins.append(pinyin)
remainder2 = remainder[len(pinyin):]
if len(remainder2)>1:
pinyins.append(remainder2)
return pinyins
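# Hedged alternative (added, illustrative): a greedy longest-match splitter that handles any number of
# syllables, assuming `dictionary` contains every valid pinyin. Pure longest-match can still mis-split
# ambiguous strings (e.g. "xian" vs. "xi"+"an"), so treat this as a sketch rather than a final solution.
def greedy_split(string, dictionary):
    max_len = max(len(p) for p in dictionary)
    result, i = [], 0
    while i < len(string):
        for j in range(min(len(string), i + max_len), i, -1):  # try the longest candidate first
            if string[i:j] in dictionary:
                result.append(string[i:j])
                i = j
                break
        else:
            result.append(string[i])  # unknown character: keep it and move on
            i += 1
    return result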
# One example of using this function
string = "jintianwo"
split(string, dictionary)
split("wozaimai", dictionary)
def MaxMatch(connected_pinyin, dictionary):
'''
This function splits the whole sentence with connected pinyin into individual ones and returns a list.
'''
result = []
    for word in connected_pinyin:  # iterate over the parameter, not the global `before`
        if word in dictionary:
            result.append(word)
        else:
            result.extend(split(word, dictionary))
return result
connected_pinyin = "jintian wo zai maidanglao"
connected_pinyin = connected_pinyin.split()
connected_pinyin
# An example of using the function
MaxMatch(connected_pinyin, dictionary)
before = "wojintian zaimai jinlao"
before = before.split()
MaxMatch(before, dictionary)
###Output
_____no_output_____ |
postportem_dask.ipynb | ###Markdown
Hello World.Visual Studio Code Jupyter Notebooks Interface.
###Code
import fsspec
fs = fsspec.filesystem("file")
with fs.open(path="final_log") as f:
can_data = f.read()
import struct  # needed below to reinterpret unsigned bytes as signed values
hammer = dict()
for can_line in can_data.splitlines():
time_raw, bus_raw, msg_raw = can_line.decode().split(" ")
time_raw
bus_raw
msg_raw
can_id_hex_str, can_data = msg_raw.split("#")
int(len(can_data)/2)
hammer=dict()
if not can_id_hex_str in hammer:
hammer[can_id_hex_str]=dict({
"timestamp": list(),
"canbus": list(),
"can_id": list(),
"can_id_hex": list(),
"can_data_hex": list()
})
for byte in range(int(len(can_data)/2)):
hammer[can_id_hex_str][f"B{byte}"]=list()
hammer[can_id_hex_str][f"b{byte}"]=list()
timestamp = float(time_raw.strip(")("))
hammer[can_id_hex_str]["timestamp"].append(timestamp)
hammer[can_id_hex_str]["canbus"].append(bus_raw)
hammer[can_id_hex_str]["can_id"].append(int(can_id_hex_str, base=16))
hammer[can_id_hex_str]["can_id_hex"].append(can_id_hex_str)
hammer[can_id_hex_str]["can_data_hex"].append(can_data)
can_data_bytes = [
int(can_data[idx:idx+2], base=16)
for idx in range(0, len(can_data), 2)
]
for byte_idx, byte_data in enumerate(can_data_bytes):
hammer[can_id_hex_str][f"B{byte_idx}"].append(byte_data)
    byte_data_signed = struct.unpack("<b", struct.pack("<B", byte_data))[0]  # signed reinterpretation of this byte
    hammer[can_id_hex_str][f"b{byte_idx}"].append(byte_data_signed)
with fs.open(path="final_log") as f:
candump_log_data = f.read()
def process_log_line(can_line):
if isinstance(can_line, bytes):
can_line=can_line.decode()
time_raw, bus_raw, msg_raw = can_line.split(" ")
for can_line in candump_log_data.splitlines():
process_log_line(can_line)
process_log_line(can_line.decode())
hammer=dict()
def init_dict(can_data):
tmp = dict({
"timestamp": list(),
"canbus": list(),
"can_id": list(),
"can_id_hex": list(),
"can_data_hex": list()
})
for byte in range(int(len(can_data)/2)):
tmp[f"B{byte}"]=list()
tmp[f"b{byte}"]=list()
return tmp
def process_log_line(can_line):
## Preprocess
if isinstance(can_line, bytes):
can_line=can_line.decode()
## Process
time_raw, bus_raw, msg_raw = can_line.split(" ")
can_id_hex_str, can_data = msg_raw.split("#")
timestamp = float(time_raw.strip(")("))
## Init
if not can_id_hex_str in hammer:
hammer[can_id_hex_str]=init_dict(can_data)
hammer[can_id_hex_str]["timestamp"].append(timestamp)
hammer[can_id_hex_str]["canbus"].append(bus_raw)
hammer[can_id_hex_str]["can_id"].append(int(can_id_hex_str, base=16))
hammer[can_id_hex_str]["can_id_hex"].append(can_id_hex_str)
hammer[can_id_hex_str]["can_data_hex"].append(can_data)
can_data_bytes = [
int(can_data[idx:idx+2], base=16)
for idx in range(0, len(can_data), 2)
]
for byte_idx, byte_data in enumerate(can_data_bytes):
hammer[can_id_hex_str][f"B{byte_idx}"].append(byte_data)
        byte_data_signed = struct.unpack("<b", struct.pack("<B", byte_data))[0]  # "<b" gives the signed interpretation
        hammer[can_id_hex_str][f"b{byte_idx}"].append(byte_data_signed)
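# Illustrative only (the sample line below is made up, assuming candump-style "(time) bus id#data" records):
# process_log_line("(1612345678.901234) can0 123#DEADBEEF")
# would append one entry to hammer["123"] with timestamp 1612345678.901234 and bytes 0xDE 0xAD 0xBE 0xEF.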
for can_line in candump_log_data.splitlines():
process_log_line(can_line)
for key, item in hammer.items():
break
type(item)
import pandas as pd
data = pd.DataFrame(item)
data
parquet_file = f"candump_0x{key}.parquet"
parquet_file
data.to_parquet(parquet_file)
data2 = pd.read_parquet(parquet_file)  # completed from an unfinished line: read the file back to check the round-trip
import pyarrow.parquet as pq
import pyarrow as pa
df=data
table = pa.Table.from_pandas(df)
table
pq.write_table(table, 'example.parquet')
table2 = pq.read_table('example.parquet')
table2.to_pandas()
%%timeit
data.to_parquet(parquet_file)
%%timeit
table = pa.Table.from_pandas(df)
pq.write_table(table, 'example.parquet')
can_id_hex_str
int(can_id_hex_str, base=16)
assert(f"{int(can_id_hex_str, base=16):X}"==can_id_hex_str)
can_id_hex_str in hammer
hammer=dict()
if not can_id_hex_str in hammer:
hammer[can_id_hex_str]=dict({
"timestamp": list(),
"canbus": list(),
"can_id": list(),
"can_id_hex": list(),
"can_data_hex": list()
# ...
})
timestamp = float(time_raw.strip(")("))
hammer[can_id_hex_str]["timestamp"].append(timestamp)
hammer[can_id_hex_str]["canbus"].append(bus_raw)
hammer[can_id_hex_str]["can_id"].append(int(can_id_hex_str, base=16))
hammer[can_id_hex_str]["can_id_hex"].append(can_id_hex_str)
hammer[can_id_hex_str]["can_data_hex"].append(can_id_hex_str)
can_data
for idx in range(0, len(can_data), 2):
print(can_data[idx:idx+2])
[
(can_data[idx:idx+2])
for idx in range(0, len(can_data), 2)
]
[
int(can_data[idx:idx+2], base=16)
for idx in range(0, len(can_data), 2)
]
can_data_bytes = [
int(can_data[idx:idx+2], base=16)
for idx in range(0, len(can_data), 2)
]
len(can_data_bytes)
"<"+"B"*len(can_data_bytes)
x = "<"+"B"*len(can_data_bytes)
struct.pack(x, *can_data_bytes)
can_data
x = ">"+"B"*len(can_data_bytes)
B = struct.pack(x, *can_data_bytes)
len(can_data_bytes)%2
x = "<"+"H"*int(len(can_data_bytes)/2)
struct.unpack(x, B)
x = "<"+"h"*int(len(can_data_bytes)/2)
struct.unpack(x, B)
x = ">"+"H"*int(len(can_data_bytes)/2)
struct.unpack(x, B)
x = ">"+"H"*int(len(can_data_bytes)/2)
struct.unpack(x, B)
struct.unpack(x, B)
len(can_data)/2
int(can_data, base=16)
import struct
struct.pack(">U", int(can_data, base=16))
struct.pack?
hammer
can_id_hex_str
float(time_raw.strip(")("))
# pasted reference snippet (DATE_FIELD is not defined in this notebook), kept as a comment:
# df[DATE_FIELD]=(pd.to_datetime(df[DATE_FIELD],unit='ms'))
import pandas as pd
pd.to_datetime([float(time_raw.strip(")("))], unit="s")
class CanMsg(object):
def __init__(self, canid, data):
self.canid=canid
self.data=data
def __repr__(self):
return f"CAN<{self.canid}, {self.data}>"
def parse_log(log_file):
messages = list()
with open(log_file, "r") as f:
for can_msg in f.readlines():
can_id, can_data = can_msg.strip().split(" ")[-1].split("#")
cm = CanMsg(can_id, can_data)
messages.append(cm)
#
unique_datas = dict()
#
for canid in set([msg.canid for msg in messages]):
id_datas = set([msg.data for msg in messages if msg.canid==canid])
unique_datas[canid] = id_datas
#
return unique_datas
data = parse_log("final_log")
import struct
data = parse_log("final_log")
###Output
_____no_output_____ |