Custom constructors
class OtroSaludo(m: String, nombre: String) { // All the attributes that are going to be used must be declared here
  def this() = {
    this("Hola", "Pepe") // An auxiliary constructor must always call the primary constructor
  }
  def this(mensaje: String) = {
    this("Hola", "Jose")
  }
  def saludar() = {
    println(this.m + " " + nombre)
  }
}

val sal = new OtroSaludo()
sal.saludar()
_____no_output_____
MIT
Scala-basics.ipynb
FranciscoJavierMartin/Notebooks
Inheritance
class Punto(var x: Int, var y: Int) {
  def mover(dx: Int, dy: Int): Unit = {
    this.x = dx
    this.y = dy
  }
}

class Particula(x: Int, y: Int, masa: Int) extends Punto(x, y) {
  override def toString(): String = { // To redefine a method from a parent class, add the override modifier
    return s"X:${this.x} Y:${this.y} M:${this.masa}"
  }
}

val particula = new Particula(0, 0, 0)
particula.mover(1, 1)
println(particula.toString())
_____no_output_____
MIT
Scala-basics.ipynb
FranciscoJavierMartin/Notebooks
Abstract classes
abstract class Figura(lado: Int) {
  def getPerimetro(): Double // Abstract method, no implementation
  def printLado(): Unit = println("El lado mide " + this.lado) // Implemented method
}

class Cuadrado(lado: Int, n: Int) extends Figura(lado) {
  override def getPerimetro(): Double = {
    return 4 * lado
  }
}

val figura: Figura = new Cuadrado(4, 0)
println("El perimetro es " + figura.getPerimetro())
figura.printLado()
_____no_output_____
MIT
Scala-basics.ipynb
FranciscoJavierMartin/Notebooks
Traits

Traits are similar to the interfaces found in other programming languages. However, they have two main differences with respect to interfaces:
- They can be partially implemented, as happens with abstract classes.
- They cannot take constructor parameters.
trait Correo {
  def enviar(): Unit
  def recibir(mensaje: String): Unit = {
    println(s"Mensaje recibido: ${mensaje}")
  }
}

class CorreoPostal() extends Correo {
  override def enviar() = {
    println("Enviado desde correo postal")
  }
}

class CorreoElectronico(usuario: String) extends Correo {
  override def enviar() = {
    println(s"Enviado por ${usuario}")
  }
}

val carta: Correo = new CorreoPostal()
val email: Correo = new CorreoElectronico("pepe")
carta.enviar()
carta.recibir("Hola desde carta")
email.enviar()
email.recibir("Hola desde email")
_____no_output_____
MIT
Scala-basics.ipynb
FranciscoJavierMartin/Notebooks
Collections

The collections included by default are immutable: elements cannot be added or removed. Operations such as *add* and similar ones return a new collection containing the new elements. When the new collection is created, only references to the objects are copied, so there is almost no penalty in execution time or memory consumption.
val lista = List(1, 2, 3) // Immutable list
0 :: lista    // Returns a new list with the element inserted at the front
lista.head    // Returns the first element of the list
lista.tail    // Returns the whole list except the first element
lista ::: lista // Concatenates two lists and returns the result
_____no_output_____
MIT
Scala-basics.ipynb
FranciscoJavierMartin/Notebooks
Operations and functions on sets (and similar collections)
val conjunto = Set(1, 2, 3)
val conjunto2 = conjunto.map(x => x + 3) // Applies the given function to every member of the collection
val conjunto3 = List(conjunto, conjunto2).flatten // Creates a new collection with the elements of the sub-collections
Set(1, 4, 9).flatMap { x => Set(x, x + 1) } // flatMap

val lista = (List(1, 2, 3) ++ List(1, 2, 3))
lista.distinct // Returns a list with all the distinct elements

Set(1, 2, 3)(1) // Returns true if the element is contained in the collection, false otherwise
List(4, 5, 6)(1) // Returns the element at the given position

val conjuntoImpares = conjunto.filter(x => x % 2 != 0) // Returns another set with the elements that pass the filter

val escalar: Int = 1

// For immutable sets
conjunto + escalar    // Adds the element and returns a new set
conjunto ++ conjunto2 // Union of sets
conjunto - escalar    // Removes the element and returns a new set
conjunto -- conjunto2 // Set difference
conjunto & conjunto2  // Intersection

// Only for mutable sets
val conjuntoMutable = scala.collection.mutable.Set(1, 2, 3)
val conjuntoMutable2 = scala.collection.mutable.Set(3, 4, 5)
conjuntoMutable += escalar            // Adds the value to the set
conjuntoMutable ++= conjuntoMutable2  // Adds the elements of the second set to the first one
conjuntoMutable retain { x => x % 2 == 0 } // Keeps only the elements that satisfy the condition
_____no_output_____
MIT
Scala-basics.ipynb
FranciscoJavierMartin/Notebooks
Maps

Maps are key/value structures similar to Java Maps or Python dictionaries.
val mapa=Map(1->"Uno",2->"Dos",3->"Tres")
_____no_output_____
MIT
Scala-basics.ipynb
FranciscoJavierMartin/Notebooks
Colab FAQ

For a basic overview of the features offered in Colab notebooks, check out: [Overview of Colaboratory Features](https://colab.research.google.com/notebooks/basic_features_overview.ipynb)

You need to use the Colab GPU for this assignment by selecting:

> **Runtime** → **Change runtime type** → **Hardware Accelerator: GPU**

Setup PyTorch

All files are stored in the /content/csc413/a4/ folder.
######################################################################
# Setup python environment and change the current working directory
######################################################################
!pip install torch torchvision
!pip install imageio
!pip install matplotlib

%mkdir -p /content/csc413/a4/
%cd /content/csc413/a4
_____no_output_____
MIT
assets/assignments/a4_dcgan.ipynb
uoft-csc413/2022
Helper code

Utility functions
import os import numpy as np import matplotlib.pyplot as plt import torch from torch import nn from torch.nn import Parameter import torch.nn.functional as F import torch.optim as optim from torch.autograd import Variable from torch.utils.data import DataLoader from torchvision import datasets from torchvision import transforms from six.moves.urllib.request import urlretrieve import tarfile import imageio from urllib.error import URLError from urllib.error import HTTPError def get_file(fname, origin, untar=False, extract=False, archive_format='auto', cache_dir='data'): datadir = os.path.join(cache_dir) if not os.path.exists(datadir): os.makedirs(datadir) if untar: untar_fpath = os.path.join(datadir, fname) fpath = untar_fpath + '.tar.gz' else: fpath = os.path.join(datadir, fname) print(fpath) if not os.path.exists(fpath): print('Downloading data from', origin) error_msg = 'URL fetch failure on {}: {} -- {}' try: try: urlretrieve(origin, fpath) except URLError as e: raise Exception(error_msg.format(origin, e.errno, e.reason)) except HTTPError as e: raise Exception(error_msg.format(origin, e.code, e.msg)) except (Exception, KeyboardInterrupt) as e: if os.path.exists(fpath): os.remove(fpath) raise if untar: if not os.path.exists(untar_fpath): print('Extracting file.') with tarfile.open(fpath) as archive: archive.extractall(datadir) return untar_fpath return fpath class AttrDict(dict): def __init__(self, *args, **kwargs): super(AttrDict, self).__init__(*args, **kwargs) self.__dict__ = self def to_var(tensor, cuda=True): """Wraps a Tensor in a Variable, optionally placing it on the GPU. Arguments: tensor: A Tensor object. cuda: A boolean flag indicating whether to use the GPU. Returns: A Variable object, on the GPU if cuda==True. """ if cuda: return Variable(tensor.cuda()) else: return Variable(tensor) def to_data(x): """Converts variable to numpy.""" if torch.cuda.is_available(): x = x.cpu() return x.data.numpy() def create_dir(directory): """Creates a directory if it doesn't already exist. """ if not os.path.exists(directory): os.makedirs(directory) def gan_checkpoint(iteration, G, D, opts): """Saves the parameters of the generator G and discriminator D. """ G_path = os.path.join(opts.checkpoint_dir, 'G.pkl') D_path = os.path.join(opts.checkpoint_dir, 'D.pkl') torch.save(G.state_dict(), G_path) torch.save(D.state_dict(), D_path) def load_checkpoint(opts): """Loads the generator and discriminator models from checkpoints. """ G_path = os.path.join(opts.load, 'G.pkl') D_path = os.path.join(opts.load, 'D_.pkl') G = DCGenerator(noise_size=opts.noise_size, conv_dim=opts.g_conv_dim, spectral_norm=opts.spectral_norm) D = DCDiscriminator(conv_dim=opts.d_conv_dim) G.load_state_dict(torch.load(G_path, map_location=lambda storage, loc: storage)) D.load_state_dict(torch.load(D_path, map_location=lambda storage, loc: storage)) if torch.cuda.is_available(): G.cuda() D.cuda() print('Models moved to GPU.') return G, D def merge_images(sources, targets, opts): """Creates a grid consisting of pairs of columns, where the first column in each pair contains images source images and the second column in each pair contains images generated by the CycleGAN from the corresponding images in the first column. 
""" _, _, h, w = sources.shape row = int(np.sqrt(opts.batch_size)) merged = np.zeros([3, row * h, row * w * 2]) for (idx, s, t) in (zip(range(row ** 2), sources, targets, )): i = idx // row j = idx % row merged[:, i * h:(i + 1) * h, (j * 2) * h:(j * 2 + 1) * h] = s merged[:, i * h:(i + 1) * h, (j * 2 + 1) * h:(j * 2 + 2) * h] = t return merged.transpose(1, 2, 0) def generate_gif(directory_path, keyword=None): images = [] for filename in sorted(os.listdir(directory_path)): if filename.endswith(".png") and (keyword is None or keyword in filename): img_path = os.path.join(directory_path, filename) print("adding image {}".format(img_path)) images.append(imageio.imread(img_path)) if keyword: imageio.mimsave( os.path.join(directory_path, 'anim_{}.gif'.format(keyword)), images) else: imageio.mimsave(os.path.join(directory_path, 'anim.gif'), images) def create_image_grid(array, ncols=None): """ """ num_images, channels, cell_h, cell_w = array.shape if not ncols: ncols = int(np.sqrt(num_images)) nrows = int(np.math.floor(num_images / float(ncols))) result = np.zeros((cell_h * nrows, cell_w * ncols, channels), dtype=array.dtype) for i in range(0, nrows): for j in range(0, ncols): result[i * cell_h:(i + 1) * cell_h, j * cell_w:(j + 1) * cell_w, :] = array[i * ncols + j].transpose(1, 2, 0) if channels == 1: result = result.squeeze() return result def gan_save_samples(G, fixed_noise, iteration, opts): generated_images = G(fixed_noise) generated_images = to_data(generated_images) grid = create_image_grid(generated_images) # merged = merge_images(X, fake_Y, opts) path = os.path.join(opts.sample_dir, 'sample-{:06d}.png'.format(iteration)) imageio.imwrite(path, grid) print('Saved {}'.format(path))
_____no_output_____
MIT
assets/assignments/a4_dcgan.ipynb
uoft-csc413/2022
Data loader
def get_emoji_loader(emoji_type, opts):
    """Creates training and test data loaders.
    """
    transform = transforms.Compose([
        transforms.Scale(opts.image_size),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])

    train_path = os.path.join('data/emojis', emoji_type)
    test_path = os.path.join('data/emojis', 'Test_{}'.format(emoji_type))

    train_dataset = datasets.ImageFolder(train_path, transform)
    test_dataset = datasets.ImageFolder(test_path, transform)

    train_dloader = DataLoader(dataset=train_dataset, batch_size=opts.batch_size, shuffle=True, num_workers=opts.num_workers)
    test_dloader = DataLoader(dataset=test_dataset, batch_size=opts.batch_size, shuffle=False, num_workers=opts.num_workers)

    return train_dloader, test_dloader
_____no_output_____
MIT
assets/assignments/a4_dcgan.ipynb
uoft-csc413/2022
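A minimal usage sketch of `get_emoji_loader`, not part of the assignment code. It assumes the emoji archive from the "Download dataset" cell further below has already been extracted to `data/emojis`, and the option values are illustrative, not prescribed. Note that recent torchvision versions replace `transforms.Scale` with the equivalent `transforms.Resize`.

```python
# Hypothetical usage of get_emoji_loader; the option values below are
# illustrative assumptions, not values mandated by the assignment.
opts = AttrDict()
opts.update({'image_size': 32, 'batch_size': 32, 'num_workers': 0})

train_loader, test_loader = get_emoji_loader('Apple', opts)

# Inspect one batch to confirm the expected tensor shapes.
images, labels = next(iter(train_loader))
print(images.shape)   # expected torch.Size([32, 3, 32, 32]) for 32x32 RGB emojis
print(labels.shape)   # expected torch.Size([32])
```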
Training and evaluation code
def print_models(G_XtoY, G_YtoX, D_X, D_Y): """Prints model information for the generators and discriminators. """ print(" G ") print("---------------------------------------") print(G_XtoY) print("---------------------------------------") print(" D ") print("---------------------------------------") print(D_X) print("---------------------------------------") def create_model(opts): """Builds the generators and discriminators. """ ### GAN G = DCGenerator(noise_size=opts.noise_size, conv_dim=opts.g_conv_dim, spectral_norm=opts.spectral_norm) D = DCDiscriminator(conv_dim=opts.d_conv_dim, spectral_norm=opts.spectral_norm) print_models(G, None, D, None) if torch.cuda.is_available(): G.cuda() D.cuda() print('Models moved to GPU.') return G, D def train(opts): """Loads the data, creates checkpoint and sample directories, and starts the training loop. """ # Create train and test dataloaders for images from the two domains X and Y dataloader_X, test_dataloader_X = get_emoji_loader(emoji_type=opts.X, opts=opts) # Create checkpoint and sample directories create_dir(opts.checkpoint_dir) create_dir(opts.sample_dir) # Start training if opts.least_squares_gan: G, D = gan_training_loop_leastsquares(dataloader_X, test_dataloader_X, opts) else: G, D = gan_training_loop_regular(dataloader_X, test_dataloader_X, opts) return G, D def print_opts(opts): """Prints the values of all command-line arguments. """ print('=' * 80) print('Opts'.center(80)) print('-' * 80) for key in opts.__dict__: if opts.__dict__[key]: print('{:>30}: {:<30}'.format(key, opts.__dict__[key]).center(80)) print('=' * 80)
_____no_output_____
MIT
assets/assignments/a4_dcgan.ipynb
uoft-csc413/2022
Your code for generators and discriminators

Helper modules
def sample_noise(batch_size, dim): """ Generate a PyTorch Tensor of uniform random noise. Input: - batch_size: Integer giving the batch size of noise to generate. - dim: Integer giving the dimension of noise to generate. Output: - A PyTorch Tensor of shape (batch_size, dim, 1, 1) containing uniform random noise in the range (-1, 1). """ return to_var(torch.rand(batch_size, dim) * 2 - 1).unsqueeze(2).unsqueeze(3) def upconv(in_channels, out_channels, kernel_size, stride=2, padding=2, batch_norm=True, spectral_norm=False): """Creates a upsample-and-convolution layer, with optional batch normalization. """ layers = [] if stride>1: layers.append(nn.Upsample(scale_factor=stride)) conv_layer = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=1, padding=padding, bias=False) if spectral_norm: layers.append(SpectralNorm(conv_layer)) else: layers.append(conv_layer) if batch_norm: layers.append(nn.BatchNorm2d(out_channels)) return nn.Sequential(*layers) def conv(in_channels, out_channels, kernel_size, stride=2, padding=2, batch_norm=True, init_zero_weights=False, spectral_norm=False): """Creates a convolutional layer, with optional batch normalization. """ layers = [] conv_layer = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, bias=False) if init_zero_weights: conv_layer.weight.data = torch.randn(out_channels, in_channels, kernel_size, kernel_size) * 0.001 if spectral_norm: layers.append(SpectralNorm(conv_layer)) else: layers.append(conv_layer) if batch_norm: layers.append(nn.BatchNorm2d(out_channels)) return nn.Sequential(*layers) class ResnetBlock(nn.Module): def __init__(self, conv_dim): super(ResnetBlock, self).__init__() self.conv_layer = conv(in_channels=conv_dim, out_channels=conv_dim, kernel_size=3, stride=1, padding=1) def forward(self, x): out = x + self.conv_layer(x) return out
_____no_output_____
MIT
assets/assignments/a4_dcgan.ipynb
uoft-csc413/2022
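To make the expected tensor shapes concrete, here is a small shape-check sketch for the helpers above. It is illustrative only and assumes a CUDA device is available, since `sample_noise` goes through `to_var`, which places the tensor on the GPU by default.

```python
# Illustrative shape check for the helper modules above (assumes a GPU, because
# sample_noise -> to_var moves the tensor to CUDA by default).
noise = sample_noise(batch_size=16, dim=100)
print(noise.shape)      # torch.Size([16, 100, 1, 1]), values in (-1, 1)

up = upconv(in_channels=100, out_channels=64, kernel_size=5).cuda()
print(up(noise).shape)  # torch.Size([16, 64, 2, 2]): upsample x2, then a 'same'-padded conv

down = conv(in_channels=3, out_channels=32, kernel_size=5).cuda()
x = torch.randn(16, 3, 32, 32).cuda()
print(down(x).shape)    # torch.Size([16, 32, 16, 16]): the stride-2 conv halves H and W
```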
DCGAN Spectral Norm class
def l2normalize(v, eps=1e-12): return v / (v.norm() + eps) class SpectralNorm(nn.Module): def __init__(self, module, name='weight', power_iterations=1): super(SpectralNorm, self).__init__() self.module = module self.name = name self.power_iterations = power_iterations if not self._made_params(): self._make_params() def _update_u_v(self): u = getattr(self.module, self.name + "_u") v = getattr(self.module, self.name + "_v") w = getattr(self.module, self.name + "_bar") height = w.data.shape[0] for _ in range(self.power_iterations): v.data = l2normalize(torch.mv(torch.t(w.view(height,-1).data), u.data)) u.data = l2normalize(torch.mv(w.view(height,-1).data, v.data)) # sigma = torch.dot(u.data, torch.mv(w.view(height,-1).data, v.data)) sigma = u.dot(w.view(height, -1).mv(v)) setattr(self.module, self.name, w / sigma.expand_as(w)) def _made_params(self): try: u = getattr(self.module, self.name + "_u") v = getattr(self.module, self.name + "_v") w = getattr(self.module, self.name + "_bar") return True except AttributeError: return False def _make_params(self): w = getattr(self.module, self.name) height = w.data.shape[0] width = w.view(height, -1).data.shape[1] u = Parameter(w.data.new(height).normal_(0, 1), requires_grad=False) v = Parameter(w.data.new(width).normal_(0, 1), requires_grad=False) u.data = l2normalize(u.data) v.data = l2normalize(v.data) w_bar = Parameter(w.data) del self.module._parameters[self.name] self.module.register_parameter(self.name + "_u", u) self.module.register_parameter(self.name + "_v", v) self.module.register_parameter(self.name + "_bar", w_bar) def forward(self, *args): self._update_u_v() return self.module.forward(*args)
_____no_output_____
MIT
assets/assignments/a4_dcgan.ipynb
uoft-csc413/2022
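As a quick illustration (not part of the assignment code), the `SpectralNorm` wrapper above can be applied to any module with a `weight` parameter; the `conv`/`upconv` helpers do exactly this when `spectral_norm=True`. The layer sizes below are arbitrary.

```python
# Hypothetical example: wrap a plain convolution with the SpectralNorm module above.
plain_conv = nn.Conv2d(in_channels=3, out_channels=8, kernel_size=3, padding=1, bias=False)
sn_conv = SpectralNorm(plain_conv)

x = torch.randn(4, 3, 32, 32)
y = sn_conv(x)   # runs one power iteration, then the spectrally normalized convolution
print(y.shape)   # torch.Size([4, 8, 32, 32])

# The original weight is re-parameterized as weight_bar plus auxiliary vectors u and v.
print([name for name, _ in sn_conv.module.named_parameters()])
# e.g. ['weight_u', 'weight_v', 'weight_bar']
```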
**[Your Task]** GAN generator
class DCGenerator(nn.Module): def __init__(self, noise_size, conv_dim, spectral_norm=False): super(DCGenerator, self).__init__() self.conv_dim = conv_dim ########################################### ## FILL THIS IN: CREATE ARCHITECTURE ## ########################################### # self.linear_bn = ... # self.upconv1 = ... # self.upconv2 = ... # self.upconv3 = ... def forward(self, z): """Generates an image given a sample of random noise. Input ----- z: BS x noise_size x 1 x 1 --> BSx100x1x1 (during training) Output ------ out: BS x channels x image_width x image_height --> BSx3x32x32 (during training) """ batch_size = z.size(0) out = F.relu(self.linear_bn(z)).view(-1, self.conv_dim*4, 4, 4) # BS x 128 x 4 x 4 out = F.relu(self.upconv1(out)) # BS x 64 x 8 x 8 out = F.relu(self.upconv2(out)) # BS x 32 x 16 x 16 out = F.tanh(self.upconv3(out)) # BS x 3 x 32 x 32 out_size = out.size() if out_size != torch.Size([batch_size, 3, 32, 32]): raise ValueError("expect {} x 3 x 32 x 32, but get {}".format(batch_size, out_size)) return out
_____no_output_____
MIT
assets/assignments/a4_dcgan.ipynb
uoft-csc413/2022
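The sketch below is illustrative only and is NOT the assignment's official solution; it shows one plausible way the commented-out layers could be defined with the `upconv` helper so that the shape comments in `forward()` hold (BS x conv_dim*4 x 4 x 4 up to BS x 3 x 32 x 32). Using `upconv` with `stride=4` to map the 1x1 noise to a 4x4 map, and dropping batch norm before the final tanh, are design assumptions, not requirements.

```python
# Illustrative sketch only -- not the official solution to the assignment.
class DCGeneratorSketch(nn.Module):
    def __init__(self, noise_size, conv_dim, spectral_norm=False):
        super(DCGeneratorSketch, self).__init__()
        self.conv_dim = conv_dim
        # Map the BS x noise_size x 1 x 1 input to a conv_dim*4 x 4 x 4 feature map
        # (upconv with stride=4 upsamples 1x1 -> 4x4 before the convolution).
        self.linear_bn = upconv(noise_size, conv_dim * 4, kernel_size=5, stride=4, spectral_norm=spectral_norm)
        self.upconv1 = upconv(conv_dim * 4, conv_dim * 2, kernel_size=5, spectral_norm=spectral_norm)
        self.upconv2 = upconv(conv_dim * 2, conv_dim, kernel_size=5, spectral_norm=spectral_norm)
        # No batch norm before the final tanh.
        self.upconv3 = upconv(conv_dim, 3, kernel_size=5, batch_norm=False, spectral_norm=spectral_norm)

    def forward(self, z):
        out = F.relu(self.linear_bn(z)).view(-1, self.conv_dim * 4, 4, 4)  # BS x 128 x 4 x 4
        out = F.relu(self.upconv1(out))                                    # BS x 64 x 8 x 8
        out = F.relu(self.upconv2(out))                                    # BS x 32 x 16 x 16
        out = torch.tanh(self.upconv3(out))                                # BS x 3 x 32 x 32
        return out
```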
GAN discriminator
class DCDiscriminator(nn.Module): """Defines the architecture of the discriminator network. Note: Both discriminators D_X and D_Y have the same architecture in this assignment. """ def __init__(self, conv_dim=64, spectral_norm=False): super(DCDiscriminator, self).__init__() self.conv1 = conv(in_channels=3, out_channels=conv_dim, kernel_size=5, stride=2, spectral_norm=spectral_norm) self.conv2 = conv(in_channels=conv_dim, out_channels=conv_dim*2, kernel_size=5, stride=2, spectral_norm=spectral_norm) self.conv3 = conv(in_channels=conv_dim*2, out_channels=conv_dim*4, kernel_size=5, stride=2, spectral_norm=spectral_norm) self.conv4 = conv(in_channels=conv_dim*4, out_channels=1, kernel_size=5, stride=2, padding=1, batch_norm=False, spectral_norm=spectral_norm) def forward(self, x): batch_size = x.size(0) out = F.relu(self.conv1(x)) # BS x 64 x 16 x 16 out = F.relu(self.conv2(out)) # BS x 64 x 8 x 8 out = F.relu(self.conv3(out)) # BS x 64 x 4 x 4 out = self.conv4(out).squeeze() out_size = out.size() if out_size != torch.Size([batch_size,]): raise ValueError("expect {} x 1, but get {}".format(batch_size, out_size)) return out
_____no_output_____
MIT
assets/assignments/a4_dcgan.ipynb
uoft-csc413/2022
**[Your Task]** GAN training loop

* Regular GAN
* Least Squares GAN
def gan_training_loop_regular(dataloader, test_dataloader, opts): """Runs the training loop. * Saves checkpoint every opts.checkpoint_every iterations * Saves generated samples every opts.sample_every iterations """ # Create generators and discriminators G, D = create_model(opts) g_params = G.parameters() # Get generator parameters d_params = D.parameters() # Get discriminator parameters # Create optimizers for the generators and discriminators g_optimizer = optim.Adam(g_params, opts.lr, [opts.beta1, opts.beta2]) d_optimizer = optim.Adam(d_params, opts.lr * 2., [opts.beta1, opts.beta2]) train_iter = iter(dataloader) test_iter = iter(test_dataloader) # Get some fixed data from domains X and Y for sampling. These are images that are held # constant throughout training, that allow us to inspect the model's performance. fixed_noise = sample_noise(100, opts.noise_size) # # 100 x noise_size x 1 x 1 iter_per_epoch = len(train_iter) total_train_iters = opts.train_iters losses = {"iteration": [], "D_fake_loss": [], "D_real_loss": [], "G_loss": []} gp_weight = 1 adversarial_loss = torch.nn.BCEWithLogitsLoss() # Use this loss # [Hint: you may find the folowing code helpful] # ones = Variable(torch.Tensor(real_images.shape[0]).float().cuda().fill_(1.0), requires_grad=False) try: for iteration in range(1, opts.train_iters + 1): # Reset data_iter for each epoch if iteration % iter_per_epoch == 0: train_iter = iter(dataloader) real_images, real_labels = train_iter.next() real_images, real_labels = to_var(real_images), to_var(real_labels).long().squeeze() for d_i in range(opts.d_train_iters): d_optimizer.zero_grad() # FILL THIS IN # 1. Compute the discriminator loss on real images # D_real_loss = ... # 2. Sample noise # noise = ... # 3. Generate fake images from the noise # fake_images = ... # 4. Compute the discriminator loss on the fake images # D_fake_loss = ... # ---- Gradient Penalty ---- if opts.gradient_penalty: alpha = torch.rand(real_images.shape[0], 1, 1, 1) alpha = alpha.expand_as(real_images).cuda() interp_images = Variable(alpha * real_images.data + (1 - alpha) * fake_images.data, requires_grad=True).cuda() D_interp_output = D(interp_images) gradients = torch.autograd.grad(outputs=D_interp_output, inputs=interp_images, grad_outputs=torch.ones(D_interp_output.size()).cuda(), create_graph=True, retain_graph=True)[0] gradients = gradients.view(real_images.shape[0], -1) gradients_norm = torch.sqrt(torch.sum(gradients ** 2, dim=1) + 1e-12) gp = gp_weight * gradients_norm.mean() else: gp = 0.0 # -------------------------- # 5. Compute the total discriminator loss # D_total_loss = ... D_total_loss.backward() d_optimizer.step() ########################################### ### TRAIN THE GENERATOR ### ########################################### g_optimizer.zero_grad() # FILL THIS IN # 1. Sample noise # noise = ... # 2. Generate fake images from the noise # fake_images = ... # 3. Compute the generator loss # G_loss = ... 
G_loss.backward() g_optimizer.step() # Print the log info if iteration % opts.log_step == 0: losses['iteration'].append(iteration) losses['D_real_loss'].append(D_real_loss.item()) losses['D_fake_loss'].append(D_fake_loss.item()) losses['G_loss'].append(G_loss.item()) print('Iteration [{:4d}/{:4d}] | D_real_loss: {:6.4f} | D_fake_loss: {:6.4f} | G_loss: {:6.4f}'.format( iteration, total_train_iters, D_real_loss.item(), D_fake_loss.item(), G_loss.item())) # Save the generated samples if iteration % opts.sample_every == 0: gan_save_samples(G, fixed_noise, iteration, opts) # Save the model parameters if iteration % opts.checkpoint_every == 0: gan_checkpoint(iteration, G, D, opts) except KeyboardInterrupt: print('Exiting early from training.') return G, D plt.figure() plt.plot(losses['iteration'], losses['D_real_loss'], label='D_real') plt.plot(losses['iteration'], losses['D_fake_loss'], label='D_fake') plt.plot(losses['iteration'], losses['G_loss'], label='G') plt.legend() plt.savefig(os.path.join(opts.sample_dir, 'losses.png')) plt.close() return G, D def gan_training_loop_leastsquares(dataloader, test_dataloader, opts): """Runs the training loop. * Saves checkpoint every opts.checkpoint_every iterations * Saves generated samples every opts.sample_every iterations """ # Create generators and discriminators G, D = create_model(opts) g_params = G.parameters() # Get generator parameters d_params = D.parameters() # Get discriminator parameters # Create optimizers for the generators and discriminators g_optimizer = optim.Adam(g_params, opts.lr, [opts.beta1, opts.beta2]) d_optimizer = optim.Adam(d_params, opts.lr * 2., [opts.beta1, opts.beta2]) train_iter = iter(dataloader) test_iter = iter(test_dataloader) # Get some fixed data from domains X and Y for sampling. These are images that are held # constant throughout training, that allow us to inspect the model's performance. fixed_noise = sample_noise(100, opts.noise_size) # # 100 x noise_size x 1 x 1 iter_per_epoch = len(train_iter) total_train_iters = opts.train_iters losses = {"iteration": [], "D_fake_loss": [], "D_real_loss": [], "G_loss": []} #adversarial_loss = torch.nn.BCEWithLogitsLoss() gp_weight = 1 try: for iteration in range(1, opts.train_iters + 1): # Reset data_iter for each epoch if iteration % iter_per_epoch == 0: train_iter = iter(dataloader) real_images, real_labels = train_iter.next() real_images, real_labels = to_var(real_images), to_var(real_labels).long().squeeze() for d_i in range(opts.d_train_iters): d_optimizer.zero_grad() # FILL THIS IN # 1. Compute the discriminator loss on real images # D_real_loss = ... # 2. Sample noise # noise = ... # 3. Generate fake images from the noise # fake_images = ... # 4. Compute the discriminator loss on the fake images # D_fake_loss = ... # ---- Gradient Penalty ---- if opts.gradient_penalty: alpha = torch.rand(real_images.shape[0], 1, 1, 1) alpha = alpha.expand_as(real_images).cuda() interp_images = Variable(alpha * real_images.data + (1 - alpha) * fake_images.data, requires_grad=True).cuda() D_interp_output = D(interp_images) gradients = torch.autograd.grad(outputs=D_interp_output, inputs=interp_images, grad_outputs=torch.ones(D_interp_output.size()).cuda(), create_graph=True, retain_graph=True)[0] gradients = gradients.view(real_images.shape[0], -1) gradients_norm = torch.sqrt(torch.sum(gradients ** 2, dim=1) + 1e-12) gp = gp_weight * gradients_norm.mean() else: gp = 0.0 # -------------------------- # 5. Compute the total discriminator loss # D_total_loss = ... 
D_total_loss.backward() d_optimizer.step() ########################################### ### TRAIN THE GENERATOR ### ########################################### g_optimizer.zero_grad() # FILL THIS IN # 1. Sample noise # noise = ... # 2. Generate fake images from the noise # fake_images = ... # 3. Compute the generator loss # G_loss = ... G_loss.backward() g_optimizer.step() # Print the log info if iteration % opts.log_step == 0: losses['iteration'].append(iteration) losses['D_real_loss'].append(D_real_loss.item()) losses['D_fake_loss'].append(D_fake_loss.item()) losses['G_loss'].append(G_loss.item()) print('Iteration [{:4d}/{:4d}] | D_real_loss: {:6.4f} | D_fake_loss: {:6.4f} | G_loss: {:6.4f}'.format( iteration, total_train_iters, D_real_loss.item(), D_fake_loss.item(), G_loss.item())) # Save the generated samples if iteration % opts.sample_every == 0: gan_save_samples(G, fixed_noise, iteration, opts) # Save the model parameters if iteration % opts.checkpoint_every == 0: gan_checkpoint(iteration, G, D, opts) except KeyboardInterrupt: print('Exiting early from training.') return G, D plt.figure() plt.plot(losses['iteration'], losses['D_real_loss'], label='D_real') plt.plot(losses['iteration'], losses['D_fake_loss'], label='D_fake') plt.plot(losses['iteration'], losses['G_loss'], label='G') plt.legend() plt.savefig(os.path.join(opts.sample_dir, 'losses.png')) plt.close() return G, D
_____no_output_____
MIT
assets/assignments/a4_dcgan.ipynb
uoft-csc413/2022
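For reference, here is one common way to write the missing loss terms, expressed as standalone helper functions rather than the assignment's official fill-in. The function names are hypothetical; the regular GAN variant uses the provided `BCEWithLogitsLoss` with targets of 1 for real and 0 for fake images, and the least-squares variant penalizes the squared distance from those targets. The gradient penalty `gp` is passed in as computed in the loops above.

```python
# Hedged sketch of the missing loss computations (not the official solution).
# Assumes D, G, real_images, adversarial_loss, opts and gp are in scope as above.

def regular_gan_losses(D, G, real_images, adversarial_loss, opts, gp=0.0):
    ones = torch.ones(real_images.shape[0], device=real_images.device)
    zeros = torch.zeros(real_images.shape[0], device=real_images.device)

    # Discriminator: real images should be classified as 1, fakes as 0.
    D_real_loss = adversarial_loss(D(real_images), ones)
    noise = sample_noise(real_images.shape[0], opts.noise_size)
    fake_images = G(noise)
    D_fake_loss = adversarial_loss(D(fake_images.detach()), zeros)
    D_total_loss = D_real_loss + D_fake_loss + gp

    # Generator: make the discriminator label fresh fakes as real.
    noise = sample_noise(real_images.shape[0], opts.noise_size)
    G_loss = adversarial_loss(D(G(noise)), ones)
    return D_total_loss, G_loss


def least_squares_gan_losses(D, G, real_images, opts, gp=0.0):
    # Discriminator: push D(real) toward 1 and D(fake) toward 0.
    D_real_loss = torch.mean((D(real_images) - 1) ** 2)
    noise = sample_noise(real_images.shape[0], opts.noise_size)
    fake_images = G(noise)
    D_fake_loss = torch.mean(D(fake_images.detach()) ** 2)
    D_total_loss = D_real_loss + D_fake_loss + gp

    # Generator: push D(fake) toward 1.
    noise = sample_noise(real_images.shape[0], opts.noise_size)
    G_loss = torch.mean((D(G(noise)) - 1) ** 2)
    return D_total_loss, G_loss
```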
**[Your Task]** Training

Download dataset
######################################################################
# Download Translation datasets
######################################################################
data_fpath = get_file(fname='emojis',
                      origin='http://www.cs.toronto.edu/~jba/emojis.tar.gz',
                      untar=True)
_____no_output_____
MIT
assets/assignments/a4_dcgan.ipynb
uoft-csc413/2022
Train DCGAN
SEED = 11

# Set the random seed manually for reproducibility.
np.random.seed(SEED)
torch.manual_seed(SEED)
if torch.cuda.is_available():
    torch.cuda.manual_seed(SEED)

args = AttrDict()
args_dict = {
    'image_size': 32,
    'g_conv_dim': 32,
    'd_conv_dim': 64,
    'noise_size': 100,
    'num_workers': 0,
    'train_iters': 20000,
    'X': 'Apple',  # options: 'Windows' / 'Apple'
    'Y': None,
    'lr': 0.00003,
    'beta1': 0.5,
    'beta2': 0.999,
    'batch_size': 32,
    'checkpoint_dir': 'results/checkpoints_gan_gp1_lr3e-5',
    'sample_dir': 'results/samples_gan_gp1_lr3e-5',
    'load': None,
    'log_step': 200,
    'sample_every': 200,
    'checkpoint_every': 1000,
    'spectral_norm': False,
    'gradient_penalty': True,
    'least_squares_gan': False,
    'd_train_iters': 1
}
args.update(args_dict)

print_opts(args)
G, D = train(args)

generate_gif("results/samples_gan_gp1_lr3e-5")
_____no_output_____
MIT
assets/assignments/a4_dcgan.ipynb
uoft-csc413/2022
Download your output
!zip -r /content/csc413/a4/results/samples.zip /content/csc413/a4/results/samples_gan_gp1_lr3e-5

from google.colab import files
files.download("/content/csc413/a4/results/samples.zip")
_____no_output_____
MIT
assets/assignments/a4_dcgan.ipynb
uoft-csc413/2022
Do some cleaning and reformatting:
df.drop(df.columns[df.columns.str.contains('unnamed', case=False)], axis=1, inplace=True)
df = df[['arrival', 'choice']]
df['arrival'].replace({9.0: 8.6, 9.1: 8.7}, inplace=True)
df.head()

fig, ax = plt.subplots()
fig.set_size_inches(6.7, 1.2)
fig = sns.regplot(x='arrival', y='choice', data=df, scatter_kws={"color": "white"},
                  ci=95, n_boot=10000, logistic=True, ax=ax)
plt.setp(fig.collections[1], alpha=1)  # setting translucency of CI to zero
fig.set(xlim=(8.2, 8.7))
fig.axis('off')
# plt.rcParams['figure.figsize'] = (6.7, .2)
plt.rcParams["font.family"] = "sans-serif"

PLOTS_DIR = '../plots'
if not os.path.exists(PLOTS_DIR):
    os.makedirs(PLOTS_DIR)

plt.savefig(os.path.join(PLOTS_DIR, 'fig5_logit_all.png'), bbox_inches='tight', transparent=True, dpi=300)
plt.savefig(os.path.join(PLOTS_DIR, 'fig5_logit_all.pdf'), transparent=True, dpi=300)
sns.despine()
_____no_output_____
MIT
python/fig5_logit_all.ipynb
thomasnicolet/Paper_canteen_dilemma
Initial Setup
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import itertools
import os
import math
import string
import re

import numpy as np
import pandas as pd
import tensorflow as tf
import matplotlib.pyplot as plt
import helper
import pickle

import keras
from keras.models import Sequential, load_model
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D, Conv1D, MaxPooling1D

layers = keras.layers
Using TensorFlow backend.
MIT
train_result/ml_ee_xxl_data_training_step7.ipynb
cufezhusy/mlXVA
Training Parameters

We'll set the hyperparameters for training our model. If you understand what they mean, feel free to play around; otherwise, we recommend keeping the defaults for your first run 🙂
# Hyperparams if GPU is available
if tf.test.is_gpu_available():
    print('---- We are using GPU now ----')
    # GPU
    BATCH_SIZE = 512  # Number of examples used in each iteration
    EPOCHS = 80       # Number of passes through entire dataset
# Hyperparams for CPU training
else:
    print('---- We are using CPU now ----')
    # CPU
    BATCH_SIZE = 256
    EPOCHS = 100
---- We are using CPU now ----
MIT
train_result/ml_ee_xxl_data_training_step7.ipynb
cufezhusy/mlXVA
Data

The dataset is already attached to your workspace (if you want to attach your own data, [check out our docs](https://docs.floydhub.com/guides/workspace/attaching-floydhub-datasets)).

Let's take a look at the data.
data_path = '/floyd/input/gengduoshuju/'  # ADD path/to/dataset

Y = pickle.load(open(os.path.join(data_path, 'Y.pks'), "rb"))
X = pickle.load(open(os.path.join(data_path, 'X.pks'), "rb"))
X = X.reshape((X.shape[0], X.shape[1], 1))

print("Size of X :" + str(X.shape))
print("Size of Y :" + str(Y.shape))

X = X.astype(np.float64)
X = np.nan_to_num(X)
Size of X :(412038, 240, 1) Size of Y :(412038,)
MIT
train_result/ml_ee_xxl_data_training_step7.ipynb
cufezhusy/mlXVA
Data Preprocessing
X_train, X_test, Y_train_orig, Y_test_orig = helper.divide_data(X, Y)

print(Y.min())
print(Y.max())

num_classes = 332
Y_train = keras.utils.to_categorical(Y_train_orig, num_classes)
Y_test = keras.utils.to_categorical(Y_test_orig, num_classes)

print("number of training examples = " + str(X_train.shape[0]))
print("number of test examples = " + str(X_test.shape[0]))
print("X_train shape: " + str(X_train.shape))
print("Y_train shape: " + str(Y_train.shape))
print("X_test shape: " + str(X_test.shape))
print("Y_test shape: " + str(Y_test.shape))

input_shape = X_train.shape[1:]
print(input_shape)
(240, 1)
MIT
train_result/ml_ee_xxl_data_training_step7.ipynb
cufezhusy/mlXVA
Model definition The *Tokens per sentence* plot (see above) is useful for setting the `MAX_LEN` training hyperparameter.
# ===================================================================================
# Load the model that has already been trained
# ===================================================================================
model = load_model(r"floyd_model_xxl_data_ver8.h5")
_____no_output_____
MIT
train_result/ml_ee_xxl_data_training_step7.ipynb
cufezhusy/mlXVA
Model Training
opt = keras.optimizers.Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)

model.compile(loss='categorical_crossentropy',
              optimizer=opt,
              metrics=['accuracy'])
model.summary()

X_train = X_train.astype('float32')
X_test = X_test.astype('float32')

model.fit(X_train, Y_train,
          batch_size=BATCH_SIZE,
          epochs=EPOCHS,
          validation_data=(X_test, Y_test),
          shuffle=True)

model.save(r"floyd_model_xxl_data_ver9.h5")
print('Training is done!')
_________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv1d_1 (Conv1D) (None, 240, 16) 80 _________________________________________________________________ activation_1 (Activation) (None, 240, 16) 0 _________________________________________________________________ max_pooling1d_1 (MaxPooling1 (None, 120, 16) 0 _________________________________________________________________ conv1d_2 (Conv1D) (None, 120, 32) 2080 _________________________________________________________________ activation_2 (Activation) (None, 120, 32) 0 _________________________________________________________________ max_pooling1d_2 (MaxPooling1 (None, 60, 32) 0 _________________________________________________________________ conv1d_3 (Conv1D) (None, 60, 64) 8256 _________________________________________________________________ activation_3 (Activation) (None, 60, 64) 0 _________________________________________________________________ max_pooling1d_3 (MaxPooling1 (None, 30, 64) 0 _________________________________________________________________ conv1d_4 (Conv1D) (None, 30, 64) 16448 _________________________________________________________________ activation_4 (Activation) (None, 30, 64) 0 _________________________________________________________________ max_pooling1d_4 (MaxPooling1 (None, 15, 64) 0 _________________________________________________________________ conv1d_5 (Conv1D) (None, 15, 32) 8224 _________________________________________________________________ activation_5 (Activation) (None, 15, 32) 0 _________________________________________________________________ max_pooling1d_5 (MaxPooling1 (None, 8, 32) 0 _________________________________________________________________ flatten_1 (Flatten) (None, 256) 0 _________________________________________________________________ dense_1 (Dense) (None, 332) 85324 _________________________________________________________________ activation_6 (Activation) (None, 332) 0 ================================================================= Total params: 120,412 Trainable params: 120,412 Non-trainable params: 0 _________________________________________________________________ Train on 403797 samples, validate on 8241 samples Epoch 1/100 403797/403797 [==============================] - 80s 197us/step - loss: 0.1270 - acc: 0.9520 - val_loss: 0.1445 - val_acc: 0.9475 Epoch 2/100 403797/403797 [==============================] - 78s 193us/step - loss: 0.1261 - acc: 0.9524 - val_loss: 0.1427 - val_acc: 0.9485 Epoch 3/100 403797/403797 [==============================] - 78s 193us/step - loss: 0.1267 - acc: 0.9522 - val_loss: 0.1432 - val_acc: 0.9484 Epoch 4/100 403797/403797 [==============================] - 78s 194us/step - loss: 0.1293 - acc: 0.9516 - val_loss: 0.1461 - val_acc: 0.9472 Epoch 5/100 403797/403797 [==============================] - 78s 193us/step - loss: 0.1217 - acc: 0.9544 - val_loss: 0.1377 - val_acc: 0.9509 Epoch 6/100 403797/403797 [==============================] - 78s 192us/step - loss: 0.1269 - acc: 0.9527 - val_loss: 0.1720 - val_acc: 0.9379 Epoch 7/100 403797/403797 [==============================] - 78s 192us/step - loss: 0.1263 - acc: 0.9526 - val_loss: 0.1432 - val_acc: 0.9453 Epoch 8/100 403797/403797 [==============================] - 79s 195us/step - loss: 0.1265 - acc: 0.9527 - val_loss: 0.1417 - val_acc: 0.9495 Epoch 9/100 403797/403797 [==============================] - 78s 194us/step - loss: 0.1267 - acc: 0.9524 - val_loss: 0.1412 - 
val_acc: 0.9470 Epoch 10/100 403797/403797 [==============================] - 77s 192us/step - loss: 0.1248 - acc: 0.9531 - val_loss: 0.1595 - val_acc: 0.9414 Epoch 11/100 403797/403797 [==============================] - 78s 192us/step - loss: 0.1245 - acc: 0.9531 - val_loss: 0.1502 - val_acc: 0.9461 Epoch 12/100 403797/403797 [==============================] - 77s 192us/step - loss: 0.1252 - acc: 0.9530 - val_loss: 0.1338 - val_acc: 0.9498 Epoch 13/100 403797/403797 [==============================] - 78s 193us/step - loss: 0.1242 - acc: 0.9536 - val_loss: 0.1682 - val_acc: 0.9398 Epoch 14/100 403797/403797 [==============================] - 79s 196us/step - loss: 0.1249 - acc: 0.9532 - val_loss: 0.1441 - val_acc: 0.9488 Epoch 15/100 403797/403797 [==============================] - 79s 196us/step - loss: 0.1273 - acc: 0.9524 - val_loss: 0.1328 - val_acc: 0.9513 Epoch 16/100 403797/403797 [==============================] - 79s 195us/step - loss: 0.1199 - acc: 0.9551 - val_loss: 0.1508 - val_acc: 0.9466 Epoch 17/100 403797/403797 [==============================] - 79s 197us/step - loss: 0.1234 - acc: 0.9538 - val_loss: 0.1425 - val_acc: 0.9469 Epoch 18/100 403797/403797 [==============================] - 79s 197us/step - loss: 0.1257 - acc: 0.9528 - val_loss: 0.1497 - val_acc: 0.9467 Epoch 19/100 403797/403797 [==============================] - 79s 195us/step - loss: 0.1211 - acc: 0.9541 - val_loss: 0.1484 - val_acc: 0.9442 Epoch 20/100 403797/403797 [==============================] - 78s 193us/step - loss: 0.1250 - acc: 0.9530 - val_loss: 0.1347 - val_acc: 0.9502 Epoch 21/100 403797/403797 [==============================] - 78s 194us/step - loss: 0.1282 - acc: 0.9522 - val_loss: 0.1386 - val_acc: 0.9504 Epoch 22/100 403797/403797 [==============================] - 77s 191us/step - loss: 0.1174 - acc: 0.9554 - val_loss: 0.1496 - val_acc: 0.9464 Epoch 23/100 403797/403797 [==============================] - 77s 191us/step - loss: 0.1220 - acc: 0.9541 - val_loss: 0.1403 - val_acc: 0.9478 Epoch 24/100 403797/403797 [==============================] - 78s 193us/step - loss: 0.1219 - acc: 0.9542 - val_loss: 0.1309 - val_acc: 0.9529 Epoch 25/100 403797/403797 [==============================] - 79s 195us/step - loss: 0.1216 - acc: 0.9544 - val_loss: 0.1484 - val_acc: 0.9450 Epoch 26/100 403797/403797 [==============================] - 78s 193us/step - loss: 0.1208 - acc: 0.9541 - val_loss: 0.1455 - val_acc: 0.9456 Epoch 27/100 403797/403797 [==============================] - 78s 192us/step - loss: 0.1211 - acc: 0.9544 - val_loss: 0.1474 - val_acc: 0.9447 Epoch 28/100 403797/403797 [==============================] - 78s 194us/step - loss: 0.1183 - acc: 0.9555 - val_loss: 0.1374 - val_acc: 0.9487 Epoch 37/100 403797/403797 [==============================] - 78s 193us/step - loss: 0.1224 - acc: 0.9540 - val_loss: 0.1818 - val_acc: 0.9357 Epoch 38/100 403797/403797 [==============================] - 77s 191us/step - loss: 0.1188 - acc: 0.9551 - val_loss: 0.1339 - val_acc: 0.9510 Epoch 39/100 403797/403797 [==============================] - 77s 191us/step - loss: 0.1184 - acc: 0.9555 - val_loss: 0.1432 - val_acc: 0.9472 Epoch 40/100 50688/403797 [==>...........................] - ETA: 1:08 - loss: 0.1228 - acc: 0.9541
MIT
train_result/ml_ee_xxl_data_training_step7.ipynb
cufezhusy/mlXVA
$$\newcommand\bs[1]{\boldsymbol{#1}}$$

This content is part of a series following the chapter 2 on linear algebra from the [Deep Learning Book](http://www.deeplearningbook.org/) by Goodfellow, I., Bengio, Y., and Courville, A. (2016). It aims to provide intuitions/drawings/python code on mathematical theories and is constructed as my understanding of these concepts. You can check the syllabus in the [introduction post](https://hadrienj.github.io/posts/Deep-Learning-Book-Series-Introduction/).

Introduction

This is the first post/notebook of a series following the syllabus of the [linear algebra chapter from the Deep Learning Book](http://www.deeplearningbook.org/contents/linear_algebra.html) by Goodfellow et al. This work is a collection of thoughts/details/developments/examples I made while reading this chapter. It is designed to help you go through their introduction to linear algebra. For more details about this series and the syllabus, please see the [introduction post](https://hadrienj.github.io/posts/Deep-Learning-Book-Series-Introduction/).

This first chapter is quite light and concerns the basic elements used in linear algebra and their definitions. It also introduces important functions in Python/Numpy that we will use all along this series. It will explain how to create and use vectors and matrices through examples.

2.1 Scalars, Vectors, Matrices and Tensors

Let's start with some basic definitions:

- A scalar is a single number
- A vector is an array of numbers.

$$\bs{x} =\begin{bmatrix} x_1 \\\\ x_2 \\\\ \cdots \\\\ x_n\end{bmatrix}$$

- A matrix is a 2-D array

$$\bs{A}=\begin{bmatrix} A_{1,1} & A_{1,2} & \cdots & A_{1,n} \\\\ A_{2,1} & A_{2,2} & \cdots & A_{2,n} \\\\ \cdots & \cdots & \cdots & \cdots \\\\ A_{m,1} & A_{m,2} & \cdots & A_{m,n}\end{bmatrix}$$

- A tensor is an $n$-dimensional array with $n>2$

We will follow the conventions used in the [Deep Learning Book](http://www.deeplearningbook.org/):

- scalars are written in lowercase and italics. For instance: $n$
- vectors are written in lowercase, italics and bold type. For instance: $\bs{x}$
- matrices are written in uppercase, italics and bold. For instance: $\bs{X}$

Example 1. Create a vector with Python and Numpy

*Coding tip*: Unlike the `matrix()` function, which necessarily creates $2$-dimensional matrices, you can create $n$-dimensional arrays with the `array()` function. The main advantage of `matrix()` is its useful methods (conjugate transpose, inverse, matrix operations...). We will use the `array()` function in this series.

We will start by creating a vector. This is just a $1$-dimensional array:
import numpy as np

x = np.array([1, 2, 3, 4])
x
_____no_output_____
MIT
2.1 Scalars, Vectors, Matrices and Tensors/2.1 Scalars Vectors Matrices and Tensors.ipynb
PeterFogh/deepLearningBook-Notes
Example 2. Create a (3x2) matrix with nested bracketsThe `array()` function can also create $2$-dimensional arrays with nested brackets:
A = np.array([[1, 2], [3, 4], [5, 6]])
A
_____no_output_____
MIT
2.1 Scalars, Vectors, Matrices and Tensors/2.1 Scalars Vectors Matrices and Tensors.ipynb
PeterFogh/deepLearningBook-Notes
ShapeThe shape of an array (that is to say its dimensions) tells you the number of values for each dimension. For a $2$-dimensional array it will give you the number of rows and the number of columns. Let's find the shape of our preceding $2$-dimensional array `A`. Since `A` is a Numpy array (it was created with the `array()` function) you can access its shape with:
A.shape
_____no_output_____
MIT
2.1 Scalars, Vectors, Matrices and Tensors/2.1 Scalars Vectors Matrices and Tensors.ipynb
PeterFogh/deepLearningBook-Notes
We can see that $\bs{A}$ has 3 rows and 2 columns.Let's check the shape of our first vector:
x.shape
_____no_output_____
MIT
2.1 Scalars, Vectors, Matrices and Tensors/2.1 Scalars Vectors Matrices and Tensors.ipynb
PeterFogh/deepLearningBook-Notes
As expected, you can see that $\bs{x}$ has only one dimension. The number corresponds to the length of the array:
len(x)
_____no_output_____
MIT
2.1 Scalars, Vectors, Matrices and Tensors/2.1 Scalars Vectors Matrices and Tensors.ipynb
PeterFogh/deepLearningBook-Notes
TranspositionWith transposition you can convert a row vector to a column vector and vice versa:The transpose $\bs{A}^{\text{T}}$ of the matrix $\bs{A}$ corresponds to the mirrored axes. If the matrix is a square matrix (same number of columns and rows):If the matrix is not square the idea is the same:The superscript $^\text{T}$ is used for transposed matrices.$$\bs{A}=\begin{bmatrix} A_{1,1} & A_{1,2} \\\\ A_{2,1} & A_{2,2} \\\\ A_{3,1} & A_{3,2}\end{bmatrix}$$$$\bs{A}^{\text{T}}=\begin{bmatrix} A_{1,1} & A_{2,1} & A_{3,1} \\\\ A_{1,2} & A_{2,2} & A_{3,2}\end{bmatrix}$$The shape ($m \times n$) is inverted and becomes ($n \times m$). Example 3. Create a matrix A and transpose it
A = np.array([[1, 2], [3, 4], [5, 6]])
A

A_t = A.T
A_t
_____no_output_____
MIT
2.1 Scalars, Vectors, Matrices and Tensors/2.1 Scalars Vectors Matrices and Tensors.ipynb
PeterFogh/deepLearningBook-Notes
We can check the dimensions of the matrices:
A.shape
A_t.shape
_____no_output_____
MIT
2.1 Scalars, Vectors, Matrices and Tensors/2.1 Scalars Vectors Matrices and Tensors.ipynb
PeterFogh/deepLearningBook-Notes
We can see that the number of columns becomes the number of rows with transposition and vice versa. AdditionMatrices can be added if they have the same shape:$$\bs{A} + \bs{B} = \bs{C}$$Each cell of $\bs{A}$ is added to the corresponding cell of $\bs{B}$:$$\bs{A}_{i,j} + \bs{B}_{i,j} = \bs{C}_{i,j}$$$i$ is the row index and $j$ the column index.$$\begin{bmatrix} A_{1,1} & A_{1,2} \\\\ A_{2,1} & A_{2,2} \\\\ A_{3,1} & A_{3,2}\end{bmatrix}+\begin{bmatrix} B_{1,1} & B_{1,2} \\\\ B_{2,1} & B_{2,2} \\\\ B_{3,1} & B_{3,2}\end{bmatrix}=\begin{bmatrix} A_{1,1} + B_{1,1} & A_{1,2} + B_{1,2} \\\\ A_{2,1} + B_{2,1} & A_{2,2} + B_{2,2} \\\\ A_{3,1} + B_{3,1} & A_{3,2} + B_{3,2}\end{bmatrix}$$The shape of $\bs{A}$, $\bs{B}$ and $\bs{C}$ are identical. Let's check that in an example: Example 4. Create two matrices A and B and add themWith Numpy you can add matrices just as you would add vectors or scalars.
A = np.array([[1, 2], [3, 4], [5, 6]])
A

B = np.array([[2, 5], [7, 4], [4, 3]])
B

# Add matrices A and B
C = A + B
C
_____no_output_____
MIT
2.1 Scalars, Vectors, Matrices and Tensors/2.1 Scalars Vectors Matrices and Tensors.ipynb
PeterFogh/deepLearningBook-Notes
It is also possible to add a scalar to a matrix. This means adding this scalar to each cell of the matrix.$$\alpha+ \begin{bmatrix} A_{1,1} & A_{1,2} \\\\ A_{2,1} & A_{2,2} \\\\ A_{3,1} & A_{3,2}\end{bmatrix}=\begin{bmatrix} \alpha + A_{1,1} & \alpha + A_{1,2} \\\\ \alpha + A_{2,1} & \alpha + A_{2,2} \\\\ \alpha + A_{3,1} & \alpha + A_{3,2}\end{bmatrix}$$ Example 5. Add a scalar to a matrix
A

# Example: Add 4 to the matrix A
C = A + 4
C
_____no_output_____
MIT
2.1 Scalars, Vectors, Matrices and Tensors/2.1 Scalars Vectors Matrices and Tensors.ipynb
PeterFogh/deepLearningBook-Notes
BroadcastingNumpy can handle operations on arrays of different shapes. The smaller array will be extended to match the shape of the bigger one. The advantage is that this is done in `C` under the hood (like any vectorized operations in Numpy). Actually, we used broadcasting in the example 5. The scalar was converted in an array of same shape as $\bs{A}$.Here is another generic example:$$\begin{bmatrix} A_{1,1} & A_{1,2} \\\\ A_{2,1} & A_{2,2} \\\\ A_{3,1} & A_{3,2}\end{bmatrix}+\begin{bmatrix} B_{1,1} \\\\ B_{2,1} \\\\ B_{3,1}\end{bmatrix}$$is equivalent to$$\begin{bmatrix} A_{1,1} & A_{1,2} \\\\ A_{2,1} & A_{2,2} \\\\ A_{3,1} & A_{3,2}\end{bmatrix}+\begin{bmatrix} B_{1,1} & B_{1,1} \\\\ B_{2,1} & B_{2,1} \\\\ B_{3,1} & B_{3,1}\end{bmatrix}=\begin{bmatrix} A_{1,1} + B_{1,1} & A_{1,2} + B_{1,1} \\\\ A_{2,1} + B_{2,1} & A_{2,2} + B_{2,1} \\\\ A_{3,1} + B_{3,1} & A_{3,2} + B_{3,1}\end{bmatrix}$$where the ($3 \times 1$) matrix is converted to the right shape ($3 \times 2$) by copying the first column. Numpy will do that automatically if the shapes can match. Example 6. Add two matrices of different shapes
A = np.array([[1, 2], [3, 4], [5, 6]])
A

B = np.array([[2], [4], [6]])
B

# Broadcasting
C = A + B
C
_____no_output_____
MIT
2.1 Scalars, Vectors, Matrices and Tensors/2.1 Scalars Vectors Matrices and Tensors.ipynb
PeterFogh/deepLearningBook-Notes
`distance_transform_lin`

A variant of the standard distance transform where the distances are computed along a given axis rather than radially.
import numpy as np
import porespy as ps
import scipy.ndimage as spim
import matplotlib.pyplot as plt
_____no_output_____
MIT
examples/filters/reference/distance_transform_lin.ipynb
xu-kai-xu/porespy
The arguments and their defaults are:
import inspect
inspect.signature(ps.filters.distance_transform_lin)
_____no_output_____
MIT
examples/filters/reference/distance_transform_lin.ipynb
xu-kai-xu/porespy
`axis`

The axis along which the distances should be computed.
fig, ax = plt.subplots(1, 2, figsize=[12, 6])

im = ps.generators.blobs(shape=[500, 500], porosity=0.7)

axis = 0
dt = ps.filters.distance_transform_lin(im, axis=axis)
ax[0].imshow(dt/im)
ax[0].axis(False)
ax[0].set_title(f'axis = {axis}')

axis = 1
dt = ps.filters.distance_transform_lin(im, axis=axis)
ax[1].imshow(dt/im)
ax[1].axis(False)
ax[1].set_title(f'axis = {axis}');
_____no_output_____
MIT
examples/filters/reference/distance_transform_lin.ipynb
xu-kai-xu/porespy
`mode`

Whether the distances are computed from start to end, from end to start, or both.
fig, ax = plt.subplots(1, 3, figsize=[15, 5])

im = ps.generators.blobs(shape=[500, 500], porosity=0.7)

mode = 'forward'
dt = ps.filters.distance_transform_lin(im, mode=mode)
ax[0].imshow(dt/im)
ax[0].axis(False)
ax[0].set_title(f'mode = {mode}')

mode = 'reverse'
dt = ps.filters.distance_transform_lin(im, mode=mode)
ax[1].imshow(dt/im)
ax[1].axis(False)
ax[1].set_title(f'mode = {mode}')

mode = 'both'
dt = ps.filters.distance_transform_lin(im, mode=mode)
ax[2].imshow(dt/im)
ax[2].axis(False)
ax[2].set_title(f'mode = {mode}');
_____no_output_____
MIT
examples/filters/reference/distance_transform_lin.ipynb
xu-kai-xu/porespy
Develop and Register Model

In this notebook, we will go through the steps to load the MaskRCNN model and call the model to find the top predictions. We will then register the model in ACR using AzureML.

Note: Always make sure you don't have any lingering notebooks running (shut down previous notebooks). Otherwise they may cause GPU memory issues.
%reload_ext autoreload
%autoreload 2
%matplotlib inline

import torch
import torchvision
import numpy as np
from pathlib import *
from PIL import Image
from azureml.core.workspace import Workspace
from azureml.core.model import Model
from dotenv import set_key, find_dotenv
from testing_utilities import get_auth
import urllib

env_path = find_dotenv(raise_error_if_not_found=True)
_____no_output_____
MIT
object-detection-azureml/031_DevAndRegisterModel.ipynb
Bhaskers-Blu-Org2/deploy-MLmodels-on-iotedge
Model

We load a pretrained [**Mask R-CNN ResNet-50 FPN** object detection model](https://pytorch.org/blog/torchvision03/). This model is pretrained on the COCO train2017 dataset.
# use pretrained model: https://pytorch.org/blog/torchvision03/
model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=True)

# device = torch.device("cpu")
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
model.to(device)

url = "https://download.pytorch.org/models/maskrcnn_resnet50_fpn_coco-bf2d0c1e.pth"
urllib.request.urlretrieve(url, "maskrcnn_resnet50.pth")

img_path = "./test_image.jpg"
print(Image.open(img_path).size)

img = Image.open(img_path)
img = np.array(img.convert(mode='RGB'), dtype=np.float32)
img_tensor = torchvision.transforms.functional.to_tensor(img) / 255

model.eval()
with torch.no_grad():
    prediction = model([img_tensor.to(device)])
print(prediction)
_____no_output_____
MIT
object-detection-azureml/031_DevAndRegisterModel.ipynb
Bhaskers-Blu-Org2/deploy-MLmodels-on-iotedge
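The `prediction` returned by the torchvision detection models is a list with one dict per input image, with keys `boxes`, `labels`, `scores`, and, for Mask R-CNN, `masks`. A short sketch of how the top detections could be read out; the 0.5 confidence threshold is an arbitrary illustrative choice, not part of this notebook's pipeline.

```python
# Illustrative post-processing of the torchvision Mask R-CNN output.
# The 0.5 confidence threshold below is an arbitrary choice for illustration.
pred = prediction[0]
keep = pred['scores'] > 0.5

top_boxes = pred['boxes'][keep]     # (N, 4) boxes as [x1, y1, x2, y2]
top_labels = pred['labels'][keep]   # (N,) COCO category ids
top_scores = pred['scores'][keep]   # (N,) confidence scores
top_masks = pred['masks'][keep]     # (N, 1, H, W) soft masks in [0, 1]

for box, label, score in zip(top_boxes, top_labels, top_scores):
    print('label id {:d}, score {:.3f}, box {}'.format(int(label), float(score), box.tolist()))
```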
Register Model
# Get workspace
# Load existing workspace from the config file info.
ws = Workspace.from_config(auth=get_auth())
print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep="\n")

model = Model.register(
    model_path="maskrcnn_resnet50.pth",     # this points to a local file
    model_name="maskrcnn_resnet50_model",   # this is the name the model is registered as
    tags={"model": "dl", "framework": "maskrcnn"},
    description="torchvision maskrcnn_resnet50",
    workspace=ws,
)
print(model.name, model.description, model.version)

set_key(env_path, "model_version", str(model.version))
_____no_output_____
MIT
object-detection-azureml/031_DevAndRegisterModel.ipynb
Bhaskers-Blu-Org2/deploy-MLmodels-on-iotedge
!nvidia-smi
Sun Dec 6 06:17:07 2020 +-----------------------------------------------------------------------------+ | NVIDIA-SMI 455.45.01 Driver Version: 418.67 CUDA Version: 10.1 | |-------------------------------+----------------------+----------------------+ | GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC | | Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. | | | | MIG M. | |===============================+======================+======================| | 0 Tesla T4 Off | 00000000:00:04.0 Off | 0 | | N/A 39C P8 10W / 70W | 0MiB / 15079MiB | 0% Default | | | | ERR! | +-------------------------------+----------------------+----------------------+ +-----------------------------------------------------------------------------+ | Processes: | | GPU GI CI PID Type Process name GPU Memory | | ID ID Usage | |=============================================================================| | No running processes found | +-----------------------------------------------------------------------------+
MIT
TensorFI_Capsnet.ipynb
MahdiSajedi/TensorFI
Import TensorFlow 1.x (for Colab) and `os`
# set tensorflow version to 1
%tensorflow_version 1.x

# if you need to install a specific version:
# !pip install tensorflow-gpu==1.10.0

import os
_____no_output_____
MIT
TensorFI_Capsnet.ipynb
MahdiSajedi/TensorFI
**Download the modified git repo and change directory to `TensorFI`**
!git clone https://github.com/MahdiSajedi/TensorFI.git
os.chdir('TensorFI')
!pwd
%ls
fatal: destination path 'TensorFI' already exists and is not an empty directory. /content/TensorFI/TensorFI faultTypes.py fiLog.py __init__.py modifyGraph.py tensorFI.py fiConfig.py fiStats.py injectFault.py printGraph.py
MIT
TensorFI_Capsnet.ipynb
MahdiSajedi/TensorFI
Install the `TensorFI` pip package and run the `capsnet.py` file
!pip install tensorfi
!python ./Tests/capsnet.py
!pwd
/content/TensorFI
MIT
TensorFI_Capsnet.ipynb
MahdiSajedi/TensorFI
Artificial Intelligence Nanodegree

Voice User Interfaces

Project: Speech Recognition with Neural Networks

---

In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following blocks of code will require additional functionality which you must provide. Please be sure to read the instructions carefully!

> **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.

In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.

> **Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.

The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook.

---

Introduction

In this notebook, you will build a deep neural network that functions as part of an end-to-end automatic speech recognition (ASR) pipeline! Your completed pipeline will accept raw audio as input and return a predicted transcription of the spoken language. The full pipeline is summarized in the figure below.

- **STEP 1** is a pre-processing step that converts raw audio to one of two feature representations that are commonly used for ASR.
- **STEP 2** is an acoustic model which accepts audio features as input and returns a probability distribution over all potential transcriptions. After learning about the basic types of neural networks that are often used for acoustic modeling, you will engage in your own investigations, to design your own acoustic model!
- **STEP 3** in the pipeline takes the output from the acoustic model and returns a predicted transcription.

Feel free to use the links below to navigate the notebook:

- [The Data](#thedata)
- [**STEP 1**](#step1): Acoustic Features for Speech Recognition
- [**STEP 2**](#step2): Deep Neural Networks for Acoustic Modeling
  - [Model 0](#model0): RNN
  - [Model 1](#model1): RNN + TimeDistributed Dense
  - [Model 2](#model2): CNN + RNN + TimeDistributed Dense
  - [Model 3](#model3): Deeper RNN + TimeDistributed Dense
  - [Model 4](#model4): Bidirectional RNN + TimeDistributed Dense
  - [Models 5+](#model5)
  - [Compare the Models](#compare)
  - [Final Model](#final)
- [**STEP 3**](#step3): Obtain Predictions

The Data

We begin by investigating the dataset that will be used to train and evaluate your pipeline.
[LibriSpeech](http://www.danielpovey.com/files/2015_icassp_librispeech.pdf) is a large corpus of read English speech, designed for training and evaluating models for ASR. The dataset contains 1000 hours of speech derived from audiobooks. We will work with a small subset in this project, since larger-scale data would take a long while to train. However, after completing this project, if you are interested in exploring further, you are encouraged to work with more of the data that is provided [online](http://www.openslr.org/12/).

In the code cells below, you will use the `vis_train_features` module to visualize a training example. The supplied argument `index=0` tells the module to extract the first example in the training set. (You are welcome to change `index=0` to point to a different training example, if you like, but please **DO NOT** amend any other code in the cell.) The returned variables are:
- `vis_text` - transcribed text (label) for the training example.
- `vis_raw_audio` - raw audio waveform for the training example.
- `vis_mfcc_feature` - mel-frequency cepstral coefficients (MFCCs) for the training example.
- `vis_spectrogram_feature` - spectrogram for the training example.
- `vis_audio_path` - the file path to the training example.
from data_generator import vis_train_features

# extract label and audio features for a single training example
vis_text, vis_raw_audio, vis_mfcc_feature, vis_spectrogram_feature, vis_audio_path = vis_train_features()
There are 2136 total training examples.
Apache-2.0
vui_notebook.ipynb
shubhank-saxena/dnn-speech-recognizer
The following code cell visualizes the audio waveform for your chosen example, along with the corresponding transcript. You also have the option to play the audio in the notebook!
from IPython.display import Markdown, display
from data_generator import vis_train_features, plot_raw_audio
from IPython.display import Audio
%matplotlib inline

# plot audio signal
plot_raw_audio(vis_raw_audio)
# print length of audio signal
display(Markdown('**Shape of Audio Signal** : ' + str(vis_raw_audio.shape)))
# print transcript corresponding to audio clip
display(Markdown('**Transcript** : ' + str(vis_text)))
# play the audio file
Audio(vis_audio_path)
_____no_output_____
Apache-2.0
vui_notebook.ipynb
shubhank-saxena/dnn-speech-recognizer
STEP 1: Acoustic Features for Speech Recognition

For this project, you won't use the raw audio waveform as input to your model. Instead, we provide code that first performs a pre-processing step to convert the raw audio to a feature representation that has historically proven successful for ASR models. Your acoustic model will accept the feature representation as input.

In this project, you will explore two possible feature representations. _After completing the project_, if you'd like to read more about deep learning architectures that can accept raw audio input, you are encouraged to explore this [research paper](https://pdfs.semanticscholar.org/a566/cd4a8623d661a4931814d9dffc72ecbf63c4.pdf).

Spectrograms

The first option for an audio feature representation is the [spectrogram](https://www.youtube.com/watch?v=_FatxGN3vAM). In order to complete this project, you will **not** need to dig deeply into the details of how a spectrogram is calculated; but, if you are curious, the code for calculating the spectrogram was borrowed from [this repository](https://github.com/baidu-research/ba-dls-deepspeech). The implementation appears in the `utils.py` file in your repository.

The code that we give you returns the spectrogram as a 2D tensor, where the first (_vertical_) dimension indexes time, and the second (_horizontal_) dimension indexes frequency. To speed the convergence of your algorithm, we have also normalized the spectrogram. (You can see this quickly in the visualization below by noting that the mean value hovers around zero, and most entries in the tensor assume values close to zero.)
from data_generator import plot_spectrogram_feature

# plot normalized spectrogram
plot_spectrogram_feature(vis_spectrogram_feature)
# print shape of spectrogram
display(Markdown('**Shape of Spectrogram** : ' + str(vis_spectrogram_feature.shape)))
_____no_output_____
Apache-2.0
vui_notebook.ipynb
shubhank-saxena/dnn-speech-recognizer
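To make the normalization claim above concrete, here is a minimal sketch of a numeric check; it assumes only the `vis_spectrogram_feature` array loaded earlier and NumPy.

```python
import numpy as np

# Sanity-check the normalization of the spectrogram feature: the mean should
# hover around zero and most entries should sit within one standard deviation.
print('spectrogram shape  :', vis_spectrogram_feature.shape)
print('mean               : {:.4f}'.format(np.mean(vis_spectrogram_feature)))
print('std                : {:.4f}'.format(np.std(vis_spectrogram_feature)))
print('fraction in [-1, 1]: {:.4f}'.format(np.mean(np.abs(vis_spectrogram_feature) <= 1.0)))
```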
Mel-Frequency Cepstral Coefficients (MFCCs)

The second option for an audio feature representation is [MFCCs](https://en.wikipedia.org/wiki/Mel-frequency_cepstrum). You do **not** need to dig deeply into the details of how MFCCs are calculated, but if you would like more information, you are welcome to peruse the [documentation](https://github.com/jameslyons/python_speech_features) of the `python_speech_features` Python package. Just as with the spectrogram features, the MFCCs are normalized in the supplied code.

The main idea behind MFCC features is the same as spectrogram features: at each time window, the MFCC feature yields a feature vector that characterizes the sound within the window. Note that the MFCC feature is much lower-dimensional than the spectrogram feature, which could help an acoustic model to avoid overfitting to the training dataset.
from data_generator import plot_mfcc_feature

# plot normalized MFCC
plot_mfcc_feature(vis_mfcc_feature)
# print shape of MFCC
display(Markdown('**Shape of MFCC** : ' + str(vis_mfcc_feature.shape)))
_____no_output_____
Apache-2.0
vui_notebook.ipynb
shubhank-saxena/dnn-speech-recognizer
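For reference, here is a minimal sketch of how MFCCs could be computed directly with the `python_speech_features` package linked above. It assumes 16 kHz audio (which is what LibriSpeech provides) and the default 13 cepstral coefficients, which is why the MFCC feature is so much lower-dimensional than the 161-bin spectrogram; the supplied code performs this step (plus normalization) for you.

```python
from python_speech_features import mfcc

# Compute 13 MFCCs from the raw waveform visualized earlier.
# Assumes the audio is sampled at 16 kHz.
raw_mfcc = mfcc(vis_raw_audio, samplerate=16000, numcep=13)
print('MFCC shape:', raw_mfcc.shape)   # (number of time windows, 13)
```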
When you construct your pipeline, you will be able to choose to use either spectrogram or MFCC features. If you would like to see different implementations that make use of MFCCs and/or spectrograms, please check out the links below:
- This [repository](https://github.com/baidu-research/ba-dls-deepspeech) uses spectrograms.
- This [repository](https://github.com/mozilla/DeepSpeech) uses MFCCs.
- This [repository](https://github.com/buriburisuri/speech-to-text-wavenet) also uses MFCCs.
- This [repository](https://github.com/pannous/tensorflow-speech-recognition/blob/master/speech_data.py) experiments with raw audio, spectrograms, and MFCCs as features.

STEP 2: Deep Neural Networks for Acoustic Modeling

In this section, you will experiment with various neural network architectures for acoustic modeling. You will begin by training five relatively simple architectures. **Model 0** is provided for you. You will write code to implement **Models 1**, **2**, **3**, and **4**. If you would like to experiment further, you are welcome to create and train more models under the **Models 5+** heading. All models will be specified in the `sample_models.py` file. After importing the `sample_models` module, you will train your architectures in the notebook.

After experimenting with the five simple architectures, you will have the opportunity to compare their performance. Based on your findings, you will construct a deeper architecture that is designed to outperform all of the shallow models.

For your convenience, we have designed the notebook so that each model can be specified and trained on separate occasions. That is, say you decide to take a break from the notebook after training **Model 1**. Then, you need not re-execute all prior code cells in the notebook before training **Model 2**. You need only re-execute the code cell below, marked with **`RUN THIS CODE CELL IF YOU ARE RESUMING THE NOTEBOOK AFTER A BREAK`**, before transitioning to the code cells corresponding to **Model 2**.
#####################################################################
# RUN THIS CODE CELL IF YOU ARE RESUMING THE NOTEBOOK AFTER A BREAK #
#####################################################################

# allocate 40% of GPU memory (if you like, feel free to change this)
from keras.backend.tensorflow_backend import set_session
import tensorflow as tf
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.4
set_session(tf.Session(config=config))

# watch for any changes in the sample_models module, and reload it automatically
%load_ext autoreload
%autoreload 2
# import NN architectures for speech recognition
from sample_models import *
# import function for training acoustic model
from train_utils import train_model
Using TensorFlow backend. /home/pjordan/anaconda3/envs/dnn-speech-recognizer/lib/python3.5/site-packages/h5py/__init__.py:34: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`. from ._conv import register_converters as _register_converters
Apache-2.0
vui_notebook.ipynb
shubhank-saxena/dnn-speech-recognizer
Model 0: RNN

Given their effectiveness in modeling sequential data, the first acoustic model you will use is an RNN. As shown in the figure below, the RNN we supply to you will take the time sequence of audio features as input.

At each time step, the speaker pronounces one of 28 possible characters, including each of the 26 letters in the English alphabet, along with a space character (" "), and an apostrophe (').

The output of the RNN at each time step is a vector of probabilities with 29 entries, where the $i$-th entry encodes the probability that the $i$-th character is spoken in the time sequence. (The extra 29th character is an empty "character" used to pad training examples within batches containing uneven lengths.) If you would like to peek under the hood at how characters are mapped to indices in the probability vector, look at the `char_map.py` file in the repository. The figure below shows an equivalent, rolled depiction of the RNN that shows the output layer in greater detail.

The model has already been specified for you in Keras. To import it, you need only run the code cell below.
model_0 = simple_rnn_model(input_dim=161) # change to 13 if you would like to use MFCC features
_________________________________________________________________ Layer (type) Output Shape Param # ================================================================= the_input (InputLayer) (None, None, 161) 0 _________________________________________________________________ rnn (GRU) (None, None, 29) 16617 _________________________________________________________________ softmax (Activation) (None, None, 29) 0 ================================================================= Total params: 16,617 Trainable params: 16,617 Non-trainable params: 0 _________________________________________________________________ None
Apache-2.0
vui_notebook.ipynb
shubhank-saxena/dnn-speech-recognizer
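The real character-to-index mapping lives in `char_map.py`; the sketch below only illustrates the idea (28 characters plus one reserved CTC blank), and the particular indices it assigns are hypothetical.

```python
import string

# Illustrative mapping only -- see char_map.py for the project's real one.
# 26 letters + space + apostrophe = 28 characters; one extra index is
# reserved for the blank "character" used to pad uneven batches.
index_map = {0: ' ', 1: "'"}
index_map.update({i + 2: ch for i, ch in enumerate(string.ascii_lowercase)})
char_map = {ch: i for i, ch in index_map.items()}

print(len(index_map), 'characters + 1 blank =', len(index_map) + 1, 'softmax outputs')
print(''.join(index_map[i] for i in [9, 6, 13, 13, 16]))  # 'hello' under this toy mapping
```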
As explored in the lesson, you will train the acoustic model with the [CTC loss](http://www.cs.toronto.edu/~graves/icml_2006.pdf) criterion. Custom loss functions take a bit of hacking in Keras, and so we have implemented the CTC loss function for you, so that you can focus on trying out as many deep learning architectures as possible :). If you'd like to peek at the implementation details, look at the `add_ctc_loss` function within the `train_utils.py` file in the repository.

To train your architecture, you will use the `train_model` function within the `train_utils` module; it has already been imported in one of the above code cells. The `train_model` function takes three **required** arguments:
- `input_to_softmax` - a Keras model instance.
- `pickle_path` - the name of the pickle file where the loss history will be saved.
- `save_model_path` - the name of the HDF5 file where the model will be saved.

If we have already supplied values for `input_to_softmax`, `pickle_path`, and `save_model_path`, please **DO NOT** modify these values.

There are several **optional** arguments that allow you to have more control over the training process. You are welcome to, but not required to, supply your own values for these arguments.
- `minibatch_size` - the size of the minibatches that are generated while training the model (default: `20`).
- `spectrogram` - Boolean value dictating whether spectrogram (`True`) or MFCC (`False`) features are used for training (default: `True`).
- `mfcc_dim` - the size of the feature dimension to use when generating MFCC features (default: `13`).
- `optimizer` - the Keras optimizer used to train the model (default: `SGD(lr=0.02, decay=1e-6, momentum=0.9, nesterov=True, clipnorm=5)`).
- `epochs` - the number of epochs to use to train the model (default: `20`). If you choose to modify this parameter, make sure that it is *at least* 20.
- `verbose` - controls the verbosity of the training output in the `model.fit_generator` method (default: `1`).
- `sort_by_duration` - Boolean value dictating whether the training and validation sets are sorted by (increasing) duration before the start of the first epoch (default: `False`).

The `train_model` function defaults to using spectrogram features; if you choose to use these features, note that the acoustic model in `simple_rnn_model` should have `input_dim=161`. Otherwise, if you choose to use MFCC features, the acoustic model should have `input_dim=13`.

We have chosen to use `GRU` units in the supplied RNN. If you would like to experiment with `LSTM` or `SimpleRNN` cells, feel free to do so here. If you change the `GRU` units to `SimpleRNN` cells in `simple_rnn_model`, you may notice that the loss quickly becomes undefined (`nan`) - you are strongly encouraged to check this for yourself! This is due to the [exploding gradients problem](http://www.wildml.com/2015/10/recurrent-neural-networks-tutorial-part-3-backpropagation-through-time-and-vanishing-gradients/). We have already implemented [gradient clipping](https://arxiv.org/pdf/1211.5063.pdf) in your optimizer to help you avoid this issue.

__IMPORTANT NOTE:__ If you notice that your gradient has exploded in any of the models below, feel free to explore more with gradient clipping (the `clipnorm` argument in your optimizer) or swap out any `SimpleRNN` cells for `LSTM` or `GRU` cells. You can also try restarting the kernel to restart the training process.
train_model(input_to_softmax=model_0, pickle_path='model_0.pickle', save_model_path='model_0.h5', optimizer=SGD(lr=0.02, decay=1e-6, momentum=0.9, nesterov=True, clipnorm=1), spectrogram=True) # change to False if you would like to use MFCC features
Epoch 1/20 106/106 [==============================] - 116s - loss: 962.2045 - val_loss: 746.4123 Epoch 2/20 106/106 [==============================] - 111s - loss: 757.1928 - val_loss: 729.0466 Epoch 3/20 106/106 [==============================] - 116s - loss: 753.0298 - val_loss: 730.4964 Epoch 4/20 106/106 [==============================] - 115s - loss: 750.8956 - val_loss: 721.6433 Epoch 5/20 106/106 [==============================] - 115s - loss: 751.6414 - val_loss: 726.6612 Epoch 6/20 106/106 [==============================] - 115s - loss: 750.7420 - val_loss: 727.9034 Epoch 7/20 106/106 [==============================] - 112s - loss: 750.2763 - val_loss: 729.9839 Epoch 8/20 106/106 [==============================] - 116s - loss: 751.5226 - val_loss: 723.4622 Epoch 9/20 106/106 [==============================] - 117s - loss: 750.4366 - val_loss: 721.1129 Epoch 10/20 106/106 [==============================] - 117s - loss: 751.0709 - val_loss: 733.4978 Epoch 11/20 106/106 [==============================] - 116s - loss: 751.7690 - val_loss: 725.5819 Epoch 12/20 106/106 [==============================] - 117s - loss: 750.4331 - val_loss: 728.1983 Epoch 13/20 106/106 [==============================] - 116s - loss: 750.6872 - val_loss: 721.3921 Epoch 14/20 106/106 [==============================] - 117s - loss: 750.7719 - val_loss: 723.6158 Epoch 15/20 106/106 [==============================] - 117s - loss: 749.6198 - val_loss: 728.7696 Epoch 16/20 106/106 [==============================] - 115s - loss: 750.4491 - val_loss: 723.0323 Epoch 17/20 106/106 [==============================] - 116s - loss: 750.8921 - val_loss: 725.6289 Epoch 18/20 106/106 [==============================] - 116s - loss: 750.8845 - val_loss: 725.6971 Epoch 19/20 106/106 [==============================] - 116s - loss: 750.1892 - val_loss: 722.8667 Epoch 20/20 106/106 [==============================] - 117s - loss: 750.7994 - val_loss: 724.6980
Apache-2.0
vui_notebook.ipynb
shubhank-saxena/dnn-speech-recognizer
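For readers curious about the CTC wiring mentioned above, the following is a generic sketch of the usual Keras pattern: the loss is computed inside the graph with a `Lambda` layer, because Keras loss functions only receive `y_true` and `y_pred`. The repository's own `add_ctc_loss` in `train_utils.py` follows the same idea but may differ in detail.

```python
from keras import backend as K
from keras.layers import Input, Lambda
from keras.models import Model

def ctc_lambda_func(args):
    # unpack the tensors and let the backend compute per-sample CTC loss
    y_pred, labels, input_length, label_length = args
    return K.ctc_batch_cost(labels, y_pred, input_length, label_length)

def add_ctc_loss_sketch(input_to_softmax):
    # extra inputs carrying the label sequences and the true lengths
    labels = Input(name='the_labels', shape=(None,), dtype='float32')
    input_lengths = Input(name='input_length', shape=(1,), dtype='int64')
    label_lengths = Input(name='label_length', shape=(1,), dtype='int64')
    loss_out = Lambda(ctc_lambda_func, output_shape=(1,), name='ctc')(
        [input_to_softmax.output, labels, input_lengths, label_lengths])
    return Model(inputs=[input_to_softmax.input, labels, input_lengths, label_lengths],
                 outputs=loss_out)
```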
(IMPLEMENTATION) Model 1: RNN + TimeDistributed Dense

Read about the [TimeDistributed](https://keras.io/layers/wrappers/) wrapper and the [BatchNormalization](https://keras.io/layers/normalization/) layer in the Keras documentation. For your next architecture, you will add [batch normalization](https://arxiv.org/pdf/1510.01378.pdf) to the recurrent layer to reduce training times. The `TimeDistributed` layer will be used to find more complex patterns in the dataset. The unrolled snapshot of the architecture is depicted below.

The next figure shows an equivalent, rolled depiction of the RNN that shows the (`TimeDistributed`) dense and output layers in greater detail.

Use your research to complete the `rnn_model` function within the `sample_models.py` file. The function should specify an architecture that satisfies the following requirements:
- The first layer of the neural network should be an RNN (`SimpleRNN`, `LSTM`, or `GRU`) that takes the time sequence of audio features as input. We have added `GRU` units for you, but feel free to change `GRU` to `SimpleRNN` or `LSTM`, if you like!
- Whereas the architecture in `simple_rnn_model` treated the RNN output as the final layer of the model, you will use the output of your RNN as a hidden layer. Use `TimeDistributed` to apply a `Dense` layer to each of the time steps in the RNN output. Ensure that each `Dense` layer has `output_dim` units.

Use the code cell below to load your model into the `model_1` variable. Use a value for `input_dim` that matches your chosen audio features, and feel free to change the values for `units` and `activation` to tweak the behavior of your recurrent layer.
model_1 = rnn_model(input_dim=161, # change to 13 if you would like to use MFCC features
                    units=246,
                    activation='relu',
                    dropout_rate=0.0)
_________________________________________________________________ Layer (type) Output Shape Param # ================================================================= the_input (InputLayer) (None, None, 161) 0 _________________________________________________________________ rnn (GRU) (None, None, 246) 301104 _________________________________________________________________ batch_normalization_10 (Batc (None, None, 246) 984 _________________________________________________________________ time_distributed_11 (TimeDis (None, None, 29) 7163 _________________________________________________________________ softmax (Activation) (None, None, 29) 0 ================================================================= Total params: 309,251 Trainable params: 308,759 Non-trainable params: 492 _________________________________________________________________ None
Apache-2.0
vui_notebook.ipynb
shubhank-saxena/dnn-speech-recognizer
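For orientation only, here is one possible shape for `rnn_model`, assembled from the layers discussed above (GRU, `BatchNormalization`, `TimeDistributed(Dense)`). The graded implementation belongs in `sample_models.py` and evidently also supports a `dropout_rate` argument, so treat this purely as a sketch.

```python
from keras.layers import Input, GRU, BatchNormalization, TimeDistributed, Dense, Activation
from keras.models import Model

def rnn_model_sketch(input_dim, units, activation, output_dim=29):
    """Sketch of an RNN + TimeDistributed Dense acoustic model."""
    input_data = Input(name='the_input', shape=(None, input_dim))
    # recurrent layer over the audio feature sequence
    rnn = GRU(units, activation=activation, return_sequences=True, name='rnn')(input_data)
    # batch normalization to speed up training
    bn = BatchNormalization()(rnn)
    # the same Dense layer applied independently at every time step
    logits = TimeDistributed(Dense(output_dim))(bn)
    y_pred = Activation('softmax', name='softmax')(logits)
    model = Model(inputs=input_data, outputs=y_pred)
    model.output_length = lambda x: x  # recurrent layers preserve sequence length
    return model

rnn_model_sketch(input_dim=161, units=200, activation='relu').summary()
```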
Please execute the code cell below to train the neural network you specified in `input_to_softmax`. After the model has finished training, the model is [saved](https://keras.io/getting-started/faq/how-can-i-save-a-keras-model) in the HDF5 file `model_1.h5`. The loss history is [saved](https://wiki.python.org/moin/UsingPickle) in `model_1.pickle`. You are welcome to tweak any of the optional parameters while calling the `train_model` function, but this is not required.
from keras.optimizers import SGD

train_model(input_to_softmax=model_1,
            pickle_path='model_1.pickle',
            save_model_path='model_1.h5',
            optimizer=SGD(lr=0.07693823225442271, decay=1e-6, momentum=0.9, nesterov=True, clipnorm=1),
            spectrogram=True) # change to False if you would like to use MFCC features
Epoch 1/20 106/106 [==============================] - 125s - loss: 301.3889 - val_loss: 255.1117 Epoch 2/20 106/106 [==============================] - 126s - loss: 208.7791 - val_loss: 195.5662 Epoch 3/20 106/106 [==============================] - 126s - loss: 188.6020 - val_loss: 184.3830 Epoch 4/20 106/106 [==============================] - 126s - loss: 172.8454 - val_loss: 165.9265 Epoch 5/20 106/106 [==============================] - 126s - loss: 159.9952 - val_loss: 160.3791 Epoch 6/20 106/106 [==============================] - 126s - loss: 151.2288 - val_loss: 150.3075 Epoch 7/20 106/106 [==============================] - 125s - loss: 144.6389 - val_loss: 147.3992 Epoch 8/20 106/106 [==============================] - 126s - loss: 139.3690 - val_loss: 143.2048 Epoch 9/20 106/106 [==============================] - 124s - loss: 134.5651 - val_loss: 140.9699 Epoch 10/20 106/106 [==============================] - 126s - loss: 130.5984 - val_loss: 139.4818 Epoch 11/20 106/106 [==============================] - 125s - loss: 127.2223 - val_loss: 134.7147 Epoch 12/20 106/106 [==============================] - 125s - loss: 124.1384 - val_loss: 135.1391 Epoch 13/20 106/106 [==============================] - 127s - loss: 121.4931 - val_loss: 135.6264 Epoch 14/20 106/106 [==============================] - 139s - loss: 119.0370 - val_loss: 132.6101 Epoch 15/20 106/106 [==============================] - 149s - loss: 117.5036 - val_loss: 135.2287 Epoch 16/20 106/106 [==============================] - 149s - loss: 115.1628 - val_loss: 134.6172 Epoch 17/20 106/106 [==============================] - 148s - loss: 114.1567 - val_loss: 133.6147 Epoch 18/20 106/106 [==============================] - 149s - loss: 113.1525 - val_loss: 131.8664 Epoch 19/20 106/106 [==============================] - 151s - loss: 110.8212 - val_loss: 133.1285 Epoch 20/20 106/106 [==============================] - 149s - loss: 109.7723 - val_loss: 133.2252
Apache-2.0
vui_notebook.ipynb
shubhank-saxena/dnn-speech-recognizer
(IMPLEMENTATION) Model 2: CNN + RNN + TimeDistributed Dense

The architecture in `cnn_rnn_model` adds an additional level of complexity, by introducing a [1D convolution layer](https://keras.io/layers/convolutional/conv1d). This layer incorporates many arguments that can be (optionally) tuned when calling the `cnn_rnn_model` function. We provide sample starting parameters, which you might find useful if you choose to use spectrogram audio features. If you instead want to use MFCC features, these arguments will have to be tuned. Note that the current architecture only supports values of `'same'` or `'valid'` for the `conv_border_mode` argument.

When tuning the parameters, be careful not to choose settings that make the convolutional layer overly small. If the temporal length of the CNN layer is shorter than the length of the transcribed text label, your code will throw an error.

Before running the code cell below, you must modify the `cnn_rnn_model` function in `sample_models.py`. Please add batch normalization to the recurrent layer, and provide the same `TimeDistributed` layer as before.
model_2 = cnn_rnn_model(input_dim=161, # change to 13 if you would like to use MFCC features
                        filters=185,
                        kernel_size=5,
                        conv_stride=3,
                        conv_border_mode='valid',
                        units=350,
                        dropout_rate=0.5)
_________________________________________________________________ Layer (type) Output Shape Param # ================================================================= the_input (InputLayer) (None, None, 161) 0 _________________________________________________________________ conv1d (Conv1D) (None, None, 185) 149110 _________________________________________________________________ bn_conv_1d (BatchNormalizati (None, None, 185) 740 _________________________________________________________________ rnn (GRU) (None, None, 350) 562800 _________________________________________________________________ batch_normalization_18 (Batc (None, None, 350) 1400 _________________________________________________________________ time_distributed_18 (TimeDis (None, None, 29) 10179 _________________________________________________________________ softmax (Activation) (None, None, 29) 0 ================================================================= Total params: 724,229 Trainable params: 723,159 Non-trainable params: 1,070 _________________________________________________________________ None
Apache-2.0
vui_notebook.ipynb
shubhank-saxena/dnn-speech-recognizer
Please execute the code cell below to train the neural network you specified in `input_to_softmax`. After the model has finished training, the model is [saved](https://keras.io/getting-started/faq/how-can-i-save-a-keras-model) in the HDF5 file `model_2.h5`. The loss history is [saved](https://wiki.python.org/moin/UsingPickle) in `model_2.pickle`. You are welcome to tweak any of the optional parameters while calling the `train_model` function, but this is not required.
from keras.optimizers import SGD

train_model(input_to_softmax=model_2,
            pickle_path='model_2.pickle',
            save_model_path='model_2.h5',
            optimizer=SGD(lr=0.05, decay=1e-6, momentum=0.9, nesterov=True, clipnorm=1),
            spectrogram=True) # change to False if you would like to use MFCC features
Epoch 1/20 106/106 [==============================] - 47s - loss: 258.7976 - val_loss: 215.1476 Epoch 2/20 106/106 [==============================] - 44s - loss: 210.2469 - val_loss: 195.7121 Epoch 3/20 106/106 [==============================] - 44s - loss: 194.4411 - val_loss: 176.9136 Epoch 4/20 106/106 [==============================] - 44s - loss: 184.4350 - val_loss: 164.3036 Epoch 5/20 106/106 [==============================] - 44s - loss: 176.8723 - val_loss: 161.7172 Epoch 6/20 106/106 [==============================] - 45s - loss: 171.1767 - val_loss: 155.6394 Epoch 7/20 106/106 [==============================] - 44s - loss: 166.1970 - val_loss: 150.5580 Epoch 8/20 106/106 [==============================] - 45s - loss: 162.9583 - val_loss: 150.3715 Epoch 9/20 106/106 [==============================] - 45s - loss: 159.4488 - val_loss: 146.7499 Epoch 10/20 106/106 [==============================] - 44s - loss: 156.4711 - val_loss: 143.3999 Epoch 11/20 106/106 [==============================] - 44s - loss: 153.5752 - val_loss: 141.8302 Epoch 12/20 106/106 [==============================] - 44s - loss: 151.9115 - val_loss: 141.0765 Epoch 13/20 106/106 [==============================] - 45s - loss: 149.8154 - val_loss: 140.0649 Epoch 14/20 106/106 [==============================] - 44s - loss: 148.0079 - val_loss: 138.6670 Epoch 15/20 106/106 [==============================] - 45s - loss: 146.1044 - val_loss: 138.5527 Epoch 16/20 106/106 [==============================] - 45s - loss: 144.4150 - val_loss: 135.1045 Epoch 17/20 106/106 [==============================] - 44s - loss: 143.2880 - val_loss: 135.8767 Epoch 18/20 106/106 [==============================] - 45s - loss: 141.8172 - val_loss: 134.6186 Epoch 19/20 106/106 [==============================] - 44s - loss: 140.8268 - val_loss: 130.9444 Epoch 20/20 106/106 [==============================] - 45s - loss: 139.1327 - val_loss: 132.9859
Apache-2.0
vui_notebook.ipynb
shubhank-saxena/dnn-speech-recognizer
(IMPLEMENTATION) Model 3: Deeper RNN + TimeDistributed Dense

Review the code in `rnn_model`, which makes use of a single recurrent layer. Now, specify an architecture in `deep_rnn_model` that utilizes a variable number `recur_layers` of recurrent layers. The figure below shows the architecture that should be returned if `recur_layers=2`. In the figure, the output sequence of the first recurrent layer is used as input for the next recurrent layer.

Feel free to change the supplied values of `units` to whatever you think performs best. You can change the value of `recur_layers`, as long as your final value is greater than 1. (As a quick check that you have implemented the additional functionality in `deep_rnn_model` correctly, make sure that the architecture that you specify here is identical to `rnn_model` if `recur_layers=1`.)
model_3 = deep_rnn_model(input_dim=161, # change to 13 if you would like to use MFCC features
                         units=290,
                         recur_layers=3,
                         dropout_rate=0.3035064397585259)
WARNING:tensorflow:From /home/pjordan/anaconda3/envs/dnn-speech-recognizer/lib/python3.5/site-packages/keras/backend/tensorflow_backend.py:1190: calling reduce_sum (from tensorflow.python.ops.math_ops) with keep_dims is deprecated and will be removed in a future version. Instructions for updating: keep_dims is deprecated, use keepdims instead WARNING:tensorflow:From /home/pjordan/anaconda3/envs/dnn-speech-recognizer/lib/python3.5/site-packages/keras/backend/tensorflow_backend.py:1154: calling reduce_max (from tensorflow.python.ops.math_ops) with keep_dims is deprecated and will be removed in a future version. Instructions for updating: keep_dims is deprecated, use keepdims instead _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= the_input (InputLayer) (None, None, 161) 0 _________________________________________________________________ gru_1 (GRU) (None, None, 290) 393240 _________________________________________________________________ batch_normalization_1 (Batch (None, None, 290) 1160 _________________________________________________________________ gru_2 (GRU) (None, None, 290) 505470 _________________________________________________________________ batch_normalization_2 (Batch (None, None, 290) 1160 _________________________________________________________________ gru_3 (GRU) (None, None, 290) 505470 _________________________________________________________________ batch_normalization_3 (Batch (None, None, 290) 1160 _________________________________________________________________ time_distributed_1 (TimeDist (None, None, 29) 8439 _________________________________________________________________ softmax (Activation) (None, None, 29) 0 ================================================================= Total params: 1,416,099 Trainable params: 1,414,359 Non-trainable params: 1,740 _________________________________________________________________ None
Apache-2.0
vui_notebook.ipynb
shubhank-saxena/dnn-speech-recognizer
Please execute the code cell below to train the neural network you specified in `input_to_softmax`. After the model has finished training, the model is [saved](https://keras.io/getting-started/faq/how-can-i-save-a-keras-model) in the HDF5 file `model_3.h5`. The loss history is [saved](https://wiki.python.org/moin/UsingPickle) in `model_3.pickle`. You are welcome to tweak any of the optional parameters while calling the `train_model` function, but this is not required.
from keras.optimizers import SGD

train_model(input_to_softmax=model_3,
            pickle_path='model_3.pickle',
            save_model_path='model_3.h5',
            optimizer=SGD(lr=0.0635459438114008, decay=1e-6, momentum=0.9, nesterov=True, clipnorm=1),
            spectrogram=True) # change to False if you would like to use MFCC features
WARNING:tensorflow:From /home/pjordan/anaconda3/envs/dnn-speech-recognizer/lib/python3.5/site-packages/keras/backend/tensorflow_backend.py:1297: calling reduce_mean (from tensorflow.python.ops.math_ops) with keep_dims is deprecated and will be removed in a future version. Instructions for updating: keep_dims is deprecated, use keepdims instead WARNING:tensorflow:Variable *= will be deprecated. Use `var.assign(var * other)` if you want assignment to the variable value or `x = x * y` if you want a new python Tensor object. Epoch 1/20 106/106 [==============================] - 346s - loss: 306.0022 - val_loss: 230.8003 Epoch 2/20 106/106 [==============================] - 356s - loss: 228.5843 - val_loss: 208.5356 Epoch 3/20 106/106 [==============================] - 359s - loss: 221.6973 - val_loss: 202.8349 Epoch 4/20 106/106 [==============================] - 359s - loss: 214.3285 - val_loss: 193.8858 Epoch 5/20 106/106 [==============================] - 357s - loss: 206.8792 - val_loss: 190.5545 Epoch 6/20 106/106 [==============================] - 354s - loss: 197.0360 - val_loss: 180.5237 Epoch 7/20 106/106 [==============================] - 359s - loss: 186.1461 - val_loss: 173.6953 Epoch 8/20 106/106 [==============================] - 360s - loss: 177.0056 - val_loss: 156.9757 Epoch 9/20 106/106 [==============================] - 354s - loss: 170.2661 - val_loss: 152.6200 Epoch 10/20 106/106 [==============================] - 361s - loss: 166.5991 - val_loss: 152.2965 Epoch 11/20 106/106 [==============================] - 357s - loss: 164.0009 - val_loss: 146.9104 Epoch 12/20 106/106 [==============================] - 358s - loss: 160.5796 - val_loss: 142.4707 Epoch 13/20 106/106 [==============================] - 357s - loss: 157.5561 - val_loss: 142.0041 Epoch 14/20 106/106 [==============================] - 362s - loss: 156.8146 - val_loss: 141.1184 Epoch 15/20 106/106 [==============================] - 360s - loss: 154.9966 - val_loss: 140.2970 Epoch 16/20 106/106 [==============================] - 360s - loss: 152.4878 - val_loss: 138.7927 Epoch 17/20 106/106 [==============================] - 359s - loss: 152.0400 - val_loss: 136.6318 Epoch 18/20 106/106 [==============================] - 359s - loss: 149.7010 - val_loss: 137.6884 Epoch 19/20 106/106 [==============================] - 354s - loss: 148.1568 - val_loss: 130.7026 Epoch 20/20 106/106 [==============================] - 357s - loss: 148.5415 - val_loss: 136.0464
Apache-2.0
vui_notebook.ipynb
shubhank-saxena/dnn-speech-recognizer
(IMPLEMENTATION) Model 4: Bidirectional RNN + TimeDistributed Dense

Read about the [Bidirectional](https://keras.io/layers/wrappers/) wrapper in the Keras documentation. For your next architecture, you will specify an architecture that uses a single bidirectional RNN layer, before a (`TimeDistributed`) dense layer. The added value of a bidirectional RNN is described well in [this paper](http://www.cs.toronto.edu/~hinton/absps/DRNN_speech.pdf).

> One shortcoming of conventional RNNs is that they are only able to make use of previous context. In speech recognition, where whole utterances are transcribed at once, there is no reason not to exploit future context as well. Bidirectional RNNs (BRNNs) do this by processing the data in both directions with two separate hidden layers which are then fed forwards to the same output layer.

Before running the code cell below, you must complete the `bidirectional_rnn_model` function in `sample_models.py`. Feel free to use `SimpleRNN`, `LSTM`, or `GRU` units. When specifying the `Bidirectional` wrapper, use `merge_mode='concat'`.
model_4 = bidirectional_rnn_model(input_dim=161, # change to 13 if you would like to use MFCC features
                                  units=250,
                                  dropout_rate=0.1)
_________________________________________________________________ Layer (type) Output Shape Param # ================================================================= the_input (InputLayer) (None, None, 161) 0 _________________________________________________________________ bidirectional_9 (Bidirection (None, None, 500) 618000 _________________________________________________________________ time_distributed_28 (TimeDis (None, None, 29) 14529 _________________________________________________________________ softmax (Activation) (None, None, 29) 0 ================================================================= Total params: 632,529 Trainable params: 632,529 Non-trainable params: 0 _________________________________________________________________ None
Apache-2.0
vui_notebook.ipynb
shubhank-saxena/dnn-speech-recognizer
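As a quick illustration of the wrapper described above, the probe below wires a single `Bidirectional` GRU with `merge_mode='concat'` and confirms that concatenating the forward and backward outputs doubles the feature width (matching the 500 = 2 x 250 units visible in the summary above). It is only a sketch, not the graded `bidirectional_rnn_model`.

```python
import numpy as np
from keras.layers import Input, GRU, Bidirectional
from keras.models import Model

inp = Input(shape=(None, 161))
# forward and backward GRUs over the same sequence; 'concat' joins their outputs
out = Bidirectional(GRU(250, return_sequences=True), merge_mode='concat')(inp)
probe = Model(inp, out)

# the feature dimension doubles from 250 to 500
print(probe.predict(np.zeros((1, 20, 161))).shape)   # (1, 20, 500)
```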
Please execute the code cell below to train the neural network you specified in `input_to_softmax`. After the model has finished training, the model is [saved](https://keras.io/getting-started/faq/how-can-i-save-a-keras-model) in the HDF5 file `model_4.h5`. The loss history is [saved](https://wiki.python.org/moin/UsingPickle) in `model_4.pickle`. You are welcome to tweak any of the optional parameters while calling the `train_model` function, but this is not required.
train_model(input_to_softmax=model_4, pickle_path='model_4.pickle', save_model_path='model_4.h5', optimizer=SGD(lr=0.06, decay=1e-6, momentum=0.9, nesterov=True, clipnorm=1), spectrogram=True) # change to False if you would like to use MFCC features
Epoch 1/20 106/106 [==============================] - 205s - loss: 275.6266 - val_loss: 226.8717 Epoch 2/20 106/106 [==============================] - 205s - loss: 213.2997 - val_loss: 201.3109 Epoch 3/20 106/106 [==============================] - 204s - loss: 200.7651 - val_loss: 186.7573 Epoch 4/20 106/106 [==============================] - 205s - loss: 193.3435 - val_loss: 182.8960 Epoch 5/20 106/106 [==============================] - 204s - loss: 187.6618 - val_loss: 173.7006 Epoch 6/20 106/106 [==============================] - 204s - loss: 182.4469 - val_loss: 177.4735 Epoch 7/20 106/106 [==============================] - 204s - loss: 177.6839 - val_loss: 169.6660 Epoch 8/20 106/106 [==============================] - 204s - loss: 173.7626 - val_loss: 169.5262 Epoch 9/20 106/106 [==============================] - 205s - loss: 169.5368 - val_loss: 162.4727 Epoch 10/20 106/106 [==============================] - 204s - loss: 166.1426 - val_loss: 161.0329 Epoch 11/20 106/106 [==============================] - 205s - loss: 162.1614 - val_loss: 159.1479 Epoch 12/20 106/106 [==============================] - 205s - loss: 159.1850 - val_loss: 154.9204 Epoch 13/20 106/106 [==============================] - 204s - loss: 156.0412 - val_loss: 149.9123 Epoch 14/20 106/106 [==============================] - 204s - loss: 153.1229 - val_loss: 151.7496 Epoch 15/20 106/106 [==============================] - 204s - loss: 150.3786 - val_loss: 147.4174 Epoch 16/20 106/106 [==============================] - 205s - loss: 148.0250 - val_loss: 148.3927 Epoch 17/20 106/106 [==============================] - 203s - loss: 145.1999 - val_loss: 142.2009 Epoch 18/20 106/106 [==============================] - 205s - loss: 143.4235 - val_loss: 142.9599 Epoch 19/20 106/106 [==============================] - 205s - loss: 140.9955 - val_loss: 141.4220 Epoch 20/20 106/106 [==============================] - 204s - loss: 139.4114 - val_loss: 138.3626
Apache-2.0
vui_notebook.ipynb
shubhank-saxena/dnn-speech-recognizer
(OPTIONAL IMPLEMENTATION) Models 5+

If you would like to try out more architectures than the ones above, please use the code cell below. Please continue to follow the same convention for saving the models; for the $i$-th sample model, please save the loss at **`model_i.pickle`** and save the trained model at **`model_i.h5`**.
model_5 = cnn2d_rnn_model(input_dim=161, # change to 13 if you would like to use MFCC features
                          filters=50,
                          kernel_size=(11,11),
                          conv_stride=1,
                          conv_border_mode='same',
                          pool_size=(1,5),
                          units=200,
                          dropout_rate=0.1)

from keras.optimizers import SGD

train_model(input_to_softmax=model_5,
            pickle_path='model_5.pickle',
            save_model_path='model_5.h5',
            optimizer=SGD(lr=0.06, decay=1e-6, momentum=0.9, nesterov=True, clipnorm=1),
            spectrogram=True) # change to False if you would like to use MFCC features
Epoch 1/20 106/106 [==============================] - 137s - loss: 285.0588 - val_loss: 228.7582 Epoch 2/20 106/106 [==============================] - 129s - loss: 230.2834 - val_loss: 213.1584 Epoch 3/20 106/106 [==============================] - 126s - loss: 213.9887 - val_loss: 194.7103 Epoch 4/20 106/106 [==============================] - 126s - loss: 197.2486 - val_loss: 179.5294 Epoch 5/20 106/106 [==============================] - 126s - loss: 180.5510 - val_loss: 166.0413 Epoch 6/20 106/106 [==============================] - 125s - loss: 166.6758 - val_loss: 153.4104 Epoch 7/20 106/106 [==============================] - 125s - loss: 157.2719 - val_loss: 144.9292 Epoch 8/20 106/106 [==============================] - 126s - loss: 150.0972 - val_loss: 142.1533 Epoch 9/20 106/106 [==============================] - 125s - loss: 143.9420 - val_loss: 138.1702 Epoch 10/20 106/106 [==============================] - 124s - loss: 138.9901 - val_loss: 132.9487 Epoch 11/20 106/106 [==============================] - 125s - loss: 135.0339 - val_loss: 131.0782 Epoch 12/20 106/106 [==============================] - 125s - loss: 131.4873 - val_loss: 129.4672 Epoch 13/20 106/106 [==============================] - 124s - loss: 128.0020 - val_loss: 128.9729 Epoch 14/20 106/106 [==============================] - 124s - loss: 125.3787 - val_loss: 126.8662 Epoch 15/20 106/106 [==============================] - 124s - loss: 122.5167 - val_loss: 122.6902 Epoch 16/20 106/106 [==============================] - 124s - loss: 119.9851 - val_loss: 123.2564 Epoch 17/20 106/106 [==============================] - 125s - loss: 117.8803 - val_loss: 121.1491 Epoch 18/20 106/106 [==============================] - 124s - loss: 115.6461 - val_loss: 121.6285 Epoch 19/20 106/106 [==============================] - 124s - loss: 113.2564 - val_loss: 119.9097 Epoch 20/20 106/106 [==============================] - 125s - loss: 111.4150 - val_loss: 118.3596
Apache-2.0
vui_notebook.ipynb
shubhank-saxena/dnn-speech-recognizer
Compare the Models

Execute the code cell below to evaluate the performance of the drafted deep learning models. The training and validation loss are plotted for each model.
from glob import glob
import numpy as np
import _pickle as pickle
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
sns.set_style(style='white')

# obtain the paths for the saved model history
all_pickles = sorted(glob("results/*.pickle"))
# extract the name of each model
model_names = [item[8:-7] for item in all_pickles]
# extract the loss history for each model
valid_loss = [pickle.load( open( i, "rb" ) )['val_loss'] for i in all_pickles]
train_loss = [pickle.load( open( i, "rb" ) )['loss'] for i in all_pickles]
# save the number of epochs used to train each model
num_epochs = [len(valid_loss[i]) for i in range(len(valid_loss))]

fig = plt.figure(figsize=(16,5))

# plot the training loss vs. epoch for each model
ax1 = fig.add_subplot(121)
for i in range(len(all_pickles)):
    ax1.plot(np.linspace(1, num_epochs[i], num_epochs[i]),
             train_loss[i], label=model_names[i])
# clean up the plot
ax1.legend()
ax1.set_xlim([1, max(num_epochs)])
plt.xlabel('Epoch')
plt.ylabel('Training Loss')

# plot the validation loss vs. epoch for each model
ax2 = fig.add_subplot(122)
for i in range(len(all_pickles)):
    ax2.plot(np.linspace(1, num_epochs[i], num_epochs[i]),
             valid_loss[i], label=model_names[i])
# clean up the plot
ax2.legend()
ax2.set_xlim([1, max(num_epochs)])
plt.xlabel('Epoch')
plt.ylabel('Validation Loss')
plt.show()
_____no_output_____
Apache-2.0
vui_notebook.ipynb
shubhank-saxena/dnn-speech-recognizer
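A small convenience snippet, reusing the `model_names` and `valid_loss` lists built in the cell above, prints the best validation loss reached by each model; these are the numbers summarized in the answer below.

```python
# print the minimum (best) validation loss reached by each model, best first
for name, losses in sorted(zip(model_names, valid_loss), key=lambda p: min(p[1])):
    print('{:<12s} best val_loss = {:9.4f}'.format(name, min(losses)))
```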
__Question 1:__ Use the plot above to analyze the performance of each of the attempted architectures. Which performs best? Provide an explanation regarding why you think some models perform better than others.

__Answer:__

The following table gives the model performance in ascending order of best validation loss.

| Rank | Model | Description | Best Loss |
| -- | -- | -- | -- |
| 1 | 5 | 2D CNN + RNN + TimeDistributed Dense | 118.3596 |
| 2 | 3 | Deeper RNN + TimeDistributed Dense | 130.7026 |
| 3 | 2 | CNN + RNN + TimeDistributed Dense | 130.9444 |
| 4 | 1 | RNN + TimeDistributed Dense | 131.8664 |
| 5 | 4 | Bidirectional RNN + TimeDistributed Dense | 138.3626 |
| 6 | 0 | RNN | 721.1129 |

All of the models with a TimeDistributed dense layer perform reasonably well, indicating that the per-time-step output layer extracts valuable signal from the sequence (as expected). The models that preprocessed the input with CNNs also performed well, but were prone to overfitting. The network with the two-dimensional convolutional layer performed best, indicating that the convolutional layer can produce features beyond what a recurrent model can infer alone; in particular, the frequency dimension has informative patterns that can be mined. Deeper recurrent layers do not seem to add much to performance within the 20-epoch evaluation, as evidenced by the comparison of model 3 with model 1. Models 3 and 4, with sufficient dropout rates, do not appear prone to overfitting and may perform better with more epochs than the models with convolutional layers; both use recurrent layers that are less prone to exploding gradients, which is possibly why they take longer to train.

The final model combines the best convolutional layer with a bidirectional RNN and time-distributed dense layers.

(IMPLEMENTATION) Final Model

Now that you've tried out many sample models, use what you've learned to draft your own architecture! While your final acoustic model should not be identical to any of the architectures explored above, you are welcome to merely combine the explored layers above into a deeper architecture. It is **NOT** necessary to include new layer types that were not explored in the notebook.

However, if you would like some ideas for even more layer types, check out these ideas for some additional, optional extensions to your model:

- If you notice your model is overfitting to the training dataset, consider adding **dropout**! To add dropout to [recurrent layers](https://faroit.github.io/keras-docs/1.0.2/layers/recurrent/), pay special attention to the `dropout_W` and `dropout_U` arguments. This [paper](http://arxiv.org/abs/1512.05287) may also provide some interesting theoretical background.
- If you choose to include a convolutional layer in your model, you may get better results by working with **dilated convolutions**. If you choose to use dilated convolutions, make sure that you are able to accurately calculate the length of the acoustic model's output in the `model.output_length` lambda function. You can read more about dilated convolutions in Google's [WaveNet paper](https://arxiv.org/abs/1609.03499). For an example of a speech-to-text system that makes use of dilated convolutions, check out this GitHub [repository](https://github.com/buriburisuri/speech-to-text-wavenet). You can work with dilated convolutions [in Keras](https://keras.io/layers/convolutional/) by paying special attention to the `padding` argument when you specify a convolutional layer.
- If your model makes use of convolutional layers, why not also experiment with adding **max pooling**?
  Check out [this paper](https://arxiv.org/pdf/1701.02720.pdf) for an example architecture that makes use of max pooling in an acoustic model.
- So far, you have experimented with a single bidirectional RNN layer. Consider stacking the bidirectional layers, to produce a [deep bidirectional RNN](https://www.cs.toronto.edu/~graves/asru_2013.pdf)!

All models that you specify in this repository should have `output_length` defined as an attribute. This attribute is a lambda function that maps the (temporal) length of the input acoustic features to the (temporal) length of the output softmax layer. This function is used in the computation of CTC loss; to see this, look at the `add_ctc_loss` function in `train_utils.py`. To see where the `output_length` attribute is defined for the models in the code, take a look at the `sample_models.py` file. You will notice this line of code within most models:

```
model.output_length = lambda x: x
```

The acoustic model that incorporates a convolutional layer (`cnn_rnn_model`) has a line that is a bit different:

```
model.output_length = lambda x: cnn_output_length(
    x, kernel_size, conv_border_mode, conv_stride)
```

In the case of models that use purely recurrent layers, the lambda function is the identity function, as the recurrent layers do not modify the (temporal) length of their input tensors. However, convolutional layers are more complicated and require a specialized function (`cnn_output_length` in `sample_models.py`) to determine the temporal length of their output.

You will have to add the `output_length` attribute to your final model before running the code cell below. Feel free to use the `cnn_output_length` function, if it suits your model.
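For reference, the sketch below shows the kind of calculation a `cnn_output_length`-style helper performs; it mirrors the standard Keras convolution-length formula, but the repository's own version in `sample_models.py` may differ in detail, so treat it as illustrative.

```python
def cnn_output_length_sketch(input_length, filter_size, border_mode, stride, dilation=1):
    """Return the temporal length after a 1D convolution ('same' or 'valid')."""
    if input_length is None:
        return None
    assert border_mode in {'same', 'valid'}
    dilated_filter_size = filter_size + (filter_size - 1) * (dilation - 1)
    if border_mode == 'same':
        output_length = input_length
    else:  # 'valid'
        output_length = input_length - dilated_filter_size + 1
    return (output_length + stride - 1) // stride

# e.g. the Model 2 settings above (kernel_size=5, 'valid' padding, stride 3)
# shrink a 500-step spectrogram to 166 time steps:
print(cnn_output_length_sketch(500, 5, 'valid', 3))   # 166
```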
# specify the model
model_end = final_model(input_dim=161,
                        filters=50,
                        kernel_size=(11,11),
                        conv_stride=1,
                        conv_border_mode='same',
                        pool_size=(1,5),
                        units=200,
                        recur_layers=1,
                        dropout_rate=0.5)
WARNING:tensorflow:From /home/pjordan/anaconda3/envs/dnn-speech-recognizer/lib/python3.5/site-packages/keras/backend/tensorflow_backend.py:1208: calling reduce_prod (from tensorflow.python.ops.math_ops) with keep_dims is deprecated and will be removed in a future version. Instructions for updating: keep_dims is deprecated, use keepdims instead _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= the_input (InputLayer) (None, None, 161) 0 _________________________________________________________________ lambda_17 (Lambda) (None, None, 161, 1) 0 _________________________________________________________________ conv2d_1 (Conv2D) (None, None, 161, 50) 6100 _________________________________________________________________ max_pooling2d_1 (MaxPooling2 (None, None, 32, 50) 0 _________________________________________________________________ time_distributed_19 (TimeDis (None, None, 1600) 0 _________________________________________________________________ bidirectional_1 (Bidirection (None, None, 400) 2161200 _________________________________________________________________ batch_normalization_19 (Batc (None, None, 400) 1600 _________________________________________________________________ time_distributed_20 (TimeDis (None, None, 29) 11629 _________________________________________________________________ softmax (Activation) (None, None, 29) 0 ================================================================= Total params: 2,180,529 Trainable params: 2,179,729 Non-trainable params: 800 _________________________________________________________________ None
Apache-2.0
vui_notebook.ipynb
shubhank-saxena/dnn-speech-recognizer
Please execute the code cell below to train the neural network you specified in `input_to_softmax`. After the model has finished training, the model is [saved](https://keras.io/getting-started/faq/how-can-i-save-a-keras-model) in the HDF5 file `model_end.h5`. The loss history is [saved](https://wiki.python.org/moin/UsingPickle) in `model_end.pickle`. You are welcome to tweak any of the optional parameters while calling the `train_model` function, but this is not required.
from keras.optimizers import SGD

train_model(input_to_softmax=model_end,
            pickle_path='model_end.pickle',
            save_model_path='model_end.h5',
            optimizer=SGD(lr=0.05, decay=1e-6, momentum=0.9, nesterov=True, clipnorm=1),
            spectrogram=True) # change to False if you would like to use MFCC features
Epoch 1/20 106/106 [==============================] - 248s - loss: 335.9858 - val_loss: 255.5860 Epoch 2/20 106/106 [==============================] - 240s - loss: 242.4996 - val_loss: 238.2656 Epoch 3/20 106/106 [==============================] - 239s - loss: 222.3218 - val_loss: 197.3325 Epoch 4/20 106/106 [==============================] - 241s - loss: 200.9018 - val_loss: 185.4125 Epoch 5/20 106/106 [==============================] - 239s - loss: 187.2262 - val_loss: 171.6594 Epoch 6/20 106/106 [==============================] - 236s - loss: 175.8966 - val_loss: 157.7085 Epoch 7/20 106/106 [==============================] - 241s - loss: 167.2219 - val_loss: 154.9972 Epoch 8/20 106/106 [==============================] - 236s - loss: 161.3043 - val_loss: 151.5892 Epoch 9/20 106/106 [==============================] - 237s - loss: 156.4006 - val_loss: 145.9787 Epoch 10/20 106/106 [==============================] - 238s - loss: 152.1061 - val_loss: 141.4593 Epoch 11/20 106/106 [==============================] - 238s - loss: 148.1522 - val_loss: 139.1478 Epoch 12/20 106/106 [==============================] - 238s - loss: 145.1965 - val_loss: 136.7189 Epoch 13/20 106/106 [==============================] - 239s - loss: 142.2492 - val_loss: 134.3185 Epoch 14/20 106/106 [==============================] - 237s - loss: 140.1469 - val_loss: 131.4872 Epoch 15/20 106/106 [==============================] - 238s - loss: 137.7255 - val_loss: 129.9063 Epoch 16/20 106/106 [==============================] - 236s - loss: 135.6871 - val_loss: 129.2205 Epoch 17/20 106/106 [==============================] - 239s - loss: 133.5798 - val_loss: 127.0699 Epoch 18/20 106/106 [==============================] - 239s - loss: 131.8900 - val_loss: 125.6219 Epoch 19/20 106/106 [==============================] - 237s - loss: 130.2181 - val_loss: 126.2355 Epoch 20/20 106/106 [==============================] - 237s - loss: 128.9155 - val_loss: 126.0262
Apache-2.0
vui_notebook.ipynb
shubhank-saxena/dnn-speech-recognizer
__Question 2:__ Describe your final model architecture and your reasoning at each step.

__Answer:__

The final architecture included a two-dimensional convolutional layer followed by a max-pooling layer. The output of the max-pooling layer fed into a bidirectional GRU layer, whose output in turn fed a time-distributed dense layer. In total, the network has 2,179,729 trainable parameters.

The 2D convolutional and max-pooling layers are used to transform the time-and-frequency matrix into a time-and-feature matrix, hopefully producing meaningful distillations of common waveforms. As in the base models in the previous section, the bidirectional GRU allows more flexibility by processing the sequence in both directions in time, although this does not appear to add much improvement over a unidirectional GRU with a comparable number of parameters.

STEP 3: Obtain Predictions

We have written a function for you to decode the predictions of your acoustic model. To use the function, please execute the code cell below.
import numpy as np
from data_generator import AudioGenerator
from keras import backend as K
from utils import int_sequence_to_text
from IPython.display import Audio

def get_predictions(index, partition, input_to_softmax, model_path):
    """ Print a model's decoded predictions
    Params:
        index (int): The example you would like to visualize
        partition (str): One of 'train' or 'validation'
        input_to_softmax (Model): The acoustic model
        model_path (str): Path to saved acoustic model's weights
    """
    # load the train and test data
    data_gen = AudioGenerator()
    data_gen.load_train_data()
    data_gen.load_validation_data()

    # obtain the true transcription and the audio features
    if partition == 'validation':
        transcr = data_gen.valid_texts[index]
        audio_path = data_gen.valid_audio_paths[index]
        data_point = data_gen.normalize(data_gen.featurize(audio_path))
    elif partition == 'train':
        transcr = data_gen.train_texts[index]
        audio_path = data_gen.train_audio_paths[index]
        data_point = data_gen.normalize(data_gen.featurize(audio_path))
    else:
        raise Exception('Invalid partition! Must be "train" or "validation"')

    # obtain and decode the acoustic model's predictions
    input_to_softmax.load_weights(model_path)
    prediction = input_to_softmax.predict(np.expand_dims(data_point, axis=0))
    output_length = [input_to_softmax.output_length(data_point.shape[0])]
    pred_ints = (K.eval(K.ctc_decode(
        prediction, output_length)[0][0]) + 1).flatten().tolist()

    # play the audio file, and display the true and predicted transcriptions
    print('-'*80)
    Audio(audio_path)
    print('True transcription:\n' + '\n' + transcr)
    print('-'*80)
    print('Predicted transcription:\n' + '\n' + ''.join(int_sequence_to_text(pred_ints)))
    print('-'*80)
_____no_output_____
Apache-2.0
vui_notebook.ipynb
shubhank-saxena/dnn-speech-recognizer
Use the code cell below to obtain the transcription predicted by your final model for the first example in the training dataset.
get_predictions(index=0, partition='train', input_to_softmax=model_end, model_path='results/model_end.h5')
-------------------------------------------------------------------------------- True transcription: he was young no spear had touched him no poison lurked in his wine -------------------------------------------------------------------------------- Predicted transcription: he was o no sperhd thtm no pis on mork din iso --------------------------------------------------------------------------------
Apache-2.0
vui_notebook.ipynb
shubhank-saxena/dnn-speech-recognizer
Use the next code cell to visualize the model's prediction for the first example in the validation dataset.
get_predictions(index=0, partition='validation', input_to_softmax=model_end, model_path='results/model_end.h5')
-------------------------------------------------------------------------------- True transcription: o life of this our spring -------------------------------------------------------------------------------- Predicted transcription: bo f an dhes rbrn --------------------------------------------------------------------------------
Apache-2.0
vui_notebook.ipynb
shubhank-saxena/dnn-speech-recognizer
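Optionally, a short loop over a few more validation indices (reusing the same `get_predictions` helper and saved weights) gives a feel for the kinds of errors the model makes.

```python
# optional: decode a few more validation examples with the final model
for i in range(1, 4):
    get_predictions(index=i,
                    partition='validation',
                    input_to_softmax=model_end,
                    model_path='results/model_end.h5')
```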
One standard way to improve the results of the decoder is to incorporate a language model. We won't pursue this in the notebook, but you are welcome to do so as an _optional extension_. If you are interested in creating models that provide improved transcriptions, you are encouraged to download [more data](http://www.openslr.org/12/) and train bigger, deeper models. But beware - the model will likely take a long while to train. For instance, training this [state-of-the-art](https://arxiv.org/pdf/1512.02595v1.pdf) model would take 3-6 weeks on a single GPU!
!!python -m nbconvert *.ipynb
!!zip submission.zip vui_notebook.ipynb report.html sample_models.py results/*
_____no_output_____
Apache-2.0
vui_notebook.ipynb
shubhank-saxena/dnn-speech-recognizer
QA Inference on BERT using TensorRT

1. Overview

Bidirectional Encoder Representations from Transformers (BERT) is a method of pre-training language representations which obtains state-of-the-art results on a wide array of Natural Language Processing (NLP) tasks. The original paper can be found here: https://arxiv.org/abs/1810.04805.

1.a Learning objectives

This notebook demonstrates:
- Inference on a Question Answering (QA) task with a BERT Base/Large model
- The use of fine-tuned NVIDIA BERT models
- Use of a BERT model with TensorRT (TRT)

2. Requirements

Please refer to the ReadMe file.

3. BERT Inference: Question Answering

We can run inference on a fine-tuned BERT model for tasks like Question Answering.

Here we use a BERT model fine-tuned on a [SQuAD 2.0 Dataset](https://rajpurkar.github.io/SQuAD-explorer/), which contains 100,000+ question-answer pairs on 500+ articles combined with over 50,000 new, unanswerable questions.

3.a Paragraph and Queries

The paragraph and the questions can be customized by changing the text below. Note that when using models with small sequence lengths, you should use a shorter paragraph:

Paragraph:
paragraph_text = "The Apollo program, also known as Project Apollo, was the third United States human spaceflight program carried out by the National Aeronautics and Space Administration (NASA), which accomplished landing the first humans on the Moon from 1969 to 1972. First conceived during Dwight D. Eisenhower's administration as a three-man spacecraft to follow the one-man Project Mercury which put the first Americans in space, Apollo was later dedicated to President John F. Kennedy's national goal of landing a man on the Moon and returning him safely to the Earth by the end of the 1960s, which he proposed in a May 25, 1961, address to Congress. Project Mercury was followed by the two-man Project Gemini. The first manned flight of Apollo was in 1968. Apollo ran from 1961 to 1972, and was supported by the two-man Gemini program which ran concurrently with it from 1962 to 1966. Gemini missions developed some of the space travel techniques that were necessary for the success of the Apollo missions. Apollo used Saturn family rockets as launch vehicles. Apollo/Saturn vehicles were also used for an Apollo Applications Program, which consisted of Skylab, a space station that supported three manned missions in 1973-74, and the Apollo-Soyuz Test Project, a joint Earth orbit mission with the Soviet Union in 1975." # Short paragraph version for BERT models with max sequence length of 128 short_paragraph_text = "The Apollo program was the third United States human spaceflight program. First conceived as a three-man spacecraft to follow the one-man Project Mercury which put the first Americans in space, Apollo was dedicated to President John F. Kennedy's national goal of landing a man on the Moon. The first manned flight of Apollo was in 1968. Apollo ran from 1961 to 1972 followed by the Apollo-Soyuz Test Project a joint Earth orbit mission with the Soviet Union in 1975."
_____no_output_____
Apache-2.0
demo/BERT/inference.ipynb
malithj/TensorRT
Question:
question_text = "What project put the first Americans into space?" #question_text = "What year did the first manned Apollo flight occur?" #question_text = "What President is credited with the original notion of putting Americans in space?" #question_text = "Who did the U.S. collaborate with on an Earth orbit mission in 1975?"
_____no_output_____
Apache-2.0
demo/BERT/inference.ipynb
malithj/TensorRT
In this example we ask our BERT model questions related to the following paragraph:

**The Apollo Program**

_"The Apollo program, also known as Project Apollo, was the third United States human spaceflight program carried out by the National Aeronautics and Space Administration (NASA), which accomplished landing the first humans on the Moon from 1969 to 1972. First conceived during Dwight D. Eisenhower's administration as a three-man spacecraft to follow the one-man Project Mercury which put the first Americans in space, Apollo was later dedicated to President John F. Kennedy's national goal of landing a man on the Moon and returning him safely to the Earth by the end of the 1960s, which he proposed in a May 25, 1961, address to Congress. Project Mercury was followed by the two-man Project Gemini. The first manned flight of Apollo was in 1968. Apollo ran from 1961 to 1972, and was supported by the two-man Gemini program which ran concurrently with it from 1962 to 1966. Gemini missions developed some of the space travel techniques that were necessary for the success of the Apollo missions. Apollo used Saturn family rockets as launch vehicles. Apollo/Saturn vehicles were also used for an Apollo Applications Program, which consisted of Skylab, a space station that supported three manned missions in 1973-74, and the Apollo-Soyuz Test Project, a joint Earth orbit mission with the Soviet Union in 1975."_

The questions and the expected answers are shown below:

- **Q1:** "What project put the first Americans into space?"
  - **A1:** "Project Mercury"
- **Q2:** "What program was created to carry out these projects and missions?"
  - **A2:** "The Apollo program"
- **Q3:** "What year did the first manned Apollo flight occur?"
  - **A3:** "1968"
- **Q4:** "What President is credited with the original notion of putting Americans in space?"
  - **A4:** "John F. Kennedy"
- **Q5:** "Who did the U.S. collaborate with on an Earth orbit mission in 1975?"
  - **A5:** "Soviet Union"
- **Q6:** "How long did Project Apollo run?"
  - **A6:** "1961 to 1972"
- **Q7:** "What program helped develop space travel techniques that Project Apollo used?"
  - **A7:** "Gemini Mission"
- **Q8:** "What space station supported three manned missions in 1973-1974?"
  - **A8:** "Skylab"

Data Preprocessing

Let's convert the paragraph and the question to BERT input with the help of the tokenizer:
import helpers.data_processing as dp
import helpers.tokenization as tokenization

tokenizer = tokenization.FullTokenizer(vocab_file="/workspace/TensorRT/demo/BERT/models/fine-tuned/bert_tf_ckpt_large_qa_squad2_amp_128_v19.03.1/vocab.txt", do_lower_case=True)

# The maximum number of tokens for the question. Questions longer than this will be truncated to this length.
max_query_length = 64

# When splitting up a long document into chunks, how much stride to take between chunks.
doc_stride = 128

# The maximum total input sequence length after WordPiece tokenization.
# Sequences longer than this will be truncated, and sequences shorter than this will be padded.
max_seq_length = 128

# Extract tokens from the paragraph
doc_tokens = dp.convert_doc_tokens(short_paragraph_text)

# Extract features from the paragraph and question
features = dp.convert_example_to_features(doc_tokens, question_text, tokenizer, max_seq_length, doc_stride, max_query_length)
_____no_output_____
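The `doc_stride` parameter controls how a paragraph longer than the sequence budget is split into overlapping windows. The helper below is only a simplified illustration of that sliding-window idea, not the exact logic inside `dp.convert_example_to_features`; the window and stride values in the demo call are arbitrary.

def sliding_windows(tokens, max_tokens, stride):
    # Yield overlapping chunks of at most max_tokens tokens,
    # starting a new chunk every `stride` tokens.
    start = 0
    while start < len(tokens):
        yield tokens[start:start + max_tokens]
        if start + max_tokens >= len(tokens):
            break
        start += stride

demo_tokens = ["tok{}".format(i) for i in range(300)]
chunks = list(sliding_windows(demo_tokens, max_tokens=100, stride=64))
print(len(chunks), "chunks; chunk 2 starts at", chunks[1][0], "so it overlaps chunk 1")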
Apache-2.0
demo/BERT/inference.ipynb
malithj/TensorRT
TensorRT Inference
import tensorrt as trt
TRT_LOGGER = trt.Logger(trt.Logger.INFO)

import ctypes
import os

ctypes.CDLL("libnvinfer_plugin.so", mode=ctypes.RTLD_GLOBAL)

import pycuda.driver as cuda
import pycuda.autoinit
import collections
import numpy as np
import time

# Load the BERT-Large Engine
with open("/workspace/TensorRT/demo/BERT/engines/bert_large_128.engine", "rb") as f, \
    trt.Runtime(TRT_LOGGER) as runtime, \
    runtime.deserialize_cuda_engine(f.read()) as engine, \
    engine.create_execution_context() as context:

    # We always use batch size 1.
    input_shape = (1, max_seq_length)
    input_nbytes = trt.volume(input_shape) * trt.int32.itemsize

    # Allocate device memory for inputs.
    d_inputs = [cuda.mem_alloc(input_nbytes) for binding in range(3)]

    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()

    # Specify input shapes. These must be within the min/max bounds of the active profile (0th profile in this case)
    # Note that input shapes can be specified on a per-inference basis, but in this case, we only have a single shape.
    for binding in range(3):
        context.set_binding_shape(binding, input_shape)
    assert context.all_binding_shapes_specified

    # Allocate output buffer by querying the size from the context. This may be different for different input shapes.
    h_output = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    d_output = cuda.mem_alloc(h_output.nbytes)

    print("\nRunning Inference...")

    _NetworkOutput = collections.namedtuple(  # pylint: disable=invalid-name
        "NetworkOutput",
        ["start_logits", "end_logits", "feature_index"])
    networkOutputs = []

    eval_time_elapsed = 0
    for feature_index, feature in enumerate(features):
        # Copy inputs
        input_ids = cuda.register_host_memory(np.ascontiguousarray(feature.input_ids.ravel()))
        segment_ids = cuda.register_host_memory(np.ascontiguousarray(feature.segment_ids.ravel()))
        input_mask = cuda.register_host_memory(np.ascontiguousarray(feature.input_mask.ravel()))

        eval_start_time = time.time()
        cuda.memcpy_htod_async(d_inputs[0], input_ids, stream)
        cuda.memcpy_htod_async(d_inputs[1], segment_ids, stream)
        cuda.memcpy_htod_async(d_inputs[2], input_mask, stream)

        # Run inference
        context.execute_async_v2(bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output)], stream_handle=stream.handle)

        # Synchronize the stream
        stream.synchronize()
        eval_time_elapsed += (time.time() - eval_start_time)

        # Transfer predictions back from GPU
        cuda.memcpy_dtoh_async(h_output, d_output, stream)
        stream.synchronize()

        for index, batch in enumerate(h_output):
            # Data Post-processing
            networkOutputs.append(_NetworkOutput(
                start_logits = np.array(batch.squeeze()[:, 0]),
                end_logits = np.array(batch.squeeze()[:, 1]),
                feature_index = feature_index
                ))

    eval_time_elapsed /= len(features)

    print("-----------------------------")
    print("Running Inference at {:.3f} Sentences/Sec".format(1.0/eval_time_elapsed))
    print("-----------------------------")
_____no_output_____
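The sentences-per-second figure above comes from a single pass over the features. If more stable latency numbers are needed, a small timing helper like the one below can repeat any inference callable and report mean and tail latency; `run_once` is a placeholder for whatever function wraps the TensorRT execution, and the dummy workload in the example only stands in for it.

import time

def benchmark(run_once, warmup=5, iterations=50):
    # Warm-up runs let GPU clocks and caches settle before timing.
    for _ in range(warmup):
        run_once()
    timings = []
    for _ in range(iterations):
        start = time.perf_counter()
        run_once()
        timings.append(time.perf_counter() - start)
    timings.sort()
    mean_ms = 1000.0 * sum(timings) / len(timings)
    p95_ms = 1000.0 * timings[int(0.95 * (len(timings) - 1))]
    return mean_ms, p95_ms

# Example with a dummy workload standing in for the real inference call
mean_ms, p95_ms = benchmark(lambda: sum(i * i for i in range(10000)))
print("mean {:.2f} ms, p95 {:.2f} ms".format(mean_ms, p95_ms))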
Apache-2.0
demo/BERT/inference.ipynb
malithj/TensorRT
Data Post-Processing

Now that we have the inference results, let's extract the actual answer to our question.
# The total number of n-best predictions to generate in the nbest_predictions.json output file
n_best_size = 20

# The maximum length of an answer that can be generated. This is needed
# because the start and end predictions are not conditioned on one another
max_answer_length = 30

prediction, nbest_json, scores_diff_json = dp.get_predictions(doc_tokens, features, networkOutputs, n_best_size, max_answer_length)

for index, output in enumerate(networkOutputs):
    print("Processing output")
    print("Answer: '{}'".format(prediction))
    print("with prob: {:.3f}%".format(nbest_json[0]['probability'] * 100.0))
_____no_output_____
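`dp.get_predictions` does this with extra bookkeeping (n-best lists, token-to-word mapping), but the core idea of turning start/end logits into an answer span can be shown in a few lines of NumPy. This is a simplified sketch, and the toy logits below are made up purely for illustration.

import numpy as np

def best_span(start_logits, end_logits, max_answer_length):
    # Pick the (start, end) pair with the highest combined logit,
    # subject to end >= start and a maximum span length.
    best = (0, 0)
    best_score = -np.inf
    for start in range(len(start_logits)):
        for end in range(start, min(start + max_answer_length, len(end_logits))):
            score = start_logits[start] + end_logits[end]
            if score > best_score:
                best_score = score
                best = (start, end)
    return best, best_score

toy_start = np.array([0.1, 2.5, 0.3, 0.2, 0.1])
toy_end = np.array([0.2, 0.4, 3.1, 0.3, 0.1])
print(best_span(toy_start, toy_end, max_answer_length=3))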
Apache-2.0
demo/BERT/inference.ipynb
malithj/TensorRT
Outliers Impact
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('whitegrid')
%matplotlib inline
import pandas as pd
_____no_output_____
MIT
bonston_housing_project/Regularized Regression.ipynb
taareek/machine_learning
Linear Regression
from sklearn.linear_model import LinearRegression

np.random.seed(42)
n_samples = 100
rng = np.random.randn(n_samples) * 10
print("Feature shape: ", rng.shape)

y_gen = 0.5 * rng + 2 * np.random.randn(n_samples)
print("\nTarget shape: ", y_gen.shape)

lr = LinearRegression()
lr.fit(rng.reshape(-1, 1), y_gen)
model_pred = lr.predict(rng.reshape(-1, 1))

# plotting
plt.figure(figsize=(10, 8));
plt.scatter(rng, y_gen);
plt.plot(rng, model_pred);
print("Coefficient Estimate: ", lr.coef_);

# inject a single large outlier at the largest x value
idx = rng.argmax()
y_gen[idx] = 200

plt.figure(figsize=(10, 8));
plt.scatter(rng, y_gen);

o_lr = LinearRegression(normalize=True)
o_lr.fit(rng.reshape(-1, 1), y_gen)
o_model_pred = o_lr.predict(rng.reshape(-1, 1))

plt.scatter(rng, y_gen);
plt.plot(rng, o_model_pred)
print("Coefficient Estimate: ", o_lr.coef_)
Coefficient Estimate: [0.92796845]
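To make the outlier's impact explicit, one option is to refit the same model with the corrupted point removed and compare slopes. This is only a sketch and it assumes `rng`, `y_gen`, `idx`, and `o_lr` from the cells above are still in memory.

from sklearn.linear_model import LinearRegression
import numpy as np

# Refit without the corrupted observation and compare the slopes
mask = np.arange(len(rng)) != idx
clean_lr = LinearRegression()
clean_lr.fit(rng[mask].reshape(-1, 1), y_gen[mask])

print("Slope with outlier:   ", o_lr.coef_[0])
print("Slope without outlier:", clean_lr.coef_[0])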
MIT
bonston_housing_project/Regularized Regression.ipynb
taareek/machine_learning
Ridge Regression
from sklearn.linear_model import Ridge

ridge_mod = Ridge(alpha=1, normalize=True)
ridge_mod.fit(rng.reshape(-1, 1), y_gen)
ridge_mod_pred = ridge_mod.predict(rng.reshape(-1, 1))

plt.figure(figsize=(10, 8))
plt.scatter(rng, y_gen);
plt.plot(rng, ridge_mod_pred);
print("Coefficient of Estimation: ", ridge_mod.coef_)
# ridge_mod_pred
_____no_output_____
MIT
bonston_housing_project/Regularized Regression.ipynb
taareek/machine_learning
Lasso Regression
from sklearn.linear_model import Lasso

# define model
lasso_mod = Lasso(alpha=0.4, normalize=True)
lasso_mod.fit(rng.reshape(-1, 1), y_gen)        # (features, target)
lasso_mod_pred = lasso_mod.predict(rng.reshape(-1, 1))  # (features)

# plotting
plt.figure(figsize=(10, 8));
plt.scatter(rng, y_gen);        # (features, target)
plt.plot(rng, lasso_mod_pred);  # (features, prediction)
print("Coefficient Estimation: ", lasso_mod.coef_)  # the coefficient shrinks as alpha increases
Coefficient Estimation: [0.48530263]
MIT
bonston_housing_project/Regularized Regression.ipynb
taareek/machine_learning
Elastic Net Regression
from sklearn.linear_model import ElasticNet

# defining model and prediction
elnet_mod = ElasticNet(alpha=0.02, normalize=True)
elnet_mod.fit(rng.reshape(-1, 1), y_gen)
elnet_pred = elnet_mod.predict(rng.reshape(-1, 1))

# plotting
plt.figure(figsize=(10, 8));
plt.scatter(rng, y_gen);
plt.plot(rng, elnet_pred);
print("Coefficient Estimation: ", elnet_mod.coef_)
Coefficient Estimation: [0.4584509]
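To see how the strength of regularization drives the shrinkage observed above, a quick sweep over alpha can be run on the same kind of 1-D data. This is only a sketch: it regenerates the toy data so it can run on its own, and the alpha grid is arbitrary, so the exact numbers will differ from the outputs above.

import numpy as np
from sklearn.linear_model import Ridge, Lasso

np.random.seed(42)
x = np.random.randn(100) * 10
y = 0.5 * x + 2 * np.random.randn(100)
y[x.argmax()] = 200  # inject the same kind of outlier as above
X = x.reshape(-1, 1)

for alpha in [0.01, 0.1, 1.0, 10.0]:
    ridge_coef = Ridge(alpha=alpha).fit(X, y).coef_[0]
    lasso_coef = Lasso(alpha=alpha).fit(X, y).coef_[0]
    print("alpha={:>5}: ridge coef={:.3f}, lasso coef={:.3f}".format(alpha, ridge_coef, lasso_coef))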
MIT
bonston_housing_project/Regularized Regression.ipynb
taareek/machine_learning
![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/work-with-data/dataprep/how-to-guides/add-column-using-expression.png)

Add Column using Expression

With Azure ML Data Prep you can add a new column to data with `Dataflow.add_column` by using a Data Prep expression to calculate the value from existing columns. This is similar to using Python to create a [new script column](./custom-python-transforms.ipynb#New-Script-Column), except that Data Prep expressions are more limited and will execute faster. The expressions used are the same as for [filtering rows](./filtering.ipynb#Filtering-rows) and hence have the same functions and operators available.

Here we add additional columns. First we load the input data.
import azureml.dataprep as dprep

# loading data
dflow = dprep.auto_read_file('../data/crime-spring.csv')
dflow.head(5)
_____no_output_____
MIT
how-to-guides/add-column-using-expression.ipynb
Bhaskers-Blu-Org2/AMLDataPrepDocs
`substring(start, length)`

Add a new column "Case Category" using the `substring(start, length)` expression to extract the prefix from the "Case Number" column.
case_category = dflow.add_column(new_column_name='Case Category',
                                 prior_column='Case Number',
                                 expression=dflow['Case Number'].substring(0, 2))
case_category.head(5)
_____no_output_____
MIT
how-to-guides/add-column-using-expression.ipynb
Bhaskers-Blu-Org2/AMLDataPrepDocs
`substring(start)`

Add a new column "Case Id" using the `substring(start)` expression to extract just the number from "Case Number" column and then convert it to numeric.
case_id = dflow.add_column(new_column_name='Case Id',
                           prior_column='Case Number',
                           expression=dflow['Case Number'].substring(2))
case_id = case_id.to_number('Case Id')
case_id.head(5)
_____no_output_____
MIT
how-to-guides/add-column-using-expression.ipynb
Bhaskers-Blu-Org2/AMLDataPrepDocs
`length()`

Using the length() expression, add a new numeric column "Length", which contains the length of the string in "Primary Type".
dflow_length = dflow.add_column(new_column_name='Length',
                                prior_column='Primary Type',
                                expression=dflow['Primary Type'].length())
dflow_length.head(5)
_____no_output_____
MIT
how-to-guides/add-column-using-expression.ipynb
Bhaskers-Blu-Org2/AMLDataPrepDocs
`to_upper()`

Using the to_upper() expression, add a new column "Upper Case", which contains the string in "Primary Type" in upper case.
dflow_to_upper = dflow.add_column(new_column_name='Upper Case',
                                  prior_column='Primary Type',
                                  expression=dflow['Primary Type'].to_upper())
dflow_to_upper.head(5)
_____no_output_____
MIT
how-to-guides/add-column-using-expression.ipynb
Bhaskers-Blu-Org2/AMLDataPrepDocs
`to_lower()`

Using the to_lower() expression, add a new column "Lower Case", which contains the string in "Primary Type" in lower case.
dflow_to_lower = dflow.add_column(new_column_name='Lower Case',
                                  prior_column='Primary Type',
                                  expression=dflow['Primary Type'].to_lower())
dflow_to_lower.head(5)
_____no_output_____
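As a sanity check on what these string expressions compute, here is a rough pandas analogy on a toy frame. This is only an illustration of the semantics; it is not the Data Prep API, and the sample case numbers are made up.

import pandas as pd

toy = pd.DataFrame({'Case Number': ['HY329907', 'HY329265']})
toy['Case Category'] = toy['Case Number'].str[0:2]          # like substring(0, 2)
toy['Case Id'] = pd.to_numeric(toy['Case Number'].str[2:])  # like substring(2) + to_number
toy['Length'] = toy['Case Number'].str.len()                # like length()
toy['Upper Case'] = toy['Case Number'].str.upper()          # like to_upper()
toy['Lower Case'] = toy['Case Number'].str.lower()          # like to_lower()
print(toy)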
MIT
how-to-guides/add-column-using-expression.ipynb
Bhaskers-Blu-Org2/AMLDataPrepDocs
`col(column1) + col(column2)`

Add a new column "Total" to show the result of adding the values in the "FBI Code" column to the "Community Area" column.
dflow_total = dflow.add_column(new_column_name='Total',
                               prior_column='FBI Code',
                               expression=dflow['Community Area'] + dflow['FBI Code'])
dflow_total.head(5)
_____no_output_____
MIT
how-to-guides/add-column-using-expression.ipynb
Bhaskers-Blu-Org2/AMLDataPrepDocs
`col(column1) - col(column2)`

Add a new column "Difference" to show the result of subtracting the values in the "FBI Code" column from the "Community Area" column.
dflow_diff = dflow.add_column(new_column_name='Difference',
                              prior_column='FBI Code',
                              expression=dflow['Community Area'] - dflow['FBI Code'])
dflow_diff.head(5)
_____no_output_____
MIT
how-to-guides/add-column-using-expression.ipynb
Bhaskers-Blu-Org2/AMLDataPrepDocs
`col(column1) * col(column2)`

Add a new column "Product" to show the result of multiplying the values in the "Community Area" column by the "FBI Code" column.
dflow_prod = dflow.add_column(new_column_name='Product',
                              prior_column='FBI Code',
                              expression=dflow['Community Area'] * dflow['FBI Code'])
dflow_prod.head(5)
_____no_output_____
MIT
how-to-guides/add-column-using-expression.ipynb
Bhaskers-Blu-Org2/AMLDataPrepDocs
`col(column1) / col(column2)`

Add a new column "True Quotient" to show the result of true (decimal) division of the values in "Community Area" column by the "FBI Code" column.
dflow_true_div = dflow.add_column(new_column_name='True Quotient',
                                  prior_column='FBI Code',
                                  expression=dflow['Community Area'] / dflow['FBI Code'])
dflow_true_div.head(5)
_____no_output_____
MIT
how-to-guides/add-column-using-expression.ipynb
Bhaskers-Blu-Org2/AMLDataPrepDocs
`col(column1) // col(column2)`

Add a new column "Floor Quotient" to show the result of floor (integer) division of the values in "Community Area" column by the "FBI Code" column.
dflow_floor_div = dflow.add_column(new_column_name='Floor Quotient',
                                   prior_column='FBI Code',
                                   expression=dflow['Community Area'] // dflow['FBI Code'])
dflow_floor_div.head(5)
_____no_output_____
MIT
how-to-guides/add-column-using-expression.ipynb
Bhaskers-Blu-Org2/AMLDataPrepDocs
`col(column1) % col(column2)`

Add a new column "Mod" to show the result of the "Community Area" column modulo the "FBI Code" column.
dflow_mod = dflow.add_column(new_column_name='Mod',
                             prior_column='FBI Code',
                             expression=dflow['Community Area'] % dflow['FBI Code'])
dflow_mod.head(5)
_____no_output_____
MIT
how-to-guides/add-column-using-expression.ipynb
Bhaskers-Blu-Org2/AMLDataPrepDocs
`col(column1) ** col(column2)`

Add a new column "Power" to show the result of applying the exponentiation operation when the base is the "Community Area" column and the exponent is the "FBI Code" column.
dflow_pow = dflow.add_column(new_column_name='Power',
                             prior_column='FBI Code',
                             expression=dflow['Community Area'] ** dflow['FBI Code'])
dflow_pow.head(5)
_____no_output_____
MIT
how-to-guides/add-column-using-expression.ipynb
Bhaskers-Blu-Org2/AMLDataPrepDocs
Purpose: A basic object identification package for the lab to use

*Step 1: Import packages*
import os.path as op
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

# scikit-image imports
from skimage import io
from skimage import filters
from skimage.feature import canny
from skimage import measure
from scipy import ndimage as ndi

%matplotlib inline

import warnings
warnings.filterwarnings('ignore')
_____no_output_____
MIT
scripts/object_identification_basic.ipynb
hhelmbre/qdbvcella
*Step 2: User Inputs*
file_location = '../../31.2_DG_quant.tif'
plot_name = 'practice2.png'
channel_1_color = 'Blue'
channel_2_color = 'Green'
_____no_output_____
MIT
scripts/object_identification_basic.ipynb
hhelmbre/qdbvcella
*Step 3: Read the image into the notebook*
# Read in the file
im = io.imread(file_location)

# Convert image to numpy array
imarray = np.array(im)

# Checking the image shape
imarray.shape
_____no_output_____
MIT
scripts/object_identification_basic.ipynb
hhelmbre/qdbvcella
*Step 4: Color Split*
channel_1 = im[0, :, :]
channel_2 = im[1, :, :]
_____no_output_____
MIT
scripts/object_identification_basic.ipynb
hhelmbre/qdbvcella
*Step 5: Visualization Check*
fig = plt.figure()

ax1 = fig.add_subplot(2, 2, 1)
ax1.set_title(channel_1_color)
ax1.imshow(channel_1, cmap='gray')

ax2 = fig.add_subplot(2, 2, 2)
ax2.set_title(channel_2_color)
ax2.imshow(channel_2, cmap='gray')

fig.set_size_inches(10.5, 10.5, forward=True)
_____no_output_____
MIT
scripts/object_identification_basic.ipynb
hhelmbre/qdbvcella
*Step 6: Apply a Threshold*
# Otsu's method computes a single global threshold for each channel
thresh_c1 = filters.threshold_otsu(channel_1)
binary_c1 = channel_1 > thresh_c1

thresh_c2 = filters.threshold_otsu(channel_2)
binary_c2 = channel_2 > thresh_c2

fig = plt.figure()

ax1 = fig.add_subplot(2, 2, 1)
ax1.set_title(str(channel_1_color + ' Threshold'))
ax1.imshow(binary_c1, cmap='gray')

ax2 = fig.add_subplot(2, 2, 2)
ax2.set_title(str(channel_2_color + ' Threshold'))
ax2.imshow(binary_c2, cmap='gray')

fig.set_size_inches(10.5, 10.5, forward=True)
_____no_output_____
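Otsu's method picks one global cut-off for the whole image. If the staining or illumination is uneven, a locally adaptive threshold may separate objects better; the sketch below shows how that could look with `skimage.filters.threshold_local`. The `block_size` of 51 is an arbitrary odd number that would need tuning per image, and the snippet assumes `channel_1` and `plt` from the earlier cells.

from skimage import filters

# Locally adaptive alternative to the global Otsu threshold above
# (block_size is a hypothetical starting value, not a recommendation)
adaptive_thresh = filters.threshold_local(channel_1, block_size=51)
binary_c1_adaptive = channel_1 > adaptive_thresh

plt.imshow(binary_c1_adaptive, cmap='gray');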
MIT
scripts/object_identification_basic.ipynb
hhelmbre/qdbvcella
*Step 7: Fill in Objects*
filled_c1 = ndi.binary_fill_holes(binary_c1)
filled_c2 = ndi.binary_fill_holes(binary_c2)
_____no_output_____
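With the holes filled, the objects themselves can be identified by labelling connected components; `measure` was already imported in Step 1. This is a sketch of a possible next step rather than part of the original workflow: `area`, `centroid`, and `label` are standard `regionprops` attributes, but the 50-pixel minimum size is an arbitrary cut-off that would need adjusting for real data, and it assumes `filled_c1` from the cell above.

# Label connected components in the filled blue-channel mask
labels_c1 = measure.label(filled_c1)
props_c1 = measure.regionprops(labels_c1)

# Keep only objects above a (hypothetical) minimum size and report them
min_area = 50
objects = [p for p in props_c1 if p.area >= min_area]
print("Objects found:", len(objects))
for obj in objects[:5]:
    print("label", obj.label, "area", obj.area, "centroid", obj.centroid)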
MIT
scripts/object_identification_basic.ipynb
hhelmbre/qdbvcella