Each record below consists of six string fields: markdown, code, output, license, path, and repo_name.
Examples Intro cartoons
t = 500e-3 dt = 1e-3 times = create_times(t, dt) s = .5 noi = np.random.normal(0, s, size=times.shape[0]) f = 10 r = 1 ro = 2 n = 1 l = 11.7e-3 n_bursts = 2 stim = boxcar(times, r, 2, l, dt, offset=200e-3) + ro re = stim + noi m_post = np.logical_and(times > 0.39, times < 0.42) m_pre = np.logical_and(times > 0.36, times < 0.39) # One perfect stim plt.figure(figsize=(2, 10)) plt.subplot(311) plt.plot(times, stim, 'k', linestyle='--') # plt.plot(times, re, 'k', alpha=0.2) plt.axis('off') plt.ylim(-2, 10) plt.xlim(.15, 0.35) # Three stim stim += boxcar(times, r*2, 2, l, dt, offset=250e-3) stim += boxcar(times, r*3, 2, l, dt, offset=300e-3) plt.figure(figsize=(2, 8)) plt.subplot(311) plt.plot(times, stim, 'purple', alpha=0.5) # plt.plot(times, re, 'k', alpha=0.2) plt.axis('off') plt.ylim(-2, 10) plt.xlim(.15, 0.35) t = 500e-3 dt = 1e-3 times = create_times(t, dt) s = 3 noi = np.random.normal(0, s, size=times.shape[0]) f = 10 r = 14 ro = 2 n = 1 l = 11.7e-3 n_bursts = 2 stim = boxcar(times, r, 2, l, dt, offset=400e-3) + ro re = stim + noi m_post = np.logical_and(times > 0.39, times < 0.42) m_pre = np.logical_and(times > 0.1, times < 0.35) # Zooooooom plt.figure(figsize=(3, 2)) plt.plot(times, re, 'k', alpha=0.2) plt.plot(times[m_pre], re[m_pre], 'grey', linewidth=4) plt.plot(times[m_post], re[m_post], 'purple', linewidth=4, alpha=0.4) plt.axvline(x=0.4, color='purple', alpha=0.6, linewidth=3, linestyle='-.') plt.axvline(x=0.412, color='purple', alpha=0.6, linewidth=3, linestyle='-.') plt.xlim(0.1, 0.5) plt.axis('off')
_____no_output_____
MIT
figs/fig4.ipynb
voytekresearch/alphalogical
Model examples Random phase
%run /home/ejp/src/bluemass/bm.py ../data/fig4/ ../pars/fig4/mathewson_constant_osc_r72.2222222222.yaml -t 0.5 --sigma 3 --loc r_E res1 = load_kdf("../data/fig4/result.hdf5") idx1 = load_kdf("../data/fig4/index.hdf5") %run /home/ejp/src/bluemass/bm.py ../data/fig4/ ../pars/fig4/mathewson_constant_osc_r72.2222222222.yaml -t 0.5 --sigma 3 --loc r_E res2 = load_kdf("../data/fig4/result.hdf5") idx2 = load_kdf("../data/fig4/index.hdf5") %run /home/ejp/src/bluemass/bm.py ../data/fig4/ ../pars/fig4/mathewson_constant_osc_r72.2222222222.yaml -t 0.5 --sigma 3 --loc r_E res3 = load_kdf("../data/fig4/result.hdf5") idx3 = load_kdf("../data/fig4/index.hdf5") %run /home/ejp/src/bluemass/bm.py ../data/fig4/ ../pars/fig4/mathewson_constant_osc_r72.2222222222.yaml -t 0.5 --sigma 3 --loc r_E res4 = load_kdf("../data/fig4/result.hdf5") idx4 = load_kdf("../data/fig4/index.hdf5") times = res1['times'] stim = res1['stims'][:,0] ys1 = res1['ys'] ys2 = res2['ys'] ys3 = res3['ys'] ys4 = res4['ys'] re1 = ys1[:, idx1['r_E']] re2 = ys2[:, idx2['r_E']] re3 = ys3[:, idx3['r_E']] re4 = ys4[:, idx4['r_E']] plt.figure(figsize=(3, 3)) plt.plot(times, re1, color='k', linewidth=3) plt.plot(times, re2+5, color='k', linewidth=3) plt.plot(times, re3+10, color='k', linewidth=3) plt.plot(times, re4+15, color='k', linewidth=3) plt.axis('off') plt.ylim(3, 25) plt.xlim(0.2, 0.5) plt.axvline(x=0.4, color='purple', alpha=0.7, linewidth=3, linestyle='-.') plt.axvline(x=0.412, color='purple', alpha=0.7, linewidth=3, linestyle='-.') plt.figure(figsize=(3, 3)) plt.plot(times, res1['rates'][:, 0] / res1['rates'][:, 0].max() + 0, 'grey', linewidth=3, linestyle='--') plt.plot(times, res2['rates'][:, 0] / res2['rates'][:, 0].max() + 1, 'grey', linewidth=3, linestyle='--') plt.plot(times, res3['rates'][:, 0] / res3['rates'][:, 0].max() + 2, 'grey', linewidth=3, linestyle='--') plt.plot(times, res4['rates'][:, 0] / res4['rates'][:, 0].max() + 3, 'grey', linewidth=3, linestyle='--') plt.axvline(x=0.4, color='purple', alpha=0.7, linewidth=3, linestyle='-.') plt.axvline(x=0.412, color='purple', alpha=0.7, linewidth=3, linestyle='-.') plt.xlim(0.2, 0.5) plt.ylim(0, 4.1) plt.axis('off')
_____no_output_____
MIT
figs/fig4.ipynb
voytekresearch/alphalogical
Locked burst
%run /home/ejp/src/bluemass/bm.py ../data/fig4/ ../pars/fig4/mathewson_lockedburst_osc_r72.2222222222.yaml -t 0.7 --sigma 1 --loc r_E res = load_kdf("../data/fig4/result.hdf5") idx = load_kdf("../data/fig4/index.hdf5") times = res['times'] stim = res['stims'][:,0] ys = res['ys'] re = ys[:, idx['r_E']] rates = res['rates'][:, 0] / res['rates'][:, 0].max() plt.figure(figsize=(4, 2)) plt.plot(times, re, color='k', linewidth=2) plt.axvline(x=0.4, color='purple', alpha=0.7, linewidth=2, linestyle='-.') plt.axvline(x=0.412, color='purple', alpha=0.7, linewidth=2, linestyle='-.') plt.xlim(0.1, 0.9) plt.ylim(3, 10) plt.axis('off') plt.figure(figsize=(4, 1)) plt.plot(times, rates,'grey', linewidth=4, linestyle='--') plt.axvline(x=0.4, color='purple', alpha=0.7, linewidth=2, linestyle='-.') plt.axvline(x=0.412, color='purple', alpha=0.7, linewidth=2, linestyle='-.') plt.xlim(0.1, 0.9) plt.ylim(0, 1.1) plt.axis('off') nrns_e = Spikes(125, 1, dt=1e-3, seed=42) nrns_i = Spikes(125, 1, dt=1e-3, seed=42+1) times = nrns_e.times r = 125 r_osc = bursts(times, r, 10, 2, min_a=12, offset=0.35) o_e = nrns_e.poisson(r_osc).sum(1) o_i = nrns_i.poisson(r_osc).sum(1) plt.figure(figsize=(2, 1)) plt.plot(times, o_e-o_i, color='k') # plt.ylim(-120, 120) plt.xlim(0.2, 0.6) plt.axvline(x=0.4, color='purple', alpha=0.7, linewidth=2, linestyle='-.') plt.axvline(x=0.412, color='purple', alpha=0.7, linewidth=2, linestyle='-.') plt.plot(times, r_osc/2 + 10, 'grey', linewidth=3, linestyle='--') sub = plt.subplot(111) sub.set_frame_on(False) sub.get_yaxis().set_visible(False) sub.get_xaxis().set_visible(False)
_____no_output_____
MIT
figs/fig4.ipynb
voytekresearch/alphalogical
Results Phase experiments
a1 = load_kdf("../data/fig4/a_part1.hdf5")
# a2 = load_kdf("../data/fig4/a_part2.hdf5")
a3 = load_kdf("../data/fig4/a_part3.hdf5")
a4 = load_kdf("../data/fig4/a_part4.hdf5")
# a5 = load_kdf("../data/fig4/a_part5.hdf5")
a6 = load_kdf("../data/fig4/a_part6.hdf5")

b1 = load_kdf("../data/fig4/b_part1.hdf5")
# b2 = load_kdf("../data/fig4/b_part2.hdf5")
b3 = load_kdf("../data/fig4/b_part3.hdf5")
b4 = load_kdf("../data/fig4/b_part4.hdf5")
# b5 = load_kdf("../data/fig4/b_part5.hdf5")
b6 = load_kdf("../data/fig4/b_part6.hdf5")

pprint(a3.keys())
pprint(a3['hits'].shape)
[u'n_stim', u'hits', u'stims', u'd_primes', u'misses', u'rates', u'false_alarms', u'correct_rejections'] (360, 10)
MIT
figs/fig4.ipynb
voytekresearch/alphalogical
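For reference, the `d_primes` arrays above were pre-computed and saved to the hdf5 files; the notebook only loads them. As a reminder of the convention, here is a minimal sketch of the standard signal-detection d' (z-transformed hit rate minus z-transformed false-alarm rate) computed from hypothetical per-condition counts, not from the saved arrays:

```python
import numpy as np
from scipy.stats import norm

def d_prime(hits, misses, false_alarms, correct_rejections):
    """Standard signal-detection d': z(hit rate) - z(false-alarm rate)."""
    hit_rate = hits / (hits + misses)
    fa_rate = false_alarms / (false_alarms + correct_rejections)
    # keep rates away from 0 and 1 so the z-scores stay finite
    hit_rate = np.clip(hit_rate, 1e-3, 1 - 1e-3)
    fa_rate = np.clip(fa_rate, 1e-3, 1 - 1e-3)
    return norm.ppf(hit_rate) - norm.ppf(fa_rate)

print(d_prime(hits=45, misses=5, false_alarms=10, correct_rejections=40))  # ~2.12
```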
2 SD threshold
plt.figure(figsize=(3, 3))
r = a3['rates'] / 10.0  # 10 Hz is the noise level
M = a3['d_primes'].mean(0)
SD = a3['d_primes'].std(0)
plt.plot(r, M, color='k', linewidth=3, label='10 Hz oscillation')
plt.fill_between(r, M+SD, M-SD, facecolor='black', alpha=0.1)

r = a6['rates'] / 10
M = a6['d_primes'].mean(0)
SD = a6['d_primes'].std(0)
plt.plot(r, M, color='grey', linewidth=3, label="Constant")
plt.fill_between(r, M+SD, M-SD, facecolor='grey', alpha=0.1)

plt.xlabel("Input SNR")
plt.ylim(-3, 4)
plt.ylabel("d'")
plt.legend(loc='upper left', fancybox=True, framealpha=0.5)
plt.tight_layout()
sns.despine()

plt.figure(figsize=(3, 3))
r = b3['rates'] / 10.0  # 10 Hz is the noise level
M = b3['d_primes'].mean(0)
SD = b3['d_primes'].std(0)
plt.plot(r, M, color='black', linewidth=3, label='2 cycle burst')
plt.fill_between(r, M+SD, M-SD, facecolor='black', alpha=0.1)

r = b6['rates'] / 10.0
M = b6['d_primes'].mean(0)
SD = b6['d_primes'].std(0)
plt.plot(r, M, color='grey', linewidth=3, label="Constant")
plt.fill_between(r, M+SD, M-SD, facecolor='grey', alpha=0.1)

plt.xlabel("Input SNR")
plt.ylim(-3, 4)
plt.ylabel("d'")
plt.legend(loc='upper left', fancybox=True, framealpha=0.1)
plt.tight_layout()
sns.despine()
_____no_output_____
MIT
figs/fig4.ipynb
voytekresearch/alphalogical
1 SD threshold
plt.figure(figsize=(3, 3))

# osc
r = a1['rates'] / 10.0  # 10 Hz is the noise level
M = a1['d_primes'].mean(0)
SD = a1['d_primes'].std(0)
SEM = SD / np.sqrt(len(SD))
plt.plot(r, M, color='k', linewidth=3, label='10 Hz oscillation')
plt.fill_between(r, M+SEM, M-SEM, facecolor='black', alpha=0.1)

# const
r = a4['rates'] / 10
M = a4['d_primes'].mean(0)
SD = a4['d_primes'].std(0)
SEM = SD / np.sqrt(len(SD))
plt.plot(r, M, color='grey', linewidth=3, label="Constant")
plt.fill_between(r, M+SEM, M-SEM, facecolor='grey', alpha=0.1)

# labels, etc
plt.xlabel("Input SNR")
plt.ylim(-1, 3)
plt.ylabel("d'")
plt.legend(loc='lower right', fancybox=True, framealpha=0.5)
plt.tight_layout()
sns.despine()

plt.figure(figsize=(3, 3))
r = b1['rates'] / 10.0  # 10 Hz is the noise level
M = b1['d_primes'].mean(0)
SD = b1['d_primes'].std(0)
SEM = SD / np.sqrt(len(SD))
plt.plot(r, M, color='black', linewidth=3, label='2 cycle burst')
plt.fill_between(r, M+SEM, M-SEM, facecolor='black', alpha=0.1)

r = b4['rates'] / 10.0
M = b4['d_primes'].mean(0)
SD = b4['d_primes'].std(0)
SEM = SD / np.sqrt(len(SD))  # recompute SEM for b4 (previously reused b1's SEM)
plt.plot(r, M, color='grey', linewidth=3, label="Constant")
plt.fill_between(r, M+SEM, M-SEM, facecolor='grey', alpha=0.1)

plt.xlabel("Input SNR")
plt.ylim(-1, 3)
plt.ylabel("d'")
plt.legend(loc='upper left', fancybox=True, framealpha=0.1)
plt.tight_layout()
sns.despine()
_____no_output_____
MIT
figs/fig4.ipynb
voytekresearch/alphalogical
Amplitude experiments
res = load_kdf("../data/fig4/4p.hdf5") plt.figure(figsize=(3, 3)) p = res['powers2'] M = res['d_primes'].mean(0) SD = res['d_primes'].std(0) SEM = SD / np.sqrt(len(SD)) plt.plot(p, M, color='black', linewidth=3) plt.fill_between(p, M+SEM, M-SEM, facecolor='black', alpha=0.1) plt.xlabel("Rel. power (AU)") plt.ylabel("d'") plt.xlim(1, 3) plt.axhline(y=0, color='k', linewidth=.1) plt.tight_layout() sns.despine() plt.figure(figsize=(3, 3)) p = res['powers2'] / res['pow1'] left = res['p_lefts'].mean(0) right = res['p_rights'].mean(0) plt.plot(p, left, color='purple', alpha=0.4, label='left') plt.plot(p, right, color='purple', alpha=1, label='right') plt.legend(loc='upper left', ncol=2) plt.xlabel("Rel. bias (AU)") plt.ylabel("Choice probability") plt.ylim(0, 1) plt.xlim(1, 3) plt.axhline(y=0.5, color='k', linewidth=.1) plt.tight_layout() sns.despine()
_____no_output_____
MIT
figs/fig4.ipynb
voytekresearch/alphalogical
Reference

This example is taken from the book [DL with Python](https://www.manning.com/books/deep-learning-with-python) by F. Chollet. All the notebooks from the book are available for free on [Github](https://github.com/fchollet/deep-learning-with-python-notebooks). If you would like to run the example locally, follow the instructions provided on the [Keras website](https://keras.io/installation).

---
import keras keras.__version__
Using TensorFlow backend.
MIT
samples/notebooks/week06-04-introduction-to-gans.ipynb
gu-ma/ba_218_comppx_h1901
Introduction to generative adversarial networks

This notebook contains the second code sample found in Chapter 8, Section 5 of [Deep Learning with Python](https://www.manning.com/books/deep-learning-with-python?a_aid=keras&a_bid=76564dff). Note that the original text features far more content, in particular further explanations and figures: in this notebook, you will only find source code and related comments.

---

[...]

A schematic GAN implementation

In what follows, we explain how to implement a GAN in Keras, in its barest form -- since GANs are quite advanced, diving deeply into the technical details would be out of scope for us. Our specific implementation will be a deep convolutional GAN, or DCGAN: a GAN where the generator and discriminator are deep convnets. In particular, it leverages a `Conv2DTranspose` layer for image upsampling in the generator.

We will train our GAN on images from CIFAR10, a dataset of 50,000 32x32 RGB images belonging to 10 classes (5,000 images per class). To make things even easier, we will only use images belonging to the class "frog".

Schematically, our GAN looks like this:

* A `generator` network maps vectors of shape `(latent_dim,)` to images of shape `(32, 32, 3)`.
* A `discriminator` network maps images of shape (32, 32, 3) to a binary score estimating the probability that the image is real.
* A `gan` network chains the generator and the discriminator together: `gan(x) = discriminator(generator(x))`. Thus this `gan` network maps latent space vectors to the discriminator's assessment of the realism of these latent vectors as decoded by the generator.
* We train the discriminator using examples of real and fake images along with "real"/"fake" labels, as we would train any regular image classification model.
* To train the generator, we use the gradients of the generator's weights with regard to the loss of the `gan` model. This means that, at every step, we move the weights of the generator in a direction that will make the discriminator more likely to classify as "real" the images decoded by the generator. I.e. we train the generator to fool the discriminator.

A bag of tricks

Training GANs and tuning GAN implementations is notoriously difficult. There are a number of known "tricks" that one should keep in mind. Like most things in deep learning, it is more alchemy than science: these tricks are really just heuristics, not theory-backed guidelines. They are backed by some level of intuitive understanding of the phenomenon at hand, and they are known to work well empirically, albeit not necessarily in every context.

Here are a few of the tricks that we leverage in our own implementation of a GAN generator and discriminator below. It is not an exhaustive list of GAN-related tricks; you will find many more across the GAN literature.

* We use `tanh` as the last activation in the generator, instead of `sigmoid`, which would be more commonly found in other types of models.
* We sample points from the latent space using a _normal distribution_ (Gaussian distribution), not a uniform distribution.
* Stochasticity is good to induce robustness. Since GAN training results in a dynamic equilibrium, GANs are likely to get "stuck" in all sorts of ways. Introducing randomness during training helps prevent this. We introduce randomness in two ways: 1) we use dropout in the discriminator, 2) we add some random noise to the labels for the discriminator.
* Sparse gradients can hinder GAN training. In deep learning, sparsity is often a desirable property, but not in GANs. There are two things that can induce gradient sparsity: 1) max pooling operations, 2) ReLU activations. Instead of max pooling, we recommend using strided convolutions for downsampling, and we recommend using a `LeakyReLU` layer instead of a ReLU activation. It is similar to ReLU but it relaxes sparsity constraints by allowing small negative activation values.
* In generated images, it is common to see "checkerboard artifacts" caused by unequal coverage of the pixel space in the generator. To fix this, we use a kernel size that is divisible by the stride size whenever we use a strided `Conv2DTranspose` or `Conv2D` in both the generator and discriminator.

The generator

First, we develop a `generator` model, which turns a vector (from the latent space -- during training it will be sampled at random) into a candidate image. One of the many issues that commonly arise with GANs is that the generator gets stuck with generated images that look like noise. A possible solution is to use dropout on both the discriminator and generator.
import keras
from keras import layers
import numpy as np

latent_dim = 32
height = 32
width = 32
channels = 3

generator_input = keras.Input(shape=(latent_dim,))

# First, transform the input into a 16x16 128-channels feature map
x = layers.Dense(128 * 16 * 16)(generator_input)
x = layers.LeakyReLU()(x)
x = layers.Reshape((16, 16, 128))(x)

# Then, add a convolution layer
x = layers.Conv2D(256, 5, padding='same')(x)
x = layers.LeakyReLU()(x)

# Upsample to 32x32
x = layers.Conv2DTranspose(256, 4, strides=2, padding='same')(x)
x = layers.LeakyReLU()(x)

# Few more conv layers
x = layers.Conv2D(256, 5, padding='same')(x)
x = layers.LeakyReLU()(x)
x = layers.Conv2D(256, 5, padding='same')(x)
x = layers.LeakyReLU()(x)

# Produce a 32x32 3-channel (RGB) feature map
x = layers.Conv2D(channels, 7, activation='tanh', padding='same')(x)
generator = keras.models.Model(generator_input, x)
generator.summary()
Using TensorFlow backend.
MIT
samples/notebooks/week06-04-introduction-to-gans.ipynb
gu-ma/ba_218_comppx_h1901
The discriminator

Then, we develop a `discriminator` model that takes as input a candidate image (real or synthetic) and classifies it into one of two classes: "generated image" or "real image that comes from the training set".
discriminator_input = layers.Input(shape=(height, width, channels))
x = layers.Conv2D(128, 3)(discriminator_input)
x = layers.LeakyReLU()(x)
x = layers.Conv2D(128, 4, strides=2)(x)
x = layers.LeakyReLU()(x)
x = layers.Conv2D(128, 4, strides=2)(x)
x = layers.LeakyReLU()(x)
x = layers.Conv2D(128, 4, strides=2)(x)
x = layers.LeakyReLU()(x)
x = layers.Flatten()(x)

# One dropout layer - important trick!
x = layers.Dropout(0.4)(x)

# Classification layer
x = layers.Dense(1, activation='sigmoid')(x)

discriminator = keras.models.Model(discriminator_input, x)
discriminator.summary()

# To stabilize training, we use learning rate decay
# and gradient clipping (by value) in the optimizer.
discriminator_optimizer = keras.optimizers.RMSprop(lr=0.0008, clipvalue=1.0, decay=1e-8)
discriminator.compile(optimizer=discriminator_optimizer, loss='binary_crossentropy')
_________________________________________________________________ Layer (type) Output Shape Param # ================================================================= input_2 (InputLayer) (None, 32, 32, 3) 0 _________________________________________________________________ conv2d_5 (Conv2D) (None, 30, 30, 128) 3584 _________________________________________________________________ leaky_re_lu_6 (LeakyReLU) (None, 30, 30, 128) 0 _________________________________________________________________ conv2d_6 (Conv2D) (None, 14, 14, 128) 262272 _________________________________________________________________ leaky_re_lu_7 (LeakyReLU) (None, 14, 14, 128) 0 _________________________________________________________________ conv2d_7 (Conv2D) (None, 6, 6, 128) 262272 _________________________________________________________________ leaky_re_lu_8 (LeakyReLU) (None, 6, 6, 128) 0 _________________________________________________________________ conv2d_8 (Conv2D) (None, 2, 2, 128) 262272 _________________________________________________________________ leaky_re_lu_9 (LeakyReLU) (None, 2, 2, 128) 0 _________________________________________________________________ flatten_1 (Flatten) (None, 512) 0 _________________________________________________________________ dropout_1 (Dropout) (None, 512) 0 _________________________________________________________________ dense_2 (Dense) (None, 1) 513 ================================================================= Total params: 790,913 Trainable params: 790,913 Non-trainable params: 0 _________________________________________________________________
MIT
samples/notebooks/week06-04-introduction-to-gans.ipynb
gu-ma/ba_218_comppx_h1901
The adversarial network

Finally, we set up the GAN, which chains the generator and the discriminator. This is the model that, when trained, will move the generator in a direction that improves its ability to fool the discriminator. This model turns latent space points into a classification decision, "fake" or "real", and it is meant to be trained with labels that are always "these are real images". So training `gan` will update the weights of `generator` in a way that makes `discriminator` more likely to predict "real" when looking at fake images. Very importantly, we set the discriminator to be frozen during training (non-trainable): its weights will not be updated when training `gan`. If the discriminator weights could be updated during this process, then we would be training the discriminator to always predict "real", which is not what we want!
# Set discriminator weights to non-trainable
# (will only apply to the `gan` model)
discriminator.trainable = False

gan_input = keras.Input(shape=(latent_dim,))
gan_output = discriminator(generator(gan_input))
gan = keras.models.Model(gan_input, gan_output)

gan_optimizer = keras.optimizers.RMSprop(lr=0.0004, clipvalue=1.0, decay=1e-8)
gan.compile(optimizer=gan_optimizer, loss='binary_crossentropy')
_____no_output_____
MIT
samples/notebooks/week06-04-introduction-to-gans.ipynb
gu-ma/ba_218_comppx_h1901
How to train your DCGAN

Now we can start training. To recapitulate, this is schematically what the training loop looks like:

```
for each epoch:
    * Draw random points in the latent space (random noise).
    * Generate images with `generator` using this random noise.
    * Mix the generated images with real ones.
    * Train `discriminator` using these mixed images, with corresponding targets, either "real" (for the real images) or "fake" (for the generated images).
    * Draw new random points in the latent space.
    * Train `gan` using these random vectors, with targets that all say "these are real images". This will update the weights of the generator (only, since discriminator is frozen inside `gan`) to move them towards getting the discriminator to predict "these are real images" for generated images, i.e. this trains the generator to fool the discriminator.
```

Let's implement it:
import os
from keras.preprocessing import image

# Load CIFAR10 data
(x_train, y_train), (_, _) = keras.datasets.cifar10.load_data()

# Select frog images (class 6)
x_train = x_train[y_train.flatten() == 6]

# Normalize data
x_train = x_train.reshape(
    (x_train.shape[0],) + (height, width, channels)).astype('float32') / 255.

iterations = 10000
batch_size = 20
save_dir = '/home/ubuntu/gan_images/'

# Start training loop
start = 0
for step in range(iterations):
    # Sample random points in the latent space
    random_latent_vectors = np.random.normal(size=(batch_size, latent_dim))

    # Decode them to fake images
    generated_images = generator.predict(random_latent_vectors)

    # Combine them with real images
    stop = start + batch_size
    real_images = x_train[start: stop]
    combined_images = np.concatenate([generated_images, real_images])

    # Assemble labels discriminating real from fake images
    labels = np.concatenate([np.ones((batch_size, 1)),
                             np.zeros((batch_size, 1))])
    # Add random noise to the labels - important trick!
    labels += 0.05 * np.random.random(labels.shape)

    # Train the discriminator
    d_loss = discriminator.train_on_batch(combined_images, labels)

    # Sample random points in the latent space
    random_latent_vectors = np.random.normal(size=(batch_size, latent_dim))

    # Assemble labels that say "all real images"
    misleading_targets = np.zeros((batch_size, 1))

    # Train the generator (via the gan model,
    # where the discriminator weights are frozen)
    a_loss = gan.train_on_batch(random_latent_vectors, misleading_targets)

    start += batch_size
    if start > len(x_train) - batch_size:
        start = 0

    # Occasionally save / plot
    if step % 100 == 0:
        # Save model weights
        gan.save_weights('gan.h5')

        # Print metrics
        print('discriminator loss at step %s: %s' % (step, d_loss))
        print('adversarial loss at step %s: %s' % (step, a_loss))

        # Save one generated image
        img = image.array_to_img(generated_images[0] * 255., scale=False)
        img.save(os.path.join(save_dir, 'generated_frog' + str(step) + '.png'))

        # Save one real image, for comparison
        img = image.array_to_img(real_images[0] * 255., scale=False)
        img.save(os.path.join(save_dir, 'real_frog' + str(step) + '.png'))
discriminator loss at step 0: 0.685675 adversarial loss at step 0: 0.667591 discriminator loss at step 100: 0.756201 adversarial loss at step 100: 0.820905 discriminator loss at step 200: 0.699047 adversarial loss at step 200: 0.776581 discriminator loss at step 300: 0.684602 adversarial loss at step 300: 0.513813 discriminator loss at step 400: 0.707092 adversarial loss at step 400: 0.716778 discriminator loss at step 500: 0.686278 adversarial loss at step 500: 0.741214 discriminator loss at step 600: 0.692786 adversarial loss at step 600: 0.745891 discriminator loss at step 700: 0.69771 adversarial loss at step 700: 0.781026 discriminator loss at step 800: 0.69236 adversarial loss at step 800: 0.748769 discriminator loss at step 900: 0.663193 adversarial loss at step 900: 0.689923 discriminator loss at step 1000: 0.706922 adversarial loss at step 1000: 0.741314 discriminator loss at step 1100: 0.682189 adversarial loss at step 1100: 0.76548 discriminator loss at step 1200: 0.687244 adversarial loss at step 1200: 0.746018 discriminator loss at step 1300: 0.697884 adversarial loss at step 1300: 0.766032 discriminator loss at step 1400: 0.691977 adversarial loss at step 1400: 0.735184 discriminator loss at step 1500: 0.696238 adversarial loss at step 1500: 0.738426 discriminator loss at step 1600: 0.698334 adversarial loss at step 1600: 0.741093 discriminator loss at step 1700: 0.70315 adversarial loss at step 1700: 0.736702 discriminator loss at step 1800: 0.693836 adversarial loss at step 1800: 0.742768 discriminator loss at step 1900: 0.69059 adversarial loss at step 1900: 0.741162 discriminator loss at step 2000: 0.696293 adversarial loss at step 2000: 0.755151 discriminator loss at step 2100: 0.686166 adversarial loss at step 2100: 0.755129 discriminator loss at step 2200: 0.692612 adversarial loss at step 2200: 0.772408 discriminator loss at step 2300: 0.704013 adversarial loss at step 2300: 0.776998 discriminator loss at step 2400: 0.693268 adversarial loss at step 2400: 0.70731 discriminator loss at step 2500: 0.684289 adversarial loss at step 2500: 0.742162 discriminator loss at step 2600: 0.700483 adversarial loss at step 2600: 0.734719 discriminator loss at step 2700: 0.699952 adversarial loss at step 2700: 0.759745 discriminator loss at step 2800: 0.697416 adversarial loss at step 2800: 0.733726 discriminator loss at step 2900: 0.697604 adversarial loss at step 2900: 0.740891 discriminator loss at step 3000: 0.698498 adversarial loss at step 3000: 0.754564 discriminator loss at step 3100: 0.695516 adversarial loss at step 3100: 0.759486 discriminator loss at step 3200: 0.693453 adversarial loss at step 3200: 0.769369 discriminator loss at step 3300: 1.5083 adversarial loss at step 3300: 0.726621 discriminator loss at step 3400: 0.686934 adversarial loss at step 3400: 0.747121 discriminator loss at step 3500: 0.689791 adversarial loss at step 3500: 0.751882 discriminator loss at step 3600: 0.71331 adversarial loss at step 3600: 0.704916 discriminator loss at step 3700: 0.690504 adversarial loss at step 3700: 0.853764 discriminator loss at step 3800: 0.688844 adversarial loss at step 3800: 0.791077 discriminator loss at step 3900: 0.679162 adversarial loss at step 3900: 0.724979 discriminator loss at step 4000: 0.676585 adversarial loss at step 4000: 0.69554 discriminator loss at step 4100: 0.693313 adversarial loss at step 4100: 0.742666 discriminator loss at step 4200: 0.678367 adversarial loss at step 4200: 0.778793 discriminator loss at step 4300: 0.699712 adversarial loss at 
step 4300: 0.740457 discriminator loss at step 4400: 0.697605 adversarial loss at step 4400: 0.755847 discriminator loss at step 4500: 0.710596 adversarial loss at step 4500: 0.814832 discriminator loss at step 4600: 0.706518 adversarial loss at step 4600: 0.83636 discriminator loss at step 4700: 0.687217 adversarial loss at step 4700: 0.775736 discriminator loss at step 4800: 0.769103 adversarial loss at step 4800: 0.774639 discriminator loss at step 4900: 0.692414 adversarial loss at step 4900: 0.775192 discriminator loss at step 5000: 0.715357 adversarial loss at step 5000: 0.775003 discriminator loss at step 5100: 0.703434 adversarial loss at step 5100: 0.940242 discriminator loss at step 5200: 0.704034 adversarial loss at step 5200: 0.708327 discriminator loss at step 5300: 0.698559 adversarial loss at step 5300: 0.730377 discriminator loss at step 5400: 0.684378 adversarial loss at step 5400: 0.759259 discriminator loss at step 5500: 0.693699 adversarial loss at step 5500: 0.700122 discriminator loss at step 5600: 0.715242 adversarial loss at step 5600: 0.808961 discriminator loss at step 5700: 0.689339 adversarial loss at step 5700: 0.621725 discriminator loss at step 5800: 0.679717 adversarial loss at step 5800: 0.787711 discriminator loss at step 5900: 0.700126 adversarial loss at step 5900: 0.742493 discriminator loss at step 6000: 0.692087 adversarial loss at step 6000: 0.839669 discriminator loss at step 6100: 0.677867 adversarial loss at step 6100: 0.797158 discriminator loss at step 6200: 0.70392 adversarial loss at step 6200: 0.842135 discriminator loss at step 6300: 0.688377 adversarial loss at step 6300: 0.718633 discriminator loss at step 6400: 0.781234 adversarial loss at step 6400: 0.710833 discriminator loss at step 6500: 0.682696 adversarial loss at step 6500: 0.739674 discriminator loss at step 6600: 0.693081 adversarial loss at step 6600: 0.747336 discriminator loss at step 6700: 0.681836 adversarial loss at step 6700: 0.780143 discriminator loss at step 6800: 0.728136 adversarial loss at step 6800: 0.838522 discriminator loss at step 6900: 0.660475 adversarial loss at step 6900: 0.717434 discriminator loss at step 7000: 0.672144 adversarial loss at step 7000: 0.948783 discriminator loss at step 7100: 0.692428 adversarial loss at step 7100: 0.837047 discriminator loss at step 7200: 0.731133 adversarial loss at step 7200: 0.728315 discriminator loss at step 7300: 0.671766 adversarial loss at step 7300: 0.793155 discriminator loss at step 7400: 0.712387 adversarial loss at step 7400: 0.807759 discriminator loss at step 7500: 0.68638 adversarial loss at step 7500: 0.967421 discriminator loss at step 7600: 0.690096 adversarial loss at step 7600: 0.811904 discriminator loss at step 7700: 0.702784 adversarial loss at step 7700: 0.867017 discriminator loss at step 7800: 0.674138 adversarial loss at step 7800: 0.837909 discriminator loss at step 7900: 0.674747 adversarial loss at step 7900: 0.743664 discriminator loss at step 8000: 0.680357 adversarial loss at step 8000: 0.810859 discriminator loss at step 8100: 0.688885 adversarial loss at step 8100: 0.786809 discriminator loss at step 8200: 0.671557 adversarial loss at step 8200: 0.784159 discriminator loss at step 8300: 0.70359 adversarial loss at step 8300: 0.95692 discriminator loss at step 8400: 0.720167 adversarial loss at step 8400: 1.14066 discriminator loss at step 8500: 0.747376 adversarial loss at step 8500: 0.630725 discriminator loss at step 8600: 0.688931 adversarial loss at step 8600: 0.849245 discriminator 
loss at step 8700: 0.707559 adversarial loss at step 8700: 0.713202 discriminator loss at step 8800: 0.673593 adversarial loss at step 8800: 0.832419 discriminator loss at step 8900: 0.6777 adversarial loss at step 8900: 0.773395 discriminator loss at step 9000: 0.659887 adversarial loss at step 9000: 0.77255 discriminator loss at step 9100: 0.675182 adversarial loss at step 9100: 0.749544 discriminator loss at step 9200: 0.687147 adversarial loss at step 9200: 0.836509 discriminator loss at step 9300: 0.690807 adversarial loss at step 9300: 0.829561 discriminator loss at step 9400: 0.656649 adversarial loss at step 9400: 0.788181 discriminator loss at step 9500: 0.703494 adversarial loss at step 9500: 0.78302 discriminator loss at step 9600: 0.680718 adversarial loss at step 9600: 0.813078 discriminator loss at step 9700: 0.704956 adversarial loss at step 9700: 0.761652 discriminator loss at step 9800: 0.673504 adversarial loss at step 9800: 0.853213 discriminator loss at step 9900: 0.669288 adversarial loss at step 9900: 0.677691
MIT
samples/notebooks/week06-04-introduction-to-gans.ipynb
gu-ma/ba_218_comppx_h1901
Let's display a few of our fake images:
import matplotlib.pyplot as plt

# Sample random points in the latent space
random_latent_vectors = np.random.normal(size=(10, latent_dim))

# Decode them to fake images
generated_images = generator.predict(random_latent_vectors)

for i in range(generated_images.shape[0]):
    img = image.array_to_img(generated_images[i] * 255., scale=False)
    plt.figure()
    plt.imshow(img)
    plt.show()
_____no_output_____
MIT
samples/notebooks/week06-04-introduction-to-gans.ipynb
gu-ma/ba_218_comppx_h1901
Part 1: Join the Duet Server the Data Owner connected to
duet = sy.join_duet(loopback=True)
_____no_output_____
Apache-2.0
examples/private-ai-series/duet_basics/exercise/Exercise_Duet_Basics_Data_Scientist.ipynb
Bhuvan-21/PySyft
Checkpoint 0: Now STOP and run the Data Owner notebook until Checkpoint 1.

Part 2: Search for Available Data
# The data scientist can check the list of searchable data in the Data Owner's duet store
duet.store.pandas

# The Data Scientist finds the Heights and Weights of a group of people;
# there are some analyses he/she can do with them together.
heights_ptr = duet.store[0]
weights_ptr = duet.store[1]

# heights_ptr is a reference to the height dataset remotely available on the data owner's server
print(heights_ptr)

# weights_ptr is a reference to the weight dataset remotely available on the data owner's server
print(weights_ptr)
_____no_output_____
Apache-2.0
examples/private-ai-series/duet_basics/exercise/Exercise_Duet_Basics_Data_Scientist.ipynb
Bhuvan-21/PySyft
Calculate BMI (Body Mass Index) and weight status

Using the height and weight pointers of the people in Group A, calculate their BMI and get a pointer to each individual BMI. From the BMI pointers you can check whether a person is normal-weight, overweight or obese, without knowing their actual heights, weights, or even BMI values.

- BMI from 19 to 24: Normal
- BMI from 25 to 29: Overweight
- BMI from 30 to 39: Obese

BMI = [weight (kg) / (height (cm))^2] x 10,000

Hint: run duet.torch and find the required operators.

One amazing thing about pointers is that from a pointer to a list of items we can get the pointers to each item in the list. For example, here weights_ptr points to the weight list, but from it we can also get the pointer to each weight and perform computation on each of them without ever knowing the value! The code below shows how to access the pointers to each weight and height from the list pointer.
for i in range(6): print("Pointer to Weight of person", i + 1, weights_ptr[i]) print("Pointer to Height of person", i + 1, heights_ptr[i]) def BMI_calculator(w_ptr, h_ptr): bmi_ptr = 0 ##TODO "Write your code here for calculating bmi_ptr" ### return bmi_ptr def weight_status(w_ptr, h_ptr): status = None bmi_ptr = BMI_calculator(w_ptr, h_ptr) ##TODO """Write your code here. Possible values for status: Normal, Overweight, Obese, Out of range """"" ### return status for i in range(0, 6): bmi_ptr = BMI_calculator(weights_ptr[i], heights_ptr[i]) statuses = [] for i in range(0, 6): status = weight_status(weights_ptr[i], heights_ptr[i]) print("Weight of Person", i + 1, "is", status) statuses.append(status)
_____no_output_____
Apache-2.0
examples/private-ai-series/duet_basics/exercise/Exercise_Duet_Basics_Data_Scientist.ipynb
Bhuvan-21/PySyft
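For reference, a minimal plain-Python sketch of the BMI logic the exercise asks for, operating on ordinary numbers rather than Duet pointers (the pointer version would apply the analogous remote operators to `weights_ptr[i]` and `heights_ptr[i]`); the thresholds follow the ranges given above:

```python
def bmi(weight_kg, height_cm):
    # BMI = weight (kg) / height (cm)^2 * 10,000
    return weight_kg / (height_cm ** 2) * 10_000

def weight_status(weight_kg, height_cm):
    b = bmi(weight_kg, height_cm)
    if 19 <= b <= 24:
        return "Normal"
    if 25 <= b <= 29:
        return "Overweight"
    if 30 <= b <= 39:
        return "Obese"
    return "Out of range"

print(weight_status(70, 175))  # BMI ~= 22.9 -> "Normal"
```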
On several clustering algorithms that can work with sparse matrices, check which works better, CountVectorizer or TfidfVectorizer (try to get the most out of each: n-grams, character n-grams, different values of max_features and min_df). (3 points)

On several clustering algorithms, check which matrix decomposition (TruncatedSVD or NMF) works better for clustering. (3 points)

Using algorithms that can detect outliers, try to find unusual ads (unusual meaning ones that are hard to assign to any category at all, something with errors, or just plain nonsense). Any vectorization can be used in this task. (4 points)

Use the same data as in the seminar (the title and category_name columns). Take subsamples of the full dataset according to your resources and the needs of the algorithm. For comparison, use any of the metrics from the seminar. Evaluating by eye is also allowed, but then you need to explain why you consider one clustering better than another.

DO NOT FORGET to tune the clustering parameters. Using all default parameters will lower the grade (by "all default parameters" I mean something like cluster = DBSCAN()).

If you can, use the elbow method. (1 bonus point)
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer import pandas as pd from sklearn.decomposition import TruncatedSVD, NMF from sklearn.cluster import AffinityPropagation, AgglomerativeClustering, DBSCAN, \ KMeans, MiniBatchKMeans, Birch, MeanShift, SpectralClustering from sklearn.metrics import adjusted_rand_score, adjusted_mutual_info_score, \ silhouette_score, homogeneity_score, completeness_score, \ v_measure_score import matplotlib.pyplot as plt import warnings warnings.filterwarnings("ignore") data = pd.read_csv('data.csv') data = data[['category_name', 'title']]
_____no_output_____
MIT
HW4/HW4.ipynb
slowwavesleep/HSE_ML
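As a reminder of the vectorizer options the task mentions (word n-grams vs. character n-grams, `max_features`, `min_df`), here is a small illustrative sketch; the parameter values and the toy texts are arbitrary examples, not the settings used later in the notebook:

```python
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer

texts = ["Продам велосипед", "Сдам квартиру в центре", "Куплю детскую коляску"]

# word-level unigrams + bigrams
word_tf = TfidfVectorizer(ngram_range=(1, 2), min_df=1, max_features=5000)
# character n-grams restricted to word boundaries
char_tf = TfidfVectorizer(analyzer="char_wb", ngram_range=(3, 5), min_df=1)
word_cv = CountVectorizer(ngram_range=(1, 1), min_df=1, max_features=5000)

print(word_tf.fit_transform(texts).shape,
      char_tf.fit_transform(texts).shape,
      word_cv.fit_transform(texts).shape)
```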
On several clustering algorithms that can work with sparse matrices, check which works better, CountVectorizer or TfidfVectorizer (try to get the most out of each: n-grams, character n-grams, different values of max_features and min_df). (3 points)
def eval_clusterization(X, y, cluster_labels): silhouette = silhouette_score(X, cluster_labels) homogeneity = homogeneity_score(y, cluster_labels) completeness = completeness_score(y, cluster_labels) v_measure = v_measure_score(y, cluster_labels) adj_rand = adjusted_rand_score(y, cluster_labels) mi_score = adjusted_mutual_info_score(y, cluster_labels) print('Clusterization metrics') print(f'Silhouette score: {silhouette:.3f}') print(f'Homogeneity score: {homogeneity:.3f}') print(f'Completeness score: {completeness:.3f}') print(f'V-measure: {v_measure:.3f}') print(f'Ajusted Rand Index: {adj_rand:.3f}') print(f'Adjusted Mutual Information score: {mi_score:.3f}') def fit_and_eval(X, y, clusterizer): global sample clusterizer.fit(X) labels = clusterizer.labels_ eval_clusterization(X, y, labels) sample['cluster'] = labels
_____no_output_____
MIT
HW4/HW4.ipynb
slowwavesleep/HSE_ML
Affinity Propagation
sample = data.sample(frac=0.01) y = sample['category_name']
_____no_output_____
MIT
HW4/HW4.ipynb
slowwavesleep/HSE_ML
*TfidfVectorizer*
tf = TfidfVectorizer(min_df=2, max_df=0.9, max_features=500, ngram_range=(1, 2)) X_tf = tf.fit_transform(sample['title']) cluster = AffinityPropagation(damping=0.7, preference=-2, max_iter=400, verbose=2, convergence_iter=10) fit_and_eval(X_tf, y, cluster)
Did not converge Clusterization metrics Silhouette score: 0.496 Homogeneity score: 0.591 Completeness score: 0.389 V-measure: 0.469 Ajusted Rand Index: -0.013 Adjusted Mutual Information score: 0.198
MIT
HW4/HW4.ipynb
slowwavesleep/HSE_ML
*CountVectorizer*
cv = CountVectorizer(min_df=3, max_df=0.6, max_features=1000) X_cv = cv.fit_transform(sample['title']) cluster = AffinityPropagation(damping=0.7, preference=-2, max_iter=400, verbose=2, convergence_iter=10) fit_and_eval(X_cv, y, cluster)
Converged after 244 iterations. Clusterization metrics Silhouette score: 0.481 Homogeneity score: 0.601 Completeness score: 0.371 V-measure: 0.459 Ajusted Rand Index: -0.010 Adjusted Mutual Information score: 0.155
MIT
HW4/HW4.ipynb
slowwavesleep/HSE_ML
For this algorithm both vectorization methods give similar V-measure values. tf has the advantage in silhouette score, and cv in homogeneity. tf has higher completeness and MI scores, so in my view it is the better fit for this algorithm. K-means
sample = data.sample(frac=0.01) y = sample['category_name']
_____no_output_____
MIT
HW4/HW4.ipynb
slowwavesleep/HSE_ML
*TfidfVectorizer*
tf = TfidfVectorizer(min_df=2, max_df=0.8, max_features=500) X_tf = tf.fit_transform(sample['title']) cluster = KMeans(n_clusters=47, n_jobs=-1, random_state=0) fit_and_eval(X_tf, y, cluster)
Clusterization metrics Silhouette score: 0.224 Homogeneity score: 0.318 Completeness score: 0.406 V-measure: 0.357 Ajusted Rand Index: -0.001 Adjusted Mutual Information score: 0.249
MIT
HW4/HW4.ipynb
slowwavesleep/HSE_ML
*CountVectorizer*
cv = CountVectorizer(min_df=3, max_df=0.4, max_features=1000) X_cv = cv.fit_transform(sample['title']) cluster = KMeans(n_clusters=47, n_jobs=-1, random_state=0) fit_and_eval(X_cv, y, cluster)
Clusterization metrics Silhouette score: 0.184 Homogeneity score: 0.276 Completeness score: 0.405 V-measure: 0.328 Ajusted Rand Index: 0.003 Adjusted Mutual Information score: 0.212
MIT
HW4/HW4.ipynb
slowwavesleep/HSE_ML
tf has higher homogeneity but lower completeness. Its silhouette score is considerably higher, and its MI score is higher as well, so tf is again better here. If you can, use the elbow method. (1 bonus point)
def elbow_method(X, clusterizer, left_boundary, right_boundary, step): scores = [] for i in range(left_boundary, right_boundary, step): cluster = clusterizer(n_clusters=i, n_jobs=-1, random_state=0) cluster.fit(X) labels = cluster.labels_ score = silhouette_score(X, labels) scores.append(score) plt.figure(figsize=(12, 5)) plt.plot(list(range(left_boundary, right_boundary, step)), scores) return scores elbow_method(X_tf, KMeans, 25, 1250, 75)
_____no_output_____
MIT
HW4/HW4.ipynb
slowwavesleep/HSE_ML
We can see that around 1100 clusters the silhouette score stops growing.
cluster = KMeans(n_clusters=1100, n_jobs=-1, random_state=0) cluster.fit(X_tf) c_labels = cluster.labels_ eval_clusterization(X_tf, y, c_labels)
Clusterization metrics Silhouette score: 0.627 Homogeneity score: 0.742 Completeness score: 0.390 V-measure: 0.511 Ajusted Rand Index: -0.003 Adjusted Mutual Information score: 0.111
MIT
HW4/HW4.ipynb
slowwavesleep/HSE_ML
All the metrics did indeed improve. At the same time, it is unclear how to interpret such a large number of clusters. Spectral Clustering
sample = data.sample(frac=0.01) y = sample['category_name']
_____no_output_____
MIT
HW4/HW4.ipynb
slowwavesleep/HSE_ML
*TfidfVectorizer*
tf = TfidfVectorizer(min_df=2, max_df=0.8, max_features=500) X_tf = tf.fit_transform(sample['title']) cluster = SpectralClustering(n_clusters=47, n_jobs=-1, random_state=0) fit_and_eval(X_tf, y, cluster)
Clusterization metrics Silhouette score: 0.216 Homogeneity score: 0.243 Completeness score: 0.377 V-measure: 0.296 Ajusted Rand Index: -0.021 Adjusted Mutual Information score: 0.171
MIT
HW4/HW4.ipynb
slowwavesleep/HSE_ML
*CountVectorizer*
cv = CountVectorizer(min_df=3, max_df=0.4, max_features=1000) X_cv = cv.fit_transform(sample['title']) cluster = SpectralClustering(n_clusters=47, n_jobs=-1, random_state=0) fit_and_eval(X_cv, y, cluster)
Clusterization metrics Silhouette score: 0.211 Homogeneity score: 0.125 Completeness score: 0.593 V-measure: 0.207 Ajusted Rand Index: 0.025 Adjusted Mutual Information score: 0.086
MIT
HW4/HW4.ipynb
slowwavesleep/HSE_ML
With this algorithm tf has a clear advantage on almost every metric. On several clustering algorithms, check which matrix decomposition (TruncatedSVD or NMF) works better for clustering. (3 points) Mean Shift
sample = data.sample(frac=0.01) y = sample['category_name'] cv = CountVectorizer(min_df=3, max_df=0.6, max_features=2000) X_cv = cv.fit_transform(sample['title']) svd = TruncatedSVD(50, random_state=0) X_svd = svd.fit_transform(X_cv)
_____no_output_____
MIT
HW4/HW4.ipynb
slowwavesleep/HSE_ML
*SVD*
cluster = MeanShift(cluster_all=False, bandwidth=0.8, n_jobs=-1) fit_and_eval(X_svd, y, cluster)
Clusterization metrics Silhouette score: 0.714 Homogeneity score: 0.339 Completeness score: 0.377 V-measure: 0.357 Ajusted Rand Index: -0.008 Adjusted Mutual Information score: 0.212
MIT
HW4/HW4.ipynb
slowwavesleep/HSE_ML
*NMF*
cv = CountVectorizer(min_df=3, max_df=0.6, max_features=2000) X_cv = cv.fit_transform(sample['title']) nmf = NMF(50, random_state=0) X_nmf = nmf.fit_transform(X_cv) cluster = MeanShift(cluster_all=False, bandwidth=0.8, n_jobs=-1) fit_and_eval(X_nmf, y, cluster)
Clusterization metrics Silhouette score: 0.608 Homogeneity score: 0.002 Completeness score: 0.307 V-measure: 0.004 Ajusted Rand Index: 0.000 Adjusted Mutual Information score: 0.001
MIT
HW4/HW4.ipynb
slowwavesleep/HSE_ML
For this algorithm the SVD decomposition has a significant advantage: V-measure, MI score. Agglomerative Clustering
sample = data.sample(frac=0.05) y = sample['category_name']
_____no_output_____
MIT
HW4/HW4.ipynb
slowwavesleep/HSE_ML
*SVD*
cv = CountVectorizer(min_df=3, max_df=0.6, max_features=2000) X_cv = cv.fit_transform(sample['title']) svd = TruncatedSVD(50, random_state=0) X_svd = svd.fit_transform(X_cv) cluster = AgglomerativeClustering(n_clusters=47) fit_and_eval(X_svd, y, cluster)
Clusterization metrics Silhouette score: 0.637 Homogeneity score: 0.302 Completeness score: 0.370 V-measure: 0.332 Ajusted Rand Index: -0.001 Adjusted Mutual Information score: 0.282
MIT
HW4/HW4.ipynb
slowwavesleep/HSE_ML
*NMF*
cv = CountVectorizer(min_df=3, max_df=0.6, max_features=2000) X_cv = cv.fit_transform(sample['title']) nmf = NMF(50, random_state=0) X_nmf = nmf.fit_transform(X_cv) cluster = AgglomerativeClustering(n_clusters=47) fit_and_eval(X_nmf, y, cluster)
Clusterization metrics Silhouette score: 0.707 Homogeneity score: 0.292 Completeness score: 0.365 V-measure: 0.324 Ajusted Rand Index: -0.003 Adjusted Mutual Information score: 0.272
MIT
HW4/HW4.ipynb
slowwavesleep/HSE_ML
Unlike with the previous algorithms, here NMF does better on some metrics (silhouette score, V-measure) and is roughly on par with SVD on the rest. In this case it seems hard to judge which decomposition is really better. DBSCAN
sample = data.sample(frac=0.05) y = sample['category_name'] cv = CountVectorizer(min_df=3, max_df=0.6, max_features=2000) X_cv = cv.fit_transform(sample['title']) svd = TruncatedSVD(50, random_state=0) X_svd = svd.fit_transform(X_cv)
_____no_output_____
MIT
HW4/HW4.ipynb
slowwavesleep/HSE_ML
*SVD*
cluster = DBSCAN(min_samples=7, eps=0.4, n_jobs=-1) fit_and_eval(X_svd, y, cluster)
Clusterization metrics Silhouette score: 0.672 Homogeneity score: 0.301 Completeness score: 0.343 V-measure: 0.321 Ajusted Rand Index: -0.010 Adjusted Mutual Information score: 0.265
MIT
HW4/HW4.ipynb
slowwavesleep/HSE_ML
*NMF*
# does not work with the same parameters as for SVD
cluster = DBSCAN(min_samples=10, eps=0.3)
fit_and_eval(X_nmf, y, cluster)
Clusterization metrics Silhouette score: 0.507 Homogeneity score: 0.001 Completeness score: 0.053 V-measure: 0.002 Ajusted Rand Index: -0.000 Adjusted Mutual Information score: -0.000
MIT
HW4/HW4.ipynb
slowwavesleep/HSE_ML
Here SVD is better on all counts. This section can be summarized as follows: although TfidfVectorizer works better with the algorithms that use dense matrices, CountVectorizer performs better with the algorithms that use sparse matrices. Honestly, it is rather hard to say what causes this difference, but it is worth keeping in mind. Using algorithms that can detect outliers, try to find unusual ads (unusual meaning ones that are hard to assign to any category at all, something with errors, or just plain nonsense). Any vectorization can be used in this task. (4 points) The **DBSCAN** and **Mean Shift** algorithms can put the elements that could not be placed into any other cluster into a separate cluster. Strictly speaking, this is not necessarily the same thing as outliers. DBSCAN
sample = data.sample(frac=0.05) y = sample['category_name'] cv = CountVectorizer(min_df=4, max_df=0.6, max_features=2000) X_cv = cv.fit_transform(sample['title']) svd = TruncatedSVD(50, random_state=0) X_svd = svd.fit_transform(X_cv)
_____no_output_____
MIT
HW4/HW4.ipynb
slowwavesleep/HSE_ML
I tried different parameters. Making the cluster requirements stricter (a larger `min_samples` and a smaller `eps`) increases the size of cluster `-1`. The result is anywhere from 600 to several thousand rows. Changing the `leaf_size` parameter, for the algorithms that support it, does not affect the size of this cluster.
cluster = DBSCAN(min_samples=6, eps=0.6, n_jobs=-1, algorithm='kd_tree', leaf_size=30) fit_and_eval(X_svd, y, cluster)
Clusterization metrics Silhouette score: 0.431 Homogeneity score: 0.253 Completeness score: 0.338 V-measure: 0.289 Ajusted Rand Index: 0.007 Adjusted Mutual Information score: 0.176
MIT
HW4/HW4.ipynb
slowwavesleep/HSE_ML
I could not find a combination of parameters that would leave fewer than 600 rows in cluster -1.
len(sample.loc[sample.cluster == -1])
_____no_output_____
MIT
HW4/HW4.ipynb
slowwavesleep/HSE_ML
These look like ordinary ads. It is hard to call them outliers.
sample.loc[sample.cluster == -1].head(10)
_____no_output_____
MIT
HW4/HW4.ipynb
slowwavesleep/HSE_ML
Mean Shift
sample = data.sample(frac=0.01) y = sample['category_name'] cv = CountVectorizer(min_df=3, max_df=0.6, max_features=2000) X_cv = cv.fit_transform(sample['title']) svd = TruncatedSVD(100, random_state=0) X_svd = svd.fit_transform(X_cv) cluster = MeanShift(cluster_all=False, bandwidth=0.9, n_jobs=-1) fit_and_eval(X_svd, y, cluster)
Clusterization metrics Silhouette score: 0.595 Homogeneity score: 0.412 Completeness score: 0.367 V-measure: 0.388 Ajusted Rand Index: -0.013 Adjusted Mutual Information score: 0.199
MIT
HW4/HW4.ipynb
slowwavesleep/HSE_ML
Again, these are fairly ordinary ads. Nothing particularly strange stands out.
len(sample.loc[sample.cluster == -1]) sample.loc[sample.cluster == -1].head(10)
_____no_output_____
MIT
HW4/HW4.ipynb
slowwavesleep/HSE_ML
In general, there are dedicated algorithms specifically for detecting outliers.
from sklearn.ensemble import IsolationForest from sklearn.neighbors import LocalOutlierFactor iforest = IsolationForest(random_state=0) lof = LocalOutlierFactor(n_neighbors=30) sample['forest'] = iforest.fit_predict(X_cv) sample['lof'] = lof.fit_predict(X_cv)
_____no_output_____
MIT
HW4/HW4.ipynb
slowwavesleep/HSE_ML
However, they do not produce anything interesting either. IsolationForest mostly picks real-estate ads.
sample.loc[sample.forest == -1].category_name.value_counts() sample.loc[sample.forest == -1].head(10)
_____no_output_____
MIT
HW4/HW4.ipynb
slowwavesleep/HSE_ML
LocalOutlierFactor leans more toward clothing.
sample.loc[sample.lof == -1].category_name.value_counts() sample.loc[sample.lof == -1].head(10)
_____no_output_____
MIT
HW4/HW4.ipynb
slowwavesleep/HSE_ML
They agree with each other only in a small number of cases.
sample.loc[(sample.lof == -1) & (sample.forest == -1)]
_____no_output_____
MIT
HW4/HW4.ipynb
slowwavesleep/HSE_ML
Packaging Effect Index ($Q_{a}^*$)

To estimate the packaging effect index of Bricaud et al. (2004):

$$Q_{a}(\lambda) = a_{ph}(\lambda) / a_{sol}(\lambda)$$

where $a_{ph}(\lambda)$ is measured from the sample and $a_{sol}(\lambda)$ is the absorption coefficient the pigments would have if they were in solution. But first we need to estimate $a_{sol}$, which equals the sum, over pigments, of each pigment concentration times its respective specific absorption coefficient:

$$a_{sol}(\lambda) = \sum C_{i} \cdot a_{sol, i}^*(\lambda)$$

Plot of the $a_{sol, i}^*(\lambda)$ of each pigment given by HPLC:
library(repr)

# Load the file with the absorption coefficients of each pigment, Bricaud et al. (2004)
bricaud_asol = read.csv("Bricaud_et_al_2004.csv", skip=4, na="999")

# Standardize the pigment names
names(bricaud_asol) = c("lambda", "Chla", "DVChla", "Chlb", "DVChlb", "Chlc12", "Fuco", "ButFuco",
                        "HexFuco", "Perid", "Diad", "Zea", "Allox", "betacar", "acar")

options(repr.plot.width=6, repr.plot.height=4)

# Plot all pigments in the same figure
matplot(bricaud_asol$lambda, bricaud_asol[,2:15], type="l", ylab="", xlab="Wavelength (nm)",
        ylim=c(0,0.08), cex.lab=0.9, cex.axis=0.9, lwd=rep(2,14))
mtext(side=2, line=2.5, expression(a[sol]^{'*'}~('m'^{2}~"mg"^{-1})), cex=0.9)
_____no_output_____
MIT
Lab_Qa_R.ipynb
Andrealioli/Lab_aph_Qa_Sf
Now let's take a hypothetical HPLC result and estimate $a_{sol}(\lambda)$:
HPLC = data.frame("Chla"=1, "DVChla"=0.001, "Chlb"=0.03, "DVChlb"=0.01, "Chlc12"=0.3, "Fuco"=1.1,
                  "ButFuco"=0.001, "HexFuco"=0.005, "Perid"=0.5, "Diad"=0.01, "Zea"=0.05,
                  "Allox"=0.5, "betacar"=1, "acar"=0.3)

asol = data.frame(wv=bricaud_asol$lambda)
for (i in names(HPLC)){
  # multiply each pigment concentration by its respective specific absorption coefficient
  asol = cbind(asol, HPLC[,i]*bricaud_asol[,i])
}

# sum of the asol of all pigments at each wavelength
asol_t = rowSums(asol[,-1], na.rm = T)

options(repr.plot.width=6, repr.plot.height=4)
plot(asol$wv, asol_t, type="l", xlab="Wavelength (nm)", ylab="", cex.lab=0.9, cex.axis=0.9, lwd=2)
mtext(side=2, line=2.5, expression(a[sol]~('m'^{-1})), cex=0.9)

aph = asol_t*0.8

options(repr.plot.width=6, repr.plot.height=4)
matplot(asol$wv, cbind(asol_t, aph), type="l", lw=c(2,2), col=c("black", "green"), lt=c(1,2),
        xlab="Wavelength (nm)", ylab="")
mtext(side=2, line=2.5, expression(a~('m'^{-1})))
legend("topright", legend=c(expression(a[sol]), expression(a[ph])), lt=c(1,2), lw=c(2,2),
       col=c("black", "green"), y.intersp=2, bty="n")
_____no_output_____
MIT
Lab_Qa_R.ipynb
Andrealioli/Lab_aph_Qa_Sf
The missing terms in the Bricaud et al. (2004) reconstruction and the proposed solution

The authors observed that the reconstructed spectra were often smaller than the spectra measured from the samples (which is not expected, since it would give $Q_a$ values greater than 1). Because these errors appeared to be systematic, the authors hypothesized the existence of missing pigments that are not captured by HPLC. They therefore estimated the missing term through an empirical relationship with Chl-*a*:

$$a_{miss}(440) = 0.0525[TChla]^{0.855}$$

The number of samples used to obtain this empirical relationship was small (n = 14), so the authors caution that the estimated values should not be treated as absolute $Q_a$; this is why the term $Q_a^*$ is used. Estimating $Q_{a}^*(440)$ with the missing term:
a_todos = data.frame(wv=seq(400,700,2), "asol_t"=asol_t)
a_sol_440 = a_todos[which(a_todos$wv==440), "asol_t"] + (0.0525*(HPLC$Chla + HPLC$DVChla)^0.855)
aph_440 = a_sol_440*0.8

## Including the missing term
Qa_440_miss = aph_440/a_sol_440
Qa_440_miss

## Without the missing term
Qa_440 = aph_440/a_todos[which(a_todos$wv==440), "asol_t"]
Qa_440
_____no_output_____
MIT
Lab_Qa_R.ipynb
Andrealioli/Lab_aph_Qa_Sf
$Q_{a}$ for different cell sizes

Following what is presented in Bricaud et al. (2004), we plot $Q_{a}^*$ for different absorption coefficients of the cellular material ($a_{cm}$); in this example we consider $\lambda = 440\,nm$.
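For clarity, written out with $\rho' = a_{cm}(440)\,d$, the expressions the code below implements are:

$$Q_{a}(\rho') = 1 + \frac{2e^{-\rho'}}{\rho'} + \frac{2\left(e^{-\rho'} - 1\right)}{\rho'^{2}}, \qquad Q_{a}^*(440) = \frac{3}{2}\,\frac{Q_{a}(\rho')}{\rho'}$$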
# Absorption coefficient of the cellular material at 440 nm
acm.440.1 = 5*10^4   # less absorbing
acm.440.2 = 10^6     # more absorbing

# Range of cell diameters
d = (1:50)*10^-6

# Estimate Qa(440) for the different sizes
Qa.acm.1 = 1+(2*exp(-acm.440.1*d)/(acm.440.1*d)+2*(exp(-acm.440.1*d)-1)/(acm.440.1*d)^2)
Qa.acm.2 = 1+ (2*exp(-acm.440.2*d)/(acm.440.2*d)+2*(exp(-acm.440.2*d)-1)/(acm.440.2*d)^2)
Qa.1 = (3/2)*Qa.acm.1/(acm.440.1*d)
Qa.2 = (3/2)*Qa.acm.2/(acm.440.2*d)

df <- data.frame(d=1:50, Qa.1=Qa.1, Qa.2=Qa.2)

options(repr.plot.width=4, repr.plot.height=4)

# Plot the curves
matplot(df$d, df[, c("Qa.1", "Qa.2")], type="l", log="xy", xlab="", ylab="", lwd=c(2,2))
mtext(side=2, line=2.5, expression(Q[a]^{'*'}~(440)))
mtext(side=1, line=2.5, expression("Diameter"~(mu~m)))
legend("bottomleft", legend=c(expression(a["cm1"]~"(440)="~5~"."~10^{4}~(m^{-1})),
                              expression(a["cm2"]~"(440)="~10^{6}~(m^{-1}))),
       col=c("black", "red"), lty=c(1,2), lwd=c(2,2), bty="n", y.intersp=2)
_____no_output_____
MIT
Lab_Qa_R.ipynb
Andrealioli/Lab_aph_Qa_Sf
Size index ($S_{f}$)

The size index was developed on the theoretical grounds that larger cells would show a stronger packaging effect and hence a flatter specific absorption coefficient ($a_{ph}^*$) curve (Ciotti et al., 2002). Experimentally, Ciotti et al. (2002) obtained reference basis curves for samples dominated by picoplankton and for samples dominated by microplankton, $S_{f}$ being an index of the proportion of these size classes in the sampled community, according to the following equation:

$$\hat{a}_{ph} = [S_{f} \cdot \bar{a}_{pico}(\lambda)] + [(1-S_{f}) \cdot \bar{a}_{micro}(\lambda)]$$

where $\hat{a}_{ph}$ is the normalized phytoplankton absorption coefficient and $\bar{a}_{pico}$ and $\bar{a}_{micro}$ are the basis vectors obtained by Ciotti et al. (2002, 2006) for pico- and microplankton, respectively.
#Vetores base para o pico e micro Ciotti et al(2002,2006) pico =c(1.7439,1.8264,1.9128,1.9992,2.0895,2.1799,2.2702,2.3684,2.4666,2.5687,2.6669,2.7612,2.8437,2.9183,2.9890,3.0479,3.1029,3.1500,3.1854,3.2089,3.2247,3.2325,3.2286,3.2168,3.1932,3.1540,3.1029,3.0361,2.9576,2.8712,2.7848,2.6944,2.5137,2.4273,2.3488,2.2781,2.2486,2.2192,2.1720,2.1328,2.1013,2.0660,2.0267,1.9835,1.9285,1.8657,1.7989,1.7203,1.6339,1.5357,1.4336,1.3276,1.2176,1.1076,1.0016,0.8994,0.8013,0.7109,0.6284,0.5538,0.4870,0.4320,0.3782,0.3307,0.2875,0.2486,0.2137,0.1842,0.1599,0.1402,0.1233,0.1080,0.0935,0.0789,0.0656,0.0530,0.0424,0.0344,0.0290,0.0260,0.0258,0.0268,0.0304,0.0320,0.0331,0.0347,0.0355,0.0363,0.0382,0.0401,0.0416,0.0428,0.0432,0.0432,0.0432,0.0424,0.0416,0.0408,0.0408,0.0424,0.0452,0.0503,0.0562,0.0628,0.0695,0.0758,0.0821,0.0880,0.0939,0.1002,0.1060,0.1123,0.1178,0.1229,0.1261,0.1280,0.1288,0.1296,0.1308,0.1331,0.1371,0.1422,0.1493,0.1591,0.1728,0.1909,0.2137,0.2416,0.2757,0.3178,0.3692,0.4281,0.5499,0.6009,0.6324,0.6402,0.6324,0.6245,0.5892,0.5342,0.4674,0.3967,0.3276,0.2635,0.2078,0.1618,0.1249,0.0958,0.0746,0.0601,0.0503) micro=c(1.574,1.584,1.600,1.617,1.633,1.654,1.669,1.674,1.684,1.697,1.708,1.710,1.716,1.737,1.763,1.793,1.812,1.827,1.830,1.834,1.824,1.800,1.771,1.741,1.712,1.685,1.667,1.650,1.641,1.631,1.631,1.623,1.616,1.606,1.592,1.568,1.542,1.509,1.481,1.459,1.437,1.415,1.399,1.387,1.377,1.367,1.349,1.338,1.319,1.301,1.271,1.242,1.222,1.196,1.169,1.141,1.118,1.096,1.075,1.057,1.035,1.013,0.992,0.977,0.959,0.944,0.927,0.909,0.888,0.868,0.847,0.826,0.806,0.785,0.764,0.737,0.711,0.682,0.653,0.626,0.604,0.580,0.555,0.535,0.514,0.501,0.487,0.478,0.475,0.468,0.464,0.459,0.452,0.452,0.449,0.443,0.433,0.424,0.416,0.406,0.401,0.400,0.403,0.408,0.416,0.429,0.443,0.458,0.473,0.487,0.495,0.499,0.504,0.514,0.521,0.525,0.532,0.535,0.534,0.535,0.532,0.528,0.526,0.528,0.538,0.549,0.574,0.605,0.655,0.720,0.798,0.889,0.979,1.068,1.147,1.207,1.243,1.249,1.227,1.174,1.096,1.004,0.893,0.767,0.635,0.516,0.409,0.323,0.253,0.200,0.158) #Intervalo do comprimento de ondas wv = seq(400,700,2) df=data.frame(wv=wv, pico=pico, micro=micro) #Plotando o gráfico matplot(df$wv, df[,c("pico", "micro")],type="l", xlab="", ylab="", lwd=2) mtext(side=2, line=2.5, expression(bar(a)~("dimessionless"))) mtext(side=1, line=2.5, expression("Wavelength"~(nm))) legend(x=620, y=3.3, legend=c("pico","micro"), col=c("black","red") , lty=c(1,2), bty="n", y.intersp=2) pico_esp = pico* 0.023 / 0.5892 micro_esp = micro * 0.0086 / 1.249 df_esp = data.frame(wv=wv, pico=pico_esp, micro=micro_esp) matplot(df_esp$wv, df_esp[,c("pico", "micro")],type="l", xlab="", ylab="", lwd=2) mtext(side=2, line=2.5, expression({a[ph]}^{"*"}~(m^{2}~mg^{-1}))) mtext(side=1, line=2.5, expression("Wavelength"~(nm))) legend(x=620, y=0.13, legend=c("pico","micro"), col=c("black","red") , lty=c(1,2), bty="n", y.intersp=2)
_____no_output_____
MIT
Lab_Qa_R.ipynb
Andrealioli/Lab_aph_Qa_Sf
Considering the relationship established above, we can simulate an $a_{ph}^*$ curve from the base vectors:
Sf = 0.4 aph_simulado = (pico_esp*Sf) + ((1-Sf)*micro_esp) matplot(df_esp$wv, df_esp[,c("pico", "micro")],type="l", xlab="", ylab="", lwd=2) matlines(df_esp$wv, aph_simulado, col="green", lwd=2) mtext(side=2, line=2.5, expression({a[ph]}^{"*"}~(m^{2}~mg^{-1}))) mtext(side=1, line=2.5, expression("Wavelength"~(nm))) legend(x=600, y=0.13, legend=c("pico","micro", "simulado"), col=c("black","red", "green") , lty=c(1,2,1), lwd=rep(2,3), bty="n", y.intersp=2)
_____no_output_____
MIT
Lab_Qa_R.ipynb
Andrealioli/Lab_aph_Qa_Sf
Bayesian Imputation Real-world datasets often contain many missing values. In those situations, we have to either remove the missing data (also known as the "complete case" approach) or replace them with some values. Though using complete cases is straightforward, it is only applicable when the number of missing entries is so small that throwing them away would not noticeably reduce the power of the analysis we are conducting on the data. The second strategy, also known as [imputation](https://en.wikipedia.org/wiki/Imputation_%28statistics%29), is more widely applicable and will be our focus in this tutorial. Probably the most popular way to perform imputation is to fill a missing value with the mean, median, or mode of its corresponding feature. In that case, we implicitly assume that the feature containing missing values has no correlation with the remaining features of our dataset. This is a strong assumption that might not hold in general. In addition, it does not encode any uncertainty about those filled-in values. Below, we will construct a *Bayesian* setting to resolve those issues. In particular, given a model of the dataset, we will (1) create a generative model for the feature with missing values and (2) treat the missing values as unobserved latent variables.
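To make the contrast concrete, here is a minimal sketch (not part of the original tutorial) of the mean-imputation baseline described above; the toy DataFrame is purely hypothetical.

```python
# Minimal sketch of mean imputation on a hypothetical toy DataFrame:
# a single point estimate per missing entry, with no uncertainty attached
# and no use of the other features.
import numpy as np
import pandas as pd

toy = pd.DataFrame({
    "age": [22.0, 38.0, np.nan, 35.0, np.nan],
    "fare": [7.25, 71.28, 7.92, 53.10, 8.05],
})
toy["age_mean_imputed"] = toy["age"].fillna(toy["age"].mean())
print(toy)
```

The Bayesian approach below replaces this single fill-in value with a latent variable whose posterior reflects both the model and the observed data.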
!pip install -q numpyro@git+https://github.com/pyro-ppl/numpyro # first, we need some imports import os from IPython.display import set_matplotlib_formats from matplotlib import pyplot as plt import numpy as np import pandas as pd from jax import numpy as jnp from jax import ops, random from jax.scipy.special import expit import numpyro from numpyro import distributions as dist from numpyro.distributions import constraints from numpyro.infer import MCMC, NUTS, Predictive plt.style.use("seaborn") if "NUMPYRO_SPHINXBUILD" in os.environ: set_matplotlib_formats("svg") assert numpyro.__version__.startswith('0.6.0')
_____no_output_____
Apache-2.0
notebooks/source/bayesian_imputation.ipynb
MarcoGorelli/numpyro
Dataset The data is taken from the competition [Titanic: Machine Learning from Disaster](https://www.kaggle.com/c/titanic) hosted on [kaggle](https://www.kaggle.com/). It contains information about passengers in the [Titanic accident](https://en.wikipedia.org/wiki/Sinking_of_the_RMS_Titanic) such as name, age, and gender, and our target is to predict whether a person is likely to survive.
train_df = pd.read_csv( "https://raw.githubusercontent.com/agconti/kaggle-titanic/master/data/train.csv" ) train_df.info() train_df.head()
<class 'pandas.core.frame.DataFrame'> RangeIndex: 891 entries, 0 to 890 Data columns (total 12 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 PassengerId 891 non-null int64 1 Survived 891 non-null int64 2 Pclass 891 non-null int64 3 Name 891 non-null object 4 Sex 891 non-null object 5 Age 714 non-null float64 6 SibSp 891 non-null int64 7 Parch 891 non-null int64 8 Ticket 891 non-null object 9 Fare 891 non-null float64 10 Cabin 204 non-null object 11 Embarked 889 non-null object dtypes: float64(2), int64(5), object(5) memory usage: 83.7+ KB
Apache-2.0
notebooks/source/bayesian_imputation.ipynb
MarcoGorelli/numpyro
Looking at the data info, we can see that there are missing values in the `Age`, `Cabin`, and `Embarked` columns. Although `Cabin` is an important feature (because the position of a cabin in the ship can affect the chance that people in that cabin survive), we will skip it in this tutorial for simplicity. In the dataset, there are many categorical columns and two numerical columns, `Age` and `Fare`. Let's first look at the distribution of those categorical columns:
for col in ["Survived", "Pclass", "Sex", "SibSp", "Parch", "Embarked"]: print(train_df[col].value_counts(), end="\n\n")
0 549 1 342 Name: Survived, dtype: int64 3 491 1 216 2 184 Name: Pclass, dtype: int64 male 577 female 314 Name: Sex, dtype: int64 0 608 1 209 2 28 4 18 3 16 8 7 5 5 Name: SibSp, dtype: int64 0 678 1 118 2 80 5 5 3 5 4 4 6 1 Name: Parch, dtype: int64 S 644 C 168 Q 77 Name: Embarked, dtype: int64
Apache-2.0
notebooks/source/bayesian_imputation.ipynb
MarcoGorelli/numpyro
Prepare data First, we will merge the rare groups in the `SibSp` and `Parch` columns. In addition, we'll fill the 2 missing entries in `Embarked` with the mode `S`. Note that we could also build a generative model for those missing entries in `Embarked`, but let's skip doing so for simplicity.
train_df.SibSp.clip(0, 1, inplace=True) train_df.Parch.clip(0, 2, inplace=True) train_df.Embarked.fillna("S", inplace=True)
_____no_output_____
Apache-2.0
notebooks/source/bayesian_imputation.ipynb
MarcoGorelli/numpyro
Looking closer at the data, we can observe that each name contains a title. We know that age is correlated with the title of the name: e.g. those with `Mrs.` would, on average, be older than those with `Miss.`, so it might be useful to create that feature. The distribution of titles is:
train_df.Name.str.split(", ").str.get(1).str.split(" ").str.get(0).value_counts()
_____no_output_____
Apache-2.0
notebooks/source/bayesian_imputation.ipynb
MarcoGorelli/numpyro
We will make a new column `Title`, where rare titles are merged into one group `Misc.`.
train_df["Title"] = ( train_df.Name.str.split(", ") .str.get(1) .str.split(" ") .str.get(0) .apply(lambda x: x if x in ["Mr.", "Miss.", "Mrs.", "Master."] else "Misc.") )
_____no_output_____
Apache-2.0
notebooks/source/bayesian_imputation.ipynb
MarcoGorelli/numpyro
Now we are ready to turn the dataframe, which includes categorical values, into numpy arrays. We also perform standardization (a good practice for regression models) on the `Age` column.
title_cat = pd.CategoricalDtype( categories=["Mr.", "Miss.", "Mrs.", "Master.", "Misc."], ordered=True ) embarked_cat = pd.CategoricalDtype(categories=["S", "C", "Q"], ordered=True) age_mean, age_std = train_df.Age.mean(), train_df.Age.std() data = dict( age=train_df.Age.pipe(lambda x: (x - age_mean) / age_std).values, pclass=train_df.Pclass.values - 1, title=train_df.Title.astype(title_cat).cat.codes.values, sex=(train_df.Sex == "male").astype(int).values, sibsp=train_df.SibSp.values, parch=train_df.Parch.values, embarked=train_df.Embarked.astype(embarked_cat).cat.codes.values, ) survived = train_df.Survived.values # compute the age mean for each title age_notnan = data["age"][jnp.isfinite(data["age"])] title_notnan = data["title"][jnp.isfinite(data["age"])] age_mean_by_title = jnp.stack([age_notnan[title_notnan == i].mean() for i in range(5)])
_____no_output_____
Apache-2.0
notebooks/source/bayesian_imputation.ipynb
MarcoGorelli/numpyro
Modelling First, we want to note that in NumPyro, the following models

```python
def model1a():
    x = numpyro.sample("x", dist.Normal(0, 1).expand([10]))
```

and

```python
def model1b():
    x = numpyro.sample("x", dist.Normal(0, 1).expand([10]).mask(False))
    numpyro.sample("x_obs", dist.Normal(0, 1).expand([10]), obs=x)
```

are equivalent in the sense that both of them have
+ the same latent site `x` drawn from the `dist.Normal(0, 1)` prior,
+ and the same log densities `dist.Normal(0, 1).log_prob(x)`.

Now, assume that we observed the last 6 values of `x` (non-observed entries take the value `NaN`). The typical model would be

```python
def model2a(x):
    x_impute = numpyro.sample("x_impute", dist.Normal(0, 1).expand([4]))
    x_obs = numpyro.sample("x_obs", dist.Normal(0, 1).expand([6]), obs=x[4:])
    x_imputed = jnp.concatenate([x_impute, x_obs])
```

or, with the usage of `mask`,

```python
def model2b(x):
    x_impute = numpyro.sample("x_impute", dist.Normal(0, 1).expand([4]).mask(False))
    x_imputed = jnp.concatenate([x_impute, x[4:]])
    numpyro.sample("x", dist.Normal(0, 1).expand([10]), obs=x_imputed)
```

Both approaches to modelling the partially observed data `x` are equivalent. For the model below, we will use the latter method.
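To verify the first equivalence claim numerically, the following sketch (not from the original tutorial) compares the log joint densities of `model1a` and `model1b` at the same value of `x`. It relies on `numpyro.infer.util.log_density`, a helper NumPyro uses internally; treating it as available here is an assumption.

```python
# Hypothetical equivalence check: the two models should assign the same
# log joint density to the same value of x.
import jax.numpy as jnp
import numpyro
from numpyro import distributions as dist
from numpyro.infer.util import log_density

def model1a():
    numpyro.sample("x", dist.Normal(0, 1).expand([10]))

def model1b():
    x = numpyro.sample("x", dist.Normal(0, 1).expand([10]).mask(False))
    numpyro.sample("x_obs", dist.Normal(0, 1).expand([10]), obs=x)

x_val = jnp.linspace(-1.0, 1.0, 10)
logp_a, _ = log_density(model1a, (), {}, {"x": x_val})
logp_b, _ = log_density(model1b, (), {}, {"x": x_val})
print(logp_a, logp_b)  # both equal the Normal(0, 1) log prob summed over x_val
```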
def model(age, pclass, title, sex, sibsp, parch, embarked, survived=None, bayesian_impute=True): b_pclass = numpyro.sample("b_Pclass", dist.Normal(0, 1).expand([3])) b_title = numpyro.sample("b_Title", dist.Normal(0, 1).expand([5])) b_sex = numpyro.sample("b_Sex", dist.Normal(0, 1).expand([2])) b_sibsp = numpyro.sample("b_SibSp", dist.Normal(0, 1).expand([2])) b_parch = numpyro.sample("b_Parch", dist.Normal(0, 1).expand([3])) b_embarked = numpyro.sample("b_Embarked", dist.Normal(0, 1).expand([3])) # impute age by Title isnan = np.isnan(age) age_nanidx = np.nonzero(isnan)[0] if bayesian_impute: age_mu = numpyro.sample("age_mu", dist.Normal(0, 1).expand([5])) age_mu = age_mu[title] age_sigma = numpyro.sample("age_sigma", dist.Normal(0, 1).expand([5])) age_sigma = age_sigma[title] age_impute = numpyro.sample( "age_impute", dist.Normal(age_mu[age_nanidx], age_sigma[age_nanidx]).mask(False) ) age = ops.index_update(age, age_nanidx, age_impute) numpyro.sample("age", dist.Normal(age_mu, age_sigma), obs=age) else: # fill missing data by the mean of ages for each title age_impute = age_mean_by_title[title][age_nanidx] age = ops.index_update(age, age_nanidx, age_impute) a = numpyro.sample("a", dist.Normal(0, 1)) b_age = numpyro.sample("b_Age", dist.Normal(0, 1)) logits = a + b_age * age logits = logits + b_title[title] + b_pclass[pclass] + b_sex[sex] logits = logits + b_sibsp[sibsp] + b_parch[parch] + b_embarked[embarked] numpyro.sample("survived", dist.Bernoulli(logits=logits), obs=survived)
_____no_output_____
Apache-2.0
notebooks/source/bayesian_imputation.ipynb
MarcoGorelli/numpyro
Note that in the model, the prior for `age` is `dist.Normal(age_mu, age_sigma)`, where the values of `age_mu` and `age_sigma` depend on `title`. Because there are missing values in `age`, we will encode those missing values in the latent parameter `age_impute`. Then we can replace the `NaN` entries in `age` with the vector `age_impute`. Sampling We will use MCMC with the NUTS kernel to sample both the regression coefficients and the imputed values.
mcmc = MCMC(NUTS(model), num_warmup=1000, num_samples=1000) mcmc.run(random.PRNGKey(0), **data, survived=survived) mcmc.print_summary()
sample: 100%|██████████| 2000/2000 [00:18<00:00, 110.91it/s, 63 steps of size 6.48e-02. acc. prob=0.94]
Apache-2.0
notebooks/source/bayesian_imputation.ipynb
MarcoGorelli/numpyro
To double-check that the assumption "age is correlated with title" is reasonable, let's look at the inferred age by title. Recall that we performed standardization on `age`, so here we need to scale back to the original domain.
age_by_title = age_mean + age_std * mcmc.get_samples()["age_mu"].mean(axis=0) dict(zip(title_cat.categories, age_by_title))
_____no_output_____
Apache-2.0
notebooks/source/bayesian_imputation.ipynb
MarcoGorelli/numpyro
The inferred result confirms our assumption that `Age` is correlated with `Title`: those with the `Master.` title have a rather small age (in other words, they are children on the ship) compared to the other groups, while those with the `Mrs.` title are on average older than those with the `Miss.` title. We can also see that the result is similar to the actual statistical mean of `Age` given `Title` in our training dataset:
train_df.groupby("Title")["Age"].mean()
_____no_output_____
Apache-2.0
notebooks/source/bayesian_imputation.ipynb
MarcoGorelli/numpyro
So far so good: we have a lot of information about the regression coefficients together with the imputed values and their uncertainties. Let's inspect those results a bit: the mean value `-0.44` of `b_Age` implies that younger passengers have a better chance of surviving, and the mean value `(1.11, -1.07)` of `b_Sex` implies that female passengers have a higher chance of surviving than male passengers. Prediction In NumPyro, we can use the [Predictive](http://num.pyro.ai/en/stable/utilities.html#numpyro.infer.util.Predictive) utility for making predictions from posterior samples. Let's check how well the model performs on the training dataset. For simplicity, we will get a `survived` prediction for each posterior sample and apply the majority rule to the predictions.
posterior = mcmc.get_samples() survived_pred = Predictive(model, posterior)(random.PRNGKey(1), **data)["survived"] survived_pred = (survived_pred.mean(axis=0) >= 0.5).astype(jnp.uint8) print("Accuracy:", (survived_pred == survived).sum() / survived.shape[0]) confusion_matrix = pd.crosstab( pd.Series(survived, name="actual"), pd.Series(survived_pred, name="predict") ) confusion_matrix / confusion_matrix.sum(axis=1)
Accuracy: 0.8271605
Apache-2.0
notebooks/source/bayesian_imputation.ipynb
MarcoGorelli/numpyro
This is a pretty good result using a simple logistic regression model. Let's see how the model performs if we don't use Bayesian imputation here.
mcmc.run(random.PRNGKey(2), **data, survived=survived, bayesian_impute=False) posterior_1 = mcmc.get_samples() survived_pred_1 = Predictive(model, posterior_1)(random.PRNGKey(2), **data)["survived"] survived_pred_1 = (survived_pred_1.mean(axis=0) >= 0.5).astype(jnp.uint8) print("Accuracy:", (survived_pred_1 == survived).sum() / survived.shape[0]) confusion_matrix = pd.crosstab( pd.Series(survived, name="actual"), pd.Series(survived_pred_1, name="predict") ) confusion_matrix / confusion_matrix.sum(axis=1)
sample: 100%|██████████| 2000/2000 [00:11<00:00, 166.79it/s, 63 steps of size 7.18e-02. acc. prob=0.93]
Apache-2.0
notebooks/source/bayesian_imputation.ipynb
MarcoGorelli/numpyro
Exploratory Data Analysis Univariate Analysis
from pyspark.sql import functions as F online_df = spark.read.csv(DATA_PATH + 'online_retail.csv', sep=';', header=True, inferSchema=True) online_df.show(2) # Respuesta online_df_2 = online_df.withColumn('timestamp', F.unix_timestamp(F.col('InvoiceDate'), 'dd/MM/yyyy HH:mm')) online_df_2.show(2) # Respuesta online_df_3 = online_df_2.withColumn('datetime', F.from_unixtime(F.col('timestamp'))) online_df_3.show(2) # Respuesta online_df.dtypes
_____no_output_____
MIT
2021Q1_DSF/5.- Spark/notebooks/spark_sql/respuestas/extra_02_spark_eda_review_con_respuestas.ipynb
serch86/binder-pyspark-DSF_2021Q1
First, identify the qualitative and quantitative variables.
# Respuesta quantitative_vars = [c for c,t in online_df.dtypes if t in ['int', 'double']] qualitative_vars = [c for c,t in online_df.dtypes if t in ['boolean', 'string']] # Respuesta quantitative_vars # Respuesta qualitative_vars
_____no_output_____
MIT
2021Q1_DSF/5.- Spark/notebooks/spark_sql/respuestas/extra_02_spark_eda_review_con_respuestas.ipynb
serch86/binder-pyspark-DSF_2021Q1
Quantitative variables Compute metrics for a single column
# Respuesta avgs = [F.avg(col).alias('avg_' + col) for col in quantitative_vars] maxs = [F.max(col).alias('max_' + col) for col in quantitative_vars] mins = [F.min(col).alias('min_' + col) for col in quantitative_vars] stds = [F.stddev(col).alias('std_' + col) for col in quantitative_vars] # Respuesta operations = avgs + stds + maxs + mins operations # Respuesta results = online_df.select(operations).first() for col in quantitative_vars: avg = results['avg_' + col] std = results['std_' + col] maxi = results['max_' + col] mini = results['min_' + col] print('{}: avg={}, std={}, min={}, max={}'.format(col, round(avg, 2), round(std, 2), mini, maxi))
_____no_output_____
MIT
2021Q1_DSF/5.- Spark/notebooks/spark_sql/respuestas/extra_02_spark_eda_review_con_respuestas.ipynb
serch86/binder-pyspark-DSF_2021Q1
Qualitative variables For qualitative variables, frequency tables are computed. Compute the frequency table of the qualitative columns and sort them from highest to lowest.
# Respuesta online_df.groupBy('Country').count().sort(F.col("count").desc()).show() # Respuesta online_df.groupBy('Country', 'InvoiceDate').count().sort(F.col('count').desc()).show()
_____no_output_____
MIT
2021Q1_DSF/5.- Spark/notebooks/spark_sql/respuestas/extra_02_spark_eda_review_con_respuestas.ipynb
serch86/binder-pyspark-DSF_2021Q1
Multivariate Analysis __Correlation matrix__
# Respuesta from pyspark.mllib.linalg import Vectors from pyspark.mllib.stat import Statistics import pandas as pd # Respuesta online_df.select(quantitative_vars).rdd.map(lambda v: Vectors.dense(v)) # Respuesta corr_matrix = Statistics.corr(online_df.select(quantitative_vars).rdd.map(lambda v: Vectors.dense(v)), method='pearson') corr_matrix
_____no_output_____
MIT
2021Q1_DSF/5.- Spark/notebooks/spark_sql/respuestas/extra_02_spark_eda_review_con_respuestas.ipynb
serch86/binder-pyspark-DSF_2021Q1
_Transform the matrix into a pandas DataFrame_
# Respuesta df_corr_matrix = pd.DataFrame(corr_matrix, columns=quantitative_vars, index=quantitative_vars) df_corr_matrix # Respuesta import numpy as np mask = np.zeros_like(corr_matrix, dtype=np.bool) mask[np.triu_indices_from(mask)] = True mask # Respuesta df_corr_matrix_reduced = df_corr_matrix.mask(mask) df_corr_matrix_reduced # Respuesta import numpy as np from matplotlib import pyplot as plt import seaborn as sns # Respuesta %matplotlib inline # Respuesta plt.figure(figsize=(8,7)) sns.heatmap(df_corr_matrix, cmap='coolwarm', vmin=-1, vmax=1, annot=True, fmt='.2f') plt.show()
_____no_output_____
MIT
2021Q1_DSF/5.- Spark/notebooks/spark_sql/respuestas/extra_02_spark_eda_review_con_respuestas.ipynb
serch86/binder-pyspark-DSF_2021Q1
Outliers Outlier detection for variables that follow the normal distribution
# Respuesta def remove_tukey_outliers(df, col): """ Returns a new dataframe with outliers removed on column 'col' usting Tukey test """ q1, q3 = df.approxQuantile(col, [0.25, 0.75], 0.01) IQR = q3 - q1 min_thresh = q1 - 1.5 * IQR max_thresh = q3 + 1.5 * IQR df_no_outliers = df.filter(F.col(col).between(min_thresh, max_thresh)) return df_no_outliers # Respuesta online_df_no_outliers = remove_tukey_outliers(online_df, 'Quantity') # Respuesta n_rows = online_df.count() # Respuesta n_rows_no = online_df_no_outliers.count() perc_outliers = 100 * (n_rows - n_rows_no) / n_rows # Respuesta print('{} has {:.2f}% outliers'.format('Quantity', perc_outliers))
_____no_output_____
MIT
2021Q1_DSF/5.- Spark/notebooks/spark_sql/respuestas/extra_02_spark_eda_review_con_respuestas.ipynb
serch86/binder-pyspark-DSF_2021Q1
Null values
# Respuesta def remove_nulls(df): df_no_nulls = df for element in df_no_nulls.columns: if df_no_nulls.where(df_no_nulls[element].isNull()).count() != 0: print('\tThe column "{}" has null values'.format(element)) df_no_nulls = df_no_nulls.where(df_no_nulls[element].isNotNull()) if df_no_nulls.where(df_no_nulls[element].isNull()).count() == 0: print('The column "{}" does not have null values'.format(element)) return df_no_nulls # Respuesta def check_nulls(df): existing_nulls = False for element in df.columns: if df.where(df[element].isNull()).count() != 0: print('\tThe column "{}" has null values'.format(element)) existing_nulls = True break if df.where(df[element].isNull()).count() == 0: print('The column "{}" does not have null values'.format(element)) return existing_nulls # Respuesta print(online_df.count()) online_df_no_nulls = remove_nulls(online_df) print(online_df_no_nulls.count())
_____no_output_____
MIT
2021Q1_DSF/5.- Spark/notebooks/spark_sql/respuestas/extra_02_spark_eda_review_con_respuestas.ipynb
serch86/binder-pyspark-DSF_2021Q1
Variables and _placeholders_
import tensorflow as tf import numpy as np
_____no_output_____
Apache-2.0
inteligencia_artificial/03-Variables.ipynb
edwinb-ai/intelicompu
_Variables_ and _placeholders_ are the pillars of _Tensorflow_. However, to understand why this is, one must understand a bit more about the general structure of _Tensorflow_ and how it performs the corresponding computations. _Dataflow_ programming [_Dataflow programming_](https://en.wikipedia.org/wiki/Dataflow_programming) is a computational paradigm in which the operations, instructions, and everything else that happens in a program are carried out on a [directed graph](https://en.wikipedia.org/wiki/Directed_graph). A basic directed graph is shown here. ![Basic directed graph](https://upload.wikimedia.org/wikipedia/commons/thumb/a/a2/Directed.svg/267px-Directed.svg.png "Graph") _Tensorflow_ works this way, using instructions and tools such as _session_, _variables_, and _placeholders_. As seen previously, none of these structures displays the data it holds, because it lives inside a graph. The moment the session is run, the _global instruction_ is given to carry out **all** of the operations in the graph. Example with _variables_
# Crear una variables con ceros, de dimensiones (3,4) my_var = tf.Variable(tf.zeros((3, 4))) # Iniciar una sesión (en realidad se crea un grafo de computación/operacional) session = tf.Session() # Inicializar las variables inits = tf.global_variables_initializer() # Correr todo el grafo session.run(inits)
_____no_output_____
Apache-2.0
inteligencia_artificial/03-Variables.ipynb
edwinb-ai/intelicompu
Although nothing is displayed, under the hood a directed **graph** was created, in which the variable is a _node_, and when the graph is initialized, all pending operations are carried out. Below is an additional example with _placeholders_ where this fact can be seen more clearly. Example with _placeholders_
# Crear valores aleatorios de numpy x_vals = np.random.random_sample((2, 2)) print(x_vals) # Crear una sesión; un grafo computacional session = tf.Session() # El placeholder no puede tener otra dimensión diferente a (2,2) x = tf.placeholder(tf.float32, shape=(2,2)) # identity devuelve un tensor con la misma forma y contenido de la estructura # de datos que se le suministra y = tf.identity(x) # Correr todo el grafo computacional session.run(y, feed_dict={x: x_vals})
_____no_output_____
Apache-2.0
inteligencia_artificial/03-Variables.ipynb
edwinb-ai/intelicompu
Independent initialization of variables Variables do not always have to be initialized in a single way, all at the same time; they can be initialized one by one as convenient. An example is shown below.
# Crear la sesión session = tf.Session() # Se tiene una primera variable llena de cero first_var = tf.Variable(tf.zeros((3, 4))) # Y ahora se inicializa session.run(first_var.initializer) # Se tiene una segunda variable llena de uno second_var = tf.Variable(tf.ones_like(first_var)) session.run(second_var.initializer)
_____no_output_____
Apache-2.0
inteligencia_artificial/03-Variables.ipynb
edwinb-ai/intelicompu
Solar Resource Data> Get the average Direct Normal Irradiance (avg_dni), average Global Horizontal Irradiance (avg_ghi), and average tilt (avg_lat_tilt) for a location. This is an example of getting solar resource data - average Direct Normal Irradiance, average Global Horizontal Irradiance, and average tilt - from NREL. First, let's set our NREL API key.
import os from nrel_dev_api import set_nrel_api_key from nrel_dev_api.solar import SolarResourceData NREL_API_KEY = os.environ["DEMO_NREL_API_KEY"] set_nrel_api_key(NREL_API_KEY)
_____no_output_____
Apache-2.0
docs/Tutorial/solar/solar_resource_data.ipynb
SarthakJariwala/nrel_dev_api
> Alternatively, you can provide your NREL Developer API key with every call. Setting it globally is just for convenience. Let's check available solar resource data for Seattle, WA.
solar_resource_data = SolarResourceData(lat=47, lon=-122)
_____no_output_____
Apache-2.0
docs/Tutorial/solar/solar_resource_data.ipynb
SarthakJariwala/nrel_dev_api
Outputs for the solar resource data are available as the `outputs` attribute.
solar_resource_data.outputs
_____no_output_____
Apache-2.0
docs/Tutorial/solar/solar_resource_data.ipynb
SarthakJariwala/nrel_dev_api
We can also provide the address to access the solar resource data.
address = "Seattle, WA" solar_resource_data = SolarResourceData(address=address)
_____no_output_____
Apache-2.0
docs/Tutorial/solar/solar_resource_data.ipynb
SarthakJariwala/nrel_dev_api
The complete response as a dictionary is available as the `response` attribute.
solar_resource_data.response
_____no_output_____
Apache-2.0
docs/Tutorial/solar/solar_resource_data.ipynb
SarthakJariwala/nrel_dev_api
Grid searching parametershttps://machinelearningmastery.com/grid-search-arima-hyperparameters-with-python/
import pandas as pd
from pandas import read_csv
import numpy as np
from datetime import datetime
from pandas import Series
from statsmodels.tsa.arima_model import ARIMA
from sklearn.metrics import mean_squared_error
import warnings
warnings.filterwarnings("ignore")

# load the series (Series.from_csv needs an older pandas; read_csv works in newer versions)
series = Series.from_csv('female.csv', header=0)

def evaluate_arima_model(X, arima_order):
    # prepare training dataset
    train_size = int(len(X) * 0.6)
    train, test = X[0:train_size], X[train_size:]
    history = [x for x in train]
    # make predictions with walk-forward validation: refit on the growing history
    predictions = list()
    for t in range(len(test)):
        model = ARIMA(history, order=arima_order)
        model_fit = model.fit(disp=0)
        yhat = model_fit.forecast()[0]
        predictions.append(yhat)
        history.append(test[t])
    # calculate out of sample error
    error = mean_squared_error(test, predictions)
    return error

def evaluate_models(dataset, p_values, d_values, q_values):
    best_score, best_cfg = float("inf"), None
    for p in p_values:
        for d in d_values:
            for q in q_values:
                order = (p, d, q)
                try:
                    mse = evaluate_arima_model(dataset, order)
                    if mse < best_score:
                        best_score, best_cfg = mse, order
                    print('ARIMA%s MSE=%.3f' % (order, mse))
                except:
                    continue
    print('Best ARIMA%s MSE=%.3f' % (best_cfg, best_score))

p_values = [0, 1, 2]
d_values = range(0, 2)
q_values = range(0, 2)
warnings.filterwarnings("ignore")
evaluate_models(series, p_values, d_values, q_values)
Best ARIMANone MSE=inf
Apache-2.0
Gridsearch Parameters.ipynb
BrittGeek/Time-Series-Forecasting
Notebook to perform a sensitivity calculation

**Content:**
- Calculation of the collection area
- Sensitivity calculation in energy bins
- Sensitivity calculation in bins of gammaness and theta2 cuts
- Optimization of the cuts using Nex/sqrt(Nbg) -> Li & Ma to be implemented (see the sketch below)
- Plotting of the sensitivity in absolute values
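Since the Li & Ma optimization is listed above as still to be implemented, here is a minimal sketch of how that significance could be computed, following Eq. 17 of Li & Ma (1983); it is an assumption about the eventual implementation, not code from this notebook.

```python
# Sketch of the Li & Ma (1983, Eq. 17) significance; n_on and n_off are the
# counts in the ON and OFF regions and alpha is the ON/OFF exposure ratio.
import numpy as np

def lima_significance(n_on, n_off, alpha):
    term_on = n_on * np.log((1 + alpha) / alpha * (n_on / (n_on + n_off)))
    term_off = n_off * np.log((1 + alpha) * (n_off / (n_on + n_off)))
    return np.sqrt(2) * np.sqrt(term_on + term_off)

# example values (hypothetical): 130 ON events, 500 OFF events, alpha = 0.2
print(lima_significance(130.0, 500.0, 0.2))
```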
import numpy as np import matplotlib.pyplot as plt from matplotlib.colors import LogNorm import h5py import pandas as pd import math import pyhessio from astropy import units as u import eventio from eventio.simtel.simtelfile import SimTelFile simtelfile_gammas = "/home/queenmab/DATA/LST1/Gamma/gamma_20deg_0deg_run8___cta-prod3-lapalma-2147m-LaPalma-FlashCam.simtel.gz" simtelfile_protons = "/home/queenmab/DATA/LST1/Proton/proton_20deg_0deg_run194___cta-prod3-lapalma-2147m-LaPalma-FlashCam.simtel.gz" PATH_EVENTS = "../../cta-lstchain-extra/reco/sample_data/dl2/" file_g = PATH_EVENTS+"/reco_gammas.h5" ##Same events but with reconstructed file_p = PATH_EVENTS+"/reco_protons.h5" events_g = pd.read_hdf(file_g) events_p = pd.read_hdf(file_p) Triggered_Events_real_gammas = events_g.shape[0] Triggered_Events_real_protons = events_p.shape[0] source_gammas = SimTelFile(simtelfile_gammas) source_protons = SimTelFile(simtelfile_protons) emin_g, emax_g = source_gammas.mc_run_headers[0]['E_range']*1e3 #GeV spectral_index_g = source_gammas.mc_run_headers[0]['spectral_index'] num_showers = source_gammas.mc_run_headers[0]['num_showers'] num_use = source_gammas.mc_run_headers[0]['num_use'] Simulated_Events_g = num_showers * num_use Max_impact_g = source_gammas.mc_run_headers[0]['core_range'][1]*1e2 #cm Area_sim_g = math.pi * math.pow(Max_impact_g,2) cone_g = source_gammas.mc_run_headers[0]['viewcone'][1] emin_p, emax_p = source_protons.mc_run_headers[0]['E_range']*1e3 #GeV spectral_index_p = source_protons.mc_run_headers[0]['spectral_index'] num_showers = source_protons.mc_run_headers[0]['num_showers'] num_use = source_protons.mc_run_headers[0]['num_use'] Simulated_Events_p = num_showers * num_use Max_impact_p = source_protons.mc_run_headers[0]['core_range'][1]*1e2 #cm Area_sim_p = math.pi * math.pow(Max_impact_p,2) cone_p = source_protons.mc_run_headers[0]['viewcone'][1] energies_g = [] energies_p = [] with SimTelFile(simtelfile_gammas) as f: for i, event in enumerate(f.iter_mc_events()): energies_g.append(event['mc_shower']['energy']*1e3) #In GeV with SimTelFile(simtelfile_protons) as f: for i, event in enumerate(f.iter_mc_events()): energies_p.append(event['mc_shower']['energy']*1e3) e_trig_g = 10**events_g.mc_energy e_trig_p = 10**events_p.mc_energy Triggered_Events_g = e_trig_g.shape[0] Triggered_Events_p = e_trig_p.shape[0] fig,ax = plt.subplots() ax.hist(np.log10(energies_p),label = 'Simulated protons') ax.hist(np.log10(energies_g),label='Simulated gammas') ax.set_yscale("log") ##### Binnings and constants###### # Whenever implemented using simulated files, most of these values can be read from the simulations eedges = 6 ebins = eedges-1 E = np.logspace(math.log10(emin_g),math.log10(emax_g),eedges) E_trig = np.logspace(math.log10(emin_p),math.log10(100000),eedges) Emed = np.sqrt(E[:-1] * E[1:]) Emed_trig = np.sqrt(E_trig[:-1] * E_trig[1:]) gammaness_bins = 3 theta2_bins = 3 Index_Crab = -2.62 ##### Collection area calculation ###### def collection_area(Esim, Etrig): # Esim are all the simulated energies # Etrig are the energies after cuts area = [] Nsim = np.power(Esim,Index_Crab-spectral_index_g) Ncuts = np.power(Etrig,Index_Crab-spectral_index_g) for i in range(0,ebins): Nsim_w = np.sum(Nsim[(Esim < E[i+1]) & (Esim > E[i])]) Ntrig_w = np.sum(Ncuts[(Etrig < E[i+1]) & (Etrig > E[i])]) if(Nsim_w == 0): print("You have not simulated any events in the energy range between %.3f GeV and %.3f GeV" % (E[i],E[i+1])) area.append(0) else: area.append(Ntrig_w / Nsim_w * Area_sim_g) # cm^2 return area # Plot the 
collection area area = collection_area(energies_g, e_trig_g) fig, ax = plt.subplots() ax.set_xlabel("Energy [GeV]") ax.set_ylabel("Collection area [cm$^2$]") ax.grid(ls='--',alpha=0.4) ax.loglog(E[:-1], area) gammaness_g = events_g.gammaness gammaness_p = events_p.gammaness theta2_g = (events_g.src_x-events_g.src_x_rec)**2+(events_g.src_y-events_g.src_y)**2 theta2_p = (events_p.src_x-events_p.src_x_rec)**2+(events_p.src_y-events_p.src_y)**2 ####### Sensitivity calculation ########## # We will first go for a implementation using Sig = Nex/sqrt(Nbg) obstime = 50 * 3600 # s (50 hours) ####### Weighting of the hadrons ##### # No simulation, just take the gamma energy distribution and convert it to hadrons #Float_t ProtonTrueSpectralIndex = -2.70; #Float_t ProtonTrueNorm = 9.6e-9; // (cm2 sr s GeV)^-1 at ProtonEnorm #Float_t ProtonEnorm = 1000.; // GeV K = Simulated_Events_p*(1+spectral_index_p)/(emax_p**(1+spectral_index_p)-emin_p**(1+spectral_index_p)) cone = cone_p * math.pi/180 if(cone == 0): Omega = 1 else: Omega = 2*np.pi*(1-np.cos(cone)) K_w = 9.6e-11 # GeV^-1 cm^-2 s^-1 index_w_p = -2.7 E0 = 1000. # GeV Int_e1_e2 = K*E0**spectral_index_p Np_ = Int_e1_e2*(emax_p**(index_w_p+1)-emin_p**(index_w_p+1))/(E0**index_w_p)/(index_w_p+1) Rp = K_w*Area_sim_p*Omega*(emax_p**(index_w_p+1)-emin_p**(index_w_p+1))/(E0**index_w_p)/(index_w_p+1) # Rate (in Hz) print("The total rate of simulated proton events is %.1f Hz" % Rp) ####### Weighting of the gamma simulations ##### # HEGRA Crab # TF1* CrabFluxHEGRA = new TF1("CrabFluxHEGRA","[0]*pow(x/1000.,-[1])",50,80000); # CrabFluxHEGRA->SetParameter(0,2.83e-11); # CrabFluxHEGRA->SetParameter(1,2.62); K = Simulated_Events_g*(1+spectral_index_g)/(emax_g**(1+spectral_index_g)-emin_g**(1+spectral_index_g)) Area_sim = math.pi * math.pow(Max_impact_g,2) # cm^2 cone=0 if(cone == 0): Omega = 1 else: Omega = 2*np.pi*(1-np.cos(cone)) K_w = 2.83e-11 # GeV^-1 cm^-2 s^-1 index_w_g = -2.62 E0 = 1000. 
# GeV Int_e1_e2 = K*E0**spectral_index_g N_ = Int_e1_e2*(emax_g**(index_w_g+1)-emin_g**(index_w_g+1))/(E0**index_w_g)/(index_w_g+1) R = K_w*Area_sim_g*Omega*(emax_g**(index_w_g+1)-emin_g**(index_w_g+1))/(E0**index_w_g)/(index_w_g+1) # Rate (in Hz) print("The total rate of simulated gamma events is %.1f Hz" % R) energies_g = np.asarray(energies_g) energies_p = np.asarray(energies_p) e_w = ((energies_g/E0)**(index_w_g-spectral_index_g))*R/N_ e_trig_w = ((e_trig_g/E0)**(index_w_g-spectral_index_g))*R/N_ ep_w = ((energies_p/E0)**(index_w_p-spectral_index_p))*Rp/Np_ ep_trig_w = ((e_trig_p/E0)**(index_w_p-spectral_index_p))*Rp/Np_ fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15,5)) ax1.hist(np.log10(energies_g),histtype=u'step',bins=20, density=1,label="Simulated") ax1.hist(np.log10(energies_g),histtype=u'step',bins=20,weights = e_w, density=1,label="Weighted to Crab") ax1.set_yscale('log') #plt.xscale('log') ax1.set_xlabel("$log_{10}E (GeV)$") ax1.grid(ls='--',alpha=.5) ax1.legend() #ax2.hist(np.log10(e),histtype=u'step',bins=20,label="Simulated rate") ax2.hist(np.log10(energies_g),histtype=u'step',bins=20,weights = e_w,label="Simulated rate weighted to Crab") ax2.hist(np.log10(e_trig_g),histtype=u'step',bins=20,weights = e_trig_w,label="Triggered rate weighted to Crab") ax2.hist(np.log10(energies_p),histtype=u'step',bins=20,weights = ep_w,label="Simulated Protons") ax2.hist(np.log10(e_trig_p),histtype=u'step',bins=20,weights = ep_trig_w,label="Triggered Protons") ax2.legend() ax2.set_yscale('log') ax2.set_xlabel("$log_{10}E (GeV)$") ax2.grid(ls='--',alpha=.5) #plt.xscale('log') for i in range(0,ebins): # binning in energy e_w_sum = np.sum(e_w[(energies_g < E[i+1]) & (energies_g > E[i])]) print("Rate of gammas between %.1f GeV and %.1f GeV: %.2f Hz" % (E[i],E[i+1],e_w_sum)) for i in range(0,ebins): # binning in energy e_w_sum = np.sum(ep_w[(energies_p < E[i+1]) & (energies_p > E[i])]) print("Rate of protons between %.1f GeV and %.1f GeV: %.2f Hz" % (E[i],E[i+1],e_w_sum)) for i in range(0,ebins): # binning in energy e_w_sum = np.sum(e_trig_w[(e_trig_g < E_trig[i+1]) & (e_trig_g > E_trig[i])]) print("Rate of triggered gammas between %.1f GeV and %.1f GeV: %.6f Hz" % (E_trig[i],E_trig[i+1],e_w_sum)) for i in range(0,ebins): # binning in energy e_w_sum = np.sum(ep_trig_w[(e_trig_p < E_trig[i+1]) & (e_trig_p > E_trig[i])]) print("Rate of triggered protons between %.1f GeV and %.1f GeV: %.6f Hz" % (E_trig[i],E_trig[i+1],e_w_sum)) # Cut optimization for gammas and hadrons final_gamma = np.ndarray(shape=(ebins,gammaness_bins,theta2_bins)) final_hadrons = np.ndarray(shape=(ebins,gammaness_bins,theta2_bins)) for i in range(0,eedges-1): # binning in energy e_w_binE = np.sum(e_w[(energies_g < E[i+1]) & (energies_g > E[i])]) for g in range(0,gammaness_bins): # cut in gammaness Ngammas = [] Nhadrons = [] for t in range(0,theta2_bins): # cut in theta2 e_trig_w_sum = np.sum(e_trig_w[(e_trig_g < E_trig[i+1]) & (e_trig_g > E_trig[i]) \ & (gammaness_g > 0.1*g) & (theta2_g < 0.05*(t+1))]) # Just considering all the hadrons give trigger... 
ep_w_sum = np.sum(ep_trig_w[(e_trig_p < E_trig[i+1]) & (e_trig_p > E_trig[i]) \ & (gammaness_p > 0.1*g) & (theta2_p < 0.05*(t+1))]) final_gamma[i][g][t] = e_trig_w_sum * obstime final_hadrons[i][g][t] = ep_w_sum * obstime def Calculate_sensititity(Ng, Nh, alpha): significance = (Ng)/np.sqrt(Nh * alpha) sensitivity = 5/significance * 100 # percentage of Crab return sensitivity sens = Calculate_sensititity(final_gamma, final_hadrons, 1) def fill_bin_content(ax,energy_bin): for i in range(0,gammaness_bins): for j in range(0,theta2_bins): text = ax.text((j+0.5)*(0.5/theta2_bins), (i+0.5)*(1/gammaness_bins), "%.2E %%" % sens[energy_bin][i][j], ha="center", va="center", color="w") return ax def format_axes(ax,pl): ax.set_aspect(0.5) ax.set_ylabel(r'Gammaness',fontsize=15) ax.set_xlabel(r'$\theta^2$ (deg$^2$)',fontsize=15) starty, endy = ax.get_ylim() ax.yaxis.set_ticks(np.arange(endy, starty, 0.1)[::-1]) startx, endx = ax.get_xlim() ax.xaxis.set_ticks(np.arange(startx, endx, 0.1)) cbaxes = fig.add_axes([0.9, 0.125, 0.03, 0.755]) cbar = fig.colorbar(pl,cax=cbaxes) cbar.set_label('Sensitivity (% Crab)',fontsize=15) # Sensitivity plots for different Energy bins for ebin in range(0,ebins): fig, ax = plt.subplots(figsize=(8,8)) pl = ax.imshow(sens[ebin], cmap='viridis', extent=[0., 0.5, 1., 0.]) fill_bin_content(ax, ebin) format_axes(ax, pl) def Crab_spectrum(x): MAGIC_par=[3.23e-11, -2.47, -0.24] #dFdE = MAGIC_par[0]*pow(x/1.,MAGIC_par[1]+MAGIC_par[2]*np.log10(x/1.)) dFdE = MAGIC_par[0]*pow(x/1000.,MAGIC_par[1]+MAGIC_par[2]*np.log10(x/1000.)) return dFdE def format_axes_array(ax, arr_i,arr_j): ax.set_aspect(0.5) if ((arr_i == 0) and (arr_j == 0)): ax.set_ylabel(r'Gammaness',fontsize=15) if ((arr_i == 3) and (arr_j == 2)): ax.set_xlabel(r'$\theta^2$ (deg$^2$)',fontsize=15) starty, endy = ax.get_ylim() ax.yaxis.set_ticks(np.arange(endy, starty, 0.1)[::-1]) startx, endx = ax.get_xlim() ax.xaxis.set_ticks(np.arange(startx, endx, 0.1)) cbaxes = fig.add_axes([0.91, 0.125, 0.03, 0.755]) cbar = fig.colorbar(pl,cax=cbaxes) cbar.set_label('Sensitivity (% Crab)',fontsize=15) #fig, ax = plt.subplots(figsize=(8,8), ) fig, axarr = plt.subplots(2,3, sharex=True, sharey=True, figsize=(13.2,18)) indices=[] sensitivity = np.ndarray(shape=ebins) sens = sens+1e-6 for ebin in range(0,ebins): arr_i = int(ebin/3) arr_j = ebin-int(ebin/3)*3 pl = axarr[arr_i,arr_j].imshow(sens[ebin], cmap='viridis_r', extent=[0., 0.5, 1., 0.] #vmin=sens.min(), vmax=sens.max()) ,norm=LogNorm(vmin=sens.min(), vmax=sens.max())) format_axes_array(axarr[arr_i,arr_j],arr_i,arr_j) # gammaness/theta2 indices where the minimum in sensitivity is reached ind = np.unravel_index(np.argmin(sens[sens>1e-6][ebin], axis=None), sens[ebin].shape) indices.append(ind) sensitivity[ebin] = sens[ebin][ind] fig.subplots_adjust(hspace = 0, wspace = 0) #format_axes(ax) def plot_Crab(ax, percentage=100, **kwargs): # factor is the percentage of Crab En = np.logspace(math.log10(100),math.log10(3.e4),40) # in TeV dFdE = percentage / 100. 
* Crab_spectrum(En) ax.loglog(En,dFdE * En/1.e3 * En/1.e3, color='gray', **kwargs) return ax def format_axes(ax): ax.set_xscale("log", nonposx='clip') ax.set_yscale("log", nonposy='clip') ax.set_xlim(5e1,9.e4) ax.set_ylim(1.e-14,5.e-10) ax.set_xlabel("Energy [GeV]") ax.set_ylabel(r'E$^2$ $\frac{\mathrm{dN}}{\mathrm{dE}}$ [TeV cm$^{-2}$ s$^{-1}$]') ax.grid(ls='--',alpha=.5) sensitivity Emed = Emed_trig[sensitivity>0] def plot_sensitivity(ax): dFdE = Crab_spectrum(Emed) ax.loglog(Emed, sensitivity[sensitivity>0] / 100 * dFdE * Emed/1.e3 * Emed/1.e3, label = 'Sensitivity') #### SENSITIVITY PLOT ###### fig, ax = plt.subplots() plot_sensitivity(ax) plot_Crab(ax, label=r'Crab') #plot_Crab(ax,10,ls='dashed',label='10% Crab') plot_Crab(ax,1,ls='dotted',label='1% Crab') #format_axes(ax) ax.legend(numpoints=1,prop={'size':9},ncol=2,loc='upper right')
_____no_output_____
BSD-3-Clause
notebooks/Calculate_sensitivity_eventio.ipynb
pawel21/cta-lstchain
Skip-gram Word2VecIn this notebook, I'll lead you through using PyTorch to implement the [Word2Vec algorithm](https://en.wikipedia.org/wiki/Word2vec) using the skip-gram architecture. By implementing this, you'll learn about embedding words for use in natural language processing. This will come in handy when dealing with things like machine translation. ReadingsHere are the resources I used to build this notebook. I suggest reading these either beforehand or while you're working on this material.* A really good [conceptual overview](http://mccormickml.com/2016/04/19/word2vec-tutorial-the-skip-gram-model/) of Word2Vec from Chris McCormick * [First Word2Vec paper](https://arxiv.org/pdf/1301.3781.pdf) from Mikolov et al.* [Neural Information Processing Systems, paper](http://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf) with improvements for Word2Vec also from Mikolov et al.--- Word embeddingsWhen you're dealing with words in text, you end up with tens of thousands of word classes to analyze; one for each word in a vocabulary. Trying to one-hot encode these words is massively inefficient because most values in a one-hot vector will be set to zero. So, the matrix multiplication that happens in between a one-hot input vector and a first, hidden layer will result in mostly zero-valued hidden outputs.To solve this problem and greatly increase the efficiency of our networks, we use what are called **embeddings**. Embeddings are just a fully connected layer like you've seen before. We call this layer the embedding layer and the weights are embedding weights. We skip the multiplication into the embedding layer by instead directly grabbing the hidden layer values from the weight matrix. We can do this because the multiplication of a one-hot encoded vector with a matrix returns the row of the matrix corresponding the index of the "on" input unit.Instead of doing the matrix multiplication, we use the weight matrix as a lookup table. We encode the words as integers, for example "heart" is encoded as 958, "mind" as 18094. Then to get hidden layer values for "heart", you just take the 958th row of the embedding matrix. This process is called an **embedding lookup** and the number of hidden units is the **embedding dimension**. There is nothing magical going on here. The embedding lookup table is just a weight matrix. The embedding layer is just a hidden layer. The lookup is just a shortcut for the matrix multiplication. The lookup table is trained just like any weight matrix.Embeddings aren't only used for words of course. You can use them for any model where you have a massive number of classes. A particular type of model called **Word2Vec** uses the embedding layer to find vector representations of words that contain semantic meaning. --- Word2VecThe Word2Vec algorithm finds much more efficient representations by finding vectors that represent the words. These vectors also contain semantic information about the words.Words that show up in similar **contexts**, such as "coffee", "tea", and "water" will have vectors near each other. Different words will be further away from one another, and relationships can be represented by distance in vector space.There are two architectures for implementing Word2Vec:>* CBOW (Continuous Bag-Of-Words) and * Skip-gramIn this implementation, we'll be using the **skip-gram architecture** with **negative sampling** because it performs better than CBOW and trains faster with negative sampling. 
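Before moving on, here is a minimal sketch (not part of the original notebook) of the embedding-lookup idea described above: multiplying a one-hot vector by the embedding weight matrix returns exactly the row that a direct index lookup gives.

```python
# One-hot matmul vs. direct row lookup: both select the same embedding row.
import torch

vocab_size, embed_dim = 5, 3
embedding = torch.nn.Embedding(vocab_size, embed_dim)

word_idx = 2
one_hot = torch.zeros(vocab_size)
one_hot[word_idx] = 1.0

via_matmul = one_hot @ embedding.weight          # full matrix multiplication
via_lookup = embedding(torch.tensor(word_idx))   # embedding lookup (row index)
print(torch.allclose(via_matmul, via_lookup))    # True
```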
With the skip-gram architecture, we pass in a word and try to predict the words surrounding it in the text. In this way, we can train the network to learn representations for words that show up in similar contexts. --- Loading Data Next, we'll ask you to load in the data and place it in the `data` directory: 1. Load the [text8 dataset](https://s3.amazonaws.com/video.udacity-data.com/topher/2018/October/5bbe6499_text8/text8.zip), a file of cleaned-up *Wikipedia article text* from Matt Mahoney. 2. Place that data in the `data` folder in the home directory. 3. Then you can extract it and delete the archive (zip file) to save storage space. After following these steps, you should have one file in your data directory: `data/text8`.
# read in the extracted text file with open('data/text8') as f: text = f.read() # print out the first 100 characters print(text[:100])
anarchism originated as a term of abuse first used against early working class radicals including t
MIT
word2vec-embeddings/Negative_Sampling_My_Solution.ipynb
iromeo/deep-learning-v2-pytorch
Pre-processing Here I'm fixing up the text to make training easier. This comes from the `utils.py` file. The `preprocess` function does a few things: >* It converts any punctuation into tokens, so a period is changed to a token such as `<PERIOD>`. In this data set, there aren't any periods, but it will help in other NLP problems. * It removes all words that show up five or *fewer* times in the dataset. This will greatly reduce issues due to noise in the data and improve the quality of the vector representations. * It returns a list of words in the text. This may take a few seconds to run, since our text file is quite large. If you want to write your own functions for this stuff, go for it!
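For reference, here is a minimal sketch of what a preprocessing step like this might look like; the token names and the count threshold mirror the description above, but this is an illustration, not the exact `utils.py` implementation.

```python
# Illustrative sketch of the preprocessing described above (assumed details:
# token names and the "> 5 occurrences" threshold).
from collections import Counter

def simple_preprocess(text):
    text = text.lower()
    text = text.replace('.', ' <PERIOD> ').replace(',', ' <COMMA> ')
    words = text.split()
    counts = Counter(words)
    # keep only words that appear more than five times
    return [word for word in words if counts[word] > 5]
```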
import utils # get list of words words = utils.preprocess(text) print(words[:30]) # print some stats about this word data print("Total words in text: {}".format(len(words))) print("Unique words: {}".format(len(set(words)))) # `set` removes any duplicate words
Total words in text: 16680599 Unique words: 63641
MIT
word2vec-embeddings/Negative_Sampling_My_Solution.ipynb
iromeo/deep-learning-v2-pytorch
DictionariesNext, I'm creating two dictionaries to convert words to integers and back again (integers to words). This is again done with a function in the `utils.py` file. `create_lookup_tables` takes in a list of words in a text and returns two dictionaries.>* The integers are assigned in descending frequency order, so the most frequent word ("the") is given the integer 0 and the next most frequent is 1, and so on. Once we have our dictionaries, the words are converted to integers and stored in the list `int_words`.
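A minimal sketch of what `create_lookup_tables` might look like, consistent with the description above (most frequent word mapped to 0); this is an illustration, not necessarily the exact `utils.py` code.

```python
# Sketch of lookup-table creation: sort the vocabulary by descending frequency
# and assign integers in that order.
from collections import Counter

def create_lookup_tables_sketch(words):
    counts = Counter(words)
    sorted_vocab = sorted(counts, key=counts.get, reverse=True)
    int_to_vocab = {ii: word for ii, word in enumerate(sorted_vocab)}
    vocab_to_int = {word: ii for ii, word in int_to_vocab.items()}
    return vocab_to_int, int_to_vocab
```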
vocab_to_int, int_to_vocab = utils.create_lookup_tables(words) int_words = [vocab_to_int[word] for word in words] print(int_words[:30])
[5233, 3080, 11, 5, 194, 1, 3133, 45, 58, 155, 127, 741, 476, 10571, 133, 0, 27349, 1, 0, 102, 854, 2, 0, 15067, 58112, 1, 0, 150, 854, 3580]
MIT
word2vec-embeddings/Negative_Sampling_My_Solution.ipynb
iromeo/deep-learning-v2-pytorch
Subsampling Words that show up often such as "the", "of", and "for" don't provide much context to the nearby words. If we discard some of them, we can remove some of the noise from our data and in return get faster training and better representations. This process is called subsampling by Mikolov. For each word $w_i$ in the training set, we'll discard it with probability given by $$ P(w_i) = 1 - \sqrt{\frac{t}{f(w_i)}} $$ where $t$ is a threshold parameter and $f(w_i)$ is the frequency of word $w_i$ in the total dataset.> Implement subsampling for the words in `int_words`. That is, go through `int_words` and discard each word given the probability $P(w_i)$ shown above. Note that $P(w_i)$ is the probability that a word is discarded. Assign the subsampled data to `train_words`.
from collections import Counter import random import numpy as np threshold = 1e-5 word_counts = Counter(int_words) #print(list(word_counts.items())[0]) # dictionary of int_words, how many times they appear total_count = len(int_words) freqs = {word: count/total_count for word, count in word_counts.items()} p_drop = {word: 1 - np.sqrt(threshold/freqs[word]) for word in word_counts} # discard some frequent words, according to the subsampling equation # create a new list of words for training train_words = [word for word in int_words if random.random() < (1 - p_drop[word])] print(train_words[:30])
[5233, 741, 10571, 27349, 15067, 58112, 854, 10712, 19, 708, 2757, 5233, 248, 44611, 2877, 5233, 8983, 4147, 6437, 5233, 1818, 4860, 6753, 7573, 566, 247, 11064, 7088, 5948, 4861]
MIT
word2vec-embeddings/Negative_Sampling_My_Solution.ipynb
iromeo/deep-learning-v2-pytorch
Making batches Now that our data is in good shape, we need to get it into the proper form to pass it into our network. With the skip-gram architecture, for each word in the text, we want to define a surrounding _context_ and grab all the words in a window around that word, with size $C$. From [Mikolov et al.](https://arxiv.org/pdf/1301.3781.pdf): "Since the more distant words are usually less related to the current word than those close to it, we give less weight to the distant words by sampling less from those words in our training examples... If we choose $C = 5$, for each training word we will select randomly a number $R$ in range $[ 1: C ]$, and then use $R$ words from history and $R$ words from the future of the current word as correct labels."> **Exercise:** Implement a function `get_target` that receives a list of words, an index, and a window size, then returns a list of words in the window around the index. Make sure to use the algorithm described above, where you choose a random number of words from the window. Say, we have an input and we're interested in the idx=2 token, `741`: ```[5233, 58, 741, 10571, 27349, 0, 15067, 58112, 3580, 58, 10712]```For `R=2`, `get_target` should return a list of four values:```[5233, 58, 10571, 27349]```
def get_target(words, idx, window_size=5): ''' Get a list of words in a window around an index. ''' R = np.random.randint(1, window_size+1) start = idx - R if (idx - R) > 0 else 0 stop = idx + R target_words = words[start:idx] + words[idx+1:stop+1] return list(target_words) # test your code! # run this cell multiple times to check for random window selection int_text = [i for i in range(10)] print('Input: ', int_text) idx=5 # word index of interest target = get_target(int_text, idx=idx, window_size=5) print('Target: ', target) # you should get some indices around the idx
Input: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] Target: [0, 1, 2, 3, 4, 6, 7, 8, 9]
MIT
word2vec-embeddings/Negative_Sampling_My_Solution.ipynb
iromeo/deep-learning-v2-pytorch
Generating Batches Here's a generator function that returns batches of input and target data for our model, using the `get_target` function from above. The idea is that it grabs `batch_size` words from a words list. Then for each of those batches, it gets the target words in a window.
def get_batches(words, batch_size, window_size=5): ''' Create a generator of word batches as a tuple (inputs, targets) ''' n_batches = len(words)//batch_size # only full batches words = words[:n_batches*batch_size] for idx in range(0, len(words), batch_size): x, y = [], [] batch = words[idx:idx+batch_size] for ii in range(len(batch)): batch_x = batch[ii] batch_y = get_target(batch, ii, window_size) y.extend(batch_y) x.extend([batch_x]*len(batch_y)) yield x, y int_text = [i for i in range(20)] x,y = next(get_batches(int_text, batch_size=4, window_size=5)) print('x\n', x) print('y\n', y)
x [0, 0, 1, 1, 1, 2, 2, 2, 3, 3] y [1, 2, 0, 2, 3, 0, 1, 3, 1, 2]
MIT
word2vec-embeddings/Negative_Sampling_My_Solution.ipynb
iromeo/deep-learning-v2-pytorch
--- ValidationHere, I'm creating a function that will help us observe our model as it learns. We're going to choose a few common words and few uncommon words. Then, we'll print out the closest words to them using the cosine similarity: $$\mathrm{similarity} = \cos(\theta) = \frac{\vec{a} \cdot \vec{b}}{|\vec{a}||\vec{b}|}$$We can encode the validation words as vectors $\vec{a}$ using the embedding table, then calculate the similarity with each word vector $\vec{b}$ in the embedding table. With the similarities, we can print out the validation words and words in our embedding table semantically similar to those words. It's a nice way to check that our embedding table is grouping together words with similar semantic meanings.
def cosine_similarity(embedding, valid_size=16, valid_window=100, device='cpu'): """ Returns the cosine similarity of validation words with words in the embedding matrix. Here, embedding should be a PyTorch embedding module. """ # Here we're calculating the cosine similarity between some random words and # our embedding vectors. With the similarities, we can look at what words are # close to our random words. # sim = (a . b) / |a||b| embed_vectors = embedding.weight # magnitude of embedding vectors, |b| magnitudes = embed_vectors.pow(2).sum(dim=1).sqrt().unsqueeze(0) # pick N words from our ranges (0,window) and (1000,1000+window). lower id implies more frequent valid_examples = np.array(random.sample(range(valid_window), valid_size//2)) valid_examples = np.append(valid_examples, random.sample(range(1000,1000+valid_window), valid_size//2)) valid_examples = torch.LongTensor(valid_examples).to(device) valid_vectors = embedding(valid_examples) similarities = torch.mm(valid_vectors, embed_vectors.t())/magnitudes return valid_examples, similarities
_____no_output_____
MIT
word2vec-embeddings/Negative_Sampling_My_Solution.ipynb
iromeo/deep-learning-v2-pytorch
--- SkipGram modelDefine and train the SkipGram model. > You'll need to define an [embedding layer](https://pytorch.org/docs/stable/nn.htmlembedding) and a final, softmax output layer.An Embedding layer takes in a number of inputs, importantly:* **num_embeddings** – the size of the dictionary of embeddings, or how many rows you'll want in the embedding weight matrix* **embedding_dim** – the size of each embedding vector; the embedding dimensionBelow is an approximate diagram of the general structure of our network.>* The input words are passed in as batches of input word tokens. * This will go into a hidden layer of linear units (our embedding layer). * Then, finally into a softmax output layer. We'll use the softmax layer to make a prediction about the context words by sampling, as usual. --- Negative SamplingFor every example we give the network, we train it using the output from the softmax layer. That means for each input, we're making very small changes to millions of weights even though we only have one true example. This makes training the network very inefficient. We can approximate the loss from the softmax layer by only updating a small subset of all the weights at once. We'll update the weights for the correct example, but only a small number of incorrect, or noise, examples. This is called ["negative sampling"](http://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf). There are two modifications we need to make. First, since we're not taking the softmax output over all the words, we're really only concerned with one output word at a time. Similar to how we use an embedding table to map the input word to the hidden layer, we can now use another embedding table to map the hidden layer to the output word. Now we have two embedding layers, one for input words and one for output words. Secondly, we use a modified loss function where we only care about the true example and a small subset of noise examples.$$- \large \log{\sigma\left(u_{w_O}\hspace{0.001em}^\top v_{w_I}\right)} -\sum_i^N \mathbb{E}_{w_i \sim P_n(w)}\log{\sigma\left(-u_{w_i}\hspace{0.001em}^\top v_{w_I}\right)}$$This is a little complicated so I'll go through it bit by bit. $u_{w_O}\hspace{0.001em}^\top$ is the embedding vector for our "output" target word (transposed, that's the $^\top$ symbol) and $v_{w_I}$ is the embedding vector for the "input" word. Then the first term $$\large \log{\sigma\left(u_{w_O}\hspace{0.001em}^\top v_{w_I}\right)}$$says we take the log-sigmoid of the inner product of the output word vector and the input word vector. Now the second term, let's first look at $$\large \sum_i^N \mathbb{E}_{w_i \sim P_n(w)}$$ This means we're going to take a sum over words $w_i$ drawn from a noise distribution $w_i \sim P_n(w)$. The noise distribution is basically our vocabulary of words that aren't in the context of our input word. In effect, we can randomly sample words from our vocabulary to get these words. $P_n(w)$ is an arbitrary probability distribution though, which means we get to decide how to weight the words that we're sampling. This could be a uniform distribution, where we sample all words with equal probability. Or it could be according to the frequency that each word shows up in our text corpus, the unigram distribution $U(w)$. The authors found the best distribution to be $U(w)^{3/4}$, empirically. 
Finally, in $$\large \log{\sigma\left(-u_{w_i}\hspace{0.001em}^\top v_{w_I}\right)},$$ we take the log-sigmoid of the negated inner product of a noise vector with the input vector. To give you an intuition for what we're doing here, remember that the sigmoid function returns a probability between 0 and 1. The first term in the loss pushes the probability that our network will predict the correct word $w_O$ towards 1. In the second term, since we are negating the sigmoid input, we're pushing the probabilities of the noise words towards 0.
import torch
from torch import nn
import torch.optim as optim

tmp_emb = nn.Embedding(5, 2)
print(tmp_emb.weight.shape)
tmp_w = tmp_emb.weight
print(tmp_w)
print(tmp_w.data)
tmp_w.data.uniform_(-1, 1)
print(tmp_w.data)

class SkipGramNeg(nn.Module):
    def __init__(self, n_vocab, n_embed, noise_dist=None):
        super().__init__()
        self.n_vocab = n_vocab
        self.n_embed = n_embed
        self.noise_dist = noise_dist
        # define embedding layers for input and output words
        self.in_embed = nn.Embedding(n_vocab, n_embed)
        self.out_embed = nn.Embedding(n_vocab, n_embed)
        # Initialize both embedding tables with a uniform distribution
        self.in_embed.weight.data.uniform_(-1, 1)
        self.out_embed.weight.data.uniform_(-1, 1)
        # !! note: no linear layer / softmax

    def forward_input(self, input_words):
        # return input vector embeddings
        return self.in_embed(input_words)

    def forward_output(self, output_words):
        # return output vector embeddings
        return self.out_embed(output_words)

    def forward_noise(self, batch_size, n_samples):
        """ Generate noise vectors with shape (batch_size, n_samples, n_embed)"""
        if self.noise_dist is None:
            # Sample words uniformly
            noise_dist = torch.ones(self.n_vocab)
        else:
            noise_dist = self.noise_dist
        # Sample words from our noise distribution
        noise_words = torch.multinomial(noise_dist, batch_size * n_samples, replacement=True)
        device = "cuda" if self.out_embed.weight.is_cuda else "cpu"
        noise_words = noise_words.to(device)
        # get the noise embeddings and reshape them so that they have
        # dims (batch_size, n_samples, n_embed)
        noise_words = self.out_embed(noise_words).view(batch_size, n_samples, self.n_embed)
        return noise_words

class NegativeSamplingLoss(nn.Module):
    def __init__(self):
        super().__init__()

    def forward(self, input_vectors, output_vectors, noise_vectors):
        batch_size, embed_size = input_vectors.shape
        # Input vectors should be a batch of column vectors
        input_vectors = input_vectors.view(batch_size, embed_size, 1)
        # Output vectors should be a batch of row vectors
        # (shape (1, embed_size), i.e. the output vector is transposed)
        output_vectors = output_vectors.view(batch_size, 1, embed_size)
        # bmm = batch matrix multiplication
        # correct log-sigmoid loss
        out_loss = torch.bmm(output_vectors, input_vectors).sigmoid().log()
        out_loss = out_loss.squeeze()  # remove empty dimensions
        # incorrect log-sigmoid loss
        noise_loss = torch.bmm(noise_vectors.neg(), input_vectors).sigmoid().log()
        noise_loss = noise_loss.squeeze().sum(1)  # sum the losses over the sample of noise vectors
        # negate and sum correct and noisy log-sigmoid losses
        # return average batch loss
        return -(out_loss + noise_loss).mean()
_____no_output_____
MIT
word2vec-embeddings/Negative_Sampling_My_Solution.ipynb
iromeo/deep-learning-v2-pytorch
Training

Below is our training loop; I recommend that you train on a GPU, if one is available.
device = 'cuda' if torch.cuda.is_available() else 'cpu'

# Get our noise distribution
# Using word frequencies calculated earlier in the notebook
word_freqs = np.array(sorted(freqs.values(), reverse=True))
unigram_dist = word_freqs/word_freqs.sum()
noise_dist = torch.from_numpy(unigram_dist**(0.75)/np.sum(unigram_dist**(0.75)))

# instantiating the model
embedding_dim = 300
model = SkipGramNeg(len(vocab_to_int), embedding_dim, noise_dist=noise_dist).to(device)

# using the loss that we defined
criterion = NegativeSamplingLoss()
optimizer = optim.Adam(model.parameters(), lr=0.003)

print_every = 1500
steps = 0
epochs = 5

# train for some number of epochs
for e in range(epochs):

    # get our input, target batches
    for input_words, target_words in get_batches(train_words, 512):
        steps += 1
        inputs, targets = torch.LongTensor(input_words), torch.LongTensor(target_words)
        inputs, targets = inputs.to(device), targets.to(device)

        # input, output, and noise vectors
        input_vectors = model.forward_input(inputs)
        output_vectors = model.forward_output(targets)
        noise_vectors = model.forward_noise(inputs.shape[0], 5)  # batch size: inputs.shape[0]; 5 noise vectors per example

        # negative sampling loss
        loss = criterion(input_vectors, output_vectors, noise_vectors)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # loss stats
        if steps % print_every == 0:
            print("Epoch: {}/{}".format(e+1, epochs))
            print("Loss: ", loss.item())  # avg batch loss at this point in training
            valid_examples, valid_similarities = cosine_similarity(model.in_embed, device=device)
            _, closest_idxs = valid_similarities.topk(6)
            valid_examples, closest_idxs = valid_examples.to('cpu'), closest_idxs.to('cpu')
            for ii, valid_idx in enumerate(valid_examples):
                closest_words = [int_to_vocab[idx.item()] for idx in closest_idxs[ii]][1:]
                print(int_to_vocab[valid_idx.item()] + " | " + ', '.join(closest_words))
            print("...\n")

# change the name, for saving multiple files
model_name = 'w2v_skip_gram_ns_5_epoch.net'

checkpoint = {'n_vocab': model.n_vocab,
              'n_embed': model.n_embed,
              'state_dict': model.state_dict(),
              'noise_dist': model.noise_dist}

with open(model_name, 'wb') as f:
    torch.save(checkpoint, f)

with open('w2v_skip_gram_ns_5_epoch.net', 'rb') as f:
    checkpoint = torch.load(f, map_location=torch.device('cpu'))

model = SkipGramNeg(checkpoint['n_vocab'], checkpoint['n_embed'], noise_dist=checkpoint['noise_dist']).to(device)
model.load_state_dict(checkpoint['state_dict'])
_____no_output_____
MIT
word2vec-embeddings/Negative_Sampling_My_Solution.ipynb
iromeo/deep-learning-v2-pytorch
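The training loop above relies on two helpers, get_batches and cosine_similarity, that are defined earlier in the notebook and not repeated here. For readers jumping in at this point, here is a minimal sketch of what the validation helper might look like; the signature and the choice of sampling 16 validation words from the most common 100 (plus a less frequent batch offset by 1000) are assumptions, not the notebook's definitive implementation. Dividing by the magnitudes of the table vectors is enough for ranking nearest neighbours per row, which is all topk needs.

import torch
import numpy as np

def cosine_similarity(embedding, valid_size=16, valid_window=100, device='cpu'):
    """Pick a few validation word ids and return their cosine similarity
    to every word in the embedding table. (Sketch; assumed signature.)"""
    embed_vectors = embedding.weight                                   # (n_vocab, n_embed)
    magnitudes = embed_vectors.pow(2).sum(dim=1).sqrt().unsqueeze(0)   # (1, n_vocab)

    # sample some common words and some less common words as validation examples
    valid_examples = np.array(np.random.choice(valid_window, valid_size // 2, replace=False))
    valid_examples = np.append(valid_examples,
                               np.random.choice(valid_window, valid_size // 2, replace=False) + 1000)
    valid_examples = torch.LongTensor(valid_examples).to(device)

    valid_vectors = embedding(valid_examples)                           # (valid_size, n_embed)
    similarities = torch.mm(valid_vectors, embed_vectors.t()) / magnitudes
    return valid_examples, similarities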
Visualizing the word vectors

Below we'll use T-SNE to visualize how our high-dimensional word vectors cluster together. T-SNE projects these vectors into two dimensions while preserving local structure. Check out [this post from Christopher Olah](http://colah.github.io/posts/2014-10-Visualizing-MNIST/) to learn more about T-SNE and other ways to visualize high-dimensional data.
%matplotlib inline
%config InlineBackend.figure_format = 'retina'

import matplotlib.pyplot as plt
from sklearn.manifold import TSNE

# getting embeddings from the embedding layer of our model, by name
embeddings = model.in_embed.weight.to('cpu').data.numpy()

viz_words = 380
tsne = TSNE()
embed_tsne = tsne.fit_transform(embeddings[:viz_words, :])

fig, ax = plt.subplots(figsize=(16, 16))
for idx in range(viz_words):
    plt.scatter(*embed_tsne[idx, :], color='steelblue')
    plt.annotate(int_to_vocab[idx], (embed_tsne[idx, 0], embed_tsne[idx, 1]), alpha=0.7)
_____no_output_____
MIT
word2vec-embeddings/Negative_Sampling_My_Solution.ipynb
iromeo/deep-learning-v2-pytorch
Importing modules
import json
import pandas as pd
import numpy as np
_____no_output_____
MIT
Q7_Asgn1.nbconvert.ipynb
sunil-dhaka/india-covid19-cases-and-vaccination-analysis
Read cowin csv file
data=pd.read_csv('cowin_vaccine_data_districtwise.csv')
/home/sunild/anaconda3/lib/python3.8/site-packages/IPython/core/interactiveshell.py:3169: DtypeWarning: Columns (6,7,8,…,2895) have mixed types. Specify dtype option on import or set low_memory=False.
  has_raised = await self.run_ast_nodes(code_ast.body, cell_name,
MIT
Q7_Asgn1.nbconvert.ipynb
sunil-dhaka/india-covid19-cases-and-vaccination-analysis
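The DtypeWarning above comes from pandas inferring column types chunk by chunk. If you want to silence it, the warning's own suggestion works; this is a minimal sketch of the two options it mentions, not a change to the analysis itself.

# read everything in one pass so pandas infers consistent dtypes (uses more memory)
data = pd.read_csv('cowin_vaccine_data_districtwise.csv', low_memory=False)

# alternative: read all columns as strings and cast the numeric ones later
# data = pd.read_csv('cowin_vaccine_data_districtwise.csv', dtype=str)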
Load modified json file from Q1
## district level
# district data from json
f=open('neighbor-districts-modified.json')
districts_data=json.load(f)

district_names=[]
district_ids=[]
for key in districts_data:
    district_names.append(key.split('/')[0])
    district_ids.append(key.split('/')[1])

Districts=data['District_Key'].str.lower()
_____no_output_____
MIT
Q7_Asgn1.nbconvert.ipynb
sunil-dhaka/india-covid19-cases-and-vaccination-analysis
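The split above assumes every key in neighbor-districts-modified.json has the form name/id (for example, a hypothetical key like 'mumbai/district_519'). A quick sanity check along these lines can catch keys that don't match that pattern; it is only a sketch, not part of the original notebook.

# sanity check (sketch): every key should split into exactly a name and an id
bad_keys = [key for key in districts_data if len(key.split('/')) != 2]
print(len(bad_keys), 'keys do not follow the name/id pattern')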
Prepare data frames for Covaxin and CoviShield vaccine numbers
## dose1 = Covaxin
## dose2 = CoviShield
# the CSV stores cumulative totals; diff(axis=1) converts them to per-day counts,
# with the first date's total put back afterwards

data_dose1=data.loc[:,(data.loc[0,]=='Covaxin (Doses Administered)')].iloc[1:,:].fillna(0)
first_date_dose1=data_dose1.iloc[:,0]
data_dose1=data_dose1.astype(int).diff(axis=1)
data_dose1.iloc[:,0]=first_date_dose1
data_dose1['District']=data['District_Key']

data_dose2=data.loc[:,(data.loc[0,]=='CoviShield (Doses Administered)')].iloc[1:,:].fillna(0)
first_date_dose2=data_dose2.iloc[:,0]
data_dose2=data_dose2.astype(int).diff(axis=1)
data_dose2.iloc[:,0]=first_date_dose2
data_dose2['District']=data['District_Key']
_____no_output_____
MIT
Q7_Asgn1.nbconvert.ipynb
sunil-dhaka/india-covid19-cases-and-vaccination-analysis
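Since the CoWIN columns hold cumulative totals, the .diff(axis=1) call above turns them into per-day counts, with the first date kept as-is. A toy frame with made-up dates and numbers shows the effect; it is only an illustration of the transformation, not data from the CSV.

toy = pd.DataFrame({'16/01/2021': [10, 3],
                    '17/01/2021': [15, 7],
                    '18/01/2021': [21, 7]})
first_date = toy.iloc[:, 0]
daily = toy.astype(int).diff(axis=1)   # NaN in the first column, day-over-day change elsewhere
daily.iloc[:, 0] = first_date          # restore the first date's total, as in the cell above
print(daily)                           # 17/01 column becomes 5 and 4, 18/01 becomes 6 and 0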