Melanoma Diagnoses
import os

import keras
import pandas as pd
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
%matplotlib inline

from keras import applications
from keras.preprocessing.image import ImageDataGenerator
from keras.preprocessing import image
from keras import optimizers
from keras.models import Sequential, Model
from keras.layers import Dropout, Flatten, Dense, GlobalAveragePooling2D
from keras import backend as k
from keras.callbacks import ModelCheckpoint, LearningRateScheduler, TensorBoard, EarlyStopping
from keras.models import load_model
from sklearn.metrics import roc_curve, auc

import get_results
Using TensorFlow backend.
MIT
solution.ipynb
ezhilvendhan/dermatogist-ai-solution
Import Images
def load_image(infilename):
    img = Image.open(infilename)
    img.load()
    data = np.asarray(img, dtype="float32")
    return data

data_dir = 'data'
train_dir = data_dir + '/train'
valid_dir = data_dir + '/valid'
test_dir = data_dir + '/test'

fig = plt.figure(figsize=(20, 5))
train_files = os.listdir(train_dir + "/melanoma")[:5]
for i in range(5):
    ax = fig.add_subplot(3, 12, i + 1, xticks=[], yticks=[])
    ax.imshow(load_image(train_dir + "/melanoma/" + train_files[i]))
Image Data Transformation
img_width, img_height = 256, 256
batch_size = 16
epochs = 10

train_datagen = ImageDataGenerator(
    rescale=1./255,
    horizontal_flip=True,
    # fill_mode="nearest",
    zoom_range=0.2,
    # width_shift_range=0.3,
    # height_shift_range=0.3,
    shear_range=0.2,
    # rotation_range=30
)
test_datagen = ImageDataGenerator(rescale=1./255)

train_generator = train_datagen.flow_from_directory(
    train_dir,
    target_size=(img_height, img_width),
    batch_size=batch_size,
    class_mode="categorical")

validation_generator = test_datagen.flow_from_directory(
    valid_dir,
    target_size=(img_height, img_width),
    class_mode="categorical")

test_generator = test_datagen.flow_from_directory(
    test_dir,
    target_size=(img_height, img_width),
    class_mode="categorical")

train_generator.image_shape
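Keras assigns class indices by sorting the class subdirectory names alphabetically, so for this directory layout the mapping should be melanoma → 0, nevus → 1, seborrheic_keratosis → 2. The prediction code later in the notebook relies on that ordering, so it is worth confirming; a quick check (the expected output in the comment is an assumption based on the directories above):

```python
# flow_from_directory sorts class subdirectories alphabetically, so we expect:
# {'melanoma': 0, 'nevus': 1, 'seborrheic_keratosis': 2}
print(train_generator.class_indices)
```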
Fine-tune Model
# Create the base pre-trained model
vgg_model = applications.VGG19(weights="imagenet", include_top=False,
                               input_shape=(img_width, img_height, 3))

# (This partial freeze is immediately superseded by the full freeze below.)
for layer in vgg_model.layers[:5]:
    layer.trainable = False

# Add custom layers on top of the convolutional base
x = vgg_model.output
x = GlobalAveragePooling2D()(x)
# Add a fully-connected layer
x = Dense(512, activation='relu')(x)
x = Dropout(0.5)(x)
# Add the output layer (3 classes)
predictions = Dense(3, activation='softmax')(x)

model_final = Model(inputs=vgg_model.input, outputs=predictions)

# Freeze every layer of the pre-trained base so only the newly added layers are updated
for layer in vgg_model.layers:
    layer.trainable = False

model_final.compile(optimizer='rmsprop', loss='categorical_crossentropy')

model_final.fit_generator(
    train_generator,
    steps_per_epoch=train_generator.samples / train_generator.batch_size,
    epochs=3,
    validation_data=validation_generator,
    validation_steps=validation_generator.samples / validation_generator.batch_size)
Epoch 1/3
125/125 [==============================] - 325s - loss: 0.8962 - val_loss: 1.2098
Epoch 2/3
125/125 [==============================] - 309s - loss: 0.8286 - val_loss: 1.2344
Epoch 3/3
125/125 [==============================] - 302s - loss: 0.8126 - val_loss: 1.1027
Train Model
# Save the model according to the conditions
checkpoint = ModelCheckpoint("cancer_vgg19_wt.h5", monitor='val_acc', verbose=2,
                             save_best_only=True, save_weights_only=False,
                             mode='auto', period=1)
early = EarlyStopping(monitor='val_acc', min_delta=0, patience=3, verbose=2, mode='auto')

# Choose the layers which are updated by training
layer_num = len(model_final.layers)
for layer in model_final.layers[:21]:
    layer.trainable = False
for layer in model_final.layers[21:]:
    layer.trainable = True

# Compile with SGD for fine-tuning
model_final.compile(optimizer=optimizers.SGD(lr=0.001, momentum=0.9),
                    loss='categorical_crossentropy',
                    metrics=['accuracy'])

# Train the model
model_final.fit_generator(
    train_generator,
    steps_per_epoch=train_generator.samples / train_generator.batch_size,
    epochs=epochs,
    validation_data=validation_generator,
    validation_steps=validation_generator.samples / validation_generator.batch_size,
    callbacks=[checkpoint, early])

model_final.save('cancer_vgg19_model.h5')
model_final = load_model('cancer_vgg19_model.h5')

# Sanity-check single predictions, one image per class
img_path = 'data/test/nevus/ISIC_0012803.jpg'
img = image.load_img(img_path, target_size=(256, 256))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = test_datagen.standardize(x)
model_final.predict(x, verbose=2)

img_path = 'data/test/melanoma/ISIC_0012989.jpg'
img = image.load_img(img_path, target_size=(256, 256))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = test_datagen.standardize(x)
model_final.predict(x, verbose=2)

img_path = 'data/test/seborrheic_keratosis/ISIC_0012323.jpg'
img = image.load_img(img_path, target_size=(256, 256))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = test_datagen.standardize(x)
model_final.predict(x, verbose=2)

# Evaluate and predict on the full test set
model_final.evaluate_generator(test_generator,
                               steps=test_generator.samples / test_generator.batch_size)
y_hat = model_final.predict_generator(test_generator,
                                      steps=test_generator.samples / test_generator.batch_size)

def get_pred(img_path):
    img = image.load_img(img_path, target_size=(256, 256))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = test_datagen.standardize(x)
    return model_final.predict(x)

# Build the submission file: task_1 is the melanoma probability (class index 0),
# task_2 is the seborrheic keratosis probability (class index 2)
test_files = pd.read_csv('sample_predictions.csv')
for index, row in test_files.iterrows():
    pred = get_pred(row['Id'])
    test_files.loc[index, 'task_1'] = pred[0][0]
    test_files.loc[index, 'task_2'] = pred[0][2]
test_files.head()
test_files.to_csv('submission_predictions.csv')
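The imports at the top of the notebook bring in `roc_curve` and `auc` but never call them. As a minimal sketch of how the submission could be scored (assuming a hypothetical `ground_truth.csv` with binary `task_1`/`task_2` label columns aligned with the submission's `Id` ordering, which is not part of the original notebook):

```python
import pandas as pd
from sklearn.metrics import roc_curve, auc

# Hypothetical ground-truth file; the file name and columns are assumptions.
truth = pd.read_csv('ground_truth.csv')            # columns: Id, task_1, task_2 (0/1)
preds = pd.read_csv('submission_predictions.csv')

for task in ['task_1', 'task_2']:
    # roc_curve returns false-positive rates, true-positive rates, and thresholds
    fpr, tpr, _ = roc_curve(truth[task], preds[task])
    print('{} ROC AUC: {:.3f}'.format(task, auc(fpr, tpr)))
```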
Table of Contents
import geopandas
import plotly
MIT
i2i/fspmaps/Phase-6-newData.ipynb
Vizzuality/notebooks
Section 4

Try another name

You are still working on your Twitter sentiment analysis. You now analyze some things that caught your attention. You noticed that there are email addresses inserted in some tweets. Now, you are curious to find out which is the most common name.

You want to extract the first part of the email. E.g. if you have the email [email protected], you are only interested in marysmith90.

You need to match the entire expression, so make sure to extract only names present in emails. Also, you are only interested in names containing uppercase letters (e.g. A, B, Z) or lowercase letters (e.g. a, d, z) and numbers.

The list sentiment_analysis containing the text of three tweets as well as the re module were loaded in your session. You can use print() to view it in the IPython Shell.
sentiment_analysis = ['Just got ur newsletter, those fares really are unbelievable. Write to [email protected] or [email protected]. They have amazing prices',
                      'I should have paid more attention when we covered photoshop in my webpage design class in undergrad. Contact me [email protected].',
                      'hey missed ya at the meeting. Read your email! [email protected]']

import re

# Write a regex that matches the name part of an email
regex_email = r"([A-Za-z0-9]+)@\S+"

for tweet in sentiment_analysis:
    # Find all matches of regex in each tweet
    email_matched = re.findall(regex_email, tweet)

    # Complete the format method to print the results
    print("Lists of users found in this tweet: {}".format(email_matched))
Lists of users found in this tweet: ['statravelAU', 'statravelpo']
Lists of users found in this tweet: ['Hollywoodheat34']
Lists of users found in this tweet: ['msdrama098']
MIT
regular-expressions-in-python/4. Advanced Regular Expression Concepts/notebook_section_4.ipynb
nhutnamhcmus/datacamp-playground
Flying home

Your boss assigned you to a small project. They are performing an analysis of the travels people made to attend business meetings. You are given a dataset with only the email subjects for each of the people traveling.

You learn that the text follows a pattern. Here is an example: Here you have your boarding pass LA4214 AER-CDB 06NOV.

You need to extract the information about the flight:
- The two letters indicate the airline (e.g. LA).
- The 4 numbers are the flight number (e.g. 4214).
- The three letters correspond to the departure (e.g. AER).
- The destination (CDB).
- The date (06NOV) of the flight.
- All letters are always uppercase.

The variable flight containing one email subject was loaded in your session. You can use print() to view it in the IPython Shell.
# Import re
import re

# Write regex to capture information of the flight
regex = r"([A-Z]{2})(\d{4})\s([A-Z]{3})-([A-Z]{3})\s(\d{2}[A-Z]{3})"

flight = 'Subject: You are now ready to fly. Here you have your boarding pass IB3723 AMS-MAD 06OCT'

# Find all matches of the flight information
flight_matches = re.findall(regex, flight)

# Print the matches
print("Airline: {} Flight number: {}".format(flight_matches[0][0], flight_matches[0][1]))
print("Departure: {} Destination: {}".format(flight_matches[0][2], flight_matches[0][3]))
print("Date: {}".format(flight_matches[0][4]))
Airline: IB Flight number: 3723
Departure: AMS Destination: MAD
Date: 06OCT
Love it!

You are still working on the Twitter sentiment analysis project. First, you want to identify positive tweets about movies and concerts.

You plan to find all the sentences that contain the words love, like, or enjoy and capture that word. You will limit the tweets by focusing on those that contain the words movie or concert by keeping the word in another group. You will also save the movie or concert name.

For example, if you have the sentence I love the movie Avengers, you match and capture love. You need to match and capture movie. Afterwards, you match and capture anything until the dot.

The list sentiment_analysis containing the text of three tweets and the re module are loaded in your session. You can use print() to view the data in the IPython Shell.
sentiment_analysis = ['I totally love the concert The Book of Souls World Tour. It kinda amazing!',
                      'I enjoy the movie Wreck-It Ralph. I watched with my boyfriend.',
                      "I still like the movie Wish Upon a Star. Too bad Disney doesn't show it anymore."]

# Write a regex that captures the positive word, the word movie/concert, and the name
regex_positive = r"(love|like|enjoy).+?(movie|concert)\s(.+?)\."

for tweet in sentiment_analysis:
    # Find all matches of regex in tweet
    positive_matches = re.findall(regex_positive, tweet)

    # Complete format to print out the results
    print("Positive comments found {}".format(positive_matches))
Positive comments found [('love', 'concert', 'The Book of Souls World Tour')]
Positive comments found [('enjoy', 'movie', 'Wreck-It Ralph')]
Positive comments found [('like', 'movie', 'Wish Upon a Star')]
Ugh! Not for me!

After finding positive tweets, you want to do the same for negative tweets. Your plan now is to find sentences that contain the words hate, dislike, or disapprove. You will again save the movie or concert name. You will get the tweet containing the words movie or concert, but this time you don't plan to save the word.

For example, if you have the sentence I dislike the movie Avengers a lot, you match and capture dislike. You will match but not capture the word movie. Afterwards, you match and capture anything until the dot.

The list sentiment_analysis containing the text of three tweets as well as the re module are loaded in your session. You can use print() to view the data in the IPython Shell.
sentiment_analysis = ['That was horrible! I really dislike the movie The cabin and the ant. So boring.',
                      "I disapprove the movie Honest with you. It's full of cliches.",
                      'I dislike very much the concert After twelve Tour. The sound was horrible.']

# Write a regex that captures the negative word and the name, using a
# non-capturing group (?:...) for movie/concert
regex_negative = r"(hate|dislike|disapprove).+?(?:movie|concert)\s(.+?)\."

for tweet in sentiment_analysis:
    # Find all matches of regex in tweet
    negative_matches = re.findall(regex_negative, tweet)

    # Complete format to print out the results
    print("Negative comments found {}".format(negative_matches))
Negative comments found [('dislike', 'The cabin and the ant')]
Negative comments found [('disapprove', 'Honest with you')]
Negative comments found [('dislike', 'After twelve Tour')]
Parsing PDF files

You now need to work on another small project you have been delaying. Your company gave you some PDF files of signed contracts. The goal of the project is to create a database with the information you parse from them. Three of these columns should correspond to the day, month, and year when the contract was signed.

The dates appear as Signed on 05/24/2016 (05 indicating the month, 24 the day). You decide to use capturing groups to extract this information. Also, you would like to retrieve that information so you can store it separately in different variables.

You decide to do a proof of concept.

The variable contract containing the text of one contract and the re module are already loaded in your session. You can use print() to view the data in the IPython Shell.
contract = 'Provider will invoice Client for Services performed within 30 days of performance. Client will pay Provider as set forth in each Statement of Work within 30 days of receipt and acceptance of such invoice. It is understood that payments to Provider for services rendered shall be made in full as agreed, without any deductions for taxes of any kind whatsoever, in conformity with Provider’s status as an independent contractor. Signed on 03/25/2001.'

# Write regex and scan contract to capture the dates described
regex_dates = r"Signed\son\s(\d{2})/(\d{2})/(\d{4})"
dates = re.search(regex_dates, contract)

# Assign to each key the corresponding match
signature = {
    "day": dates.group(2),
    "month": dates.group(1),
    "year": dates.group(3)
}

# Complete the format method to print out the details
print("Our first contract is dated back to {data[year]}. Particularly, the day {data[day]} of the month {data[month]}.".format(data=signature))
Our first contract is dated back to 2001. Particularly, the day 25 of the month 03.
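Mapping `group(1)`/`group(2)`/`group(3)` to month, day, and year by hand is easy to get backwards. Named capturing groups, a standard `re` feature not required by the exercise, make the mapping explicit; a minimal sketch:

```python
import re

contract = 'Signed on 03/25/2001.'
regex_dates = r"Signed\son\s(?P<month>\d{2})/(?P<day>\d{2})/(?P<year>\d{4})"
dates = re.search(regex_dates, contract)

# groupdict() returns {'month': '03', 'day': '25', 'year': '2001'}
signature = dates.groupdict()
print("Day {day} of month {month}, {year}".format(**signature))
```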
Close the tag, please!

In the meantime, you are working on one of your other projects. The company is going to develop a new product. It will help developers automatically check the code they are writing. You need to write a short script for checking that every HTML tag that is open has its proper closure.

You have an example of a string containing HTML tags: <title>The Data Science Company</title>

You learn that an opening HTML tag is always at the beginning of the string and appears inside <>. A closing tag also appears inside <>, but it is preceded by /.

You also remember that capturing groups can be referenced using numbers, e.g. \4.

The list html_tags, containing three strings with HTML tags, and the re module are loaded in your session. You can use print() to view the data in the IPython Shell.
html_tags = ['<body>Welcome to our course! It would be an awesome experience</body>',
             '<article>To be a data scientist, you need to have knowledge in statistics and mathematics</article>',
             '<nav>About me Links Contact me!']

for string in html_tags:
    # Complete the regex and find if it matches a closed HTML tag
    match_tag = re.match(r"<(\w+)>.*?</\1>", string)

    if match_tag:
        # If it matches print the first group capture
        print("Your tag {} is closed".format(match_tag.group(1)))
    else:
        # If it doesn't match capture only the tag
        notmatch_tag = re.match(r"<(\w+)>", string)

        # Print the first group capture
        print("Close your {} tag!".format(notmatch_tag.group(1)))
Your tag body is closed
Your tag article is closed
Close your nav tag!
Reeepeated characters

Back to your sentiment analysis! Your next task is to replace elongated words that appear in the tweets. We define an elongated word as a word that contains a repeating character twice or more times, e.g. "Awesoooome".

Replacing those words is very important since a classifier will treat them as a different term from the source words, lowering their frequency.

To find them, you will use capturing groups and reference them back using numbers, e.g. \4. If you want to find a match for Awesoooome, you first need to capture Awes. Then, match o and reference the same character back, and then, me.

The list sentiment_analysis, containing the text of three tweets, and the re module are loaded in your session. You can use print() to view the data in the IPython Shell.
sentiment_analysis = ['@marykatherine_q i know! I heard it this morning and wondered the same thing. Moscooooooow is so behind the times',
                      'Staying at a friends house...neighborrrrrrrs are so loud-having a party',
                      'Just woke up an already have read some e-mail']

# Complete the regex to match an elongated word
regex_elongated = r"\w*(\w)\1\w*"

for tweet in sentiment_analysis:
    # Find if there is a match in each tweet
    match_elongated = re.search(regex_elongated, tweet)

    if match_elongated:
        # Assign the captured group zero
        elongated_word = match_elongated.group(0)

        # Complete the format method to print the word
        print("Elongated word found: {word}".format(word=elongated_word))
    else:
        print("No elongated word found")
Elongated word found: Moscooooooow
Elongated word found: neighborrrrrrrs
No elongated word found
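The exercise only detects elongated words, while the stated goal is to replace them. A minimal sketch of the replacement step using a backreference in `re.sub` (the collapse-to-one-character rule is an assumption; it also collapses legitimate double letters, e.g. "committee" becomes "comite"):

```python
import re

def collapse_elongations(text):
    # Replace any run of two or more identical word characters with a single one,
    # e.g. "Moscooooooow" -> "Moscow", "neighborrrrrrrs" -> "neighbors".
    return re.sub(r"(\w)\1+", r"\1", text)

print(collapse_elongations("Moscooooooow is so behind the times"))
# Moscow is so behind the times
```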
Surrounding words

Now, you want to perform some visualizations with your sentiment_analysis dataset. You are interested in the words surrounding python. You want to count how many times a specific word appears right before and after it.

Positive lookahead (?=) makes sure that the first part of the expression is followed by the lookahead expression. Positive lookbehind (?<=) returns all matches that are preceded by the specified pattern.

The variable sentiment_analysis, containing the text of one tweet, and the re module are loaded in your session. You can use print() to view the data in the IPython Shell.
sentiment_analysis = 'You need excellent python skills to be a data scientist. Must be! Excellent python'

# Positive lookahead
look_ahead = re.findall(r"\w+(?=\spython)", sentiment_analysis)

# Print out
print(look_ahead)

# Positive lookbehind
look_behind = re.findall(r"(?<=[Pp]ython\s)\w+", sentiment_analysis)

# Print out
print(look_behind)
['excellent', 'Excellent']
['skills']
Filtering phone numbers

Now, you need to write a script for a cell-phone searcher. It should scan a list of phone numbers and return those that meet certain characteristics.

The phone numbers in the list have the structure:
- Optional area code: 3 numbers
- Prefix: 4 numbers
- Line number: 6 numbers
- Optional extension: 2 numbers

E.g. 654-8764-439434-01.

You decide to use .findall() and the non-capturing group's negative lookahead (?!) and negative lookbehind (?<!).

The list cellphones, containing three phone numbers, and the re module are loaded in your session. You can use print() to view the data in the IPython Shell.
cellphones = ['4564-646464-01', '345-5785-544245', '6476-579052-01']

for phone in cellphones:
    # Get all phone numbers not preceded by an area code
    number = re.findall(r"(?<!\d{3}-)\d{4}-\d{6}-\d{2}", phone)
    print(number)

for phone in cellphones:
    # Get all phone numbers not followed by the optional extension
    number = re.findall(r"\d{3}-\d{4}-\d{6}(?!-\d{2})", phone)
    print(number)
['4564-646464-01']
[]
['6476-579052-01']
[]
['345-5785-544245']
[]
Batch Normalization – Lesson

1. [What is it?](#theory)
2. [What are its benefits?](#benefits)
3. [How do we add it to a network?](#implementation_1)
4. [Let's see it work!](#demos)
5. [What are you hiding?](#implementation_2)

What is Batch Normalization?

Batch normalization was introduced in Sergey Ioffe's and Christian Szegedy's 2015 paper [Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift](https://arxiv.org/pdf/1502.03167.pdf). The idea is that, instead of just normalizing the inputs to the network, we normalize the inputs to _layers within_ the network. It's called "batch" normalization because during training, we normalize each layer's inputs by using the mean and variance of the values in the current mini-batch.

Why might this help? Well, we know that normalizing the inputs to a _network_ helps the network learn. But a network is a series of layers, where the output of one layer becomes the input to another. That means we can think of any layer in a neural network as the _first_ layer of a smaller network.

For example, imagine a 3 layer network. Instead of just thinking of it as a single network with inputs, layers, and outputs, think of the output of layer 1 as the input to a two layer network. This two layer network would consist of layers 2 and 3 in our original network. Likewise, the output of layer 2 can be thought of as the input to a single layer network, consisting only of layer 3.

When you think of it like that - as a series of neural networks feeding into each other - then it's easy to imagine how normalizing the inputs to each layer would help. It's just like normalizing the inputs to any other neural network, but you're doing it at every layer (sub-network).

Beyond the intuitive reasons, there are good mathematical reasons why it helps the network learn better, too. It helps combat what the authors call _internal covariate shift_. This discussion is best handled [in the paper](https://arxiv.org/pdf/1502.03167.pdf) and in [Deep Learning](http://www.deeplearningbook.org), a book you can read online written by Ian Goodfellow, Yoshua Bengio, and Aaron Courville. Specifically, check out the batch normalization section of [Chapter 8: Optimization for Training Deep Models](http://www.deeplearningbook.org/contents/optimization.html).

Benefits of Batch Normalization

Batch normalization optimizes network training. It has been shown to have several benefits:

1. **Networks train faster** – Each training _iteration_ will actually be slower because of the extra calculations during the forward pass and the additional hyperparameters to train during back propagation. However, it should converge much more quickly, so training should be faster overall.
2. **Allows higher learning rates** – Gradient descent usually requires small learning rates for the network to converge. And as networks get deeper, their gradients get smaller during back propagation, so they require even more iterations. Using batch normalization allows us to use much higher learning rates, which further increases the speed at which networks train.
3. **Makes weights easier to initialize** – Weight initialization can be difficult, and it's even more difficult when creating deeper networks. Batch normalization seems to allow us to be much less careful about choosing our initial starting weights.
4. **Makes more activation functions viable** – Some activation functions do not work well in some situations. Sigmoids lose their gradient pretty quickly, which means they can't be used in deep networks. And ReLUs often die out during training, where they stop learning completely, so we need to be careful about the range of values fed into them. Because batch normalization regulates the values going into each activation function, non-linearities that don't seem to work well in deep networks actually become viable again.
5. **Simplifies the creation of deeper networks** – Because of the first 4 items listed above, it is easier to build and faster to train deeper neural networks when using batch normalization. And it's been shown that deeper networks generally produce better results, so that's great.
6. **Provides a bit of regularization** – Batch normalization adds a little noise to your network. In some cases, such as in Inception modules, batch normalization has been shown to work as well as dropout. But in general, consider batch normalization as a bit of extra regularization, possibly allowing you to reduce some of the dropout you might add to a network.
7. **May give better results overall** – Some tests seem to show batch normalization actually improves the training results. However, it's really an optimization to help train faster, so you shouldn't think of it as a way to make your network better. But since it lets you train networks faster, that means you can iterate over more designs more quickly. It also lets you build deeper networks, which are usually better. So when you factor in everything, you're probably going to end up with better results if you build your networks with batch normalization.

Batch Normalization in TensorFlow

This section of the notebook shows you one way to add batch normalization to a neural network built in TensorFlow. The following cell imports the packages we need in the notebook and loads the MNIST dataset to use in our experiments. However, the `tensorflow` package contains all the code you'll actually need for batch normalization.
# Import necessary packages
import tensorflow as tf
import tqdm
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline

# Import MNIST data so we have something for our experiments
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
Successfully downloaded train-images-idx3-ubyte.gz 9912422 bytes.
Extracting MNIST_data/train-images-idx3-ubyte.gz
Successfully downloaded train-labels-idx1-ubyte.gz 28881 bytes.
Extracting MNIST_data/train-labels-idx1-ubyte.gz
Successfully downloaded t10k-images-idx3-ubyte.gz 1648877 bytes.
Extracting MNIST_data/t10k-images-idx3-ubyte.gz
Successfully downloaded t10k-labels-idx1-ubyte.gz 4542 bytes.
Extracting MNIST_data/t10k-labels-idx1-ubyte.gz
MIT
batch-norm/Batch_Normalization_Lesson.ipynb
JJINDAHOUSE/deep-learning
Neural network classes for testing

The following class, `NeuralNet`, allows us to create identical neural networks with and without batch normalization. The code is heavily documented, but there is also some additional discussion later. You do not need to read through it all before going through the rest of the notebook, but the comments within the code blocks may answer some of your questions.

*About the code:*

> This class is not meant to represent TensorFlow best practices – the design choices made here are to support the discussion related to batch normalization.
>
> It's also important to note that we use the well-known MNIST data for these examples, but the networks we create are not meant to be good for performing handwritten character recognition. We chose this network architecture because it is similar to the one used in the original paper, which is complex enough to demonstrate some of the benefits of batch normalization while still being fast to train.
class NeuralNet:
    def __init__(self, initial_weights, activation_fn, use_batch_norm):
        """
        Initializes this object, creating a TensorFlow graph using the given parameters.

        :param initial_weights: list of NumPy arrays or Tensors
            Initial values for the weights for every layer in the network. We pass these in
            so we can create multiple networks with the same starting weights to eliminate
            training differences caused by random initialization differences.
            The number of items in the list defines the number of layers in the network,
            and the shapes of the items in the list define the number of nodes in each layer.
            e.g. Passing in 3 matrices of shape (784, 256), (256, 100), and (100, 10) would
            create a network with 784 inputs going into a hidden layer with 256 nodes,
            followed by a hidden layer with 100 nodes, followed by an output layer with 10 nodes.
        :param activation_fn: Callable
            The function used for the output of each hidden layer. The network will use the same
            activation function on every hidden layer and no activation function on the output layer.
            e.g. Pass tf.nn.relu to use ReLU activations on your hidden layers.
        :param use_batch_norm: bool
            Pass True to create a network that uses batch normalization; False otherwise.
            Note: this network will not use batch normalization on layers that do not have an
            activation function.
        """
        # Keep track of whether or not this network uses batch normalization.
        self.use_batch_norm = use_batch_norm
        self.name = "With Batch Norm" if use_batch_norm else "Without Batch Norm"

        # Batch normalization needs to do different calculations during training and inference,
        # so we use this placeholder to tell the graph which behavior to use.
        self.is_training = tf.placeholder(tf.bool, name="is_training")

        # This list is just for keeping track of data we want to plot later.
        # It doesn't actually have anything to do with neural nets or batch normalization.
        self.training_accuracies = []

        # Create the network graph, but it will not actually have any real values until after you
        # call train or test
        self.build_network(initial_weights, activation_fn)

    def build_network(self, initial_weights, activation_fn):
        """
        Build the graph. The graph still needs to be trained via the `train` method.

        :param initial_weights: list of NumPy arrays or Tensors
            See __init__ for description.
        :param activation_fn: Callable
            See __init__ for description.
        """
        self.input_layer = tf.placeholder(tf.float32, [None, initial_weights[0].shape[0]])
        layer_in = self.input_layer
        for weights in initial_weights[:-1]:
            layer_in = self.fully_connected(layer_in, weights, activation_fn)
        self.output_layer = self.fully_connected(layer_in, initial_weights[-1])

    def fully_connected(self, layer_in, initial_weights, activation_fn=None):
        """
        Creates a standard, fully connected layer. Its number of inputs and outputs will be
        defined by the shape of `initial_weights`, and its starting weight values will be
        taken directly from that same parameter. If `self.use_batch_norm` is True, this
        layer will include batch normalization, otherwise it will not.

        :param layer_in: Tensor
            The Tensor that feeds into this layer. It's either the input to the network or the
            output of a previous layer.
        :param initial_weights: NumPy array or Tensor
            Initial values for this layer's weights. The shape defines the number of nodes in
            the layer. e.g. Passing in a matrix of shape (784, 256) would create a layer with
            784 inputs and 256 outputs.
        :param activation_fn: Callable or None (default None)
            The non-linearity used for the output of the layer. If None, this layer will not
            include batch normalization, regardless of the value of `self.use_batch_norm`.
            e.g. Pass tf.nn.relu to use ReLU activations on your hidden layers.
        """
        # Since this class supports both options, only use batch normalization when
        # requested. However, do not use it on the final layer, which we identify
        # by its lack of an activation function.
        if self.use_batch_norm and activation_fn:
            # Batch normalization uses weights as usual, but does NOT add a bias term. This is because
            # its calculations include gamma and beta variables that make the bias term unnecessary.
            # (See later in the notebook for more details.)
            weights = tf.Variable(initial_weights)
            linear_output = tf.matmul(layer_in, weights)

            # Apply batch normalization to the linear combination of the inputs and weights
            batch_normalized_output = tf.layers.batch_normalization(linear_output, training=self.is_training)

            # Now apply the activation function, *after* the normalization.
            return activation_fn(batch_normalized_output)
        else:
            # When not using batch normalization, create a standard layer that multiplies
            # the inputs and weights, adds a bias, and optionally passes the result
            # through an activation function.
            weights = tf.Variable(initial_weights)
            biases = tf.Variable(tf.zeros([initial_weights.shape[-1]]))
            linear_output = tf.add(tf.matmul(layer_in, weights), biases)
            return linear_output if not activation_fn else activation_fn(linear_output)

    def train(self, session, learning_rate, training_batches, batches_per_sample, save_model_as=None):
        """
        Trains the model on the MNIST training dataset.

        :param session: Session
            Used to run training graph operations.
        :param learning_rate: float
            Learning rate used during gradient descent.
        :param training_batches: int
            Number of batches to train.
        :param batches_per_sample: int
            How many batches to train before sampling the validation accuracy.
        :param save_model_as: string or None (default None)
            Name to use if you want to save the trained model.
        """
        # This placeholder will store the target labels for each mini batch
        labels = tf.placeholder(tf.float32, [None, 10])

        # Define loss and optimizer
        cross_entropy = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=self.output_layer))

        # Define operations for testing
        correct_prediction = tf.equal(tf.argmax(self.output_layer, 1), tf.argmax(labels, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        if self.use_batch_norm:
            # If we don't include the update ops as dependencies on the train step, the
            # tf.layers.batch_normalization layers won't update their population statistics,
            # which will cause the model to fail at inference time
            with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
                train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy)
        else:
            train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy)

        # Train for the appropriate number of batches. (tqdm is only for a nice timing display)
        for i in tqdm.tqdm(range(training_batches)):
            # We use batches of 60 just because the original paper did. You can use any size batch you like.
            batch_xs, batch_ys = mnist.train.next_batch(60)
            session.run(train_step, feed_dict={self.input_layer: batch_xs,
                                               labels: batch_ys,
                                               self.is_training: True})

            # Periodically test accuracy against the 5k validation images and store it for plotting later.
            if i % batches_per_sample == 0:
                test_accuracy = session.run(accuracy, feed_dict={self.input_layer: mnist.validation.images,
                                                                 labels: mnist.validation.labels,
                                                                 self.is_training: False})
                self.training_accuracies.append(test_accuracy)

        # After training, report accuracy against the validation data
        test_accuracy = session.run(accuracy, feed_dict={self.input_layer: mnist.validation.images,
                                                         labels: mnist.validation.labels,
                                                         self.is_training: False})
        print('{}: After training, final accuracy on validation set = {}'.format(self.name, test_accuracy))

        # If you want to use this model later for inference instead of having to retrain it,
        # just construct it with the same parameters and then pass this file to the 'test' function
        if save_model_as:
            tf.train.Saver().save(session, save_model_as)

    def test(self, session, test_training_accuracy=False, include_individual_predictions=False, restore_from=None):
        """
        Tests a trained model on the MNIST testing dataset.

        :param session: Session
            Used to run the testing graph operations.
        :param test_training_accuracy: bool (default False)
            If True, perform inference with batch normalization using batch mean and variance;
            if False, perform inference with batch normalization using estimated population mean and variance.
            Note: in real life, *always* perform inference using the population mean and variance.
                  This parameter exists just to support demonstrating what happens if you don't.
        :param include_individual_predictions: bool (default False)
            This function always performs an accuracy test against the entire test set. But if this
            parameter is True, it performs an extra test, doing 200 predictions one at a time, and
            displays the results and accuracy.
        :param restore_from: string or None (default None)
            Name of a saved model if you want to test with previously saved weights.
        """
        # This placeholder will store the true labels for each mini batch
        labels = tf.placeholder(tf.float32, [None, 10])

        # Define operations for testing
        correct_prediction = tf.equal(tf.argmax(self.output_layer, 1), tf.argmax(labels, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        # If provided, restore from a previously saved model
        if restore_from:
            tf.train.Saver().restore(session, restore_from)

        # Test against all of the MNIST test data
        test_accuracy = session.run(accuracy, feed_dict={self.input_layer: mnist.test.images,
                                                         labels: mnist.test.labels,
                                                         self.is_training: test_training_accuracy})
        print('-'*75)
        print('{}: Accuracy on full test set = {}'.format(self.name, test_accuracy))

        # If requested, perform tests predicting individual values rather than batches
        if include_individual_predictions:
            predictions = []
            correct = 0

            # Do 200 predictions, 1 at a time
            for i in range(200):
                # This is a normal prediction using an individual test case. However, notice
                # we pass `test_training_accuracy` to `feed_dict` as the value for `self.is_training`.
                # Remember that will tell it whether it should use the batch mean & variance or
                # the population estimates that were calculated while training the model.
                pred, corr = session.run([tf.arg_max(self.output_layer, 1), accuracy],
                                         feed_dict={self.input_layer: [mnist.test.images[i]],
                                                    labels: [mnist.test.labels[i]],
                                                    self.is_training: test_training_accuracy})
                correct += corr
                predictions.append(pred[0])

            print("200 Predictions:", predictions)
            print("Accuracy on 200 samples:", correct/200)
There are quite a few comments in the code, so those should answer most of your questions. However, let's take a look at the most important lines.

We add batch normalization to layers inside the `fully_connected` function. Here are some important points about that code:

1. Layers with batch normalization do not include a bias term.
2. We use TensorFlow's [`tf.layers.batch_normalization`](https://www.tensorflow.org/api_docs/python/tf/layers/batch_normalization) function to handle the math. (We show lower-level ways to do this [later in the notebook](#implementation_2).)
3. We tell `tf.layers.batch_normalization` whether or not the network is training. This is an important step we'll talk about later.
4. We add the normalization **before** calling the activation function.

In addition to that code, the training step is wrapped in the following `with` statement:

```python
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
```

This line actually works in conjunction with the `training` parameter we pass to `tf.layers.batch_normalization`. Without it, TensorFlow's batch normalization layer will not operate correctly during inference.

Finally, whenever we train the network or perform inference, we use the `feed_dict` to set `self.is_training` to `True` or `False`, respectively, like in the following line:

```python
session.run(train_step, feed_dict={self.input_layer: batch_xs, labels: batch_ys, self.is_training: True})
```

We'll go into more details later, but next we want to show some experiments that use this code and test networks with and without batch normalization.

Batch Normalization Demos

This section of the notebook trains various networks with and without batch normalization to demonstrate some of the benefits mentioned earlier.

We'd like to thank the author of the blog post [Implementing Batch Normalization in TensorFlow](http://r2rt.com/implementing-batch-normalization-in-tensorflow.html). That post provided the idea of - and some of the code for - plotting the differences in accuracy during training, along with the idea for comparing multiple networks using the same initial weights.

Code to support testing

The following two functions support the demos we run in the notebook.

The first function, `plot_training_accuracies`, simply plots the values found in the `training_accuracies` lists of the `NeuralNet` objects passed to it. If you look at the `train` function in `NeuralNet`, you'll see that while it's training the network, it periodically measures validation accuracy and stores the results in that list. It does that just to support these plots.

The second function, `train_and_test`, creates two neural nets - one with and one without batch normalization. It then trains them both and tests them, calling `plot_training_accuracies` to plot how their accuracies changed over the course of training. The really important thing about this function is that it initializes the starting weights for the networks _outside_ of the networks and then passes them in. This lets it train both networks from the exact same starting weights, which eliminates performance differences that might result from (un)lucky initial weights.
def plot_training_accuracies(*args, **kwargs):
    """
    Displays a plot of the accuracies calculated during training to demonstrate
    how many iterations it took for the model(s) to converge.

    :param args: One or more NeuralNet objects
        You can supply any number of NeuralNet objects as unnamed arguments
        and this will display their training accuracies. Be sure to call `train`
        on the NeuralNets before calling this function.
    :param kwargs:
        You can supply any named parameters here, but `batches_per_sample` is the only
        one we look for. It should match the `batches_per_sample` value you passed
        to the `train` function.
    """
    fig, ax = plt.subplots()

    batches_per_sample = kwargs['batches_per_sample']

    for nn in args:
        ax.plot(range(0, len(nn.training_accuracies)*batches_per_sample, batches_per_sample),
                nn.training_accuracies, label=nn.name)
    ax.set_xlabel('Training steps')
    ax.set_ylabel('Accuracy')
    ax.set_title('Validation Accuracy During Training')
    ax.legend(loc=4)
    ax.set_ylim([0, 1])
    plt.yticks(np.arange(0, 1.1, 0.1))
    plt.grid(True)
    plt.show()

def train_and_test(use_bad_weights, learning_rate, activation_fn, training_batches=50000, batches_per_sample=500):
    """
    Creates two networks, one with and one without batch normalization, then trains them
    with identical starting weights, layers, batches, etc. Finally tests and plots their accuracies.

    :param use_bad_weights: bool
        If True, initialize the weights of both networks to wildly inappropriate weights;
        if False, use reasonable starting weights.
    :param learning_rate: float
        Learning rate used during gradient descent.
    :param activation_fn: Callable
        The function used for the output of each hidden layer. The network will use the same
        activation function on every hidden layer and no activation function on the output layer.
        e.g. Pass tf.nn.relu to use ReLU activations on your hidden layers.
    :param training_batches: (default 50000)
        Number of batches to train.
    :param batches_per_sample: (default 500)
        How many batches to train before sampling the validation accuracy.
    """
    # Use identical starting weights for each network to eliminate differences in
    # weight initialization as a cause for differences seen in training performance
    #
    # Note: The networks will use these weights to define the number of and shapes of
    #       its layers. The original batch normalization paper used 3 hidden layers
    #       with 100 nodes in each, followed by a 10 node output layer. These values
    #       build such a network, but feel free to experiment with different choices.
    #       However, the input size should always be 784 and the final output should be 10.
    if use_bad_weights:
        # These weights should be horrible because they have such a large standard deviation
        weights = [np.random.normal(size=(784,100), scale=5.0).astype(np.float32),
                   np.random.normal(size=(100,100), scale=5.0).astype(np.float32),
                   np.random.normal(size=(100,100), scale=5.0).astype(np.float32),
                   np.random.normal(size=(100,10), scale=5.0).astype(np.float32)
                  ]
    else:
        # These weights should be good because they have such a small standard deviation
        weights = [np.random.normal(size=(784,100), scale=0.05).astype(np.float32),
                   np.random.normal(size=(100,100), scale=0.05).astype(np.float32),
                   np.random.normal(size=(100,100), scale=0.05).astype(np.float32),
                   np.random.normal(size=(100,10), scale=0.05).astype(np.float32)
                  ]

    # Just to make sure TensorFlow's default graph is empty before we start another
    # test, because we don't bother using different graphs or scoping and naming
    # elements carefully in this sample code.
    tf.reset_default_graph()

    # Build two versions of the same network, 1 without and 1 with batch normalization
    nn = NeuralNet(weights, activation_fn, False)
    bn = NeuralNet(weights, activation_fn, True)

    # Train and test the two models
    with tf.Session() as sess:
        tf.global_variables_initializer().run()

        nn.train(sess, learning_rate, training_batches, batches_per_sample)
        bn.train(sess, learning_rate, training_batches, batches_per_sample)

        nn.test(sess)
        bn.test(sess)

    # Display a graph of how validation accuracies changed during training
    # so we can compare how the models trained and when they converged
    plot_training_accuracies(nn, bn, batches_per_sample=batches_per_sample)
Comparisons between identical networks, with and without batch normalization

The next series of cells train networks with various settings to show the differences with and without batch normalization. They are meant to clearly demonstrate the effects of batch normalization. We include a deeper discussion of batch normalization later in the notebook.

**The following creates two networks using a ReLU activation function, a learning rate of 0.01, and reasonable starting weights.**
train_and_test(False, 0.01, tf.nn.relu)
100%|██████████| 50000/50000 [00:42<00:00, 1183.34it/s]
As expected, both networks train well and eventually reach similar test accuracies. However, notice that the model with batch normalization converges slightly faster than the other network, reaching accuracies over 90% almost immediately and nearing its max accuracy in 10 or 15 thousand iterations. The other network takes about 3 thousand iterations to reach 90% and doesn't near its best accuracy until 30 thousand or more iterations.

If you look at the raw speed, you can see that without batch normalization we were computing over 1100 batches per second, whereas with batch normalization that goes down to just over 500. However, batch normalization allows us to perform fewer iterations and converge in less time overall. (We only trained for 50 thousand batches here so we could plot the comparison.)

**The following creates two networks with the same hyperparameters used in the previous example, but only trains for 2000 iterations.**
train_and_test(False, 0.01, tf.nn.relu, 2000, 50)
100%|██████████| 2000/2000 [00:01<00:00, 1069.17it/s]
As you can see, using batch normalization produces a model with over 95% accuracy in only 2000 batches, and it was above 90% at somewhere around 500 batches. Without batch normalization, the model takes 1750 iterations just to hit 80% – the network with batch normalization hits that mark after around 200 iterations! (Note: if you run the code yourself, you'll see slightly different results each time because the starting weights - while the same for each model - are different for each run.)

In the above example, you should also notice that the networks trained fewer batches per second than what you saw in the previous example. That's because much of the time we're tracking is actually spent periodically performing inference to collect data for the plots. In this example we perform that inference every 50 batches instead of every 500, so generating the plot for this example requires 10 times the overhead for the same 2000 iterations.

**The following creates two networks using a sigmoid activation function, a learning rate of 0.01, and reasonable starting weights.**
train_and_test(False, 0.01, tf.nn.sigmoid)
100%|██████████| 50000/50000 [00:43<00:00, 1153.97it/s]
With the number of layers we're using and this small learning rate, using a sigmoid activation function takes a long time to start learning. It eventually starts making progress, but it took over 45 thousand batches just to get over 80% accuracy. Using batch normalization gets to 90% in around one thousand batches.

**The following creates two networks using a ReLU activation function, a learning rate of 1, and reasonable starting weights.**
train_and_test(False, 1, tf.nn.relu)
100%|██████████| 50000/50000 [00:35<00:00, 1397.55it/s]
Now we're using ReLUs again, but with a larger learning rate. The plot shows how training started out pretty normally, with the network with batch normalization starting out faster than the other. But the higher learning rate bounces the accuracy around a bit more, and at some point the accuracy in the network without batch normalization just completely crashes. It's likely that too many ReLUs died off at this point because of the high learning rate.

The next cell shows the same test again. The network with batch normalization performs the same way, and the other suffers from the same problem again, but it manages to train longer before it happens.
train_and_test(False, 1, tf.nn.relu)
100%|██████████| 50000/50000 [00:36<00:00, 1379.92it/s]
In both of the previous examples, the network with batch normalization manages to get over 98% accuracy, and gets near that result almost immediately. The higher learning rate allows the network to train extremely fast.

**The following creates two networks using a sigmoid activation function, a learning rate of 1, and reasonable starting weights.**
train_and_test(False, 1, tf.nn.sigmoid)
100%|██████████| 50000/50000 [00:36<00:00, 1382.38it/s]
In this example, we switched to a sigmoid activation function. It appears to handle the higher learning rate well, with both networks achieving high accuracy.

The cell below shows a similar pair of networks trained for only 2000 iterations.
train_and_test(False, 1, tf.nn.sigmoid, 2000, 50)
100%|██████████| 2000/2000 [00:01<00:00, 1167.28it/s]
As you can see, even though these parameters work well for both networks, the one with batch normalization gets over 90% in 400 or so batches, whereas the other takes over 1700. When training larger networks, these sorts of differences become more pronounced.

**The following creates two networks using a ReLU activation function, a learning rate of 2, and reasonable starting weights.**
train_and_test(False, 2, tf.nn.relu)
100%|██████████| 50000/50000 [00:35<00:00, 1412.09it/s]
With this very large learning rate, the network with batch normalization trains fine and almost immediately manages 98% accuracy. However, the network without normalization doesn't learn at all.

**The following creates two networks using a sigmoid activation function, a learning rate of 2, and reasonable starting weights.**
train_and_test(False, 2, tf.nn.sigmoid)
100%|██████████| 50000/50000 [00:35<00:00, 1395.37it/s]
Once again, using a sigmoid activation function with the larger learning rate works well both with and without batch normalization.

However, look at the plot below where we train models with the same parameters but only 2000 iterations. As usual, batch normalization lets it train faster.
train_and_test(False, 2, tf.nn.sigmoid, 2000, 50)
100%|██████████| 2000/2000 [00:01<00:00, 1170.27it/s]
In the rest of the examples, we use really bad starting weights. That is, normally we would use very small values close to zero. However, in these examples we choose random values with a standard deviation of 5. If you were really training a neural network, you would **not** want to do this. But these examples demonstrate how batch normalization makes your network much more resilient.

**The following creates two networks using a ReLU activation function, a learning rate of 0.01, and bad starting weights.**
train_and_test(True, 0.01, tf.nn.relu)
100%|██████████| 50000/50000 [00:43<00:00, 1147.21it/s]
As the plot shows, without batch normalization the network never learns anything at all. But with batch normalization, it actually learns pretty well and gets to almost 80% accuracy. The starting weights obviously hurt the network, but you can see how well batch normalization does in overcoming them.

**The following creates two networks using a sigmoid activation function, a learning rate of 0.01, and bad starting weights.**
train_and_test(True, 0.01, tf.nn.sigmoid)
100%|██████████| 50000/50000 [00:45<00:00, 1108.50it/s]
Using a sigmoid activation function works better than the ReLU in the previous example, but without batch normalization it would take a tremendously long time to train the network, if it ever trained at all.

**The following creates two networks using a ReLU activation function, a learning rate of 1, and bad starting weights.**
train_and_test(True, 1, tf.nn.relu)
100%|██████████| 50000/50000 [00:38<00:00, 1313.14it/s]
The higher learning rate used here allows the network with batch normalization to surpass 90% in about 30 thousand batches. The network without it never gets anywhere.

**The following creates two networks using a sigmoid activation function, a learning rate of 1, and bad starting weights.**
train_and_test(True, 1, tf.nn.sigmoid)
100%|██████████| 50000/50000 [00:35<00:00, 1409.45it/s]
Using sigmoid works better than ReLUs for this higher learning rate. However, you can see that without batch normalization, the network takes a long time to train, bounces around a lot, and spends a long time stuck at 90%. The network with batch normalization trains much more quickly, seems to be more stable, and achieves a higher accuracy.

**The following creates two networks using a ReLU activation function, a learning rate of 2, and bad starting weights.**
train_and_test(True, 2, tf.nn.relu)
100%|██████████| 50000/50000 [00:35<00:00, 1392.83it/s]
We've already seen that ReLUs do not do as well as sigmoids with higher learning rates, and here we are using an extremely high rate. As expected, without batch normalization the network doesn't learn at all. But with batch normalization, it eventually achieves 90% accuracy. Notice, though, how its accuracy bounces around wildly during training - that's because the learning rate is really much too high, so the fact that this worked at all is a bit of luck.

**The following creates two networks using a sigmoid activation function, a learning rate of 2, and bad starting weights.**
train_and_test(True, 2, tf.nn.sigmoid)
100%|██████████| 50000/50000 [00:35<00:00, 1401.19it/s]
In this case, the network with batch normalization trained faster and reached a higher accuracy. Meanwhile, the high learning rate makes the network without normalization bounce around erratically and have trouble getting past 90%.

Full Disclosure: Batch Normalization Doesn't Fix Everything

Batch normalization isn't magic and it doesn't work every time. Weights are still randomly initialized and batches are chosen at random during training, so you never know exactly how training will go. Even for these tests, where we use the same initial weights for both networks, we still get _different_ weights each time we run.

This section includes two examples that show runs when batch normalization did not help at all.

**The following creates two networks using a ReLU activation function, a learning rate of 1, and bad starting weights.**
train_and_test(True, 1, tf.nn.relu)
100%|██████████| 50000/50000 [00:36<00:00, 1386.17it/s]
MIT
batch-norm/Batch_Normalization_Lesson.ipynb
JJINDAHOUSE/deep-learning
When we used these same parameters [earlier](successful_example_lr_1), we saw the network with batch normalization reach 92% validation accuracy. This time we used different starting weights, initialized using the same standard deviation as before, and the network doesn't learn at all. (Remember, an accuracy around 10% is what the network gets if it just guesses the same value all the time.) **The following creates two networks using a ReLU activation function, a learning rate of 2, and bad starting weights.**
train_and_test(True, 2, tf.nn.relu)
100%|██████████| 50000/50000 [00:35<00:00, 1398.39it/s]
MIT
batch-norm/Batch_Normalization_Lesson.ipynb
JJINDAHOUSE/deep-learning
When we trained with these parameters and batch normalization [earlier](successful_example_lr_2), we reached 90% validation accuracy. However, this time the network _almost_ starts to make some progress in the beginning, but it quickly breaks down and stops learning.

**Note:** Both of the above examples use *extremely* bad starting weights, along with learning rates that are too high. While we've shown batch normalization _can_ overcome bad values, we don't mean to encourage actually using them. The examples in this notebook are meant to show that batch normalization can help your networks train better. But these last two examples should remind you that you still want to try to use good network design choices and reasonable starting weights. It should also remind you that the results of each attempt to train a network are a bit random, even when using otherwise identical architectures.

Batch Normalization: A Detailed Look

The layer created by `tf.layers.batch_normalization` handles all the details of implementing batch normalization. Many students will be fine just using that and won't care about what's happening at the lower levels. However, some students may want to explore the details, so here is a short explanation of what's really happening, starting with the equations you're likely to come across if you ever read about batch normalization.

In order to normalize the values, we first need to find the average value for the batch. If you look at the code, you can see that this is not the average value of the batch _inputs_, but the average value coming _out_ of any particular layer before we pass it through its non-linear activation function and then feed it as an input to the _next_ layer. We represent the average as $\mu_B$, which is simply the sum of all of the values $x_i$ divided by the number of values, $m$:

$$\mu_B \leftarrow \frac{1}{m}\sum_{i=1}^m x_i$$

We then need to calculate the variance, or mean squared deviation, represented as $\sigma_{B}^{2}$. If you aren't familiar with statistics, that simply means that for each value $x_i$, we subtract the average value (calculated earlier as $\mu_B$), which gives us what's called the "deviation" for that value. We square the result to get the squared deviation. Sum up the results of doing that for each of the values, then divide by the number of values, again $m$, to get the average, or mean, squared deviation.

$$\sigma_{B}^{2} \leftarrow \frac{1}{m}\sum_{i=1}^m (x_i - \mu_B)^2$$

Once we have the mean and variance, we can use them to normalize the values with the following equation. For each value, it subtracts the mean and divides by the (almost) standard deviation. (You've probably heard of standard deviation many times, but if you have not studied statistics you might not know that the standard deviation is actually the square root of the mean squared deviation.)

$$\hat{x_i} \leftarrow \frac{x_i - \mu_B}{\sqrt{\sigma_{B}^{2} + \epsilon}}$$

Above, we said "(almost) standard deviation". That's because the real standard deviation for the batch is calculated by $\sqrt{\sigma_{B}^{2}}$, but the above formula adds the term epsilon, $\epsilon$, before taking the square root. The epsilon can be any small, positive constant - in our code we use the value `0.001`. It is there partially to make sure we don't try to divide by zero, but it also acts to increase the variance slightly for each batch. Why increase the variance?
Statistically, this makes sense because even though we are normalizing one batch at a time, we are also trying to estimate the population distribution – the total training set, which is itself an estimate of the larger population of inputs your network wants to handle. The variance of a population is higher than the variance for any sample taken from that population, so increasing the variance a little bit for each batch helps take that into account.

At this point, we have a normalized value, represented as $\hat{x_i}$. But rather than use it directly, we multiply it by a gamma value, $\gamma$, and then add a beta value, $\beta$. Both $\gamma$ and $\beta$ are learnable parameters of the network and serve to scale and shift the normalized value, respectively. Because they are learnable just like weights, they give your network some extra knobs to tweak during training to help it learn the function it is trying to approximate.

$$y_i \leftarrow \gamma \hat{x_i} + \beta$$

We now have the final batch-normalized output of our layer, which we would then pass to a non-linear activation function like sigmoid, tanh, ReLU, Leaky ReLU, etc. In the original batch normalization paper (linked in the beginning of this notebook), they mention that there might be cases when you'd want to perform the batch normalization _after_ the non-linearity instead of before, but it is difficult to find any uses like that in practice.

In `NeuralNet`'s implementation of `fully_connected`, all of this math is hidden inside the following line, where `linear_output` serves as the $x_i$ from the equations:

```python
batch_normalized_output = tf.layers.batch_normalization(linear_output, training=self.is_training)
```

The next section shows you how to implement the math directly.

Batch normalization without the `tf.layers` package

Our implementation of batch normalization in `NeuralNet` uses the high-level abstraction [tf.layers.batch_normalization](https://www.tensorflow.org/api_docs/python/tf/layers/batch_normalization), found in TensorFlow's [`tf.layers`](https://www.tensorflow.org/api_docs/python/tf/layers) package. However, if you would like to implement batch normalization at a lower level, the following code shows you how. It uses [tf.nn.batch_normalization](https://www.tensorflow.org/api_docs/python/tf/nn/batch_normalization) from TensorFlow's [neural net (nn)](https://www.tensorflow.org/api_docs/python/tf/nn) package.

**1)** You can replace the `fully_connected` function in the `NeuralNet` class with the below code and everything in `NeuralNet` will still work like it did before.
def fully_connected(self, layer_in, initial_weights, activation_fn=None):
    """
    Creates a standard, fully connected layer. Its number of inputs and outputs will be
    defined by the shape of `initial_weights`, and its starting weight values will be
    taken directly from that same parameter. If `self.use_batch_norm` is True, this
    layer will include batch normalization, otherwise it will not.

    :param layer_in: Tensor
        The Tensor that feeds into this layer. It's either the input to the network or the output
        of a previous layer.
    :param initial_weights: NumPy array or Tensor
        Initial values for this layer's weights. The shape defines the number of nodes in the layer.
        e.g. Passing in a matrix of shape (784, 256) would create a layer with 784 inputs and 256
        outputs.
    :param activation_fn: Callable or None (default None)
        The non-linearity used for the output of the layer. If None, this layer will not include
        batch normalization, regardless of the value of `self.use_batch_norm`.
        e.g. Pass tf.nn.relu to use ReLU activations on your hidden layers.
    """
    if self.use_batch_norm and activation_fn:
        # Batch normalization uses weights as usual, but does NOT add a bias term. This is because
        # its calculations include gamma and beta variables that make the bias term unnecessary.
        weights = tf.Variable(initial_weights)
        linear_output = tf.matmul(layer_in, weights)

        num_out_nodes = initial_weights.shape[-1]

        # Batch normalization adds additional trainable variables:
        # gamma (for scaling) and beta (for shifting).
        gamma = tf.Variable(tf.ones([num_out_nodes]))
        beta = tf.Variable(tf.zeros([num_out_nodes]))

        # These variables will store the mean and variance for this layer over the entire training set,
        # which we assume represents the general population distribution.
        # By setting `trainable=False`, we tell TensorFlow not to modify these variables during
        # back propagation. Instead, we will assign values to these variables ourselves.
        pop_mean = tf.Variable(tf.zeros([num_out_nodes]), trainable=False)
        pop_variance = tf.Variable(tf.ones([num_out_nodes]), trainable=False)

        # Batch normalization requires a small constant epsilon, used to ensure we don't divide by zero.
        # This is the default value TensorFlow uses.
        epsilon = 1e-3

        def batch_norm_training():
            # Calculate the mean and variance for the data coming out of this layer's linear-combination step.
            # The [0] defines an array of axes to calculate over.
            batch_mean, batch_variance = tf.nn.moments(linear_output, [0])

            # Calculate a moving average of the training data's mean and variance while training.
            # These will be used during inference.
            # Decay should be some number less than 1. tf.layers.batch_normalization uses the parameter
            # "momentum" to accomplish this and defaults it to 0.99
            decay = 0.99
            train_mean = tf.assign(pop_mean, pop_mean * decay + batch_mean * (1 - decay))
            train_variance = tf.assign(pop_variance, pop_variance * decay + batch_variance * (1 - decay))

            # The 'tf.control_dependencies' context tells TensorFlow it must calculate 'train_mean'
            # and 'train_variance' before it calculates the 'tf.nn.batch_normalization' layer.
            # This is necessary because those two operations are not actually in the graph
            # connecting the linear_output and batch_normalization layers,
            # so TensorFlow would otherwise just skip them.
            with tf.control_dependencies([train_mean, train_variance]):
                return tf.nn.batch_normalization(linear_output, batch_mean, batch_variance, beta, gamma, epsilon)

        def batch_norm_inference():
            # During inference, use our estimated population mean and variance to normalize the layer
            return tf.nn.batch_normalization(linear_output, pop_mean, pop_variance, beta, gamma, epsilon)

        # Use `tf.cond` as a sort of if-check. When self.is_training is True, TensorFlow will execute
        # the operation returned from `batch_norm_training`; otherwise it will execute the graph
        # operation returned from `batch_norm_inference`.
        batch_normalized_output = tf.cond(self.is_training, batch_norm_training, batch_norm_inference)

        # Pass the batch-normalized layer output through the activation function.
        # The literature states there may be cases where you want to perform the batch normalization *after*
        # the activation function, but it is difficult to find any uses of that in practice.
        return activation_fn(batch_normalized_output)
    else:
        # When not using batch normalization, create a standard layer that multiplies
        # the inputs and weights, adds a bias, and optionally passes the result
        # through an activation function.
        weights = tf.Variable(initial_weights)
        biases = tf.Variable(tf.zeros([initial_weights.shape[-1]]))
        linear_output = tf.add(tf.matmul(layer_in, weights), biases)
        return linear_output if not activation_fn else activation_fn(linear_output)
_____no_output_____
MIT
batch-norm/Batch_Normalization_Lesson.ipynb
JJINDAHOUSE/deep-learning
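As a quick sanity check on the four equations above, here is a minimal NumPy sketch that applies them - batch mean, batch variance, normalization with epsilon, then the gamma/beta scale and shift - to a toy batch:

```python
import numpy as np

# Toy batch: 4 samples, 3 features (think of it as one layer's linear output)
linear_output = np.array([[1.0, 2.0, 3.0],
                          [2.0, 4.0, 6.0],
                          [3.0, 6.0, 9.0],
                          [4.0, 8.0, 12.0]])
epsilon = 1e-3

mu = linear_output.mean(axis=0)                        # batch mean, per feature
var = ((linear_output - mu) ** 2).mean(axis=0)         # batch variance, per feature
x_hat = (linear_output - mu) / np.sqrt(var + epsilon)  # normalize

gamma = np.ones(3)    # learnable scale, initialized to one
beta = np.zeros(3)    # learnable shift, initialized to zero
y = gamma * x_hat + beta

print(y.mean(axis=0))  # ~0 for every feature
print(y.std(axis=0))   # ~1 for every feature (slightly under 1 because of epsilon)
```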
This version of `fully_connected` is much longer than the original, but once again has extensive comments to help you understand it. Here are some important points:

1. It explicitly creates variables to store gamma, beta, and the population mean and variance. These were all handled for us in the previous version of the function.
2. It initializes gamma to one and beta to zero, so they start out having no effect in this calculation: $y_i \leftarrow \gamma \hat{x_i} + \beta$. However, during training the network learns the best values for these variables using back propagation, just like networks normally do with weights.
3. Unlike gamma and beta, the variables for population mean and variance are marked as untrainable. That tells TensorFlow not to modify them during back propagation. Instead, the lines that call `tf.assign` are used to update these variables directly.
4. TensorFlow won't automatically run the `tf.assign` operations during training because it only evaluates operations that are required based on the connections it finds in the graph. To get around that, we add this line: `with tf.control_dependencies([train_mean, train_variance]):` before we run the normalization operation. This tells TensorFlow it needs to run those operations before running anything inside the `with` block.
5. The actual normalization math is still mostly hidden from us, this time using [`tf.nn.batch_normalization`](https://www.tensorflow.org/api_docs/python/tf/nn/batch_normalization).
6. `tf.nn.batch_normalization` does not have a `training` parameter like `tf.layers.batch_normalization` did. However, we still need to handle training and inference differently, so we run different code in each case using the [`tf.cond`](https://www.tensorflow.org/api_docs/python/tf/cond) operation.
7. We use the [`tf.nn.moments`](https://www.tensorflow.org/api_docs/python/tf/nn/moments) function to calculate the batch mean and variance.

**2)** The current version of the `train` function in `NeuralNet` will work fine with this new version of `fully_connected`. However, it uses these lines to ensure population statistics are updated when using batch normalization:

```python
if self.use_batch_norm:
    with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
        train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy)
else:
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy)
```

Our new version of `fully_connected` handles updating the population statistics directly.
That means you can also simplify your code by replacing the above `if`/`else` condition with just this line:

```python
train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy)
```

**3)** And just in case you want to implement every detail from scratch, you can replace this line in `batch_norm_training`:

```python
return tf.nn.batch_normalization(linear_output, batch_mean, batch_variance, beta, gamma, epsilon)
```

with these lines:

```python
normalized_linear_output = (linear_output - batch_mean) / tf.sqrt(batch_variance + epsilon)
return gamma * normalized_linear_output + beta
```

And replace this line in `batch_norm_inference`:

```python
return tf.nn.batch_normalization(linear_output, pop_mean, pop_variance, beta, gamma, epsilon)
```

with these lines:

```python
normalized_linear_output = (linear_output - pop_mean) / tf.sqrt(pop_variance + epsilon)
return gamma * normalized_linear_output + beta
```

As you can see in each of the above substitutions, the two lines of replacement code simply implement the following two equations directly. The first line calculates the following equation, with `linear_output` representing $x_i$ and `normalized_linear_output` representing $\hat{x_i}$:

$$\hat{x_i} \leftarrow \frac{x_i - \mu_B}{\sqrt{\sigma_{B}^{2} + \epsilon}}$$

And the second line is a direct translation of the following equation:

$$y_i \leftarrow \gamma \hat{x_i} + \beta$$

We still use the `tf.nn.moments` operation to implement the other two equations from earlier – the ones that calculate the batch mean and variance used in the normalization step. If you really wanted to do everything from scratch, you could replace that line, too, but we'll leave that to you.

Why the difference between training and inference?

In the original function that uses `tf.layers.batch_normalization`, we tell the layer whether or not the network is training by passing a value for its `training` parameter, like so:

```python
batch_normalized_output = tf.layers.batch_normalization(linear_output, training=self.is_training)
```

And that forces us to provide a value for `self.is_training` in our `feed_dict`, like we do in this example from `NeuralNet`'s `train` function:

```python
session.run(train_step, feed_dict={self.input_layer: batch_xs, labels: batch_ys, self.is_training: True})
```

If you looked at the [low level implementation](low_level_code), you probably noticed that, just like with `tf.layers.batch_normalization`, we need to do slightly different things during training and inference. But why is that? First, let's look at what happens when we don't. The following function is similar to `train_and_test` from earlier, but this time we are only testing one network, and instead of plotting its accuracy, we perform 200 predictions on test inputs, 1 input at a time. We can use the `test_training_accuracy` parameter to test the network in training or inference modes (the equivalent of passing `True` or `False` to the `feed_dict` for `is_training`).
def batch_norm_test(test_training_accuracy): """ :param test_training_accuracy: bool If True, perform inference with batch normalization using batch mean and variance; if False, perform inference with batch normalization using estimated population mean and variance. """ weights = [np.random.normal(size=(784,100), scale=0.05).astype(np.float32), np.random.normal(size=(100,100), scale=0.05).astype(np.float32), np.random.normal(size=(100,100), scale=0.05).astype(np.float32), np.random.normal(size=(100,10), scale=0.05).astype(np.float32) ] tf.reset_default_graph() # Train the model bn = NeuralNet(weights, tf.nn.relu, True) # First train the network with tf.Session() as sess: tf.global_variables_initializer().run() bn.train(sess, 0.01, 2000, 2000) bn.test(sess, test_training_accuracy=test_training_accuracy, include_individual_predictions=True)
_____no_output_____
MIT
batch-norm/Batch_Normalization_Lesson.ipynb
JJINDAHOUSE/deep-learning
In the following cell, we pass `True` for `test_training_accuracy`, which performs the same batch normalization that we normally perform **during training**.
batch_norm_test(True)
100%|██████████| 2000/2000 [00:03<00:00, 514.57it/s]
MIT
batch-norm/Batch_Normalization_Lesson.ipynb
JJINDAHOUSE/deep-learning
As you can see, the network guessed the same value every time! But why? Because during training, a network with batch normalization adjusts the values at each layer based on the mean and variance **of that batch**. The "batches" we are using for these predictions have a single input each time, so their values _are_ the means, and their variances will always be 0. That means the network will normalize the values at any layer to zero. (Review the equations from before to see why a value that is equal to the mean would always normalize to zero.) So we end up with the same result for every input we give the network, because it's the value the network produces when it applies its learned weights to zeros at every layer.

**Note:** If you re-run that cell, you might get a different value from what we showed. That's because the specific weights the network learns will be different every time. But whatever value it is, it should be the same for all 200 predictions.

To overcome this problem, the network does not just normalize the batch at each layer. It also maintains an estimate of the mean and variance for the entire population. So when we perform inference, instead of letting it "normalize" all the values using their own means and variance, it uses the estimates of the population mean and variance that it calculated while training.

So in the following example, we pass `False` for `test_training_accuracy`, which tells the network that we want it to perform inference with the population statistics it calculated during training.
batch_norm_test(False)
100%|██████████| 2000/2000 [00:03<00:00, 511.58it/s]
MIT
batch-norm/Batch_Normalization_Lesson.ipynb
JJINDAHOUSE/deep-learning
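To make the size-one-batch problem described above concrete, here is a minimal NumPy sketch: when a "batch" contains a single input, every value equals the batch mean, so normalization sends everything to zero no matter what the input was.

```python
import numpy as np

single_input = np.array([[0.3, 0.9, 0.1]])      # a "batch" with one sample
mu = single_input.mean(axis=0)                  # equals the sample itself
var = ((single_input - mu) ** 2).mean(axis=0)   # all zeros
epsilon = 1e-3

x_hat = (single_input - mu) / np.sqrt(var + epsilon)
print(x_hat)  # [[0. 0. 0.]] -- the network sees zeros regardless of the input
```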
0. Environment Setting
import pandas as pd
import numpy as np
import time

import tensorflow as tf
import tensorflow.keras as keras
from keras.models import load_model
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences

start_time = time.time()

# Download the trained model and the preprocessed data
!wget --no-check-certificate 'https://docs.google.com/uc?export=download&id=1-8A3zjgyBi3vjlWi9DX61IFxunCihb3-' -O demo_model.h5
!wget --no-check-certificate 'https://docs.google.com/uc?export=download&id=1ApkVRmC0B2DgLjYQ3l1sbF6bx2GyBttY' -O demo_sentiment.xlsx

# Script to install Mecab (takes about 3 minutes)
!set -x \
&& pip install konlpy \
&& curl -s https://raw.githubusercontent.com/konlpy/konlpy/master/scripts/mecab.sh | bash -x

# Test that the installation worked
from konlpy.tag import Mecab
m = Mecab()
m.pos('아버지가방에들어가신다.')
_____no_output_____
MIT
Demo of LSTM_Sentiment_Classification_KOR.ipynb
yool-seoul/sentiment_analysis_lstm
1. Data loading
MAX_LEN = 50
VOCAB_SIZE = 13648

# Read the training Excel file used to fit the Tokenizer (takes about 30 seconds).
df1 = pd.read_excel('./demo_sentiment.xlsx', sheet_name='Train')

def tokenize(s):
    return ['/'.join(t) for t in m.pos(s)]

train_docs = [tokenize(row[1]['sentence']) for row in df1.iterrows()]
tokenizer = Tokenizer(num_words = VOCAB_SIZE)
tokenizer.fit_on_texts(train_docs)

# Load the trained model
loaded_model = load_model('demo_model.h5')

end_time = time.time()
print("Elapsed time for ready :", time.strftime('%H:%M:%S', time.localtime(end_time - start_time)))
Elapsed time for ready : 00:01:11
MIT
Demo of LSTM_Sentiment_Classification_KOR.ipynb
yool-seoul/sentiment_analysis_lstm
2. Ready to test
# Helper functions that interpret the results and print them in a readable form.
def sentiment_predict(sentence, log=False):
    sentences = tokenize(sentence)
    encoded = tokenizer.texts_to_sequences([sentences])
    padded = pad_sequences(encoded, maxlen=MAX_LEN)
    score = loaded_model.predict(padded)
    if log:
        print('Predict score: ', score)
    return score

def print_sentiment(lst):
    # Class labels: joy, anxiety, embarrassment, sadness, anger, hurt
    sent = ['기쁨', '불안', '당황', '슬픔', '분노', '상처']
    return sent[np.argmax(lst)]

print_sentiment(sentiment_predict('니가 어떻게 나에게 이럴수가 있어?', True))
print_sentiment(sentiment_predict('아니, 니가 어떻게 나에게 이럴수가 있어?', True))
print_sentiment(sentiment_predict('감히 니가 어떻게 나에게 이럴수가 있어?', True))
print_sentiment(sentiment_predict('그래, 너 참 잘났다.', True))
print_sentiment(sentiment_predict('너 참 잘났다.', True))
print_sentiment(sentiment_predict('잘들논다.', True))
print_sentiment(sentiment_predict('아이고, 잘들논다.', True))
print_sentiment(sentiment_predict('방금 저녁을 먹었는데 왜 또 배가고프지?', True))
print_sentiment(sentiment_predict('오늘 저녁에 내가 좋아하는 친구의 생일파티에 초대받았어', True))
print_sentiment(sentiment_predict('이번 여름은 코로나 때문에 여행을 가지 못했어.', True))
print_sentiment(sentiment_predict('다음 주에 개학인데, 나는 아직 방학숙제를 다 못했어.', True))

# Try entering your own sentences.
messages = ['어제 한국과 일본이 배구경기를 했는데, 한국이 이겼어.',
            '어제 한국과 일본이 배구경기를 했는데, 한국이 이겼어. 그런데 나는 일본사람이야.',
            '어제 한국과 일본이 배구경기를 했는데, 한국이 이겼어. 그런데 나는 한국사람이야.',
            '어제 한국과 일본이 배구경기를 했는데, 한국이 이겼어. 그런데 나는 중국사람이야.',
            '하루종일 아무것도 먹지 않았더니 몸에 힘이 없어.',
            '신규확진 1823명, 열흘만에 다시 1800명 역대 세 번째 큰 규모',
            '오늘 날씨가 너무 더워서 공부하기 힘들어',
            '니가 어떻게 나에게 이럴 수가 있어?',
            '오늘 낮에 내가 황당한 스팸 문자를 받았어',
            '아니 왜 가만히 있는 우리를 괴롭히는거야?',
            '약속시간에 늦었어, 어떡하지?',
            # What happens with a very short sentence?
            '이게 되네?',
            # And with a very long sentence...
            '지난 7월 소비자물가 상승률이 연중 최고치인 2.6%인 것으로 확인되면서 하반기부터는 물가가 2% 이내로 안정화될 것이라는 정부 전망은 빗나갔다. 한은이 제시한 물가안정목표치(2.0%)도 훌쩍 넘길 가능성이 커지고 있다. 물가 불안이 확대되자 정부는 ‘물가 잡기’ 총력전을 펼치는 상황이다. 작황 부진과 폭염으로 인한 폐사 등으로 서민 체감이 큰 밥상 물가가 크게 오르고, 부동산 정책 실패로 집세는 천정부지인데다 수요회복으로 서비스 가격이 뛰는 등 “안오르는게 없는” 상황에 민심 역시 악화되고 있기 때문이다. 특히 지난 5월 조류독감(AI)가 종식됐는데도 가격이 내리지 않는 계란을 놓고는, 문재인 대통령이 직접 “달걀은 필수 먹거리인 만큼 소비자들에게도 피해가 갈 수 있으니 생산단계, 유통단계, 판매단계를 점검하라”고 부처들에 주문하기도 했다. 계란값 잡기에는 물가 총괄 부처인 기재부와 함께 농림부가 나섰고, 여기에 공정거래위원회까지 담합 가능성을 살펴보겠다며 가담한 상태다.'
           ]

for msg in messages:
    res = sentiment_predict(msg, True)
    print('입력문장: ', msg)                        # "Input sentence"
    print('감정상태: ', print_sentiment(res), '\n')  # "Detected emotion"
_____no_output_____
MIT
Demo of LSTM_Sentiment_Classification_KOR.ipynb
yool-seoul/sentiment_analysis_lstm
Sequence Classification with LSTM on MNIST

Table of contents
- Introduction
- Architectures
- Long Short-Term Memory Model (LSTM)
- Building an LSTM with TensorFlow

Introduction

Recurrent Neural Networks are Deep Learning models with simple structures and a feedback mechanism built in; in other words, the output of a layer is added to the next input and fed back into the same layer. The Recurrent Neural Network is a specialized type of Neural Network that solves the issue of **maintaining context for sequential data** - such as weather data, stocks, genes, etc. At each iterative step, the processing unit takes in an input and the current state of the network, and produces an output and a new state that is **re-fed into the network**. However, **this model has some problems**. It's very computationally expensive to maintain the state for a large number of units, even more so over a long amount of time. Additionally, Recurrent Networks are very sensitive to changes in their parameters. As such, they are prone to different problems with their Gradient Descent optimizer - the gradients either grow exponentially (Exploding Gradient) or drop down to near zero and stabilize (Vanishing Gradient), both problems that greatly harm a model's learning capability. To solve these problems, Hochreiter and Schmidhuber published a paper in 1997 describing a way to keep information over long periods of time and additionally solve the oversensitivity to parameter changes, i.e., make backpropagating through the Recurrent Networks more viable.

Architectures
- Fully Recurrent Network
- Recursive Neural Networks
- Hopfield Networks
- Elman Networks and Jordan Networks
- Echo State Networks
- Neural history compressor
- **The Long Short-Term Memory Model (LSTM)**

LSTM

LSTM is one of the proposed solutions or upgrades to the **Recurrent Neural Network model**. It is an abstraction of how computer memory works. It is "bundled" with whatever processing unit is implemented in the Recurrent Network, although outside of its flow, and is responsible for keeping, reading, and outputting information for the model. The way it works is simple: you have a linear unit, which is the information cell itself, surrounded by three logistic gates responsible for maintaining the data. One gate is for inputting data into the information cell, one is for outputting data from the information cell, and the last one is to keep or forget data depending on the needs of the network. Thanks to that, it not only solves the problem of keeping states - because the network can choose to forget data whenever information is not needed - it also solves the gradient problems, since the logistic gates have a very nice derivative.

Long Short-Term Memory Architecture

As seen before, the Long Short-Term Memory unit is composed of a linear unit surrounded by three logistic gates. The names for these gates vary from place to place, but the most usual names for them are:
- the "Input" or "Write" Gate, which handles the writing of data into the information cell
- the "Output" or "Read" Gate, which handles the sending of data back onto the Recurrent Network
- the "Keep" or "Forget" Gate, which handles the maintaining and modification of the data stored in the information cell.

*Diagram of the Long Short-Term Memory Unit*

The three gates are the centerpiece of the LSTM unit. The gates, when activated by the network, perform their respective functions.
For example, the Input Gate will write whatever data it is passed onto the information cell, the Output Gate will return whatever data is in the information cell, and the Keep Gate will maintain the data in the information cell. These gates are analog and multiplicative, and as such, can modify the data based on the signal they are sent.

Building an LSTM with TensorFlow

LSTM for Classification

Although RNNs are mostly used to model sequences and predict sequential data, we can still classify images using an LSTM network. If we consider every image row as a sequence of pixels, we can feed an LSTM network for classification. Let's use the famous MNIST dataset here. Because the MNIST image shape is 28x28 px, we will handle 28 sequences of 28 steps for every sample.

MNIST Dataset

TensorFlow already provides **helper functions** to download and process the MNIST dataset.
%matplotlib inline import warnings warnings.filterwarnings('ignore') import numpy as np import matplotlib.pyplot as plt import tensorflow as tf from tensorflow.examples.tutorials.mnist import input_data mnist = input_data.read_data_sets(".", one_hot=True)
Successfully downloaded train-images-idx3-ubyte.gz 9912422 bytes. Extracting ./train-images-idx3-ubyte.gz Successfully downloaded train-labels-idx1-ubyte.gz 28881 bytes. Extracting ./train-labels-idx1-ubyte.gz Successfully downloaded t10k-images-idx3-ubyte.gz 1648877 bytes. Extracting ./t10k-images-idx3-ubyte.gz Successfully downloaded t10k-labels-idx1-ubyte.gz 4542 bytes. Extracting ./t10k-labels-idx1-ubyte.gz
MIT
Recurrent Neural Networks/LSTM-MNIST.ipynb
KostadinPlachkov/DeepLearning
The function **`input_data.read_data_sets(...)`** loads the entire dataset and returns an object **`tensorflow.contrib.learn.python.learn.datasets.mnist.DataSets`**. The argument **(`one_hot=True`)** creates the label arrays as 10-dimensional binary vectors (only zeros and ones), in which the index of the single one indicates the class label.
train_imgs = mnist.train.images train_labels = mnist.train.labels test_imgs = mnist.test.images test_labels = mnist.test.labels n_train = train_imgs.shape[0] n_test = test_imgs.shape[0] dim = train_imgs.shape[1] n_classes = train_labels.shape[1] print("Train Images:", train_imgs.shape) print("Train Labels:", train_labels.shape) print() print("Test Images:", test_imgs.shape) print("Test Labels:", test_labels.shape)
Train Images: (55000, 784) Train Labels: (55000, 10) Test Images: (10000, 784) Test Labels: (10000, 10)
MIT
Recurrent Neural Networks/LSTM-MNIST.ipynb
KostadinPlachkov/DeepLearning
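Since the labels are one-hot encoded, `np.argmax` recovers the class index; a minimal sketch of the encoding and its inverse:

```python
import numpy as np

label = 7
one_hot = np.eye(10)[label]     # [0. 0. 0. 0. 0. 0. 0. 1. 0. 0.]
recovered = np.argmax(one_hot)  # 7 -- argmax inverts the encoding
print(one_hot, recovered)
```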
Let's get one sample, just to understand the structure of the MNIST dataset. The next code snippet prints the **label vector** (one_hot format), **the class**, and the actual sample formatted as an **image**:
samplesIdx = [100, 101, 102] # Change these numbers here to see other samples. from mpl_toolkits.mplot3d import Axes3D fig = plt.figure() ax1 = fig.add_subplot(121) ax1.imshow(test_imgs[samplesIdx[0]].reshape([28, 28]), cmap='gray') xx, yy = np.meshgrid(np.linspace(0, 28, 28), np.linspace(0, 28, 28)) X = xx Y = yy Z = 100 * np.ones(X.shape) img = test_imgs[77].reshape([28, 28]) ax = fig.add_subplot(122, projection='3d') ax.set_zlim((0, 200)) offset = 200 for i in samplesIdx: img = test_imgs[i].reshape([28,28]).transpose() ax.contourf(X, Y, img, 200, zdir='z', offset=offset, cmap="gray") offset -= 100 ax.set_xticks([]) ax.set_yticks([]) ax.set_zticks([]) plt.show() for i in samplesIdx: print("Sample: {0} - Class: {1} - Label Vector: {2} ".format(i, np.nonzero(test_labels[i])[0], test_labels[i]))
_____no_output_____
MIT
Recurrent Neural Networks/LSTM-MNIST.ipynb
KostadinPlachkov/DeepLearning
---

Let's understand the parameters, inputs and outputs

We will treat the MNIST image $\in \mathcal{R}^{28 \times 28}$ as $28$ sequences of a vector $\mathbf{x} \in \mathcal{R}^{28}$. Our simple RNN consists of:
1. One input layer which converts a $28 \times 28$ dimensional input to a $128$ dimensional hidden layer,
2. One intermediate recurrent neural network (LSTM),
3. One output layer which converts the $128$ dimensional output of the LSTM to a $10$ dimensional output indicating a class label.
n_input = 28 # MNIST data input (img shape: 28*28). n_steps = 28 # Timesteps. n_hidden = 128 # Hidden layer number of features. n_classes = 10 # MNIST total classes (0-9 digits). learning_rate = 0.001 training_iters = 100000 batch_size = 100 display_step = 10
_____no_output_____
MIT
Recurrent Neural Networks/LSTM-MNIST.ipynb
KostadinPlachkov/DeepLearning
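Before constructing the graph, it can help to trace the intended tensor shapes with plain NumPy placeholders; this is only a shape walk-through, not part of the model:

```python
import numpy as np

batch = np.zeros((batch_size, n_steps, n_input))      # (100, 28, 28): 28 rows as a sequence
lstm_out = np.zeros((batch_size, n_steps, n_hidden))  # (100, 28, 128) after the LSTM
last_step = lstm_out[:, -1, :]                        # (100, 128): keep only the final timestep
logits = last_step @ np.zeros((n_hidden, n_classes))  # (100, 10) after the readout layer
print(batch.shape, lstm_out.shape, last_step.shape, logits.shape)
```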
Construct a Recurrent Neural Network The input should be a Tensor of shape: [batch_size, time_steps, input_dimension], but in our case it would be (?, 28, 28)
x = tf.placeholder(dtype="float", shape=[None, n_steps, n_input], name="x") # Current data input shape: (batch_size, n_steps, n_input) [100x28x28] y = tf.placeholder(dtype="float", shape=[None, n_classes], name="y")
_____no_output_____
MIT
Recurrent Neural Networks/LSTM-MNIST.ipynb
KostadinPlachkov/DeepLearning
Let's create the weights and biases for the readout layer.
weights = { 'out': tf.Variable(tf.random_normal([n_hidden, n_classes])) } biases = { 'out': tf.Variable(tf.random_normal([n_classes])) }
_____no_output_____
MIT
Recurrent Neural Networks/LSTM-MNIST.ipynb
KostadinPlachkov/DeepLearning
Let's define an LSTM cell with TensorFlow.
lstm_cell = tf.contrib.rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
_____no_output_____
MIT
Recurrent Neural Networks/LSTM-MNIST.ipynb
KostadinPlachkov/DeepLearning
__dynamic_rnn__ creates a recurrent neural network specified by __lstm_cell__:
outputs, states = tf.nn.dynamic_rnn(lstm_cell, inputs=x, dtype=tf.float32)
_____no_output_____
MIT
Recurrent Neural Networks/LSTM-MNIST.ipynb
KostadinPlachkov/DeepLearning
The output of the RNN is a [100x28x128] tensor. We take the output of the last time step and use a linear layer to map it to a [?x10] matrix.
output = tf.reshape(tf.split(outputs, 28, axis=1, num=None, name='split')[-1], [-1,128]) pred = tf.matmul(output, weights['out']) + biases['out'] pred
_____no_output_____
MIT
Recurrent Neural Networks/LSTM-MNIST.ipynb
KostadinPlachkov/DeepLearning
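As a side note, the `tf.split`/`tf.reshape` combination above simply selects the last timestep. An equivalent, arguably clearer sketch (assuming `outputs` has shape [batch, 28, 128], as returned by `dynamic_rnn` above) is plain slicing:

```python
# Equivalent to the split/reshape above: take the last timestep directly.
output = outputs[:, -1, :]                               # shape (?, 128)
pred = tf.matmul(output, weights['out']) + biases['out']
```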
Now, we define the cost function and optimizer:
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=pred)) optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
_____no_output_____
MIT
Recurrent Neural Networks/LSTM-MNIST.ipynb
KostadinPlachkov/DeepLearning
Here we define the accuracy and evaluation methods to be used in the learning process:
correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1)) accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
_____no_output_____
MIT
Recurrent Neural Networks/LSTM-MNIST.ipynb
KostadinPlachkov/DeepLearning
Just recall that we will treat the MNIST image $\in \mathcal{R}^{28 \times 28}$ as $28$ sequences of a vector $\mathbf{x} \in \mathcal{R}^{28}$.
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    step = 1
    # Keep training until we reach the maximum number of iterations.
    while step * batch_size < training_iters:
        # Read a batch of 100 images [100 x 784] as batch_x;
        # batch_y is a matrix of [100 x 10]
        batch_x, batch_y = mnist.train.next_batch(batch_size)

        # We consider each row of the image as one sequence.
        # Reshape data to get 28 sequences of 28 elements, so that batch_x is [100 x 28 x 28]
        batch_x = batch_x.reshape((batch_size, n_steps, n_input))

        # Run optimization op (backprop)
        sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})

        if step % display_step == 0:
            # Calculate batch accuracy.
            acc = sess.run(accuracy, feed_dict={x: batch_x, y: batch_y})
            # Calculate batch loss.
            loss = sess.run(cost, feed_dict={x: batch_x, y: batch_y})
            print("Iter " + str(step*batch_size) + ", Minibatch Loss= " + \
                  "{:.6f}".format(loss) + ", Training Accuracy= " + \
                  "{:.5f}".format(acc))
        step += 1
    print("Optimization Finished!")

    # Calculate accuracy for 128 mnist test images.
    test_len = 128
    test_data = mnist.test.images[:test_len].reshape((-1, n_steps, n_input))
    test_label = mnist.test.labels[:test_len]
    print("Testing Accuracy:", \
        sess.run(accuracy, feed_dict={x: test_data, y: test_label}))

sess.close()
_____no_output_____
MIT
Recurrent Neural Networks/LSTM-MNIST.ipynb
KostadinPlachkov/DeepLearning
Image Super-Resolution with Sub-Pixel Convolution

**Author:** [Ralph LU](https://github.com/ralph0813)

**Date:** 2022.4

**Abstract:** This example implements image super-resolution using sub-pixel convolution.

1. Brief Introduction

In computer vision, image super-resolution refers to recovering a high-resolution image from a low-resolution image or an image sequence. Image super-resolution techniques fall into super-resolution restoration and super-resolution reconstruction.

This example briefly shows how to implement image super-resolution with the PaddlePaddle open-source framework, covering dataset definition, model construction, and training.

Reference paper: "Real-Time Single Image and Video Super-Resolution Using an Efficient Sub-Pixel Convolutional Neural Network"

Paper link: https://arxiv.org/abs/1609.05158

2. Environment Setup

Import some commonly used basic modules and confirm your PaddlePaddle version.
import os import io import math import random import numpy as np import matplotlib.pyplot as plt from PIL import Image from IPython.display import display import paddle from paddle.io import Dataset from paddle.vision.transforms import transforms print(paddle.__version__)
2.3.0-rc0
Apache-2.0
docs/practices/cv/super_resolution_sub_pixel.ipynb
nemonameless/docs
3. Dataset

3.1 Dataset Download

This example uses the BSR_bsds500 dataset; download link: http://www.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/BSR/BSR_bsds500.tgz
!wget --no-check-certificate --no-cookies --header "Cookie: oraclelicense=accept-securebackup-cookie" http://www.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/BSR/BSR_bsds500.tgz !tar -zxvf BSR_bsds500.tgz
_____no_output_____
Apache-2.0
docs/practices/cv/super_resolution_sub_pixel.ipynb
nemonameless/docs
3.2 Dataset Overview

```
BSR
├── BSDS500
│   └── data
│       ├── groundTruth
│       │   ├── test
│       │   ├── train
│       │   └── val
│       └── images
│           ├── test
│           ├── train
│           └── val
├── bench
│   ├── benchmarks
│   ├── data
│   │   ├── ...
│   │   └── ...
│   └── source
└── documentation
```

The images we need are under the BSR/BSDS500/images folder: 200 each for train and test, and 100 for val.

3.3 Dataset Class Definition

PaddlePaddle loads data with a unified Dataset (dataset definition) + DataLoader (multi-process data loading) scheme. First we define the dataset: implement a new Dataset class that inherits from the parent class paddle.io.Dataset and implements its two abstract methods, __getitem__ and __len__:

```python
class MyDataset(Dataset):
    def __init__(self):
        ...

    # Return a sample and its label at each iteration
    def __getitem__(self, idx):
        return x, y

    # Return the total number of samples in the dataset
    def __len__(self):
        return count(samples)
```
class BSD_data(Dataset):
    """
    Subclass of paddle.io.Dataset.
    """
    def __init__(self, mode='train',image_path="BSR/BSDS500/data/images/"):
        """
        Constructor: define how the data is read and split into train and val sets.
        """
        super(BSD_data, self).__init__()
        self.mode = mode.lower()
        if self.mode == 'train':
            self.image_path = os.path.join(image_path,'train')
        elif self.mode == 'val':
            self.image_path = os.path.join(image_path,'val')
        else:
            raise ValueError('mode must be "train" or "val"')

        # Size the original image is resized to
        self.crop_size = 300
        # Upscaling factor
        self.upscale_factor = 3
        # Size fed into the network after downscaling
        self.input_size = self.crop_size // self.upscale_factor
        # numpy random seed
        self.seed=1337
        # List of images
        self.temp_images = []
        # Load the data
        self._parse_dataset()

    def transforms(self, img):
        """
        Image preprocessing helper: expands (100, 100) => (100, 100, 1)
        and transposes the image dimensions from HWC to CHW.
        """
        if len(img.shape) == 2:
            img = np.expand_dims(img, axis=2)
        return img.transpose((2, 0, 1))

    def __getitem__(self, idx):
        """
        Return the 3x-downscaled image and the original image.
        """
        # Load the original image
        img = self._load_img(self.temp_images[idx])
        # Resize the original image to 300x300
        img = img.resize([self.crop_size,self.crop_size], Image.BICUBIC)

        # Convert to a YCbCr image
        ycbcr = img.convert("YCbCr")
        # The human eye is most sensitive to luminance, so keep only the Y channel
        y, cb, cr = ycbcr.split()
        y = np.asarray(y,dtype='float32')
        y = y / 255.0

        # Apply the same steps to the downscaled image
        img_ = img.resize([self.input_size,self.input_size], Image.BICUBIC)
        ycbcr_ = img_.convert("YCbCr")
        y_, cb_, cr_ = ycbcr_.split()
        y_ = np.asarray(y_,dtype='float32')
        y_ = y_ / 255.0

        # Expand dims and convert HWC to CHW
        y = self.transforms(y)
        x = self.transforms(y_)

        # x is the 3x-downscaled image (1, 100, 100); y is the original image (1, 300, 300)
        return x, y

    def __len__(self):
        """
        Implement __len__: return the total number of samples.
        """
        return len(self.temp_images)

    def _sort_images(self, img_dir):
        """
        Sort the images in the directory by filename.
        """
        files = []
        for item in os.listdir(img_dir):
            if item.split('.')[-1].lower() in ["jpg",'jpeg','png']:
                files.append(os.path.join(img_dir, item))
        return sorted(files)

    def _parse_dataset(self):
        """
        Collect and shuffle the dataset.
        """
        self.temp_images = self._sort_images(self.image_path)
        random.Random(self.seed).shuffle(self.temp_images)

    def _load_img(self, path):
        """
        Read an image from disk.
        """
        with open(path, 'rb') as f:
            img = Image.open(io.BytesIO(f.read()))
            img = img.convert('RGB')
        return img
_____no_output_____
Apache-2.0
docs/practices/cv/super_resolution_sub_pixel.ipynb
nemonameless/docs
3.4 Dataset Sampling and Display

With the BSD_data dataset implemented, let's check that it behaves as expected. Since BSD_data can be indexed and iterated like any Dataset, we can read samples from it for display.
# Test the datasets we defined
train_dataset = BSD_data(mode='train')
val_dataset = BSD_data(mode='val')

print('=============train dataset=============')
x, y = train_dataset[0]
x = x[0]
y = y[0]
x = x * 255
y = y * 255
img_ = Image.fromarray(np.uint8(x), mode="L")
img = Image.fromarray(np.uint8(y), mode="L")
display(img_)
display(img_.size)
display(img)
display(img.size)
=============train dataset=============
Apache-2.0
docs/practices/cv/super_resolution_sub_pixel.ipynb
nemonameless/docs
4. Model Construction

Sub_Pixel_CNN is a fully convolutional network with a fairly simple structure; here we build it by subclassing the Layer class.
class Sub_Pixel_CNN(paddle.nn.Layer): def __init__(self, upscale_factor=3, channels=1): super(Sub_Pixel_CNN, self).__init__() self.conv1 = paddle.nn.Conv2D(channels,64,5,stride=1, padding=2) self.conv2 = paddle.nn.Conv2D(64,64,3,stride=1, padding=1) self.conv3 = paddle.nn.Conv2D(64,32,3,stride=1, padding=1) self.conv4 = paddle.nn.Conv2D(32,channels * (upscale_factor ** 2),3,stride=1, padding=1) def forward(self, x): x = self.conv1(x) x = self.conv2(x) x = self.conv3(x) x = self.conv4(x) x = paddle.nn.functional.pixel_shuffle(x,3) return x
_____no_output_____
Apache-2.0
docs/practices/cv/super_resolution_sub_pixel.ipynb
nemonameless/docs
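To see what `paddle.nn.functional.pixel_shuffle` does here, a minimal shape-only sketch: it rearranges an `(N, C*r^2, H, W)` tensor into `(N, C, H*r, W*r)`, trading channels for spatial resolution.

```python
import paddle

x = paddle.randn([1, 9, 100, 100])            # C * r^2 = 1 * 3^2 = 9 channels
y = paddle.nn.functional.pixel_shuffle(x, 3)  # trade 9 channels for 3x spatial size
print(x.shape, y.shape)                       # [1, 9, 100, 100] [1, 1, 300, 300]
```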
4.1 Model Wrapping
# Wrap the network with the high-level Model API
model = paddle.Model(Sub_Pixel_CNN())
W0422 19:56:08.608785 191 gpu_context.cc:244] Please NOTE: device: 0, GPU Compute Capability: 7.0, Driver API Version: 10.1, Runtime API Version: 10.1 W0422 19:56:08.614269 191 gpu_context.cc:272] device: 0, cuDNN Version: 7.6.
Apache-2.0
docs/practices/cv/super_resolution_sub_pixel.ipynb
nemonameless/docs
4.2 Model Visualization

Call the summary API provided by PaddlePaddle to visualize the assembled model, which makes it easy to inspect and confirm the model structure and parameter information.
model.summary((1,1, 100, 100))
--------------------------------------------------------------------------- Layer (type) Input Shape Output Shape Param # =========================================================================== Conv2D-1 [[1, 1, 100, 100]] [1, 64, 100, 100] 1,664 Conv2D-2 [[1, 64, 100, 100]] [1, 64, 100, 100] 36,928 Conv2D-3 [[1, 64, 100, 100]] [1, 32, 100, 100] 18,464 Conv2D-4 [[1, 32, 100, 100]] [1, 9, 100, 100] 2,601 =========================================================================== Total params: 59,657 Trainable params: 59,657 Non-trainable params: 0 --------------------------------------------------------------------------- Input size (MB): 0.04 Forward/backward pass size (MB): 12.89 Params size (MB): 0.23 Estimated Total Size (MB): 13.16 ---------------------------------------------------------------------------
Apache-2.0
docs/practices/cv/super_resolution_sub_pixel.ipynb
nemonameless/docs
5. Model Training

5.1 Start Training

Create a Model instance from the network code and use the prepare API to define the optimizer, loss function, and evaluation metrics for later training. Once this initial configuration is done, call the fit API to start the training run; when calling fit, simply pass in the training dataset defined earlier, the number of training rounds (epochs), and the batch size (batch_size).
model.prepare(paddle.optimizer.Adam(learning_rate=0.001,parameters=model.parameters()),
              paddle.nn.MSELoss()
             )

# Start training: specify the training dataset, the number of epochs,
# the batch size for each step, and the logging verbosity
model.fit(train_dataset,
          epochs=20,
          batch_size=16,
          verbose=1)
The loss value printed in the log is the current step, and the metric is the average value of previous steps. Epoch 1/20
Apache-2.0
docs/practices/cv/super_resolution_sub_pixel.ipynb
nemonameless/docs
6. Model Prediction

6.1 Prediction

We can use the model.predict API directly to run prediction on a dataset; just pass the prediction dataset to the API.
predict_results = model.predict(val_dataset)
Predict begin... step 100/100 [==============================] - 8ms/step Predict samples: 100
Apache-2.0
docs/practices/cv/super_resolution_sub_pixel.ipynb
nemonameless/docs
6.2 Define Visualization Functions for the Prediction Results
import math
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes
from mpl_toolkits.axes_grid1.inset_locator import mark_inset

def psnr(img1, img2):
    """
    Compute the PSNR between two images.
    """
    mse = np.mean( (img1/255. - img2/255.) ** 2 )
    if mse < 1.0e-10:
        return 100
    PIXEL_MAX = 1
    return 20 * math.log10(PIXEL_MAX / math.sqrt(mse))

def plot_results(img, title='results', prefix='out'):
    """
    Plot an image together with a zoomed inset.
    """
    img_array = np.asarray(img, dtype='float32')
    img_array = img_array.astype("float32") / 255.0

    fig, ax = plt.subplots()
    im = ax.imshow(img_array[::-1], origin="lower")
    plt.title(title)

    axins = zoomed_inset_axes(ax, 2, loc=2)
    axins.imshow(img_array[::-1], origin="lower")

    x1, x2, y1, y2 = 200, 300, 100, 200
    axins.set_xlim(x1, x2)
    axins.set_ylim(y1, y2)
    plt.yticks(visible=False)
    plt.xticks(visible=False)

    mark_inset(ax, axins, loc1=1, loc2=3, fc="none", ec="blue")
    plt.savefig(str(prefix) + "-" + title + ".png")
    plt.show()

def get_lowres_image(img, upscale_factor):
    """
    Downscale an image by upscale_factor.
    """
    return img.resize(
        (img.size[0] // upscale_factor, img.size[1] // upscale_factor),
        Image.BICUBIC,
    )

def upscale_image(model, img):
    '''
    Take a small image as input and return the image upsampled by a factor of 3.
    '''
    # Convert the image to YCbCr
    ycbcr = img.convert("YCbCr")
    y, cb, cr = ycbcr.split()
    y = np.asarray(y, dtype='float32')
    y = y / 255.0
    img = np.expand_dims(y, axis=0) # Expand to (1, w, h): one image
    img = np.expand_dims(img, axis=0) # Expand to (1, 1, w, h): one batch
    img = np.expand_dims(img, axis=0) # Expand to (1, 1, 1, w, h): an iterable batch
    out = model.predict(img) # predict expects an iterable of batches

    out_img_y = out[0][0][0] # Get the prediction output
    out_img_y *= 255.0

    # Convert the image back to RGB
    out_img_y = out_img_y.reshape((np.shape(out_img_y)[1], np.shape(out_img_y)[2]))
    out_img_y = Image.fromarray(np.uint8(out_img_y), mode="L")
    out_img_cb = cb.resize(out_img_y.size, Image.BICUBIC)
    out_img_cr = cr.resize(out_img_y.size, Image.BICUBIC)
    out_img = Image.merge("YCbCr", (out_img_y, out_img_cb, out_img_cr)).convert(
        "RGB"
    )
    return out_img

def main(model, img, upscale_factor=3):
    # Read the image
    with open(img, 'rb') as f:
        img = Image.open(io.BytesIO(f.read()))
    # Downscale by a factor of 3
    lowres_input = get_lowres_image(img, upscale_factor)
    w = lowres_input.size[0] * upscale_factor
    h = lowres_input.size[1] * upscale_factor
    # Upscale the downscaled image back by a factor of 3
    lowres_img = lowres_input.resize((w, h))
    # Make sure the unscaled image matches the size of the other two
    highres_img = img.resize((w, h))
    # Get the image downscaled and then upscaled by the Efficient Sub-Pixel CNN
    prediction = upscale_image(model, lowres_input)
    psmr_low = psnr(np.asarray(lowres_img), np.asarray(highres_img))
    psmr_pre = psnr(np.asarray(prediction), np.asarray(highres_img))
    # Show the three images
    plot_results(lowres_img, "lowres")
    plot_results(highres_img, "highres")
    plot_results(prediction, "prediction")
    print("psmr_low:", psmr_low, "psmr_pre:", psmr_pre)
_____no_output_____
Apache-2.0
docs/practices/cv/super_resolution_sub_pixel.ipynb
nemonameless/docs
6.3 Run Prediction

Pick one image from our prediction dataset to inspect the prediction quality, and display the original image, the downscaled image, and the prediction.
main(model,'BSR/BSDS500/data/images/test/100007.jpg')
Predict begin... step 1/1 [==============================] - 4ms/step Predict samples: 1
Apache-2.0
docs/practices/cv/super_resolution_sub_pixel.ipynb
nemonameless/docs
7. Model Saving

Save the model to checkpoint/model_final, keeping the training parameters.
model.save('checkpoint/model_final',training=True)
_____no_output_____
Apache-2.0
docs/practices/cv/super_resolution_sub_pixel.ipynb
nemonameless/docs
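To restore this checkpoint later, a minimal sketch (assuming the `Sub_Pixel_CNN` class definition from above is available) would be:

```python
# Rebuild the network and load the saved parameters (and optimizer state).
model = paddle.Model(Sub_Pixel_CNN())
model.load('checkpoint/model_final')
model.prepare(paddle.optimizer.Adam(learning_rate=0.001, parameters=model.parameters()),
              paddle.nn.MSELoss())
```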
Science Case 4: NGC3504

The galaxy NGC3504 has been observed in two unrelated ALMA projects, both in band 6 at 230 GHz, cause for an interesting comparison.

1. **2016.1.00650.S** - one 7m and two 12m observations of just NGC3504, to study flow in a bar
2. **2017.1.00964.S** - a collection of 7 galaxies with the purpose of measuring gas flow near the central black hole. For NGC3504 two datasets were collected.

Here we are focusing on the commonalities and differences between these two observations.
%matplotlib inline import matplotlib.pyplot as plt source = "NGC3504"
_____no_output_____
MIT
notebooks/Case4.ipynb
teuben/study7
astroquery.alma

First we should query the science archive. We could do this at https://almascience.nrao.edu/aq/ as well, but we also want to show it via the notebook.
from astroquery.alma import Alma
import pandas as pd

# display the whole table in the notebook
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
pd.set_option('display.width', None)
pd.set_option('display.max_colwidth',25)

# More to come here; for now we just show how much the basic query can do, before any science.
alma = Alma()
n = Alma.query_object(source)

print(pd.unique(n['proposal_id']))
print(pd.unique(n['obs_id']))
[b'2017.1.00964.S' b'2016.1.00650.S'] [b'uid://A001/X1288/Xba6' b'uid://A001/X1288/Xba8' b'uid://A001/X87a/X70a' b'uid://A001/X87a/X708' b'uid://A001/X87a/X706']
MIT
notebooks/Case4.ipynb
teuben/study7
Thus we indeed have two projects, and five observations across them. Let's print how many beams we have across the image:
ci=['obs_id','s_fov','s_resolution'] n['nres'] = 3600*n['s_fov']/n['s_resolution'] print(n[ci])
obs_id s_fov s_resolution deg deg --------------------- -------------------- -------------------- uid://A001/X1288/Xba6 0.006863718695429476 0.034121085407438106 uid://A001/X1288/Xba6 0.006863718695429476 0.034121085407438106 uid://A001/X1288/Xba6 0.006863718695429476 0.034121085407438106 uid://A001/X1288/Xba6 0.006863718695429476 0.034121085407438106 uid://A001/X1288/Xba8 0.00686371854646795 0.20065563262611036 uid://A001/X1288/Xba8 0.00686371854646795 0.20065563262611036 uid://A001/X1288/Xba8 0.00686371854646795 0.20065563262611036 uid://A001/X1288/Xba8 0.00686371854646795 0.20065563262611036 uid://A001/X87a/X70a 0.017853666133604822 5.503583966319726 uid://A001/X87a/X70a 0.017853666133604822 5.503583966319726 uid://A001/X87a/X70a 0.017853666133604822 5.503583966319726 uid://A001/X87a/X70a 0.017853666133604822 5.503583966319726 uid://A001/X87a/X708 0.01305128673735267 1.7825652390462614 uid://A001/X87a/X708 0.01305128673735267 1.7825652390462614 uid://A001/X87a/X708 0.01305128673735267 1.7825652390462614 uid://A001/X87a/X708 0.01305128673735267 1.7825652390462614 uid://A001/X87a/X706 0.013051297132328152 0.565397979165851 uid://A001/X87a/X706 0.013051297132328152 0.565397979165851 uid://A001/X87a/X706 0.013051297132328152 0.565397979165851 uid://A001/X87a/X706 0.013051297132328152 0.565397979165851
MIT
notebooks/Case4.ipynb
teuben/study7
Notice there appears to be a units issue with s_resolution: despite the column header saying deg, the values appear to be in arcsec. There is also a 'spatial_resolution' column, but it has the same issue.

astroquery.admit
from astroquery.admit import ADMIT import pandas as pd pd.set_option('display.max_rows', None) pd.set_option('display.max_columns', None) pd.set_option('display.width', None) pd.set_option('display.max_colwidth',25) a = ADMIT() a.check()
Found /home/teuben/ALMA/study7/query/admit.db Checking db.... 0 71 71 71 Database version: 27-feb-2022. core.py version: 26-feb-2022 header : 1 entries alma : 124 entries win : 123 entries lines : 33 entries sources : 769 entries
MIT
notebooks/Case4.ipynb
teuben/study7
Continuum

First we want to see if any continuum is detected, so we select all windows with one channel.
p = a.query(source_name_alma=source,nchan=1,flux='>0') print(p.shape) a.key_description.show_in_notebook(display_length=20)
_____no_output_____
MIT
notebooks/Case4.ipynb
teuben/study7
We collect a few observables: observing time, peak, flux, and resolution. Note we need to clean up the units.
ci=['obs_id','spw','nsources','t_min', 'flux', 'peak_s','fop','bmaj_arcsec','smaj_arcsec'] p['fop'] = p['flux']/p['peak_s'] p['bmaj_arcsec'] = p['bmaj'] * 3600 p['smaj_arcsec'] = p['smaj'] * 3600 print(p[ci])
obs_id spw nsources t_min flux \ 0 uid://A001/X1288/Xba6 spw21 1 58050.543677 0.000860 1 uid://A001/X1288/Xba8 spw19 1 58119.297186 0.001110 2 uid://A001/X1288/Xba8 spw21 1 58119.297186 0.001240 3 uid://A001/X1288/Xba8 spw23 1 58119.297186 0.001790 4 uid://A001/X1288/Xba8 spw25 1 58119.297186 0.001450 5 uid://A001/X87a/X706 spw23 1 57713.452563 0.001730 6 uid://A001/X87a/X706 spw25 1 57713.452563 0.001510 7 uid://A001/X87a/X706 spw27 1 57713.452563 0.001560 8 uid://A001/X87a/X706 spw29 1 57713.452563 0.001730 9 uid://A001/X87a/X708 spw23 3 57830.136178 0.007700 10 uid://A001/X87a/X708 spw23 3 57830.136178 0.013800 11 uid://A001/X87a/X708 spw23 3 57830.136178 0.024200 12 uid://A001/X87a/X708 spw25 2 57830.136178 0.010600 13 uid://A001/X87a/X708 spw25 2 57830.136178 0.009770 14 uid://A001/X87a/X708 spw27 2 57830.136178 0.008230 15 uid://A001/X87a/X708 spw27 2 57830.136178 0.014200 16 uid://A001/X87a/X708 spw29 1 57830.136178 0.008840 17 uid://A001/X87a/X70a spw16 1 57704.450990 0.015200 18 uid://A001/X87a/X70a spw18 1 57704.450990 0.015000 19 uid://A001/X87a/X70a spw20 1 57704.450990 0.018400 20 uid://A001/X87a/X70a spw22 1 57704.450990 0.016900 21 uid://A001/X1288/Xba6 spw19_21_23_25 1 58050.543677 0.000618 22 uid://A001/X1288/Xba8 spw19_21_23_25 1 58119.297186 0.001290 peak_s fop bmaj_arcsec smaj_arcsec 0 0.000151 5.695364 0.049721 0.0 1 0.000512 2.167969 0.218953 0.0 2 0.000580 2.137931 0.208199 0.0 3 0.000594 3.013468 0.206176 0.0 4 0.000706 2.053824 0.219428 0.0 5 0.001090 1.587156 0.617609 0.0 6 0.000968 1.559917 0.660397 0.0 7 0.001020 1.529412 0.655523 0.0 8 0.001260 1.373016 0.623399 0.0 9 0.002080 3.701923 1.810933 3.6 10 0.002040 6.764706 1.810933 7.2 11 0.001940 12.474227 1.810933 7.2 12 0.002120 5.000000 1.919427 7.2 13 0.001710 5.713450 1.919427 3.6 14 0.002290 3.593886 2.262012 3.6 15 0.002310 6.147186 2.262012 7.2 16 0.002280 3.877193 1.812930 3.6 17 0.009540 1.593291 6.312150 7.2 18 0.009410 1.594049 6.296568 7.2 19 0.010600 1.735849 5.772073 7.2 20 0.009970 1.695085 5.714493 7.2 21 0.000160 3.862500 0.041953 0.0 22 0.000570 2.263158 0.212872 0.0
MIT
notebooks/Case4.ipynb
teuben/study7
It's a little surprising that flux/peak is around 1.5 for the lowest and highest resolution array data, but there clearly is something very odd about the middle resolution (X708) data.
plt.scatter(p['t_min'],p['flux']); plt.title(source + " continuum lightcurve") plt.xlabel('MJD') plt.ylabel('Flux (Jy)');
_____no_output_____
MIT
notebooks/Case4.ipynb
teuben/study7
Well, the fluxes are somewhat all over the place, averaging 10-15 mJy. The other dataset of NGC3504 at MJD > 58000 seems to have lost a lot of flux.

Here is a figure of the continuum source, as seen in the three different ALMA configurations. The figures have been taken from ADMIT, including where sources were detected; ADMIT used CASA's ia.findsources().

![NGC3504](NGC3504a.png)

Spectral Lines
p = a.query(source_name_alma=source,nchan='>1',mom0flux='>0') p = a.query(nchan='>1',mom0flux='>0') print(p.shape) print(p.columns) ci=['obs_id','spw','restfreq','formula','mom0flux','mom1peak','mom2peak'] ci=['spw','restfreq','formula','mom0flux','mom1peak','vlsr','mom2peak','nlines'] print(p[ci])
spw restfreq formula mom0flux mom1peak vlsr mom2peak \ 0 spw21 243.48293 H2COH+ 1863.8500 1457.430 1447.989431 31.01830 1 spw23 244.59816 HCOOH 4235.2400 1427.340 1447.989431 31.75560 2 spw23 244.59816 HCOOH 313.3270 1420.510 1447.989431 38.15480 3 spw23 244.63395 CH3CH2OH 309.1810 1448.800 1447.989431 33.36400 4 spw25 230.53800 CO 2698.2200 1475.480 1447.989431 20.86430 5 spw23 244.93556 CS 3429.8800 1831.720 1521.106704 33.66740 6 spw25 230.53800 CO 3388.7800 1516.190 1521.106704 34.98030 7 spw23 244.93556 CS 1090.5600 1549.750 1521.106704 80.49340 8 spw25 230.53800 CO 18313.7000 1547.690 1521.106704 58.09510 9 spw16 230.53800 CO 11534.2000 639.522 628.000000 70.28280 10 spw21 242.49769 CH3COOH 477.7500 241.190 232.006873 37.10220 11 spw21 242.50962 CH3COOH 0.0000 0.000 232.006873 0.00000 12 spw21 242.90447 CH3CH2CN 4678.6600 755.657 715.580697 32.75740 13 spw23 244.93556 CS 1365.7300 969.082 715.580697 47.37710 14 spw21 242.90447 CH3CH2CN 1217.1900 751.218 715.580697 29.58540 15 spw23 244.93556 CS 405.3630 989.835 715.580697 49.21610 16 spw25 230.53800 CO 9306.8000 710.181 715.580697 62.40980 17 spw21 242.61847 CH2DCCH 1438.9200 393.771 401.856222 32.58840 18 spw21 242.63925 H2NCH2CN 1496.2700 408.513 401.856222 34.56510 19 spw25 230.53800 CO 436.5640 387.738 401.856222 10.90870 20 spw25 230.53800 CO 894.7710 397.203 401.856222 15.01770 21 spw19 228.60363 CH2CHCHO 500.2180 1008.250 978.913173 52.92020 22 spw21 243.08765 SO2 833.0270 974.240 978.913173 33.24510 23 spw23 244.22213 HCCCH 646.3020 980.356 978.913173 30.18990 24 spw21 243.12925 CH2OHCHO 370.0540 991.099 978.913173 31.06460 25 spw23 244.23979 CH3CH2CN 264.3150 949.060 978.913173 36.27090 26 spw25 230.53800 CO 323.7160 980.293 978.913173 6.99879 27 spw25 230.53800 CO 24782.4000 1548.350 1521.106704 60.99150 28 spw29 244.93556 CS 140.5190 1483.460 1521.106704 15.87380 29 spw25 230.53800 CO 32461.2000 1539.720 1521.106704 59.09890 30 spw29 244.93556 CS 107.7540 1538.800 1521.106704 31.05040 31 spw16 230.53800 CO 26393.2000 1537.470 1521.106704 40.22490 32 spw20 244.93556 CS 91.1055 1544.310 1521.106704 24.96880 nlines 0 1 1 1 2 2 3 2 4 1 5 1 6 1 7 1 8 1 9 1 10 2 11 2 12 1 13 1 14 1 15 1 16 1 17 2 18 2 19 1 20 1 21 1 22 1 23 1 24 1 25 1 26 1 27 1 28 1 29 1 30 1 31 1 32 1
MIT
notebooks/Case4.ipynb
teuben/study7
[SOLUTION] Attention Basics

In this notebook, we look at how attention is implemented. We will focus on implementing attention in isolation from a larger model. That's because when implementing attention in a real-world model, a lot of the focus goes into piping the data and juggling the various vectors rather than the concepts of attention themselves. We will implement attention scoring as well as calculating an attention context vector.

Attention Scoring

Inputs to the scoring function

Let's start by looking at the inputs we'll give to the scoring function. We will assume we're in the first step in the decoding phase. The first input to the scoring function is the hidden state of the decoder (assuming a toy RNN with three hidden nodes -- not usable in real life, but easier to illustrate):
dec_hidden_state = [5,1,20]
_____no_output_____
MIT
attention/Attention_Basics_Solution.ipynb
Muhanad23/udacity-deep-learning-nanodegree
Let's visualize this vector:
%matplotlib inline import numpy as np import matplotlib.pyplot as plt import seaborn as sns # Let's visualize our decoder hidden state plt.figure(figsize=(1.5, 4.5)) sns.heatmap(np.transpose(np.matrix(dec_hidden_state)), annot=True, cmap=sns.light_palette("purple", as_cmap=True), linewidths=1)
_____no_output_____
MIT
attention/Attention_Basics_Solution.ipynb
Muhanad23/udacity-deep-learning-nanodegree
Our first scoring function will score a single annotation (encoder hidden state), which looks like this:
annotation = [3,12,45] #e.g. Encoder hidden state # Let's visualize the single annotation plt.figure(figsize=(1.5, 4.5)) sns.heatmap(np.transpose(np.matrix(annotation)), annot=True, cmap=sns.light_palette("orange", as_cmap=True), linewidths=1)
_____no_output_____
MIT
attention/Attention_Basics_Solution.ipynb
Muhanad23/udacity-deep-learning-nanodegree
IMPLEMENT: Scoring a Single Annotation

Let's calculate the dot product of a single annotation. NumPy's [dot()](https://docs.scipy.org/doc/numpy/reference/generated/numpy.dot.html) is a good candidate for this operation.
def single_dot_attention_score(dec_hidden_state, enc_hidden_state): # TODO: return the dot product of the two vectors return np.dot(dec_hidden_state, enc_hidden_state) single_dot_attention_score(dec_hidden_state, annotation)
_____no_output_____
MIT
attention/Attention_Basics_Solution.ipynb
Muhanad23/udacity-deep-learning-nanodegree
Annotations Matrix

Let's now look at scoring all the annotations at once. To do that, here's our annotation matrix:
annotations = np.transpose([[3,12,45], [59,2,5], [1,43,5], [4,3,45.3]])
_____no_output_____
MIT
attention/Attention_Basics_Solution.ipynb
Muhanad23/udacity-deep-learning-nanodegree
And it can be visualized like this (each column is a hidden state of an encoder time step):
# Let's visualize our annotation (each column is an annotation) ax = sns.heatmap(annotations, annot=True, cmap=sns.light_palette("orange", as_cmap=True), linewidths=1)
_____no_output_____
MIT
attention/Attention_Basics_Solution.ipynb
Muhanad23/udacity-deep-learning-nanodegree
IMPLEMENT: Scoring All Annotations at Once

Let's calculate the scores of all the annotations in one step using matrix multiplication. Let's continue to use the dot scoring method.

To do that, we'll have to transpose `dec_hidden_state` and [matrix multiply](https://docs.scipy.org/doc/numpy/reference/generated/numpy.matmul.html) it with `annotations`.
def dot_attention_score(dec_hidden_state, annotations): # TODO: return the product of dec_hidden_state transpose and enc_hidden_states return np.matmul(np.transpose(dec_hidden_state), annotations) attention_weights_raw = dot_attention_score(dec_hidden_state, annotations) attention_weights_raw
_____no_output_____
MIT
attention/Attention_Basics_Solution.ipynb
Muhanad23/udacity-deep-learning-nanodegree
Looking at these scores, can you guess which of the four vectors will get the most attention from the decoder at this time step?

Softmax

Now that we have our scores, let's apply softmax:
def softmax(x):
    x = np.array(x, dtype=np.float64)
    # subtract the max before exponentiating for numerical stability:
    # np.exp(929) overflows float64, and np.float128 is not available on every platform
    e_x = np.exp(x - np.max(x))
    return e_x / e_x.sum(axis=0)

attention_weights = softmax(attention_weights_raw)
attention_weights
_____no_output_____
MIT
attention/Attention_Basics_Solution.ipynb
Muhanad23/udacity-deep-learning-nanodegree
Even knowing which annotation will get the most focus, it's interesting to see how drastically softmax separates the final scores. The first and last annotations had raw scores of 927 and 929, respectively. But after softmax, the attention they'll get is 0.119 and 0.880, respectively.

Applying the scores back on the annotations

Now that we have our scores, let's multiply each annotation by its score to move closer to the attention context vector. This is the multiplication part of this formula (we'll tackle the summation part in later cells).
def apply_attention_scores(attention_weights, annotations):
    # TODO: Multiply the annotations by their weights
    return attention_weights * annotations

applied_attention = apply_attention_scores(attention_weights, annotations)
applied_attention
_____no_output_____
MIT
attention/Attention_Basics_Solution.ipynb
Muhanad23/udacity-deep-learning-nanodegree
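Before moving on, here is a quick sanity check on the 0.119 / 0.880 figures quoted above. The two dominant raw scores, 927 and 929, differ by exactly 2, and the two middle scores are so far behind that their softmax weights vanish, so the split reduces to a two-way ratio of e^2 (a minimal sketch, not part of the original notebook):

# with the middle annotations negligible, softmax reduces to a two-way split governed by e^2
two_way = np.exp(2) / (1 + np.exp(2))
print(two_way)       # ~0.8808, the weight of the last annotation
print(1 - two_way)   # ~0.1192, the weight of the first annotation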
Let's visualize how the context vector looks now that we've applied the attention scores back on it:
# Let's visualize our annotations after applying attention to them ax = sns.heatmap(applied_attention, annot=True, cmap=sns.light_palette("orange", as_cmap=True), linewidths=1)
_____no_output_____
MIT
attention/Attention_Basics_Solution.ipynb
Muhanad23/udacity-deep-learning-nanodegree
Contrast this with the raw annotations visualized earlier in the notebook, and we can see that the second and third annotations (columns) have been nearly wiped out. The first annotation maintains some of its value, and the fourth annotation is the most pronounced.

Calculating the Attention Context Vector

All that remains now is to sum up the four columns to produce a single attention context vector.
def calculate_attention_vector(applied_attention): return np.sum(applied_attention, axis=1) attention_vector = calculate_attention_vector(applied_attention) attention_vector # Let's visualize the attention context vector plt.figure(figsize=(1.5, 4.5)) sns.heatmap(np.transpose(np.matrix(attention_vector)), annot=True, cmap=sns.light_palette("Blue", as_cmap=True), linewidths=1)
_____no_output_____
MIT
attention/Attention_Basics_Solution.ipynb
Muhanad23/udacity-deep-learning-nanodegree
Open Machine Learning Course

Author: Mariya Mansurova, Analyst & developer in Yandex.Metrics team. Translated by Ivan Zakharov, ML enthusiast.

All content is distributed under the [Creative Commons CC BY-NC-SA 4.0](https://creativecommons.org/licenses/by-nc-sa/4.0/) license.

Assignment 9 (demo): Time series analysis

**Fill cells marked with "Your code here" and submit your answers to the questions through the [web form](https://goo.gl/forms/Hu0thpYw2rXLfEjI2).**
import os
import requests

import pandas as pd

from plotly import __version__
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
from plotly import graph_objs as go

print(__version__)  # need 1.9.0 or greater

init_notebook_mode(connected=True)

def plotly_df(df, title=''):
    data = []
    for column in df.columns:
        trace = go.Scatter(
            x=df.index,
            y=df[column],
            mode='lines',
            name=column
        )
        data.append(trace)
    layout = dict(title=title)
    fig = dict(data=data, layout=layout)
    iplot(fig, show_link=False)
2.7.0
MIT
assignments/demo/assignment09_time_series.ipynb
geomars/oldschool
Data preparation

First, read the data in as a `dataframe`. Today we will predict the number of views of the [Machine Learning](https://en.wikipedia.org/wiki/Machine_learning) wiki page. I downloaded the data using the [Wikipediatrend](https://www.r-bloggers.com/using-wikipediatrend/) library for `R`.
df = pd.read_csv('../../data/wiki_machine_learning.csv', sep = ' ') df = df[df['count'] != 0] df.head() df.shape df.date = pd.to_datetime(df.date) plotly_df(df.set_index('date')[['count']])
_____no_output_____
MIT
assignments/demo/assignment09_time_series.ipynb
geomars/oldschool
Predicting with Facebook Prophet

We will build a prediction using the simple library `Facebook Prophet`. In order to evaluate the quality of the model, we drop the last 30 days from the training sample.
from fbprophet import Prophet

predictions = 30

df = df[['date', 'count']]
df.columns = ['ds', 'y']

train_df = df[:-predictions].copy()

# Your code here
_____no_output_____
MIT
assignments/demo/assignment09_time_series.ipynb
geomars/oldschool
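One way the cell above could be completed, as a minimal sketch using the standard `fbprophet` API (the `m`, `future`, and `forecast` names are illustrative, not from the original):

# fit Prophet on the training part and forecast the 30 held-out days
m = Prophet()
m.fit(train_df)
future = m.make_future_dataframe(periods=predictions)
forecast = m.predict(future)
forecast[['ds', 'yhat', 'yhat_lower', 'yhat_upper']].tail()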
**Question 1:** What is the prediction of the number of views of the wiki page on January 20? Round to the nearest integer.
- 4947
- 3833
- 5229
- 2744

Estimate the quality of the prediction with the last 30 points.
# Your code here
_____no_output_____
MIT
assignments/demo/assignment09_time_series.ipynb
geomars/oldschool
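A sketch of one way to fill in the quality check above, joining the forecast to the actual values of the held-out 30 days; the `cmp_df` name is an assumption, and MAPE and MAE are computed from their textbook definitions:

import numpy as np

# align predictions with the actual values of the last 30 days
cmp_df = forecast.set_index('ds')[['yhat']].join(df.set_index('ds'))
cmp_df = cmp_df[-predictions:]
cmp_df['e'] = cmp_df['y'] - cmp_df['yhat']     # error
cmp_df['p'] = 100 * cmp_df['e'] / cmp_df['y']  # percentage error
print('MAPE =', round(np.mean(abs(cmp_df['p'])), 2))
print('MAE =', round(np.mean(abs(cmp_df['e'])), 2))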
**Question 2:** What is MAPE equal to?
- 38.38
- 42.42
- 5.39
- 65.91

**Question 3:** What is MAE equal to?
- 355
- 4007
- 713
- 903

Predicting with ARIMA
%matplotlib inline import matplotlib.pyplot as plt from scipy import stats import statsmodels.api as sm
_____no_output_____
MIT
assignments/demo/assignment09_time_series.ipynb
geomars/oldschool
**Question 4:** Let's verify the stationarity of the series using the Dickey-Fuller test. Is the series stationary? What is the p-value?
- Series is stationary, p_value = 0.107
- Series is not stationary, p_value = 0.107
- Series is stationary, p_value = 0.001
- Series is not stationary, p_value = 0.001
# Your code here
_____no_output_____
MIT
assignments/demo/assignment09_time_series.ipynb
geomars/oldschool
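A minimal sketch of the Dickey-Fuller check for the cell above, using the `statsmodels` import already in scope as `sm` (the null hypothesis of `adfuller` is that the series has a unit root, i.e. is non-stationary):

# adfuller returns (test statistic, p-value, ...); index 1 is the p-value
print('p-value: %.3f' % sm.tsa.stattools.adfuller(df['y'])[1])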
**Question 5:** Next, we turn to the construction of the SARIMAX model (`sm.tsa.statespace.SARIMAX`). What is the best set of parameters (among listed) for the SARIMAX model according to the `AIC` criterion?
- D = 1, d = 0, Q = 0, q = 2, P = 3, p = 1
- D = 2, d = 1, Q = 1, q = 2, P = 3, p = 1
- D = 1, d = 1, Q = 1, q = 2, P = 3, p = 1
- D = 0, d = 0, Q = 0, q = 2, P = 3, p = 1
# Your code here
_____no_output_____
MIT
assignments/demo/assignment09_time_series.ipynb
geomars/oldschool
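A sketch of a small AIC comparison over just the four (p, d, q, P, D, Q) combinations listed in the answer options; the weekly seasonal period of 7 is an assumption based on the daily data, and the variable names are illustrative:

import numpy as np

# the four (p, d, q, P, D, Q) combinations from the answer options
candidates = [(1, 0, 2, 3, 1, 0), (1, 1, 2, 3, 2, 1),
              (1, 1, 2, 3, 1, 1), (1, 0, 2, 3, 0, 0)]
best_aic, best_params = float('inf'), None
for p, d, q, P, D, Q in candidates:
    try:
        model = sm.tsa.statespace.SARIMAX(df['y'], order=(p, d, q),
                                          seasonal_order=(P, D, Q, 7)).fit(disp=-1)
    except (ValueError, np.linalg.LinAlgError):
        continue  # skip combinations that fail to converge
    if model.aic < best_aic:
        best_aic, best_params = model.aic, (p, d, q, P, D, Q)
print(best_params, best_aic)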
Bruner Eduardo Augusto Albrecht

Case - Data Engineering

After a purchase is made through Olist, the seller receives a notification to start processing the order and the customer receives an estimated delivery date. Possibly, not all of the company's decisions were correct in terms of product portfolio and profitability.

Present an analysis of the company's sales, based on the data in the dataset, and from it find solutions to improve the company's results. Such suggestions can be along the lines of **logistics, marketing, sales, products, among others**.

Setting up the notebook and loading the data
# Loading the libraries
import pandas as pd
import matplotlib.pyplot as plt
print("Setup Complete")

# Loading the datasets into dataframes
# Geographic information about the sellers
sellers = pd.read_csv('sellers_dataset.csv', index_col='seller_id', parse_dates=True)
# Geographic information about the customers
customers = pd.read_csv('customers_dataset.csv', index_col='customer_unique_id', parse_dates=True)
# Purchase orders
orders = pd.read_csv('orders_dataset.csv', index_col='order_id', parse_dates=True)
# Items included in each purchase order
items = pd.read_csv('order_items_dataset.csv', index_col='order_id', parse_dates=True)
# Payment methods, amounts and installments
payments = pd.read_csv('order_payments_dataset.csv', index_col='order_id', parse_dates=True)

# Checking for possible null values in sellers
print(sellers.info())
sellers.head()

# Checking for possible null values in customers
print(customers.info())
customers.head()

# Checking for possible null values in payments
print(payments.info())
payments.head()

# Checking for possible null values in orders
print(orders.info())
orders.head()

# Checking for possible null values in items
print(items.info())
items.head()
<class 'pandas.core.frame.DataFrame'> Index: 112650 entries, 00010242fe8c5a6d1ba2dd792cb16214 to fffe41c64501cc87c801fd61db3f6244 Data columns (total 6 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 order_item_id 112650 non-null int64 1 product_id 112650 non-null object 2 seller_id 112650 non-null object 3 shipping_limit_date 112650 non-null object 4 price 112650 non-null float64 5 freight_value 112650 non-null float64 dtypes: float64(2), int64(1), object(3) memory usage: 6.0+ MB None
MIT
DataScience/Olist/EnfaseLabs.ipynb
brunereduardo/DataPortfolio
Analysis for Sales:

>**Sales by State**
orders_by_state = pd.merge(left = customers, right = orders, on='customer_id', right_index=False) orders_by_state result = orders_by_state.groupby(['customer_state'])['customer_id'].count() print(result) result.plot()
customer_state AC 81 AL 413 AM 148 AP 68 BA 3380 CE 1336 DF 2140 ES 2033 GO 2020 MA 747 MG 11635 MS 715 MT 907 PA 975 PB 536 PE 1652 PI 495 PR 5045 RJ 12852 RN 485 RO 253 RR 46 RS 5466 SC 3637 SE 350 SP 41746 TO 280 Name: customer_id, dtype: int64
MIT
DataScience/Olist/EnfaseLabs.ipynb
brunereduardo/DataPortfolio
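A line plot is hard to read for categorical state labels; a small sketch of a sorted bar-chart alternative to the plot above (illustrative, not in the original):

# sort states by order count so the ranking is visible at a glance
ax = result.sort_values(ascending=False).plot.bar(title='Orders by customer state')
ax.set_ylabel('Number of orders')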
>**Sales per day/week/month on the marketplace:**
> * It would be interesting to know each of the fields when building the dataframe, since then we could apply the proper dtype to cast the data into its best form
orders["Day"] = orders["order_approved_at"].apply(lambda x: pd.Timestamp(x).day_name()) #orders["Day"] = pd.to_datetime(orders["order_purchase_timestamp"]).dt.isocalendar().day #dias da semana em números,1->7 orders["Week"] = pd.to_datetime(orders["order_approved_at"]).dt.isocalendar().week orders["Month"] = orders["order_approved_at"].apply(lambda x: pd.Timestamp(x).month_name()) orders orders_by_day = orders.groupby(['Day']).size() print(orders_by_day) #necessita colocar título, ylabel, e organizar as labels pro x orders_by_day.plot.bar() orders_by_month = orders.groupby(['Month']).size() print(orders_by_month) #necessita colocar título, ylabel, e organizar as labels pro x orders_by_month.plot.barh() orders_by_week = orders.groupby(['Week']).size() #print(orders_by_week) #necessita colocar título e ylabel orders_by_week.plot()
_____no_output_____
MIT
DataScience/Olist/EnfaseLabs.ipynb
brunereduardo/DataPortfolio
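The TODO comments above note that the weekday bars come out in alphabetical order. A minimal sketch of the reindex fix (the `weekday_order` name is illustrative):

# put the weekday bars in calendar order instead of alphabetical order
weekday_order = ['Monday', 'Tuesday', 'Wednesday', 'Thursday',
                 'Friday', 'Saturday', 'Sunday']
ax = orders_by_day.reindex(weekday_order).plot.bar(title='Orders by day of week')
ax.set_ylabel('Number of orders')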
>**What quantities of each product does each seller sell?**
products_by_sellers = items.groupby(['seller_id', 'product_id']).size().reset_index()
products_by_sellers = products_by_sellers.rename(columns={'seller_id': 'Seller_id', 'product_id': 'Product_id', 0: 'Qntd'})
# sort_values returns a new frame, so the result has to be assigned back
products_by_sellers = products_by_sellers.sort_values('Qntd', ascending=False)
products_by_sellers
_____no_output_____
MIT
DataScience/Olist/EnfaseLabs.ipynb
brunereduardo/DataPortfolio
>**Conversion rate (%) = (number of orders for a given seller / number of visitors to the site) x 100**
> * Taking into account the number of orders already shipped (or whose payment was already approved), divided by the number of customer_unique_id values
items

# merge into a new table
number_of_clients = len(customers.index)

# size() avoids the name clash of counting 'order_id' inside a groupby keyed on 'order_id'
orders_by_sellers = items.groupby(['seller_id', 'order_id']).size().reset_index(name='Qntd')
orders_by_sellers = orders_by_sellers.rename(columns={'seller_id': 'Seller_id', 'order_id': 'Order_id'})
orders_by_sellers = orders_by_sellers.sort_values('Qntd', ascending=False)
orders_by_sellers
_____no_output_____
MIT
DataScience/Olist/EnfaseLabs.ipynb
brunereduardo/DataPortfolio
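The cell above stops short of applying the conversion-rate formula itself. A minimal sketch of that last step, under the assumption stated in the note that `number_of_clients` (the count of customer_unique_id values) stands in for the number of site visitors; the `conversion_by_seller` name is hypothetical:

# distinct orders per seller over total unique customers, as a percentage
conversion_by_seller = (
    orders_by_sellers.groupby('Seller_id')['Order_id'].nunique()
    / number_of_clients * 100
)
conversion_by_seller.sort_values(ascending=False).head(10)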
>**Which payment method stands out?**
most_payments_methods = payments.groupby(['payment_type'])['payment_type'].count()
most_payments_methods.plot.bar()
_____no_output_____
MIT
DataScience/Olist/EnfaseLabs.ipynb
brunereduardo/DataPortfolio
Analysis for Products:

> **Which products are bought the most (top 10 most purchased products)**
top_items = items.groupby(['product_id']).size().reset_index() top_items = top_items.set_index('product_id') top_items = top_items.rename(columns={0:'Qntd'}) top_items = top_items.sort_values('Qntd', ascending=False) top_items.head(10) #top_items.head(10).plot.barh()
_____no_output_____
MIT
DataScience/Olist/EnfaseLabs.ipynb
brunereduardo/DataPortfolio