Dataset columns (string lengths): markdown (0-1.02M), code (0-832k), output (0-1.02M), license (3-36), path (6-265), repo_name (6-127)
General descriptors: RecordID (a unique integer for each ICU stay), Age (years), Gender (0: female, or 1: male), Height (cm), ICUType (1: Coronary Care Unit, 2: Cardiac Surgery Recovery Unit, 3: Medical ICU, or 4: Surgical ICU), Weight (kg).

Time-series variables:

| Variable | Description |
| --- | --- |
| ALB | Albumin (g/dL) |
| ALP | Alkaline phosphatase (IU/L) |
| ALT | Alanine transaminase (IU/L) |
| AST | Aspartate transaminase (IU/L) |
| BIL | Bilirubin (mg/dL) |
| BUN | Blood urea nitrogen (mg/dL) |
| CHO | Cholesterol (mg/dL) |
| CREA | Serum creatinine (mg/dL) |
| DBP | Invasive diastolic arterial blood pressure (mmHg) |
| FIO | Fractional inspired O2 (0-1) |
| GCS | Glasgow Coma Score (3-15) |
| GLU | Serum glucose (mg/dL) |
| HCO | Serum bicarbonate (mmol/L) |
| HCT | Hematocrit (%) |
| HR | Heart rate (bpm) |
| K | Serum potassium (mEq/L) |
| LAC | Lactate (mmol/L) |
| MG | Serum magnesium (mmol/L) |
| MAP | Invasive mean arterial blood pressure (mmHg) |
| MEVE | Mechanical ventilation respiration |
| NA | Serum sodium (mEq/L) |
| NBP | Non-invasive diastolic arterial blood pressure (mmHg) |
| NAP | Non-invasive mean arterial blood pressure (mmHg) |
| NSP | Non-invasive systolic arterial blood pressure (mmHg) |
| PCO | Partial pressure of arterial CO2 (mmHg) |
| PO2 | Partial pressure of arterial O2 (mmHg) |
| PH | Arterial pH (0-14) |
| PLA | Platelets (cells/nL) |
| RRA | Respiration rate (bpm) |
| SO2 | O2 saturation in hemoglobin (%) |
| SBP | Invasive systolic arterial blood pressure (mmHg) |
| TEM | Temperature (°C) |
| TRI | Troponin-I (μg/L) |
| TRT | Troponin-T (μg/L) |
| URI | Urine output (mL) |
| WBC | White blood cell count (cells/nL) |
| WEI | Weight (kg) |
# Open file with open('./training_set_a/132539.txt') as inputfile: results = list(csv.reader(inputfile)) # Open file in list of list results = pd.DataFrame(results[1:],columns=results[0]) # Convert list of list to DataFrame results.Value = results.Value.astype(float) # Change Value to float results.head(8) # Pivot_table to convert from long to wide dataset # Creation of new features - aggregate across the time series to find mean, min, max values # mean is chosen rather than median because we want to take into the account of 'outlier values' wide_result = pd.pivot_table(results,values=['Value'],columns='Parameter',aggfunc=[np.mean,np.min,np.max]) wide_result.columns = wide_result.columns.droplevel(level=0) new_columns = [] for ind, col in enumerate(wide_result.columns): if ind < wide_result.columns.shape[0]/3: col = 'mean_'+col new_columns.append(col) elif ind >= wide_result.columns.shape[0]/3 and ind < 2*wide_result.columns.shape[0]/3: col = 'min_'+col new_columns.append(col) else: col = 'max_'+col new_columns.append(col) print new_columns # rename the columns and lower capitalise #new_columns = [u'Age', u'mean_BUN', u'mean_Creatinine', u'mean_GCS', u'Gender', u'mean_Glucose', u'mean_HCO3', # u'mean_HCT', u'mean_HR', u'Height', u'ICUType', u'mean_K', u'mean_Mg', u'mean_NIDiasABP', # u'mean_NIMAP', u'mean_NISysABP', u'mean_Na', u'mean_Platelets', u'RecordID', u'mean_RespRate', # u'mean_Temp', u'mean_Urine', u'mean_WBC', u'mean_Weight', u'min_Age', u'min_BUN', u'min_Creatinine', # u'min_GCS', u'min_Gender', u'min_Glucose', u'min_HCO3', u'min_HCT', u'min_HR', u'min_Height', # u'min_ICUType', u'min_K', u'min_Mg', u'min_NIDiasABP', u'min_NIMAP', u'min_NISysABP', u'min_Na', # u'min_Platelets', u'min_RecordID', u'min_RespRate', u'min_Temp', u'min_Urine', u'min_WBC', #u'min_Weight', u'max_Age', u'max_BUN', u'max_Creatinine', u'max_GCS', u'max_Gender', u'max_Glucose', #u'max_HCO3', u'max_HCT', u'max_HR', u'max_Height', u'max_ICUType', u'max_K', u'max_Mg', #u'max_NIDiasABP', u'max_NIMAP', u'max_NISysABP', u'max_Na', u'max_Platelets', u'max_RecordID', #u'max_RespRate', u'max_Temp', u'max_Urine', u'max_WBC', u'max_Weight'] wide_result.columns = new_columns wide_result.columns = wide_result.columns.str.lower() wide_result.head() # rename descriptor row wide_result.rename(columns={'mean_age':'age','mean_gender':'gender','mean_height':'height', 'mean_icutype':'icutype','mean_recordid':'recordid'},inplace=True) wide_result.columns # drop descriptor rows wide_result.drop(['min_age','max_age','min_gender','max_gender','min_height','max_height' ,'min_icutype','max_icutype','min_recordid','max_recordid'],axis=1,inplace=True) wide_result.set_index(['recordid'],inplace = True) wide_result main = pd.DataFrame() main = main.append(wide_result) main # Open each file as result with open('./training_set_a/132599.txt') as inputfile: data = list(csv.reader(inputfile)) # list of list data = pd.DataFrame(data[1:],columns=data[0]) # Convert list of list to DataFrame data.Value = data.Value.astype(float) # Change Value to float # Pivot_table to convert from long to wide dataset # Creation of new features - aggregate across the time series to find mean, min, max values # mean is chosen rather than median because we want to take into the account of 'outlier values' wide_data = pd.pivot_table(data,values=['Value'],columns='Parameter',aggfunc=[np.mean,np.min,np.max]) wide_data.columns = wide_data.columns.droplevel(level=0) # rename new columns & lower capitalise new_columns = [] for ind, col in enumerate(wide_data.columns): 
if ind < wide_data.columns.shape[0]/3: col = 'mean_'+col new_columns.append(col) elif ind >= wide_data.columns.shape[0]/3 and ind < 2*wide_data.columns.shape[0]/3: col = 'min_'+col new_columns.append(col) else: col = 'max_'+col new_columns.append(col) wide_data.columns = new_columns wide_data.columns = wide_data.columns.str.lower() # rename descriptor row wide_data.rename(columns={'mean_age':'age','mean_gender':'gender','mean_height':'height', 'mean_icutype':'icutype','mean_recordid':'recordid'},inplace=True) # drop min/max descriptor rows wide_data.drop(['min_age','max_age','min_gender','max_gender','min_height','max_height', 'min_icutype','max_icutype','min_recordid','max_recordid'],axis=1,inplace=True) # set recordid as index wide_data.set_index(['recordid'],inplace = True) main = main.append(wide_data) main for col in main.columns: print col #wide_result.reset_index(inplace=True) #wide_result.drop('index',axis=1,inplace=True) # Pivot_table to convert from long to wide dataset #wide_result = pd.pivot_table(results,values=['Value'],columns='Parameter',index=['Time']) #wide_result.columns = wide_result.columns.droplevel(level=0) #wide_result.reset_index(inplace=True) # Trying to convert time to an 'aggreable' data type #def str_time2(time): # hours, minutes = map(int, time.split(':')) # time = (hours,minutes) # return time #def str_time(time): # hours, minutes = map(int, time.split(':')) # time = time.format(int(hours),int(minutes)) # return time #for time in wide_result.index: # hours, minutes = map(int, time.split(':')) # time = (hours,minutes) # print time #wide_result.Time = wide_result.Time.apply(str_time) #class patient_details(object): # """Run description of the patient when admitted on the 48th hour""" # def __init__(self,df = wide_result): # self.record_id = df[df.Time == '00:00']['RecordID'][0] # self.age = df[df.Time == '00:00']['Age'][0] # self.gender = df[df.Time == '00:00']['Gender'][0] # self.height = df[df.Time == '00:00']['Height'][0] # self.ICUtype = df[df.Time == '00:00']['ICUType'][0] # def fill(self,df = wide_result,details='RecordID'): # """Filling of the NaN values with patient's details can be automated # by specifying the descriptor(column) in **kwargs """ # """Default set as RecordID""" # wide_result[details].fillna(value=df[df.Time == '00:00'][details][0],inplace=True) # Initiate the class patient_details #patient = patient_details() # Fill NaN values in respective descriptor columns #patient.fill() #patient.fill(details='Age') #patient.fill(details='Gender') #patient.fill(details='Height') #patient.fill(details='ICUType') # change all column names to lower key #wide_result.columns = wide_result.columns.str.lower() # Connect to database conn = psycopg2.connect(host="localhost",dbname="mortality") cur = conn.cursor()
_____no_output_____
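The next cell repeats the same per-file processing for a second record. As a minimal sketch (not the notebook's exact `pivot_table` approach), the aggregation could be wrapped in a helper built on `groupby` and applied to every file in the training directory; the descriptor handling (recordid, age, gender, height, icutype) would then follow the same rename/drop steps shown above:

```
import glob
import pandas as pd

def aggregate_record(path):
    """Read one ICU record (long format: Time, Parameter, Value) and
    aggregate each parameter to its mean/min/max over the stay."""
    df = pd.read_csv(path)
    df['Value'] = df['Value'].astype(float)
    agg = df.groupby('Parameter')['Value'].agg(['mean', 'min', 'max'])
    row = {}
    for param in agg.index:
        for stat in ['mean', 'min', 'max']:
            row['{}_{}'.format(stat, param.lower())] = agg.loc[param, stat]
    return pd.DataFrame([row])

# Apply to every record in the folder and stack into one frame:
# main = pd.concat([aggregate_record(f) for f in glob.glob('./training_set_a/*.txt')],
#                  ignore_index=True)
```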
CC-BY-3.0
__Project Files/.ipynb_checkpoints/Data Cleaning_merge all data together_backup-checkpoint.ipynb
joannasys/Predictions-of-ICU-Mortality
EDA
1. Check if the data is imbalanced
# Open outcomes file with open('./training_outcomes_a.txt') as outcomefile: # Open file in list of list outcome = list(csv.reader(outcomefile)) outcome = pd.DataFrame(outcome[1:],columns=outcome[0]) # Convert list of list to DataFrame outcome = outcome.astype(float,'ignore') # Change values to float # Count the number of positives in dataset # Positives = 1 = Death, Negative = 0 = Survived def imbalance_check(column,labels): """labels can be a list or a tuple.""" for x in labels: label = float(column[column == x].count()) total = float(column.count()) percentage = float((label/total)*100) print 'percentage of',x,'in dataset:',percentage,'%' imbalance_check(outcome['In-hospital_death'],[0,1]) # Conclude that this is an imbalanced dataset
percentage of 0 in dataset: 86.15 %
percentage of 1 in dataset: 13.85 %
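For reference, the same balance check can be read directly off the Series with pandas; the percentages below simply restate the printed output above:

```
# Equivalent one-liner for the class balance check
outcome['In-hospital_death'].value_counts(normalize=True) * 100
# 0.0    86.15
# 1.0    13.85
```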
CC-BY-3.0
__Project Files/.ipynb_checkpoints/Data Cleaning_merge all data together_backup-checkpoint.ipynb
joannasys/Predictions-of-ICU-Mortality
2. Create outcomes table in database
outcome.head(5)
# pd.to_sql() was left here as a placeholder for writing the table; see the sketch after this cell's output.
_____no_output_____
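A sketch of what the table-creation step could look like, assuming the same local Postgres database as the psycopg2 connection opened earlier (host=localhost, dbname=mortality); `DataFrame.to_sql` expects a SQLAlchemy engine rather than a raw psycopg2 connection:

```
# Sketch: write the outcomes DataFrame to Postgres via SQLAlchemy
# (the connection URL is an assumption based on the psycopg2 connect call above)
from sqlalchemy import create_engine

engine = create_engine('postgresql://localhost/mortality')
outcome.to_sql('outcomes', engine, if_exists='replace', index=False)
```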
CC-BY-3.0
__Project Files/.ipynb_checkpoints/Data Cleaning_merge all data together_backup-checkpoint.ipynb
joannasys/Predictions-of-ICU-Mortality
Copyright 2018 The TensorFlow Authors. Licensed under the Apache License, Version 2.0 (the "License").

Neural Machine Translation with Attention

This notebook trains a sequence-to-sequence (seq2seq) model for Spanish-to-English translation using [tf.keras](https://www.tensorflow.org/programmers_guide/keras) and [eager execution](https://www.tensorflow.org/programmers_guide/eager). This is an advanced example that assumes some knowledge of sequence-to-sequence models. After training the model in this notebook, you will be able to input a Spanish sentence, such as *"¿todavia estan en casa?"*, and return the English translation: *"are you still at home?"* The translation quality is reasonable for a toy example, but the generated attention plot is perhaps more interesting: it shows which parts of the input sentence have the model's attention while translating. Note: This example takes approximately 10 minutes to run on a single P100 GPU.
from __future__ import absolute_import, division, print_function # Import TensorFlow >= 1.9 and enable eager execution import tensorflow as tf tf.enable_eager_execution() import matplotlib.pyplot as plt from sklearn.model_selection import train_test_split import unicodedata import re import numpy as np import os import time print(tf.__version__)
_____no_output_____
Apache-2.0
notebooks/eager/nmt_w_attention.ipynb
cnodadiaz/tf-workshop
Download and prepare the dataset

We'll use a language dataset provided by http://www.manythings.org/anki/. This dataset contains language translation pairs in the format:

```
May I borrow this book? ¿Puedo tomar prestado este libro?
```

There are a variety of languages available, but we'll use the English-Spanish dataset. For convenience, we've hosted a copy of this dataset on Google Cloud, but you can also download your own copy. After downloading the dataset, here are the steps we'll take to prepare the data:

1. Add a *start* and *end* token to each sentence.
2. Clean the sentences by removing special characters.
3. Create a word index and reverse word index (dictionaries mapping from word → id and id → word).
4. Pad each sentence to a maximum length.
# Download the file path_to_zip = tf.keras.utils.get_file( 'spa-eng.zip', origin='http://download.tensorflow.org/data/spa-eng.zip', extract=True) path_to_file = os.path.dirname(path_to_zip)+"/spa-eng/spa.txt" # Converts the unicode file to ascii def unicode_to_ascii(s): return ''.join(c for c in unicodedata.normalize('NFD', s) if unicodedata.category(c) != 'Mn') def preprocess_sentence(w): w = unicode_to_ascii(w.lower().strip()) # creating a space between a word and the punctuation following it # eg: "he is a boy." => "he is a boy ." # Reference:- https://stackoverflow.com/questions/3645931/python-padding-punctuation-with-white-spaces-keeping-punctuation w = re.sub(r"([?.!,¿])", r" \1 ", w) w = re.sub(r'[" "]+', " ", w) # replacing everything with space except (a-z, A-Z, ".", "?", "!", ",") w = re.sub(r"[^a-zA-Z?.!,¿]+", " ", w) w = w.rstrip().strip() # adding a start and an end token to the sentence # so that the model know when to start and stop predicting. w = '<start> ' + w + ' <end>' return w # 1. Remove the accents # 2. Clean the sentences # 3. Return word pairs in the format: [ENGLISH, SPANISH] def create_dataset(path, num_examples): lines = open(path, encoding='UTF-8').read().strip().split('\n') word_pairs = [[preprocess_sentence(w) for w in l.split('\t')] for l in lines[:num_examples]] return word_pairs # This class creates a word -> index mapping (e.g,. "dad" -> 5) and vice-versa # (e.g., 5 -> "dad") for each language, class LanguageIndex(): def __init__(self, lang): self.lang = lang self.word2idx = {} self.idx2word = {} self.vocab = set() self.create_index() def create_index(self): for phrase in self.lang: self.vocab.update(phrase.split(' ')) self.vocab = sorted(self.vocab) self.word2idx['<pad>'] = 0 for index, word in enumerate(self.vocab): self.word2idx[word] = index + 1 for word, index in self.word2idx.items(): self.idx2word[index] = word def max_length(tensor): return max(len(t) for t in tensor) def load_dataset(path, num_examples): # creating cleaned input, output pairs pairs = create_dataset(path, num_examples) # index language using the class defined above inp_lang = LanguageIndex(sp for en, sp in pairs) targ_lang = LanguageIndex(en for en, sp in pairs) # Vectorize the input and target languages # Spanish sentences input_tensor = [[inp_lang.word2idx[s] for s in sp.split(' ')] for en, sp in pairs] # English sentences target_tensor = [[targ_lang.word2idx[s] for s in en.split(' ')] for en, sp in pairs] # Calculate max_length of input and output tensor # Here, we'll set those to the longest sentence in the dataset max_length_inp, max_length_tar = max_length(input_tensor), max_length(target_tensor) # Padding the input and output tensor to the maximum length input_tensor = tf.keras.preprocessing.sequence.pad_sequences(input_tensor, maxlen=max_length_inp, padding='post') target_tensor = tf.keras.preprocessing.sequence.pad_sequences(target_tensor, maxlen=max_length_tar, padding='post') return input_tensor, target_tensor, inp_lang, targ_lang, max_length_inp, max_length_tar
_____no_output_____
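A quick usage example of the preprocessing defined above; the expected output follows directly from the lowercasing, the punctuation spacing, and the added start/end tokens:

```
# Example: preprocess one English sentence
print(preprocess_sentence(u"May I borrow this book?"))
# <start> may i borrow this book ? <end>
```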
Apache-2.0
notebooks/eager/nmt_w_attention.ipynb
cnodadiaz/tf-workshop
Limit the size of the dataset to experiment faster (optional)

Training on the complete dataset of >100,000 sentences will take a long time. To train faster, we can limit the size of the dataset to 30,000 sentences (of course, translation quality degrades with less data):
# Try experimenting with the size of that dataset num_examples = 30000 input_tensor, target_tensor, inp_lang, targ_lang, max_length_inp, max_length_targ = load_dataset(path_to_file, num_examples) # Creating training and validation sets using an 80-20 split input_tensor_train, input_tensor_val, target_tensor_train, target_tensor_val = train_test_split(input_tensor, target_tensor, test_size=0.2) # Show length len(input_tensor_train), len(target_tensor_train), len(input_tensor_val), len(target_tensor_val)
_____no_output_____
Apache-2.0
notebooks/eager/nmt_w_attention.ipynb
cnodadiaz/tf-workshop
Create a tf.data dataset
BUFFER_SIZE = len(input_tensor_train) BATCH_SIZE = 64 N_BATCH = BUFFER_SIZE//BATCH_SIZE embedding_dim = 256 units = 1024 vocab_inp_size = len(inp_lang.word2idx) vocab_tar_size = len(targ_lang.word2idx) dataset = tf.data.Dataset.from_tensor_slices((input_tensor_train, target_tensor_train)).shuffle(BUFFER_SIZE) dataset = dataset.apply(tf.contrib.data.batch_and_drop_remainder(BATCH_SIZE))
_____no_output_____
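Note: `tf.contrib.data.batch_and_drop_remainder` was removed in later TensorFlow releases; on TF >= 1.10 (and TF 2.x) the equivalent is the `drop_remainder` argument of `Dataset.batch`, as in this sketch:

```
# Equivalent batching without tf.contrib (TF >= 1.10 / TF 2.x)
dataset = tf.data.Dataset.from_tensor_slices((input_tensor_train, target_tensor_train)).shuffle(BUFFER_SIZE)
dataset = dataset.batch(BATCH_SIZE, drop_remainder=True)
```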
Apache-2.0
notebooks/eager/nmt_w_attention.ipynb
cnodadiaz/tf-workshop
Write the encoder and decoder model

Here, we'll implement an encoder-decoder model with attention, which you can read about in the TensorFlow [Neural Machine Translation (seq2seq) tutorial](https://www.tensorflow.org/tutorials/seq2seq). This example uses a more recent set of APIs. This notebook implements the [attention equations](https://www.tensorflow.org/tutorials/seq2seq#background_on_the_attention_mechanism) from the seq2seq tutorial. The following diagram shows that each input word is assigned a weight by the attention mechanism, which is then used by the decoder to predict the next word in the sentence.

The input is put through an encoder model which gives us the encoder output of shape *(batch_size, max_length, hidden_size)* and the encoder hidden state of shape *(batch_size, hidden_size)*. Here are the equations that are implemented:

We're using *Bahdanau attention*. Let's decide on notation before writing the simplified form:

* FC = Fully connected (dense) layer
* EO = Encoder output
* H = hidden state
* X = input to the decoder

And the pseudo-code:

* `score = FC(tanh(FC(EO) + FC(H)))`
* `attention weights = softmax(score, axis = 1)`. Softmax by default is applied on the last axis, but here we want to apply it on the *1st axis*, since the shape of score is *(batch_size, max_length, hidden_size)*. `Max_length` is the length of our input. Since we are trying to assign a weight to each input, softmax should be applied on that axis.
* `context vector = sum(attention weights * EO, axis = 1)`. Same reason as above for choosing axis as 1.
* `embedding output` = the input to the decoder X is passed through an embedding layer.
* `merged vector = concat(embedding output, context vector)`
* This merged vector is then given to the GRU.

The shapes of all the vectors at each step have been specified in the comments in the code:
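Before the full model code, here is an illustrative, standalone shape check of the pseudo-code above (random tensors, arbitrary sizes; it assumes the eager execution enabled in the first cell):

```
# Shape check of the Bahdanau attention pseudo-code (illustrative only)
import tensorflow as tf  # already imported above

batch_size, max_length, hidden_size = 64, 16, 1024
EO = tf.random_normal((batch_size, max_length, hidden_size))  # encoder output
H = tf.random_normal((batch_size, hidden_size))               # decoder hidden state

W1 = tf.keras.layers.Dense(hidden_size)
W2 = tf.keras.layers.Dense(hidden_size)
V = tf.keras.layers.Dense(1)

score = tf.nn.tanh(W1(EO) + W2(tf.expand_dims(H, 1)))            # (batch, max_length, hidden)
attention_weights = tf.nn.softmax(V(score), axis=1)              # (batch, max_length, 1)
context_vector = tf.reduce_sum(attention_weights * EO, axis=1)   # (batch, hidden)
print(score.shape, attention_weights.shape, context_vector.shape)
```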
def gru(units): # If you have a GPU, we recommend using CuDNNGRU(provides a 3x speedup than GRU) # the code automatically does that. if tf.test.is_gpu_available(): return tf.keras.layers.CuDNNGRU(units, return_sequences=True, return_state=True, recurrent_initializer='glorot_uniform') else: return tf.keras.layers.GRU(units, return_sequences=True, return_state=True, recurrent_activation='sigmoid', recurrent_initializer='glorot_uniform') class Encoder(tf.keras.Model): def __init__(self, vocab_size, embedding_dim, enc_units, batch_sz): super(Encoder, self).__init__() self.batch_sz = batch_sz self.enc_units = enc_units self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim) self.gru = gru(self.enc_units) def call(self, x, hidden): x = self.embedding(x) output, state = self.gru(x, initial_state = hidden) return output, state def initialize_hidden_state(self): return tf.zeros((self.batch_sz, self.enc_units)) class Decoder(tf.keras.Model): def __init__(self, vocab_size, embedding_dim, dec_units, batch_sz): super(Decoder, self).__init__() self.batch_sz = batch_sz self.dec_units = dec_units self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim) self.gru = gru(self.dec_units) self.fc = tf.keras.layers.Dense(vocab_size) # used for attention self.W1 = tf.keras.layers.Dense(self.dec_units) self.W2 = tf.keras.layers.Dense(self.dec_units) self.V = tf.keras.layers.Dense(1) def call(self, x, hidden, enc_output): # enc_output shape == (batch_size, max_length, hidden_size) # hidden shape == (batch_size, hidden size) # hidden_with_time_axis shape == (batch_size, 1, hidden size) # we are doing this to perform addition to calculate the score hidden_with_time_axis = tf.expand_dims(hidden, 1) # score shape == (batch_size, max_length, hidden_size) score = tf.nn.tanh(self.W1(enc_output) + self.W2(hidden_with_time_axis)) # attention_weights shape == (batch_size, max_length, 1) # we get 1 at the last axis because we are applying score to self.V attention_weights = tf.nn.softmax(self.V(score), axis=1) # context_vector shape after sum == (batch_size, hidden_size) context_vector = attention_weights * enc_output context_vector = tf.reduce_sum(context_vector, axis=1) # x shape after passing through embedding == (batch_size, 1, embedding_dim) x = self.embedding(x) # x shape after concatenation == (batch_size, 1, embedding_dim + hidden_size) x = tf.concat([tf.expand_dims(context_vector, 1), x], axis=-1) # passing the concatenated vector to the GRU output, state = self.gru(x) # output shape == (batch_size * max_length, hidden_size) output = tf.reshape(output, (-1, output.shape[2])) # output shape == (batch_size * max_length, vocab) x = self.fc(output) return x, state, attention_weights def initialize_hidden_state(self): return tf.zeros((self.batch_sz, self.dec_units)) encoder = Encoder(vocab_inp_size, embedding_dim, units, BATCH_SIZE) decoder = Decoder(vocab_tar_size, embedding_dim, units, BATCH_SIZE)
_____no_output_____
Apache-2.0
notebooks/eager/nmt_w_attention.ipynb
cnodadiaz/tf-workshop
Define the optimizer and the loss function
optimizer = tf.train.AdamOptimizer() def loss_function(real, pred): mask = 1 - np.equal(real, 0) loss_ = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=real, logits=pred) * mask return tf.reduce_mean(loss_)
_____no_output_____
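The `mask` term zeroes out the loss at padded positions (token id 0 is `<pad>`). A tiny worked example with dummy targets:

```
# Worked example of the padding mask (illustrative values)
import numpy as np
real = np.array([4, 0, 7])      # 0 is the <pad> token id
mask = 1 - np.equal(real, 0)    # -> array([1, 0, 1])
# the loss at the padded position is multiplied by 0, so padding
# does not contribute to the mean loss
```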
Apache-2.0
notebooks/eager/nmt_w_attention.ipynb
cnodadiaz/tf-workshop
Training

1. Pass the *input* through the *encoder*, which returns the *encoder output* and the *encoder hidden state*.
2. The encoder output, encoder hidden state and the decoder input (which is the *start token*) are passed to the decoder.
3. The decoder returns the *predictions* and the *decoder hidden state*.
4. The decoder hidden state is then passed back into the model and the predictions are used to calculate the loss.
5. Use *teacher forcing* to decide the next input to the decoder.
6. *Teacher forcing* is the technique where the *target word* is passed as the *next input* to the decoder.
7. The final step is to calculate the gradients, apply them with the optimizer, and backpropagate.
EPOCHS = 10 for epoch in range(EPOCHS): start = time.time() hidden = encoder.initialize_hidden_state() total_loss = 0 for (batch, (inp, targ)) in enumerate(dataset): loss = 0 with tf.GradientTape() as tape: enc_output, enc_hidden = encoder(inp, hidden) dec_hidden = enc_hidden dec_input = tf.expand_dims([targ_lang.word2idx['<start>']] * BATCH_SIZE, 1) # Teacher forcing - feeding the target as the next input for t in range(1, targ.shape[1]): # passing enc_output to the decoder predictions, dec_hidden, _ = decoder(dec_input, dec_hidden, enc_output) loss += loss_function(targ[:, t], predictions) # using teacher forcing dec_input = tf.expand_dims(targ[:, t], 1) batch_loss = (loss / int(targ.shape[1])) total_loss += batch_loss variables = encoder.variables + decoder.variables gradients = tape.gradient(loss, variables) optimizer.apply_gradients(zip(gradients, variables), tf.train.get_or_create_global_step()) if batch % 100 == 0: print('Epoch {} Batch {} Loss {:.4f}'.format(epoch + 1, batch, batch_loss.numpy())) print('Epoch {} Loss {:.4f}'.format(epoch + 1, total_loss / N_BATCH)) print('Time taken for 1 epoch {} sec\n'.format(time.time() - start))
_____no_output_____
Apache-2.0
notebooks/eager/nmt_w_attention.ipynb
cnodadiaz/tf-workshop
Translate

* The evaluate function is similar to the training loop, except we don't use *teacher forcing* here. The input to the decoder at each time step is its previous predictions along with the hidden state and the encoder output.
* Stop predicting when the model predicts the *end token*.
* And store the *attention weights for every time step*.

Note: The encoder output is calculated only once for one input.
def evaluate(sentence, encoder, decoder, inp_lang, targ_lang, max_length_inp, max_length_targ): attention_plot = np.zeros((max_length_targ, max_length_inp)) sentence = preprocess_sentence(sentence) inputs = [inp_lang.word2idx[i] for i in sentence.split(' ')] inputs = tf.keras.preprocessing.sequence.pad_sequences([inputs], maxlen=max_length_inp, padding='post') inputs = tf.convert_to_tensor(inputs) result = '' hidden = [tf.zeros((1, units))] enc_out, enc_hidden = encoder(inputs, hidden) dec_hidden = enc_hidden dec_input = tf.expand_dims([targ_lang.word2idx['<start>']], 0) for t in range(max_length_targ): predictions, dec_hidden, attention_weights = decoder(dec_input, dec_hidden, enc_out) # storing the attention weigths to plot later on attention_weights = tf.reshape(attention_weights, (-1, )) attention_plot[t] = attention_weights.numpy() predicted_id = tf.multinomial(tf.exp(predictions), num_samples=1)[0][0].numpy() result += targ_lang.idx2word[predicted_id] + ' ' if targ_lang.idx2word[predicted_id] == '<end>': return result, sentence, attention_plot # the predicted ID is fed back into the model dec_input = tf.expand_dims([predicted_id], 0) return result, sentence, attention_plot # function for plotting the attention weights def plot_attention(attention, sentence, predicted_sentence): fig = plt.figure(figsize=(10,10)) ax = fig.add_subplot(1, 1, 1) ax.matshow(attention, cmap='viridis') fontdict = {'fontsize': 14} ax.set_xticklabels([''] + sentence, fontdict=fontdict, rotation=90) ax.set_yticklabels([''] + predicted_sentence, fontdict=fontdict) plt.show() def translate(sentence, encoder, decoder, inp_lang, targ_lang, max_length_inp, max_length_targ): result, sentence, attention_plot = evaluate(sentence, encoder, decoder, inp_lang, targ_lang, max_length_inp, max_length_targ) print('Input: {}'.format(sentence)) print('Predicted translation: {}'.format(result)) attention_plot = attention_plot[:len(result.split(' ')), :len(sentence.split(' '))] plot_attention(attention_plot, sentence.split(' '), result.split(' ')) translate('hace mucho frio aqui.', encoder, decoder, inp_lang, targ_lang, max_length_inp, max_length_targ) translate('esta es mi vida.', encoder, decoder, inp_lang, targ_lang, max_length_inp, max_length_targ) translate('¿todavia estan en casa?', encoder, decoder, inp_lang, targ_lang, max_length_inp, max_length_targ) # wrong translation translate('trata de averiguarlo.', encoder, decoder, inp_lang, targ_lang, max_length_inp, max_length_targ)
_____no_output_____
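The evaluate function above samples the next token with `tf.multinomial` over `tf.exp(predictions)`. A common alternative (not what this notebook does) is greedy decoding; that single line would look like:

```
# Greedy alternative to the multinomial sampling used above
predicted_id = tf.argmax(predictions[0]).numpy()
```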
Apache-2.0
notebooks/eager/nmt_w_attention.ipynb
cnodadiaz/tf-workshop
Job parameters
BUCKET="tanmcrae-greengrass-blog" bucket_region = s3.head_bucket(Bucket=BUCKET)['ResponseMetadata']['HTTPHeaders']['x-amz-bucket-region'] assert bucket_region == region, "Your S3 bucket {} and this notebook need to be in the same region.".format(BUCKET) MANIFEST = "blue_box_large_job.json" JOB_NAME = "blue-box-large-job-public" EXP_NAME = 'blue-box' print(JOB_NAME) USE_AUTO_LABELING = False RUN_FULL_AL_DEMO = False USE_PRIVATE_WORKFORCE = False
_____no_output_____
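The cell above references `s3`, `region`, and (later) `role`, which are presumably set up in an earlier cell not shown here; a minimal sketch of that setup, assuming default AWS credentials and a SageMaker notebook environment:

```
# Assumed setup for the Ground Truth cells (not part of the original excerpt)
import json
import boto3
import sagemaker

region = boto3.session.Session().region_name
s3 = boto3.client('s3')
role = sagemaker.get_execution_role()   # IAM role passed to create_labeling_job later
```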
MIT-0
data-prep/04_create_ground_truth_job.ipynb
jonslo/amazon-sagemaker-aws-greengrass-custom-object-detection-model
specifying categories
CLASS_NAME = "storage box" CLASS_LIST = [CLASS_NAME] print("Label space is {}".format(CLASS_LIST)) json_body = { 'labels': [{'label': label} for label in CLASS_LIST] } with open('class_labels.json', 'w') as f: json.dump(json_body, f) LABEL_KEY = "ground-truth/{}/class_labels.json".format(EXP_NAME) s3.upload_file('class_labels.json', BUCKET, LABEL_KEY) print ("uploaded s3://{}/{}".format(BUCKET, LABEL_KEY))
Label space is ['storage box']
uploaded s3://tanmcrae-greengrass-blog/ground-truth/blue-box/class_labels.json
MIT-0
data-prep/04_create_ground_truth_job.ipynb
jonslo/amazon-sagemaker-aws-greengrass-custom-object-detection-model
Create the instruction template
def make_template(test_template=False, save_fname='instructions.template'): template = r"""<script src="https://assets.crowd.aws/crowd-html-elements.js"></script> <crowd-form> <crowd-bounding-box name="boundingBox" src="{{ task.input.taskObject | grant_read_access }}" header="Draw bounding box for the storage boxes in the picture (blue). Each bounding box should fit tight around the box. Only draw one bounding box per storage box, even if part of the box may be occluded." labels="['storage box']" > <full-instructions header="Please annotate storage boxes in the picture"> <ol> <li><strong>Inspect</strong> the image</li> <li><strong>Determine</strong> if there are visible blue storage box in the picture.</li> <li><strong>Outline</strong> the storage box in the image using the provided “Box” tool. </li> </ol> <h2><span style="color: rgb(0, 138, 0);">Good Example</span></h2> <p><img src=" https://s3.amazonaws.com/angelaw-workshop/groundtruth/greengrass-blog/good-exmaples/good-example-arm.png " style="max-width:450"></p> <p><img src=" https://s3.amazonaws.com/angelaw-workshop/groundtruth/greengrass-blog/good-exmaples/good-example-occlusion.png " style="max-width:450"></p> <p><img src=" https://s3.amazonaws.com/angelaw-workshop/groundtruth/greengrass-blog/good-exmaples/good-example-partial.png " style="max-width:450"></p> <p><img src=" https://s3.amazonaws.com/angelaw-workshop/groundtruth/greengrass-blog/good-exmaples/good-example-standard.png " style="max-width:450"></p> <h2><span style="color: rgb(230, 0, 0);">Bad Example</span></h2> <p>The bounding boxes below are bad as it didn't cover the entire box. </p> <p><img src="https://s3.amazonaws.com/angelaw-workshop/groundtruth/greengrass-blog/bad-examples/bad-example-not-full.png" style="max-width:450"></p> <p><img src="https://s3.amazonaws.com/angelaw-workshop/groundtruth/greengrass-blog/bad-examples/bad-example-not-full-2.png" style="max-width:450"></p> <p>The bounding boxes below are bad as it's not tight around storage box. </p> <p><img src="https://s3.amazonaws.com/angelaw-workshop/groundtruth/greengrass-blog/bad-examples/bad-example-not-tight.png" style="max-width:450"></p> <p><img src="https://s3.amazonaws.com/angelaw-workshop/groundtruth/greengrass-blog/bad-examples/bad-example-not-tight-2.png" style="max-width:450"></p> <p>The labeling below are bad as it didn't cover the full </p> <p><img src="https://s3.amazonaws.com/angelaw-workshop/groundtruth/greengrass-blog/bad-examples/bad-example-occlusion-partial.png" style="max-width:450"></p> <p><img src="https://s3.amazonaws.com/angelaw-workshop/groundtruth/greengrass-blog/bad-examples/bad-example-occlusion-partial-2.png" style="max-width:450"></p> </full-instructions> <short-instructions> <p>Label every blue storage box in the picture. Boxes should fit tight. If the target goes off the screen, label up to the edge of the image. Do not label if it completely cannot be seen. 
</p> <p><img src=" https://s3.amazonaws.com/angelaw-workshop/groundtruth/greengrass-blog/good-exmaples/good-example-arm.png " style="max-width:100%"/></p> <p><img src=" https://s3.amazonaws.com/angelaw-workshop/groundtruth/greengrass-blog/good-exmaples/good-example-occlusion.png " style="max-width:100%"/></p> <p><img src=" https://s3.amazonaws.com/angelaw-workshop/groundtruth/greengrass-blog/good-exmaples/good-example-partial.png " style="max-width:100%"/></p> <p><img src=" https://s3.amazonaws.com/angelaw-workshop/groundtruth/greengrass-blog/good-exmaples/good-example-standard.png " style="max-width:100%"/></p> <p><br/></p> <h2><span style="color: rgb(230, 0, 0);">Bad examples</span></h2> <p>The bounding boxes below are bad as it didn't cover the entire box. </p> <p><img src="https://s3.amazonaws.com/angelaw-workshop/groundtruth/greengrass-blog/bad-examples/bad-example-not-full.png" style="max-width:100%"></p> <p><img src="https://s3.amazonaws.com/angelaw-workshop/groundtruth/greengrass-blog/bad-examples/bad-example-not-full-2.png" style="max-width:100%"></p> <p>The bounding boxes below are bad as it's not tight around storage box. </p> <p><img src="https://s3.amazonaws.com/angelaw-workshop/groundtruth/greengrass-blog/bad-examples/bad-example-not-tight.png" style="max-width:100%"></p> <p><img src="https://s3.amazonaws.com/angelaw-workshop/groundtruth/greengrass-blog/bad-examples/bad-example-not-tight-2.png" style="max-width:100%"></p> <p>The labeling below are bad as it only labeled part of the storage box </p> <p><img src="https://s3.amazonaws.com/angelaw-workshop/groundtruth/greengrass-blog/bad-examples/bad-example-occlusion-partial.png" style="max-width:100%"></p> <p><img src="https://s3.amazonaws.com/angelaw-workshop/groundtruth/greengrass-blog/bad-examples/bad-example-occlusion-partial-2.png" style="max-width:100%"></p> </short-instructions> </crowd-bounding-box> </crowd-form> """ with open(save_fname, 'w') as f: f.write(template) template_name = 'instructions.template' # make_template(test_template=True, save_fname='instructions.html') make_template(test_template=False, save_fname=template_name) s3.upload_file(template_name, BUCKET, EXP_NAME + '/' + template_name) print("uploaded template to s3://{}/ground-truth/{}/{}".format(BUCKET, EXP_NAME, template_name)) private_workteam_arn = "arn:aws:sagemaker:us-east-1:854681337758:workteam/private-crowd/greengrass-blog"
_____no_output_____
MIT-0
data-prep/04_create_ground_truth_job.ipynb
jonslo/amazon-sagemaker-aws-greengrass-custom-object-detection-model
Create job
task_description = 'Dear Annotator, please draw a box around the yellow or blue storage box in the picture. Thank you!' task_keywords = ['image', 'object', 'detection', CLASS_NAME] task_title = 'Draw a box around storage box in the picture' print("task_title: {}".format(task_title)) print("JOB_NAME: {}".format(JOB_NAME)) task_keywords # Specify ARNs for resources needed to run an object detection job. ac_arn_map = {'us-west-2': '081040173940', 'us-east-1': '432418664414', 'us-east-2': '266458841044', 'eu-west-1': '568282634449', 'ap-northeast-1': '477331159723'} prehuman_arn = 'arn:aws:lambda:{}:{}:function:PRE-BoundingBox'.format(region, ac_arn_map[region]) acs_arn = 'arn:aws:lambda:{}:{}:function:ACS-BoundingBox'.format(region, ac_arn_map[region]) labeling_algorithm_specification_arn = 'arn:aws:sagemaker:{}:027400017018:labeling-job-algorithm-specification/object-detection'.format(region) public_workteam_arn = 'arn:aws:sagemaker:{}:394669845002:workteam/public-crowd/default'.format(region) human_task_config = { "AnnotationConsolidationConfig": { "AnnotationConsolidationLambdaArn": acs_arn, }, "PreHumanTaskLambdaArn": prehuman_arn, "MaxConcurrentTaskCount": 300, # 200 images will be sent at a time to the workteam. "NumberOfHumanWorkersPerDataObject": 1, # We will obtain and consolidate just 1 human annotation for each image. "TaskAvailabilityLifetimeInSeconds": 43200, #864000, #43200 # Your workteam has 10 days to complete all pending tasks. "TaskDescription": task_description, "TaskKeywords": task_keywords, "TaskTimeLimitInSeconds": 600, # Each image must be labeled within 10 minutes. "TaskTitle": task_title, "UiConfig": { "UiTemplateS3Uri": 's3://{}/{}/{}'.format(BUCKET, EXP_NAME, template_name), } } if not USE_PRIVATE_WORKFORCE: human_task_config["PublicWorkforceTaskPrice"] = { "AmountInUsd": { "Dollars": 0, "Cents": 3, "TenthFractionsOfACent": 6, } } human_task_config["WorkteamArn"] = public_workteam_arn else: human_task_config["WorkteamArn"] = private_workteam_arn print(json.dumps (human_task_config, indent =2 )) ground_truth_request = { "InputConfig" : { "DataSource": { "S3DataSource": { "ManifestS3Uri": 's3://{}/{}/{}'.format(BUCKET, 'manifests', MANIFEST), } }, "DataAttributes": { "ContentClassifiers": [ "FreeOfPersonallyIdentifiableInformation", "FreeOfAdultContent" ] }, }, "OutputConfig" : { "S3OutputPath": 's3://{}/ground-truth-output/'.format(BUCKET), }, "HumanTaskConfig" : human_task_config, "LabelingJobName": JOB_NAME, "RoleArn": role, "LabelAttributeName": "bb", "LabelCategoryConfigS3Uri": 's3://{}/{}'.format(BUCKET, LABEL_KEY), } if USE_AUTO_LABELING and RUN_FULL_AL_DEMO: ground_truth_request[ "LabelingJobAlgorithmsConfig"] = { "LabelingJobAlgorithmSpecificationArn": labeling_algorithm_specification_arn } print(json.dumps (ground_truth_request, indent =2 )) sagemaker_client = boto3.client('sagemaker') sagemaker_client.create_labeling_job(**ground_truth_request)
_____no_output_____
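Once the job is submitted, its progress can be polled with the SageMaker API; a short sketch:

```
# Poll the labeling job status
status = sagemaker_client.describe_labeling_job(LabelingJobName=JOB_NAME)
print(status['LabelingJobStatus'])   # e.g. 'InProgress', 'Completed', 'Failed'
print(status['LabelCounters'])       # counts of labeled / unlabeled objects
```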
MIT-0
data-prep/04_create_ground_truth_job.ipynb
jonslo/amazon-sagemaker-aws-greengrass-custom-object-detection-model
look at output manifest
job_name = 'yellow-box-small-job-public' OUTPUT_MANIFEST = 's3://{}/ground-truth-output/{}/manifests/output/output.manifest'.format(BUCKET, job_name) output_file = job_name+'.output.manifest' !aws s3 cp {OUTPUT_MANIFEST} {output_file} with open(output_file, 'r') as f: output = [json.loads(line.strip()) for line in f.readlines()] len(output)
_____no_output_____
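Each line of the output manifest is a JSON object keyed by the job's label attribute name (the blue-box job above used `bb`; this yellow-box job may use a different name). A sketch of inspecting one entry, assuming the standard Ground Truth bounding-box output format:

```
# Inspect one annotated image (field names assume the Ground Truth
# bounding-box output format and a label attribute named 'bb')
entry = output[0]
print(entry['source-ref'])              # S3 URI of the image
print(entry['bb']['annotations'])       # list of {class_id, left, top, width, height}
print(entry['bb-metadata']['objects'])  # per-box confidence scores
```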
MIT-0
data-prep/04_create_ground_truth_job.ipynb
jonslo/amazon-sagemaker-aws-greengrass-custom-object-detection-model
synchro.io> IO classes to read files in the different formats the Asari lab acquires: hdf5, rhd, raw, npy. All adapted from the SpykingCircus project by Pierre Yger and Olivier Marre, https://spyking-circus.readthedocs.io/en/latest/
#export import numpy as np import re, sys, os, logging, struct import h5py from colorama import Fore logger = logging.getLogger(__name__) def atoi(text): return int(text) if text.isdigit() else text def natural_keys(text): ''' alist.sort(key=natural_keys) sorts in human order http://nedbatchelder.com/blog/200712/human_sorting.html (See Toothy's implementation in the comments) ''' return [atoi(c) for c in re.split('(\d+)', text) ] def filter_per_extension(files, extension): results = [] for file in files: fn, ext = os.path.splitext(file) if ext == extension: results += [file] return results def print_and_log(to_print, level='info', logger=None, display=True): if display: if level == 'default': for line in to_print: print(Fore.WHITE + line + '\n') if level == 'info': print_info(to_print) elif level == 'error': print_error(to_print) sys.stdout.flush() def print_info(lines): """Prints informations messages, enhanced graphical aspects.""" print(Fore.YELLOW + "------------------------- Informations -------------------------\n") for line in lines: print(Fore.YELLOW + "| " + line + '\n') print(Fore.YELLOW + "------------------------------------------------------------------\n" + Fore.WHITE) def print_error(lines): """Prints errors messages, enhanced graphical aspects.""" print(Fore.RED + "---------------------------- Error -----------------------------\n") for line in lines: print(Fore.RED + "| " + line + '\n') print(Fore.RED + "------------------------------------------------------------------\n" + Fore.WHITE) def get_offset(data_dtype, dtype_offset): if dtype_offset == 'auto': if data_dtype in ['uint16', np.uint16]: dtype_offset = 32768 elif data_dtype in ['int16', np.int16]: dtype_offset = 0 elif data_dtype in ['int32', np.int32]: dtype_offset = 0 elif data_dtype in ['int64', np.int64]: dtype_offset = 0 elif data_dtype in ['float32', np.float32]: dtype_offset = 0 elif data_dtype in ['int8', np.int8]: dtype_offset = 0 elif data_dtype in ['uint8', np.uint8]: dtype_offset = 127 elif data_dtype in ['float64', np.float64]: dtype_offset = 0 elif data_dtype==">d": dtype_offset = 0 else: try: dtype_offset = int(dtype_offset) except: print("Offset %s is not valid" %dtype_offset) return dtype_offset #export class DataFile(object): ''' A generic class that will represent how the program interacts with the data. Such an abstraction layer should allow people to write their own wrappers, for several file formats, with or without parallel write, streams, and so on. Note that depending on the complexity of the datastructure, this extra layer can slow down the code. 
''' description = "mydatafile" # Description of the file format extension = [".myextension"] # extensions parallel_write = False # can be written in parallel (using the comm object) is_writable = False # can be written is_streamable = ['multi-files'] # If the file format can support streams of data ['multi-files' is a default, but can be something else] _shape = None # The total shape of the data (nb time steps, nb channels) accross streams if any _t_start = None # The global t_start of the data _t_stop = None # The final t_stop of the data, accross all streams if any # This is a dictionary of values that need to be provided to the constructor, with the corresponding type _required_fields = {} # This is a dictionary of values that may have a default value, if not provided to the constructor _default_values = {} _params = {} def __init__(self, file_name, params, is_empty=False, stream_mode=None): ''' The constructor that will create the DataFile object. Note that by default, values are read from the header of the file. If not found in the header, they are read from the parameter file. If no values are found, the code will trigger an error What you need to specify at a generic level (for a given file format) - parallel_write : can the file be safely written in parallel ? - is_writable : if the file can be written - is_streamable : if the file format can support streaming data - required_fields : what parameter must be specified for the file format, along with the type - default_values : parameters that may have default values if not provided What you need to specify at a low level (maybe by getting specific values with _read_from_header) - _shape : the size of the data, should be a tuple (duration in time bins, nb_channels) - _t_start : the time (in time steps) of the recording (0 by default) ''' self.params = {} self.params.update(self._params) if not is_empty: self._check_filename(file_name) if stream_mode is not None: self.is_stream = True if not stream_mode in self.is_streamable: if self.is_master: print_and_log(["The file format %s does not support stream mode %s" %(self.description, stream_mode)], 'error', logger) sys.exit(1) if is_empty: sys.exit(1) else: self.is_stream = False self.file_name = file_name self.is_empty = is_empty self.stream_mode = stream_mode f_next, extension = os.path.splitext(self.file_name) self._check_extension(extension) self._fill_from_params(params) if not self.is_empty: try: self._fill_from_header(self._read_from_header()) except Exception as ex: print_and_log(["There is an error in the _read_from_header method of the wrapper\n" + str(ex)], 'error', logger) else: self._shape = (0, 0) if self._shape is None: sys.exit(1) self.params['dtype_offset'] = get_offset(self.data_dtype, self.dtype_offset) if self.stream_mode: self._sources = self.set_streams(self.stream_mode) self._times = [] for source in self._sources: self._times += [source.t_start] print_and_log(['The file is composed of %d streams' %len(self._sources), 'Times are between %d and %d' %(self._sources[0].t_start, self._sources[-1].t_stop)], 'debug',logger) ################################################################################################################## ################################################################################################################## ######### Methods that need to be overwritten for a given fileformat ####### ################################################################################################################## 
################################################################################################################## def _read_from_header(self): ''' This function is called only if the file is not empty, and should fill the values in the constructor such as _shape. It returns a dictionnary, that will be added to self._params based on the constrains given by required_fields and default_values ''' raise NotImplementedError('The _read_from_header method needs to be implemented for file format %s' %self.description) def _open(self, mode=''): ''' This function should open the file - mode can be to read only 'r', or to write 'w' ''' raise NotImplementedError('The open method needs to be implemented for file format %s' %self.description) def _close(self): ''' This function closes the file ''' raise NotImplementedError('The close method needs to be implemented for file format %s' %self.description) def read_chunk(self, idx, chunk_size, padding=(0, 0), nodes=None): ''' Assuming the analyze function has been called before, this is the main function used by the code, in all steps, to get data chunks. More precisely, assuming your dataset can be divided in nb_chunks (see analyze) of temporal size (chunk_size), - idx is the index of the chunk you want to load - chunk_size is the time of those chunks, in time steps - if the data loaded are data[idx:idx+1], padding should add some offsets, in time steps, such that we can load data[idx+padding[0]:idx+padding[1]] - nodes is a list of nodes, between 0 and nb_channels ''' raise NotImplementedError('The read_chunk method needs to be implemented for file format %s' %self.description) def read_chunk_adc(self, idx, chunk_size, padding=(0, 0), nodes=None): ''' Same as read_chunk, but for the analog channel of the file. - idx is the index of the chunk you want to load - chunk_size is the time of those chunks, in time steps - if the data loaded are data[idx:idx+1], padding should add some offsets, in time steps, such that we can load data[idx+padding[0]:idx+padding[1]] - nodes is a list of nodes, between 0 and nb_channels ''' raise NotImplementedError('The read_chunk_adc method needs to be implemented for file format %s' %self.description) def write_chunk(self, time, data): ''' This function writes data at a given time. - time is expressed in timestep - data must be a 2D matrix of size time_length x nb_channels ''' raise NotImplementedError('The set_data method needs to be implemented for file format %s' %self.description) def set_streams(self, stream_mode): ''' This function is only used for file format supporting streams, and need to return a list of datafiles, with appropriate t_start for each of them. Note that the results will be using the times defined by the streams. You can do anything regarding the keyword used for the stream mode, but multi-files is immplemented by default This will allow every file format to be streamed from multiple sources, and processed as a single file. 
''' if stream_mode == 'multi-files': dirname = os.path.abspath(os.path.dirname(self.file_name)) fname = os.path.basename(self.file_name) fn, ext = os.path.splitext(fname) all_files = os.listdir(dirname) all_files = filter_per_extension(all_files, ext) all_files.sort(key=natural_keys) sources = [] to_write = [] global_time = 0 params = self.get_description() for fname in all_files: new_data = type(self)(os.path.join(os.path.abspath(dirname), fname), params) new_data._t_start = global_time global_time += new_data.duration sources += [new_data] to_write += ['We found the datafile %s with t_start %s and duration %s' %(new_data.file_name, new_data.t_start, new_data.duration)] print_and_log(to_write, 'debug', logger) return sources ################################## Optional, only if internal names are changed ################################## @property def sampling_rate(self): return self.params['sampling_rate'] @property def data_dtype(self): return self.params['data_dtype'] @property def dtype_offset(self): return self.params['dtype_offset'] @property def data_offset(self): return self.params['data_offset'] @property def nb_channels(self): return int(self.params['nb_channels']) @property def gain(self): return self.params['gain'] ################################################################################################################## ################################################################################################################## ######### End of methods that need to be overwritten for a given fileformat ####### ################################################################################################################## ################################################################################################################## def get_file_names(self): res = [] if self.stream_mode == 'multi-files': for source in self._sources: res += [source.file_name] return res def _check_filename(self, file_name): if not os.path.exists(file_name): sys.exit(1) def _check_extension(self, extension): if len(self.extension) > 0: if not extension in self.extension + [item.upper() for item in self.extension]: sys.exit(1) def _fill_from_params(self, params): for key in self._required_fields: if key not in params: self._check_requirements_(params) else: self.params[key] = self._required_fields[key](params[key]) for key in self._default_values: if key not in params: self.params[key] = self._default_values[key] else: self.params[key] = type(self._default_values[key])(params[key]) def _fill_from_header(self, header): for key in list(header.keys()): self.params[key] = header[key] def _check_requirements_(self, params): missing = {} for key, value in list(self._required_fields.items()): if key not in list(params.keys()): missing[key] = value if len(missing) > 0: self._display_requirements_() sys.exit(1) def _display_requirements_(self): to_write = ['The parameters for %s file format are:' %self.description.upper(), ''] nb_params = 0 for key, value in list(self._required_fields.items()): mystring = '-- %s -- %s' %(key, str(value)) mystring += ' [** mandatory **]' to_write += [mystring] nb_params += 1 to_write += [''] for key, value in list(self._default_values.items()): mystring = '-- %s -- %s' %(key, str(type(value))) mystring += ' [default is %s]' %value to_write += [mystring] nb_params += 1 def _scale_data_to_float32(self, data): ''' This function will convert data from local data dtype into float32, the default format of the algorithm ''' if self.data_dtype != np.float32: data = 
data.astype(np.float32) if self.dtype_offset != 0: data -= self.dtype_offset if np.any(self.gain != 1): data *= self.gain return np.ascontiguousarray(data) def _unscale_data_from_float32(self, data): ''' This function will convert data from float32 back to the original format of the file ''' if np.any(self.gain != 1): data /= self.gain if self.dtype_offset != 0: data += self.dtype_offset if (data.dtype != self.data_dtype) and (self.data_dtype != np.float32): data = data.astype(self.data_dtype) return data def _count_chunks(self, chunk_size, duration, strict=False): ''' This function will count how many block of size chunk_size can be found within a certain duration This returns the number of blocks, plus the remaining part ''' nb_chunks = duration // chunk_size last_chunk_len = duration - nb_chunks * chunk_size if not strict and last_chunk_len > 0: nb_chunks += 1 return nb_chunks, last_chunk_len def _get_t_start_t_stop(self, idx, chunk_size, padding=(0,0)): t_start = idx*np.int64(chunk_size)+padding[0] t_stop = (idx+1)*np.int64(chunk_size)+padding[1] if t_stop > self.duration: t_stop = self.duration if t_start < 0: t_start = 0 return t_start, t_stop def _get_streams_index_by_time(self, local_time): if self.is_stream: cidx = np.searchsorted(self._times, local_time, 'right') - 1 return cidx def is_first_chunk(self, idx, nb_chunks): if self.is_stream: cidx = np.searchsorted(self._chunks_in_sources, idx, 'right') - 1 idx -= self._chunks_in_sources[cidx] if idx == 0: return True else: if idx == 0: return True return False def is_last_chunk(self, idx, nb_chunks): if self.is_stream: if (idx > 0) and (idx in self._chunks_in_sources - 1): return True else: if idx == nb_chunks: return True return False def get_snippet(self, global_time, length, nodes=None): ''' This function should return a time snippet of size length x nodes - time is in timestep - length is in timestep - nodes is a list of nodes, between 0 and nb_channels ''' if self.is_stream: cidx = self._get_streams_index_by_time(global_time) return self._sources[cidx].get_snippet(global_time, length, nodes) else: local_time = global_time - self.t_start return self.get_data(0, chunk_size=length, padding=(local_time, local_time), nodes=nodes)[0] def get_snippet_adc(self, global_time, length, nodes=None): ''' This function should return a time snippet of size length x nodes - time is in timestep - length is in timestep - nodes is a list of nodes, between 0 and nb_channels ''' if self.is_stream: cidx = self._get_streams_index_by_time(global_time) return self._sources[cidx].get_snippet_adc(global_time, length, nodes) else: local_time = global_time - self.t_start return self.get_data_adc(0, chunk_size=length, padding=(local_time, local_time), nodes=nodes)[0] def get_data(self, idx, chunk_size, padding=(0, 0), nodes=None): if self.is_stream: cidx = np.searchsorted(self._chunks_in_sources, idx, 'right') - 1 idx -= self._chunks_in_sources[cidx] return self._sources[cidx].read_chunk(idx, chunk_size, padding, nodes), self._sources[cidx].t_start + idx*chunk_size else: return self.read_chunk(idx, chunk_size, padding, nodes), self.t_start + idx*chunk_size def get_data_adc(self, idx, chunk_size, padding=(0, 0), nodes=None): if self.is_stream: cidx = np.searchsorted(self._chunks_in_sources, idx, 'right') - 1 idx -= self._chunks_in_sources[cidx] return self._sources[cidx].read_chunk_adc(idx, chunk_size, padding, nodes), self._sources[cidx].t_start + idx*chunk_size else: return self.read_chunk_adc(idx, chunk_size, padding, nodes), self.t_start + idx*chunk_size 
def get_data_dig_in(self, idx, chunk_size, padding=(0, 0), nodes=None): if self.is_stream: cidx = np.searchsorted(self._chunks_in_sources, idx, 'right') - 1 idx -= self._chunks_in_sources[cidx] return self._sources[cidx].read_chunk_dig_in(idx, chunk_size, padding, nodes), self._sources[cidx].t_start + idx*chunk_size else: return self.read_chunk_dig_in(idx, chunk_size, padding, nodes), self.t_start + idx*chunk_size def get_data_both(self, idx, chunk_size, padding=(0, 0), nodes=None): if self.is_stream: cidx = np.searchsorted(self._chunks_in_sources, idx, 'right') - 1 idx -= self._chunks_in_sources[cidx] return (*self._sources[cidx].read_chunk_both(idx, chunk_size, padding, nodes), self._sources[cidx].t_start + idx*chunk_size) else: return (*self.read_chunk_both(idx, chunk_size, padding, nodes), self.t_start + idx*chunk_size) def set_data(self, global_time, data): if self.is_stream: cidx = self._get_streams_index_by_time(global_time) local_time = global_time - self._sources[cidx].t_start return self._sources[cidx].write_chunk(local_time, data) else: local_time = global_time - self.t_start return self.write_chunk(local_time, data) def analyze(self, chunk_size, strict=False): ''' This function should return two values: - the number of temporal chunks of temporal size chunk_size that can be found in the data. Note that even if the last chunk is not complete, it has to be counted. chunk_size is expressed in time steps - the length of the last uncomplete chunk, in time steps ''' if self.is_stream: nb_chunks = 0 last_chunk_len = 0 self._chunks_in_sources = [0] for source in self._sources: a, b = self._count_chunks(chunk_size, source.duration, strict) nb_chunks += a last_chunk_len += b self._chunks_in_sources += [nb_chunks] self._chunks_in_sources = np.array(self._chunks_in_sources) return nb_chunks, last_chunk_len else: return self._count_chunks(chunk_size, self.duration, strict) def get_description(self): result = {} for key in ['sampling_rate', 'data_dtype', 'gain', 'nb_channels', 'dtype_offset'] + list(self._default_values.keys()) + list(self._required_fields.keys()): result[key] = self.params[key] return result @property def shape(self): return (self.duration, int(self.nb_channels)) @property def duration(self): if self.is_stream: duration = 0 for source in self._sources: duration += source.duration return duration else: return np.int64(self._shape[0]) @property def is_master(self): return True#comm.rank == 0 @property def t_start(self): if self.is_stream: return self._sources[0].t_start else: if self._t_start is None: self._t_start = 0 return self._t_start @property def t_stop(self): if self.is_stream: return self._sources[-1].t_stop else: if self._t_stop is None: self._t_stop = self.t_start + self.duration return self._t_stop @property def nb_streams(self): if self.is_stream: return len(self._sources) else: return 1 def open(self, mode='r'): if self.is_stream: for source in self._sources: source._open(mode) else: self._open(mode) def close(self): if self.is_stream: for source in self._sources: source._close() else: self._close() #export def read_header(fid): """Reads the Intan File Format header from the given file.""" # Check 'magic number' at beginning of file to make sure this is an Intan # Technologies RHD2000 data file. magic_number, = struct.unpack('<I', fid.read(4)) if magic_number != int('c6912702', 16): raise Exception('Unrecognized file type.') header = {} # Read version number. 
version = {} (version['major'], version['minor']) = struct.unpack('<hh', fid.read(4)) header['version'] = version freq = {} # Read information of sampling rate and amplifier frequency settings. header['sample_rate'], = struct.unpack('<f', fid.read(4)) (freq['dsp_enabled'], freq['actual_dsp_cutoff_frequency'], freq['actual_lower_bandwidth'], freq['actual_upper_bandwidth'], freq['desired_dsp_cutoff_frequency'], freq['desired_lower_bandwidth'], freq['desired_upper_bandwidth']) = struct.unpack('<hffffff', fid.read(26)) # This tells us if a software 50/60 Hz notch filter was enabled during # the data acquisition. notch_filter_mode, = struct.unpack('<h', fid.read(2)) header['notch_filter_frequency'] = 0 if notch_filter_mode == 1: header['notch_filter_frequency'] = 50 elif notch_filter_mode == 2: header['notch_filter_frequency'] = 60 freq['notch_filter_frequency'] = header['notch_filter_frequency'] (freq['desired_impedance_test_frequency'], freq['actual_impedance_test_frequency']) = struct.unpack('<ff', fid.read(8)) note1 = read_qstring(fid) note2 = read_qstring(fid) note3 = read_qstring(fid) header['notes'] = { 'note1' : note1, 'note2' : note2, 'note3' : note3} # If data file is from GUI v1.1 or later, see if temperature sensor data was saved. header['num_temp_sensor_channels'] = 0 if (version['major'] == 1 and version['minor'] >= 1) or (version['major'] > 1) : header['num_temp_sensor_channels'], = struct.unpack('<h', fid.read(2)) # If data file is from GUI v1.3 or later, load eval board mode. header['eval_board_mode'] = 0 if ((version['major'] == 1) and (version['minor'] >= 3)) or (version['major'] > 1) : header['eval_board_mode'], = struct.unpack('<h', fid.read(2)) # Place frequency-related information in data structure. (Note: much of this structure is set above) freq['amplifier_sample_rate'] = header['sample_rate'] freq['aux_input_sample_rate'] = header['sample_rate'] / 4 freq['supply_voltage_sample_rate'] = header['sample_rate'] / 60 freq['board_adc_sample_rate'] = header['sample_rate'] freq['board_dig_in_sample_rate'] = header['sample_rate'] header['frequency_parameters'] = freq # Create structure arrays for each type of data channel. header['spike_triggers'] = [] header['amplifier_channels'] = [] header['aux_input_channels'] = [] header['supply_voltage_channels'] = [] header['board_adc_channels'] = [] header['board_dig_in_channels'] = [] header['board_dig_out_channels'] = [] # Read signal summary from data file header. 
if (header['version']['major'] > 1): header['reference_channel'] = read_qstring(fid) number_of_signal_groups, = struct.unpack('<h', fid.read(2)) for signal_group in range(0, number_of_signal_groups): signal_group_name = read_qstring(fid) signal_group_prefix = read_qstring(fid) (signal_group_enabled, signal_group_num_channels, signal_group_num_amp_channels) = struct.unpack('<hhh', fid.read(6)) if (signal_group_num_channels > 0) and (signal_group_enabled > 0): for signal_channel in range(0, signal_group_num_channels): new_channel = {'port_name' : signal_group_name, 'port_prefix' : signal_group_prefix, 'port_number' : signal_group} new_channel['native_channel_name'] = read_qstring(fid) new_channel['custom_channel_name'] = read_qstring(fid) (new_channel['native_order'], new_channel['custom_order'], signal_type, channel_enabled, new_channel['chip_channel'], new_channel['board_stream']) = struct.unpack('<hhhhhh', fid.read(12)) new_trigger_channel = {} (new_trigger_channel['voltage_trigger_mode'], new_trigger_channel['voltage_threshold'], new_trigger_channel['digital_trigger_channel'], new_trigger_channel['digital_edge_polarity']) = struct.unpack('<hhhh', fid.read(8)) (new_channel['electrode_impedance_magnitude'], new_channel['electrode_impedance_phase']) = struct.unpack('<ff', fid.read(8)) if channel_enabled: if signal_type == 0: header['amplifier_channels'].append(new_channel) header['spike_triggers'].append(new_trigger_channel) elif signal_type == 1: header['aux_input_channels'].append(new_channel) elif signal_type == 2: header['supply_voltage_channels'].append(new_channel) elif signal_type == 3: header['board_adc_channels'].append(new_channel) elif signal_type == 4: header['board_dig_in_channels'].append(new_channel) elif signal_type == 5: header['board_dig_out_channels'].append(new_channel) else: raise Exception('Unknown channel type.') # Summarize contents of data file. header['num_amplifier_channels'] = len(header['amplifier_channels']) header['num_aux_input_channels'] = len(header['aux_input_channels']) header['num_supply_voltage_channels'] = len(header['supply_voltage_channels']) header['num_board_adc_channels'] = len(header['board_adc_channels']) header['num_board_dig_in_channels'] = len(header['board_dig_in_channels']) header['num_board_dig_out_channels'] = len(header['board_dig_out_channels']) return header def get_bytes_per_data_block(header): """Calculates the number of bytes in each 60-sample datablock.""" if (header['version']['major'] == 1): num_samples_per_data_block = 60 else: num_samples_per_data_block = 128 # Each data block contains 60 amplifier samples. 
bytes_per_block = num_samples_per_data_block * 4 # timestamp data bytes_per_block = bytes_per_block + num_samples_per_data_block * 2 * header['num_amplifier_channels'] # Auxiliary inputs are sampled 4x slower than amplifiers bytes_per_block = bytes_per_block + (num_samples_per_data_block / 4) * 2 * header['num_aux_input_channels'] # Supply voltage is sampled 60x slower than amplifiers bytes_per_block = bytes_per_block + 1 * 2 * header['num_supply_voltage_channels'] # Board analog inputs are sampled at same rate as amplifiers bytes_per_block = bytes_per_block + num_samples_per_data_block * 2 * header['num_board_adc_channels'] # Board digital inputs are sampled at same rate as amplifiers if header['num_board_dig_in_channels'] > 0: bytes_per_block = bytes_per_block + num_samples_per_data_block * 2 # Board digital outputs are sampled at same rate as amplifiers if header['num_board_dig_out_channels'] > 0: bytes_per_block = bytes_per_block + num_samples_per_data_block * 2 # Temp sensor is sampled 60x slower than amplifiers if header['num_temp_sensor_channels'] > 0: bytes_per_block = bytes_per_block + 1 * 2 * header['num_temp_sensor_channels'] return bytes_per_block def read_qstring(fid): """Read Qt style QString. The first 32-bit unsigned number indicates the length of the string (in bytes). If this number equals 0xFFFFFFFF, the string is null. Strings are stored as unicode. """ length, = struct.unpack('<I', fid.read(4)) if length == int('ffffffff', 16): return "" if length > (os.fstat(fid.fileno()).st_size - fid.tell() + 1) : print(length) raise Exception('Length too long.') # convert length from bytes to 16-bit Unicode words length = int(length / 2) data = [] for i in range(0, length): c, = struct.unpack('<H', fid.read(2)) data.append(c) if sys.version_info >= (3,0): a = ''.join([chr(c) for c in data]) else: a = ''.join([chr(c) for c in data]) return a class RHDFile(DataFile): description = "rhd" extension = [".rhd"] parallel_write = True is_writable = True is_streamable = ['multi-files'] _required_fields = {} _default_values = {} _params = {'dtype_offset' : 'auto', 'data_dtype' : 'uint16', 'gain' : 0.195} def _read_from_header(self): header = {} self.file = open(self.file_name, 'rb') full_header = read_header(self.file) self.header = full_header header['nb_channels'] = full_header['num_amplifier_channels'] header['sampling_rate'] = full_header['sample_rate'] if full_header['version']['major'] == 1: self.SAMPLES_PER_RECORD = 60 else: self.SAMPLES_PER_RECORD = 128 self.nb_channels_adc = full_header['num_board_adc_channels'] self.nb_channels_dig_in = full_header['num_board_dig_in_channels'] header['data_offset'] = self.file.tell() data_present = False filesize = os.path.getsize(self.file_name) self.bytes_per_block = get_bytes_per_data_block(full_header) self.block_offset = self.SAMPLES_PER_RECORD * 4 self.block_size = 2 * self.SAMPLES_PER_RECORD * header['nb_channels'] self.block_offset_adc = (self.block_offset + self.block_size + (self.SAMPLES_PER_RECORD/4) * full_header['num_aux_input_channels'] * 2 + 2 * full_header['num_supply_voltage_channels']) self.block_size_adc = 2 * self.SAMPLES_PER_RECORD * self.nb_channels_adc self.block_offset_dig_in = self.block_offset_adc + self.block_size_adc self.block_size_dig_in = 2 * self.SAMPLES_PER_RECORD bytes_remaining = filesize - self.file.tell() self.bytes_per_block_div = self.bytes_per_block / 2 self.block_offset_div = self.block_offset / 2 self.block_offset_div_adc = self.block_offset_adc / 2 self.block_offset_div_dig_in = self.block_offset_dig_in / 
2 self.block_size_div = self.block_size / 2 self.block_size_div_adc = self.block_size_adc / 2 self.block_size_div_dig_in = self.block_size_dig_in / 2 if bytes_remaining > 0: data_present = True if bytes_remaining % self.bytes_per_block != 0: print_and_log(['Something is wrong with file size : should have a whole number of data blocks'], 'error', logger) num_data_blocks = int(bytes_remaining / self.bytes_per_block) self.num_amplifier_samples = self.SAMPLES_PER_RECORD * num_data_blocks self.size = self.num_amplifier_samples self._shape = (self.size, header['nb_channels']) self.file.close() return header def _get_slice_(self, t_start, t_stop): x_beg = np.int64(t_start // self.SAMPLES_PER_RECORD) r_beg = np.mod(t_start, self.SAMPLES_PER_RECORD) x_end = np.int64(t_stop // self.SAMPLES_PER_RECORD) r_end = np.mod(t_stop, self.SAMPLES_PER_RECORD) if x_beg == x_end: g_offset = x_beg * self.bytes_per_block_div + self.block_offset_div data_slice = np.arange(g_offset + r_beg * self.nb_channels, g_offset + r_end * self.nb_channels, dtype=np.int64) yield data_slice else: for count, nb_blocks in enumerate(np.arange(x_beg, x_end + 1, dtype=np.int64)): g_offset = nb_blocks * self.bytes_per_block_div + self.block_offset_div if count == 0: data_slice = np.arange(g_offset + r_beg * self.nb_channels, g_offset + self.block_size_div, dtype=np.int64) elif (count == (x_end - x_beg)): data_slice = np.arange(g_offset, g_offset + r_end * self.nb_channels, dtype=np.int64) else: data_slice = np.arange(g_offset, g_offset + self.block_size_div, dtype=np.int64) yield data_slice def _get_slice_adc_(self, t_start, t_stop): x_beg = np.int64(t_start // self.SAMPLES_PER_RECORD) r_beg = np.mod(t_start, self.SAMPLES_PER_RECORD) x_end = np.int64(t_stop // self.SAMPLES_PER_RECORD) r_end = np.mod(t_stop, self.SAMPLES_PER_RECORD) if x_beg == x_end: g_offset = x_beg * self.bytes_per_block_div + self.block_offset_div_adc data_slice = np.arange(g_offset + r_beg * self.nb_channels_adc, g_offset + r_end * self.nb_channels_adc, dtype=np.int64) yield data_slice else: for count, nb_blocks in enumerate(np.arange(x_beg, x_end + 1, dtype=np.int64)): g_offset = nb_blocks * self.bytes_per_block_div + self.block_offset_div_adc if count == 0: data_slice = np.arange(g_offset + r_beg * self.nb_channels_adc, g_offset + self.block_size_div_adc, dtype=np.int64) elif (count == (x_end - x_beg)): data_slice = np.arange(g_offset, g_offset + r_end * self.nb_channels_adc, dtype=np.int64) else: data_slice = np.arange(g_offset, g_offset + self.block_size_div_adc, dtype=np.int64) yield data_slice def _get_slice_dig_in_(self, t_start, t_stop): x_beg = np.int64(t_start // self.SAMPLES_PER_RECORD) r_beg = np.mod(t_start, self.SAMPLES_PER_RECORD) x_end = np.int64(t_stop // self.SAMPLES_PER_RECORD) r_end = np.mod(t_stop, self.SAMPLES_PER_RECORD) if x_beg == x_end: g_offset = x_beg * self.bytes_per_block_div + self.block_offset_div_dig_in data_slice = np.arange(g_offset + r_beg, g_offset + r_end, dtype=np.int64) yield data_slice else: for count, nb_blocks in enumerate(np.arange(x_beg, x_end + 1, dtype=np.int64)): g_offset = nb_blocks * self.bytes_per_block_div + self.block_offset_div_dig_in if count == 0: data_slice = np.arange(g_offset + r_beg, g_offset + self.block_size_div_dig_in, dtype=np.int64) elif (count == (x_end - x_beg)): data_slice = np.arange(g_offset, g_offset + r_end, dtype=np.int64) else: data_slice = np.arange(g_offset, g_offset + self.block_size_div_dig_in, dtype=np.int64) yield data_slice def read_chunk(self, idx, chunk_size, padding=(0, 0), 
nodes=None): t_start, t_stop = self._get_t_start_t_stop(idx, chunk_size, padding) local_shape = t_stop - t_start local_chunk = np.zeros((self.nb_channels, local_shape), dtype=self.data_dtype) data_slice = self._get_slice_(t_start, t_stop) self._open() count = 0 for s in data_slice: t_slice = len(s)//self.nb_channels local_chunk[:, count:count + t_slice] = self.data[s].reshape(self.nb_channels, len(s)//self.nb_channels) count += t_slice local_chunk = local_chunk.T self._close() if nodes is not None: if not np.all(nodes == np.arange(self.nb_channels)): local_chunk = np.take(local_chunk, nodes, axis=1) return self._scale_data_to_float32(local_chunk) def read_chunk_adc(self, idx, chunk_size, padding=(0, 0), nodes=None): t_start, t_stop = self._get_t_start_t_stop(idx, chunk_size, padding) local_shape = t_stop - t_start local_chunk = np.zeros((self.nb_channels_adc, local_shape), dtype=self.data_dtype) data_slice = self._get_slice_adc_(t_start, t_stop) self._open() count = 0 for s in data_slice: t_slice = len(s)//self.nb_channels_adc local_chunk[:, count:count + t_slice] = self.data[s].reshape(self.nb_channels_adc, len(s)//self.nb_channels_adc) count += t_slice local_chunk = local_chunk.T self._close() if nodes is not None: if not np.all(nodes == np.arange(self.nb_channels_adc)): local_chunk = np.take(local_chunk, nodes, axis=1) return self._scale_data_to_float32(local_chunk) def read_chunk_dig_in(self, idx, chunk_size, padding=(0, 0), nodes=None): t_start, t_stop = self._get_t_start_t_stop(idx, chunk_size, padding) local_shape = t_stop - t_start tmp_chunk = np.zeros((local_shape,), dtype=np.uint64) local_chunk = np.zeros((self.nb_channels_dig_in, local_shape), dtype=np.uint8) data_slice = self._get_slice_dig_in_(t_start, t_stop) self._open() count = 0 for s in data_slice: t_slice = len(s) tmp_chunk[count:count + t_slice] = self.data[s] #Putting all data in channel 1, then masking count += t_slice for i in range(self.nb_channels_dig_in): chann_bitmask = 1 << self.header['board_dig_in_channels'][i]['native_order'] local_chunk[i] = np.not_equal(np.bitwise_and(tmp_chunk, chann_bitmask), 0) local_chunk = local_chunk.T self._close() if nodes is not None: if not np.all(nodes == np.arange(self.nb_channels_dig_in)): local_chunk = np.take(local_chunk, nodes, axis=1) return local_chunk def read_chunk_both(self, idx, chunk_size, padding=(0, 0), nodes=None): t_start, t_stop = self._get_t_start_t_stop(idx, chunk_size, padding) local_shape = t_stop - t_start local_chunk_adc = np.zeros((self.nb_channels_adc, local_shape), dtype=self.data_dtype) local_chunk = np.zeros((self.nb_channels, local_shape), dtype=self.data_dtype) data_slice_adc = self._get_slice_adc_(t_start, t_stop) data_slice = self._get_slice_(t_start, t_stop) self._open() count = 0 for s in data_slice_adc: t_slice = len(s)//self.nb_channels_adc local_chunk_adc[:, count:count + t_slice] = self.data[s].reshape(self.nb_channels_adc, len(s)//self.nb_channels_adc) count += t_slice count = 0 for s in data_slice: t_slice = len(s)//self.nb_channels local_chunk[:, count:count + t_slice] = self.data[s].reshape(self.nb_channels, len(s)//self.nb_channels) count += t_slice local_chunk = local_chunk.T local_chunk_adc = local_chunk_adc.T self._close() if nodes is not None: if not np.all(nodes == np.arange(self.nb_channels_adc)): local_chunk_adc = np.take(local_chunk_adc, nodes, axis=1) if not np.all(nodes == np.arange(self.nb_channels)): local_chunk = np.take(local_chunk, nodes, axis=1) return self._scale_data_to_float32(local_chunk), 
self._scale_data_to_float32(local_chunk_adc) def write_chunk(self, time, data): t_start = time t_stop = time + data.shape[0] if t_stop > self.duration: t_stop = self.duration data = self._unscale_data_from_float32(data) data_slice = self._get_slice_(t_start, t_stop) self._open(mode='r+') count = 0 for s in data_slice: t_slice = len(s)//self.nb_channels self.data[s] = data[count:count + t_slice, :].T.ravel() count += t_slice self._close() def _open(self, mode='r'): self.data = np.memmap(self.file_name, offset=self.data_offset, dtype=self.data_dtype, mode=mode) def _close(self): self.data = None #export class H5File(DataFile): description = "hdf5" extension = [".h5", ".hdf5"] parallel_write = h5py.get_config().mpi is_writable = True _required_fields = {'h5_key' : str, 'sampling_rate' : float} _default_values = {'dtype_offset' : 'auto', 'h5_key_adc' : "Data/Recording_0/AnalogStream/Stream_1/ChannelData", 'gain' : 1., 'data_dtype' : 'uint8', 'nb_channels' : 1} def _check_compression(self): # HDF5 does not support parallel writes with compression if self.compression != '': self.parallel_write = False if self.is_master: print_and_log(['Data are compressed thus parallel writing is disabled'], 'debug', logger) def __check_valid_key__(self, key): file = h5py.File(self.file_name, mode='r') all_fields = [] file.visit(all_fields.append) if not key in all_fields: print_and_log(['The key %s can not be found in the dataset! Keys found are:' %key, ", ".join(all_fields)], 'error', logger) sys.exit(1) file.close() def _read_from_header(self): self.__check_valid_key__(self.h5_key) self._open() header = {} header['data_dtype'] = self.my_file.get(self.h5_key).dtype self.compression = self.my_file.get(self.h5_key).compression self._check_compression() self.size = self.my_file.get(self.h5_key).shape if self.size[0] > self.size[1]: self.time_axis = 0 self._shape = (self.size[0], self.size[1]) else: self.time_axis = 1 self._shape = (self.size[1], self.size[0]) header['nb_channels'] = self._shape[1] self._close() return header def read_chunk(self, idx, chunk_size, padding=(0, 0), nodes=None): t_start, t_stop = self._get_t_start_t_stop(idx, chunk_size, padding) if nodes is None: if self.time_axis == 0: local_chunk = self.data[t_start:t_stop, :] elif self.time_axis == 1: local_chunk = self.data[:, t_start:t_stop].T else: if self.time_axis == 0: local_chunk = self.data[t_start:t_stop, nodes] elif self.time_axis == 1: local_chunk = self.data[nodes, t_start:t_stop].T return self._scale_data_to_float32(local_chunk) def read_chunk_adc(self, idx, chunk_size, padding=(0, 0), nodes=None): t_start, t_stop = self._get_t_start_t_stop(idx, chunk_size, padding) if nodes is None: local_chunk = self.data_adc[0,t_start:t_stop] else: local_chunk = self.data_adc[0,t_start:t_stop] return self._scale_data_to_float32(local_chunk) def write_chunk(self, time, data): data = self._unscale_data_from_float32(data) if self.time_axis == 0: self.data[time:time+data.shape[0], :] = data elif self.time_axis == 1: self.data[:, time:time+data.shape[0]] = data.T def _open(self, mode='r'): # if mode in ['r+', 'w'] and self.parallel_write: # self.my_file = h5py.File(self.file_name, mode=mode, driver='mpio', comm=comm) # else: self.my_file = h5py.File(self.file_name, mode=mode) self.data = self.my_file.get(self.h5_key) self.data_adc = self.my_file.get(self.h5_key_adc) def _close(self): self.my_file.close() del self.data del self.data_adc @property def h5_key(self): return self.params['h5_key'] @property def h5_key_adc(self): return 
self.params['h5_key_adc'] #export class RawBinaryFile(DataFile): description = "raw_binary" extension = [] parallel_write = True is_writable = True _required_fields = {'data_dtype' : str, 'sampling_rate' : float, 'nb_channels' : int} _default_values = {'dtype_offset' : 'auto', 'data_offset' : 0, 'gain' : 1.} def _read_from_header(self): self._open() self.size = len(self.data) self._shape = (self.size//self.nb_channels, int(self.nb_channels)) self._close() return {} def allocate(self, shape, data_dtype=None): if data_dtype is None: data_dtype = self.data_dtype if self.is_master: self.data = np.memmap(self.file_name, offset=self.data_offset, dtype=data_dtype, mode='w+', shape=shape) # comm.Barrier() self._read_from_header() del self.data def read_chunk(self, idx, chunk_size, padding=(0, 0), nodes=None): t_start, t_stop = self._get_t_start_t_stop(idx, chunk_size, padding) local_shape = t_stop - t_start self._open() local_chunk = self.data[t_start*self.nb_channels:t_stop*self.nb_channels] local_chunk = local_chunk.reshape(local_shape, self.nb_channels) self._close() if nodes is not None: if not np.all(nodes == np.arange(self.nb_channels)): local_chunk = np.take(local_chunk, nodes, axis=1) return self._scale_data_to_float32(local_chunk) def read_chunk_adc(self, idx, chunk_size, padding=(0, 0), nodes=None): return self.read_chunk(idx, chunk_size, padding=padding, nodes=nodes) def write_chunk(self, time, data): self._open(mode='r+') data = self._unscale_data_from_float32(data) data = data.ravel() self.data[self.nb_channels*time:self.nb_channels*time+len(data)] = data self._close() def _open(self, mode='r'): self.data = np.memmap(self.file_name, offset=self.data_offset, dtype=self.data_dtype, mode=mode) def _close(self): self.data = None #export from numpy.lib.format import open_memmap class NumpyFile(RawBinaryFile): description = "numpy" extension = [".npy"] parallel_write = True is_writable = True _required_fields = {'sampling_rate' : float} _default_values = {'dtype_offset' : 'auto', 'gain' : 1.} def _read_from_header(self): header = {} self._open() self.size = self.data.shape if self.size[0] > self.size[1]: self.time_axis = 0 self._shape = (self.size[0], self.size[1]) else: self.time_axis = 1 self._shape = (self.size[1], self.size[0]) header['nb_channels'] = self._shape[1] header['data_dtype'] = self.data.dtype self.size = len(self.data) self._close() return header def read_chunk(self, idx, chunk_size, padding=(0, 0), nodes=None): self._open() t_start, t_stop = self._get_t_start_t_stop(idx, chunk_size, padding) if self.time_axis == 0: local_chunk = self.data[t_start:t_stop, :].copy() elif self.time_axis == 1: local_chunk = self.data[:, t_start:t_stop].copy().T self._close() if nodes is not None: if not np.all(nodes == np.arange(self.nb_channels)): local_chunk = np.take(local_chunk, nodes, axis=1) return self._scale_data_to_float32(local_chunk) def read_chunk_adc(self, idx, chunk_size, padding=(0, 0), nodes=None): return self.read_chunk(idx, chunk_size, padding=padding, nodes=nodes) def write_chunk(self, time, data): self._open(mode='r+') data = self._unscale_data_from_float32(data) if self.time_axis == 0: self.data[time:time+len(data)] = data elif self.time_axis == 1: self.data[:, time:time+len(data)] = data.T self._close() def _open(self, mode='r'): self.data = open_memmap(self.file_name, mode=mode) def _close(self): self.data = None #export def load_all_data(datafile:DataFile): """Read all the data contained by a file. For rhd and hdf5, correspond to the ephy channels. 
To read the ADC data, see `load_all_data_adc`""" datafile.open() if isinstance(datafile, RHDFile): chunk_size = 1800960 else: chunk_size = datafile.duration n_chunks, _ = datafile.analyze(chunk_size) data = np.zeros((datafile.duration, datafile._shape[1])) print("Loading the data... "+str(round(0,2))+"% ",end='\n',flush=True) for idx in range(n_chunks): data_tmp, t_offset = datafile.get_data(idx, chunk_size) data[t_offset:t_offset+len(data_tmp)] = data_tmp print("Loading the data... "+str(round(100*(idx+1)/n_chunks,2))+"% ",end='\n',flush=True) print("Loading the data... "+str(round(100,2))+"% ",end='\n',flush=True) datafile.close() return data def load_all_data_adc(datafile:DataFile, channel_idx=0): """Read all the data contained by a file. For rhd and hdf5, correspond to the adc channels. To read the ephy data, see `load_all_data`""" datafile.open() if isinstance(datafile, RHDFile): chunk_size = 1800960 else: chunk_size = datafile.duration n_chunks, _ = datafile.analyze(chunk_size) data = np.zeros(datafile.duration) print("Loading the data... "+str(round(0,2))+"% ",end='\n',flush=True) for idx in range(n_chunks): data_tmp, t_offset = datafile.get_data_adc(idx, chunk_size) if data_tmp.ndim == 2: data_tmp = data_tmp[:,channel_idx] data[t_offset:t_offset+len(data_tmp)] = data_tmp print("Loading the data... "+str(round(100*(idx+1)/n_chunks,2))+"% ",end='\n',flush=True) print("Loading the data... "+str(round(100,2))+"% ",end='\n',flush=True) datafile.close() return data def load_all_data_dig_in(datafile:DataFile, channel_idx=0): """Read all the data contained by a file. For rhd and hdf5, correspond to the adc channels. To read the ephy data, see `load_all_data`""" datafile.open() if isinstance(datafile, RHDFile): chunk_size = 1800960 else: chunk_size = datafile.duration n_chunks, _ = datafile.analyze(chunk_size) data = np.zeros(datafile.duration, dtype=float) print("Loading the data... "+str(round(0,2))+"% ",end='\r',flush=True) for idx in range(n_chunks): data_tmp, t_offset = datafile.get_data_dig_in(idx, chunk_size) if data_tmp.ndim == 2: data_tmp = data_tmp[:,channel_idx] data[t_offset:t_offset+len(data_tmp)] = data_tmp print("Loading the data... "+str(round(100*(idx+1)/n_chunks,2))+"% ",end='\n',flush=True) print("Loading the data... "+str(round(100,2))+"% ",end='\n',flush=True) datafile.close() return data def load_all_data_both(datafile:DataFile): """Read all the data contained by a file. For rhd and hdf5, correspond to the adc channels. To read the ephy data, see `load_all_data`""" datafile.open() if isinstance(datafile, RHDFile): chunk_size = 1800960 else: chunk_size = datafile.duration n_chunks, _ = datafile.analyze(chunk_size) data_adc = np.zeros(datafile.duration) data = np.zeros((datafile.duration, datafile._shape[1])) print("Loading the data... "+str(round(0,2))+"% ",end='\n',flush=True) for idx in range(n_chunks): data_tmp, data_tmp_adc, t_offset = datafile.get_data_both(idx, chunk_size) data[t_offset:t_offset+len(data_tmp)] = data_tmp if data_tmp_adc.ndim == 2: data_tmp_adc = data_tmp_adc[:,0] data_adc[t_offset:t_offset+len(data_tmp)] = data_tmp_adc print("Loading the data... "+str(round(100*(idx+1)/n_chunks,2))+"% ",end='\n',flush=True) print("Loading the data... "+str(round(100,2))+"% ",end='\n',flush=True) datafile.close() return data, data_adc def export_adc_raw(datafile:DataFile, output_fn="", channel_idx=0): """Exports a datafile adc channel to a single raw binary file. 
Useful to reduce disk usage after that spike sorting is done.""" data = load_all_data_adc(datafile, channel_idx=channel_idx) if output_fn=="": raw_fn = os.path.splitext(datafile.file_name)[0]+".dat" else: raw_fn = os.path.split(datafile.file_name)[0]+"/"+output_fn param_d = {'sampling_rate': datafile.sampling_rate, 'data_dtype': 'uint16', 'gain': 0.195, 'nb_channels': 1, 'dtype_offset': 32768} raw_file = RawBinaryFile(raw_fn, param_d, is_empty=True) raw_file.allocate(datafile.shape[0]) raw_file.set_data(0, data) raw_file.close() def export_dig_in_raw(datafile:DataFile, output_fn="", channel_idx=0): """Exports a datafile adc channel to a single raw binary file. Useful to reduce disk usage after that spike sorting is done.""" data = load_all_data_dig_in(datafile, channel_idx=channel_idx) if output_fn=="": raw_fn = os.path.splitext(datafile.file_name)[0]+".dat" else: raw_fn = os.path.split(datafile.file_name)[0]+"/"+output_fn param_d = {'sampling_rate': datafile.sampling_rate, 'data_dtype': 'uint8', 'gain': 1, 'nb_channels': 1, 'dtype_offset': 127} raw_file = RawBinaryFile(raw_fn, param_d, is_empty=True) raw_file.allocate(datafile.shape[0]) raw_file.set_data(0, data) raw_file.close() def export_raw(datafile:DataFile, output_fn=""): """Exports a datafile adc channel to a single raw binary file. Useful to reduce disk usage after that spike sorting is done.""" data = load_all_data(datafile) if output_fn=="": raw_fn = os.path.splitext(datafile.file_name)[0]+".dat" else: raw_fn = os.path.split(datafile.file_name)[0]+"/"+output_fn param_d = datafile.get_description() raw_file = RawBinaryFile(raw_fn, param_d, is_empty=True) raw_file.allocate(datafile.shape) raw_file.set_data(0, data) raw_file.close() def export_both_raw(datafile:DataFile): """Exports a both raw data, adc and ephy.""" data, data_adc = load_all_data_both(datafile) raw_fn = os.path.splitext(datafile.file_name)[0]+".dat" param_d = {'sampling_rate': datafile.sampling_rate, 'data_dtype': 'uint16', 'gain': 0.195, 'nb_channels': 1, 'dtype_offset': 32768} raw_file = RawBinaryFile(raw_fn, param_d, is_empty=True) raw_file.allocate(datafile.shape[0]) raw_file.set_data(0, data_adc) raw_file.close() os.rename(raw_fn, os.path.splitext(datafile.file_name)[0]+".data") param_d = datafile.get_description() raw_file = RawBinaryFile(raw_fn, param_d, is_empty=True) raw_file.allocate(datafile.shape) raw_file.set_data(0, data) raw_file.close() def load_adc_raw(filepath, sampling_rate=30000): """Loads adc raw data, in the format exported by `export_adc_raw`""" param_d = {'sampling_rate': sampling_rate, 'data_dtype': 'uint16', 'gain': 0.195, 'nb_channels': 1, 'dtype_offset': 32768} raw_file = RawBinaryFile(filepath, param_d) return load_all_data_adc(raw_file) def load_digin_raw(filepath, sampling_rate=30000): """Loads adc raw data, in the format exported by `export_adc_raw`""" param_d = {'sampling_rate': sampling_rate, 'data_dtype': 'uint8', 'gain': 1, 'nb_channels': 1, 'dtype_offset': 127} raw_file = RawBinaryFile(filepath, param_d) return load_all_data_adc(raw_file) def load_sync_raw(filepath, sampling_rate=10000): """Loads the sync files made by labview for Asari Lab 2P setup""" param_d = {'sampling_rate': sampling_rate, 'data_dtype': '>d', 'gain': 1, 'nb_channels': 1, 'dtype_offset': 0} raw_file = RawBinaryFile(filepath, param_d) return load_all_data_adc(raw_file) #hide from nbdev.export import * notebook2script()
Converted 00_core.ipynb. Converted 01_utils.ipynb. Converted 02_processing.ipynb. Converted 03_modelling.ipynb. Converted 04_plotting.ipynb. Converted 05_database.ipynb. Converted 06_eyetrack.ipynb. Converted 10_synchro.io.ipynb. Converted 11_synchro.extracting.ipynb. Converted 12_synchro.processing.ipynb. Converted 13_leddome.ipynb. Converted 99_testdata.ipynb. Converted index.ipynb.
Apache-2.0
10_synchro.io.ipynb
wiessall/theonerig
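The `analyze()` method above delegates the actual arithmetic to a `_count_chunks()` helper defined elsewhere in the `DataFile` base class (not shown here). A minimal sketch of what such a helper might look like, assuming `duration` and `chunk_size` are both expressed in time steps; the name, signature and the `strict` behaviour are assumptions, not the library's actual code:

```python
def count_chunks(chunk_size, duration, strict=False):
    """Hypothetical helper: number of chunks of `chunk_size` time steps in
    `duration`, plus the length of the trailing incomplete chunk."""
    nb_full = int(duration // chunk_size)
    remainder = int(duration % chunk_size)
    if strict:
        # assumed strict mode: only complete chunks are counted
        return nb_full, 0
    # per the analyze() docstring, a partial last chunk is still counted
    nb_chunks = nb_full + (1 if remainder > 0 else 0)
    return nb_chunks, remainder

# e.g. 9.5 s of data at 30 kHz split into 1 s chunks: 10 chunks, last one half full
print(count_chunks(30000, 9 * 30000 + 15000))  # -> (10, 15000)
```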
Visit the NASA Mars news site
# Visit the Mars news site url = 'https://redplanetscience.com/' browser.visit(url) # Optional delay for loading the page browser.is_element_present_by_css('div.list_text', wait_time=1) # Convert the browser html to a soup object html = browser.html news_soup = soup(html, 'html.parser') slide_elem = news_soup.select_one('div.list_text') #print(news_soup.prettify()) #display the current title content slide_elem.find('div', class_='content_title') # Use the parent element to find the first a tag and save it as `news_title` news_title = slide_elem.find('div', class_='content_title').get_text() news_title # Use the parent element to find the paragraph text news_p = slide_elem.find('div', class_='article_teaser_body').get_text() news_p
_____no_output_____
ADSL
Mission_to_Mars-Starter.ipynb
ptlhrs7/Web-Scraping-and-Mongo-Homework
JPL Space Images Featured Image
# Visit URL url = 'https://spaceimages-mars.com' browser.visit(url) # Find and click the full image button full_image_link = browser.find_by_tag('button')[1] full_image_link.click() # Parse the resulting html with soup html = browser.html img_soup = soup(html, 'html.parser') #print(news_soup.prettify()) img_url_rel = img_soup.find('img', class_='fancybox-image').get('src') # find the relative image url img_url_rel # Use the base url to create an absolute url img_url = f'https://spaceimages-mars.com/{img_url_rel}' img_url
_____no_output_____
ADSL
Mission_to_Mars-Starter.ipynb
ptlhrs7/Web-Scraping-and-Mongo-Homework
Mars Facts
# Use `pd.read_html` to pull the data from the Mars-Earth Comparison section # hint use index 0 to find the table df = pd.read_html("https://galaxyfacts-mars.com/")[0] df.head() df.columns = ['Description', 'Mars', 'Earth'] df df.set_index('Description', inplace=True) df.to_html()
_____no_output_____
ADSL
Mission_to_Mars-Starter.ipynb
ptlhrs7/Web-Scraping-and-Mongo-Homework
Hemispheres
url = 'https://marshemispheres.com/' browser.visit(url) # Create a list to hold the images and titles. hemisphere_image_urls = [] # Get a list of all of the hemispheres links = browser.find_by_css('a.product-item img') # Next, loop through those links, click the link, find the sample anchor, return the href for i in range(len(links)): #hemisphere info dictionary hemisphereInfo = {} # We have to find the elements on each loop to avoid a stale element exception browser.find_by_css('a.product-item img')[i].click() # Next, we find the Sample image anchor tag and extract the href sample = browser.links.find_by_text('Sample').first hemisphereInfo["img_url"] = sample['href'] # Get Hemisphere title hemisphereInfo['title'] = browser.find_by_css('h2.title').text # Append hemisphere object to list hemisphere_image_urls.append(hemisphereInfo) # Finally, we navigate backwards browser.back() hemisphere_image_urls browser.quit() # refence code used from Dr.A's Videos
_____no_output_____
ADSL
Mission_to_Mars-Starter.ipynb
ptlhrs7/Web-Scraping-and-Mongo-Homework
FactRuEval example (Cased model), MultiHeadAttention
%reload_ext autoreload %autoreload 2 %matplotlib inline import warnings import sys sys.path.append("../") warnings.filterwarnings("ignore") import os data_path = "/home/lis/ner/ulmfit/data/factrueval/" train_path = os.path.join(data_path, "train_with_pos.csv") valid_path = os.path.join(data_path, "valid_with_pos.csv") model_dir = "/datadrive/models/multi_cased_L-12_H-768_A-12/" init_checkpoint_pt = os.path.join("/datadrive/models/multi_cased_L-12_H-768_A-12/", "pytorch_model.bin") bert_config_file = os.path.join("/datadrive/bert/multi_cased_L-12_H-768_A-12/", "bert_config.json") vocab_file = os.path.join("/datadrive/bert/multi_cased_L-12_H-768_A-12/", "vocab.txt") import torch torch.cuda.set_device(1) torch.cuda.is_available(), torch.cuda.current_device()
_____no_output_____
MIT
examples/factrueval.ipynb
sloth2012/ner-bert
1. Create dataloaders
from modules import BertNerData as NerData data = NerData.create(train_path, valid_path, vocab_file)
_____no_output_____
MIT
examples/factrueval.ipynb
sloth2012/ner-bert
For FactRuEval we use the following set of labels:
print(data.label2idx)
{'<pad>': 0, '[CLS]': 1, '[SEP]': 2, 'B_O': 3, 'I_O': 4, 'B_ORG': 5, 'I_ORG': 6, 'B_LOC': 7, 'I_LOC': 8, 'B_PER': 9, 'I_PER': 10}
MIT
examples/factrueval.ipynb
sloth2012/ner-bert
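To make the label scheme above concrete, here is a small illustrative mapping of a tokenized sentence to these ids. The sentence and the alignment are hypothetical; in the real pipeline `NerData` handles BERT subword alignment and padding:

```python
label2idx = {'<pad>': 0, '[CLS]': 1, '[SEP]': 2, 'B_O': 3, 'I_O': 4,
             'B_ORG': 5, 'I_ORG': 6, 'B_LOC': 7, 'I_LOC': 8, 'B_PER': 9, 'I_PER': 10}

# hypothetical example sentence: "Ivan works at Yandex in Moscow"
tokens = ['[CLS]', 'Ivan', 'works', 'at', 'Yandex', 'in', 'Moscow', '[SEP]']
labels = ['[CLS]', 'B_PER', 'B_O', 'B_O', 'B_ORG', 'B_O', 'B_LOC', '[SEP]']

label_ids = [label2idx[l] for l in labels]
print(label_ids)  # [1, 9, 3, 3, 5, 3, 7, 2]
```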
2. Create model. For creating the PyTorch model, we need to create a `NerModel` object.
from modules.models.bert_models import BertBiLSTMAttnCRF model = BertBiLSTMAttnCRF.create(len(data.label2idx), bert_config_file, init_checkpoint_pt, enc_hidden_dim=256) model.decoder model.get_n_trainable_params()
_____no_output_____
MIT
examples/factrueval.ipynb
sloth2012/ner-bert
3. Create learner. For training our PyTorch model, we need to create a `NerLearner` object.
from modules import NerLearner num_epochs = 100 learner = NerLearner(model, data, best_model_path="/datadrive/models/factrueval/exp_final_attn_cased1.cpt", lr=0.001, clip=1.0, sup_labels=data.id2label[5:], t_total=num_epochs * len(data.train_dl))
_____no_output_____
MIT
examples/factrueval.ipynb
sloth2012/ner-bert
4. Learn your NER model. Call `learner.fit`.
learner.fit(num_epochs, target_metric='f1')
_____no_output_____
MIT
examples/factrueval.ipynb
sloth2012/ner-bert
5. Evaluate. Create a new data loader from the existing path.
from modules.data.bert_data import get_bert_data_loader_for_predict dl = get_bert_data_loader_for_predict(data_path + "valid.csv", learner) learner.load_model() preds = learner.predict(dl)
_____no_output_____
MIT
examples/factrueval.ipynb
sloth2012/ner-bert
IOB precision
from modules.train.train import validate_step print(validate_step(learner.data.valid_dl, learner.model, learner.data.id2label, learner.sup_labels))
_____no_output_____
MIT
examples/factrueval.ipynb
sloth2012/ner-bert
Tokens report
from sklearn_crfsuite.metrics import flat_classification_report from modules.utils.utils import bert_labels2tokens pred_tokens, pred_labels = bert_labels2tokens(dl, preds) true_tokens, true_labels = bert_labels2tokens(dl, [x.labels for x in dl.dataset]) assert pred_tokens == true_tokens tokens_report = flat_classification_report(true_labels, pred_labels) print(tokens_report) from modules.utils.plot_metrics import analyze_bert_errors res_tokens, res_labels, errors = analyze_bert_errors(dl, preds) len([error for error in errors if error])
_____no_output_____
MIT
examples/factrueval.ipynb
sloth2012/ner-bert
Span precision
from modules.utils.utils import voting_choicer from modules.utils.plot_metrics import get_bert_span_report # assumed import path for get_bert_span_report print(get_bert_span_report(dl, preds, fn=voting_choicer))
precision recall f1-score support ORG 0.809 0.834 0.821 259 LOC 0.851 0.859 0.855 192 PER 0.936 0.936 0.936 188 micro avg 0.858 0.872 0.865 639 macro avg 0.865 0.877 0.871 639 weighted avg 0.859 0.872 0.865 639
MIT
examples/factrueval.ipynb
sloth2012/ner-bert
6. Get mean and std over 10 runs
from modules.utils.plot_metrics import * num_runs = 10 best_reports = [] try: for i in range(num_runs): model = BertBiLSTMAttnCRF.create(len(data.label2idx), bert_config_file, init_checkpoint_pt, enc_hidden_dim=256) best_model_path = "/datadrive/models/factrueval/exp_{}_attn_cased.cpt".format(i) learner = NerLearner(model, data, best_model_path=best_model_path, verbose=False, base_lr=0.0001, lr_max=0.001, clip=5.0, use_lr_scheduler=True, sup_labels=data.id2label[5:]) learner.fit(100, target_metric='prec') idx, res = get_mean_max_metric(learner.history, "f1", True) best_reports.append(learner.history[idx]) except KeyboardInterrupt: print("End of exp") import numpy as np
_____no_output_____
MIT
examples/factrueval.ipynb
sloth2012/ner-bert
f1 Mean and std
np.mean([get_mean_max_metric([r]) for r in best_reports]), np.round(np.std([get_mean_max_metric([r]) for r in best_reports]), 3)
_____no_output_____
MIT
examples/factrueval.ipynb
sloth2012/ner-bert
Best
get_mean_max_metric(best_reports)
_____no_output_____
MIT
examples/factrueval.ipynb
sloth2012/ner-bert
precision Mean and std
np.mean([get_mean_max_metric([r], "prec") for r in best_reports]), np.round(np.std([get_mean_max_metric([r], "prec") for r in best_reports]), 3)
_____no_output_____
MIT
examples/factrueval.ipynb
sloth2012/ner-bert
Best
get_mean_max_metric(best_reports, "prec")
_____no_output_____
MIT
examples/factrueval.ipynb
sloth2012/ner-bert
NLP_Session_2_AmazonReviews_Example Amazon Review Polarity Dataset DOWNLOAD DATA FROM HERE: https://www.kaggle.com/kritanjalijain/amazon-reviews/download Extract the train and test CSV files to your Google Drive location and configure that in the code. OVERVIEW Contains 34,686,770 Amazon reviews from 6,643,669 users on 2,441,053 products, from the Stanford Network Analysis Project (SNAP). This subset contains 1,800,000 training samples and 200,000 testing samples in each polarity sentiment. ORIGIN The Amazon reviews dataset consists of reviews from Amazon. The data span a period of 18 years, including ~35 million reviews up to March 2013. Reviews include product and user information, ratings, and a plaintext review. For more information, please refer to the following paper: J. McAuley and J. Leskovec. Hidden factors and hidden topics: understanding rating dimensions with review text. RecSys, 2013. DESCRIPTION The Amazon reviews polarity dataset is constructed by taking review scores 1 and 2 as negative, and 4 and 5 as positive. Samples with score 3 are ignored. In the dataset, class 1 is the negative and class 2 is the positive. Each class has 1,800,000 training samples and 200,000 testing samples. If you need help extracting the train.csv and test.csv files, check out the starter code. The files train.csv and test.csv contain all the training samples as comma-separated values. The CSVs contain polarity, title, text. These 3 columns correspond to class index (1 or 2), review title and review text. polarity - 1 for negative and 2 for positive; title - review heading; text - review body. The review title and text are escaped using double quotes ("), and any internal double quote is escaped by 2 double quotes (""). New lines are escaped by a backslash followed by an "n" character, that is "\n". CITATION The Amazon reviews polarity dataset is constructed by Xiang Zhang ([email protected]). It is used as a text classification benchmark in the following paper: Xiang Zhang, Junbo Zhao, Yann LeCun. Character-level Convolutional Networks for Text Classification. Advances in Neural Information Processing Systems 28 (NIPS 2015). About the TextBlob library: TextBlob is a Python (2 and 3) library for processing textual data. It provides a consistent API for diving into common natural language processing (NLP) tasks such as part-of-speech tagging, noun phrase extraction, sentiment analysis, and more. Reference: https://textblob.readthedocs.io/en/dev/ References: 1. https://www.kaggle.com/kritanjalijain/amazon-reviews 2. https://www.kaggle.com/sindhuguttal/amazon-review-using-nlp/data 3. https://textblob.readthedocs.io/en/dev/ Importing the libraries
import numpy as np import pandas as pd import matplotlib.pyplot as plt import tarfile import seaborn as sns import matplotlib.style as style import matplotlib as mpl import re import string import itertools import collections from wordcloud import WordCloud import nltk from nltk.util import ngrams from nltk.corpus import stopwords,RegexpTokenizer from textblob import TextBlob from nltk.stem import WordNetLemmatizer
_____no_output_____
MIT
AmazonReviews_Example/NLP_Session_2_AmazonReviews_Example.ipynb
drshyamsundaram/nlp
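Given the column layout described above (no header row; polarity, title, text), a hedged sketch of how the raw CSV could be loaded with named columns instead of the positional 0/1/2 indices used later in this notebook; the file path is an assumption about where the extracted train.csv lives:

```python
import pandas as pd

# assumption: 'train.csv' is the extracted training file from the Kaggle dataset
train_named = pd.read_csv('train.csv', header=None,
                          names=['polarity', 'title', 'text'])

# polarity is 1 (negative) or 2 (positive); map it to a 0/1 target if preferred
train_named['label'] = (train_named['polarity'] == 2).astype(int)
print(train_named[['polarity', 'label', 'title']].head())
```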
Setting up NLP Libraries and corpus
nltk.download('stopwords') nltk.download('punkt') nltk.download('averaged_perceptron_tagger')
[nltk_data] Downloading package stopwords to /root/nltk_data... [nltk_data] Package stopwords is already up-to-date! [nltk_data] Downloading package punkt to /root/nltk_data... [nltk_data] Package punkt is already up-to-date! [nltk_data] Downloading package averaged_perceptron_tagger to [nltk_data] /root/nltk_data... [nltk_data] Package averaged_perceptron_tagger is already up-to- [nltk_data] date!
MIT
AmazonReviews_Example/NLP_Session_2_AmazonReviews_Example.ipynb
drshyamsundaram/nlp
Mounting the data source
from google.colab import drive drive.mount('/content/drive',force_remount=True)
Mounted at /content/drive
MIT
AmazonReviews_Example/NLP_Session_2_AmazonReviews_Example.ipynb
drshyamsundaram/nlp
Configuring the input, output and process folders
# Model Input and output folders # Setup in Google drive # '/content/drive/MyDrive/yourlocation/input/' model_input_folder='/content/drive/MyDrive/yourlocation/input/' model_output_folder='/content/drive/MyDrive/yourlocation/output/' input_train_file=model_input_folder+'train.csv' input_test_file=model_input_folder+'test.csv' output_nlp_detail=model_output_folder+'nlp_details.csv'
_____no_output_____
MIT
AmazonReviews_Example/NLP_Session_2_AmazonReviews_Example.ipynb
drshyamsundaram/nlp
Loading the training dataset
# check out what the data looks like before you get started # look at the training data set train_df = pd.read_csv(input_train_file, header=None) print(train_df.head()) train_df.shape
_____no_output_____
MIT
AmazonReviews_Example/NLP_Session_2_AmazonReviews_Example.ipynb
drshyamsundaram/nlp
Reducing the size of the dataframe for demonstration purposes
# Reducing the size of the dataframe train_df = train_df.loc[1:10000] for col in train_df.columns: print(col) # checking for null values train_df.isnull().sum() # dropping null values (assign the result back, otherwise dropna() has no effect) train_df = train_df.dropna() train_df.isnull().count()
_____no_output_____
MIT
AmazonReviews_Example/NLP_Session_2_AmazonReviews_Example.ipynb
drshyamsundaram/nlp
Quick look at the dataset loaded
train_df.info() train_df.drop([0],axis=1,inplace=True) for col in train_df.columns: print(col) train_df.drop([1],axis=1,inplace=True) train_df.shape train_df.sample(5) # Checking for Null Values train_df[train_df[2].isnull()]
_____no_output_____
MIT
AmazonReviews_Example/NLP_Session_2_AmazonReviews_Example.ipynb
drshyamsundaram/nlp
Obtaining the review lengths
train = train_df.copy() train[2] = train[2].apply(str) # cast reviews to string (assign back so the cast takes effect) train["review_length"] = train[2].apply(lambda w : len(re.findall(r'\w+', w)))
_____no_output_____
MIT
AmazonReviews_Example/NLP_Session_2_AmazonReviews_Example.ipynb
drshyamsundaram/nlp
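As a quick sanity check of the word-count expression used above, the `\w+` pattern simply counts runs of word characters; a tiny standalone example with an illustrative sentence:

```python
import re

sample = "This product is great!"
print(re.findall(r'\w+', sample))       # ['This', 'product', 'is', 'great']
print(len(re.findall(r'\w+', sample)))  # 4
```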
Getting some statistics around the review length
train['review_length'].describe()
_____no_output_____
MIT
AmazonReviews_Example/NLP_Session_2_AmazonReviews_Example.ipynb
drshyamsundaram/nlp
Doing some graphical plots
sns.boxplot(data = train , x="review_length") plt.xlabel('Number of Words') plt.title('Review Length, Including Stop Words') plt.show() sns.distplot(train['review_length'], kde = False) plt.xlabel('Distribution of Review Length') plt.title('Review Length, Including Stop Words') plt.show()
/usr/local/lib/python3.7/dist-packages/seaborn/distributions.py:2619: FutureWarning: `distplot` is a deprecated function and will be removed in a future version. Please adapt your code to use either `displot` (a figure-level function with similar flexibility) or `histplot` (an axes-level function for histograms). warnings.warn(msg, FutureWarning)
MIT
AmazonReviews_Example/NLP_Session_2_AmazonReviews_Example.ipynb
drshyamsundaram/nlp
If we want to do better we must pre-process the data, for example: 1. Converting to lower case 2. Removing punctuation 3. Removing numbers 4. Removing trailing spaces 5. Removing extra whitespace
train_clean = train.copy() stop_words = stopwords.words("english")
_____no_output_____
MIT
AmazonReviews_Example/NLP_Session_2_AmazonReviews_Example.ipynb
drshyamsundaram/nlp
Function to clean text
# Function for cleaning text def clean(s): s = s.lower() #Converting to lower case s = re.sub(r'[^\w\s]', ' ', s) #Removing punctuation s = re.sub(r'[\d+]', ' ', s) #Removing Numbers s = s.strip() #Removing trailing spaces s = re.sub(' +', ' ', s) #Removing extra whitespaces return s train_clean["Reviews"] = train_clean[2].apply(lambda x: clean(x)) train_clean.sample(2)
_____no_output_____
MIT
AmazonReviews_Example/NLP_Session_2_AmazonReviews_Example.ipynb
drshyamsundaram/nlp
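A small worked example of what the cleaning steps above do to a typical raw review fragment; the input string is illustrative only, and the steps mirror `clean()` one by one:

```python
import re

s = "Great!!  5 Stars "             # illustrative raw review fragment
s = s.lower()                       # converting to lower case
s = re.sub(r'[^\w\s]', ' ', s)      # punctuation -> spaces
s = re.sub(r'[\d+]', ' ', s)        # digits -> spaces
s = re.sub(' +', ' ', s.strip())    # trim and collapse whitespace
print(s)                            # -> 'great stars'
```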
1. Removal of STOP WORDS
# Removal of Stop Words train_clean["Reviews"] = train_clean["Reviews"].apply(lambda x: " ".join(x for x in x.split() if x not in stop_words)) import pandas as pd reviews = pd.Series(train_clean["Reviews"].tolist()).astype(str) plt.figure(figsize = (9, 9)) rev_wcloud_all = WordCloud(width = 900, height = 900, colormap = 'plasma', max_words = 150).generate(''.join(reviews)) plt.imshow(rev_wcloud_all) plt.tight_layout(pad = 0.2) plt.axis('off') plt.show()
_____no_output_____
MIT
AmazonReviews_Example/NLP_Session_2_AmazonReviews_Example.ipynb
drshyamsundaram/nlp
Detailed NLP Analysis
tokenizer = RegexpTokenizer(r'\w+') train_clean["review_token"] = train_clean["Reviews"].apply(lambda x: tokenizer.tokenize(x)) # Sentiment analysis train_clean["sentiment_polarity"] = train_clean["Reviews"].apply(lambda x: TextBlob(x).sentiment.polarity) train_clean["sentiment_subjectivity"] = train_clean["Reviews"].apply(lambda x: TextBlob(x).sentiment.subjectivity) # POS breakdown train_clean["pos"] = train_clean["Reviews"].apply(lambda x: TextBlob(x).tags) train_clean["words"] = train_clean["Reviews"].apply(lambda x: TextBlob(x).words) train_clean["sentences"] = train_clean["Reviews"].apply(lambda x: TextBlob(x).sentences) # returns a list of tuples of n successive words train_clean["ngrams_3"] = train_clean["Reviews"].apply(lambda x: TextBlob(x).ngrams(n=3)) train_clean["ngrams_5"] = train_clean["Reviews"].apply(lambda x: TextBlob(x).ngrams(n=5)) # writing the detailed analysis to the configured output file train_clean.to_csv(output_nlp_detail)
_____no_output_____
MIT
AmazonReviews_Example/NLP_Session_2_AmazonReviews_Example.ipynb
drshyamsundaram/nlp
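For reference, TextBlob's sentiment scores used above are easy to inspect on a single string: polarity ranges from -1.0 (negative) to 1.0 (positive) and subjectivity from 0.0 (objective) to 1.0 (subjective). A small standalone check with an illustrative sentence:

```python
from textblob import TextBlob

blob = TextBlob("This charger works well but the cable feels cheap.")
print(blob.sentiment.polarity)       # value in [-1.0, 1.0]
print(blob.sentiment.subjectivity)   # value in [0.0, 1.0]
print(blob.ngrams(n=3)[:2])          # first two 3-grams as WordLists
```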
![JohnSnowLabs](https://nlp.johnsnowlabs.com/assets/images/logo.png) 2. Text Preprocessing with Spark NLP **Note** Read this article if you want to understand the basic concepts in Spark NLP.https://towardsdatascience.com/introduction-to-spark-nlp-foundations-and-basic-components-part-i-c83b7629ed59 1. Annotators and Transformer Concepts In Spark NLP, all Annotators are either Estimators or Transformers as we see in Spark ML. An Estimator in Spark ML is an algorithm which can be fit on a DataFrame to produce a Transformer. E.g., a learning algorithm is an Estimator which trains on a DataFrame and produces a model. A Transformer is an algorithm which can transform one DataFrame into another DataFrame. E.g., an ML model is a Transformer that transforms a DataFrame with features into a DataFrame with predictions.In Spark NLP, there are two types of annotators: AnnotatorApproach and AnnotatorModelAnnotatorApproach extends Estimators from Spark ML, which are meant to be trained through fit(), and AnnotatorModel extends Transformers which are meant to transform data frames through transform().Some of Spark NLP annotators have a Model suffix and some do not. The model suffix is explicitly stated when the annotator is the result of a training process. Some annotators, such as Tokenizer are transformers but do not contain the suffix Model since they are not trained, annotators. Model annotators have a pre-trained() on its static object, to retrieve the public pre-trained version of a model.Long story short, if it trains on a DataFrame and produces a model, it’s an AnnotatorApproach; and if it transforms one DataFrame into another DataFrame through some models, it’s an AnnotatorModel (e.g. WordEmbeddingsModel) and it doesn’t take Model suffix if it doesn’t rely on a pre-trained annotator while transforming a DataFrame (e.g. Tokenizer).By convention, there are three possible names:Approach — Trainable annotatorModel — Trained annotatornothing — Either a non-trainable annotator with pre-processing step or shorthand for a modelSo for example, Stemmer doesn’t say Approach nor Model, however, it is a Model. On the other hand, Tokenizer doesn’t say Approach nor Model, but it has a TokenizerModel(). Because it is not “training” anything, but it is doing some preprocessing before converting into a Model. When in doubt, please refer to official documentation and API reference. Even though we will do many hands-on practices in the following articles, let us give you a glimpse to let you understand the difference between AnnotatorApproach and AnnotatorModel. As stated above, Tokenizer is an AnnotatorModel. So we need to call fit() and then transform().Now let’s see how this can be done in Spark NLP using Annotators and Transformers. Assume that we have the following steps that need to be applied one by one on a data frame.Split text into sentencesTokenizeNormalizeGet word embeddingsimage.pngWhat’s actually happening under the hood?When we fit() on the pipeline with Spark data frame (df), its text column is fed into DocumentAssembler() transformer at first and then a new column “document” is created in Document type (AnnotatorType). As we mentioned before, this transformer is basically the initial entry point to Spark NLP for any Spark data frame. Then its document column is fed into SentenceDetector() (AnnotatorApproach) and the text is split into an array of sentences and a new column “sentences” in Document type is created. 
Then “sentences” column is fed into Tokenizer() (AnnotatorModel) and each sentence is tokenized and a new column “token” in Token type is created. And so on.
import sparknlp spark = sparknlp.start() print("Spark NLP version", sparknlp.version()) print("Apache Spark version:", spark.version)
_____no_output_____
Apache-2.0
tutorials/Certification_Trainings/Public/databricks_notebooks/2.4/2.Text_Preprocessing_with_SparkNLP_Annotators_Transformers.ipynb
hatrungduc/spark-nlp-workshop
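To make the four steps listed above (sentences, tokens, normalized tokens, word embeddings) concrete, here is a minimal pipeline sketch rather than the notebook's own code. It assumes a Spark session started by `sparknlp.start()` and a DataFrame `df` with a `text` column; `WordEmbeddingsModel.pretrained()` downloads a default pretrained embedding model on first use.

```python
from pyspark.ml import Pipeline
from sparknlp.base import DocumentAssembler
from sparknlp.annotator import SentenceDetector, Tokenizer, Normalizer, WordEmbeddingsModel

# df is assumed to be a Spark DataFrame with a 'text' column
document_assembler = DocumentAssembler().setInputCol("text").setOutputCol("document")
sentence_detector = SentenceDetector().setInputCols(["document"]).setOutputCol("sentence")
tokenizer = Tokenizer().setInputCols(["sentence"]).setOutputCol("token")
normalizer = Normalizer().setInputCols(["token"]).setOutputCol("normal")
embeddings = WordEmbeddingsModel.pretrained() \
    .setInputCols(["document", "normal"]).setOutputCol("embeddings")

pipeline = Pipeline(stages=[document_assembler, sentence_detector,
                            tokenizer, normalizer, embeddings])

model = pipeline.fit(df)        # estimators are fitted, transformers pass through
result = model.transform(df)    # each stage appends its annotation column
result.select("token.result", "embeddings.embeddings").show(truncate=50)
```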
Create a Spark DataFrame
text = 'Peter Parker is a nice guy and lives in New York' spark_df = spark.createDataFrame([[text]]).toDF("text") spark_df.show(truncate=False) # if you want to create a spark datafarme from a list of strings from pyspark.sql.types import StringType text_list = ['Peter Parker is a nice guy and lives in New York.', 'Bruce Wayne is also a nice guy and lives in Gotham City.'] spark.createDataFrame(text_list, StringType()).toDF("text").show(truncate=80) from pyspark.sql import Row spark.createDataFrame(list(map(lambda x: Row(text=x), text_list))).show(truncate=80) !wget https://raw.githubusercontent.com/JohnSnowLabs/spark-nlp-workshop/master/jupyter/annotation/english/spark-nlp-basics/sample-sentences-en.txt with open('sample-sentences-en.txt') as f: print (f.read()) spark_df = spark.read.text('sample-sentences-en.txt').toDF('text') spark_df.show(truncate=False) spark_df.select('text').show(truncate=False) textFiles = spark.sparkContext.wholeTextFiles("./*.txt",4) spark_df = textFiles.toDF(schema=['path','text']) spark_df.show(truncate=30) spark_df.select('text').take(1)
_____no_output_____
Apache-2.0
tutorials/Certification_Trainings/Public/databricks_notebooks/2.4/2.Text_Preprocessing_with_SparkNLP_Annotators_Transformers.ipynb
hatrungduc/spark-nlp-workshop
Transformers what are we going to do if our DataFrame doesn’t have columns in those type? Here comes transformers. In Spark NLP, we have five different transformers that are mainly used for getting the data in or transform the data from one AnnotatorType to another. Here is the list of transformers:`DocumentAssembler`: To get through the NLP process, we need to get raw data annotated. This is a special transformer that does this for us; it creates the first annotation of type Document which may be used by annotators down the road.`TokenAssembler`: This transformer reconstructs a Document type annotation from tokens, usually after these have been normalized, lemmatized, normalized, spell checked, etc, to use this document annotation in further annotators.`Doc2Chunk`: Converts DOCUMENT type annotations into CHUNK type with the contents of a chunkCol.`Chunk2Doc` : Converts a CHUNK type column back into DOCUMENT. Useful when trying to re-tokenize or do further analysis on a CHUNK result.`Finisher`: Once we have our NLP pipeline ready to go, we might want to use our annotation results somewhere else where it is easy to use. The Finisher outputs annotation(s) values into a string. each annotator accepts certain types of columns and outputs new columns in another type (we call this AnnotatorType).In Spark NLP, we have the following types: `Document`, `token`, `chunk`, `pos`, `word_embeddings`, `date`, `entity`, `sentiment`, `named_entity`, `dependency`, `labeled_dependency`. That is, the DataFrame you have needs to have a column from one of these types if that column will be fed into an annotator; otherwise, you’d need to use one of the Spark NLP transformers. 2. Document Assembler In Spark NLP, we have five different transformers that are mainly used for getting the data in or transform the data from one AnnotatorType to another. That is, the DataFrame you have needs to have a column from one of these types if that column will be fed into an annotator; otherwise, you’d need to use one of the Spark NLP transformers. Here is the list of transformers: DocumentAssembler, TokenAssembler, Doc2Chunk, Chunk2Doc, and the Finisher.So, let’s start with DocumentAssembler(), an entry point to Spark NLP annotators. To get through the process in Spark NLP, we need to get raw data transformed into Document type at first. DocumentAssembler() is a special transformer that does this for us; it creates the first annotation of type Document which may be used by annotators down the road.DocumentAssembler() comes from sparknlp.base class and has the following settable parameters. See the full list here and the source code here.`setInputCol()` -> the name of the column that will be converted. We can specify only one column here. It can read either a String column or an Array[String]`setOutputCol()` -> optional : the name of the column in Document type that is generated. We can specify only one column here. Default is ‘document’`setIdCol()` -> optional: String type column with id information`setMetadataCol()` -> optional: Map type column with metadata information`setCleanupMode()` -> optional: Cleaning up options, possible values:```disabled: Source kept as original. This is a default.inplace: removes new lines and tabs.inplace_full: removes new lines and tabs but also those which were converted to strings (i.e. 
\n)shrink: removes new lines and tabs, plus merging multiple spaces and blank lines to a single space.shrink_full: remove new lines and tabs, including stringified values, plus shrinking spaces and blank lines.```
from sparknlp.base import * documentAssembler = DocumentAssembler()\ .setInputCol("text")\ .setOutputCol("document")\ .setCleanupMode("shrink") doc_df = documentAssembler.transform(spark_df) doc_df.show(truncate=30)
_____no_output_____
Apache-2.0
tutorials/Certification_Trainings/Public/databricks_notebooks/2.4/2.Text_Preprocessing_with_SparkNLP_Annotators_Transformers.ipynb
hatrungduc/spark-nlp-workshop
First, we define DocumentAssembler with the desired parameters and then transform the data frame with it. The most important point to note here is that you need to use a String or Array[String] type column in .setInputCol(). The column doesn't have to be named text; you just use the column name as it is.
doc_df.printSchema() doc_df.select('document.result','document.begin','document.end').show(truncate=False)
_____no_output_____
Apache-2.0
tutorials/Certification_Trainings/Public/databricks_notebooks/2.4/2.Text_Preprocessing_with_SparkNLP_Annotators_Transformers.ipynb
hatrungduc/spark-nlp-workshop
The new column is in an array of struct type and has the parameters shown above. The annotators and transformers all come with universal metadata that would be filled down the road depending on the annotators being used. Unless you want to append other Spark NLP annotators to DocumentAssembler(), you don’t need to know what all these parameters mean for now. So we will talk about them in the following articles. You can access all these parameters with {column name}.{parameter name}.Let’s print out the first item’s result.
doc_df.select("document.result").take(1)
_____no_output_____
Apache-2.0
tutorials/Certification_Trainings/Public/databricks_notebooks/2.4/2.Text_Preprocessing_with_SparkNLP_Annotators_Transformers.ipynb
hatrungduc/spark-nlp-workshop
If we would like to flatten the document column, we can do as follows.
import pyspark.sql.functions as F doc_df.withColumn( "tmp", F.explode("document"))\ .select("tmp.*")\ .show(truncate=False)
_____no_output_____
Apache-2.0
tutorials/Certification_Trainings/Public/databricks_notebooks/2.4/2.Text_Preprocessing_with_SparkNLP_Annotators_Transformers.ipynb
hatrungduc/spark-nlp-workshop
3. Sentence Detector Finds sentence bounds in raw text. `setCustomBounds(string)`: Custom sentence separator text`setUseCustomOnly(bool)`: Use only custom bounds without considering those of Pragmatic Segmenter. Defaults to false. Needs customBounds.`setUseAbbreviations(bool)`: Whether to consider abbreviation strategies for better accuracy but slower performance. Defaults to true.`setExplodeSentences(bool)`: Whether to split sentences into different Dataset rows. Useful for higher parallelism in fat rows. Defaults to false.
from sparknlp.annotator import * # we feed the document column coming from Document Assembler sentenceDetector = SentenceDetector().\ setInputCols(['document']).\ setOutputCol('sentences') sent_df = sentenceDetector.transform(doc_df) sent_df.show(truncate=False) sent_df.select('sentences').take(3) text ='The patient was prescribed 1 capsule of Advil for 5 days . He was seen by the endocrinology service and she was discharged on 40 units of insulin glargine at night , 12 units of insulin lispro with meals , and metformin 1000 mg two times a day . It was determined that all SGLT2 inhibitors should be discontinued indefinitely fro 3 months .' text spark_df = spark.createDataFrame([[text]]).toDF("text") spark_df.show(truncate=False) spark_df.show(truncate=50) doc_df = documentAssembler.transform(spark_df) sent_df = sentenceDetector.transform(doc_df) sent_df.show(truncate=True) sent_df.select('sentences.result').take(1) sentenceDetector.setExplodeSentences(True) sent_df = sentenceDetector.transform(doc_df) sent_df.show(truncate=50) sent_df.select('sentences.result').show(truncate=False) from pyspark.sql import functions as F sent_df.select(F.explode('sentences.result')).show(truncate=False)
_____no_output_____
Apache-2.0
tutorials/Certification_Trainings/Public/databricks_notebooks/2.4/2.Text_Preprocessing_with_SparkNLP_Annotators_Transformers.ipynb
hatrungduc/spark-nlp-workshop
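Building on the parameter list above, here is a minimal sketch (not part of the original notebook) of the custom-bounds option. It assumes an active `spark` session, the `documentAssembler` defined earlier, and that `setCustomBounds` accepts a list of separator strings in the Python API; the semicolon separator and the demo sentence are illustrative.

```python
from sparknlp.annotator import SentenceDetector

# Sketch: also break sentences on semicolons, on top of the default rules.
custom_sd = SentenceDetector() \
    .setInputCols(["document"]) \
    .setOutputCol("sentences") \
    .setCustomBounds([";"]) \
    .setUseCustomOnly(False)  # keep the Pragmatic Segmenter rules as well

demo_df = spark.createDataFrame(
    [["He was admitted on Monday; he was discharged two days later."]]
).toDF("text")

custom_sd.transform(documentAssembler.transform(demo_df)) \
    .selectExpr("explode(sentences.result)") \
    .show(truncate=False)
```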
Tokenizer

Identifies tokens with tokenization open standards. It is an **Annotator Approach, so it requires .fit()**. A few rules will help customize it if the defaults do not fit user needs.

`setExceptions(StringArray)`: List of tokens to not alter at all. Allows composite tokens, such as two-word tokens that the user may not want to split.

`addException(String)`: Add a single exception.

`setExceptionsPath(String)`: Path to a txt file with a list of token exceptions.

`caseSensitiveExceptions(bool)`: Whether to follow case sensitivity when matching exceptions in text.

`contextChars(StringArray)`: List of 1-character strings to rip off from tokens, such as parentheses or question marks. Ignored if using prefix, infix or suffix patterns.

`splitChars(StringArray)`: List of 1-character strings to split tokens inside, such as hyphens. Ignored if using infix, prefix or suffix patterns.

`splitPattern(String)`: Pattern to separate from the inside of tokens. Takes priority over splitChars.

`setTargetPattern`: Basic regex rule to identify a candidate for tokenization. Defaults to \\S+, which means anything that is not a space.

`setSuffixPattern`: Regex to identify subtokens at the end of the token. The regex has to end with \\z and must contain groups (). Each group will become a separate token within the suffix. Defaults to non-letter characters, e.g. quotes or parentheses.

`setPrefixPattern`: Regex to identify subtokens at the beginning of the token. The regex has to start with \\A and must contain groups (). Each group will become a separate token within the prefix. Defaults to non-letter characters, e.g. quotes or parentheses.

`addInfixPattern`: Add an extension pattern regex with groups to the top of the rules (will target first, from the more specific to the more general).

`minLength`: Set the minimum allowed length for each token.

`maxLength`: Set the maximum allowed length for each token.
tokenizer = Tokenizer() \ .setInputCols(["document"]) \ .setOutputCol("token") text = 'Peter Parker (Spiderman) is a nice guy and lives in New York but has no e-mail!' spark_df = spark.createDataFrame([[text]]).toDF("text") doc_df = documentAssembler.transform(spark_df) token_df = tokenizer.fit(doc_df).transform(doc_df) token_df.show(truncate=100) token_df.select('token.result').take(1) tokenizer = Tokenizer() \ .setInputCols(["document"]) \ .setOutputCol("token") \ .setSplitChars(['-']) \ .setContextChars(['(', ')', '?', '!']) \ .addException("New York") \ token_df = tokenizer.fit(doc_df).transform(doc_df) token_df.select('token.result').take(1)
_____no_output_____
Apache-2.0
tutorials/Certification_Trainings/Public/databricks_notebooks/2.4/2.Text_Preprocessing_with_SparkNLP_Annotators_Transformers.ipynb
hatrungduc/spark-nlp-workshop
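As a complement to the parameter list above, here is a minimal sketch (not from the original notebook) of length filtering; it assumes the `minLength`/`maxLength` parameters are exposed as `setMinLength`/`setMaxLength` in the Python API and reuses `documentAssembler` and the active `spark` session.

```python
# Sketch: drop very short and very long tokens while tokenizing.
tokenizer_len = Tokenizer() \
    .setInputCols(["document"]) \
    .setOutputCol("token") \
    .setMinLength(3) \
    .setMaxLength(12)

demo_df = spark.createDataFrame([["I do like extraordinarily long words"]]).toDF("text")
demo_doc = documentAssembler.transform(demo_df)

tokenizer_len.fit(demo_doc).transform(demo_doc) \
    .select("token.result").show(truncate=False)
```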
Stacking Spark NLP Annotators in a Spark ML Pipeline

Spark NLP provides an easy API to integrate with Spark ML Pipelines, and all the Spark NLP annotators and transformers can be used within Spark ML Pipelines. So, it's better to explain the Pipeline concept through the official Spark ML documentation.

What is a Pipeline anyway? In machine learning, it is common to run a sequence of algorithms to process and learn from data. Apache Spark ML represents such a workflow as a Pipeline, which consists of a sequence of PipelineStages (Transformers and Estimators) to be run in a specific order. In simple terms, a pipeline chains multiple Transformers and Estimators together to specify an ML workflow.

At training time, a Pipeline is specified as a sequence of stages, and each stage is either a Transformer or an Estimator. These stages are run in order, and the input DataFrame is transformed as it passes through each stage; that is, the data are passed through the fitted pipeline in order, and each stage's transform() method updates the dataset and passes it to the next stage. With the help of Pipelines, we can ensure that training and test data go through identical feature processing steps.

Now let's see how this can be done in Spark NLP using Annotators and Transformers. Assume that we have the following steps that need to be applied one by one on a data frame:

- Split text into sentences
- Tokenize

And here is how we code this pipeline up in Spark NLP.
from pyspark.ml import Pipeline documentAssembler = DocumentAssembler()\ .setInputCol("text")\ .setOutputCol("document") sentenceDetector = SentenceDetector().\ setInputCols(['document']).\ setOutputCol('sentences') tokenizer = Tokenizer() \ .setInputCols(["sentences"]) \ .setOutputCol("token") nlpPipeline = Pipeline(stages=[ documentAssembler, sentenceDetector, tokenizer ]) empty_df = spark.createDataFrame([['']]).toDF("text") pipelineModel = nlpPipeline.fit(empty_df) spark_df = spark.read.text('./sample-sentences-en.txt').toDF('text') spark_df.show(truncate=False) result = pipelineModel.transform(spark_df) result.show(truncate=20) result.printSchema() result.select('sentences.result').take(3) result.select('token').take(3)[2]
_____no_output_____
Apache-2.0
tutorials/Certification_Trainings/Public/databricks_notebooks/2.4/2.Text_Preprocessing_with_SparkNLP_Annotators_Transformers.ipynb
hatrungduc/spark-nlp-workshop
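Since the fitted `pipelineModel` is a regular Spark ML PipelineModel, it can be inspected and persisted with the standard Spark ML API; a short sketch (the save path is illustrative).

```python
from pyspark.ml import PipelineModel

# The fitted stages, in the order they were declared.
print(pipelineModel.stages)

# Persist the whole fitted pipeline and load it back later.
pipelineModel.write().overwrite().save("/tmp/sparknlp_preprocessing_pipeline")
reloaded = PipelineModel.load("/tmp/sparknlp_preprocessing_pipeline")
reloaded.transform(spark_df).select("token.result").show(1, truncate=50)
```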
Normalizer

Removes all dirty characters from text following a regex pattern and transforms words based on a provided dictionary.

`setCleanupPatterns(patterns)`: Regular expressions list for normalization; defaults to [^A-Za-z]

`setLowercase(value)`: lowercase tokens, default false

`setSlangDictionary(path)`: txt file with delimited words to be transformed into something else
import string string.punctuation from sparknlp.annotator import * from sparknlp.base import * documentAssembler = DocumentAssembler()\ .setInputCol("text")\ .setOutputCol("document") tokenizer = Tokenizer() \ .setInputCols(["document"]) \ .setOutputCol("token") normalizer = Normalizer() \ .setInputCols(["token"]) \ .setOutputCol("normalized")\ .setLowercase(True)\ .setCleanupPatterns(["[^\w\d\s]"]) # remove punctuations (keep alphanumeric chars) # if we don't set CleanupPatterns, it will only keep alphabet letters ([^A-Za-z]) nlpPipeline = Pipeline(stages=[ documentAssembler, tokenizer, normalizer ]) empty_df = spark.createDataFrame([['']]).toDF("text") pipelineModel = nlpPipeline.fit(empty_df) result = pipelineModel.transform(spark_df) result.show(truncate=20) result.select('normalized.result').take(3) result.select('normalized').take(3)
_____no_output_____
Apache-2.0
tutorials/Certification_Trainings/Public/databricks_notebooks/2.4/2.Text_Preprocessing_with_SparkNLP_Annotators_Transformers.ipynb
hatrungduc/spark-nlp-workshop
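To inspect exactly what the Normalizer changed, the token and normalized columns can be placed side by side with `arrays_zip`, the same pattern used for the Stemmer later in this notebook; a small sketch assuming the `result` DataFrame from the cell above.

```python
import pyspark.sql.functions as F

# Pair each original token with its normalized form.
result.select(F.explode(F.arrays_zip('token.result', 'normalized.result')).alias("cols")) \
      .select(F.expr("cols['0']").alias("token"),
              F.expr("cols['1']").alias("normalized")) \
      .show(10, truncate=False)
```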
Stopwords Cleaner

This annotator takes a sequence of strings (e.g. the output of a Tokenizer, Normalizer, Lemmatizer, or Stemmer) and drops all the stop words from the input sequences.

Functions:

`setStopWords`: The words to be filtered out. Array[String]

`setCaseSensitive`: Whether to do a case-sensitive comparison over the stop words.
stopwords_cleaner = StopWordsCleaner()\ .setInputCols("token")\ .setOutputCol("cleanTokens")\ .setCaseSensitive(False)\ #.setStopWords(["no", "without"]) (e.g. read a list of words from a txt) stopwords_cleaner.getStopWords() documentAssembler = DocumentAssembler()\ .setInputCol("text")\ .setOutputCol("document") tokenizer = Tokenizer() \ .setInputCols(["document"]) \ .setOutputCol("token") nlpPipeline = Pipeline(stages=[ documentAssembler, tokenizer, stopwords_cleaner ]) empty_df = spark.createDataFrame([['']]).toDF("text") pipelineModel = nlpPipeline.fit(empty_df) spark_df = spark.read.text('./sample-sentences-en.txt').toDF('text') result = pipelineModel.transform(spark_df) result.show() result.select('cleanTokens.result').take(1)
_____no_output_____
Apache-2.0
tutorials/Certification_Trainings/Public/databricks_notebooks/2.4/2.Text_Preprocessing_with_SparkNLP_Annotators_Transformers.ipynb
hatrungduc/spark-nlp-workshop
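The commented-out `setStopWords` option above can also take an explicit list instead of the defaults; a minimal sketch reusing `documentAssembler`, `tokenizer`, `empty_df` and `spark_df` from the cell above (the word list is illustrative).

```python
# Sketch: filter only a small custom list of stop words.
custom_cleaner = StopWordsCleaner() \
    .setInputCols(["token"]) \
    .setOutputCol("cleanTokens") \
    .setStopWords(["the", "a", "an", "and", "of"]) \
    .setCaseSensitive(False)

custom_pipeline = Pipeline(stages=[documentAssembler, tokenizer, custom_cleaner])
custom_pipeline.fit(empty_df).transform(spark_df) \
    .select('cleanTokens.result').show(3, truncate=False)
```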
Token Assembler
documentAssembler = DocumentAssembler()\ .setInputCol("text")\ .setOutputCol("document") sentenceDetector = SentenceDetector().\ setInputCols(['document']).\ setOutputCol('sentences') tokenizer = Tokenizer() \ .setInputCols(["sentences"]) \ .setOutputCol("token") normalizer = Normalizer() \ .setInputCols(["token"]) \ .setOutputCol("normalized")\ .setLowercase(False)\ stopwords_cleaner = StopWordsCleaner()\ .setInputCols("normalized")\ .setOutputCol("cleanTokens")\ .setCaseSensitive(False)\ tokenassembler = TokenAssembler()\ .setInputCols(["sentences", "cleanTokens"]) \ .setOutputCol("clean_text") nlpPipeline = Pipeline(stages=[ documentAssembler, sentenceDetector, tokenizer, normalizer, stopwords_cleaner, tokenassembler ]) empty_df = spark.createDataFrame([['']]).toDF("text") pipelineModel = nlpPipeline.fit(empty_df) result = pipelineModel.transform(spark_df) result.show() result.select('text', 'clean_text.result').take(1) # if we use TokenAssembler().setPreservePosition(True), the original borders will be preserved (dropped & unwanted chars will be replaced by spaces) tokenassembler.setPreservePosition(True) result2 = pipelineModel.transform(spark_df) result2.select('clean_text.result').take(1) result.select('text', F.explode('clean_text.result').alias('clean_text')).show(truncate=False) import pyspark.sql.functions as F result.withColumn( "tmp", F.explode("clean_text")) \ .select("tmp.*").select("begin","end","result","metadata.sentence").show(truncate = False) # if we hadn't used Sentence Detector, this would be what we got. (tokenizer gets document instead of sentences column) tokenizer = Tokenizer() \ .setInputCols(["document"]) \ .setOutputCol("token") tokenassembler = TokenAssembler()\ .setInputCols(["document", "cleanTokens"]) \ .setOutputCol("clean_text") nlpPipeline = Pipeline(stages=[ documentAssembler, tokenizer, normalizer, stopwords_cleaner, tokenassembler ]) empty_df = spark.createDataFrame([['']]).toDF("text") pipelineModel = nlpPipeline.fit(empty_df) result = pipelineModel.transform(spark_df) result.select('text', 'clean_text.result').show(truncate=False) result.withColumn( "tmp", F.explode("clean_text")) \ .select("tmp.*").select("begin","end","result","metadata.sentence").show(truncate = False)
_____no_output_____
Apache-2.0
tutorials/Certification_Trainings/Public/databricks_notebooks/2.4/2.Text_Preprocessing_with_SparkNLP_Annotators_Transformers.ipynb
hatrungduc/spark-nlp-workshop
Stemmer Returns hard-stems out of words with the objective of retrieving the meaningful part of the word
stemmer = Stemmer() \ .setInputCols(["token"]) \ .setOutputCol("stem") documentAssembler = DocumentAssembler()\ .setInputCol("text")\ .setOutputCol("document") tokenizer = Tokenizer() \ .setInputCols(["document"]) \ .setOutputCol("token") nlpPipeline = Pipeline(stages=[ documentAssembler, tokenizer, stemmer ]) empty_df = spark.createDataFrame([['']]).toDF("text") pipelineModel = nlpPipeline.fit(empty_df) result = pipelineModel.transform(spark_df) result.show() result.select('stem.result').show(truncate=False) import pyspark.sql.functions as F result_df = result.select(F.explode(F.arrays_zip('token.result', 'stem.result')).alias("cols")) \ .select(F.expr("cols['0']").alias("token"), F.expr("cols['1']").alias("stem")).toPandas() result_df.head(10)
_____no_output_____
Apache-2.0
tutorials/Certification_Trainings/Public/databricks_notebooks/2.4/2.Text_Preprocessing_with_SparkNLP_Annotators_Transformers.ipynb
hatrungduc/spark-nlp-workshop
Lemmatizer Retrieves lemmas out of words with the objective of returning a base dictionary word
!wget https://raw.githubusercontent.com/mahavivo/vocabulary/master/lemmas/AntBNC_lemmas_ver_001.txt -P /FileStore/ lemmatizer = Lemmatizer() \ .setInputCols(["token"]) \ .setOutputCol("lemma") \ .setDictionary("/FileStore/AntBNC_lemmas_ver_001.txt", value_delimiter ="\t", key_delimiter = "->") documentAssembler = DocumentAssembler()\ .setInputCol("text")\ .setOutputCol("document") tokenizer = Tokenizer() \ .setInputCols(["document"]) \ .setOutputCol("token") stemmer = Stemmer() \ .setInputCols(["token"]) \ .setOutputCol("stem") nlpPipeline = Pipeline(stages=[ documentAssembler, tokenizer, stemmer, lemmatizer ]) empty_df = spark.createDataFrame([['']]).toDF("text") pipelineModel = nlpPipeline.fit(empty_df) result = pipelineModel.transform(spark_df) result.show() result.select('lemma.result').show(truncate=False) result_df = result.select(F.explode(F.arrays_zip('token.result', 'stem.result', 'lemma.result')).alias("cols")) \ .select(F.expr("cols['0']").alias("token"), F.expr("cols['1']").alias("stem"), F.expr("cols['2']").alias("lemma")).toPandas() result_df.head(10)
_____no_output_____
Apache-2.0
tutorials/Certification_Trainings/Public/databricks_notebooks/2.4/2.Text_Preprocessing_with_SparkNLP_Annotators_Transformers.ipynb
hatrungduc/spark-nlp-workshop
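The lemma dictionary used above is a plain text file; with `key_delimiter="->"` and `value_delimiter="\t"`, each line is assumed to map a lemma to its tab-separated surface forms, as in the AntBNC file. A minimal sketch of building and plugging in a tiny custom dictionary (the file content and path are illustrative, written with `dbutils` as elsewhere in this notebook).

```python
# Illustrative two-entry lemma dictionary: lemma -> tab-separated surface forms.
tiny_dict = "be -> was\twere\tis\tam\tare\ngo -> went\tgoes\tgoing\tgone"
dbutils.fs.put("dbfs:/tmp/tiny_lemmas.txt", tiny_dict, True)

tiny_lemmatizer = Lemmatizer() \
    .setInputCols(["token"]) \
    .setOutputCol("lemma") \
    .setDictionary("dbfs:/tmp/tiny_lemmas.txt",
                   value_delimiter="\t", key_delimiter="->")
```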
NGram Generator

The NGramGenerator annotator takes as input a sequence of strings (e.g. the output of a `Tokenizer`, `Normalizer`, `Stemmer`, `Lemmatizer`, or `StopWordsCleaner`). The parameter n is used to determine the number of terms in each n-gram. The output consists of a sequence of n-grams where each n-gram is represented by a space-delimited string of n consecutive words, with annotatorType `CHUNK`, the same as the Chunker annotator.

Functions:

`setN`: number of elements per n-gram (>=1)

`setEnableCumulative`: whether to calculate just the actual n-grams or all n-grams from 1 through n

`setDelimiter`: glue character used to join the tokens
ngrams_cum = NGramGenerator() \ .setInputCols(["token"]) \ .setOutputCol("ngrams") \ .setN(3) \ .setEnableCumulative(True)\ .setDelimiter("_") # Default is space # .setN(3) with setEnableCumulative(True) means unigrams, bigrams and trigrams. nlpPipeline = Pipeline(stages=[ documentAssembler, tokenizer, ngrams_cum ]) empty_df = spark.createDataFrame([['']]).toDF("text") pipelineModel = nlpPipeline.fit(empty_df) result = pipelineModel.transform(spark_df) result.select('ngrams.result').show(truncate=200) ngrams_nonCum = NGramGenerator() \ .setInputCols(["token"]) \ .setOutputCol("ngrams_v2") \ .setN(3) \ .setEnableCumulative(False)\ .setDelimiter("_") # Default is space ngrams_nonCum.transform(result).select('ngrams_v2.result').show(truncate=200)
_____no_output_____
Apache-2.0
tutorials/Certification_Trainings/Public/databricks_notebooks/2.4/2.Text_Preprocessing_with_SparkNLP_Annotators_Transformers.ipynb
hatrungduc/spark-nlp-workshop
TextMatcher

Annotator to match entire phrases (by token) provided in a file against a Document.

Functions:

`setEntities(path, format, options)`: Provides a file with phrases to match. Default: looks up path in configuration.

path: a path to a file that contains the entities in the specified format.

readAs: the format of the file, can be one of {ReadAs.LINE_BY_LINE, ReadAs.SPARK_DATASET}. Defaults to LINE_BY_LINE.

options: a map of additional parameters. Defaults to {"format": "text"}.

`entityValue`: Value for the entity metadata field to indicate which chunk comes from which TextMatcher when there are multiple TextMatchers.

`mergeOverlapping`: whether to merge overlapping matched chunks. Defaults to false.

`caseSensitive`: whether matching is case sensitive. Defaults to true.
# first method for doing this, second option below import urllib.request with urllib.request.urlopen('https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/resources/en/pubmed/pubmed-sample.csv') as f: content = f.read().decode('utf-8') dbutils.fs.put("/dbfs/tmp/pubmed/pubmed-sample.csv", content) %sh TMP=/dbfs/tmp/pubmed if [ ! -d "$TMP" ]; then mkdir $TMP cd $TMP wget https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/resources/en/pubmed/pubmed-sample.csv fi import pyspark.sql.functions as F pubMedDF = spark.read\ .option("header", "true")\ .csv("/dbfs/tmp/pubmed/pubmed-sample.csv")\ .filter("AB IS NOT null")\ .withColumnRenamed("AB", "text")\ .drop("TI") pubMedDF.show(truncate=50) pubMedDF.select('text').take(2) %sh TMP=/FileStore/entities if [ ! -d "$TMP" ]; then mkdir $TMP fi # write the target entities to txt file entities = ['KCNJ9', 'GIRK', 'diabetes mellitus', 'nucleotide polymorphisms'] content = '' for e in entities: content = content + "\n" + e dbutils.fs.put("dbfs:/tmp/pubmed/clinical_entities.txt", content) entities = ['breast cancer', 'colon cancer', 'lung cancer', 'monotherapy', 'therapy'] content='' for e in entities: content = content + "\n" + e dbutils.fs.put("dbfs:/tmp/pubmed/cancer_entities.txt", content) clinical_entity_extractor = TextMatcher() \ .setInputCols(["document",'token'])\ .setOutputCol("clinical_entities")\ .setEntities("dbfs:/tmp/pubmed/clinical_entities.txt")\ .setCaseSensitive(False)\ .setEntityValue('clinical_entity') cancer_entity_extractor = TextMatcher() \ .setInputCols(["document",'token'])\ .setOutputCol("cancer_entities")\ .setEntities("dbfs:/tmp/entities/cancer_entities.txt")\ .setCaseSensitive(False)\ .setEntityValue('cancer_entity') nlpPipeline = Pipeline(stages=[ documentAssembler, tokenizer, clinical_entity_extractor, cancer_entity_extractor ]) empty_df = spark.createDataFrame([['']]).toDF("text") pipelineModel = nlpPipeline.fit(empty_df) result = pipelineModel.transform(pubMedDF.limit(30)) result.select('clinical_entities.result','cancer_entities.result').take(2) result.select('clinical_entities','cancer_entities').take(2) result_df = result.select(F.explode(F.arrays_zip('clinical_entities.result', 'clinical_entities.begin', 'clinical_entities.end')).alias("cols")) \ .select(F.expr("cols['0']").alias("clinical_entities"), F.expr("cols['1']").alias("begin"), F.expr("cols['2']").alias("end")).toPandas() result_df.head(10) result_df = result.select(F.explode(F.arrays_zip('cancer_entities.result', 'cancer_entities.begin', 'cancer_entities.end')).alias("cols")) \ .select(F.expr("cols['0']").alias("cancer_entities"), F.expr("cols['1']").alias("begin"), F.expr("cols['2']").alias("end")).toPandas() result_df.head(10)
_____no_output_____
Apache-2.0
tutorials/Certification_Trainings/Public/databricks_notebooks/2.4/2.Text_Preprocessing_with_SparkNLP_Annotators_Transformers.ipynb
hatrungduc/spark-nlp-workshop
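When the phrase list overlaps with itself (here 'therapy' is contained in 'monotherapy'), the `mergeOverlapping` option described above controls whether overlapping chunks are collapsed. A minimal sketch, assuming the setter is named `setMergeOverlapping`, reusing the cancer entity file written above at dbfs:/tmp/pubmed/cancer_entities.txt together with `documentAssembler`, `tokenizer`, `empty_df` and `pubMedDF`.

```python
# Sketch: collapse overlapping matches such as 'therapy' inside 'monotherapy'.
merged_extractor = TextMatcher() \
    .setInputCols(["document", "token"]) \
    .setOutputCol("cancer_entities") \
    .setEntities("dbfs:/tmp/pubmed/cancer_entities.txt") \
    .setCaseSensitive(False) \
    .setMergeOverlapping(True)

merged_pipeline = Pipeline(stages=[documentAssembler, tokenizer, merged_extractor])
merged_pipeline.fit(empty_df).transform(pubMedDF.limit(10)) \
    .select("cancer_entities.result").show(truncate=80)
```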
RegexMatcher
rules = ''' renal\s\w+, started with 'renal' cardiac\s\w+, started with 'cardiac' \w*ly\b, ending with 'ly' \S*\d+\S*, match any word that contains numbers (\d+).?(\d*)\s*(mg|ml|g), match medication metrics ''' dbutils.fs.put("dbfs:/tmp/pubmed/regex_rules.txt", rules) import os documentAssembler = DocumentAssembler()\ .setInputCol("text")\ .setOutputCol("document") regex_matcher = RegexMatcher()\ .setInputCols('document')\ .setStrategy("MATCH_ALL")\ .setOutputCol("regex_matches")\ .setExternalRules(path='dbfs:/tmp/pubmed/regex_rules.txt', delimiter=',') nlpPipeline = Pipeline(stages=[ documentAssembler, regex_matcher ]) empty_df = spark.createDataFrame([['']]).toDF("text") pipelineModel = nlpPipeline.fit(empty_df) match_df = pipelineModel.transform(pubMedDF) match_df.select('regex_matches.result').take(3) match_df.select('text','regex_matches.result')\ .toDF('text','matches').filter(F.size('matches')>1)\ .show(truncate=50)
_____no_output_____
Apache-2.0
tutorials/Certification_Trainings/Public/databricks_notebooks/2.4/2.Text_Preprocessing_with_SparkNLP_Annotators_Transformers.ipynb
hatrungduc/spark-nlp-workshop
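Besides MATCH_ALL, the strategy parameter also accepts MATCH_FIRST, which keeps only the first hit of each rule per document; a short sketch reusing the rules file and objects from the cell above.

```python
# Sketch: keep only the first match of each rule instead of all matches.
regex_matcher_first = RegexMatcher() \
    .setInputCols('document') \
    .setStrategy("MATCH_FIRST") \
    .setOutputCol("regex_matches") \
    .setExternalRules(path='dbfs:/tmp/pubmed/regex_rules.txt', delimiter=',')

Pipeline(stages=[documentAssembler, regex_matcher_first]) \
    .fit(empty_df).transform(pubMedDF.limit(20)) \
    .select('regex_matches.result').show(truncate=80)
```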
Text Cleaning with UDF
text = '<h1 style="color: #5e9ca0;">Have a great <span style="color: #2b2301;">birth</span> day!</h1>' text_df = spark.createDataFrame([[text]]).toDF("text") import re from pyspark.sql.functions import udf from pyspark.sql.types import StringType, IntegerType clean_text = lambda s: re.sub(r'<[^>]*>', '', s) text_df.withColumn('cleaned', udf(clean_text, StringType())('text')).select('text','cleaned').show(truncate= False) find_not_alnum_count = lambda s: len([i for i in s if not i.isalnum() and i!=' ']) find_not_alnum_count("it's your birth day!") find_not_alnum_count = lambda s: len([i for i in s if not i.isalnum() and i!=' ']) find_not_alnum_count("it's your birth day!") text = '<h1 style="color: #5e9ca0;">Have a great <span style="color: #2b2301;">birth</span> day!</h1>' find_not_alnum_count(text) text_df.withColumn('cleaned', udf(find_not_alnum_count, IntegerType())('text')).select('text','cleaned').show(truncate= False)
_____no_output_____
Apache-2.0
tutorials/Certification_Trainings/Public/databricks_notebooks/2.4/2.Text_Preprocessing_with_SparkNLP_Annotators_Transformers.ipynb
hatrungduc/spark-nlp-workshop
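As an alternative to a Python UDF, the same HTML-tag cleanup can be done with Spark's built-in `regexp_replace`, which avoids the Python serialization overhead of UDFs; a short sketch on the same `text_df`.

```python
import pyspark.sql.functions as F

# Built-in alternative to the UDF above: strip HTML tags with a regex.
text_df.withColumn('cleaned', F.regexp_replace('text', r'<[^>]*>', '')) \
       .select('text', 'cleaned').show(truncate=False)
```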
Finisher

***Finisher:*** Once we have our NLP pipeline ready to go, we might want to use our annotation results somewhere else where they are easy to use. The Finisher outputs annotation values as plain strings.

If we just want the desired output column in the final dataframe, we can use the Finisher to drop the previous stages from the final output and get the `result` from the process. This is very handy when you want to use the output of a Spark NLP annotator as an input to another Spark ML transformer.

Settable parameters are:

`setInputCols()`

`setOutputCols()`

`setCleanAnnotations(True)` -> Whether to remove intermediate annotations

`setValueSplitSymbol("")` -> split values within an annotation character

`setAnnotationSplitSymbol("@")` -> split values between annotations character

`setIncludeMetadata(False)` -> Whether to include metadata keys. Sometimes useful in some annotations.

`setOutputAsArray(False)` -> Whether to output as Array. Useful as input for other Spark transformers.
finisher = Finisher() \ .setInputCols(["regex_matches"]) \ .setIncludeMetadata(False) # set to False to remove metadata nlpPipeline = Pipeline(stages=[ documentAssembler, regex_matcher, finisher ]) empty_df = spark.createDataFrame([['']]).toDF("text") pipelineModel = nlpPipeline.fit(empty_df) match_df = pipelineModel.transform(pubMedDF) match_df.show(truncate = 50) match_df.printSchema() match_df.filter(F.size('finished_regex_matches')>1).show(truncate = 50)
_____no_output_____
Apache-2.0
tutorials/Certification_Trainings/Public/databricks_notebooks/2.4/2.Text_Preprocessing_with_SparkNLP_Annotators_Transformers.ipynb
hatrungduc/spark-nlp-workshop
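If the finished column is meant to feed another Spark ML feature transformer (e.g. CountVectorizer), `setOutputAsArray(True)` keeps it as an array of strings rather than a single delimited string; a minimal sketch reusing the objects from the cell above (the output column name is illustrative).

```python
# Sketch: array output is directly usable by Spark ML feature transformers.
finisher_arr = Finisher() \
    .setInputCols(["regex_matches"]) \
    .setOutputCols(["finished_matches"]) \
    .setOutputAsArray(True) \
    .setCleanAnnotations(True)

arr_df = Pipeline(stages=[documentAssembler, regex_matcher, finisher_arr]) \
    .fit(empty_df).transform(pubMedDF.limit(20))
arr_df.printSchema()  # finished_matches should show up as array<string>
```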
LightPipeline

LightPipelines are Spark NLP specific pipelines, equivalent to a Spark ML Pipeline but meant to deal with smaller amounts of data. They're useful for working with small datasets, debugging results, or running either training or prediction from an API that serves one-off requests.

Spark NLP LightPipelines are Spark ML pipelines converted into a single-machine, multi-threaded task, becoming more than 10x faster for smaller amounts of data (small is relative, but 50k sentences is roughly a good maximum). To use them, we simply plug in a trained (fitted) pipeline and then annotate plain text. We don't even need to convert the input text to a DataFrame in order to feed it into a pipeline that accepts a DataFrame as input in the first place. This feature is quite useful when it comes to getting a prediction for a few lines of text from a trained ML model. **It is nearly 20x faster than using a Spark ML Pipeline.**

`LightPipeline(someTrainedPipeline).annotate(someStringOrArray)`
documentAssembler = DocumentAssembler()\ .setInputCol("text")\ .setOutputCol("document") tokenizer = Tokenizer() \ .setInputCols(["document"]) \ .setOutputCol("token") stemmer = Stemmer() \ .setInputCols(["token"]) \ .setOutputCol("stem") nlpPipeline = Pipeline(stages=[ documentAssembler, tokenizer, stemmer, lemmatizer ]) empty_df = spark.createDataFrame([['']]).toDF("text") pipelineModel = nlpPipeline.fit(empty_df) pipelineModel.transform(spark_df).show() from sparknlp.base import LightPipeline light_model = LightPipeline(pipelineModel) light_result = light_model.annotate("John and Peter are brothers. However they don't support each other that much.") light_result.keys() list(zip(light_result['token'], light_result['stem'], light_result['lemma'])) light_result = light_model.fullAnnotate("John and Peter are brothers. However they don't support each other that much.") light_result text_list= ["How did serfdom develop in and then leave Russia ?", "There will be some exciting breakthroughs in NLP this year."] light_model.annotate(text_list) ## important note: When you use Finisher in your pipeline, regardless of setting cleanAnnotations to False or True, LigtPipeline will only return the finished columns.
_____no_output_____
Apache-2.0
tutorials/Certification_Trainings/Public/databricks_notebooks/2.4/2.Text_Preprocessing_with_SparkNLP_Annotators_Transformers.ipynb
hatrungduc/spark-nlp-workshop
How to deliver JavaScript to the IPython Notebook Viewer

At first glance there appear to be at least four mechanisms for adding JavaScript code to an IPython notebook:

* A notebook cell marked `%%javascript`
* A Markdown cell with a `<script>` tag inside
* An `HTML()` display with a `<script>` tag inside
* A `JavaScript()` display with code inside

Here are examples of all four possibilities:
%%javascript console.log('Log message from the %%javascript cell')
_____no_output_____
MIT
Javascript-integration.ipynb
Grant-Steinfeld/astronomy-notebooks
*(Markdown cell with a `<script>` tag inside.)*

<script>console.log('Log message from the Markdown cell')</script>
from IPython.display import HTML HTML('<script>console.log("Log message from an HTML display")</script>') from IPython.display import Javascript Javascript('console.log("Log message from a Javascript display")')
_____no_output_____
MIT
Javascript-integration.ipynb
Grant-Steinfeld/astronomy-notebooks
Playing around with NLTK

Some material has been taken/adapted from the [NLTK book](http://www.nltk.org/book/):

* Exploring NLTK books (Text instance)
* Exploring NLTK corpora
* Exploring NLTK Treebank
* Exploring the WordNet corpus

For the linguistics concepts used here, refer to [the specific notebook](../nlp/concepts/linguistic-notions.ipynb).

Books and corpora
## List of all the books and sents imported texts() sents() # Choose the book to play with and some wordsText book = text2 word = 'love' word2 = 'him' words = ['love', 'kiss', 'marriage', 'sense', 'children', 'house', 'hate'] # Print first 100 token in book (book is an instance of nltk.text.Text, which behaves like a list of tokens) # Note that punctuation is included as tokens print(book[0:100], type(book)) print(list(book)[0:100] == book[0:100]) ## Counts and lexical diversity print('Num of tokens', len(book)) print('Num of counts for given word', book.count(word)) print('Lexical diversity', measure_lexical_diversity(book)) print('Fraction of use of word in book', compute_perc_word_usage(word, book)) ## Concordance and context # Choose a book and a word book = text2 word = 'love' # Concordance of chosen word in chosen book print('Concordance: ') book.concordance(word) # Words appearing in same contexts as chosen word in chosen book print('Words in similar context as chosen word:') # given word w, this finds all contexts w_1 w w_2 and finds all words w' which appear in same context, #i.e., w_1 w' w-2 book.similar(word) # Choose two words and show the common contexts print('Common contexts of two chosen words:') book.common_contexts([word, word2]) ## Collocations print('Collocations:') book.collocations() # Dispersion plot of text given some words (how far from the start word appears in text) plt.grid() book.dispersion_plot(words) ## FreqDist for token counts fdist = FreqDist(book) # FreqDist needs a tokens list, gives dict {token: counts} word = 'love' print('Num tokens for word %s: %f' %(word, fdist[word])) print('Num tokens: ', fdist.N()) print('Num unique tokens', fdist.B()) print('Token with the highest count is %s with count %d' %(fdist.max(), fdist[fdist.max()])) print('Hapaxes are (10 of them)', fdist.hapaxes()[:10]) # Plot the 50 most frequent tokens and their token counts, normal and cumulative fdist.plot(50, title='Book token counts') fdist.plot(50, cumulative=True, title='Book token counts, cumulative') # Same distrib, normal but with frequency instead of counts plot_freqdist_freq(fdist, max_num=50, title='Book token frequencies') ## FreqDist for word lenghts fdist_wl = FreqDist([len(word) for word in book]) # Plot and show as table fdist_wl.plot() fdist_wl.tabulate() # Conditional freq distrib on Brown corpus genres # ConditionalFreqDist is a collection of freq dist, one per condition # requires tuples (condition, event) # print genres in corpus print('All genres in Brown corpus: ', sorted(brown.categories())) # choosing some of the categories (genres) and get the words in each tuples = [(genre, word) for genre in ['romance', 'science_fiction'] for word in brown.words(categories=genre)] # Building the cfdist cfdist = ConditionalFreqDist(tuples) # Each cfdist[condition] will be a FreqDist type(cfdist['romance']) # Tabulate selecting the conditions and the specific samples (no selection will give all) cfdist.tabulate(conditions=['romance'], samples=['the', 'love', 'hate']) # Plotting any of the dists on the condition cfdist['romance'].plot(50, title='Counts tokens in genre romance') cfdist['science_fiction'].plot(50, title='Counts tokens in genre science_fiction')
All genres in Brown corpus: ['adventure', 'belles_lettres', 'editorial', 'fiction', 'government', 'hobbies', 'humor', 'learned', 'lore', 'mystery', 'news', 'religion', 'reviews', 'romance', 'science_fiction']
MIT
toolbox/python/nltk.ipynb
martinapugliese/tales-science-data-notebooks
Treebank

* Parsed sentences
# The Treebank corpus in NLTK contains 10% of the original Penn Treebank corpus treebank.words() treebank.parsed_sents()
_____no_output_____
MIT
toolbox/python/nltk.ipynb
martinapugliese/tales-science-data-notebooks
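A quick look at what a parsed sentence actually contains; a small sketch assuming NLTK and the treebank corpus have been downloaded as in the setup above.

```python
from nltk.corpus import treebank

# The first parsed sentence is a Tree object with POS tags above the leaves.
t = treebank.parsed_sents()[0]
print(t)                # bracketed parse
print(t.leaves()[:10])  # the first few tokens
print(t.height())       # depth of the parse tree
```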
WordNet

* Hypernyms and Hyponyms
wn = wordnet sss = wn.synsets('dog') s1 = sss[0] print(s1, s1.definition()) print(s1.hypernyms(), s1.hyponyms())
Synset('dog.n.01') a member of the genus Canis (probably descended from the common wolf) that has been domesticated by man since prehistoric times; occurs in many breeds [Synset('canine.n.02'), Synset('domestic_animal.n.01')] [Synset('basenji.n.01'), Synset('corgi.n.01'), Synset('cur.n.01'), Synset('dalmatian.n.02'), Synset('great_pyrenees.n.01'), Synset('griffon.n.02'), Synset('hunting_dog.n.01'), Synset('lapdog.n.01'), Synset('leonberg.n.01'), Synset('mexican_hairless.n.01'), Synset('newfoundland.n.01'), Synset('pooch.n.01'), Synset('poodle.n.01'), Synset('pug.n.01'), Synset('puppy.n.01'), Synset('spitz.n.01'), Synset('toy_dog.n.01'), Synset('working_dog.n.01')]
MIT
toolbox/python/nltk.ipynb
martinapugliese/tales-science-data-notebooks
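To see where a synset sits in the hypernym hierarchy, `hypernym_paths()` walks from the root down to the synset; a small sketch continuing from the dog synset above.

```python
from nltk.corpus import wordnet as wn

dog = wn.synset('dog.n.01')
# Each path runs from the root ('entity.n.01') down to 'dog.n.01'.
for path in dog.hypernym_paths():
    print(' -> '.join(s.name() for s in path))
```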
Text manipulation

* Tokenizing
* POS tagging
* Stemming/lemmatizing
# tagged sentences from Brown corpus brown_tagged_sents = brown.tagged_sents(categories='news') # Separate tagged sents into train and test train_sents = brown_tagged_sents[:int(len(brown_tagged_sents) * 0.8)] test_sents = brown_tagged_sents[int(len(brown_tagged_sents) * 0.8):] # Tokenising # NOTE: obvs the easiest sentence tokenization (naive) is splitting on period with split('.'). # this won't understand "Mr. Smith." though # similarly for tokenizing a sentence into tokens text = """What will you do? I go to the cinema this weekend. That's a test. I can't do better!""" # Standard methods are wrappers around the recommended tokenizers, so equivalent to # so equivalent to tokenizer = TreebankWordTokenizer(); tokenizer.tokenize(sentence) # Tokenizing text into sentences # sent_tokenize calls the PunktSentenceTokenizer (recommended) print('* Docs of PunktSentenceTokenizer:') print(PunktSentenceTokenizer.__doc__) st = sent_tokenize(text) print('* Text tokenized', st) # To train tokenizer on a bespoke text: # import nltk.tokenize.punkt # tokenizer = PunktSentenceTokenizer() # text = open("someplain.txt","r").read() # tokenizer.train(text) # Tokenizing a sentence into tokens # word_tokenise calls the TreebankWordTokenizer (recommended) print('* Docs of TreebankWordTokenizer:') print(TreebankWordTokenizer.__doc__) tokens = word_tokenize(st[2]) print('* Sentence tokenized', tokens) # wordpunct_tokenise calls WordPunctTokenizer, it will separate all punctuation as tokens (uses a regexp) print(WordPunctTokenizer.__doc__) tokens_punct = wordpunct_tokenize(st[2]) print('* Sentence tokenized with a regexp tokenizer', tokens_punct) # POS tagging # pos_tag uses the PerceptronTagger print('* Tagged tokens from above', pos_tag(tokens)) # Evaluate the performance of some taggers # The UnigramTagger will assign tag to token as the most probable for that token given a training set unigram_tagger = UnigramTagger(train_sents) print('* Evaluation Unigram tagger:', unigram_tagger.evaluate(test_sents)) # Repeat with an NGramTagger (assign the most probable tag given word and N - 1 previous context words) threegram_tagger = NgramTagger(3, train_sents) # for n=2 there is already a BigramTagger print('* Evaluation Ngram tagger with N=3:', threegram_tagger.evaluate(test_sents)) # slow due to sparsity: trained tagger hasn't seen many word-context combinations # Combining taggers: start with the Ngram one, if it can't find a tag for token fallback to the unigram one t0 = UnigramTagger(train_sents) t1 = NgramTagger(3, train_sents, backoff=t0) print('* Evaluation combined tagger:', t1.evaluate(test_sents)) # Stemming # Stemming some words with Porter, Lancaster and Snowball stemmers porter_stemmer = PorterStemmer() lancaster_stemmer = LancasterStemmer() snowball_stemmer = SnowballStemmer('english') print('* Stemming with (in order) Porter, Lancaster, Snowball') print('multiply: ', porter_stemmer.stem('multiply'), lancaster_stemmer.stem('multiply'), snowball_stemmer.stem('multiply')) print('mice: ', porter_stemmer.stem('mice'), lancaster_stemmer.stem('mice'), snowball_stemmer.stem('mice')) # Lemmatizing with the WordNet lemmatizer wordnet_lemmatizer = WordNetLemmatizer() print('mice: ', wordnet_lemmatizer.lemmatize('mice'))
mice: mouse
MIT
toolbox/python/nltk.ipynb
martinapugliese/tales-science-data-notebooks
Playing with frequency distributions
# Setting some sentences sentences = ['I go to school', 'I will go to the cinema', 'I like strawberries', 'I read books'] # FreqDist on the word length on some chosen sentences and on the last letter of words split_sentences = [sentence.split() for sentence in sentences] all_words = [] for sent in split_sentences: for word in sent: all_words.append(word) fdist = FreqDist([len(word) for word in all_words]) fdist.plot(title='Counts word lengths') fdist = FreqDist([word[-1:] for word in all_words]) fdist.plot(title='Counts last letter') # ConditionalFreqDist on the words per last letter of words split_sentences = [sentence.split() for sentence in sentences] all_words = [] for sent in split_sentences: for word in sent: all_words.append(word) tuples = [(word[-1:], word) for word in all_words] cfdist = ConditionalFreqDist(tuples) # Can plot both at same time cfdist.plot() cfdist data = [('breakfast', 'cereal'), ('breakfast', 'water'), ('evening', 'meat'), ('evening', 'salad'), ('evening', 'wine'), ('lunch', 'sandwich'), ('lunch', 'fruit'), ('lunch', 'water'), ('lunch', 'chocolate'), ('breakfast', 'milk') ] # word counts per category cfdist = ConditionalFreqDist(data) cfdist.plot() # Conditional freq dist to see how words have been used in time, inaugural corpus cfdist = ConditionalFreqDist( (target, fileid[:4]) for fileid in inaugural.fileids() for w in inaugural.words(fileid) for target in ['america', 'citizen'] if w.lower().startswith(target)) cfdist.plot() # Conditional freq dist to see words which end with chosen letters in Brown corpus cfdist = ConditionalFreqDist( (target, w) for w in brown.words() for target in ['zz'] if w.lower().endswith(target)) cfdist.plot()
_____no_output_____
MIT
toolbox/python/nltk.ipynb
martinapugliese/tales-science-data-notebooks
SLU15 - Hyperparameter tuning: Examples notebook

---

1 Load and prepare the data
import pandas as pd from sklearn.datasets import load_breast_cancer from sklearn.model_selection import train_test_split from sklearn.tree import DecisionTreeClassifier cancer_data = load_breast_cancer() X = pd.DataFrame(cancer_data["data"], columns=cancer_data["feature_names"]) y = cancer_data.target X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0) estimator = DecisionTreeClassifier()
_____no_output_____
MIT
S01 - Bootcamp and Binary Classification/SLU15 - Hyperparameter Tuning/Examples notebook.ipynb
FarhadManiCodes/batch5-students
2 Grid search
from sklearn.model_selection import GridSearchCV parameters = {'max_depth': range(1, 10), 'max_features': range(1, X.shape[1])} grid_search = GridSearchCV(estimator, parameters, cv=5, scoring="roc_auc") grid_search.fit(X_train, y_train) y_pred = grid_search.predict(X_test)
_____no_output_____
MIT
S01 - Bootcamp and Binary Classification/SLU15 - Hyperparameter Tuning/Examples notebook.ipynb
FarhadManiCodes/batch5-students
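After fitting, the search object exposes the winning hyperparameter combination and its cross-validated score, and `predict`/`predict_proba` delegate to the refitted best estimator; a short sketch continuing from the cell above.

```python
from sklearn.metrics import roc_auc_score

# Best hyperparameters and their mean cross-validated ROC AUC.
print(grid_search.best_params_)
print(grid_search.best_score_)

# Held-out performance of the refitted best estimator.
print(roc_auc_score(y_test, grid_search.predict_proba(X_test)[:, 1]))
```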
3 Random search
from scipy.stats import randint from sklearn.model_selection import RandomizedSearchCV parameters_dist = {"max_depth": randint(1, 100), "max_features": randint(1, X.shape[1]), "class_weight": ["balanced", None]} random_search = RandomizedSearchCV(estimator, parameters_dist, cv=5, n_iter=250, random_state=0) random_search.fit(X_train, y_train) y_pred = random_search.predict(X_test)
_____no_output_____
MIT
S01 - Bootcamp and Binary Classification/SLU15 - Hyperparameter Tuning/Examples notebook.ipynb
FarhadManiCodes/batch5-students
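The random search can be inspected the same way, which makes the two strategies easy to compare; note that this search was not given `scoring="roc_auc"`, so its scores are mean accuracy (the estimator's default). A short sketch continuing from the cell above.

```python
# Best candidate found by the random search and its scores.
print(random_search.best_params_)
print(random_search.best_score_)            # mean CV accuracy of the best candidate
print(random_search.score(X_test, y_test))  # accuracy of the refitted best estimator on the test set
```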
The analysis of the equality of rights between gender using the Human Freedom Index author: Ottillia Ni Project Report 2 (EM212: Applied Data Science) Content:IntroductionDatasheetExploratory Data Anaylsis Introduction Throughout the world, people strive for freedom. Freedom is a means of human progression and growth within the self and amongst society. But how can someone measure a person’s freedom? According to the Cato Institute, the Human Freedom Index is a global measurement of human freedom based on personal, economic, and civil freedom. Since 2008, the Human Freedom Index has collected enough data to rank countries and their levels of freedom based upon various qualities on a scale of 0 to 10, with 10 being the greatest amount of freedom. There are 79 distinct indicators of personal and economic freedom, which are found in the areas of the Rule of Law, Security and Safety, Movement, Religion, Association, Assembly, and Civil Society, Expression and Information, Identity and Relationships, Size and Government, Legal System and Property Rights, Access to Sound Money, Freedom to Trade Internationally, and Regulation of Credit Labor, and Business.The indicators within this dataset were evaluated by credible external sources, not by the creator of the index, to ensure objectivity. The intended use of the index is to fill in the gaps in literature and to look at how a range of different freedoms can be interconnected, such as economic freedom and personal freedom. When scoring the index, economic freedom takes up half the weight of the score while all other Personal freedoms complete the rest of the scored weight. Economic freedom takes half the weight because it has a central importance and affects all other freedoms. This data set has been used by researchers for a variety of reasons. Countries have developed news reports to discuss improvements in levels of freedom. Within the Personal and Economic Freedom, the data provides subcategories to listed areas. It has also been used to target and analyze more specific structures within the index such as women’s personal freedom. Given that this data set has a variety of subcomponents, for this project, I will focus on the data of identity and relationships, specifically same-sex relationships. I will be observing the different levels of tolerance amongst the listed countries. This would incorporate further research to observe and understand what social events might have caused a significant shift in the score of same-sex relationships. Doing a quick skim through the data, I noticed that the scores are either 0 or 10, with a few 5 scattered in between. My initial reaction to this is the indication of legality of same sex relations in these countries. Given that data has been collected over the past decade, I will be using this information to potentially determine how a specific country’s score has fluctuated or stayed the same over the years. The questions I will be asking through the use of this dataset are: what common trends do the countries with accepting of same-sex relationship have in common? How do these top countries rank of same-sex relationships compare to the other subcategories of identity and relationships? Are there any standout differences? An initial data issue I might uncover as I compare a country’s rank over the progression of the past decade is missing data. This potential issue might arise as the Human Freedom Index incorporates new countries with each given year. 
The analysis of this data can be utilized by companies advocating for global freedom such as Amnesty International, Human Rights Action Center, UNESCO (United Nations Educational, Scientific, and Cultural Organization), and other human rights organizations. Ideally, this data analysis would be funded on a donation basis as many of the organizations that would best benefit from this information are non-profits. ---In this analysis, I am looking to compare gender equality between men and women around the world using the data from the Human Freedom Index. Given that this index has a variety of variables, I will looking at personal freedom variables. I will be using the variable topic of identity and relationships to better understand the level of female equality around the world as compared to males. Datasheet Motivation for Dataset Creation Why was the dataset created? (e.g., were there specific tasks in mind, or a specific gap that needed to be filled?) The Human Freedom Index is a global measurement of human freedom based on personal, economic, and civil freedom that can more objectively regulate these freedom relationships. Since 2008, the Human Freedom Index has collected enough data to rank countries and their levels of freedom based upon various qualities on a scale of 0 to 10, with 10 being the greatest amount of freedom. According to Fred McMahon of the Fraser Institute, the dataset “provides crucial and objective information on the state and evolution of freedom globally.” What (other) tasks could the dataset be used for? Are there obvious tasks for which it should not be used? As the years progress and with each report, analysts can also use this data to compare how global freedom has changed over the years.Has the dataset been used for any tasks already? If so, where are the results so others can compare (e.g., links to published papers)? Those who compiled the dataset writes a report each year to accompany their data and findings. The central purpose of the report is to show a broad, yet accurate freedom of the overall world and have the larger purpose be for defining what they mean by freedom and to understand the relationships between the different existing forms. The link to the 2018 report can be found here: https://object.cato.org/sites/cato.org/files/human-freedom-index-files/human-freedom-index-2018-revised.pdf.Who funded the creation of the dataset? If there is an associated grant, provide the grant number. This dataset was developed by the Cato Institute and was funded and sponsored by the Lotte & John Hecht Memorial Foundation, Liberty Fund, Liberales Institut, Fraser Institute, and Cato Institute. Dataset Composition What are the instances? (that is, examples; e.g., documents, images, people, countries) Are there multiple types of instances? (e.g., movies, users, ratings; people, interactions between them; nodes, edges) The instances are countries willing to report their level of freedom and contribute to the Human Freedom Index. Are relationships between instances made explicit in the data (e.g., social network links, user/movie ratings, etc.)? I’m not certain, but I believe the instances are determined various forms of freedom this data is expressing.How many instances of each type are there? The instances consist of 162 countries with varying repetition as the data incorporates all its results starting 2008. What data does each instance consist of? “Raw” data (e.g., unprocessed text or images)? Features/attributes? Is there a label/target associated with instances? 
If the instances are related to people, are subpopulations identified (e.g., by age, gender, etc.) and what is their distribution? The data consists of 79 distinct indicators of personal and economic freedom, which are found in the areas of the Rule of Law, Security and Safety, Movement, Religion, Association, Assembly, and Civil Society, Expression and Information, Identity and Relationships, Size and Government, Legal System and Property Rights, Access to Sound Money, Freedom to Trade Internationally, and Regulation of Credit Labor, and Business. Is everything included or does the data rely on external resources? (e.g., websites, tweets, datasets) If external resources, a) are there guarantees that they will exist, and remain constant, over time; b) is there an official archival version. Are there licenses, fees or rights associated with any of the data?Given that the scoring of the freedom is based on other global indices, such as the Rule of Law Index and Social Institutions and Gender index, it is likely that these external sources will remain constant over time as many are forms of public government-based data. Are there recommended data splits or evaluation measures? (e.g., training, development, testing; accuracy/AUC) To compare the specific instances, it is recommended to compare with at least one same variable, such as year or type of freedom.What experiments were initially run on this dataset? Have a summary of those results and, if available, provide the link to a paper with more information here. The report has developed country profiles given the findings of the different specific factors of freedom. Data Collection Process How was the data collected? (e.g., hardware apparatus/sensor, manual human curation, software program, software interface/API; how were these constructs/measures/methods validated?) The Human Freedom Index is based upon sub-indexes, compiled and determined by government reporting. Who was involved in the data collection process? (e.g., students, crowd workers) How were they compensated? (e.g., how much were crowd workers paid?) This data set is a collection of government and university-based indexes.Over what time-frame was the data collected? Does the collection time-frame match the creation time-frame? The data is collected on an annual basis. How was the data associated with each instance acquired? Was the data directly observable (e.g., raw text, movie ratings), reported by subjects (e.g., survey responses), or indirectly inferred/derived from other data (e.g., part of speech tags; model-based guesses for age or language)? If the latter two, were they validated/verified and if so how? The data is derived from other data, more specifically specific based on various subfactors from the collection of government and university-based indexes.Does the dataset contain all possible instances? Or is it, for instance, a sample (not necessarily random) from a larger set of instances? The dataset contains all possible instances.If the dataset is a sample, then what is the population? What was the sampling strategy (e.g., deterministic, probabilistic with specific sampling probabilities)? Is the sample representative of the larger set (e.g., geographic coverage)? If not, why not (e.g., to cover a more diverse range of in- stances)? How does this affect possible uses? The dataset I am using is not a sample.Is there information missing from the dataset and why? 
(this does not include intentionally dropped instances; it might include, e.g., redacted text, withheld documents) Is this data missing because it was unavailable? Yes, some countries have missing scores given that the Human Freedom Index incorporates new countries with each given year or that specific distinct indicators might not have data recorded for that yearAre there any known errors, sources of noise, or redundancies in the data? UNKNOWN Data Preprocessing What preprocessing/cleaning was done? (e.g., discretization or bucketing, tokenization, part-of-speech tagging, SIFT feature extraction, removal of instances, processing of missing values, etc.) It appears likely.Was the “raw” data saved in addition to the preprocessed/cleaned data? (e.g., to support unanticipated future uses) UNKNOWNIs the preprocessing software available? UNKNOWN Does this dataset collection/processing procedure achieve the motivation for creating the dataset stated in the first section of this datasheet? UNKNOWN Dataset Distribution How is the dataset distributed? (e.g., website, API, etc.; does the data have a DOI; is it archived redundantly?) The data can be downloaded from https://www.kaggle.com/gsutters/the-human-freedom-index or https://object.cato.org/sites/cato.org/files/human-freedom-index-files/hfi2018web-revised3.xlsx.When will the dataset be released/first distributed? (Is there a canonical paper/reference for this dataset?) The dataset was first released in 2008.What license (if any) is it distributed under? Are there any copyrights on the data? The Human Freedom Index Report is copyrighted by the Cato Institute, Fraser Institute, and the Friedrich Naumann Foundation for Freedom. There is no clear indication of copyright to the data as it a collection of information from other index as well.Are there any fees or access/export restrictions? No. Dataset MaintenanceWho is supporting/hosting/maintaining the dataset? How does one contact the owner/curator/manager of the dataset (e.g. email address, or other contact info)? The data is maintained by Ian Vásquez and Tanja Porcnik, the Cato Institute, Fraser Institute, and the Friedrich Naumann Foundation for Freedom.Will the dataset be updated? How often and by whom? How will updates/revisions be documented and communicated (e.g., mailing list, GitHub)? Is there an erratum? This dataset is updated annual.If the dataset becomes obsolete how will this be communicated? The Cato Institute would probably report on their webpage the discontinuation of this dataset. Is there a repository to link to any/all papers/systems that use this dataset? If others want to extend/augment/build on this dataset, is there a mechanism for them to do so? If so, is there a process for tracking/assessing the quality of those contributions. What is the process for communicating/distributing these contributions to users? If others wanted to extend or build on this dataset, they should contact the Cato Institute, but more specifically the authors Ian Vásquez and Tanja Porcnik. Legal and Ethical ConsiderationsIf the dataset relates to people (e.g., their attributes) or was generated by people, were they informed about the data collection? (e.g., datasets that collect writing, photos, interactions, transactions, etc.) There is uncertainty given that dataset is based upon a collection of people not all might have known their data was being collectedIf it relates to other ethically protected subjects, have appropriate obligations been met? 
(e.g., medical data might include information collected from animals) UNKNOWNIf it relates to people, were there any ethical review applications/reviews/approvals? (e.g. Institutional Review Board applications) UNKNOWNIf it relates to people, were they told what the dataset would be used for and did they consent? What community norms exist for data collected from human communications? If consent was obtained, how? Were the people provided with any mechanism to revoke their consent in the future or for certain uses? UNKNOWNIf it relates to people, could this dataset expose people to harm or legal action? (e.g., financial social or otherwise) What was done to mitigate or reduce the potential for harm? Unlikely, since this data was a government-based collection.If it relates to people, does it unfairly advantage or disadvantage a particular social group? In what ways? How was this mitigated? No.If it relates to people, were they provided with privacy guarantees? If so, what guarantees and how are these ensured? Since this was based on reports, it is highly likely they were provided with privacy guarantees.Does the dataset comply with the EU General Data Protection Regulation (GDPR)? Does it comply with any other standards, such as the US Equal Employment Opportunity Act? Yes.Does the dataset contain information that might be considered sensitive or confidential? (e.g., personally identifying information) No, since the data is a collection of already published data and a study of a collective group of people.Does the dataset contain information that might be considered inappropriate or offensive? If used or interpreted incorrectly, analyst could potentially make inappropriate or offensive accusations of freedom within a specific country. Exploratory Data Anaylsis
import matplotlib.pyplot as plt import pandas as pd import numpy as np import seaborn as sns import pdb
_____no_output_____
BSD-3-Clause
ottilliani/project2_ottilliani.ipynb
pezLyfe/applied_ds
Importing Dataset

To begin, I will be using Python to analyze my data. (This Human Freedom Index data is downloaded from Kaggle.)
# read Human Freedom Index data hfi = pd.read_csv('https://tufts.box.com/shared/static/7iwsgxhffhfs87v209scqihq57pnmev0.csv') hfi.head()
_____no_output_____
BSD-3-Clause
ottilliani/project2_ottilliani.ipynb
pezLyfe/applied_ds
Cleaning Data

Given that I am focusing specifically on equality between females and males, I want to clean my data so that the variables printed give me the information relevant to my work. In addition, to make the data easier to understand, I will also rename various columns to be more descriptive of what each variable actually means.
select_cols = ["year","countries","region","pf_ss_women","pf_ss_women_fgm", "pf_ss_women_missing","pf_ss_women_inheritance","pf_ss_women_inheritance_widows","pf_ss_women_inheritance_daughters","pf_movement_women","pf_identity_legal","pf_identity_parental","pf_identity_parental_marriage","pf_identity_parental_divorce","pf_identity_sex","pf_identity_sex_male","pf_identity_sex_female","pf_identity_divorce"] hfi_select = hfi[select_cols] #hfi_select.head() hfi_select1 = hfi_select.rename(columns={'pf_ss_women': 'Women_Safety_Security', 'pf_ss_women_fgm': 'Female_Genital_Mutilation', 'pf_ss_women_missing': 'Missing_Women','pf_ss_women_inheritance':'Inheritance_Rights','pf_ss_women_inheritance_widows': 'Inheritance_Rights_Widows','pf_ss_women_inheritance_daughters':'Inheritance_Rights_Daughters','pf_movement_women':'Womens_Movement','pf_identity_legal':'Legal_Gender','pf_identity_parental':'Parental_rights','pf_identity_parental_marriage': 'Parental_rights_marriage','pf_identity_parental_divorce':'Parental_rights_after_divorce','pf_identity_sex':'Same_sex-relationship','pf_identity_sex_male':'Same_sex_males','pf_identity_sex_female':'Same_sex_female','pf_identity_divorce':'Divorce'}) hfi_select1.head() hfi_select1.dtypes
_____no_output_____
BSD-3-Clause
ottilliani/project2_ottilliani.ipynb
pezLyfe/applied_ds
Printing the data types makes it immediately evident that the data is mostly numeric, which indicates that much of the data has already been cleaned.
#Let us determine the percent of data missing per variable. #f, ax = plt.subplots(figsize=(50,20)) #((hfi.isnull().sum()/len(hfi)) * 100).plot(kind='bar') #plt.xticks(rotation=45, horizontalalignment='right') #plt.title('Percent Missing by Variable') # a simple scatterplot #hfi.plot.scatter('pf_score', 'ef_score') #hfi.plot.scatter('pf_identity_sex_male', 'pf_identity_sex')
_____no_output_____
BSD-3-Clause
ottilliani/project2_ottilliani.ipynb
pezLyfe/applied_ds
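The commented-out snippet above can be turned into a quick check of how much data is missing per selected (renamed) variable; a minimal sketch on `hfi_select1`.

```python
# Percentage of missing values per selected column, largest first.
missing_pct = hfi_select1.isnull().mean().mul(100).sort_values(ascending=False)
print(missing_pct.head(10))

missing_pct.plot(kind='bar', figsize=(12, 4), title='Percent missing by variable')
plt.show()
```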
To start off, we can lay out the number of countries represented in each region from 2008 to 2016 with a bar plot.
#hfi.region.value_counts().plot(kind='bar') #plt.xticks() #hfi[''].value_counts().plot(kind='bar') #filter to only focus on 2016 data filter1 = hfi_select1.year == 2016 hfi2016 = hfi_select1[filter1] hfi2016.sample(5) #filter to only focuus on 2016 data filter1 = hfi_select1.year == 2016 filter2 = hfi_select1.region == 'Sub-Saharan Africa' hfi2016Af = hfi_select1[filter1 & filter2] hfi2016Af.sample(5)
_____no_output_____
BSD-3-Clause
ottilliani/project2_ottilliani.ipynb
pezLyfe/applied_ds
Diving into gender equality, let us observe the equality of parental rights between males and females in various regions around the world.
#hfi2016Af.plot.scatter('pf_ss_women', 'pf_movement_women') #sub['mean'] = sub['density'].mean() #plt.plot(sub['name'], sub['density'], 'ro') #plt.plot(sub['name'], sub['mean'], linestyle = '--') #plt.xticks(fontsize = 8, rotation = 'vertical') hfi2016.region.value_counts().plot(kind='bar') plt.xticks() sns.heatmap(hfi_select1.groupby(['year', 'region'])['Parental_rights'].mean().unstack(), annot=True, cbar=False, fmt='.0f', cmap='RdBu_r')
_____no_output_____
BSD-3-Clause
ottilliani/project2_ottilliani.ipynb
pezLyfe/applied_ds
According to the Human Freedom Report, "Parental rights refers to the extent to which women have equal rights based in law and custom regarding “legal guardianship of a child during a marriage and custody rights over a child after divorce.”" With that in mind, we can split parental rights into legal guardianship of a child during a marriage versus custody of a child after a divorce. The heat map below shows that in most regions, females have equal rights to guardianship of their children during marriage. However, in regions such as the Middle East & North Africa, South Asia, and Sub-Saharan Africa, there is more of an imbalance.
sns.heatmap(hfi_select1.groupby(['year', 'region'])['Parental_rights_marriage'].mean().unstack(), annot=True, cbar=False, fmt='.0f', cmap='RdBu_r')
_____no_output_____
BSD-3-Clause
ottilliani/project2_ottilliani.ipynb
pezLyfe/applied_ds
The same analysis can also be done for parental rights after divorce. The trend appears similar to that of parental rights during marriage, but the most surprising result is found in the Middle East & North Africa category: the rating drops from a 5 to a 2 between 2012 and 2013, indicating a setback in the progression toward equality in this region.
sns.heatmap(hfi_select1.groupby(['year', 'region'])['Parental_rights_after_divorce'].mean().unstack(), annot=True, cbar=False, fmt='.0f', cmap='RdBu_r')
_____no_output_____
BSD-3-Clause
ottilliani/project2_ottilliani.ipynb
pezLyfe/applied_ds
Question: Which regions have the greatest amount of freedom with regard to same-sex relationships?
sns.heatmap( hfi_select1.groupby(['year', 'region'])['Same_sex-relationship'].mean().unstack(), annot=True, cbar=False, fmt='.0f', cmap='RdBu_r')
_____no_output_____
BSD-3-Clause
ottilliani/project2_ottilliani.ipynb
pezLyfe/applied_ds
From this heat map, we can see which regions struggle with equality in the freedom of same-sex relationships. The two heat maps below break this measure down separately for male and female same-sex relationships.
# draw the two heat maps on separate axes so they do not overlap in one figure
f, (ax1, ax2) = plt.subplots(2, 1, figsize=(8, 10))
sns.heatmap(hfi_select1.groupby(['year', 'region'])['Same_sex_males'].mean().unstack(),
            annot=True, cbar=False, fmt='.0f', cmap='RdBu_r', ax=ax1)
sns.heatmap(hfi_select1.groupby(['year', 'region'])['Same_sex_female'].mean().unstack(),
            annot=True, cbar=False, fmt='.0f', cmap='RdBu_r', ax=ax2)
_____no_output_____
BSD-3-Clause
ottilliani/project2_ottilliani.ipynb
pezLyfe/applied_ds
Merging Data
# read women purchasing power from https://ourworldindata.org/economic-inequality-by-gender
ge = pd.read_csv('https://tufts.box.com/shared/static/ikc9nsb0red47dv5ldc0rcsv5rml681l.csv')
ge.head()
#ge.dtypes
mergedata = hfi_select1.merge(ge, left_on=["year", "countries"],
                              right_on=["Year", "Entity"], suffixes=(False, False))
mergedata.head()
_____no_output_____
BSD-3-Clause
ottilliani/project2_ottilliani.ipynb
pezLyfe/applied_ds
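As a quick sanity check on the merge (a sketch, assuming hfi_select1, ge, and mergedata as defined above), one can compare row counts and see which countries failed to match by name across the two sources:

# Hedged sketch: verify how many rows survived the merge and which countries dropped out
print(len(hfi_select1), len(ge), len(mergedata))
unmatched = set(hfi_select1.countries.unique()) - set(mergedata.countries.unique())
print(sorted(unmatched)[:20])   # country names that are spelled differently in the two sources, if any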
By merging these two datasets, we can also compare how women in various countries are given the opportunity to participate in purchase decisions within their marriages.
#mergedata.dtypes
_____no_output_____
BSD-3-Clause
ottilliani/project2_ottilliani.ipynb
pezLyfe/applied_ds
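The exact column name of the purchasing-power measure in the OWID file is not shown here, so the snippet below uses a hypothetical placeholder name; it only sketches how that measure could be compared against the women's safety and security score once the real column name has been confirmed with mergedata.columns:

# Hedged sketch with a hypothetical column name -- replace after inspecting mergedata.columns
purchase_col = 'Share_of_women_in_purchase_decisions'   # hypothetical placeholder, not a confirmed OWID column
if purchase_col in mergedata.columns:
    mergedata.plot.scatter(purchase_col, 'Women_Safety_Security')
    plt.show()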
Supervised learning - scikit-learn
#hfi2016.plot.scatter('Inheritance_Rights', 'Parental_rights')
f, ax = plt.subplots(figsize=(6.5,6.5))
sns.boxplot(x="Inheritance_Rights", y="Parental_rights", data=hfi2016,
            fliersize=0.5, linewidth=0.75, ax=ax)
#ax.set_title('axes title')
ax.set_xlabel('Women Inheritance')
ax.set_ylabel('Parental Rights')

# filter to only focus on 2016 data in the merged dataset
#filtermerge1 = mergedata.year == 2016
#merge2016 = mergedata[filter1]
#mergedata.sample(5)

sns.heatmap(hfi_select1.groupby(['year', 'region'])['Inheritance_Rights'].mean().unstack(),
            annot=True, cbar=False, fmt='.0f', cmap='RdBu_r')
_____no_output_____
BSD-3-Clause
ottilliani/project2_ottilliani.ipynb
pezLyfe/applied_ds
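The cell above only plots the relationship, so as a minimal sketch of the supervised-learning step the heading promises, here is how a simple scikit-learn linear regression could be fit to predict Parental_rights from Inheritance_Rights on the 2016 subset (assuming hfi2016 as defined earlier; this is an illustrative baseline, not the notebook's final model):

# Hedged sketch: a baseline scikit-learn regression on the 2016 subset
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split

model_df = hfi2016[['Inheritance_Rights', 'Parental_rights']].dropna()
X = model_df[['Inheritance_Rights']]
y = model_df['Parental_rights']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)

lr = LinearRegression()
lr.fit(X_train, y_train)
print(lr.coef_, lr.intercept_)
print('R^2 on held-out data:', lr.score(X_test, y_test))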
Chapter 5: Logistic Regression and the ROC Curve - How to Evaluate a Learning Model. Commentary on "05-roc_curve". This is a walkthrough of the sample code "05-roc_curve.py" used in Chapter 5, "Logistic Regression and the ROC Curve: How to Evaluate a Learning Model," of ITエンジニアのための機械学習理論入門 (Machine Learning Theory for IT Engineers). Note: the code has been modified slightly for explanatory purposes. First, we import the required modules. The function multivariate_normal is used to generate random numbers that follow a multidimensional normal distribution.
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from pandas import Series, DataFrame
from numpy.random import multivariate_normal
_____no_output_____
Apache-2.0
05-roc_curve.ipynb
RXV06021/test_ml4se_colab
We define a function that generates the training data. For the two classes of points on the plane (circles and crosses), the number of samples, the center coordinates, and the variance of each class are specified as arguments.
def prepare_dataset(n1, mu1, variance1, n2, mu2, variance2):
    df1 = DataFrame(multivariate_normal(mu1, np.eye(2)*variance1, n1), columns=['x','y'])
    df1['type'] = 1
    df2 = DataFrame(multivariate_normal(mu2, np.eye(2)*variance2, n2), columns=['x','y'])
    df2['type'] = -1
    df = pd.concat([df1,df2], ignore_index=True)
    df = df.reindex(np.random.permutation(df.index)).reset_index(drop=True)
    return df
_____no_output_____
Apache-2.0
05-roc_curve.ipynb
RXV06021/test_ml4se_colab
We define a function that determines the decision boundary with logistic regression. Using the fitted result, it also returns a DataFrame in which each sample in the training set is annotated with its predicted probability.
# Logistic regression
def run_logistic(train_set):
    pd.options.mode.chained_assignment = None
    w = np.array([[0],[0.1],[0.1]])
    phi = train_set[['x','y']]
    phi['bias'] = 1
    phi = phi[['bias','x','y']].to_numpy()   # as_matrix() was removed in recent pandas
    t = (train_set[['type']] + 1)*0.5        # convert type = 1, -1 to type = 1, 0
    t = t.to_numpy()
    # Run at most 100 iterations
    for i in range(100):
        # Update the parameters with the IRLS method
        y = np.array([])
        for line in phi:
            a = np.dot(line, w)
            y = np.append(y, [1.0/(1.0+np.exp(-a))])
        r = np.diag(y*(1-y))
        y = y[np.newaxis,:].T
        tmp1 = np.linalg.inv(np.dot(np.dot(phi.T, r), phi))
        tmp2 = np.dot(phi.T, (y-t))
        w_new = w - np.dot(tmp1, tmp2)
        # Stop when the change in the parameters is less than 0.1%
        if np.dot((w_new-w).T, (w_new-w)) < 0.001 * np.dot(w.T, w):
            w = w_new
            break
        w = w_new
    # Compute the classification error
    w0, w1, w2 = w[0], w[1], w[2]
    err = 0
    for index, line in train_set.iterrows():
        a = np.dot(np.array([1, line.x, line.y]), w)
        p = 1.0/(1.0+np.exp(-a))
        train_set.loc[index, 'probability'] = p
        if (p-0.5)*line.type < 0:
            err += 1
    err_rate = err * 100 / len(train_set)
    result = train_set.sort_values(by=['probability'], ascending=[False]).reset_index()
    return w0, w1, w2, err_rate, result
_____no_output_____
Apache-2.0
05-roc_curve.ipynb
RXV06021/test_ml4se_colab
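For reference, the parameter update performed inside run_logistic is the standard IRLS step. Writing $\Phi$ for the design matrix with a bias column, $\mathbf{t}$ for the 0/1 labels, $\mathbf{y}$ for the current predicted probabilities, and $R = \mathrm{diag}\big(y_n(1-y_n)\big)$, the update in the code corresponds to

$$
y_n = \frac{1}{1+e^{-\mathbf{w}^{\mathsf T}\boldsymbol{\phi}_n}},\qquad
\mathbf{w}_{\mathrm{new}} = \mathbf{w} - \big(\Phi^{\mathsf T} R\, \Phi\big)^{-1}\Phi^{\mathsf T}(\mathbf{y}-\mathbf{t}),
$$

and the loop stops once $\lVert \mathbf{w}_{\mathrm{new}}-\mathbf{w}\rVert^2 < 0.001\,\lVert \mathbf{w}\rVert^2$.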
We define functions that display the result as a scatter plot with the decision boundary and as an ROC curve.
# Plot the training data and the decision boundary
def show_result(subplot, train_set, w0, w1, w2, err_rate):
    train_set1 = train_set[train_set['type']==1]
    train_set2 = train_set[train_set['type']==-1]
    ymin, ymax = train_set.y.min()-5, train_set.y.max()+10
    xmin, xmax = train_set.x.min()-5, train_set.x.max()+10
    subplot.set_ylim([ymin-1, ymax+1])
    subplot.set_xlim([xmin-1, xmax+1])
    subplot.scatter(train_set1.x, train_set1.y, marker='o', label=None)
    subplot.scatter(train_set2.x, train_set2.y, marker='x', label=None)
    linex = np.arange(xmin-5, xmax+5)
    liney = - linex * w1 / w2 - w0 / w2
    label = "ERR %.2f%%" % err_rate
    subplot.plot(linex, liney, label=label, color='red')
    subplot.legend(loc=1)

# Plot the ROC curve
def draw_roc(subplot, result):
    positives = len(result[result['type']==1])
    negatives = len(result[result['type']==-1])
    tp = [0.0] * len(result)
    fp = [0.0] * len(result)
    for index, line in result.iterrows():
        for c in np.arange(0, len(result)):
            if index < c:
                if line.type == 1:
                    tp[c] += 1
                else:
                    fp[c] += 1
    tp_rate = np.array(tp) / positives
    fp_rate = np.array(fp) / negatives
    subplot.set_ylim([0, 1])
    subplot.set_xlim([0, 1])
    subplot.set_xlabel("False positive rate")
    subplot.set_ylabel("True positive rate")
    subplot.plot(fp_rate, tp_rate)
_____no_output_____
Apache-2.0
05-roc_curve.ipynb
RXV06021/test_ml4se_colab
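The double loop in draw_roc is O(n^2) in the number of samples; since result is already sorted by predicted probability, essentially the same true-positive and false-positive rates can be obtained with cumulative sums. A hedged, equivalent sketch (assuming the same result DataFrame), shown only as an optimization note:

# Hedged sketch: vectorized ROC points equivalent to the double loop in draw_roc
def roc_points(result):
    labels = (result['type'] == 1).to_numpy()      # rows already sorted by probability, descending
    tp_rate = np.cumsum(labels) / labels.sum()      # true positives among the top-k ranked samples
    fp_rate = np.cumsum(~labels) / (~labels).sum()  # false positives among the top-k ranked samples
    return fp_rate, tp_rate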
We prepare a training set with relatively small variance, which is easy to classify.
train_set = prepare_dataset(80, [9,9], 50, 200, [-3,-3], 50)
_____no_output_____
Apache-2.0
05-roc_curve.ipynb
RXV06021/test_ml4se_colab
We display the result of applying logistic regression.
w0, w1, w2, err_rate, result = run_logistic(train_set)
fig = plt.figure(figsize=(6, 12))
subplot = fig.add_subplot(2,1,1)
show_result(subplot, train_set, w0, w1, w2, err_rate)
subplot = fig.add_subplot(2,1,2)
draw_roc(subplot, result)
_____no_output_____
Apache-2.0
05-roc_curve.ipynb
RXV06021/test_ml4se_colab
We prepare a training set with large variance, which is difficult to classify.
train_set = prepare_dataset(80, [9,9], 150, 200, [-3,-3], 150)
_____no_output_____
Apache-2.0
05-roc_curve.ipynb
RXV06021/test_ml4se_colab
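The result for this harder training set can be displayed with the same functions as before; a sketch reusing the earlier plotting cell (assuming run_logistic, show_result, and draw_roc as defined above):

# Sketch: apply logistic regression to the high-variance training set and draw its ROC curve
w0, w1, w2, err_rate, result = run_logistic(train_set)
fig = plt.figure(figsize=(6, 12))
subplot = fig.add_subplot(2,1,1)
show_result(subplot, train_set, w0, w1, w2, err_rate)
subplot = fig.add_subplot(2,1,2)
draw_roc(subplot, result)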