Dataset fields: markdown (string, 0-1.02M chars), code (string, 0-832k chars), output (string, 0-1.02M chars), license (string, 3-36 chars), path (string, 6-265 chars), repo_name (string, 6-127 chars).
Compare the size of subpopulations in healthy and AML individuals (within-sample analysis)
fig, axarr = plt.subplots(2, 1, sharey=True)
for id in range(0, 5):
    axarr[0].plot(population_size_H[id], color='g')
axarr[0].set_title('Healthy')
for id in range(0, 16):
    axarr[1].plot(population_size_SJ[id], color='r')
axarr[1].set_title('AML')
plt.show()

X = np.array(population_size_H + population_size_SJ)
Y = np.array([0]*5 + [1]*16)
predict_prob, models = LOO(X, Y)

cell_types = [cell_type_idx2name[i] for i in range(14)]
fig, axarr = plt.subplots(2, 1, sharey=True, sharex=True)
for id in range(5):
    axarr[0].plot(population_size_H[id], color='g')
axarr[0].set_title('Proportion of each cell type for Healthy individuals')
for id in range(16):
    axarr[1].plot(population_size_SJ[id], color='r')
axarr[1].set_title('Proportion of each cell type for AML individuals')
plt.xticks(range(14), cell_types, rotation=90)
plt.show()

for i in range(21):
    plt.plot(models[i].coef_[0])
plt.title('LOOCV Logistic Regression Coefficients')
plt.xticks(range(14), cell_types, rotation=90)
plt.show()
_____no_output_____
MIT
small_run/Flow_Cytometry_Mondrian_Processes-Random-Effects-Final_n_chain_5_n_sample_1000.ipynb
disiji/fc_mondrian
Diagnosis
# reload data!
data = [data_dict[_].head(20000).applymap(f)[markers].values
        for _ in ['H1', 'H2', 'H3', 'H4', 'H5',
                  'SJ01', 'SJ02', 'SJ03', 'SJ04', 'SJ05', 'SJ06', 'SJ07', 'SJ08',
                  'SJ09', 'SJ10', 'SJ11', 'SJ12', 'SJ13', 'SJ14', 'SJ15', 'SJ16']]

# compute data range
data_ranges = np.array([[[data[_][:, d].min(), data[_][:, d].max()]
                         for d in range(len(markers))] for _ in range(len(data))])
theta_space = np.array([[data_ranges[:, d, 0].min(), data_ranges[:, d, 1].max()]
                        for d in range(len(markers))])
n_samples = len(data)
_____no_output_____
MIT
small_run/Flow_Cytometry_Mondrian_Processes-Random-Effects-Final_n_chain_5_n_sample_1000.ipynb
disiji/fc_mondrian
Logistic regression with cell populations under the 2 templates as features
# step 1: learn cell populations of all samples, under 2 template MPs, 5 chains # V: cell proportion for 21 samples under healthy template V_H = [[None for chain in range(n_mcmc_chain)] for _ in range(21)] V_SJ = [[None for chain in range(n_mcmc_chain)] for _ in range(21)] for id in range(21): print id res_H = Parallel(n_jobs=num_cores)(delayed(mcmc_condition_on_template)\ (id,accepts_template_mp_H[i][-1]) for i in range(n_mcmc_chain)) indiv_MP_condition_template_H = [_[1][-1] for _ in res_H] for chain in range(n_mcmc_chain): V_H[id][chain] = compute_cell_population(data[id], indiv_MP_condition_template_H[chain:chain+1], \ table, cell_type_name2idx) res_SJ = Parallel(n_jobs=num_cores)(delayed(mcmc_condition_on_template)\ (id,accepts_template_mp_SJ[i][-1]) for i in range(n_mcmc_chain)) indiv_MP_condition_template_SJ = [_[1][-1] for _ in res_SJ] for chain in range(n_mcmc_chain): V_SJ[id][chain] = compute_cell_population(data[id], indiv_MP_condition_template_SJ[chain:chain+1], \ table, cell_type_name2idx) X = [[V_H[id][chain] + V_SJ[id][chain] for id in range(21)] for chain in range(n_mcmc_chain)] Y = [0]*5 + [1]*16 def LOO(X,Y): from sklearn.model_selection import LeaveOneOut from sklearn import linear_model loo = LeaveOneOut() models = [] X = np.array(X) Y = np.array(Y) predict_prob = [] for train, test in loo.split(X,Y): train_X = X[train] train_Y = Y[train] test_X = X[test] test_Y = Y[test] logreg = linear_model.LogisticRegression(C=1e5) logreg.fit(train_X, train_Y) test_Y_predict = logreg.predict(test_X) models.append(logreg) predict_prob.append(logreg.predict_proba(test_X)[0][0]) print predict_prob plt.scatter(range(21),predict_prob,s = 100) plt.xlim(0, 21) plt.ylim(0, 1) groups = ['H%s' % i for i in range(1,6)] + ['SJ%s' % i for i in range(1,17)] plt.legend() plt.xticks(range(21),groups) plt.ylabel('P(healthy)') plt.title('P(healthy) Predicted by LOOCV Logistic Regression') return predict_prob,models predict_prob,models = [],[] for chain in range(n_mcmc_chain): res = LOO(X[chain],Y) predict_prob.append(res[0]) models.append(res[1])
[0.99841206336111987, 0.99966800288254687, 0.87316854492456542, 0.99999926620800161, 0.99984613432778913, 1.3459889647293721e-08, 0.0026811637112176268, 0.00010195742044638578, 1.5442242625729463e-05, 1.8254518332594394e-05, 0.003338405513243603, 0.00011531545835186119, 0.00034991109377846552, 0.033424769452122471, 0.0017130441929669171, 0.046224174116587413, 4.2252976673040621e-09, 0.0035542734667173281, 4.0519915056602684e-07, 3.3218450434802094e-08, 1.41687768184795e-08] [0.99679385991584324, 0.99983747510386578, 0.96874183860576113, 0.99966630817397717, 0.99999843814722889, 4.4080938899071498e-09, 8.4638356256938607e-07, 3.5134730125285785e-08, 7.2666163464241151e-07, 0.014622982240019011, 0.0033318636184076489, 6.7338532638849813e-07, 0.00066990380102160962, 0.00065606113297067559, 1.729235734559964e-05, 7.4609522781043935e-06, 3.076769780518962e-05, 0.083990951899576283, 4.8496175056866875e-06, 4.6487622640256632e-09, 1.674832040832186e-06] [0.97461768633847823, 0.99955912667312252, 0.99589566487681869, 0.99998901902420423, 0.99994996814969583, 1.2550153397627994e-06, 4.3184142195729081e-05, 2.4103542013431678e-06, 4.7705896568661643e-06, 0.090557765099546939, 0.00049534119915739527, 1.8921063007715233e-05, 7.7215739042735265e-06, 0.00042160357552945005, 2.5992641006222783e-07, 4.4390364528634763e-07, 1.1415410838822027e-09, 0.047078205690052166, 1.2900448687069854e-07, 1.8290425096711971e-07, 2.3621352237546134e-06] [0.9984581698292071, 0.99967566805213026, 0.95581470201280105, 0.99981729250189166, 0.99998005823530389, 3.8171524496810605e-08, 1.0332645393407169e-05, 7.8714287676806549e-07, 1.0093235203179063e-06, 0.0035568928188703941, 0.0045957703296436447, 5.0590847675224815e-05, 0.00021169720824332217, 0.0013218348791412815, 0.00070822216859012244, 0.0014310848368445095, 1.8268520585174031e-08, 0.84909823894672387, 2.2819103173699062e-06, 4.6697962184927277e-06, 7.9412068315631856e-06] [0.95817677332306073, 0.99975102698038409, 0.96197731114098117, 0.99941207433360069, 0.9999706433925627, 1.6075629938328007e-08, 5.4855779849649622e-06, 1.1835507840451953e-07, 6.3212776513221769e-07, 0.025756083957138909, 0.003883512638022113, 3.1504749534727594e-06, 1.089165066914255e-05, 0.070833743257998627, 1.3106135488438753e-05, 1.2063959820007852e-07, 4.8466431523674913e-06, 0.0066751730227279094, 2.6432808174159383e-05, 8.1768226702139124e-05, 0.00011538114822084999]
MIT
small_run/Flow_Cytometry_Mondrian_Processes-Random-Effects-Final_n_chain_5_n_sample_1000.ipynb
disiji/fc_mondrian
Baseline 1: one tree for each group (without random effects)
# fit 1 tree to pooled healthy samples
global_MP_H = []
global_MP_SJ = []
n_iter = 1000

data_H = np.concatenate(data[0:5])
for chain in range(n_mcmc_chain):
    global_MP_H.append(init_mp(theta_space, table, data_H, n_iter, mcmc_gaussin_std))

data_SJ = np.concatenate(data[5:])
for chain in range(n_mcmc_chain):
    global_MP_SJ.append(init_mp(theta_space, table, data_SJ, n_iter, mcmc_gaussin_std))
_____no_output_____
MIT
small_run/Flow_Cytometry_Mondrian_Processes-Random-Effects-Final_n_chain_5_n_sample_1000.ipynb
disiji/fc_mondrian
Compare classification error (both give perfect classification):
V_H_Global = [None for _ in range(21)]
V_SJ_Global = [None for _ in range(21)]
for id in range(21):
    V_H_Global[id] = compute_cell_population(data[id], global_MP_H, table, cell_type_name2idx)
    V_SJ_Global[id] = compute_cell_population(data[id], global_MP_SJ, table, cell_type_name2idx)

X_Global = [V_H_Global[id] + V_SJ_Global[id] for id in range(21)]
Y_Global = [0]*5 + [1]*16

for id in range(21):
    plt.plot(X_Global[id])
predict_prob, models = LOO(X_Global, Y_Global)
_____no_output_____
MIT
small_run/Flow_Cytometry_Mondrian_Processes-Random-Effects-Final_n_chain_5_n_sample_1000.ipynb
disiji/fc_mondrian
Compare log likelihood $P(data_i|MP_i)$
# individual MP with random effects log_lik_H = [[] for _ in range(5)] # 5 * n_chain log_lik_SJ = [[] for _ in range(16)] # 5 * n_chain for id in range(5): data_subset = data[id] burnt_samples = [i for _ in range(n_mcmc_chain) for i in \ accepts_indiv_mp_lists_H[_][id][-1:]] for sample in burnt_samples: log_lik_H[id].append(comp_log_p_sample(sample, data_subset)) for id in range(16): data_subset = data[5+id] burnt_samples = [i for _ in range(n_mcmc_chain) for i in \ accepts_indiv_mp_lists_SJ[_][id][-1:]] for sample in burnt_samples: log_lik_SJ[id].append(comp_log_p_sample(sample, data_subset)) log_lik = log_lik_H + log_lik_SJ # individual MP without random effects log_lik_H_global = [[] for _ in range(5)] # 5 * n_chain * 2 log_lik_SJ_global = [[] for _ in range(16)] # 5 * n_chain * 2 for id in range(5): data_subset = data[id] for sample in global_MP_H: log_lik_H_global[id].append(comp_log_p_sample(sample, data_subset)) for id in range(16): data_subset = data[5+id] for sample in global_MP_SJ: log_lik_SJ_global[id].append(comp_log_p_sample(sample, data_subset)) log_lik_global = log_lik_H_global + log_lik_SJ_global def draw_plot(data, edge_color, fill_color): bp = ax.boxplot(data, patch_artist=True) for element in ['boxes', 'whiskers', 'fliers', 'means', 'medians', 'caps']: plt.setp(bp[element], color=edge_color) for patch in bp['boxes']: patch.set(facecolor=fill_color) fig, ax = plt.subplots(figsize=(8,3)) draw_plot(log_lik.T, 'red', 'tan') draw_plot(log_lik_global.T, 'blue', 'cyan') ax.set_ylabel('Log likelihood',fontsize=12) #plt.setp(ax.get_yticklabels(),visible=False) groups = ['H%s' % i for i in range(1,6)] + ['S%s' % i for i in range(1,17)] plt.plot([], c='#D7191C', label='MP+RE') plt.plot([], c='#2C7BB6', label='Global MP') plt.legend(fontsize=12) plt.plot([5.5, 5.5],[-400000, -150000], c = 'k', linestyle = ':') plt.xticks(range(1,22),groups) plt.xticks(fontsize=12) #plt.xlabel('Subjects') ax.yaxis.get_major_formatter().set_powerlimits((0,1)) plt.yticks(fontsize=12) plt.tight_layout() plt.savefig('log_lik_comparison.png') plt.show()
_____no_output_____
MIT
small_run/Flow_Cytometry_Mondrian_Processes-Random-Effects-Final_n_chain_5_n_sample_1000.ipynb
disiji/fc_mondrian
Baseline 2: K-means (use the centers of pooled healthy data and pooled AML data as feature extractors)
V_Kmeans_H = [[None for chain in range(n_mcmc_chain)] for _ in range(21)] V_Kmeans_SJ = [[None for chain in range(n_mcmc_chain)] for _ in range(21)] from sklearn.cluster import KMeans from scipy.spatial import distance for chain in range(n_mcmc_chain): cluster_centers_H = KMeans(n_clusters=14, random_state=chain).\ fit(np.concatenate(data[0:5])).cluster_centers_ for id in range(21): closest_pt_index = distance.cdist(data[id], cluster_centers_H).argmin(axis=1) V_Kmeans_H[id][chain] = [sum(closest_pt_index == k)*1.0 / \ len(closest_pt_index) for k in range(14)] cluster_centers_SJ = KMeans(n_clusters=14, random_state=chain).\ fit(np.concatenate(data[6:21])).cluster_centers_ for id in range(21): closest_pt_index = distance.cdist(data[id], cluster_centers_SJ).argmin(axis=1) V_Kmeans_SJ[id][chain] = [sum(closest_pt_index == k)*1.0 / \ len(closest_pt_index) for k in range(14)] X_Kmeans = [[V_Kmeans_H[id][chain] + V_Kmeans_SJ[id][chain] for id in range(21)] \ for chain in range(n_mcmc_chain)] predict_prob_Kmeans,models_Kmeans = [],[] for chain in range(n_mcmc_chain): res = LOO(X_Kmeans[chain],Y) predict_prob_Kmeans.append(res[0]) models_Kmeans.append(res[1]) # draw box plot fig, ax = plt.subplots(figsize=(8,3)) res_1 = np.array(predict_prob) res_1[:,6:] = 1 - res_1[:,6:] res_2 = np.array(predict_prob_Kmeans) res_2[:,6:] = 1 - res_2[:,6:] draw_plot(res_1, 'red', 'tan') draw_plot(res_2, 'blue', 'cyan') ax.set_ylabel('p(Y_hat = Y)',fontsize=12) #plt.setp(ax.get_yticklabels(),visible=False) groups = ['H%s' % i for i in range(1,6)] + ['S%s' % i for i in range(1,17)] plt.plot([], c='#D7191C', label='MP+RE') plt.plot([], c='#2C7BB6', label='kmeans') plt.legend(fontsize=12) plt.plot([5.5, 5.5],[0,1], c = 'k', linestyle = ':') plt.xticks(range(1,22),groups) plt.xticks(fontsize=12) #plt.xlabel('Subjects') ax.yaxis.get_major_formatter().set_powerlimits((0,1)) plt.yticks(fontsize=12) plt.tight_layout() plt.show()
_____no_output_____
MIT
small_run/Flow_Cytometry_Mondrian_Processes-Random-Effects-Final_n_chain_5_n_sample_1000.ipynb
disiji/fc_mondrian
Random Effect Analysis
def find_first_cut(theta_space):
    # find the dimension and location of the first cut, when there is a cut
    root_rec = theta_space[0]
    left_rec = theta_space[1][0]
    for _ in range(root_rec.shape[0]):
        if root_rec[_, 1] != left_rec[_, 1]:
            break
    dim, pos = _, left_rec[_, 1]
    return dim, pos

def compute_diff_mp(template_mp, mp):
    """
    Input: 2 Mondrian trees
    Output: returns mp - template_mp
        D: tree structured (dimension of cuts, shared across the 2 MP trees), each node is an integer
        C: tree structured (position of cuts), each node is a real value
    """
    if mp[1] == None and mp[2] == None:
        return None, None
    d_0_template, c_0_template = find_first_cut(template_mp)
    d_0_mp, c_0_mp = find_first_cut(mp)
    d_0 = d_0_template
    len_d_0 = template_mp[0][d_0][1] - template_mp[0][d_0][0]
    c_0 = abs(c_0_mp - c_0_template) / len_d_0
    D_left, C_left = compute_diff_mp(template_mp[1], mp[1])
    D_right, C_right = compute_diff_mp(template_mp[2], mp[2])
    D = [d_0, D_left, D_right]
    C = [c_0, C_left, C_right]
    return D, C
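As a toy illustration of the node layout that find_first_cut appears to assume (a node is a list [rectangle, left_subtree, right_subtree], with the rectangle stored as per-dimension [low, high] bounds); the values below are made up purely for illustration:

# A 2-D root box [0, 10] x [0, 4] whose first cut is at position 6 along dimension 0.
root_rec = np.array([[0., 10.], [0., 4.]])
left_rec = np.array([[0., 6.], [0., 4.]])    # the upper bound shrinks along the cut dimension
right_rec = np.array([[6., 10.], [0., 4.]])
toy_mp = [root_rec, [left_rec, None, None], [right_rec, None, None]]
print find_first_cut(toy_mp)   # expected: (0, 6.0)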
_____no_output_____
MIT
small_run/Flow_Cytometry_Mondrian_Processes-Random-Effects-Final_n_chain_5_n_sample_1000.ipynb
disiji/fc_mondrian
Compare magnitude of random effects in 2 groups
random_effect_H = [[None for chain in range(n_mcmc_chain)] for id in range(5)] random_effect_SJ = [[None for chain in range(n_mcmc_chain)] for id in range(16)] for id in range(5): for chain in range(n_mcmc_chain): random_effect_H[id][chain] = compute_diff_mp(accepts_template_mp_H[chain][-1],\ accepts_indiv_mp_lists_H[chain][id][-1]) for id in range(16): for chain in range(n_mcmc_chain): random_effect_SJ[id][chain] = compute_diff_mp(accepts_template_mp_SJ[chain][-1],\ accepts_indiv_mp_lists_SJ[chain][id][-1]) def flatten_tree(tree): if tree == None: return [] if len(tree) == 1: return tree else: return [tree[0]] + flatten_tree(tree[1]) + flatten_tree(tree[2]) """ random_effect_H_flattened[patient_id][chain] = a list of unordered offsets random_effect_SJ_flattened[patient_id][chain] = a list of unordered offsets """ random_effect_H_flattened = [[flatten_tree(random_effect_H[id][chain][1]) \ for chain in range(n_mcmc_chain)] for id in range(5)] random_effect_SJ_flattened = [[flatten_tree(random_effect_SJ[id][chain][1]) \ for chain in range(n_mcmc_chain)] for id in range(16)] import itertools import seaborn as sns; sns.set(color_codes=True) from sklearn.neighbors import KernelDensity random_effect_H_set = [j for i in random_effect_H_flattened for _ in i for j in _] random_effect_SJ_set = [j for i in random_effect_SJ_flattened for _ in i for j in _] # bins = 20 # plt.hist(random_effect_H_set,bins = bins) # plt.show() # plt.hist(random_effect_SJ_set, bins = bins) # plt.show() # kde_H = KernelDensity(kernel='gaussian', bandwidth=0.75).fit(random_effect_H_set) plt.plot() offset_H = sns.distplot(random_effect_H_set,label="Healthy") offset_SJ = sns.distplot(random_effect_SJ_set, label="AML") plt.legend() plt.show()
_____no_output_____
MIT
small_run/Flow_Cytometry_Mondrian_Processes-Random-Effects-Final_n_chain_5_n_sample_1000.ipynb
disiji/fc_mondrian
Visualize random effects (find chains and dimensions where random effects are obvious)
chain = 1
random_effect_H_set = [random_effect_H_flattened[id][chain][0] for id in range(5)]
random_effect_SJ_set = [random_effect_SJ_flattened[id][chain][0] for id in range(16)]

# bins = 20
# plt.hist(random_effect_H_set, bins=bins)
# plt.show()
# plt.hist(random_effect_SJ_set, bins=bins)
# plt.show()
# kde_H = KernelDensity(kernel='gaussian', bandwidth=0.75).fit(random_effect_H_set)

plt.plot()
offset_H = sns.distplot(random_effect_H_set, label="Healthy")
offset_SJ = sns.distplot(random_effect_SJ_set, label="AML")
plt.legend()
plt.show()
_____no_output_____
MIT
small_run/Flow_Cytometry_Mondrian_Processes-Random-Effects-Final_n_chain_5_n_sample_1000.ipynb
disiji/fc_mondrian
Classifying Surnames with a Multilayer Perceptron Imports
from argparse import Namespace
from collections import Counter
import json
import os
import string

import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
from tqdm import tqdm_notebook
_____no_output_____
Apache-2.0
chapters/chapter_4/4_2_mlp_surnames/4_2_Classifying_Surnames_with_an_MLP.ipynb
prampampam/PyTorchNLPBook
Data Vectorization classes The Vocabulary
class Vocabulary(object): """Class to process text and extract vocabulary for mapping""" def __init__(self, token_to_idx=None, add_unk=True, unk_token="<UNK>"): """ Args: token_to_idx (dict): a pre-existing map of tokens to indices add_unk (bool): a flag that indicates whether to add the UNK token unk_token (str): the UNK token to add into the Vocabulary """ if token_to_idx is None: token_to_idx = {} self._token_to_idx = token_to_idx self._idx_to_token = {idx: token for token, idx in self._token_to_idx.items()} self._add_unk = add_unk self._unk_token = unk_token self.unk_index = -1 if add_unk: self.unk_index = self.add_token(unk_token) def to_serializable(self): """ returns a dictionary that can be serialized """ return {'token_to_idx': self._token_to_idx, 'add_unk': self._add_unk, 'unk_token': self._unk_token} @classmethod def from_serializable(cls, contents): """ instantiates the Vocabulary from a serialized dictionary """ return cls(**contents) def add_token(self, token): """Update mapping dicts based on the token. Args: token (str): the item to add into the Vocabulary Returns: index (int): the integer corresponding to the token """ try: index = self._token_to_idx[token] except KeyError: index = len(self._token_to_idx) self._token_to_idx[token] = index self._idx_to_token[index] = token return index def add_many(self, tokens): """Add a list of tokens into the Vocabulary Args: tokens (list): a list of string tokens Returns: indices (list): a list of indices corresponding to the tokens """ return [self.add_token(token) for token in tokens] def lookup_token(self, token): """Retrieve the index associated with the token or the UNK index if token isn't present. Args: token (str): the token to look up Returns: index (int): the index corresponding to the token Notes: `unk_index` needs to be >=0 (having been added into the Vocabulary) for the UNK functionality """ if self.unk_index >= 0: return self._token_to_idx.get(token, self.unk_index) else: return self._token_to_idx[token] def lookup_index(self, index): """Return the token associated with the index Args: index (int): the index to look up Returns: token (str): the token corresponding to the index Raises: KeyError: if the index is not in the Vocabulary """ if index not in self._idx_to_token: raise KeyError("the index (%d) is not in the Vocabulary" % index) return self._idx_to_token[index] def __str__(self): return "<Vocabulary(size=%d)>" % len(self) def __len__(self): return len(self._token_to_idx)
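A brief usage sketch of the Vocabulary class defined above (the tokens here are arbitrary examples):

vocab = Vocabulary(add_unk=True, unk_token="@")
idx_a = vocab.add_token("a")
vocab.add_many(["b", "c"])
print(vocab.lookup_token("a"))      # the index assigned to "a"
print(vocab.lookup_token("zzz"))    # unseen token -> the UNK index
print(vocab.lookup_index(idx_a))    # "a"
print(len(vocab))                   # 4: "a", "b", "c", plus the UNK token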
_____no_output_____
Apache-2.0
chapters/chapter_4/4_2_mlp_surnames/4_2_Classifying_Surnames_with_an_MLP.ipynb
prampampam/PyTorchNLPBook
The Vectorizer
class SurnameVectorizer(object): """ The Vectorizer which coordinates the Vocabularies and puts them to use""" def __init__(self, surname_vocab, nationality_vocab): """ Args: surname_vocab (Vocabulary): maps characters to integers nationality_vocab (Vocabulary): maps nationalities to integers """ self.surname_vocab = surname_vocab self.nationality_vocab = nationality_vocab def vectorize(self, surname): """ Args: surname (str): the surname Returns: one_hot (np.ndarray): a collapsed one-hot encoding """ vocab = self.surname_vocab one_hot = np.zeros(len(vocab), dtype=np.float32) for token in surname: one_hot[vocab.lookup_token(token)] = 1 return one_hot @classmethod def from_dataframe(cls, surname_df): """Instantiate the vectorizer from the dataset dataframe Args: surname_df (pandas.DataFrame): the surnames dataset Returns: an instance of the SurnameVectorizer """ surname_vocab = Vocabulary(unk_token="@") nationality_vocab = Vocabulary(add_unk=False) for index, row in surname_df.iterrows(): for letter in row.surname: surname_vocab.add_token(letter) nationality_vocab.add_token(row.nationality) return cls(surname_vocab, nationality_vocab) @classmethod def from_serializable(cls, contents): surname_vocab = Vocabulary.from_serializable(contents['surname_vocab']) nationality_vocab = Vocabulary.from_serializable(contents['nationality_vocab']) return cls(surname_vocab=surname_vocab, nationality_vocab=nationality_vocab) def to_serializable(self): return {'surname_vocab': self.surname_vocab.to_serializable(), 'nationality_vocab': self.nationality_vocab.to_serializable()}
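A short sketch of the collapsed one-hot encoding produced by vectorize; the small vocabularies below are built inline purely for illustration:

surname_vocab = Vocabulary(unk_token="@")
surname_vocab.add_many(list("abcd"))
nationality_vocab = Vocabulary(add_unk=False)
nationality_vocab.add_many(["English", "Irish"])

vectorizer = SurnameVectorizer(surname_vocab, nationality_vocab)
one_hot = vectorizer.vectorize("dada")
print(one_hot.shape)  # (5,): one slot per character in the vocabulary, including UNK
print(one_hot)        # repeated characters still set a single 1 in their slot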
_____no_output_____
Apache-2.0
chapters/chapter_4/4_2_mlp_surnames/4_2_Classifying_Surnames_with_an_MLP.ipynb
prampampam/PyTorchNLPBook
The Dataset
class SurnameDataset(Dataset): def __init__(self, surname_df, vectorizer): """ Args: surname_df (pandas.DataFrame): the dataset vectorizer (SurnameVectorizer): vectorizer instatiated from dataset """ self.surname_df = surname_df self._vectorizer = vectorizer self.train_df = self.surname_df[self.surname_df.split=='train'] self.train_size = len(self.train_df) self.val_df = self.surname_df[self.surname_df.split=='val'] self.validation_size = len(self.val_df) self.test_df = self.surname_df[self.surname_df.split=='test'] self.test_size = len(self.test_df) self._lookup_dict = {'train': (self.train_df, self.train_size), 'val': (self.val_df, self.validation_size), 'test': (self.test_df, self.test_size)} self.set_split('train') # Class weights class_counts = surname_df.nationality.value_counts().to_dict() def sort_key(item): return self._vectorizer.nationality_vocab.lookup_token(item[0]) sorted_counts = sorted(class_counts.items(), key=sort_key) frequencies = [count for _, count in sorted_counts] self.class_weights = 1.0 / torch.tensor(frequencies, dtype=torch.float32) @classmethod def load_dataset_and_make_vectorizer(cls, surname_csv): """Load dataset and make a new vectorizer from scratch Args: surname_csv (str): location of the dataset Returns: an instance of SurnameDataset """ surname_df = pd.read_csv(surname_csv) train_surname_df = surname_df[surname_df.split=='train'] return cls(surname_df, SurnameVectorizer.from_dataframe(train_surname_df)) @classmethod def load_dataset_and_load_vectorizer(cls, surname_csv, vectorizer_filepath): """Load dataset and the corresponding vectorizer. Used in the case in the vectorizer has been cached for re-use Args: surname_csv (str): location of the dataset vectorizer_filepath (str): location of the saved vectorizer Returns: an instance of SurnameDataset """ surname_df = pd.read_csv(surname_csv) vectorizer = cls.load_vectorizer_only(vectorizer_filepath) return cls(surname_df, vectorizer) @staticmethod def load_vectorizer_only(vectorizer_filepath): """a static method for loading the vectorizer from file Args: vectorizer_filepath (str): the location of the serialized vectorizer Returns: an instance of SurnameVectorizer """ with open(vectorizer_filepath) as fp: return SurnameVectorizer.from_serializable(json.load(fp)) def save_vectorizer(self, vectorizer_filepath): """saves the vectorizer to disk using json Args: vectorizer_filepath (str): the location to save the vectorizer """ with open(vectorizer_filepath, "w") as fp: json.dump(self._vectorizer.to_serializable(), fp) def get_vectorizer(self): """ returns the vectorizer """ return self._vectorizer def set_split(self, split="train"): """ selects the splits in the dataset using a column in the dataframe """ self._target_split = split self._target_df, self._target_size = self._lookup_dict[split] def __len__(self): return self._target_size def __getitem__(self, index): """the primary entry point method for PyTorch datasets Args: index (int): the index to the data point Returns: a dictionary holding the data point's: features (x_surname) label (y_nationality) """ row = self._target_df.iloc[index] surname_vector = \ self._vectorizer.vectorize(row.surname) nationality_index = \ self._vectorizer.nationality_vocab.lookup_token(row.nationality) return {'x_surname': surname_vector, 'y_nationality': nationality_index} def get_num_batches(self, batch_size): """Given a batch size, return the number of batches in the dataset Args: batch_size (int) Returns: number of batches in the dataset """ return len(self) // batch_size def 
generate_batches(dataset, batch_size, shuffle=True, drop_last=True, device="cpu"): """ A generator function which wraps the PyTorch DataLoader. It will ensure each tensor is on the write device location. """ dataloader = DataLoader(dataset=dataset, batch_size=batch_size, shuffle=shuffle, drop_last=drop_last) for data_dict in dataloader: out_data_dict = {} for name, tensor in data_dict.items(): out_data_dict[name] = data_dict[name].to(device) yield out_data_dict
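A minimal sketch of how generate_batches is typically consumed, assuming a dataset built from the surname CSV configured in the args cell further below:

dataset = SurnameDataset.load_dataset_and_make_vectorizer("data/surnames/surnames_with_splits.csv")
dataset.set_split('train')
batch = next(generate_batches(dataset, batch_size=64, device="cpu"))
print(batch['x_surname'].shape)      # torch.Size([64, len(surname_vocab)])
print(batch['y_nationality'].shape)  # torch.Size([64])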
_____no_output_____
Apache-2.0
chapters/chapter_4/4_2_mlp_surnames/4_2_Classifying_Surnames_with_an_MLP.ipynb
prampampam/PyTorchNLPBook
The Model: SurnameClassifier
class SurnameClassifier(nn.Module):
    """ A 2-layer Multilayer Perceptron for classifying surnames """
    def __init__(self, input_dim, hidden_dim, output_dim):
        """
        Args:
            input_dim (int): the size of the input vectors
            hidden_dim (int): the output size of the first Linear layer
            output_dim (int): the output size of the second Linear layer
        """
        super(SurnameClassifier, self).__init__()
        self.fc1 = nn.Linear(input_dim, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, output_dim)

    def forward(self, x_in, apply_softmax=False):
        """The forward pass of the classifier

        Args:
            x_in (torch.Tensor): an input data tensor.
                x_in.shape should be (batch, input_dim)
            apply_softmax (bool): a flag for the softmax activation;
                should be False if used with the Cross Entropy losses
        Returns:
            the resulting tensor. tensor.shape should be (batch, output_dim)
        """
        intermediate_vector = F.relu(self.fc1(x_in))
        prediction_vector = self.fc2(intermediate_vector)

        if apply_softmax:
            prediction_vector = F.softmax(prediction_vector, dim=1)

        return prediction_vector
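A quick sanity-check sketch of the classifier's forward pass; the dimensions here are arbitrary (in the notebook they come from the vocabulary sizes):

toy_classifier = SurnameClassifier(input_dim=80, hidden_dim=300, output_dim=18)
x_in = torch.rand(4, 80)                          # a batch of 4 collapsed one-hot vectors
logits = toy_classifier(x_in)                     # raw scores, suitable for CrossEntropyLoss
probs = toy_classifier(x_in, apply_softmax=True)  # normalized class probabilities
print(logits.shape, probs.sum(dim=1))             # torch.Size([4, 18]); each row of probs sums to ~1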
_____no_output_____
Apache-2.0
chapters/chapter_4/4_2_mlp_surnames/4_2_Classifying_Surnames_with_an_MLP.ipynb
prampampam/PyTorchNLPBook
Training Routine Helper functions
def make_train_state(args): return {'stop_early': False, 'early_stopping_step': 0, 'early_stopping_best_val': 1e8, 'learning_rate': args.learning_rate, 'epoch_index': 0, 'train_loss': [], 'train_acc': [], 'val_loss': [], 'val_acc': [], 'test_loss': -1, 'test_acc': -1, 'model_filename': args.model_state_file} def update_train_state(args, model, train_state): """Handle the training state updates. Components: - Early Stopping: Prevent overfitting. - Model Checkpoint: Model is saved if the model is better :param args: main arguments :param model: model to train :param train_state: a dictionary representing the training state values :returns: a new train_state """ # Save one model at least if train_state['epoch_index'] == 0: torch.save(model.state_dict(), train_state['model_filename']) train_state['stop_early'] = False # Save model if performance improved elif train_state['epoch_index'] >= 1: loss_tm1, loss_t = train_state['val_loss'][-2:] # If loss worsened if loss_t >= train_state['early_stopping_best_val']: # Update step train_state['early_stopping_step'] += 1 # Loss decreased else: # Save the best model if loss_t < train_state['early_stopping_best_val']: torch.save(model.state_dict(), train_state['model_filename']) # Reset early stopping step train_state['early_stopping_step'] = 0 # Stop early ? train_state['stop_early'] = \ train_state['early_stopping_step'] >= args.early_stopping_criteria return train_state def compute_accuracy(y_pred, y_target): _, y_pred_indices = y_pred.max(dim=1) n_correct = torch.eq(y_pred_indices, y_target).sum().item() return n_correct / len(y_pred_indices) * 100
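For example, compute_accuracy on a toy batch of predictions:

y_pred = torch.tensor([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7]])
y_target = torch.tensor([1, 0, 0])
print(compute_accuracy(y_pred, y_target))  # 2 of the 3 argmax predictions match the target -> ~66.7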
_____no_output_____
Apache-2.0
chapters/chapter_4/4_2_mlp_surnames/4_2_Classifying_Surnames_with_an_MLP.ipynb
prampampam/PyTorchNLPBook
general utilities
def set_seed_everywhere(seed, cuda):
    np.random.seed(seed)
    torch.manual_seed(seed)
    if cuda:
        torch.cuda.manual_seed_all(seed)

def handle_dirs(dirpath):
    if not os.path.exists(dirpath):
        os.makedirs(dirpath)
_____no_output_____
Apache-2.0
chapters/chapter_4/4_2_mlp_surnames/4_2_Classifying_Surnames_with_an_MLP.ipynb
prampampam/PyTorchNLPBook
Settings and some prep work
args = Namespace( # Data and path information surname_csv="data/surnames/surnames_with_splits.csv", vectorizer_file="vectorizer.json", model_state_file="model.pth", save_dir="model_storage/ch4/surname_mlp", # Model hyper parameters hidden_dim=300, # Training hyper parameters seed=1337, num_epochs=100, early_stopping_criteria=5, learning_rate=0.001, batch_size=64, # Runtime options cuda=False, reload_from_files=False, expand_filepaths_to_save_dir=True, ) if args.expand_filepaths_to_save_dir: args.vectorizer_file = os.path.join(args.save_dir, args.vectorizer_file) args.model_state_file = os.path.join(args.save_dir, args.model_state_file) print("Expanded filepaths: ") print("\t{}".format(args.vectorizer_file)) print("\t{}".format(args.model_state_file)) # Check CUDA if not torch.cuda.is_available(): args.cuda = False args.device = torch.device("cuda" if args.cuda else "cpu") print("Using CUDA: {}".format(args.cuda)) # Set seed for reproducibility set_seed_everywhere(args.seed, args.cuda) # handle dirs handle_dirs(args.save_dir)
Expanded filepaths: model_storage/ch4/surname_mlp/vectorizer.json model_storage/ch4/surname_mlp/model.pth Using CUDA: False
Apache-2.0
chapters/chapter_4/4_2_mlp_surnames/4_2_Classifying_Surnames_with_an_MLP.ipynb
prampampam/PyTorchNLPBook
Initializations
if args.reload_from_files: # training from a checkpoint print("Reloading!") dataset = SurnameDataset.load_dataset_and_load_vectorizer(args.surname_csv, args.vectorizer_file) else: # create dataset and vectorizer print("Creating fresh!") dataset = SurnameDataset.load_dataset_and_make_vectorizer(args.surname_csv) dataset.save_vectorizer(args.vectorizer_file) vectorizer = dataset.get_vectorizer() classifier = SurnameClassifier(input_dim=len(vectorizer.surname_vocab), hidden_dim=args.hidden_dim, output_dim=len(vectorizer.nationality_vocab))
Creating fresh!
Apache-2.0
chapters/chapter_4/4_2_mlp_surnames/4_2_Classifying_Surnames_with_an_MLP.ipynb
prampampam/PyTorchNLPBook
Training loop
classifier = classifier.to(args.device) dataset.class_weights = dataset.class_weights.to(args.device) loss_func = nn.CrossEntropyLoss(dataset.class_weights) optimizer = optim.Adam(classifier.parameters(), lr=args.learning_rate) scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer=optimizer, mode='min', factor=0.5, patience=1) train_state = make_train_state(args) epoch_bar = tqdm_notebook(desc='training routine', total=args.num_epochs, position=0) dataset.set_split('train') train_bar = tqdm_notebook(desc='split=train', total=dataset.get_num_batches(args.batch_size), position=1, leave=True) dataset.set_split('val') val_bar = tqdm_notebook(desc='split=val', total=dataset.get_num_batches(args.batch_size), position=1, leave=True) try: for epoch_index in range(args.num_epochs): train_state['epoch_index'] = epoch_index # Iterate over training dataset # setup: batch generator, set loss and acc to 0, set train mode on dataset.set_split('train') batch_generator = generate_batches(dataset, batch_size=args.batch_size, device=args.device) running_loss = 0.0 running_acc = 0.0 classifier.train() for batch_index, batch_dict in enumerate(batch_generator): # the training routine is these 5 steps: # -------------------------------------- # step 1. zero the gradients optimizer.zero_grad() # step 2. compute the output y_pred = classifier(batch_dict['x_surname']) # step 3. compute the loss loss = loss_func(y_pred, batch_dict['y_nationality']) loss_t = loss.item() running_loss += (loss_t - running_loss) / (batch_index + 1) # step 4. use loss to produce gradients loss.backward() # step 5. use optimizer to take gradient step optimizer.step() # ----------------------------------------- # compute the accuracy acc_t = compute_accuracy(y_pred, batch_dict['y_nationality']) running_acc += (acc_t - running_acc) / (batch_index + 1) # update bar train_bar.set_postfix(loss=running_loss, acc=running_acc, epoch=epoch_index) train_bar.update() train_state['train_loss'].append(running_loss) train_state['train_acc'].append(running_acc) # Iterate over val dataset # setup: batch generator, set loss and acc to 0; set eval mode on dataset.set_split('val') batch_generator = generate_batches(dataset, batch_size=args.batch_size, device=args.device) running_loss = 0. running_acc = 0. classifier.eval() for batch_index, batch_dict in enumerate(batch_generator): # compute the output y_pred = classifier(batch_dict['x_surname']) # step 3. 
compute the loss loss = loss_func(y_pred, batch_dict['y_nationality']) loss_t = loss.to("cpu").item() running_loss += (loss_t - running_loss) / (batch_index + 1) # compute the accuracy acc_t = compute_accuracy(y_pred, batch_dict['y_nationality']) running_acc += (acc_t - running_acc) / (batch_index + 1) val_bar.set_postfix(loss=running_loss, acc=running_acc, epoch=epoch_index) val_bar.update() train_state['val_loss'].append(running_loss) train_state['val_acc'].append(running_acc) train_state = update_train_state(args=args, model=classifier, train_state=train_state) scheduler.step(train_state['val_loss'][-1]) if train_state['stop_early']: break train_bar.n = 0 val_bar.n = 0 epoch_bar.update() except KeyboardInterrupt: print("Exiting loop") # compute the loss & accuracy on the test set using the best available model classifier.load_state_dict(torch.load(train_state['model_filename'])) classifier = classifier.to(args.device) dataset.class_weights = dataset.class_weights.to(args.device) loss_func = nn.CrossEntropyLoss(dataset.class_weights) dataset.set_split('test') batch_generator = generate_batches(dataset, batch_size=args.batch_size, device=args.device) running_loss = 0. running_acc = 0. classifier.eval() for batch_index, batch_dict in enumerate(batch_generator): # compute the output y_pred = classifier(batch_dict['x_surname']) # compute the loss loss = loss_func(y_pred, batch_dict['y_nationality']) loss_t = loss.item() running_loss += (loss_t - running_loss) / (batch_index + 1) # compute the accuracy acc_t = compute_accuracy(y_pred, batch_dict['y_nationality']) running_acc += (acc_t - running_acc) / (batch_index + 1) train_state['test_loss'] = running_loss train_state['test_acc'] = running_acc print("Test loss: {};".format(train_state['test_loss'])) print("Test Accuracy: {}".format(train_state['test_acc']))
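The update running_loss += (loss_t - running_loss) / (batch_index + 1) used in the loops above is the incremental form of the mean: if $\bar{x}_{n-1}$ is the mean of the first $n-1$ batch losses, then $\bar{x}_n = \bar{x}_{n-1} + (x_n - \bar{x}_{n-1})/n$, so at the end of an epoch running_loss (and likewise running_acc) equals the average over all batches seen.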
Test loss: 1.7435305690765381; Test Accuracy: 47.875
Apache-2.0
chapters/chapter_4/4_2_mlp_surnames/4_2_Classifying_Surnames_with_an_MLP.ipynb
prampampam/PyTorchNLPBook
Inference
def predict_nationality(surname, classifier, vectorizer): """Predict the nationality from a new surname Args: surname (str): the surname to classifier classifier (SurnameClassifer): an instance of the classifier vectorizer (SurnameVectorizer): the corresponding vectorizer Returns: a dictionary with the most likely nationality and its probability """ vectorized_surname = vectorizer.vectorize(surname) vectorized_surname = torch.tensor(vectorized_surname).view(1, -1) result = classifier(vectorized_surname, apply_softmax=True) probability_values, indices = result.max(dim=1) index = indices.item() predicted_nationality = vectorizer.nationality_vocab.lookup_index(index) probability_value = probability_values.item() return {'nationality': predicted_nationality, 'probability': probability_value} new_surname = input("Enter a surname to classify: ") classifier = classifier.to("cpu") prediction = predict_nationality(new_surname, classifier, vectorizer) print("{} -> {} (p={:0.2f})".format(new_surname, prediction['nationality'], prediction['probability']))
Enter a surname to classify: McMahan McMahan -> Irish (p=0.55)
Apache-2.0
chapters/chapter_4/4_2_mlp_surnames/4_2_Classifying_Surnames_with_an_MLP.ipynb
prampampam/PyTorchNLPBook
Top-K Inference
vectorizer.nationality_vocab.lookup_index(8) def predict_topk_nationality(name, classifier, vectorizer, k=5): vectorized_name = vectorizer.vectorize(name) vectorized_name = torch.tensor(vectorized_name).view(1, -1) prediction_vector = classifier(vectorized_name, apply_softmax=True) probability_values, indices = torch.topk(prediction_vector, k=k) # returned size is 1,k probability_values = probability_values.detach().numpy()[0] indices = indices.detach().numpy()[0] results = [] for prob_value, index in zip(probability_values, indices): nationality = vectorizer.nationality_vocab.lookup_index(index) results.append({'nationality': nationality, 'probability': prob_value}) return results new_surname = input("Enter a surname to classify: ") classifier = classifier.to("cpu") k = int(input("How many of the top predictions to see? ")) if k > len(vectorizer.nationality_vocab): print("Sorry! That's more than the # of nationalities we have.. defaulting you to max size :)") k = len(vectorizer.nationality_vocab) predictions = predict_topk_nationality(new_surname, classifier, vectorizer, k=k) print("Top {} predictions:".format(k)) print("===================") for prediction in predictions: print("{} -> {} (p={:0.2f})".format(new_surname, prediction['nationality'], prediction['probability']))
Enter a surname to classify: McMahan How many of the top predictions to see? 5 Top 5 predictions: =================== McMahan -> Irish (p=0.55) McMahan -> Scottish (p=0.21) McMahan -> Czech (p=0.05) McMahan -> German (p=0.04) McMahan -> English (p=0.03)
Apache-2.0
chapters/chapter_4/4_2_mlp_surnames/4_2_Classifying_Surnames_with_an_MLP.ipynb
prampampam/PyTorchNLPBook
Task 2
dpm.load_task2() data = dpm.train_task2_df data["keyword"].value_counts().keys() data["text"][0] def get_text_for(data,label = 0): """ Returns text that corresponds to the label as a single string. """ text = [] for i in range(len(data["text"])): if data["label"][i][label] == 1: text.append(str(label)) z = data["text"][i] z = z.split(" ") for i in z: text.append(i) text.append(str(label)) return " ".join(text) text_cat = [] for i in range(7): text_cat.append(get_text_for(data,i).split(" ")) # text_cat = " ".join(text_cat) wvec = mwv.Word2VecModelTrainer(sentences=text_cat, path="label.wordvectors") wvec.train(size=50) wvp = wvec.load_trained("label.wordvectors") z = [wvp.distance("we","0"), wvp.distance("we","1"), wvp.distance("we","2"), wvp.distance("we","3"), wvp.distance("we","4"), wvp.distance("we","5"), wvp.distance("we","6")] z = np.array(z) z.mean() np.where(z<z.mean()) z.index(min(z)) def predict(text, pl=84): """ Returns a list of predictions for the text. """ text = text.split(" ") pred = [] count_dict = {0:0,1:0,2:0,3:0,4:0,5:0,6:0} labels = ["0","1","2","3","4","5","6"] for i in text: z = [wvp.distance(i,val) for val in labels] z = np.array(z) res = np.where(z<z.mean())[0] for i in res: count_dict[i] += 1 # return count_dict res= np.array(list(count_dict.values())) nres = np.zeros(7) # return res nres[np.where(res>np.percentile(res,pl))] = 1 return nres z = predict(data["text"][18],pl=82) z zx = data["label"][0] == z len(np.where(zx == False)[0]) def calculate_hit(y_true, y_pred): hits = 0 misses = 0 for i in range(len(y_true)): res = y_true[i] == y_pred[i] if all(res): hits += 1 else: misses += 1 return hits, misses def calculate_hit_partial(y_true, y_pred): hits = 0 misses = 0 for i in range(len(y_true)): res = y_true[i] == y_pred[i] misses += len(np.where(res == False)[0]) return misses losses =[] for pl in range(75, 100): y_pred = [predict(data["text"][i],pl=pl) for i in range(len(data["text"]))] losses.append((pl,calculate_hit_partial(data["label"],y_pred))) y_pred = [predict(data["text"][i],pl=84) for i in range(len(data["text"]))] y_pred = [i.astype(int) for i in y_pred] labels2file(y_pred, os.path.join('res/', 'task2.txt')) labels2file(dpm.train_task1_df.label.apply(lambda x:[x]).tolist(), os.path.join('ref/', 'task1.txt')) labels2file(dpm.train_task2_df.label.tolist(), os.path.join('ref/', 'task2.txt')) ! python evaluation.py . . ! cat scores.txt ! zip submission.zip task1.txt task2.txt
zip warning: name not matched: task2.txt updating: task1.txt (deflated 92%)
MIT
proto_two.ipynb
sarveshbhatnagar/PCL_DETECTION
Pre-processing mouse data
Load the clusters obtained from the previous section, so that we can bootstrap on them.
### Load DataFrame of log-transformed averaged time-series for each cluster
# (1) Healthy Group
df_log_healthy = pd.read_pickle("data/df_log_healthy.pkl")
# (2) IBD Group
df_log_ibd = pd.read_pickle("data/df_log_ibd.pkl")

### Load cluster memberships for every OTU
# (1) Healthy Group
tree_healthy = pd.read_pickle("data/OTU_dm_kmclusters.p")
NMF_healthy = pd.read_pickle("data/OTU_NMF_healthy.p")
time_healthy = pd.read_pickle("data/OTU_time_healthy.p")
# (2) IBD Group
tree_ibd = pd.read_pickle("data/OTU_dm_kmclusters_IBD.p")
NMF_ibd = pd.read_pickle("data/OTU_NMF_ibd.p")
time_ibd = pd.read_pickle("data/OTU_time_ibd.p")
_____no_output_____
MIT
Bootstrapping_augmentation.ipynb
sytseng/CS109b_Final_Project_Spring_2019
Bootstrapping
Step A. Subset df by cluster membership
Recall that we have three methods to generate clusters:
- Tree based: 3 clusters
- NMF correlation: 9 clusters
- Time correlation: 5 clusters

And we have loaded the cluster membership for every OTU above. In this section, we will subset the OTUs into those different clusters.
### Function to subset the dataframe by cluster membership
def subset_df_by_membership(df, tree, NMF, time):
    # get the total number of otu and time points
    (otu_length, time_length) = df.shape
    # add the membership as the last column
    df['tree'] = tree
    df['NMF'] = NMF
    df['time'] = time

    # loop through 3 different memberships
    methods = ['tree', 'NMF', 'time']
    method_list = list()
    ###########1##############
    # method_list[0]: 'tree' #
    # method_list[1]: 'NMF'  #
    # method_list[2]: 'time' #
    ##########################
    for method in methods:
        # loop through all clusters
        culsters = list(df[method].unique())
        df_list = list()
        #########################2###########################
        # for example:                                       #
        # df_list[0]: OTU with membership as first clusters  #
        # ...                                                #
        ######################################################
        for cluster in culsters:
            df_selected = df[df[method] == cluster].iloc[:, :time_length]
            df_list.append(df_selected)  #1#
        method_list.append(df_list)  #2#
    return method_list

### Split the DataFrame into clusters based on their Membership, pack them up into the list
method_list_healthy = subset_df_by_membership(df_log_healthy, tree_healthy, NMF_healthy, time_healthy)
method_list_ibd = subset_df_by_membership(df_log_ibd, tree_ibd, NMF_ibd, time_ibd)
_____no_output_____
MIT
Bootstrapping_augmentation.ipynb
sytseng/CS109b_Final_Project_Spring_2019
Step B. Bootstrap to generate more mice data
Now that we have the clusters, we bootstrap:
- For each sampling step, within every cluster, we randomly choose 30% of the OTUs and take their average to generate one time series representing that cluster.
- We repeat the sampling 30 times to generate the 30 mice.
### Function to Bootstrap:
def bootrapping(method_list, mice_count):
    methods = list()
    for method in range(3):
        mice = list()
        for time in range(mice_count):
            clusters = list()
            for cluster in range(len(method_list[method])):
                one_sample = method_list[method][cluster].sample(frac=0.3, replace=True)
                log_mean = one_sample[:].mean(axis=0)
                # inverse natural log transform
                real_mean = np.exp(log_mean)
                clusters.append(real_mean)
            mice.append(np.array(clusters))
        methods.append(mice)
    tree = methods[0]
    NMF = methods[1]
    time = methods[2]
    return tree, NMF, time

### Generate 30 mice for both groups
mice_count = 30
# (1) Healthy Mice
tree_healthy_30_mice, NMF_healthy_30_mice, time_healthy_30_mice = bootrapping(method_list_healthy, mice_count)
# (2) IBD Mice
tree_ibd_30_mice, NMF_ibd_30_mice, time_ibd_30_mice = bootrapping(method_list_ibd, mice_count)

### save the mice as a pickle file
# (1) Healthy Mice
pickle.dump(tree_healthy_30_mice, open("data/30_mice_tree_healthy.p", "wb"))
pickle.dump(NMF_healthy_30_mice, open("data/30_mice_NMF_healthy.p", "wb"))
pickle.dump(time_healthy_30_mice, open("data/30_mice_time_healthy.p", "wb"))
# (2) IBD Mice
pickle.dump(tree_ibd_30_mice, open("data/30_mice_tree_ibd.p", "wb"))
pickle.dump(NMF_ibd_30_mice, open("data/30_mice_NMF_ibd.p", "wb"))
pickle.dump(time_ibd_30_mice, open("data/30_mice_time_ibd.p", "wb"))
_____no_output_____
MIT
Bootstrapping_augmentation.ipynb
sytseng/CS109b_Final_Project_Spring_2019
Data Structure Example
These are the simulated absolute values (not the log-transformed ones; they have already been transformed back).
#######################################################################
# For example: tree_healthy_30_mice                                   #
#   tree_healthy_30_mice:       the list of 30 simulated mice         #
#   tree_healthy_30_mice[0]:    the clusters of the first mouse       #
#   tree_healthy_30_mice[0][0]: the 75 time points of its 1st cluster #
#######################################################################
print('the number of simulated mice data is: ', len(tree_healthy_30_mice))
print('within each mouse, the number of the tree_based clusters is: ', len(tree_healthy_30_mice[0]))
print('for each cluster, the number of the time points is: ', len(tree_healthy_30_mice[0][0]))
the number of simulated mice data is:  30
within each mouse, the number of the tree_based clusters is:  3
for each cluster, the number of the time points is:  75
MIT
Bootstrapping_augmentation.ipynb
sytseng/CS109b_Final_Project_Spring_2019
Fashion MNIST
from keras.datasets import fashion_mnist
from sklearn.metrics import roc_auc_score
from sklearn.metrics import mean_squared_error

_, (fashion_x_test, _) = fashion_mnist.load_data()
fashion_x_test = fashion_x_test.astype('float32') / 255.
fashion_x_test = np.reshape(fashion_x_test, (len(x_test), 28, 28, 1))

show_10_images(fashion_x_test)
show_10_images(autoencoder.predict(fashion_x_test))

labels = len(x_test) * [0] + len(fashion_x_test) * [1]
test_samples = np.concatenate((x_test, fashion_x_test))
losses = anomaly_detector.predict(test_samples)
print("AUROC:", roc_auc_score(labels, losses))
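As a rough cross-check (the anomaly_detector model itself is defined earlier in the notebook and is not shown here), an analogous per-sample score can be computed directly as the autoencoder's reconstruction error, assuming the anomaly score is the mean squared error per image:

# Hypothetical equivalent of anomaly_detector.predict: per-image reconstruction MSE.
reconstructions = autoencoder.predict(test_samples)
recon_errors = np.mean(np.square(test_samples - reconstructions), axis=(1, 2, 3))
print("AUROC (reconstruction MSE):", roc_auc_score(labels, recon_errors))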
AUROC: 0.99937089
MIT
notebooks/autoencoders/MNIST10/one_anomaly_detector.ipynb
tayden/NoveltyDetection
EMNIST Letters
from torchvision.datasets import EMNIST

emnist_letters = EMNIST('./', "letters", train=False, download=True)
emnist_letters = emnist_letters.test_data.numpy()
emnist_letters = emnist_letters.astype('float32') / 255.
emnist_letters = np.swapaxes(emnist_letters, 1, 2)
emnist_letters = np.reshape(emnist_letters, (len(emnist_letters), 28, 28, 1))

show_10_images(emnist_letters)
show_10_images(autoencoder.predict(emnist_letters))

labels = len(x_test) * [0] + len(emnist_letters) * [1]
test_samples = np.concatenate((x_test, emnist_letters))
losses = anomaly_detector.predict(test_samples)
print("AUROC:", roc_auc_score(labels, losses))
AUROC: 0.9604927475961538
MIT
notebooks/autoencoders/MNIST10/one_anomaly_detector.ipynb
tayden/NoveltyDetection
Gaussian Noise
mnist_mean = np.mean(x_train)
mnist_std = np.std(x_train)
gaussian_data = np.random.normal(mnist_mean, mnist_std, size=(10000, 28, 28, 1))

show_10_images(gaussian_data)
show_10_images(autoencoder.predict(gaussian_data))

labels = len(x_test) * [0] + len(gaussian_data) * [1]
test_samples = np.concatenate((x_test, gaussian_data))
losses = anomaly_detector.predict(test_samples)
print("AUROC:", roc_auc_score(labels, losses))
AUROC: 1.0
MIT
notebooks/autoencoders/MNIST10/one_anomaly_detector.ipynb
tayden/NoveltyDetection
Uniform Noise
import math

b = math.sqrt(3.) * mnist_std
a = -b + mnist_mean
b += mnist_mean
uniform_data = np.random.uniform(low=a, high=b, size=(10000, 28, 28, 1))

show_10_images(uniform_data)
show_10_images(autoencoder.predict(uniform_data))

labels = len(x_test) * [0] + len(uniform_data) * [1]
test_samples = np.concatenate((x_test, uniform_data))
losses = anomaly_detector.predict(test_samples)
print("AUROC:", roc_auc_score(labels, losses))
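The constants above match the first two moments of the MNIST pixels: a uniform distribution on $[a, b]$ has mean $(a+b)/2$ and standard deviation $(b-a)/(2\sqrt{3})$, so choosing a half-width of $\sqrt{3}\,\sigma$ around the MNIST mean $\mu$ (i.e. $a = \mu - \sqrt{3}\sigma$, $b = \mu + \sqrt{3}\sigma$) produces uniform noise with the same mean and standard deviation as the training images.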
AUROC: 1.0
MIT
notebooks/autoencoders/MNIST10/one_anomaly_detector.ipynb
tayden/NoveltyDetection
Check River Flows
from __future__ import division

import matplotlib.pyplot as plt
import netCDF4 as nc
import numpy as np

from salishsea_tools import nc_tools

%matplotlib inline

def find_points(flow):
    for i in range(390, 435):
        for j in range(280, 398):
            if flow[0, i, j] > 0:
                print i, j, lat[i, j], lon[i, j], flow[0, i, j]

grid = nc.Dataset('/ocean/sallen/allen/research/MEOPAR/NEMO-forcing/grid/bathy_meter_SalishSea2.nc')
lat = grid.variables['nav_lat'][:, :]
lon = grid.variables['nav_lon'][:, :]
depth = grid.variables['Bathymetry'][:]

river1 = nc.Dataset('/ocean/sallen/allen/research/MEOPAR/Rivers/RFraserCElse_y2015m05d15.nc')
river2 = nc.Dataset('/ocean/sallen/allen/research/MEOPAR/Rivers/RFraserCElse_y2015m07d01.nc')
river3 = nc.Dataset('/ocean/sallen/allen/research/MEOPAR/Rivers/RFraserCElse_y2015m07d02.nc')
river4 = nc.Dataset('/ocean/sallen/allen/research/MEOPAR/Rivers/RFraserCElse_y2015m07d03.nc')
river5 = nc.Dataset('/ocean/sallen/allen/research/MEOPAR/Rivers/RFraserCElse_y2015m07d04.nc')
river6 = nc.Dataset('/ocean/sallen/allen/research/MEOPAR/Rivers/RFraserCElse_y2015m07d05.nc')

print 'May 15'
find_points(river1.variables['rorunoff'][:, :, :])
print 'Jul 1'
find_points(river2.variables['rorunoff'][:, :, :])
print 'Jul 2'
find_points(river3.variables['rorunoff'][:, :, :])
print 'Jul 3'
find_points(river4.variables['rorunoff'][:, :, :])
print 'Jul 4'
find_points(river5.variables['rorunoff'][:, :, :])
print 'Jul 5'
find_points(river6.variables['rorunoff'][:, :, :])

ik = 425; jk = 302; d = 6
fig, ax = plt.subplots(1, 1, figsize=(15, 7.5))
imin = 390; imax = 435; jmin = 280; jmax = 398
cmap = plt.get_cmap('winter_r')
cmap.set_bad('burlywood')
mesh = ax.pcolormesh(depth[imin:imax, jmin:jmax], vmax=10., cmap=cmap)
ax.set_xlim((0, 110))
ax.set_xlabel('Grid Points')
ax.set_ylabel('Grid Points')
ax.text(40, 28, "Short Fraser River", fontsize=14)
cbar = fig.colorbar(mesh)
cbar.set_label('Depth (m)')
ax.plot(np.array((324, 324, 334, 334, 334, 318)) - jmin + 0.5,
        np.array((411, 412, 414, 415, 416, 434)) - imin + 0.5, 'ko');
_____no_output_____
Apache-2.0
Susan/Check River Files.ipynb
SalishSeaCast/analysis
import pandas as pd ws = pd.read_csv('winshares.txt') ws.head() ws.shape # clean up player name column ws['Player'] = ws['Player'].str.split('/').str[0] ws.head() sals = pd.read_csv('salaries.txt') sals.head() sals.shape sals['Player'] = sals['Player'].str.split('/').str[0] sals.head() # merge columns 2019-2020 salaries with ws dataframe final = pd.merge(ws, sals[['Player', '2019-20']], how='inner', on='Player') final.head() # lost players who did not renew contracts this year + new players entering the league in 2019-20, but dataframe is ready to go - salaries are together with stats # use WS as primary statistic to assess value # drop some unnecessary columns final = final.drop(columns=['Unnamed: 19', 'Unnamed: 24']) final.head() # rename WS▼, get rid of extra character final.rename(columns={'WS▼':'WS'}, inplace=True) final.head() final.dtypes # change 2019-20 dtype to float so we can use it to divide $/WS final['2019-20'] = final['2019-20'].replace('[\$,]', '', regex=True).astype(float) final.head() final['value'] = final['WS']/final['2019-20'] final['value'].head() # sort by most valuable players in terms of WS/salary for 2019-20 final.sort_values(by=['value'], inplace=True, ascending=False) final.sort_values(by=['2019-20'], inplace=True, ascending=False) final.head(10) final.head() # drop inf values, replace with nan import numpy as np final = final.replace([np.inf, -np.inf], np.nan) # drop nan values final = final.dropna() final.head(10) # lets choose a few columns to narrow down and work with final1 = final[['Player', 'Pos', 'Age', 'WS', '2019-20', 'value']] final1.head(10) # value column is hard to read because the numbers are so small - double digit winshares divided by millions in salary so let's scale that up final1['value'] = final1['value'].apply(lambda x: x*10000000) # gave me the warning above, but seems to have worked final1['value'].head() # top 20 most valuable players, in terms of WS last year / salary this season final1.head(20) # top 20 LEAST valuable players from last season final1.tail(20) # let's make a visual of the top 50 most valuable players top20 = final1.head(20) import matplotlib.pyplot as plt plt.scatter(top15['WS'], top15['2019-20'] , s=top15['value']) plt.title('Top 15 Most Valuable Players by WS/$') plt.xlabel('WS') plt.ylabel('2019-20 Salary') plt.show() df = pd.DataFrame({ 'x': top20['2019-20'], 'y': top20['WS'], 's': top20['value'], 'group': top20['Player'] }) df.shape # lets try adding labels to the points import pandas as pd import matplotlib.pylab as plt import seaborn as sns plt.figure(figsize = (17,12)) ax = sns.scatterplot(df.x, df.y, alpha = 0.5,s = 1000) for line in range(0,df.shape[0]): ax.text(df.x.iloc[line], df.y.iloc[line], df.group.iloc[line], horizontalalignment='center', size='medium', color='black') plt.title('Top 20 Most Valuable NBA Players by WS/$') plt.xlabel('2019-20 Salary') plt.ylabel('WS') plt.show() # do the same, but for 20 least valuable players bot20 = final1.tail(20) df = pd.DataFrame({ 'x': bot20['2019-20'], 'y': bot20['WS'], 's': bot20['value'], 'group': bot20['Player'] }) df.shape import pandas as pd import matplotlib.pylab as plt import seaborn as sns plt.figure(figsize = (17,12)) ax = sns.scatterplot(df.x, df.y, alpha = 0.5,s = 1000) for line in range(0,df.shape[0]): ax.text(df.x.iloc[line], df.y.iloc[line], df.group.iloc[line], horizontalalignment='center', size='medium', color='black') plt.title('Top 20 Most Least NBA Players by WS/$') plt.xlabel('2019-20 Salary') plt.ylabel('WS') plt.show()
_____no_output_____
MIT
dataproject.ipynb
mugilc/mugilc.github.io
- How many items are NaN in the is_hk column?
- How many items are known housekeeping genes?
- How many items are known tissue-specific genes?
print("NaN %s" % len(data[data["is_hk"].isnull()])) print("Housekeeping %s" % len(data[data["is_hk"] == 1])) print("Specific %s" % len(data[data["is_hk"] == 0])) def split_train_test(data): split = (int) (len(data) * 0.9) return data[0:split], data[split:] def split_data(data): # Shuffle data data = data.sample(frac = 1) # train_set, test_set hk_yes = data[data["is_hk"] == IS_HK] hk_no = data[data["is_hk"] == IS_NOT_HK] train_yes, test_yes = split_train_test(hk_yes) train_no , test_no = split_train_test(hk_no) train_set = train_yes train_set = train_set.append(train_no) train_set = train_set.sample(frac = 1) test_set = test_yes test_set = test_set.append(test_no) test_set = test_set.sample(frac = 1) # unsup_train_set unsup_train_set = data[data["is_hk"].isnull()] # sup_train_set sup_train_set = data[data["is_hk"].notnull()] return train_set, test_set, unsup_train_set, sup_train_set train_set, test_set, unsup_train_set, sup_train_set = split_data(data) def bin_plot(hist, bin_edge): # make sure to import matplotlib.pyplot as plt # plot the histogram plt.figure(figsize=(6,4)) plt.fill_between(bin_edge.repeat(2)[1:-1],hist.repeat(2),facecolor="steelblue") plt.show() # plot the first 100 bins only plt.figure(figsize=(6,4)) plt.fill_between(bin_edge.repeat(2)[1:100],hist.repeat(2)[1:100],facecolor="steelblue") plt.show() # plot the first 500 bins only plt.figure(figsize=(6,4)) plt.fill_between(bin_edge.repeat(2)[1:500],hist.repeat(2)[1:500],facecolor="steelblue") plt.show() # remove NaN values train_set_clength_no_nan = data["cDNA_length"][~np.isnan(data["cDNA_length"])] # bin the data into 1000 equally spaced bins # hist is the count for each bin # bin_edge is the edge values of the bins hist, bin_edge = np.histogram(train_set_clength_no_nan,1000) bin_plot(hist, bin_edge)
_____no_output_____
MIT
gene-prediction-gaussian.ipynb
neungkl/MLE-and-naive-bayes-classification
How many bins have zero counts?
print("Total %s" % len(hist)) print("Zeros %s" % sum(hist == 0))
Total 1000
Zeros 823
MIT
gene-prediction-gaussian.ipynb
neungkl/MLE-and-naive-bayes-classification
**cDNA Density Plot**
train_set_clength_no_nan_sorted = data["cDNA_length"][data["cDNA_length"].notnull()].sort_values() bin_edge = np.unique(train_set_clength_no_nan_sorted[0::70]) hist = np.bincount(np.digitize(train_set_clength_no_nan_sorted, bin_edge)) hist = hist[1:-1] bin_plot(hist, bin_edge)
_____no_output_____
MIT
gene-prediction-gaussian.ipynb
neungkl/MLE-and-naive-bayes-classification
**CDS Density Plot**
train_set_clength_no_nan_sorted = data["cds_length"][data["cds_length"].notnull()].sort_values() bin_edge = np.unique(train_set_clength_no_nan_sorted[0::100]) hist = np.bincount(np.digitize(train_set_clength_no_nan_sorted, bin_edge)) hist = hist[1:-1] bin_plot(hist, bin_edge) for feature in list(train_set): if feature == "is_hk": continue f, (ax1, ax2) = plt.subplots(1, 2, sharey=True, figsize=(12,4)) bin_size = 2 if feature in category_features else 500 X = train_set[train_set["is_hk"] == IS_HK][feature][~np.isnan(train_set[feature])] hist, bin_edge = np.histogram(X, bin_size) ax1.fill_between(bin_edge.repeat(2)[1:-1],hist.repeat(2),facecolor="orange") ax1.set_title(feature + " (is_hk)") X = train_set[train_set["is_hk"] == IS_NOT_HK][feature][~np.isnan(train_set[feature])] hist, bin_edge = np.histogram(X, bin_size) ax2.fill_between(bin_edge.repeat(2)[1:-1],hist.repeat(2),facecolor="steelblue") ax2.set_title(feature + " (is_not_hk)") plt.show()
_____no_output_____
MIT
gene-prediction-gaussian.ipynb
neungkl/MLE-and-naive-bayes-classification
MLE calculation
def calc_mean_var(data): data = data[data.notnull()] u = data.mean() v = data.var() return u, v def calc_prob_eq_zero(data): data = data[data.notnull()] return len(data[data == 0]) * 1.0 / len(data) likelihood = {} for feature in list(train_set): if feature == "is_hk": continue param_type = "category" if feature in category_features else "gaussian" ll = [0, 0] for i in [IS_HK, IS_NOT_HK]: variable = {} train_data = sup_train_set[sup_train_set["is_hk"] == i] if param_type == "category": variable["prob_zero"] = calc_prob_eq_zero( train_data[feature] ) else: u, var = calc_mean_var(train_data[feature]) variable["u"] = u variable["var"] = var ll[i] = variable likelihood[feature] = ll prior = [0, 0] prior[IS_NOT_HK] = len(sup_train_set[sup_train_set["is_hk"] == IS_NOT_HK]) / len(sup_train_set) prior[IS_HK] = 1 - prior[0] print("Prior: is_not_hk = %s, is_hk = %s" % (prior[IS_NOT_HK], prior[IS_HK])) def normal_pdf(x, u, var): if var < 1e-12: return np.zeros(len(x)) + 1e-12 return np.exp(-(x - u)**2 / (2 * var)) / np.sqrt(2 * np.pi * var) for feature in list(train_set): if feature == "is_hk": continue if feature in category_features: continue plt.figure(figsize=(5,4)) X_max = train_set[feature].max() if X_max < 10: X_norm = np.arange(0, X_max, 0.01) else: X_norm = np.arange(0, X_max, 1) Y_norm = normal_pdf( X_norm, likelihood[feature][IS_HK]["u"], likelihood[feature][IS_HK]["var"] + 1e-12 ) hist, bin_edge = np.histogram(X, bin_size) plt.plot(X_norm, Y_norm, color="orange") Y_norm = normal_pdf( X_norm, likelihood[feature][IS_NOT_HK]["u"], likelihood[feature][IS_NOT_HK]["var"] + 1e-12 ) plt.plot(X_norm, Y_norm, color="steelblue") plt.legend(['is_hk', 'is_not_hk']) plt.title(feature) plt.show() def prob_category(x, ll, is_hk): if x == 0: return ll[is_hk]["prob_zero"] else: return 1 - ll[is_hk]["prob_zero"] def predict(test_data): L = np.zeros(len(test_data)) for feature in list(test_data): if feature == "is_hk": continue data = test_data[feature] ll = likelihood[feature] if feature in category_features: p_house = np.fromiter((prob_category(x, ll, IS_HK) for x in data), data.dtype) p_not_house = np.fromiter((prob_category(x, ll, IS_NOT_HK) for x in data), data.dtype) L += np.log(p_house) - np.log(p_not_house) else: not_null_idx = data.notnull() L[not_null_idx] += np.log(normal_pdf(data[not_null_idx], ll[IS_HK]["u"], ll[IS_HK]["var"])) L[not_null_idx] -= np.log(normal_pdf(data[not_null_idx], ll[IS_NOT_HK]["u"], ll[IS_NOT_HK]["var"])) L += np.log(prior[IS_HK]) - np.log(prior[IS_NOT_HK]) return L def activate_predict(y, threshold = 0.0): return (y > threshold).astype(int) def accuracy(y_test, y_pred): return np.sum(y_test == y_pred) / len(y_test) def precision(y_test, y_pred): n_y_pred = np.sum(y_pred == 1) return np.sum(np.logical_and(y_test == y_pred, y_pred == 1)) / (np.sum(y_pred == 1) + 1e-12) # true positive rate def recall(y_test, y_pred): return np.sum(np.logical_and(y_test == y_pred, y_test == 1)) / (np.sum(y_test == 1) + 1e-12) def false_positive_rate(y_test, y_pred): return np.sum(np.logical_and(y_test != y_pred, y_test == 0)) / np.sum(y_test == 0) def measure_metrics(y_test, y_pred): print("Accuracy: %f" % accuracy(y_test, y_pred)) pcs = precision(y_test, y_pred) rc = recall(y_test, y_pred) print("Precision: %f" % pcs) print("Recall: %f" % rc) f1 = 2 * pcs * rc / (pcs + rc + 1e-12) print("F1: %f" % f1) y_test = test_set["is_hk"] y_pred = activate_predict(predict(test_set)) measure_metrics(y_test, y_pred)
Accuracy: 0.923077 Precision: 0.647059 Recall: 1.000000 F1: 0.785714
MIT
gene-prediction-gaussian.ipynb
neungkl/MLE-and-naive-bayes-classification
Baseline 1\. Random Choice Baseline
def create_random_pred(): return np.random.random_sample((len(y_test),)) - 0.5 y_pred = activate_predict(create_random_pred()) measure_metrics(y_test, y_pred)
Accuracy: 0.435897 Precision: 0.133333 Recall: 0.545455 F1: 0.214286
MIT
gene-prediction-gaussian.ipynb
neungkl/MLE-and-naive-bayes-classification
2\. Majority
def create_majority_pred(): return np.ones(len(y_test)) * test_set["is_hk"].mode().values.astype(int) y_pred = create_majority_pred() measure_metrics(y_test, y_pred)
Accuracy: 0.858974 Precision: 0.000000 Recall: 0.000000 F1: 0.000000
MIT
gene-prediction-gaussian.ipynb
neungkl/MLE-and-naive-bayes-classification
ROC
t = np.arange(-5,5,0.01) tp = [] tp_random = [] tp_majority = [] fp = [] fp_random = [] fp_majority = [] y_test = test_set["is_hk"] y_pred = predict(test_set) y_random = create_random_pred() y_act_majority = create_majority_pred() for t_i in t: y_act_pred = activate_predict(y_pred, threshold = t_i) y_act_random = activate_predict(y_random, threshold = t_i) tp.append(recall(y_test, y_act_pred)) fp.append(false_positive_rate(y_test, y_act_pred)) tp_random.append(recall(y_test, y_act_random)) fp_random.append(false_positive_rate(y_test, y_act_random)) tp_majority.append(recall(y_test, y_act_majority)) fp_majority.append(false_positive_rate(y_test, y_act_majority)) plt.figure(figsize=(7,5)) plt.plot(fp_random, tp_random) plt.plot(fp_majority, tp_majority) plt.plot(fp, tp) plt.legend(['Random', 'Majority', 'Naive Bayes']) plt.show()
_____no_output_____
MIT
gene-prediction-gaussian.ipynb
neungkl/MLE-and-naive-bayes-classification
Getting started with TensorFlow (Graph Mode) **Learning Objectives** - Understand the difference between TensorFlow's two modes: Eager Execution and Graph Execution - Get used to the deferred execution paradigm: first define a graph, then run it in a `tf.Session()` - Understand how to parameterize a graph using `tf.placeholder()` and `feed_dict` - Understand the difference between constant Tensors and variable Tensors, and how to define each - Practice using the mid-level `tf.train` module for gradient descent Introduction **Eager Execution** Eager mode evaluates operations immediately and returns concrete values right away. To enable eager mode, simply place `tf.enable_eager_execution()` at the top of your code. We recommend using eager execution when prototyping as it is intuitive, easier to debug, and requires less boilerplate code. **Graph Execution** Graph mode is TensorFlow's default execution mode (although it will change to eager in TF 2.0). In graph mode, operations only produce a symbolic graph, which doesn't get executed until it is run within the context of a `tf.Session()`. This style of coding is less intuitive and has more boilerplate; however, it can lead to performance optimizations and is particularly well suited to distributing training across multiple devices. We recommend using deferred execution for performance-sensitive production code.
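For contrast, here is a minimal sketch of the eager-mode behaviour described above (not part of the original notebook; it assumes a TF 1.x build where `tf.enable_eager_execution()` is available and is called before any graph operations are created):

```python
import tensorflow as tf

tf.enable_eager_execution()  # call once, before any graph ops are built

a = tf.constant([5, 3, 8])
b = tf.constant([3, -1, 2])
c = tf.add(a, b)
print(c)  # a concrete tensor is returned immediately, e.g. [ 8  2 10]
```

The rest of this notebook uses graph mode, so the snippet above is only for comparison.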
import tensorflow as tf print(tf.__version__)
_____no_output_____
Apache-2.0
courses/machine_learning/deepdive/02_tensorflow/b_tfstart_graph.ipynb
KayvanShah1/training-data-analyst
Graph Execution Adding Two Tensors Build the Graph Unlike eager mode, no concrete value will be returned yet. Just a name, shape and type are printed. Behind the scenes a directed graph is being created.
a = tf.constant(value = [5, 3, 8], dtype = tf.int32) b = tf.constant(value = [3, -1, 2], dtype = tf.int32) c = tf.add(x = a, y = b) print(c)
_____no_output_____
Apache-2.0
courses/machine_learning/deepdive/02_tensorflow/b_tfstart_graph.ipynb
KayvanShah1/training-data-analyst
Run the Graph A graph can be executed in the context of a `tf.Session()`. Think of a session as the bridge between the front-end Python API and the back-end C++ execution engine. Within a session, passing a tensor operation to `run()` will cause Tensorflow to execute all upstream operations in the graph required to calculate that value.
with tf.Session() as sess: result = sess.run(fetches = c) print(result)
_____no_output_____
Apache-2.0
courses/machine_learning/deepdive/02_tensorflow/b_tfstart_graph.ipynb
KayvanShah1/training-data-analyst
Parameterizing the Graph What if the values of `a` and `b` keep changing? How can you parameterize them so they can be fed in at runtime? *Step 1: Define Placeholders* Define `a` and `b` using `tf.placeholder()`. You'll need to specify the data type of the placeholder, and optionally a tensor shape. *Step 2: Provide feed_dict* Now, when invoking `run()` within the `tf.Session()`, in addition to providing a tensor operation to evaluate, you also provide a `feed_dict` dictionary whose keys are the placeholders themselves.
a = tf.placeholder(dtype = tf.int32, shape = [None]) b = tf.placeholder(dtype = tf.int32, shape = [None]) c = tf.add(x = a, y = b) with tf.Session() as sess: result = sess.run(fetches = c, feed_dict = { a: [3, 4, 5], b: [-1, 2, 3] }) print(result)
_____no_output_____
Apache-2.0
courses/machine_learning/deepdive/02_tensorflow/b_tfstart_graph.ipynb
KayvanShah1/training-data-analyst
Linear Regression Toy Dataset We'll model the following:\begin{equation}y = 2x + 10\end{equation}
X = tf.constant(value = [1,2,3,4,5,6,7,8,9,10], dtype = tf.float32) Y = 2 * X + 10 print("X:{}".format(X)) print("Y:{}".format(Y))
_____no_output_____
Apache-2.0
courses/machine_learning/deepdive/02_tensorflow/b_tfstart_graph.ipynb
KayvanShah1/training-data-analyst
2.2 Loss Function Using mean squared error, our loss function is:\begin{equation}MSE = \frac{1}{m}\sum_{i=1}^{m}(\hat{Y}_i-Y_i)^2\end{equation}$\hat{Y}$ represents the vector containing our model's predictions:\begin{equation}\hat{Y} = w_0X + w_1\end{equation} Note below we introduce TF variables for the first time. Unlike constants, variables are mutable. Browse the official TensorFlow [guide on variables](https://www.tensorflow.org/guide/variables) for more information on when/how to use them.
with tf.variable_scope(name_or_scope = "training", reuse = tf.AUTO_REUSE): w0 = tf.get_variable(name = "w0", initializer = tf.constant(value = 0.0, dtype = tf.float32)) w1 = tf.get_variable(name = "w1", initializer = tf.constant(value = 0.0, dtype = tf.float32)) Y_hat = w0 * X + w1 loss_mse = tf.reduce_mean(input_tensor = (Y_hat - Y)**2)
_____no_output_____
Apache-2.0
courses/machine_learning/deepdive/02_tensorflow/b_tfstart_graph.ipynb
KayvanShah1/training-data-analyst
Optimizer An optimizer in TensorFlow both calculates gradients and updates weights. In addition to basic gradient descent, TF provides implementations of several more advanced optimizers such as ADAM and FTRL. They can all be found in the [tf.train](https://www.tensorflow.org/versions/r1.15/api_docs/python/tf/train) module. Note below we're not explicitly telling the optimizer which tensors are our weight tensors. So how does it know what to update? Optimizers will update all variables in the `tf.GraphKeys.TRAINABLE_VARIABLES` [collection](https://www.tensorflow.org/guide/variables#variable_collections). All variables are added to this collection by default. Since our only variables are `w0` and `w1`, this is the behavior we want. If we had a variable that we *didn't* want to be added to the collection, we would set `trainable=False` when creating it.
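As a small illustration of that last point (a sketch added for clarity, not a cell from the original notebook; the `global_step` variable is just an example name), a variable created with `trainable=False` stays out of the `TRAINABLE_VARIABLES` collection and is therefore ignored by the optimizer:

```python
with tf.variable_scope(name_or_scope="training", reuse=tf.AUTO_REUSE):
    # a step counter that should never be updated by gradient descent,
    # so it is excluded from tf.GraphKeys.TRAINABLE_VARIABLES
    global_step = tf.get_variable(
        name="global_step",
        initializer=tf.constant(value=0, dtype=tf.int32),
        trainable=False)

print([v.name for v in tf.trainable_variables()])  # only w0 and w1 appear
```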
LEARNING_RATE = tf.placeholder(dtype = tf.float32, shape = None) optimizer = tf.train.GradientDescentOptimizer(learning_rate = LEARNING_RATE).minimize(loss = loss_mse)
_____no_output_____
Apache-2.0
courses/machine_learning/deepdive/02_tensorflow/b_tfstart_graph.ipynb
KayvanShah1/training-data-analyst
Training Loop Note our results are identical to what we found in Eager mode.
STEPS = 1000 with tf.Session() as sess: sess.run(tf.global_variables_initializer()) # initialize variables for step in range(STEPS): #1. Calculate gradients and update weights sess.run(fetches = optimizer, feed_dict = {LEARNING_RATE: 0.02}) #2. Periodically print MSE if step % 100 == 0: print("STEP: {} MSE: {}".format(step, sess.run(fetches = loss_mse))) # Print final MSE and weights print("STEP: {} MSE: {}".format(STEPS, sess.run(loss_mse))) print("w0:{}".format(round(float(sess.run(w0)), 4))) print("w1:{}".format(round(float(sess.run(w1)), 4)))
_____no_output_____
Apache-2.0
courses/machine_learning/deepdive/02_tensorflow/b_tfstart_graph.ipynb
KayvanShah1/training-data-analyst
These are only the tested and reported cases that Johns Hopkins CSSE has data for; this is by no means a definitive view of the global epidemic. The repo is updated daily around 5:00 pm PDT.
import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt confirmed_url = "https://github.com/CSSEGISandData/COVID-19/raw/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv" recovered_url = "https://github.com/CSSEGISandData/COVID-19/raw/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_recovered_global.csv" deaths_url = "https://github.com/CSSEGISandData/COVID-19/raw/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv" conf_df = pd.read_csv(confirmed_url) # ,index_col=['Province/State', 'Country/Region', 'Lat', 'Long']) recv_df = pd.read_csv(recovered_url) # ,index_col=['Province/State', 'Country/Region', 'Lat', 'Long']) death_df = pd.read_csv(deaths_url) # ,index_col=['Province/State', 'Country/Region', 'Lat', 'Long']) latest = conf_df.columns[-1] latest # create a differenced series function def difference(dataset, interval=1): return pd.Series([dataset[i] - dataset[i - interval] for i in range(interval, len(dataset))])
_____no_output_____
MIT
Covid19.ipynb
BryanSouza91/COVID-19
Plots total confirmed cases by country. Changing logx from False to True puts the x-axis on a logarithmic scale; changing logy does the same for the y-axis; changing loglog applies a logarithmic scale to both axes.
conf_df.loc[:,'1/22/20':].loc[conf_df['Country/Region'] == 'China'].sum().plot(figsize=(25,6),logx=False,logy=False,loglog=True); conf_df.loc[:,'1/22/20':].loc[conf_df['Country/Region'] == 'US'].sum().plot(figsize=(25,6),logx=False,logy=False,loglog=False); conf_df.loc[:,'1/22/20':].loc[conf_df['Country/Region'] == 'Japan'].sum().plot(figsize=(25,6),logx=False,logy=False,loglog=True); conf_df.loc[:,'1/22/20':].loc[conf_df['Country/Region'] == 'Italy'].sum().plot(figsize=(25,6),logx=False,logy=False,loglog=True); conf_df.loc[:,'1/22/20':].loc[conf_df['Country/Region'] == 'Iran'].sum().plot(figsize=(25,6),logx=False,logy=False,loglog=True); conf_df.loc[:,'1/22/20':].loc[conf_df['Country/Region'] == 'Russia'].sum().plot(figsize=(25,6),logx=False,logy=False,loglog=True); conf_df.loc[:,'1/22/20':].loc[conf_df['Country/Region'] == 'Greece'].sum().plot(figsize=(25,6),logx=False,logy=False,loglog=True); conf_df.loc[:,'1/22/20':].loc[conf_df['Country/Region'] == 'India'].sum().plot(figsize=(25,6),logx=False,logy=False,loglog=True); plt.figure(figsize=(26,13)) plt.title("SARS-Cov-2 COVID-19 Confirmed Cases") sns.set_palette('colorblind') sns.scatterplot(x='Long',y='Lat',size=latest,hue='Country/Region',data=conf_df,sizes=(10,10000),legend=False,edgecolor='k'); plt.figure(figsize=(26,13)) plt.title("SARS-Cov-2 COVID-19 Recovered Cases") sns.set_palette('colorblind') sns.scatterplot(x='Long',y='Lat',size=latest,hue='Country/Region',data=recv_df,sizes=(10,10000),legend=False,edgecolor='k'); plt.figure(figsize=(26,13)) plt.title("SARS-Cov-2 COVID-19 Deaths") sns.set_palette('colorblind') sns.scatterplot(x='Long',y='Lat',size=latest,hue='Country/Region',data=death_df,sizes=(10,10000),legend=False,edgecolor='k');
_____no_output_____
MIT
Covid19.ipynb
BryanSouza91/COVID-19
World report
# Create reusable series objects conf_sum = conf_df.loc[:,'1/22/20':].sum() recv_sum = recv_df.loc[:,'1/22/20':].sum() death_sum = death_df.loc[:,'1/22/20':].sum() conf_sum_dif = difference(conf_sum, 1).values recv_sum_dif = difference(recv_sum, 1).values death_sum_dif = difference(death_sum, 1).values # Print world report print("World numbers current as of {}".format(conf_df.columns[-1])) print("New cases: {}".format(conf_sum_dif[-1])) print("Total confirmed cases: {}".format(conf_sum[-1])) print("New case rate: {0:>.3%}".format(conf_sum_dif[-1] / conf_sum[-2])) print("New case 7-day Moving Average: {0:>.0f}".format(difference(conf_sum, 1).rolling(7).mean().values[-1])) print("New case 30-day Moving Average: {0:>.0f}".format(difference(conf_sum, 1).rolling(30).mean().values[-1])) print("New Recovered cases: {}".format(recv_sum_dif[-1])) print("Total recovered cases: {}".format(recv_sum[-1])) print("Recovery rate: {0:>.3%}".format(recv_sum[-1]/conf_sum[-1])) print("New Deaths: {}".format(death_sum_dif[-1])) print("Total deaths: {}".format(death_sum[-1])) print("Death rate: {0:>.3%}".format(death_sum[-1]/conf_sum[-1])) print() print("Growth rate above 1.0 is sign of exponential growth, but also skewed by increased testing.") print("World Growth rate: {0:>.4}".format((conf_sum_dif[-1])/(conf_sum_dif[-2])))
World numbers current as of 4/5/20 New cases: 74710 Total confirmed cases: 1272115 New case rate: 6.239% New case 7-day Moving Average: 78857 New case 30-day Moving Average: 39010 New Recovered cases: 13860 Total recovered cases: 260012 Recovery rate: 20.439% New Deaths: 4768 Total deaths: 69374 Death rate: 5.453% Growth rate above 1.0 is sign of exponential growth, but also skewed by increased testing. World Growth rate: 0.7361
MIT
Covid19.ipynb
BryanSouza91/COVID-19
Report for each country reporting cases
# define report function def report(country): # Create reusable series objects country_conf_sum = conf_df.loc[:,'1/22/20':].loc[conf_df['Country/Region'] == country].sum() country_recv_sum = recv_df.loc[:,'1/22/20':].loc[conf_df['Country/Region'] == country].sum() country_death_sum = death_df.loc[:,'1/22/20':].loc[conf_df['Country/Region'] == country].sum() country_conf_sum_dif = difference(country_conf_sum, 1).values country_recv_sum_dif = difference(country_recv_sum, 1).values country_death_sum_dif = difference(country_death_sum, 1).values print() print('_'*60) print("Numbers for {} current as of {}".format(country, conf_df.columns[-1])) print() print("New cases: {}".format(country_conf_sum_dif[-1])) print("Total confirmed cases: {}".format(country_conf_sum[-1])) print("New case rate: {0:>.3%}".format(country_conf_sum_dif[-1]/country_conf_sum[-1])) print("New case 7-day Moving Average: {0:>.0f}".format(difference(country_conf_sum, 1).rolling(7).mean().values[-1])) print("New case 30-day Moving Average: {0:>.0f}".format(difference(country_conf_sum, 1).rolling(30).mean().values[-1])) print("New Recovered cases: {}".format(country_recv_sum_dif[-1])) print("Total recovered cases: {}".format(country_recv_sum[-1])) print("Recovery rate: {0:>.3%}".format(country_recv_sum_dif[-1]/country_recv_sum[-1])) print("New Deaths: {}".format(country_death_sum_dif[-1])) print("Total deaths: {}".format(country_death_sum[-1])) print("Death rate: {0:>.3%}".format(country_death_sum_dif[-1]/country_conf_sum[-1])) print() print("Growth rate: {0:>.4}".format(country_conf_sum_dif[-1]/country_conf_sum_dif[-2])) print("_"*60) report('US') report('Italy') for each in conf_df['Country/Region'].sort_values().unique(): report(each)
_____no_output_____
MIT
Covid19.ipynb
BryanSouza91/COVID-19
!git clone https://github.com/AadSah/fixmatch.git !scp -r ./fixmatch/* ./ !pip install -r ./requirements.txt !mkdir datasets !pip uninstall tensorflow !pip uninstall tensorflow-gpu !pip install tensorflow-gpu==1.14.0 !pip install libml import os os.environ['ML_DATA'] = './datasets' %set_env PYTHONPATH=$PYTHONPATH:. !CUDA_VISIBLE_DEVICES= ./scripts/create_datasets.py !cp $ML_DATA/svhn-test.tfrecord $ML_DATA/svhn_noextra-test.tfrecord %%shell # Create unlabeled datasets CUDA_VISIBLE_DEVICES= scripts/create_unlabeled.py $ML_DATA/SSL2/svhn $ML_DATA/svhn-train.tfrecord $ML_DATA/svhn-extra.tfrecord & CUDA_VISIBLE_DEVICES= scripts/create_unlabeled.py $ML_DATA/SSL2/svhn_noextra $ML_DATA/svhn-train.tfrecord & CUDA_VISIBLE_DEVICES= scripts/create_unlabeled.py $ML_DATA/SSL2/cifar10 $ML_DATA/cifar10-train.tfrecord & CUDA_VISIBLE_DEVICES= scripts/create_unlabeled.py $ML_DATA/SSL2/cifar100 $ML_DATA/cifar100-train.tfrecord & CUDA_VISIBLE_DEVICES= scripts/create_unlabeled.py $ML_DATA/SSL2/stl10 $ML_DATA/stl10-train.tfrecord $ML_DATA/stl10-unlabeled.tfrecord & wait %%shell # Create semi-supervised subsets for seed in 0 1 2 3 4 5; do for size in 10 20 30 40 100 250 1000 4000; do CUDA_VISIBLE_DEVICES= scripts/create_split.py --seed=$seed --size=$size $ML_DATA/SSL2/svhn $ML_DATA/svhn-train.tfrecord $ML_DATA/svhn-extra.tfrecord & CUDA_VISIBLE_DEVICES= scripts/create_split.py --seed=$seed --size=$size $ML_DATA/SSL2/svhn_noextra $ML_DATA/svhn-train.tfrecord & CUDA_VISIBLE_DEVICES= scripts/create_split.py --seed=$seed --size=$size $ML_DATA/SSL2/cifar10 $ML_DATA/cifar10-train.tfrecord & done for size in 400 1000 2500 10000; do CUDA_VISIBLE_DEVICES= scripts/create_split.py --seed=$seed --size=$size $ML_DATA/SSL2/cifar100 $ML_DATA/cifar100-train.tfrecord & done CUDA_VISIBLE_DEVICES= scripts/create_split.py --seed=$seed --size=1000 $ML_DATA/SSL2/stl10 $ML_DATA/stl10-train.tfrecord $ML_DATA/stl10-unlabeled.tfrecord & wait done !CUDA_VISIBLE_DEVICES= scripts/create_split.py --seed=1 --size=5000 $ML_DATA/SSL2/stl10 $ML_DATA/stl10-train.tfrecord $ML_DATA/stl10-unlabeled.tfrecord #Run this for CIFAR-10 seed=3 and size=40 only !CUDA_VISIBLE_DEVICES= scripts/create_split.py --seed=3 --size=40 $ML_DATA/SSL2/cifar10 $ML_DATA/cifar10-train.tfrecord & !CUDA_VISIBLE_DEVICES=0 python fixmatch.py --filters=32 --dataset=cifar10.3@40-1 --train_dir ./experiments/fixmatch
_____no_output_____
Apache-2.0
covid_fixmatch_xray.ipynb
AadSah/fixmatch
Get NFL Player Data
import requests from pandas.io.json import json_normalize import pandas as pd # https://sportsdata.io/developers/api-documentation/nfl # Player overall information #url = "https://api.sportsdata.io/v3/nfl/scores/json/Players?key=d072122708d34423857116889b72f55b" # Player Season stats for 2020 url = "https://api.sportsdata.io/v3/nfl/stats/json/PlayerSeasonStats/2020?key=d072122708d34423857116889b72f55b" # create a dataframe from data df = pd.read_json(url) # Player Season stats for 2019 and 2018: read each season's own URL and assign the appended result back (append returns a new frame) url2019 = "https://api.sportsdata.io/v3/nfl/stats/json/PlayerSeasonStats/2019?key=d072122708d34423857116889b72f55b" df2019 = pd.read_json(url2019) df = df.append(df2019, ignore_index = True) url2018 = "https://api.sportsdata.io/v3/nfl/stats/json/PlayerSeasonStats/2018?key=d072122708d34423857116889b72f55b" df2018 = pd.read_json(url2018) df = df.append(df2018, ignore_index = True) df.shape[0] # number of players that played in 2018, 2019, 2020
_____no_output_____
Apache-2.0
NFL (1).ipynb
humberhutch/NFLAnalysis
Show the first few rows of data returned - All players
df.head()
_____no_output_____
Apache-2.0
NFL (1).ipynb
humberhutch/NFLAnalysis
Focus on Wide Receivers
wr = df[ df['Position'] =='WR' ] print (wr.shape) # Number of players (rows) and attributes (columns) # remove players with few games played or less than 10 Receiving Yards wr = wr[ wr['Played'] >10] wr = wr[ wr['ReceivingYards'] >10] wr.describe() yardsPerGame = wr['ReceivingYards']/wr['Played'] wr['yardsPerGame'] = yardsPerGame # sample 2 rows from the dataframe wr.sample(2)
_____no_output_____
Apache-2.0
NFL (1).ipynb
humberhutch/NFLAnalysis
Create a histogram of the Yards Per Game
wr['yardsPerGame'].hist()
_____no_output_____
Apache-2.0
NFL (1).ipynb
humberhutch/NFLAnalysis
Boxplot to show distribution
wr['yardsPerGame'].plot.box();
_____no_output_____
Apache-2.0
NFL (1).ipynb
humberhutch/NFLAnalysis
Keep the main columns for analysis
colsKeep = ['PlayerID', 'Season','Team', 'Activated','Played','Started','ReceivingTargets', 'Receptions', 'ReceivingYards', 'ReceivingYardsPerReception','ReceivingTouchdowns','ReceivingLong','yardsPerGame'] new_wr = wr[colsKeep] new_wr.head()
_____no_output_____
Apache-2.0
NFL (1).ipynb
humberhutch/NFLAnalysis
Retrieve data for all players for past 3 years and add salary for analysis
new_wr.groupby(['Team']).mean()['yardsPerGame']
_____no_output_____
Apache-2.0
NFL (1).ipynb
humberhutch/NFLAnalysis
Dummy Variables Exercise

In this exercise, you'll create dummy variables from the projects data set. The idea is to transform categorical data like this:

| Project ID | Project Category |
|------------|------------------|
| 0 | Energy |
| 1 | Transportation |
| 2 | Health |
| 3 | Employment |

into new features that look like this:

| Project ID | Energy | Transportation | Health | Employment |
|------------|--------|----------------|--------|------------|
| 0 | 1 | 0 | 0 | 0 |
| 1 | 0 | 1 | 0 | 0 |
| 2 | 0 | 0 | 1 | 0 |
| 3 | 0 | 0 | 0 | 1 |

(Note: if you were going to use this data with a model influenced by multicollinearity, you would want to eliminate one of the columns to avoid redundant information.) The reasoning behind these transformations is that machine learning algorithms read in numbers, not text. Text needs to be converted into numbers. You could assign a number to each category like 1, 2, 3, and 4. But a categorical variable has no inherent order.

Pandas makes it very easy to create dummy variables with the [get_dummies](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.get_dummies.html) method. In this exercise, you'll create dummy variables from the World Bank projects data; however, there's a caveat. The World Bank data is not particularly clean, so you'll need to explore and wrangle the data first.

You'll focus on the text values in the sector variables. Run the code cells below to read in the World Bank projects data set and then to filter out the data for text variables.
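As a quick, self-contained illustration of the target transformation (added here for clarity; it is not one of the exercise cells), `pd.get_dummies` applied to the toy table above produces one indicator column per category, and `drop_first=True` would drop one of them to avoid the multicollinearity mentioned above:

```python
import pandas as pd

toy = pd.DataFrame({
    'Project ID': [0, 1, 2, 3],
    'Project Category': ['Energy', 'Transportation', 'Health', 'Employment']})

# one 0/1 indicator column per category
dummies = pd.get_dummies(toy['Project Category'])
print(pd.concat([toy[['Project ID']], dummies], axis=1))
```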
import pandas as pd import numpy as np # read in the projects data set and do basic wrangling projects = pd.read_csv('../data/projects_data.csv', dtype=str) projects.drop('Unnamed: 56', axis=1, inplace=True) projects['totalamt'] = pd.to_numeric(projects['totalamt'].str.replace(',', '')) projects['countryname'] = projects['countryname'].str.split(';', expand=True)[0] projects['boardapprovaldate'] = pd.to_datetime(projects['boardapprovaldate']) # keep the project name, lending, sector and theme data sector = projects.copy() sector = sector[['project_name', 'lendinginstr', 'sector1', 'sector2', 'sector3', 'sector4', 'sector5', 'sector', 'mjsector1', 'mjsector2', 'mjsector3', 'mjsector4', 'mjsector5', 'mjsector', 'theme1', 'theme2', 'theme3', 'theme4', 'theme5', 'theme ', 'goal', 'financier', 'mjtheme1name', 'mjtheme2name', 'mjtheme3name', 'mjtheme4name', 'mjtheme5name']]
_____no_output_____
MIT
lessons/ETLPipelines/12_dummyvariables_exercise/12_dummyvariables_exercise.ipynb
rabadzhiyski/Data_Science_Udacity
Run the code cell below. This cell shows the percentage of each variable that is null. Notice the mjsector1 through mjsector5 variables are all null. The mjtheme1name through mjtheme5name are also all null as well as the theme variable. Because these variables contain so many null values, they're probably not very useful.
# output percentage of values that are missing 100 * sector.isnull().sum() / sector.shape[0]
_____no_output_____
MIT
lessons/ETLPipelines/12_dummyvariables_exercise/12_dummyvariables_exercise.ipynb
rabadzhiyski/Data_Science_Udacity
The sector1 variable looks promising; it doesn't contain any null values at all. In the next cell, store the unique sector1 values in a list and output the results. Use the sort_values() and unique() methods.
# TODO: Create a list of the unique values in sector1. Use the sort_values() and unique() pandas methods. # And then convert those results into a Python list uniquesectors1 = list(sector['sector1'].sort_values().unique()) uniquesectors1 # run this code cell to see the number of unique values print('Number of unique values in sector1:', len(uniquesectors1))
Number of unique values in sector1: 3060
MIT
lessons/ETLPipelines/12_dummyvariables_exercise/12_dummyvariables_exercise.ipynb
rabadzhiyski/Data_Science_Udacity
3060 different categories is quite a lot! Remember that with dummy variables, if you have n categorical values, you need n - 1 new variables! That means 3059 extra columns!

Exercise 2

There are a few issues with this 'sector1' variable. First, there are values labeled '!$!0'. These should be substituted with NaN. Furthermore, each sector1 value ends with a ten or eleven character string like '!$!49!$!EP'. Some sectors show up twice in the list like: 'Other Industry; Trade and Services!$!70!$!YZ', 'Other Industry; Trade and Services!$!63!$!YZ', but it seems like those are actually the same sector. You'll need to remove everything past the exclamation point. Many values in the sector1 variable start with the term '(Historic)'. Try removing that phrase as well.

replace() method

With pandas, you can use the replace() method to search for text and replace parts of a string with another string. If you know the exact string you're looking for, the replace() method is straightforward. For example, say you wanted to remove the string '(Trial)' from this data:

| data |
|--------------------------|
| '(Trial) Banking' |
| 'Banking' |
| 'Farming' |
| '(Trial) Transportation' |

You could use `df['data'].replace('(Trial)', '')` to replace '(Trial)' with an empty string. What about this data?

| data |
|------------------------------------------------|
| 'Other Industry; Trade and Services!$ab' |
| 'Other Industry; Trade and Services!ceg' |

This type of data is trickier. In this case, there's a pattern: you want to remove a string that starts with an exclamation point and then has an unknown number of characters after it. When you need to match patterns of characters, you can use [regular expressions](https://en.wikipedia.org/wiki/Regular_expression). The replace method can take a regular expression, so `df['data'].replace('!.+', '', regex=True)` finds a substring that starts with an exclamation point followed by one or more characters and replaces it with an empty string. You can see a [regular expression cheat sheet](https://medium.com/factory-mind/regex-tutorial-a-simple-cheatsheet-by-examples-649dc1c3f285) here. Fix these issues in the code cell below.
# TODO: In the sector1 variable, replace the string '!$!0' with nan # HINT: you can use the pandas replace() method and numpy.nan sector['sector1'] = sector['sector1'].replace('!$!0', np.nan) # TODO: In the sector1 variable, remove the last 10 or 11 characters from the sector1 variable. # HINT: There is more than one way to do this including the replace method # HINT: You can use a regex expression '!.+' # That regex expression looks for a string with an exclamation # point followed by one or more characters sector['sector1'] = sector['sector1'].replace('!.+', '', regex=True) # TODO: Remove the string '(Historic)' from the sector1 variable # HINT: You can use the replace method sector['sector1'] = sector['sector1'].replace('^(\(Historic\))', '', regex=True) print('Number of unique sectors after cleaning:', len(list(sector['sector1'].unique()))) print('Percentage of null values after cleaning:', 100 * sector['sector1'].isnull().sum() / sector['sector1'].shape[0])
Number of unique sectors after cleaning: 156 Percentage of null values after cleaning: 3.4962735642262164
MIT
lessons/ETLPipelines/12_dummyvariables_exercise/12_dummyvariables_exercise.ipynb
rabadzhiyski/Data_Science_Udacity
Now there are 156 unique categorical values. That's better than 3060. If you were going to use this data with a supervised machine learning model, you could try converting these 156 values to dummy variables. You'd still have to train and test a model to see if those are good features. But can you do anything else with the sector1 variable? The percentage of null values for 'sector1' is now 3.49%. That turns out to be the same number as the null values for the 'sector' column. You can see this if you scroll back up to where the code calculated the percentage of null values for each variable. Perhaps the 'sector1' and 'sector' variables have the same information. If you look at the 'sector' variable, however, it also needs cleaning. The values look like this: 'Urban Transport;Urban Transport;Public Administration - Transportation'. It turns out the 'sector' variable combines information from the 'sector1' through 'sector5' variables and the 'mjsector' variable. Run the code cell below to look at the sector variable.
sector['sector']
_____no_output_____
MIT
lessons/ETLPipelines/12_dummyvariables_exercise/12_dummyvariables_exercise.ipynb
rabadzhiyski/Data_Science_Udacity
What else can you do? If you look at all of the different sector1 categories, it might be useful to combine a few of them together. For example, there are various categories with the term "Energy" in them. And then there are other categories that seem related to energy but don't have the word energy in them like "Thermal" and "Hydro". Some categories have the term "Renewable Energy", so perhaps you could make a separate "Renewable Energy" category. Similarly, there are categories with the term "Transportation" in them, and then there are related categories like "Highways". In the next cell, find all sector1 values with the term 'Energy' in them. For each of these rows, put the string 'Energy' in a new column called 'sector1_aggregates'. Do the same for "Transportation".
import re # Create the sector1_aggregates variable sector.loc[:,'sector1_aggregates'] = sector['sector1'] # TODO: The code above created a new variable called sector1_aggregates. # Currently, sector1_aggregates has all of the same values as sector1 # For this task, find all the rows in sector1_aggregates with the term 'Energy' in them, # For all of these rows, replace whatever the value is with the term 'Energy'. # The idea is to simplify the category names by combining various categories together. # Then, do the same for the term 'Transportation' # HINT: You can use the contains() method. See the documentation for how to ignore case using the re library # HINT: You might get an error saying "cannot index with vector containing NA / NaN values." # Try converting NaN values to something else like False or a string # NOTE: pass re.IGNORECASE through the flags= keyword; as the second positional argument it would be read as the case= parameter and the case-insensitive match would silently not happen sector.loc[sector['sector1_aggregates'].str.contains('Energy', flags=re.IGNORECASE).replace(np.nan, False),'sector1_aggregates'] = 'Energy' sector.loc[sector['sector1_aggregates'].str.contains('Transportation', flags=re.IGNORECASE).replace(np.nan, False),'sector1_aggregates'] = 'Transportation' print('Number of unique sectors after cleaning:', len(list(sector['sector1_aggregates'].unique())))
Number of unique sectors after cleaning: 145
MIT
lessons/ETLPipelines/12_dummyvariables_exercise/12_dummyvariables_exercise.ipynb
rabadzhiyski/Data_Science_Udacity
The number of unique sectors continues to go down. Keep in mind that how much to consolidate will depend on your machine learning model performance and your hardware's ability to handle the extra features in memory. If your hardware's memory can handle 3060 new features and your machine learning algorithm performs better, then go for it! There are still 638 entries with NaN values. How could you fill these in? You might try to determine an appropriate category from the 'project_name' or 'lendinginstr' variables. If you make dummy variables including NaN values, then you could consider a feature with all zeros to represent NaN. Or you could delete these records from the data set. Pandas will ignore NaN values by default. That means, for a given row, all dummy variables will have a value of 0 if the sector1 value was NaN. Don't forget about the bigger context! This data is being prepared for a machine learning algorithm. Whatever techniques you use to engineer new features, you'll need to use those when running your model on new data. So if your new data does not contain a sector1 value, you'll have to run whatever feature engineering processes you did on your training set. In this final step, use the pandas pd.get_dummies() method to create dummy variables. Then use the concat() method to concatenate the dummy variables to a dataframe that contains the project totalamt variable and the project year from the boardapprovaldate.
# TODO: Create dummy variables from the sector1_aggregates data. Put the results into a dataframe called dummies # Hint: Use the get_dummies method dummies = pd.DataFrame(pd.get_dummies(sector['sector1_aggregates'])) # TODO: Filter the projects data for the totalamt, the year from boardapprovaldate, and the dummy variables projects['year'] = projects['boardapprovaldate'].dt.year df = projects[['totalamt','year']] df_final = pd.concat([df, dummies], axis=1) df_final.head()
_____no_output_____
MIT
lessons/ETLPipelines/12_dummyvariables_exercise/12_dummyvariables_exercise.ipynb
rabadzhiyski/Data_Science_Udacity
load data
DATASET_ID = 'BIRD_DB_Vireo_cassinii' df_loc = DATA_DIR / 'syllable_dfs' / DATASET_ID / 'cassins.pickle' syllable_df = pd.read_pickle(df_loc) del syllable_df['audio'] syllable_df[:3] np.shape(syllable_df.spectrogram.values[0])
_____no_output_____
MIT
notebooks/02.5-make-projection-dfs/higher-spread/.ipynb_checkpoints/cassins-umap-checkpoint.ipynb
xingjeffrey/avgn_paper
project
specs = list(syllable_df.spectrogram.values) specs = [i/np.max(i) for i in tqdm(specs)] specs_flattened = flatten_spectrograms(specs) np.shape(specs_flattened) cuml_umap = cumlUMAP(min_dist = 0.5) embedding = cuml_umap.fit_transform(specs_flattened) fig, ax = plt.subplots() ax.scatter(embedding[:,0], embedding[:,1], s=1, color='k', alpha = 0.005) ax.set_xlim([-8,8]) ax.set_ylim([-8,8]) syllable_df['umap'] = list(embedding)
_____no_output_____
MIT
notebooks/02.5-make-projection-dfs/higher-spread/.ipynb_checkpoints/cassins-umap-checkpoint.ipynb
xingjeffrey/avgn_paper
Save
ensure_dir(DATA_DIR / 'embeddings' / DATASET_ID / 'full') syllable_df.to_pickle(DATA_DIR / 'embeddings' / DATASET_ID / (str(min_dist) + '_full.pickle'))
_____no_output_____
MIT
notebooks/02.5-make-projection-dfs/higher-spread/.ipynb_checkpoints/cassins-umap-checkpoint.ipynb
xingjeffrey/avgn_paper
Qualitatively replicate: [1] Elman, J. L. (1990). Finding structure in time. Cognitive Science, 14(2), 179–211. https://doi.org/10.1016/0364-0213(90)90002-E[2] Saffran, J. R., Aslin, R. N., & Newport, E. L. (1996). Statistical learning by 8-month-old infants. Science, 274(5294), 1926–1928. https://doi.org/10.1126/science.274.5294.1926
import os import time import warnings import itertools import numpy as np from sklearn.preprocessing import OneHotEncoder import torch import torch.nn as nn import matplotlib.pyplot as plt import seaborn as sns warnings.filterwarnings("ignore") sns.set(style='white', context='poster', font_scale=.8, rc={"lines.linewidth": 2}) device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') print(f'device = {device}') seed_val = 0 torch.manual_seed(seed_val) np.random.seed(seed_val) %matplotlib inline %autosave 5 import string all_letters = string.ascii_lowercase # define all vocabs chunk_size = 4 all_vocabs = [ all_letters[i:i + chunk_size] for i in range(0, len(all_letters), chunk_size) ] print(f'All vocabs:\n{all_vocabs}') # gen seqs, given some vocabs def gen_story(all_vocabs, seq_len): n_vocabs = len(all_vocabs) seq_ids = np.random.randint(n_vocabs, size=seq_len) seq = [all_vocabs[i] for i in seq_ids] # integer representation seq_int = [ [all_letters.index(letter) for letter in vocab] for vocab in seq ] return seq, seq_int seq_len = 12 seq, seq_int = gen_story(all_vocabs, seq_len) print(f'Here\'s a "story":\n{seq}') print(f'The corresponding int representation:\n{seq_int}') # vectorize the input def onehot_transform(seq_int_): # get the unit of representation n_letters = len(all_letters) all_letters_ohe_template = np.reshape(np.arange(n_letters),newshape=(-1,1)) # init one hot encoder ohe = OneHotEncoder(sparse=False) ohe.fit(all_letters_ohe_template) # reformat the sequence seq_int_ = [np.reshape(vocab, newshape=(-1,1)) for vocab in seq_int_] # transform to one hot seq_ohe = [ohe.transform(vocab) for vocab in seq_int_] return seq_ohe seq_ohe = onehot_transform(seq_int) f, ax = plt.subplots(1,1, figsize=(9,5)) vocab_id = 0 ax.imshow(seq_ohe[vocab_id], cmap='bone') ax.set_xlabel('Feature dim') ax.set_yticks([]) ax.set_title(f'The one hot representation of "{seq[vocab_id]}"') # generate training data def gen_data(seq_len): seq, seq_int = gen_story(all_vocabs, seq_len) seq_ohe = onehot_transform(seq_int) # to sequence to pytorch format seq_ohe_merged = list(itertools.chain(*seq_ohe)) # X = np.expand_dims(seq_ohe_merged, axis=-1) X = np.array(seq_ohe_merged) X = torch.from_numpy(X).type(torch.FloatTensor) return X, seq # how to use `gen_data` seq_len = 25 X, seq = gen_data(seq_len) n_time_steps = X.size()[0] print(X.size()) # model params dim_output = dim_input = X.size()[1] dim_hidden = 32 # training params seq_len = 25 learning_rate = 3e-3 n_epochs = 10 # init model model = nn.RNN(dim_input, dim_hidden) readout = nn.Linear(dim_hidden, dim_output) # init optimizer criterion = nn.MSELoss() optimizer = torch.optim.Adam( list(model.parameters())+list(readout.parameters()), lr=learning_rate ) # loop over epoch losses_torch = np.zeros(n_epochs,) for i in range(n_epochs): # gen data X, _ = gen_data(seq_len) n_time_steps = X.size()[0] time_start = time.time() # feed seq out, hidden_T = model(X.unsqueeze(0)) xhat = readout(out) # compute loss out_sqed = torch.squeeze(xhat, dim=0) loss = criterion(out_sqed, X) losses_torch[i] += loss.item() # update weights optimizer.zero_grad() loss.backward() optimizer.step() # print out some stuff time_end = time.time() print(f'Epoch {i} : \t loss = {losses_torch[i]}, \t time = {time_end - time_start}') # gen some new data seq_len_test = 25 X_test, seq = gen_data(seq_len_test) n_time_steps = X_test.size()[0] loss_test = np.zeros(n_time_steps,) h_0 = torch.randn(1, 1, dim_hidden) c_0 = torch.randn(1, 1, dim_hidden) # loop over time, for one training example 
for t, x_t in enumerate(X_test): if t == 0: h_t = h_0 # recurrent computation at time t out, (h_t) = model(x_t.view(1, 1, -1), h_t) xhat = readout(out) # compute loss out_sqed = torch.squeeze(xhat, dim=0) loss = criterion(out_sqed, x_t) loss_test[t] = loss.item() """ in general, the error function over time peaks right after event(word) boundaries. """ word_boundaries = np.cumsum([len(vocab) for vocab in seq])-1 seq_letters = list(itertools.chain(*seq)) seq_len_test = len(seq_letters) f,ax = plt.subplots(1,1, figsize=(16, 5)) ax.plot(np.arange(0,seq_len_test,1), loss_test) ax.set_title('Instantaneous prediction error') ax.set_xlabel('Time') ax.set_ylabel('Error') for i, letter in enumerate(seq_letters): ax.annotate(letter, (i, loss_test[i]), fontsize=14) for wb in word_boundaries: ax.axvline(wb, color='grey', linestyle='--') sns.despine() f.tight_layout()
_____no_output_____
MIT
elman_pytorch.ipynb
qihongl/demo-elman-1990
Pandas II More indexing tricks We'll start out with some data from Beer Advocate (see [Tom Augspurger](https://github.com/TomAugspurger/pydata-chi-h2t/blob/master/3-Indexing.ipynb) for some cool details on how he extracted this data)
import numpy as np import pandas as pd pd.options.display.max_rows = 10 df = pd.read_csv('data/beer_subset.csv.gz', parse_dates=['time'], compression='gzip')
_____no_output_____
CC-BY-3.0
Lecture 4/Lecture 4 - Pandas II (Template).ipynb
iEvidently/ihme-python-course
Boolean indexing Like a where clause in SQL. The indexer (or boolean mask) should be 1-dimensional and the same length as the thing being indexed.
df.loc[((df['abv'] < 5) & (df['time'] > pd.Timestamp('2009-06'))) | (df['review_overall'] >= 4.5)].head()
_____no_output_____
CC-BY-3.0
Lecture 4/Lecture 4 - Pandas II (Template).ipynb
iEvidently/ihme-python-course
Be careful with the order of operations... Safest to use parentheses... Select just the rows where the `beer_style` contains `'IPA'`: Find the rows where the beer style is either `'American IPA'` or `'Pilsner'`:
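A possible answer to the first prompt (a sketch, not necessarily the instructor's intended solution): the `.str.contains` accessor builds the boolean mask for the substring match.

```python
# rows whose beer_style contains the substring 'IPA'
df[df['beer_style'].str.contains('IPA')].head()
```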
(df['beer_style'] == 'American IPA')
_____no_output_____
CC-BY-3.0
Lecture 4/Lecture 4 - Pandas II (Template).ipynb
iEvidently/ihme-python-course
Or more succinctly:
df[df['beer_style'].isin(['American IPA', 'Pilsner'])].head()
_____no_output_____
CC-BY-3.0
Lecture 4/Lecture 4 - Pandas II (Template).ipynb
iEvidently/ihme-python-course
Mini Exercise - Select the rows where the scores of the 5 review_cols ('review_appearance', 'review_aroma', 'review_overall', 'review_palate', 'review_taste') are all at least 4.0. - _Hint_: Like NumPy arrays, DataFrames have any and all methods that check whether they contain any or all True values. These methods also take an axis argument for the dimension to remove. - 0 or 'index' removes (or aggregates over) the vertical dimension - 1 or 'columns' removes (aggregates over) the horizontal dimension. Or the short way: Now select rows where the _average_ of the 5 `review_cols` is at least 4. Hierarchical Indexing - One of the most powerful and most complicated features of pandas - Lets you represent high-dimensional datasets in a table
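Returning to the mini exercise above, one possible solution sketch (not the official answer; it assumes the five review columns are collected in a list called `review_cols`):

```python
review_cols = ['review_appearance', 'review_aroma', 'review_overall',
               'review_palate', 'review_taste']

# rows where every one of the five scores is at least 4.0
df[(df[review_cols] >= 4.0).all(axis='columns')].head()

# rows where the *average* of the five scores is at least 4
df[df[review_cols].mean(axis='columns') >= 4].head()
```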
reviews = df.set_index(['profile_name', 'beer_id', 'time'])
_____no_output_____
CC-BY-3.0
Lecture 4/Lecture 4 - Pandas II (Template).ipynb
iEvidently/ihme-python-course
Top Reviewers Let's select all the reviews by the top reviewers, by label. The syntax is a bit trickier when you want to specify a row Indexer *and* a column Indexer:
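The cell below refers to a `top_reviewers` object that is defined in a part of the lecture not shown here; a plausible reconstruction (an assumption, not the original definition) is the index of the most prolific profile names:

```python
# assumed definition: the 5 profile names with the most reviews
top_reviewers = df['profile_name'].value_counts().head(5).index
top_reviewers
```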
reviews.loc[(top_reviewers, 99, :), ['beer_name', 'brewer_name']] reviews.loc[pd.IndexSlice[top_reviewers, 99, :], ['beer_name', 'brewer_id']]
_____no_output_____
CC-BY-3.0
Lecture 4/Lecture 4 - Pandas II (Template).ipynb
iEvidently/ihme-python-course
Use `.loc` to select the `beer_name` and `beer_style` for the 10 most popular beers, as measured by number of reviews: Beware "chained indexing"You can sometimes get away with using `[...][...]`, but try to avoid it!
df.loc[df['beer_style'].str.contains('IPA')]['beer_name'] df.loc[df['beer_style'].str.contains('IPA')]['beer_name'] = 'yummy' df.loc[df['beer_style'].str.contains('IPA')]['beer_name']
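The chained assignment above typically only raises a `SettingWithCopyWarning` and leaves `df` unchanged, because the first indexing step returns a copy. A sketch of the single-step alternative that does modify `df` (added for clarity, not part of the original template):

```python
# one .loc call with a row mask and a column label writes into df itself
df.loc[df['beer_style'].str.contains('IPA'), 'beer_name'] = 'yummy'
df.loc[df['beer_style'].str.contains('IPA'), 'beer_name'].head()
```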
_____no_output_____
CC-BY-3.0
Lecture 4/Lecture 4 - Pandas II (Template).ipynb
iEvidently/ihme-python-course
Dates and Times - Date and time data are inherently problematic - An unequal number of days in every month - An unequal number of days in a year (due to leap years) - Time zones that vary over space - etc - The datetime built-in library handles temporal information down to the nanosecond. Having a custom data type for dates and times is convenient because we can perform operations on them easily. For example, we may want to calculate the difference between two times: See [the docs](http://pandas.pydata.org/pandas-docs/stable/timeseries.html) for more information on Pandas' complex time and date functionalities... Example In this section, we will manipulate data collected from ocean-going vessels on the eastern seaboard. Vessel operations are monitored using the Automatic Identification System (AIS), a safety at sea navigation technology which vessels are required to maintain and that uses transponders to transmit very high frequency (VHF) radio signals containing static information including ship name, call sign, and country of origin, as well as dynamic information unique to a particular voyage such as vessel location, heading, and speed. The International Maritime Organization’s (IMO) International Convention for the Safety of Life at Sea requires functioning AIS capabilities on all vessels 300 gross tons or greater and the US Coast Guard requires AIS on nearly all vessels sailing in U.S. waters. The Coast Guard has established a national network of AIS receivers that provides coverage of nearly all U.S. waters. AIS signals are transmitted several times each minute and the network is capable of handling thousands of reports per minute and updates as often as every two seconds. Therefore, a typical voyage in our study might include the transmission of hundreds or thousands of AIS encoded signals. This provides a rich source of spatial data that includes both spatial and temporal information. For our purposes, we will use summarized data that describes the transit of a given vessel through a particular administrative area. The data includes the start and end time of the transit segment, as well as information about the speed of the vessel, how far it travelled, etc.
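Picking up the earlier point about operating on dates and times directly, here is a minimal, generic sketch (not taken from the lecture) of datetime arithmetic: subtracting two `datetime` objects yields a `timedelta`.

```python
from datetime import datetime

start = datetime(2009, 1, 3, 10, 30)
end = datetime(2009, 1, 5, 8, 0)

elapsed = end - start           # a datetime.timedelta object
print(elapsed)                  # 1 day, 21:30:00
print(elapsed.total_seconds())  # 163800.0
```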
segments = pd.read_csv('data/AIS/transit_segments.csv')
_____no_output_____
CC-BY-3.0
Lecture 4/Lecture 4 - Pandas II (Template).ipynb
iEvidently/ihme-python-course
For example, we might be interested in the distribution of transit lengths, so we can plot them as a histogram: Though most of the transits appear to be short, there are a few longer distances that make the plot difficult to read. This is where a transformation is useful: We can see that although there are date/time fields in the dataset, they are not in any specialized format, such as `datetime`. Our first order of business will be to convert these data to `datetime`. The `strptime` method parses a string representation of a date and/or time field, according to the expected format of this information.
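Before moving on to the date parsing below, here is a sketch of the log transformation hinted at above (the column name `seg_length` is an assumption about this dataset; adjust it to whatever column holds the transit lengths):

```python
import numpy as np
import matplotlib.pyplot as plt

# raw lengths are heavily right-skewed, so look at them on a log scale
np.log1p(segments['seg_length']).hist(bins=50)
plt.xlabel('log(1 + segment length)')
plt.show()
```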
from datetime import datetime datetime.strptime(segments['st_time'].iloc[0], '%m/%d/%y %H:%M')
_____no_output_____
CC-BY-3.0
Lecture 4/Lecture 4 - Pandas II (Template).ipynb
iEvidently/ihme-python-course
As a convenience, Pandas has a `to_datetime` method that will parse and convert an entire Series of formatted strings into `datetime` objects. Pandas also has a custom NA value for missing datetime objects, `NaT`.
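A hedged sketch of how the conversion might look for this dataset (the actual cell is left blank in this template copy):

```python
# parse the string timestamps into datetime64 values
segments['st_time'] = pd.to_datetime(segments['st_time'])
segments['st_time'].head()
```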
pd.to_datetime([None])
_____no_output_____
CC-BY-3.0
Lecture 4/Lecture 4 - Pandas II (Template).ipynb
iEvidently/ihme-python-course
Finally, if `to_datetime()` has problems parsing any particular date/time format, you can pass the spec in using the `format=` argument. Merging and joining `DataFrame`s In Pandas, we can combine tables according to the value of one or more *keys* that are used to identify rows, much like an index.
df1 = pd.DataFrame({'id': range(4), 'age': np.random.randint(18, 31, size=4)}) df2 = pd.DataFrame({'id': list(range(3))*2, 'score': np.random.random(size=6)})
_____no_output_____
CC-BY-3.0
Lecture 4/Lecture 4 - Pandas II (Template).ipynb
iEvidently/ihme-python-course
Notice that without any information about which column to use as a key, Pandas did the right thing and used the `id` column in both tables. Unless specified otherwise, `merge` will use any common column names as keys for merging the tables. Notice also that `id=3` from `df1` was omitted from the merged table. This is because, by default, `merge` performs an **inner join** on the tables, meaning that the merged table represents an intersection of the two tables. The **outer join** above yields the union of the two tables, so all rows are represented, with missing values inserted as appropriate. One can also perform **right** and **left** joins to include all rows of the right or left table (*i.e.* the second or first argument to `merge`), but not necessarily the other. Back to the example Now that we have the vessel transit information as we need it, we may want a little more information regarding the vessels themselves. In the `data/AIS` folder there is a second table that contains information about each of the ships that traveled the segments in the `segments` table.
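Before returning to the AIS example, here is a sketch of the toy merges just described (the original cells are not shown in this copy, so these calls are a reconstruction):

```python
pd.merge(df1, df2)               # default: inner join on the shared 'id' key
pd.merge(df1, df2, how='outer')  # union of the keys; unmatched rows get NaN
pd.merge(df1, df2, how='left')   # keep every row of the left (first) table
```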
vessels = pd.read_csv('data/AIS/vessel_information.csv', index_col='mmsi')
_____no_output_____
CC-BY-3.0
Lecture 4/Lecture 4 - Pandas II (Template).ipynb
iEvidently/ihme-python-course
We see that there is a `mmsi` value (a vessel identifier) in each table, but it is used as an index for the `vessels` table. In this case, we have to specify to join on the index for this table, and on the `mmsi` column for the other. Notice that the `mmsi` field that was an index on the `vessels` table is no longer an index on the merged table. Each `DataFrame` also has a `.merge()` method that could have been used: Occasionally, there will be fields with the same name in both tables that we do not wish to use to join the tables; they may contain different information, despite having the same name. In this case, Pandas will by default append suffixes `_x` and `_y` to the columns to uniquely identify them. This behavior can be overridden by specifying a `suffixes` argument, containing a list of the suffixes to be used for the columns of the left and right tables, respectively. Reshaping `DataFrame`s This dataset is from Table 6.9 of [Statistical Methods for the Analysis of Repeated Measurements](http://www.amazon.com/Statistical-Methods-Analysis-Repeated-Measurements/dp/0387953701) by Charles S. Davis, pp. 161-163 (Springer, 2002). These data are from a multicenter, randomized controlled trial of botulinum toxin type B (BotB) in patients with cervical dystonia from nine U.S. sites. * Randomized to placebo (N=36), 5000 units of BotB (N=36), 10,000 units of BotB (N=37) * Response variable: total score on Toronto Western Spasmodic Torticollis Rating Scale (TWSTRS), measuring severity, pain, and disability of cervical dystonia (high scores mean more impairment) * TWSTRS measured at baseline (week 0) and weeks 2, 4, 8, 12, 16 after treatment began
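Stepping back to the vessels join described above, a sketch of what the missing merge call presumably looks like (an assumption, not the original cell; it joins the `mmsi` column of `segments` against the index of `vessels` and uses custom suffixes for clashing column names):

```python
segments_merged = segments.merge(vessels, left_on='mmsi', right_index=True,
                                 suffixes=('_seg', '_vsl'))
segments_merged.head()
```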
cdystonia = pd.read_csv('data/cdystonia.csv', index_col=None)
_____no_output_____
CC-BY-3.0
Lecture 4/Lecture 4 - Pandas II (Template).ipynb
iEvidently/ihme-python-course
This dataset includes repeated measurements of the same individuals (longitudinal data). It's possible to present such information in (at least) two ways: showing each repeated measurement in its own row, or in multiple columns representing multiple measurements. `.stack()` rotates the data frame so that columns are represented in rows: And there's a corresponding `.unstack()` which pivots back into columns: For this dataset, it makes sense to create a hierarchical index based on the patient and observation:
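Before building the hierarchical index, here is a minimal illustration of the two operations just described (a sketch; the corresponding cells are blank in this template copy):

```python
stacked = cdystonia.stack()   # column labels pivoted into an inner index level
stacked.head(12)

stacked.unstack().head()      # back to the original wide layout
```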
cdystonia2 = cdystonia.set_index(['patient','obs'])
_____no_output_____
CC-BY-3.0
Lecture 4/Lecture 4 - Pandas II (Template).ipynb
iEvidently/ihme-python-course
If we want to transform this data so that repeated measurements are in columns, we can `unstack` the `twstrs` measurements according to `obs`: And if we want to keep the other variables:
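The next cell relies on a `twstrs_wide` table built in a step that is not shown here; a plausible reconstruction (an assumption) unstacks the `twstrs` measurements of the hierarchically indexed frame by observation number:

```python
# one column of twstrs scores per observation number
twstrs_wide = cdystonia2['twstrs'].unstack('obs')
twstrs_wide.head()
```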
cdystonia_wide = (cdystonia[['patient','site','id','treat','age','sex']] .drop_duplicates() .merge(twstrs_wide, right_index=True, left_on='patient', how='inner') .head())
_____no_output_____
CC-BY-3.0
Lecture 4/Lecture 4 - Pandas II (Template).ipynb
iEvidently/ihme-python-course
Or to simplify things, we can set the patient-level information as an index before unstacking:
(cdystonia.set_index(['patient','site','id','treat','age','sex','week'])['twstrs'] .unstack('week').head())
_____no_output_____
CC-BY-3.0
Lecture 4/Lecture 4 - Pandas II (Template).ipynb
iEvidently/ihme-python-course
[`.melt()`](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.melt.html)- To convert our "wide" format back to long, we can use the `melt` function. - This function is useful for `DataFrame`s where one or more columns are identifier variables (`id_vars`), with the remaining columns being measured variables (`value_vars`). - The measured variables are "unpivoted" to the row axis, leaving just two non-identifier columns, a *variable* and its corresponding *value*, which can both be renamed using optional arguments.
pd.melt(cdystonia_wide, id_vars=['patient','site','id','treat','age','sex'], var_name='obs', value_name='twstrs').head()
_____no_output_____
CC-BY-3.0
Lecture 4/Lecture 4 - Pandas II (Template).ipynb
iEvidently/ihme-python-course
Pivoting The `pivot` method allows a DataFrame to be transformed easily between long and wide formats in the same way as a pivot table is created in a spreadsheet. It takes three arguments: `index`, `columns` and `values`, corresponding to the DataFrame index (the row headers), columns and cell values, respectively. For example, we may want the `twstrs` variable (the response variable) in wide format according to patient, as we saw with the unstacking method above:
cdystonia.pivot(index='patient', columns='obs', values='twstrs').head()
_____no_output_____
CC-BY-3.0
Lecture 4/Lecture 4 - Pandas II (Template).ipynb
iEvidently/ihme-python-course
If we omit the `values` argument, we get a `DataFrame` with hierarchical columns, just as when we applied `unstack` to the hierarchically-indexed table: A related method, `pivot_table`, creates a spreadsheet-like table with a hierarchical index, and allows the values of the table to be populated using an arbitrary aggregation function.
cdystonia.head() cdystonia.pivot_table(index=['site', 'treat'], columns='week', values='twstrs', aggfunc=max).head(20)
_____no_output_____
CC-BY-3.0
Lecture 4/Lecture 4 - Pandas II (Template).ipynb
iEvidently/ihme-python-course
Preface

- Description
- About notebook
- Load libraries
- Load Dataset
- Column Description
- Cleaning Dataset
- EDA
  - Null values
  - Via which channel the user visited
  - Mobile users
  - Browser based
  - Device Category
  - Operating system
  - Continent based
  - Metro based
  - Network Domain
  - Region
  - Country based
  - Sub-continent based
  - Page views vs. bounces
  - New customer or old customer
  - Minimum & maximum revenue on a daily basis
  - Revenue based on month
  - Revenue based on day
  - Revenue based on weekday
  - Most Ad Content
  - Keywords used by users
  - Source from where users came

Description In this competition, you’re challenged to analyze a Google Merchandise Store (also known as GStore, where Google swag is sold) customer dataset to predict revenue per customer.

About notebook In this notebook we will look into the dataset provided in the competition and analyze the users of GStore.

Load libraries
import numpy as np
import pandas as pd
import plotly.graph_objs as go
import plotly.offline as py
from plotly.offline import init_notebook_mode, iplot, download_plotlyjs
import plotly.graph_objs as go
from plotly import tools
import matplotlib.pyplot as plt
init_notebook_mode(connected=True)
from plotly.tools import FigureFactory as ff
import random
from collections import Counter
import warnings
import json
import os
import datetime
from pandas.io.json import json_normalize
warnings.filterwarnings('ignore')
pd.set_option('display.max_columns', 500)
import pycountry
from wordcloud import WordCloud
import matplotlib.pyplot as plt
import seaborn as sns
_____no_output_____
MIT
9 google customer revenue prediction/exploratory-google-store-analysis.ipynb
MLVPRASAD/KaggleProjects
Load Dataset
train = pd.read_csv("../input/train.csv")
test = pd.read_csv("../input/test.csv")

# train_df = pd.read_csv('flatten_train.csv')
# test_df = pd.read_csv('flatten_test.csv')

# helper functions
def constant_cols(df):
    cols = []
    columns = df.columns.values
    for col in columns:
        if df[col].nunique(dropna = False) == 1:
            cols.append(col)
    return cols

def diff_cols(df1, df2):
    columns1 = df1.columns.values
    columns2 = df2.columns.values
    print(list(set(columns1) - set(columns2)))

def count_mean(col, color1, color2):
    col_count = train_df[col].value_counts()
    col_count_chart = go.Bar(x = col_count.head(10).index,
                             y = col_count.head(10).values,
                             name="Count", marker = dict(color=color1))
    col_mean_count = train_df[[col,'totals.transactionRevenue']][(train_df['totals.transactionRevenue'] > 1)]
    col_mean_count = col_mean_count.groupby(col)['totals.transactionRevenue'].mean().sort_values(ascending=False)
    col_mean_count_chart = go.Bar(x = col_mean_count.head(10).index,
                                  y = col_mean_count.head(10).values,
                                  name="Mean", marker = dict(color=color2))
    fig = tools.make_subplots(rows = 1, cols = 2, subplot_titles=('Total Count','Mean Revenue'))
    fig.append_trace(col_count_chart, 1, 1)
    fig.append_trace(col_mean_count_chart, 1, 2)
    py.iplot(fig)

train.head()
_____no_output_____
MIT
9 google customer revenue prediction/exploratory-google-store-analysis.ipynb
MLVPRASAD/KaggleProjects
Column Description

- fullVisitorId - A unique identifier for each user of the Google Merchandise Store.
- channelGrouping - The channel via which the user came to the Store.
- date - The date on which the user visited the Store.
- device - The specifications for the device used to access the Store.
- geoNetwork - This section contains information about the geography of the user.
- sessionId - A unique identifier for this visit to the store.
- socialEngagementType - Engagement type, either "Socially Engaged" or "Not Socially Engaged".
- totals - This section contains aggregate values across the session.
- trafficSource - This section contains information about the Traffic Source from which the session originated.
- visitId - An identifier for this session. This is part of the value usually stored as the _utmb cookie. This is only unique to the user. For a completely unique ID, you should use a combination of fullVisitorId and visitId.
- visitNumber - The session number for this user. If this is the first session, then this is set to 1.
- visitStartTime - The timestamp (expressed as POSIX time).

Since a few columns contain JSON values, let's flatten them.

Source of the code used to flatten the JSON columns: https://www.kaggle.com/julian3833/1-quick-start-read-csv-and-flatten-json-fields/notebook
def load_df(csv_path='../input/train.csv', nrows=None):
    JSON_COLUMNS = ['device', 'geoNetwork', 'totals', 'trafficSource']
    df = pd.read_csv(csv_path,
                     converters={column: json.loads for column in JSON_COLUMNS},
                     dtype={'fullVisitorId': 'str'},  # Important!!
                     nrows=nrows)
    for column in JSON_COLUMNS:
        column_as_df = json_normalize(df[column])
        column_as_df.columns = [f"{column}.{subcolumn}" for subcolumn in column_as_df.columns]
        df = df.drop(column, axis=1).merge(column_as_df, right_index=True, left_index=True)
    print(f"Loaded {os.path.basename(csv_path)}. Shape: {df.shape}")
    return df

train_df = load_df()
train_df.head()

test_df = load_df("../input/test.csv")

# train_df.to_csv('flatten_train.csv')
# test_df.to_csv('flatten_test.csv')

diff_cols(train_df, test_df)
_____no_output_____
MIT
9 google customer revenue prediction/exploratory-google-store-analysis.ipynb
MLVPRASAD/KaggleProjects
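The key step inside `load_df` is `json_normalize`; as a minimal sketch on a made-up column of already-parsed dicts (not the real `device` field), it expands nested keys into flat, prefixed columns:

import pandas as pd
from pandas.io.json import json_normalize  # pd.json_normalize in newer pandas

df = pd.DataFrame({'device': [{'browser': 'Chrome', 'isMobile': False},
                              {'browser': 'Safari', 'isMobile': True}]})

# one flat column per key, then prefix and merge back, mirroring load_df
flat = json_normalize(df['device'].tolist())
flat.columns = ['device.' + c for c in flat.columns]
df = df.drop('device', axis=1).merge(flat, right_index=True, left_index=True)
print(df)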
`totals.transactionRevenue` is what we are going to predict, and there is no `campaignCode` in the test set.

Cleaning Dataset
train_constants = constant_cols(train_df)
test_constants = constant_cols(test_df)
print(train_constants)
print(test_constants)

train_df["totals.transactionRevenue"] = train_df["totals.transactionRevenue"].astype('float')
train_df['totals.transactionRevenue'] = train_df['totals.transactionRevenue'].fillna(0)

train_df['date'] = train_df['date'].astype(str)
train_df["date"] = train_df["date"].apply(lambda x : x[:4] + "-" + x[4:6] + "-" + x[6:])
train_df["date"] = pd.to_datetime(train_df["date"])
_____no_output_____
MIT
9 google customer revenue prediction/exploratory-google-store-analysis.ipynb
MLVPRASAD/KaggleProjects
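As a side note, and not what the notebook actually does, the manual string slicing for `date` could be replaced by handing `pd.to_datetime` an explicit format for the YYYYMMDD values:

import pandas as pd

# hypothetical values in the same YYYYMMDD style as the date column
dates = pd.Series([20160902, 20161120])
print(pd.to_datetime(dates.astype(str), format='%Y%m%d'))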
Both DataFrames have the same constant-valued columns, so let's remove them.
train_constants = constant_cols(train_df)
test_constants = constant_cols(test_df)

train_df = train_df.drop(columns=train_constants, axis = 1)
test_df = test_df.drop(columns=test_constants, axis = 1)
_____no_output_____
MIT
9 google customer revenue prediction/exploratory-google-store-analysis.ipynb
MLVPRASAD/KaggleProjects
EDA

Null values
null_values = train_df.isna().sum(axis = 0).reset_index()
null_values = null_values[null_values[0] > 50]

null_chart = [go.Bar(y = null_values['index'],
                     x = null_values[0] * 100 / len(train_df),
                     orientation = 'h')]
py.iplot(null_chart)
_____no_output_____
MIT
9 google customer revenue prediction/exploratory-google-store-analysis.ipynb
MLVPRASAD/KaggleProjects
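The percentages behind this bar chart can be sanity-checked with plain pandas (a quick sketch, no plotting):

# fraction of missing values per column, as a percentage, highest first
null_pct = train_df.isna().mean().mul(100).sort_values(ascending=False)
print(null_pct[null_pct > 0].head(15))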
**Summary**
- Many columns have null values.
- We will look into why these columns are null and how we can handle them.

Via which channel did users visit
data = train_df[['channelGrouping','totals.transactionRevenue']]
temp = data['channelGrouping'].value_counts()

chart = [go.Pie(labels = temp.index, values = temp.values)]
py.iplot(chart)
_____no_output_____
MIT
9 google customer revenue prediction/exploratory-google-store-analysis.ipynb
MLVPRASAD/KaggleProjects
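The shares in the pie chart can also be read off directly, as a rough cross-check on the same column:

# per-channel share of sessions, as a percentage
channel_share = train_df['channelGrouping'].value_counts(normalize=True) * 100
print(channel_share.round(2))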
**Summary**
- Most of the users came via organic search.
- Paid-search and affiliate users are comparatively rare.

Mobile users
temp = train_df['device.isMobile'].value_counts()

chart = go.Bar(x = ["False","True"], y = temp.values)
py.iplot([chart])
_____no_output_____
MIT
9 google customer revenue prediction/exploratory-google-store-analysis.ipynb
MLVPRASAD/KaggleProjects
**Summary**
- Most users browse the site from a desktop or tablet rather than a mobile device.

Browser based
count_mean('device.browser',"#7FDBFF","#3D9970")
_____no_output_____
MIT
9 google customer revenue prediction/exploratory-google-store-analysis.ipynb
MLVPRASAD/KaggleProjects
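For reference, the two quantities that the `count_mean` helper plots for `device.browser` can be reproduced with plain pandas (a sketch using the same columns the helper reads):

col = 'device.browser'

# left panel: session counts per browser
counts = train_df[col].value_counts().head(10)

# right panel: mean revenue per browser over sessions with non-zero revenue
nonzero = train_df[train_df['totals.transactionRevenue'] > 1]
mean_rev = (nonzero.groupby(col)['totals.transactionRevenue']
                   .mean()
                   .sort_values(ascending=False)
                   .head(10))

print(counts)
print(mean_rev)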