| column | type | string lengths |
|---|---|---|
| markdown | string | 0–1.02M |
| code | string | 0–832k |
| output | string | 0–1.02M |
| license | string | 3–36 |
| path | string | 6–265 |
| repo_name | string | 6–127 |
PinSage Code

PinSage Layers
import torch import torch.nn as nn import torch.nn.functional as F import dgl import dgl.nn.pytorch as dglnn import dgl.function as fn def disable_grad(module): for param in module.parameters(): param.requires_grad = False def _init_input_modules(g, ntype, textset, hidden_dims): # We initialize the linear projections of each input feature ``x`` as # follows: # * If ``x`` is a scalar integral feature, we assume that ``x`` is a categorical # feature, and assume the range of ``x`` is 0..max(x). # * If ``x`` is a float one-dimensional feature, we assume that ``x`` is a # numeric vector. # * If ``x`` is a field of a textset, we process it as bag of words. module_dict = nn.ModuleDict() for column, data in g.nodes[ntype].data.items(): if column == dgl.NID: continue if data.dtype == torch.float32: assert data.ndim == 2 m = nn.Linear(data.shape[1], hidden_dims) nn.init.xavier_uniform_(m.weight) nn.init.constant_(m.bias, 0) module_dict[column] = m elif data.dtype == torch.int64: assert data.ndim == 1 m = nn.Embedding( data.max() + 2, hidden_dims, padding_idx=-1) nn.init.xavier_uniform_(m.weight) module_dict[column] = m if textset is not None: for column, field in textset.fields.items(): if field.vocab.vectors: module_dict[column] = BagOfWordsPretrained(field, hidden_dims) else: module_dict[column] = BagOfWords(field, hidden_dims) return module_dict class BagOfWordsPretrained(nn.Module): def __init__(self, field, hidden_dims): super().__init__() input_dims = field.vocab.vectors.shape[1] self.emb = nn.Embedding( len(field.vocab.itos), input_dims, padding_idx=field.vocab.stoi[field.pad_token]) self.emb.weight[:] = field.vocab.vectors self.proj = nn.Linear(input_dims, hidden_dims) nn.init.xavier_uniform_(self.proj.weight) nn.init.constant_(self.proj.bias, 0) disable_grad(self.emb) def forward(self, x, length): """ x: (batch_size, max_length) LongTensor length: (batch_size,) LongTensor """ x = self.emb(x).sum(1) / length.unsqueeze(1).float() return self.proj(x) class BagOfWords(nn.Module): def __init__(self, field, hidden_dims): super().__init__() self.emb = nn.Embedding( len(field.vocab.itos), hidden_dims, padding_idx=field.vocab.stoi[field.pad_token]) nn.init.xavier_uniform_(self.emb.weight) def forward(self, x, length): return self.emb(x).sum(1) / length.unsqueeze(1).float() class LinearProjector(nn.Module): """ Projects each input feature of the graph linearly and sums them up """ def __init__(self, full_graph, ntype, textset, hidden_dims): super().__init__() self.ntype = ntype self.inputs = _init_input_modules(full_graph, ntype, textset, hidden_dims) def forward(self, ndata): projections = [] for feature, data in ndata.items(): if feature == dgl.NID or feature.endswith('__len'): # This is an additional feature indicating the length of the ``feature`` # column; we shouldn't process this. continue module = self.inputs[feature] if isinstance(module, (BagOfWords, BagOfWordsPretrained)): # Textual feature; find the length and pass it to the textual module. 
length = ndata[feature + '__len'] result = module(data, length) else: result = module(data) projections.append(result) return torch.stack(projections, 1).sum(1) class WeightedSAGEConv(nn.Module): def __init__(self, input_dims, hidden_dims, output_dims, act=F.relu): super().__init__() self.act = act self.Q = nn.Linear(input_dims, hidden_dims) self.W = nn.Linear(input_dims + hidden_dims, output_dims) self.reset_parameters() self.dropout = nn.Dropout(0.5) def reset_parameters(self): gain = nn.init.calculate_gain('relu') nn.init.xavier_uniform_(self.Q.weight, gain=gain) nn.init.xavier_uniform_(self.W.weight, gain=gain) nn.init.constant_(self.Q.bias, 0) nn.init.constant_(self.W.bias, 0) def forward(self, g, h, weights): """ g : graph h : node features weights : scalar edge weights """ h_src, h_dst = h with g.local_scope(): g.srcdata['n'] = self.act(self.Q(self.dropout(h_src))) g.edata['w'] = weights.float() g.update_all(fn.u_mul_e('n', 'w', 'm'), fn.sum('m', 'n')) g.update_all(fn.copy_e('w', 'm'), fn.sum('m', 'ws')) n = g.dstdata['n'] ws = g.dstdata['ws'].unsqueeze(1).clamp(min=1) z = self.act(self.W(self.dropout(torch.cat([n / ws, h_dst], 1)))) z_norm = z.norm(2, 1, keepdim=True) z_norm = torch.where(z_norm == 0, torch.tensor(1.).to(z_norm), z_norm) z = z / z_norm return z class SAGENet(nn.Module): def __init__(self, hidden_dims, n_layers): """ g : DGLHeteroGraph The user-item interaction graph. This is only for finding the range of categorical variables. item_textsets : torchtext.data.Dataset The textual features of each item node. """ super().__init__() self.convs = nn.ModuleList() for _ in range(n_layers): self.convs.append(WeightedSAGEConv(hidden_dims, hidden_dims, hidden_dims)) def forward(self, blocks, h): for layer, block in zip(self.convs, blocks): h_dst = h[:block.number_of_nodes('DST/' + block.ntypes[0])] h = layer(block, (h, h_dst), block.edata['weights']) return h class ItemToItemScorer(nn.Module): def __init__(self, full_graph, ntype): super().__init__() n_nodes = full_graph.number_of_nodes(ntype) self.bias = nn.Parameter(torch.zeros(n_nodes)) def _add_bias(self, edges): bias_src = self.bias[edges.src[dgl.NID]] bias_dst = self.bias[edges.dst[dgl.NID]] return {'s': edges.data['s'] + bias_src + bias_dst} def forward(self, item_item_graph, h): """ item_item_graph : graph consists of edges connecting the pairs h : hidden state of every node """ with item_item_graph.local_scope(): item_item_graph.ndata['h'] = h item_item_graph.apply_edges(fn.u_dot_v('h', 'h', 's')) item_item_graph.apply_edges(self._add_bias) pair_score = item_item_graph.edata['s'] return pair_score
_____no_output_____
MIT
2021/pinsage_movielens_robert_output_disabled.ipynb
harvard-visionlab/psy1406
PinSage Sampler
import numpy as np import dgl import torch from torch.utils.data import IterableDataset, DataLoader def compact_and_copy(frontier, seeds): block = dgl.to_block(frontier, seeds) for col, data in frontier.edata.items(): if col == dgl.EID: continue block.edata[col] = data[block.edata[dgl.EID]] return block class ItemToItemBatchSampler(IterableDataset): def __init__(self, g, user_type, item_type, batch_size): self.g = g self.user_type = user_type self.item_type = item_type self.user_to_item_etype = list(g.metagraph()[user_type][item_type])[0] self.item_to_user_etype = list(g.metagraph()[item_type][user_type])[0] self.batch_size = batch_size def __iter__(self): while True: heads = torch.randint(0, self.g.number_of_nodes(self.item_type), (self.batch_size,)) tails = dgl.sampling.random_walk( self.g, heads, metapath=[self.item_to_user_etype, self.user_to_item_etype])[0][:, 2] neg_tails = torch.randint(0, self.g.number_of_nodes(self.item_type), (self.batch_size,)) mask = (tails != -1) yield heads[mask], tails[mask], neg_tails[mask] class NeighborSampler(object): def __init__(self, g, user_type, item_type, random_walk_length, random_walk_restart_prob, num_random_walks, num_neighbors, num_layers): self.g = g self.user_type = user_type self.item_type = item_type self.user_to_item_etype = list(g.metagraph()[user_type][item_type])[0] self.item_to_user_etype = list(g.metagraph()[item_type][user_type])[0] self.samplers = [ dgl.sampling.PinSAGESampler(g, item_type, user_type, random_walk_length, random_walk_restart_prob, num_random_walks, num_neighbors) for _ in range(num_layers)] def sample_blocks(self, seeds, heads=None, tails=None, neg_tails=None): blocks = [] for sampler in self.samplers: frontier = sampler(seeds) if heads is not None: eids = frontier.edge_ids(torch.cat([heads, heads]), torch.cat([tails, neg_tails]), return_uv=True)[2] if len(eids) > 0: old_frontier = frontier frontier = dgl.remove_edges(old_frontier, eids) #print(old_frontier) #print(frontier) #print(frontier.edata['weights']) #frontier.edata['weights'] = old_frontier.edata['weights'][frontier.edata[dgl.EID]] block = compact_and_copy(frontier, seeds) seeds = block.srcdata[dgl.NID] blocks.insert(0, block) return blocks def sample_from_item_pairs(self, heads, tails, neg_tails): # Create a graph with positive connections only and another graph with negative # connections only. pos_graph = dgl.graph( (heads, tails), num_nodes=self.g.number_of_nodes(self.item_type)) neg_graph = dgl.graph( (heads, neg_tails), num_nodes=self.g.number_of_nodes(self.item_type)) pos_graph, neg_graph = dgl.compact_graphs([pos_graph, neg_graph]) seeds = pos_graph.ndata[dgl.NID] blocks = self.sample_blocks(seeds, heads, tails, neg_tails) return pos_graph, neg_graph, blocks def assign_simple_node_features(ndata, g, ntype, assign_id=False): """ Copies data to the given block from the corresponding nodes in the original graph. """ for col in g.nodes[ntype].data.keys(): if not assign_id and col == dgl.NID: continue induced_nodes = ndata[dgl.NID] ndata[col] = g.nodes[ntype].data[col][induced_nodes] def assign_textual_node_features(ndata, textset, ntype): """ Assigns numericalized tokens from a torchtext dataset to given block. The numericalized tokens would be stored in the block as node features with the same name as ``field_name``. The length would be stored as another node feature with name ``field_name + '__len'``. 
block : DGLHeteroGraph First element of the compacted blocks, with "dgl.NID" as the corresponding node ID in the original graph, hence the index to the text dataset. The numericalized tokens (and lengths if available) would be stored onto the blocks as new node features. textset : torchtext.data.Dataset A torchtext dataset whose number of examples is the same as that of nodes in the original graph. """ node_ids = ndata[dgl.NID].numpy() if textset is not None: for field_name, field in textset.fields.items(): examples = [getattr(textset[i], field_name) for i in node_ids] tokens, lengths = field.process(examples) if not field.batch_first: tokens = tokens.t() ndata[field_name] = tokens ndata[field_name + '__len'] = lengths def assign_features_to_blocks(blocks, g, textset, ntype): # For the first block (which is closest to the input), copy the features from # the original graph as well as the texts. assign_simple_node_features(blocks[0].srcdata, g, ntype) assign_textual_node_features(blocks[0].srcdata, textset, ntype) assign_simple_node_features(blocks[-1].dstdata, g, ntype) assign_textual_node_features(blocks[-1].dstdata, textset, ntype) class PinSAGECollator(object): def __init__(self, sampler, g, ntype, textset): self.sampler = sampler self.ntype = ntype self.g = g self.textset = textset def collate_train(self, batches): heads, tails, neg_tails = batches[0] # Construct multilayer neighborhood via PinSAGE... pos_graph, neg_graph, blocks = self.sampler.sample_from_item_pairs(heads, tails, neg_tails) assign_features_to_blocks(blocks, self.g, self.textset, self.ntype) return pos_graph, neg_graph, blocks def collate_test(self, samples): batch = torch.LongTensor(samples) blocks = self.sampler.sample_blocks(batch) assign_features_to_blocks(blocks, self.g, self.textset, self.ntype) return blocks
_____no_output_____
MIT
2021/pinsage_movielens_robert_output_disabled.ipynb
harvard-visionlab/psy1406
PinSage Evaluation
import numpy as np import torch import pickle import dgl import argparse def prec(recommendations, ground_truth): n_users, n_items = ground_truth.shape K = recommendations.shape[1] user_idx = np.repeat(np.arange(n_users), K) item_idx = recommendations.flatten() relevance = ground_truth[user_idx, item_idx].reshape((n_users, K)) hit = relevance.any(axis=1).mean() return hit class LatestNNRecommender(object): def __init__(self, user_ntype, item_ntype, user_to_item_etype, timestamp, batch_size): self.user_ntype = user_ntype self.item_ntype = item_ntype self.user_to_item_etype = user_to_item_etype self.batch_size = batch_size self.timestamp = timestamp def recommend(self, full_graph, K, h_user, h_item): """ Return a (n_user, K) matrix of recommended items for each user """ graph_slice = full_graph.edge_type_subgraph([self.user_to_item_etype]) n_users = full_graph.number_of_nodes(self.user_ntype) latest_interactions = dgl.sampling.select_topk(graph_slice, 1, self.timestamp, edge_dir='out') user, latest_items = latest_interactions.all_edges(form='uv', order='srcdst') # each user should have at least one "latest" interaction assert torch.equal(user, torch.arange(n_users)) recommended_batches = [] user_batches = torch.arange(n_users).split(self.batch_size) for user_batch in user_batches: latest_item_batch = latest_items[user_batch].to(device=h_item.device) dist = h_item[latest_item_batch] @ h_item.t() # exclude items that are already interacted for i, u in enumerate(user_batch.tolist()): interacted_items = full_graph.successors(u, etype=self.user_to_item_etype) dist[i, interacted_items] = -np.inf recommended_batches.append(dist.topk(K, 1)[1]) recommendations = torch.cat(recommended_batches, 0) return recommendations def evaluate_nn(dataset, h_item, k, batch_size): g = dataset['train-graph'] val_matrix = dataset['val-matrix'].tocsr() test_matrix = dataset['test-matrix'].tocsr() item_texts = dataset['item-texts'] user_ntype = dataset['user-type'] item_ntype = dataset['item-type'] user_to_item_etype = dataset['user-to-item-type'] timestamp = dataset['timestamp-edge-column'] rec_engine = LatestNNRecommender( user_ntype, item_ntype, user_to_item_etype, timestamp, batch_size) recommendations = rec_engine.recommend(g, k, None, h_item).cpu().numpy() return prec(recommendations, val_matrix)
_____no_output_____
MIT
2021/pinsage_movielens_robert_output_disabled.ipynb
harvard-visionlab/psy1406
PinSage Training
import pickle import argparse import numpy as np import torch import torch.nn as nn from torch.utils.data import DataLoader import torchtext import dgl import tqdm import madgrad from fastprogress.fastprogress import master_bar, progress_bar # import layers # import sampler as sampler_module # import evaluation class PinSAGEModel(nn.Module): def __init__(self, full_graph, ntype, textsets, hidden_dims, n_layers): super().__init__() self.proj = LinearProjector(full_graph, ntype, textsets, hidden_dims) self.sage = SAGENet(hidden_dims, n_layers) self.scorer = ItemToItemScorer(full_graph, ntype) def forward(self, pos_graph, neg_graph, blocks): h_item = self.get_repr(blocks) pos_score = self.scorer(pos_graph, h_item) neg_score = self.scorer(neg_graph, h_item) return (neg_score - pos_score + 1).clamp(min=0) def get_repr(self, blocks): h_item = self.proj(blocks[0].srcdata) h_item_dst = self.proj(blocks[-1].dstdata) return h_item_dst + self.sage(blocks, h_item) def train_pinsage_implicit(args): # Load dataset with open(args.dataset_path, 'rb') as f: dataset = pickle.load(f) g = dataset['train-graph'] val_matrix = dataset['val-matrix'].tocsr() test_matrix = dataset['test-matrix'].tocsr() item_texts = dataset['item-texts'] user_ntype = dataset['user-type'] item_ntype = dataset['item-type'] user_to_item_etype = dataset['user-to-item-type'] timestamp = dataset['timestamp-edge-column'] device = torch.device(args.device) # Assign user and movie IDs and use them as features (to learn an individual # trainable embedding for each entity) g.nodes[user_ntype].data['id'] = torch.arange(g.number_of_nodes(user_ntype)) g.nodes[item_ntype].data['id'] = torch.arange(g.number_of_nodes(item_ntype)) # Prepare torchtext dataset and vocabulary if args.add_title: fields = {} examples = [] for key, texts in item_texts.items(): fields[key] = torchtext.legacy.data.Field(include_lengths=True, lower=True, batch_first=True) for i in range(g.number_of_nodes(item_ntype)): example = torchtext.legacy.data.Example.fromlist( [item_texts[key][i] for key in item_texts.keys()], [(key, fields[key]) for key in item_texts.keys()]) examples.append(example) textset = torchtext.legacy.data.Dataset(examples, fields) for key, field in fields.items(): field.build_vocab(getattr(textset, key)) #field.build_vocab(getattr(textset, key), vectors='fasttext.simple.300d') else: textset = None # Sampler batch_sampler = ItemToItemBatchSampler( g, user_ntype, item_ntype, args.batch_size) neighbor_sampler = NeighborSampler( g, user_ntype, item_ntype, args.random_walk_length, args.random_walk_restart_prob, args.num_random_walks, args.num_neighbors, args.num_layers) collator = PinSAGECollator(neighbor_sampler, g, item_ntype, textset) dataloader = DataLoader( batch_sampler, collate_fn=collator.collate_train, num_workers=args.num_workers) dataloader_test = DataLoader( torch.arange(g.number_of_nodes(item_ntype)), batch_size=args.batch_size, collate_fn=collator.collate_test, num_workers=args.num_workers) dataloader_it = iter(dataloader) # Model model = PinSAGEModel(g, item_ntype, textset, args.hidden_dims, args.num_layers).to(device) print(model) # Optimizer if args.opt == 'MADGRAD': opt = madgrad.MADGRAD(model.parameters(), lr=args.lr) else: opt = torch.optim.__dict__[args.opt](model.parameters(), lr=args.lr) print(opt) # For each batch of head-tail-negative triplets... 
mb = master_bar(range(args.num_epochs)) for epoch_id in mb: model.train() for batch_id in progress_bar(range(args.batches_per_epoch), parent=mb): pos_graph, neg_graph, blocks = next(dataloader_it) # Copy to GPU for i in range(len(blocks)): blocks[i] = blocks[i].to(device, non_blocking=True) pos_graph = pos_graph.to(device, non_blocking=True) neg_graph = neg_graph.to(device, non_blocking=True) loss = model(pos_graph, neg_graph, blocks).mean() opt.zero_grad() loss.backward() opt.step() # Evaluate model.eval() with torch.no_grad(): item_batches = torch.arange(g.number_of_nodes(item_ntype)).split(args.batch_size) h_item_batches = [] for blocks in dataloader_test: for i in range(len(blocks)): blocks[i] = blocks[i].to(device) h_item_batches.append(model.get_repr(blocks)) h_item = torch.cat(h_item_batches, 0) hit_rate = evaluate_nn(dataset, h_item, args.k, args.batch_size) print(f"\nEpoch [{epoch_id:02d}]/[{args.num_epochs:02d}]: Hit@{args.k}: {hit_rate:2.3f}")
_____no_output_____
MIT
2021/pinsage_movielens_robert_output_disabled.ipynb
harvard-visionlab/psy1406
Check model with data

Choose different datasets to see how the model automatically adjusts to fit the data in the graph.
import torch from types import SimpleNamespace args = SimpleNamespace() # args.dataset_path = '/content/data.pkl' # args.dataset_path = '/content/ml_1m_plot_data.pkl' # args.dataset_path = '/content/ml_1m_backdrop_swin.pkl' # args.dataset_path = '/content/ml_1m_imdb_longest.pkl' args.dataset_path = '/content/ml_1m_only_id.pkl' args.random_walk_length = 2 args.random_walk_restart_prob = .5 args.num_random_walks = 1 args.num_neighbors = 3 args.num_layers = 2 args.hidden_dims = 16 args.batch_size = 32 args.device = 'cuda:0' if torch.cuda.is_available() else 'cpu' args.num_epochs = 10 args.batches_per_epoch = 20000 args.num_workers = 2 args.lr = 3e-5 args.k = 10 args.opt = 'MADGRAD' # Adam, AdamW, MADGRAD args.add_title = False print(args) # Load dataset with open(args.dataset_path, 'rb') as f: dataset = pickle.load(f) g = dataset['train-graph'] val_matrix = dataset['val-matrix'].tocsr() test_matrix = dataset['test-matrix'].tocsr() item_texts = dataset['item-texts'] user_ntype = dataset['user-type'] item_ntype = dataset['item-type'] user_to_item_etype = dataset['user-to-item-type'] timestamp = dataset['timestamp-edge-column'] device = torch.device(args.device) # Assign user and movie IDs and use them as features (to learn an individual # trainable embedding for each entity) g.nodes[user_ntype].data['id'] = torch.arange(g.number_of_nodes(user_ntype)) g.nodes[item_ntype].data['id'] = torch.arange(g.number_of_nodes(item_ntype)) # drop features # del g.nodes['movie'].data['year'] # del g.nodes['movie'].data['genre'] # del g.nodes['movie'].data['plot'] # Prepare torchtext dataset and vocabulary if args.add_title: fields = {} examples = [] for key, texts in item_texts.items(): fields[key] = torchtext.legacy.data.Field(include_lengths=True, lower=True, batch_first=True) for i in range(g.number_of_nodes(item_ntype)): example = torchtext.legacy.data.Example.fromlist( [item_texts[key][i] for key in item_texts.keys()], [(key, fields[key]) for key in item_texts.keys()]) examples.append(example) textset = torchtext.legacy.data.Dataset(examples, fields) for key, field in fields.items(): field.build_vocab(getattr(textset, key)) #field.build_vocab(getattr(textset, key), vectors='fasttext.simple.300d') else: textset = None # Model model = PinSAGEModel(g, item_ntype, textset, args.hidden_dims, args.num_layers).to(device) print(model)
_____no_output_____
MIT
2021/pinsage_movielens_robert_output_disabled.ipynb
harvard-visionlab/psy1406
PinSage Train on Implicit Task template (don't use or edit this one; it's just for reference)
from types import SimpleNamespace args = SimpleNamespace() args.dataset_path = '/content/data.pkl' # args.dataset_path = '/content/ml_1m_only_id.pkl' args.random_walk_length = 2 args.random_walk_restart_prob = .5 args.num_random_walks = 1 args.num_neighbors = 3 args.num_layers = 2 args.hidden_dims = 16 args.batch_size = 32 args.device = 'cuda:0' if torch.cuda.is_available() else 'cpu' args.num_epochs = 10 args.batches_per_epoch = 20000 args.num_workers = 2 args.lr = 3e-5 args.k = 10 args.opt = 'MADGRAD' # Adam, AdamW, MADGRAD args.add_title = True print(args) # baseline (movie id only): Epoch [09]/[10]: Hit@10: 0.042 # all movie data plus longest plot: Epoch [09]/[10]: Hit@10: 0.080 # without plot (MADGRAD): Epoch [09]/[10]: Hit@10: 0.081 # with plot (MADGRAD): Epoch [09]/[10]: Hit@10: 0.060 # with plot (ADAMW): Epoch [09]/[10]: Hit@10: 0.064
_____no_output_____
MIT
2021/pinsage_movielens_robert_output_disabled.ipynb
harvard-visionlab/psy1406
baseline model, movie id only
from types import SimpleNamespace args = SimpleNamespace() args.dataset_path = '/content/ml_1m_only_id.pkl' args.random_walk_length = 2 args.random_walk_restart_prob = .5 args.num_random_walks = 1 args.num_neighbors = 3 args.num_layers = 2 args.hidden_dims = 32 # 16 args.batch_size = 256 # 32 args.device = 'cuda:0' if torch.cuda.is_available() else 'cpu' args.num_epochs = 10 args.batches_per_epoch = 20000 args.num_workers = 4 args.lr = 3e-5 args.k = 10 args.opt = 'MADGRAD' # Adam, AdamW, MADGRAD args.add_title = False print(args) train_pinsage_implicit(args)
_____no_output_____
MIT
2021/pinsage_movielens_robert_output_disabled.ipynb
harvard-visionlab/psy1406
Possible variations

You can try different datasets (i.e., change the name of the data file being used to load the graph; the PinSage model automatically adapts to the data in the graph):

- [ ] ml_1m_only_id.pkl (this is our baseline)
- [ ] ml_1m_imdb_plot.pkl (embedding of short plot descriptions)
- [ ] ml_1m_imdb_full_plot.pkl (embedding of long plot descriptions)
- [ ] ml_1m_imdb_synopsis.pkl (embedding of pages-long movie summaries)
- [ ] ml_1m_imdb_longest.pkl (embedding of the longest available text; since many movies don't have a full synopsis, we fall back to the full plot or short plot as needed)
- [ ] ml_1m_poster_swin (movie poster embedding)
- [ ] ml_1m_backdrop_swin (widescreen movie poster embedding)

Or you can try different hyperparameters:

- [ ] args.hidden_dims (number of dimensions used to encode node information)
- [ ] args.num_layers (how many "hops" the PinSage random walk goes when building graphs)
- [ ] args.num_random_walks (how many walks to take)
- [ ] args.num_neighbors (how many neighbors to keep)

You can make changes in the cell below and then run it to try a new model. If you run multiple variations, copy this code into a new cell for each variant so you can use the cell output as a record.
from types import SimpleNamespace # Edit the Dataset path args = SimpleNamespace() args.dataset_path = '/content/ml_1m_only_id.pkl' args.random_walk_length = 2 args.random_walk_restart_prob = .5 args.num_random_walks = 1 args.num_neighbors = 3 args.num_layers = 2 args.hidden_dims = 32 # 16 args.batch_size = 256 # 32 args.device = 'cuda:0' if torch.cuda.is_available() else 'cpu' args.num_epochs = 10 args.batches_per_epoch = 20000 args.num_workers = 2 args.lr = 3e-5 args.k = 10 args.opt = 'MADGRAD' # Adam, AdamW, MADGRAD args.add_title = False print(args) train_pinsage_implicit(args)
_____no_output_____
MIT
2021/pinsage_movielens_robert_output_disabled.ipynb
harvard-visionlab/psy1406
Summary & Conclusions

Briefly write up a summary of what you did, what you found, and what you think it means.

Then share this notebook (your edited copy) with me ([email protected]) to submit your final project.
_____no_output_____
MIT
2021/pinsage_movielens_robert_output_disabled.ipynb
harvard-visionlab/psy1406
Geocodio reports the likely accuracy of the lat/long it provides via an accuracy score. Some of the locations do not have high accuracy scores, which is worth noting. For now, we'll see whether any mapping errors actually occur. If they do, I'll go back and engineer a solution.
# We have 239 cities, so we want to make sure there are 239 rows.
df.shape
_____no_output_____
Apache-2.0
EDA.ipynb
mattymecks/nlchp-mapping-project
Because I'm only missing five values, I'm going to manually find their lat/lon coordinates and then add them in. I'll do this here because I want them mapped in all future data.
# Manual updating of relevent values new_data = [(40.0874759, -108.8048292, 'CO3', 'CO'), (39.446649, -106.03757, 'CO2', 'CO'), (28.022243, -81.732857, 'FL9', 'FL'), (39.9689532, -82.9376804, 'OH3', 'OH'), (39.103119, -84.512016, 'OH1', 'OH')] indexes = (63, 85, 98, 172, 191) for i in range(5): df.loc[indexes[i],'Latitude'] = new_data[i][0] df.loc[indexes[i], 'Longitude'] = new_data[i][1] df.loc[indexes[i], 'Congressional District'] = new_data[i][2] df.loc[indexes[i], 'State.1'] = new_data[i][3] # Checking to see we've fixed all lat/lon problems df.loc[df['Latitude'] == 0] # Dropping unnecessary columns added by Geocodio for the sake of data tidiness df = df.drop(columns = ['Number', 'Street', 'City.1', 'Source']) df.head(1)
_____no_output_____
Apache-2.0
EDA.ipynb
mattymecks/nlchp-mapping-project
Much better. The campaign wants to keep track of the status of the anti-panhandling statutes/ordinances in each city, and they want to be able to update those values to reflect varying degrees of success as the campaign goes on. I'm going to create a 'status' value for each city and set its default to "active." Then I'm going to map "status text" and "marker color" right onto the values.
# Adding in a "status" column df['status'] = 0 # Creating a conditional column explaining ordinance status d_text = {0: 'Ordinance Active - With No Response', 1: 'Ordinance Active - With Response Indicating No Immediate Repeal', 2: 'Ordinance Active - With Committment To Review', 3: 'Ordinance Halted - With Committment to Review', 4: 'Ordinance Repealed'} df['statusText'] = df['status'].map(d_text) # Setting point color conditionally based upon status d_color = {0: 'rgb(255, 0, 0)', 1: 'rgb(255, 192, 203)', 2: 'rgb(255, 165, 0)', 3: 'rgb(255, 255, 0)', 4: 'rgb(127, 255, 0)'} df['color'] = df['status'].map(d_color) # print(df.dtypes) df.head(3)
_____no_output_____
Apache-2.0
EDA.ipynb
mattymecks/nlchp-mapping-project
Now I'm going to persist my changes. I'll make updates periodically, but I'll use a separate notebook for that so that I don't have to continuously repeat this process.
df.to_csv('cleaned_data.csv', mode ='w+')
_____no_output_____
Apache-2.0
EDA.ipynb
mattymecks/nlchp-mapping-project
Overfitting demo

Creating a dataset based on a sinusoidal function
import math
import random
import numpy as np
import pandas as pd
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Ridge
from sklearn.linear_model import RidgeCV
from sklearn.linear_model import Lasso
from matplotlib import pyplot as plt
%matplotlib inline
_____no_output_____
MIT
regressao_linear/Overfitting_Demo_Ridge.ipynb
luizcz/aprendizado_maquina
Let's consider a synthetic dataset of 30 points sampled from a sinusoidal function $y = \sin(4x)$:
def f(x):
    return np.sin(np.multiply(4, x))
_____no_output_____
MIT
regressao_linear/Overfitting_Demo_Ridge.ipynb
luizcz/aprendizado_maquina
Below we create random values for $x$ in the interval [0,1)
random.seed(98103)
n = 30  # number of generated values
x = np.array([random.random() for _ in range(n)])  # each iteration generates a random value between 0 and 1
x = np.sort(x)  # sort the values in ascending order
# reshape the array into a matrix with n rows and 1 column (column vector)
X = x[:, np.newaxis]
_____no_output_____
MIT
regressao_linear/Overfitting_Demo_Ridge.ipynb
luizcz/aprendizado_maquina
Compute $y$ as a function of $x$. $y$ is called the dependent variable because it depends on $x$.
Y = f(x)
_____no_output_____
MIT
regressao_linear/Overfitting_Demo_Ridge.ipynb
luizcz/aprendizado_maquina
Add random Gaussian noise to $y$
random.seed(1)
# the noise is sampled from a normal distribution with mean 0 and standard deviation 1/3
e = np.array([random.gauss(0, 1.0/3.0) for i in range(n)])
Y = Y + e
_____no_output_____
MIT
regressao_linear/Overfitting_Demo_Ridge.ipynb
luizcz/aprendizado_maquina
Helper functions

Function to plot the data (scatter plot)
def plot_data(X, Y):
    plt.plot(X, Y, 'k.')
    plt.xlabel('X')
    plt.ylabel('Y')
    plt.axis([0, 1, -1.5, 2])
_____no_output_____
MIT
regressao_linear/Overfitting_Demo_Ridge.ipynb
luizcz/aprendizado_maquina
Function to print the coefficients
def print_coefficients(model):
    # Get the degree of the polynomial
    deg = len(model.steps[1][1].coef_) - 1
    # Get the estimated parameters
    # model.steps is used because the model is built with scikit-learn's make_pipeline
    w = list(model.steps[1][1].coef_)
    # Numpy has a function to print the polynomial, but the parameters must be in reverse order
    print('Polinômio estimado para grau ' + str(deg) + ':')
    w.reverse()
    print(np.poly1d(w) + model.steps[1][1].intercept_)
_____no_output_____
MIT
regressao_linear/Overfitting_Demo_Ridge.ipynb
luizcz/aprendizado_maquina
Function to fit a polynomial regression of any degree using scikit-learn.
def polynomial_regression(X, Y, deg):
    model = make_pipeline(PolynomialFeatures(deg), LinearRegression())
    model.fit(X, Y)
    return model
_____no_output_____
MIT
regressao_linear/Overfitting_Demo_Ridge.ipynb
luizcz/aprendizado_maquina
Function to plot the model by means of its predictions
def print_poly_predictions(X, Y, model):
    plot_data(X, Y)
    x_plot = np.array([i/200.0 for i in range(200)])
    X_plot = x_plot[:, np.newaxis]
    y_pred = model.predict(X_plot)
    plt.plot(x_plot, y_pred, 'g-')
    plt.axis([0, 1, -1.5, 2])

def plot_residuals_vs_fit(X, Y, model):
    # plot_data(X,Y)
    # x_plot = np.array([i/200.0 for i in range(200)])
    # X_plot = x_plot[:,np.newaxis]
    y_pred = model.predict(X)
    res = Y - y_pred
    plt.plot(y_pred, res, 'k.', color='blue',)
    plt.axhline(y=0., color='r', linestyle='-')
    plt.xlabel("predictions")
    plt.ylabel("residuals")
_____no_output_____
MIT
regressao_linear/Overfitting_Demo_Ridge.ipynb
luizcz/aprendizado_maquina
Generating function
plot_data(X, Y)
x_plot = np.array([i/200.0 for i in range(200)])
y_plot = f(x_plot)
plt.plot(x_plot, y_plot, color='cornflowerblue', linewidth=2)
_____no_output_____
MIT
regressao_linear/Overfitting_Demo_Ridge.ipynb
luizcz/aprendizado_maquina
Polynomial regression of different degrees
model = polynomial_regression(X, Y, 16)
print_poly_predictions(X, Y, model)
_____no_output_____
MIT
regressao_linear/Overfitting_Demo_Ridge.ipynb
luizcz/aprendizado_maquina
Showing the model and its coefficients.
print_coefficients(model)
Polinômio estimado para grau 16: 16 15 14 13 3.337e+08 x - 2.226e+09 x + 6.62e+09 x - 1.156e+10 x 12 11 10 9 8 + 1.309e+10 x - 1e+10 x + 5.14e+09 x - 1.657e+09 x + 2.258e+08 x 7 6 5 4 3 + 6.694e+07 x - 4.734e+07 x + 1.393e+07 x - 2.548e+06 x + 3.018e+05 x 2 - 2.188e+04 x + 839.4 x - 12.01
MIT
regressao_linear/Overfitting_Demo_Ridge.ipynb
luizcz/aprendizado_maquina
Ridge regression

Ridge regression tries to avoid overfitting by adding to the RSS (from least squares) a cost that depends on the L2 norm of the coefficients $\|w\|$ (i.e., on the magnitude of the coefficients). The result is a penalty on fits with very large coefficients. The strength of this penalty is controlled by a parameter lambda (here called "L2_penalty").

The function below fits a ridge regression for any polynomial degree.
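As a reference for what that function minimizes, here is a sketch of the objective (assumed notation: $w_1,\dots,w_d$ are the polynomial coefficients, $w_0$ the intercept, and $\lambda$ is the `l2_penalty` passed as `alpha` to scikit-learn's `Ridge`, which leaves the intercept unpenalized):

$$\hat{w}^{\text{ridge}} = \arg\min_{w} \; \sum_{i=1}^{N}\Big(y_i - w_0 - \sum_{j=1}^{d} w_j x_i^{\,j}\Big)^2 + \lambda \sum_{j=1}^{d} w_j^2$$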
def polynomial_ridge_regression(X, Y, deg, l2_penalty):
    model = make_pipeline(PolynomialFeatures(deg), Ridge(alpha=l2_penalty))
    model.fit(X, Y)
    return model
_____no_output_____
MIT
regressao_linear/Overfitting_Demo_Ridge.ipynb
luizcz/aprendizado_maquina
Ridge with degree 16 using a *very* small penalty
model = polynomial_ridge_regression(X, Y, deg=16, l2_penalty=1e-14)
print_coefficients(model)
print_poly_predictions(X, Y, model)
_____no_output_____
MIT
regressao_linear/Overfitting_Demo_Ridge.ipynb
luizcz/aprendizado_maquina
Ridge with degree 16 using a *very* large penalty
model = polynomial_ridge_regression(X, Y, deg=16, l2_penalty=100)
print_coefficients(model)
print_poly_predictions(X, Y, model)
_____no_output_____
MIT
regressao_linear/Overfitting_Demo_Ridge.ipynb
luizcz/aprendizado_maquina
Sequence of fits for an increasing sequence of lambda values
for l2_penalty in [1e-10, 1e-8, 1e-6, 1e-3, 1, 1e1, 1e2]:
    model = polynomial_ridge_regression(X, Y, deg=16, l2_penalty=l2_penalty)
    print('lambda = %.2e' % l2_penalty)
    print_coefficients(model)
    print('\n')
    plt.figure()
    print_poly_predictions(X, Y, model)
    plt.title('Ridge, lambda = %.2e' % l2_penalty)
lambda = 1.00e-10 Polinômio estimado para grau 16: 16 15 14 13 12 11 10 7567 x - 7803 x - 6900 x + 714.5 x + 6541 x + 5802 x - 498.1 x 9 8 7 6 5 4 3 - 6056 x - 4252 x + 3439 x + 4893 x - 4281 x + 769.9 x + 100.6 x 2 - 11.39 x - 4.716 x + 0.7859 lambda = 1.00e-08 Polinômio estimado para grau 16: 16 15 14 13 12 11 352.8 x - 246.4 x - 338.4 x - 129.4 x + 148.9 x + 296 x 10 9 8 7 6 5 + 213.6 x - 38.58 x - 254.8 x - 218.5 x + 62.06 x + 244.8 x 4 3 2 + 36.66 x - 223.2 x + 112.4 x - 17.86 x + 1.157 lambda = 1.00e-06 Polinômio estimado para grau 16: 16 15 14 13 12 11 -11.68 x - 1.907 x + 7.873 x + 14.24 x + 14.19 x + 6.382 x 10 9 8 7 6 5 4 - 7.42 x - 21.17 x - 25.09 x - 10.05 x + 21.99 x + 43.96 x + 6.021 x 3 2 - 81.62 x + 52.95 x - 9.752 x + 0.8831 lambda = 1.00e-03 Polinômio estimado para grau 16: 16 15 14 13 12 -0.1991 x - 0.03173 x + 0.1641 x + 0.3778 x + 0.5899 x 11 10 9 8 7 6 + 0.7688 x + 0.8655 x + 0.8092 x + 0.5056 x - 0.1493 x - 1.21 x 5 4 3 2 - 2.509 x - 3.256 x - 1.494 x + 4.364 x - 0.3416 x + 0.4424 lambda = 1.00e+00 Polinômio estimado para grau 16: 16 15 14 13 12 -0.07194 x - 0.08353 x - 0.09695 x - 0.1125 x - 0.1303 x 11 10 9 8 7 6 - 0.1507 x - 0.1737 x - 0.1991 x - 0.2262 x - 0.253 x - 0.2758 x 5 4 3 2 - 0.2865 x - 0.2698 x - 0.197 x - 0.02395 x + 0.2536 x + 0.6222 lambda = 1.00e+01 Polinômio estimado para grau 16: 16 15 14 13 12 -0.03822 x - 0.04265 x - 0.04763 x - 0.05321 x - 0.05946 x 11 10 9 8 7 - 0.06643 x - 0.07416 x - 0.08262 x - 0.09173 x - 0.1012 x 6 5 4 3 2 - 0.1103 x - 0.1179 x - 0.1214 x - 0.1158 x - 0.09251 x - 0.0412 x + 0.6399 lambda = 1.00e+02 Polinômio estimado para grau 16: 16 15 14 13 12 -0.007084 x - 0.00789 x - 0.008794 x - 0.009809 x - 0.01095 x 11 10 9 8 7 - 0.01222 x - 0.01364 x - 0.01521 x - 0.01694 x - 0.01879 x 6 5 4 3 2 - 0.0207 x - 0.02253 x - 0.02397 x - 0.02439 x - 0.02253 x - 0.01594 x + 0.4948
MIT
regressao_linear/Overfitting_Demo_Ridge.ipynb
luizcz/aprendizado_maquina
Using cross-validation to find the best lambda for ridge regression

The function below computes the RMSE (root mean squared error) of a given model on each of the k folds (the cv parameter of scikit-learn's cross_val_score function).
from sklearn.model_selection import cross_val_score

def rmse_cv(model):
    rmse = np.sqrt(-cross_val_score(model, X, Y, scoring="neg_mean_squared_error", cv=10))
    return (rmse)
_____no_output_____
MIT
regressao_linear/Overfitting_Demo_Ridge.ipynb
luizcz/aprendizado_maquina
Create a ridge regression model
model_ridge = Ridge()
_____no_output_____
MIT
regressao_linear/Overfitting_Demo_Ridge.ipynb
luizcz/aprendizado_maquina
Plot the results (mean RMSE) for each value of alpha (or lambda)
l2_penalties = [0.001, 0.01, 0.1, 0.3, 0.5, 1, 3, 5, 10, 15, 20, 40, 60, 80, 100]
cv_ridge = [rmse_cv(Ridge(alpha=l2_penalty)).mean() for l2_penalty in l2_penalties]
cv_ridge = pd.Series(cv_ridge, index=l2_penalties)
cv_ridge.plot(title="Lambda vs Erro de Validação")
plt.xlabel("l2_penalty")
plt.ylabel("rmse")

best_l2_penalty = cv_ridge.argmin()
best_rmse = cv_ridge.min()
print(best_l2_penalty, best_rmse)  # best (alpha, rmse) pair found

model = polynomial_ridge_regression(X, Y, deg=16, l2_penalty=best_l2_penalty)
print_coefficients(model)
print_poly_predictions(X, Y, model)
_____no_output_____
MIT
regressao_linear/Overfitting_Demo_Ridge.ipynb
luizcz/aprendizado_maquina
Lasso regression

Lasso regression simultaneously shrinks the magnitude of the coefficients to avoid overfitting and implicitly performs feature selection by setting some coefficients to zero (for sufficiently large values of lambda, here called "L1_penalty"). In particular, the Lasso adds to the RSS a cost on $\|w\|$.

Function that fits a polynomial regression of any degree with Lasso regression.
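As a sketch of the corresponding objective, with the same assumed notation as before and $\lambda$ now playing the role of the `l1_penalty`, the squared L2 penalty is replaced by an L1 penalty, which is what drives some coefficients exactly to zero (note that scikit-learn's `Lasso` additionally scales the squared-error term by $1/(2N)$, so its `alpha` is not directly comparable to the ridge `alpha` above):

$$\hat{w}^{\text{lasso}} = \arg\min_{w} \; \sum_{i=1}^{N}\Big(y_i - w_0 - \sum_{j=1}^{d} w_j x_i^{\,j}\Big)^2 + \lambda \sum_{j=1}^{d} |w_j|$$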
def polynomial_lasso_regression(X, Y, deg, l1_penalty):
    model = make_pipeline(PolynomialFeatures(deg), Lasso(alpha=l1_penalty, max_iter=10000))
    # X = data['X'][:,np.newaxis]  # reshape into a matrix for LinearRegression
    model.fit(X, Y)
    return model
_____no_output_____
MIT
regressao_linear/Overfitting_Demo_Ridge.ipynb
luizcz/aprendizado_maquina
Explore the lasso solution as a function of different penalty factors

We refer to the lasso penalty factor as "l1_penalty".
for l1_penalty in [0.0001, 0.001, 0.01, 0.1, 10]:
    model = polynomial_lasso_regression(X, Y, deg=16, l1_penalty=l1_penalty)
    print('l1_penalty = %e' % l1_penalty)
    w = list(model.steps[1][1].coef_)
    print('número de não zeros = %d' % np.count_nonzero(w))
    print_coefficients(model)
    print('\n')
    plt.figure()
    print_poly_predictions(X, Y, model)
    #plt.title('LASSO, lambda = %.2e, # nonzeros = %d' % l1_penalty, np.count_nonzero(w))
l1_penalty = 1.000000e-04 número de não zeros = 5 Polinômio estimado para grau 16: 11 10 4 2 1.626 x + 1.074 x - 7.667 x + 4.545 x - 0.4504 x + 0.4478 l1_penalty = 1.000000e-03 número de não zeros = 3 Polinômio estimado para grau 16: 5 4 -0.181 x - 2.886 x + 1.373 x + 0.3354 l1_penalty = 1.000000e-02 número de não zeros = 2 Polinômio estimado para grau 16: 5 -1.96 x + 0.2618 x + 0.628 l1_penalty = 1.000000e-01 número de não zeros = 0 Polinômio estimado para grau 16: 0.4527 l1_penalty = 1.000000e+01 número de não zeros = 0 Polinômio estimado para grau 16: 0.4527
MIT
regressao_linear/Overfitting_Demo_Ridge.ipynb
luizcz/aprendizado_maquina
BTC - Gender BiasFinder
import pandas as pd import numpy as np import math import time base_dir = "../../data/biasfinder/gender/" df = pd.read_csv(base_dir + "test.csv", header=None, sep="\t", names=["label", "mutant", "template", "original", "gender", "template_id"]) df.drop_duplicates() def read_txt(fpath): pred = [] file = open(fpath) lines = file.readlines() for l in lines : pred.append(int(l)) file.close() return pred output_dir = "biasfinder/gender" result_dir = "../../result/" + output_dir + "/" path = result_dir + "results_data.txt" pred = read_txt(path) print(len(pred)) df["prediction"] = pred df
_____no_output_____
Apache-2.0
codes/gender/BTC.ipynb
yangzhou6666/BiasHeal
Use Groupby to Group the text by Template
gb = df.groupby("template_id") gb.count() len(gb.size()) df id = 0 df.iloc[id]["mutant"] df.iloc[id]["original"] df.iloc[id]["template"]
_____no_output_____
Apache-2.0
codes/gender/BTC.ipynb
yangzhou6666/BiasHeal
Get the template-only DataFrame
df dft = df.iloc[:,[2,3,5]] dft = dft.drop_duplicates() dft # ## template dft = dft.sort_values(by=["template_id"]) dft = dft.reset_index(drop=True) dft ## mutant df = df.reset_index(drop=True) df dft
_____no_output_____
Apache-2.0
codes/gender/BTC.ipynb
yangzhou6666/BiasHeal
Get Number of Discordant Pairs for Each Template

There is a memory limitation that prevents us from directly producing the roughly 240M pairs. Fortunately, the number of discordant pairs for each template can be calculated theoretically, without crossing the data to materialize those ~240M pairs. This solves the memory issue.

For each template, we will also keep one example of a male mutant and a female mutant for the user study.
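As a small illustration of that counting (the numbers and variable names below are hypothetical, not taken from the data): if, within one template, the mutants of one gender received `pos_a` positive and `neg_a` negative predictions, and the mutants of the other gender received `pos_b` and `neg_b`, then the discordant (bias-uncovering) pairs and the total candidate pairs follow directly from those four counts.

```python
# Hypothetical per-template prediction counts for the two gender groups.
pos_a, neg_a = 12, 3   # e.g. "male" mutants: 12 predicted positive, 3 negative
pos_b, neg_b = 9, 6    # e.g. "female" mutants: 9 positive, 6 negative

# A pair is discordant when the two mutants get different predictions.
btc = pos_a * neg_b + neg_a * pos_b                  # 12*6 + 3*9 = 99 bias-uncovering pairs
possible_pairs = (pos_a + neg_a) * (pos_b + neg_b)   # 15 * 15 = 225 candidate pairs

print(btc, possible_pairs, btc / possible_pairs)
```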
gb = df.groupby("template_id")
gb.count()
_____no_output_____
Apache-2.0
codes/gender/BTC.ipynb
yangzhou6666/BiasHeal
Data crossing
import time start = time.time() identifier = "gender" mutant_example = [] mutant_prediction_stat = [] key = [] for i in range(len(gb.size())) : # for i in range(10) : data = gb.get_group(i) dc = data.groupby(identifier) me = {} # mutant example mp = {} # mutant prediction key = [] for k, v in dict(iter(dc)).items() : key.append(k) is_first_instance = True pos_counter = 0 # positive counter neg_counter = 0 # negative counter for m, p in zip(v["mutant"].values, v["prediction"].values) : if is_first_instance : me[k] = m is_first_instance = False if int(p) == 1 : pos_counter += 1 else : neg_counter += 1 mp[k] = {"pos": pos_counter, "neg" : neg_counter} mutant_example.append(me) mutant_prediction_stat.append(mp) end = time.time() print("Execution time: ", end-start) dft["mutant_example"] = mutant_example dft["mutant_prediction_stat"] = mutant_prediction_stat dft key btcs = [] pairs = [] for mp in dft["mutant_prediction_stat"].values : if len(mp) > 0 : btc = 0 pair = 0 already_processed = [] for k1 in key : for k2 in key : if k1 != k2 : k = k1 + "-" + k2 if k1 > k2 : k = k2 + "-" + k1 if k not in already_processed : already_processed.append(k) btc += ((mp[k1]["pos"] * mp[k2]["neg"]) + (mp[k1]["neg"] * mp[k2]["pos"])) pair += (mp[k1]["pos"] + mp[k1]["neg"]) * (mp[k2]["pos"] + mp[k2]["neg"]) # double_counting_divider = len(key) * (len(key)-1) # dp.append(int(_dp/double_counting_divider)) # we must divide the number with the number of key to reduce the double counting btcs.append(btc) pairs.append(pair) else : btcs.append(0) pairs.append(0) dft["btc"] = btcs dft["possible_pair"] = pairs dft
_____no_output_____
Apache-2.0
codes/gender/BTC.ipynb
yangzhou6666/BiasHeal
Number of Bias-Uncovering Test Cases
int(dft["btc"].sum())
_____no_output_____
Apache-2.0
codes/gender/BTC.ipynb
yangzhou6666/BiasHeal
BTC Rate
dft["btc"].sum() / dft["possible_pair"].sum()
_____no_output_____
Apache-2.0
codes/gender/BTC.ipynb
yangzhou6666/BiasHeal
Get templates that have at least one BTC
d = dft[dft["btc"] > 0]
d
_____no_output_____
Apache-2.0
codes/gender/BTC.ipynb
yangzhou6666/BiasHeal
Sort Data based on the number of BTC
d = d.sort_values(["btc", "template"], ascending=False)
d = d.reset_index(drop=True)
d
_____no_output_____
Apache-2.0
codes/gender/BTC.ipynb
yangzhou6666/BiasHeal
Get BTC data for training and testing
df template_that_produce_btc = d["template_id"].tolist() # template_that_produce_btc start = time.time() mutant_text_1 = [] mutant_text_2 = [] prediction_1 = [] prediction_2 = [] identifier_1 = [] identifier_2 = [] template = [] original = [] label = [] for i in template_that_produce_btc: # only processing from template that produce BTC data = gb.get_group(i) dc = data.groupby(identifier) already_processed = [] for k1, v1 in dict(iter(dc)).items() : for k2, v2 in dict(iter(dc)).items() : if k1 != k2 : key = k1 + "-" + k2 if k1 > k2 : key = k2 + "-" + k1 if key not in already_processed : already_processed.append(key) for m_1, p_1, i_1, t, o, l in zip(v1["mutant"].values, v1["prediction"].values, v1[identifier].values, v1["template"].values, v1["original"].values, v1["label"].values) : for m_2, p_2, i_2 in zip(v2["mutant"].values, v2["prediction"].values, v2[identifier].values) : if p_1 != p_2 : # only add discordant pairs mutant_text_1.append(m_1) prediction_1.append(p_1) identifier_1.append(i_1) mutant_text_2.append(m_2) prediction_2.append(p_2) identifier_2.append(i_2) template.append(t) label.append(l) original.append(o) end = time.time() print("Execution time: ", end-start) btc = pd.DataFrame(data={"mutant_1" : mutant_text_1, "mutant_2" : mutant_text_2, "prediction_1": prediction_1, "prediction_2" : prediction_2, "identifier_1": identifier_1, "identifier_2" : identifier_2, "template": template, "original": original, "label": label}) btc btc = btc.sample(frac=1, random_state=123) texts = [] templates = [] labels = [] original = [] for index, rows in btc.iterrows(): original.append(rows["original"]) texts.append(rows["original"]) texts.append(rows["mutant_1"]) texts.append(rows["mutant_2"]) templates.append(rows["template"]) labels.append(rows["label"]) # texts user_study = pd.DataFrame(data={"text":texts, "sentiment": None, "is_make_sense": None, "comment": None}) df_template = pd.DataFrame(data={"template":templates}) df_ori = pd.DataFrame(data={"label" :label, "original": original}) # df_ori.drop_duplicates().to_csv("btc_original.csv") df_ori user_study df_template user_study[:1200].to_csv("../../user_study/TSE/gender-unlabelled.csv") # template.to_csv("template_gender.csv") import os base_dir = "../../data/btc/biasfinder/gender/" if not os.path.exists(base_dir) : os.makedirs(base_dir) btc.to_csv(base_dir + "original.csv", index=None) m1 = btc.iloc[:,[-1,0]] m1 = m1.rename(columns={"mutant_1": "text"}) m2 = btc.iloc[:,[-1,1]] m2 = m2.rename(columns={"mutant_2": "text"}) m1 data = pd.concat([m1, m2]) data # data["text"] = data["text"].astype("category") # data["text_id"] = data["text"].cat.codes # data import os data_dir = base_dir + "full/" if not os.path.exists(data_dir) : os.makedirs(data_dir) # train = unique_data train = data.sample(frac=1, random_state=123) train.to_csv(data_dir + "train.csv", index=None, header=None, sep="\t") test = data test.to_csv(data_dir+ "test.csv", index=None, header=None, sep="\t") unique_data = data.drop_duplicates().reset_index(drop=True) unique_data unique_data[unique_data["label"] == 0] import os data_dir = base_dir + "unique/" if not os.path.exists(data_dir) : os.makedirs(data_dir) # train = unique_data train = unique_data.sample(frac=1, random_state=123) train.to_csv(data_dir + "train.csv", index=None, header=None, sep="\t") test = unique_data test.to_csv(data_dir+ "test.csv", index=None, header=None, sep="\t") len(train)
_____no_output_____
Apache-2.0
codes/gender/BTC.ipynb
yangzhou6666/BiasHeal
Balanced Data for Training
df_0 = unique_data[unique_data["label"] == 0] df_1 = unique_data[unique_data["label"] == 1] print(len(df_0)) print(len(df_1)) df_1 = df_1.sample(len(df_0), replace=True) df_oversampled = pd.concat([df_0, df_1], axis=0) df_oversampled data_dir = base_dir + "unique/balanced/" if not os.path.exists(data_dir) : os.makedirs(data_dir) # train = unique_data train = df_oversampled.sample(frac=1, random_state=123) train.to_csv(data_dir + "train.csv", index=None, header=None, sep="\t")
_____no_output_____
Apache-2.0
codes/gender/BTC.ipynb
yangzhou6666/BiasHeal
Live Human Pose Estimation with OpenVINO

This notebook demonstrates live pose estimation with OpenVINO. We use the OpenPose model [human-pose-estimation-0001](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/human-pose-estimation-0001) from [Open Model Zoo](https://github.com/openvinotoolkit/open_model_zoo/). At the bottom of this notebook, you will see live inference results from your webcam. You can also upload a video file.

> NOTE: _To use the webcam, you must run this Jupyter notebook on a computer with a webcam. If you run on a server, the webcam will not work. However, you can still do inference on a video in the final step._

Imports
import sys import collections import os import time import cv2 import numpy as np from IPython import display from numpy.lib.stride_tricks import as_strided from openvino import inference_engine as ie from decoder import OpenPoseDecoder sys.path.append("../utils") import notebook_utils as utils
_____no_output_____
Apache-2.0
notebooks/402-pose-estimation-webcam/402-pose-estimation.ipynb
scalers-ai/openvino_notebooks
The model

Download the model

We use `omz_downloader`, which is a command line tool from the `openvino-dev` package. `omz_downloader` automatically creates a directory structure and downloads the selected model.

If you want to download another model, please change the model name and precision. *Note: this will require a different pose decoder.*
# directory where model will be downloaded base_model_dir = "model" # model name as named in Open Model Zoo model_name = "human-pose-estimation-0001" # selected precision (FP32, FP16, FP16-INT8) precision = "FP16-INT8" model_path = f"model/intel/{model_name}/{precision}/{model_name}.xml" model_weights_path = f"model/intel/{model_name}/{precision}/{model_name}.bin" if not os.path.exists(model_path): download_command = f"omz_downloader " \ f"--name {model_name} " \ f"--precision {precision} " \ f"--output_dir {base_model_dir}" ! $download_command
_____no_output_____
Apache-2.0
notebooks/402-pose-estimation-webcam/402-pose-estimation.ipynb
scalers-ai/openvino_notebooks
Load the model

Downloaded models are located in a fixed structure, which indicates vendor, model name and precision.

Only a few lines of code are required to run the model. First, we create an Inference Engine object. Then we read the network architecture and model weights from the .bin and .xml files and load them onto the desired device.
# initialize inference engine ie_core = ie.IECore() # read the network and corresponding weights from file net = ie_core.read_network(model=model_path, weights=model_weights_path) # load the model on the CPU (you can use GPU or MYRIAD as well) exec_net = ie_core.load_network(net, "CPU") # get input and output names of nodes input_key = list(exec_net.input_info)[0] output_keys = list(exec_net.outputs.keys()) # get input size height, width = exec_net.input_info[input_key].tensor_desc.dims[2:]
_____no_output_____
Apache-2.0
notebooks/402-pose-estimation-webcam/402-pose-estimation.ipynb
scalers-ai/openvino_notebooks
Input key is the name of the input node and output keys contain names of output nodes of the network. In the case of the OpenPose Model, we have one input and two outputs: pafs and keypoints heatmap.
input_key, output_keys
_____no_output_____
Apache-2.0
notebooks/402-pose-estimation-webcam/402-pose-estimation.ipynb
scalers-ai/openvino_notebooks
Processing

OpenPoseDecoder

We need a decoder to transform the raw results from the neural network into pose estimations. This magic happens inside the OpenPose decoder, which is provided in the [Open Model Zoo](https://github.com/openvinotoolkit/open_model_zoo/blob/master/demos/common/python/openvino/model_zoo/model_api/models/open_pose.py) and is compatible with the `human-pose-estimation-0001` model.

If you choose a model other than `human-pose-estimation-0001`, you will need another decoder (e.g. AssociativeEmbeddingDecoder), which is available in the [demos section](https://github.com/openvinotoolkit/open_model_zoo/blob/master/demos/common/python/openvino/model_zoo/model_api/models/hpe_associative_embedding.py) of Open Model Zoo.
decoder = OpenPoseDecoder()
_____no_output_____
Apache-2.0
notebooks/402-pose-estimation-webcam/402-pose-estimation.ipynb
scalers-ai/openvino_notebooks
Process Results

A bunch of useful functions to transform results into poses.

First, we will pool the heatmap. Since pooling is not available in numpy, we use a simple method to do it directly with numpy. Then, we use non-maximum suppression to get the keypoints from the heatmap. After that, we decode poses using the decoder. Since the input image is bigger than the network outputs, we need to multiply all pose coordinates by a scaling factor.
# 2d pooling in numpy (from: htt11ps://stackoverflow.com/a/54966908/1624463) def pool2d(A, kernel_size, stride, padding, pool_mode="max"): """ 2D Pooling Parameters: A: input 2D array kernel_size: int, the size of the window stride: int, the stride of the window padding: int, implicit zero paddings on both sides of the input pool_mode: string, 'max' or 'avg' """ # Padding A = np.pad(A, padding, mode="constant") # Window view of A output_shape = ( (A.shape[0] - kernel_size) // stride + 1, (A.shape[1] - kernel_size) // stride + 1, ) kernel_size = (kernel_size, kernel_size) A_w = as_strided( A, shape=output_shape + kernel_size, strides=(stride * A.strides[0], stride * A.strides[1]) + A.strides ) A_w = A_w.reshape(-1, *kernel_size) # Return the result of pooling if pool_mode == "max": return A_w.max(axis=(1, 2)).reshape(output_shape) elif pool_mode == "avg": return A_w.mean(axis=(1, 2)).reshape(output_shape) # non maximum suppression def heatmap_nms(heatmaps, pooled_heatmaps): return heatmaps * (heatmaps == pooled_heatmaps) # get poses from results def process_results(img, results): pafs = results[output_keys[0]] heatmaps = results[output_keys[1]] # this processing comes from # https://github.com/openvinotoolkit/open_model_zoo/blob/master/demos/common/python/models/open_pose.py pooled_heatmaps = np.array( [[pool2d(h, kernel_size=3, stride=1, padding=1, pool_mode="max") for h in heatmaps[0]]] ) nms_heatmaps = heatmap_nms(heatmaps, pooled_heatmaps) # decode poses poses, scores = decoder(heatmaps, nms_heatmaps, pafs) output_shape = exec_net.outputs[output_keys[0]].shape output_scale = img.shape[1] / output_shape[3], img.shape[0] / output_shape[2] # multiply coordinates by scaling factor poses[:, :, :2] *= output_scale return poses, scores
_____no_output_____
Apache-2.0
notebooks/402-pose-estimation-webcam/402-pose-estimation.ipynb
scalers-ai/openvino_notebooks
Draw Pose Overlays

Draw pose overlays on the image to visualize estimated poses. Joints are drawn as circles and limbs are drawn as lines. The code is based on the [Human Pose Estimation Demo](https://github.com/openvinotoolkit/open_model_zoo/tree/master/demos/human_pose_estimation_demo/python) from Open Model Zoo.
colors = ((255, 0, 0), (255, 0, 255), (170, 0, 255), (255, 0, 85), (255, 0, 170), (85, 255, 0), (255, 170, 0), (0, 255, 0), (255, 255, 0), (0, 255, 85), (170, 255, 0), (0, 85, 255), (0, 255, 170), (0, 0, 255), (0, 255, 255), (85, 0, 255), (0, 170, 255)) default_skeleton = ((15, 13), (13, 11), (16, 14), (14, 12), (11, 12), (5, 11), (6, 12), (5, 6), (5, 7), (6, 8), (7, 9), (8, 10), (1, 2), (0, 1), (0, 2), (1, 3), (2, 4), (3, 5), (4, 6)) def draw_poses(img, poses, point_score_threshold, skeleton=default_skeleton): if poses.size == 0: return img img_limbs = np.copy(img) for pose in poses: points = pose[:, :2].astype(np.int32) points_scores = pose[:, 2] # Draw joints. for i, (p, v) in enumerate(zip(points, points_scores)): if v > point_score_threshold: cv2.circle(img, tuple(p), 1, colors[i], 2) # Draw limbs. for i, j in skeleton: if points_scores[i] > point_score_threshold and points_scores[j] > point_score_threshold: cv2.line(img_limbs, tuple(points[i]), tuple(points[j]), color=colors[j], thickness=4) cv2.addWeighted(img, 0.4, img_limbs, 0.6, 0, dst=img) return img
_____no_output_____
Apache-2.0
notebooks/402-pose-estimation-webcam/402-pose-estimation.ipynb
scalers-ai/openvino_notebooks
Main Processing Function

Run pose estimation on the specified source, either a webcam or a video file.
# main processing function to run pose estimation def run_pose_estimation(source=0, flip=False, use_popup=False, skip_first_frames=0): player = None try: # create video player to play with target fps player = utils.VideoPlayer(source, flip=flip, fps=30, skip_first_frames=skip_first_frames) # start capturing player.start() if use_popup: title = "Press ESC to Exit" cv2.namedWindow(title, cv2.WINDOW_GUI_NORMAL | cv2.WINDOW_AUTOSIZE) processing_times = collections.deque() while True: # grab the frame frame = player.next() if frame is None: print("Source ended") break # if frame larger than full HD, reduce size to improve the performance scale = 1280 / max(frame.shape) if scale < 1: frame = cv2.resize(frame, None, fx=scale, fy=scale, interpolation=cv2.INTER_AREA) # resize image and change dims to fit neural network input # (see https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/human-pose-estimation-0001) input_img = cv2.resize(frame, (width, height), interpolation=cv2.INTER_AREA) # create batch of images (size = 1) input_img = input_img.transpose(2, 0, 1)[np.newaxis, ...] # measure processing time start_time = time.time() # get results results = exec_net.infer(inputs={input_key: input_img}) stop_time = time.time() # get poses from network results poses, scores = process_results(frame, results) # draw poses on a frame frame = draw_poses(frame, poses, 0.1) processing_times.append(stop_time - start_time) # use processing times from last 200 frames if len(processing_times) > 200: processing_times.popleft() _, f_width = frame.shape[:2] # mean processing time [ms] processing_time = np.mean(processing_times) * 1000 fps = 1000 / processing_time cv2.putText(frame, f"Inference time: {processing_time:.1f}ms ({fps:.1f} FPS)", (20, 40), cv2.FONT_HERSHEY_COMPLEX, f_width / 1000, (0, 0, 255), 1, cv2.LINE_AA) # use this workaround if there is flickering if use_popup: cv2.imshow(title, frame) key = cv2.waitKey(1) # escape = 27 if key == 27: break else: # encode numpy array to jpg _, encoded_img = cv2.imencode(".jpg", frame, params=[cv2.IMWRITE_JPEG_QUALITY, 90]) # create IPython image i = display.Image(data=encoded_img) # display the image in this notebook display.clear_output(wait=True) display.display(i) # ctrl-c except KeyboardInterrupt: print("Interrupted") # any different error except RuntimeError as e: print(e) finally: if player is not None: # stop capturing player.stop() if use_popup: cv2.destroyAllWindows()
_____no_output_____
Apache-2.0
notebooks/402-pose-estimation-webcam/402-pose-estimation.ipynb
scalers-ai/openvino_notebooks
Run Run Live Pose Estimation. Run using a webcam as the video input. By default, the primary webcam is set with `source=0`. If you have multiple webcams, each one will be assigned a consecutive number starting at 0. Set `flip=True` when using a front-facing camera. Some web browsers, especially Mozilla Firefox, may cause flickering. If you experience flickering, set `use_popup=True`. *Note: To use this notebook with a webcam, you need to run the notebook on a computer with a webcam. If you run the notebook on a server (e.g. Binder), the webcam will not work.* *Note: Popup mode may not work if you run this notebook on a remote computer (e.g. Binder).*
run_pose_estimation(source=0, flip=True, use_popup=False)
_____no_output_____
Apache-2.0
notebooks/402-pose-estimation-webcam/402-pose-estimation.ipynb
scalers-ai/openvino_notebooks
Run Pose Estimation on a Video File. If you don't have a webcam, you can still run this demo with a video file. Any format supported by OpenCV will work (see: https://docs.opencv.org/4.5.1/dd/d43/tutorial_py_video_display.html). You can skip the first N frames to fast-forward the video.
video_file = "https://github.com/intel-iot-devkit/sample-videos/blob/master/store-aisle-detection.mp4?raw=true" run_pose_estimation(video_file, flip=False, use_popup=False, skip_first_frames=500)
_____no_output_____
Apache-2.0
notebooks/402-pose-estimation-webcam/402-pose-estimation.ipynb
scalers-ai/openvino_notebooks
Awesome basics that you can't live without when using Scikit-learn
import sklearn
_____no_output_____
MIT
notebooks/Untitled.ipynb
eleijonmarck/analytics-workflow-showcase
All the methods within scikit-learn that you might want to explore and import when applicable
(sklearn.__dict__)['__all__']
_____no_output_____
MIT
notebooks/Untitled.ipynb
eleijonmarck/analytics-workflow-showcase
2017 French presidential elections. My aim was to highlight differences between Emmanuel Macron and Marine Le Pen, the two candidates who went to the second round of the 2017 French presidential elections. I downloaded transcripts of the speeches that the two candidates delivered from the 1st of January 2017 to the 1st of May 2017. In total: * Macron: 31 transcripts available out of 31 speeches * Le Pen: 25 transcripts available (transcripts: 21, subtitles: 4) out of 35 speeches. Sources: * Macron: https://en-marche.fr/articles/discours * Le Pen: http://www.frontnational.com/categorie/discours/ ![image](https://github.com/AurelieDaviaud/2017-French-presidential-elections/blob/master/LePen-Macron.png "Le Pen vs Macron") Create word clouds. We can create word clouds to visualize the main words used by each candidate.
import os import numpy as np import nltk from nltk.corpus import stopwords import string import re import copy from string import digits ## Load data listFiles = os.listdir("~/Presidentielles2017/Data")
_____no_output_____
MIT
2017-French-presidential-elections.ipynb
AurelieDaviaud/2017-French-presidential-elections
Preprocess the speeches. First, we have to preprocess the speeches to keep only the important words: we clean up irregularities (i.e. change to lowercase and remove punctuation) and remove stop words. We also have to prepare a list of French stopwords (as comprehensive as possible, by combining several existing lists of stopwords).
## Prepare stop words stopw = open("stopwords-fr1.txt", "r").read() months = ["janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"] n = re.compile('\n') stopw = n.sub(' ', stopw) stopw = nltk.word_tokenize(stopw) stopw = stopw + stopwords.words('french') + months
_____no_output_____
MIT
2017-French-presidential-elections.ipynb
AurelieDaviaud/2017-French-presidential-elections
Some transcripts of Le Pen's speeches are actually subtitles, so we also have to remove the timestamps and any brackets, which usually contain sound effects. Moreover, the transcripts of Le Pen's and Macron's speeches do not have the same format, so we are going to use two different functions to process the documents. Function to preprocess subtitles and speeches of Le Pen (part of the function has been adapted from http://sapir.psych.wisc.edu/wiki/index.php/Extracting_and_analyzing_subtitles)
def cleanLP(str, subtitle=False): timestamp = re.compile('^\d+\n?.*\n?', re.MULTILINE) # finds line numbers and the line after them (which usually houses timestamps) brackets = re.compile('\[[^]]*\]\n?|\([^)]*\)\n?|<[^>]*>\n?|\{[^}]*\}\n?') # finds brackets and anything in between them (sound effects) opensubs = re.compile('.*subtitles.*\n?|.*subs.*\n?', re.IGNORECASE) # finds the opensubtitles signature urls = re.compile('www.*\s\n?|[^\s]*\. ?com\n?') # finds any urls r = re.compile('\r') # gets rid of \r n = re.compile('\n') # finds newlines punctuation = re.compile("[^\w\s']") # finds punctuation if subtitle: str = timestamp.sub('', str) str = brackets.sub('', str) str = opensubs.sub('', str) str = urls.sub('', str) str = str.lower() # change to lowercase str = r.sub('', str) # remove \r str = n.sub(' ', str) # remove newlines str = punctuation.sub(' ', str) # remove punctuation str = str.replace("'", " ") # remove apostrophes remove_digits = str.maketrans('', '', digits) # remove digits str = str.translate(remove_digits) tokens = nltk.word_tokenize(str) # tokenize (i.e create a list of words) tokens = [w for w in tokens if not w in stopw] return tokens
_____no_output_____
MIT
2017-French-presidential-elections.ipynb
AurelieDaviaud/2017-French-presidential-elections
Function to preprocess speeches of Macron
def cleanMac(str): brackets = re.compile('\[[^]]*\]\n?|\([^)]*\)\n?|<[^>]*>\n?|\{[^}]*\}\n?') # finds brackets and anything in between them (sound effects) opensubs = re.compile('.*str.*\n?|.*subs.*\n?', re.IGNORECASE) # finds the opensubtitles signature urls = re.compile('www.*\s\n?|[^\s]*\. ?com\n?') # finds any urls r = re.compile('\r') # finds rid of \r n = re.compile('\n') # finds newlines punctuation = re.compile("[^\w\s']") # finds punctuation str = '\n'.join(str.split('\n')[9:]) # remove 9th first lines str = brackets.sub('', str) str = opensubs.sub('', str) str = urls.sub('', str) str = str.replace("Seul le prononcé fait foi. page ", "") # remove words included in the footer and header of the transcript str = str.replace("en-marche.fr", "") str = str.replace("Discours d’Emmanuel Macron", "") str = str.replace("Aller plus loin", "") str = str.replace("Téléchargez la fiche avec les propositions >", "") str = str.replace("bit.ly/fichesynthèse-santé ", "") str = str.replace("Le replay >", "") str = str.replace("EnMarche/videos/", "") str = str.replace("facebook.com", "") str = str.replace("Suivez Emmanuel Macron ", "") str = str.replace("\x0c", "") str = str.lower() # change to lowercase str = r.sub('', str) # remove \r str = n.sub(' ', str) # remove newlines str = punctuation.sub(' ', str) # remove punctuation str = str.replace("'", " ") # remove apostrophes remove_digits = str.maketrans('', '', digits) # remove digits str = str.translate(remove_digits) tokens = nltk.word_tokenize(str) # tokenize (i.e create a list of words) tokens = [w for w in tokens if not w in stopw] return tokens
_____no_output_____
MIT
2017-French-presidential-elections.ipynb
AurelieDaviaud/2017-French-presidential-elections
Preprocess the speeches
tokenMacTot = [] tokenLPTot = [] for file in listFiles: str = open(file, "r").read() if "MACRON" in file: tokenMac = cleanMac(str) tokenMacTot = tokenMacTot + tokenMac if "Le Pen" in file: if "Subtitle" in file: tokenLP = cleanLP(str, subtitle=True) tokenLPTot = tokenLPTot + tokenLP else: tokenLP = cleanLP(str, subtitle=False) tokenLPTot = tokenLPTot + tokenLP
_____no_output_____
MIT
2017-French-presidential-elections.ipynb
AurelieDaviaud/2017-French-presidential-elections
Store the tokens
tokens_mac_file = open('tokens_mac.txt', 'w') for item in tokenMacTot: tokens_mac_file.write("%s\n" % item) tokens_LP_file = open('tokens_LP.txt', 'w') for item in tokenLPTot: tokens_LP_file.write("%s\n" % item)
_____no_output_____
MIT
2017-French-presidential-elections.ipynb
AurelieDaviaud/2017-French-presidential-elections
Analyse the data. Let's see whether the number of distinct words used by each candidate is similar.
# Macron tokenMacUni = set(tokenMacTot) len(tokenMacUni) # Le Pen tokenLPUni = set(tokenLPTot) len(tokenLPUni)
_____no_output_____
MIT
2017-French-presidential-elections.ipynb
AurelieDaviaud/2017-French-presidential-elections
Macron seems to have a vocabulary that is a bit less varied than Le Pen's. Count words. Now, we have to compute the frequency of each word for each candidate to create the word clouds.
from collections import Counter # Macron n = re.compile('\n') tokenMacTot = open("tokens_mac.txt", "r").read() tokenMacTot = n.sub(' ', tokenMacTot) tokenMacTot = nltk.word_tokenize(tokenMacTot) mac = Counter(tokenMacTot) mac_most = mac.most_common(n=100) # Le Pen tokenLPTot = open("tokens_LP.txt", "r").read() tokenLPTot = n.sub(' ', tokenLPTot) tokenLPTot = nltk.word_tokenize(tokenLPTot) LP = Counter(tokenLPTot) LP_most = LP.most_common(n=100)
_____no_output_____
MIT
2017-French-presidential-elections.ipynb
AurelieDaviaud/2017-French-presidential-elections
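Before building the clouds, it can help to glance at the raw counts. A quick sketch using the `mac_most` and `LP_most` lists computed above (my addition, not in the original notebook):

```python
# Peek at the (word, count) pairs for each candidate
print("Macron:", mac_most[:10])
print("Le Pen:", LP_most[:10])
```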
Create word clouds
import wordcloud from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator from PIL import Image import matplotlib.pyplot as plt str_mac = open("tokens_mac.txt", "r").read() str_lp = open("tokens_LP.txt", "r").read() ## Generate word clouds mac = WordCloud(background_color="white", collocations= False, font_path='C:/Windows/Fonts/Verdana.ttf') wordcloud_mac = mac.generate(str_mac) lp = WordCloud(background_color="white", collocations= False, font_path='C:/Windows/Fonts/Verdana.ttf') wordcloud_lp = lp.generate(str_lp) ## Show word clouds plt.figure() plt.imshow(wordcloud_mac, interpolation='bilinear') plt.axis("off") plt.title("Macron") plt.show() plt.figure() plt.imshow(wordcloud_lp, interpolation='bilinear') plt.axis("off") plt.title("Le Pen") plt.show()
_____no_output_____
MIT
2017-French-presidential-elections.ipynb
AurelieDaviaud/2017-French-presidential-elections
The two candidates seem to frequently use the same words: France/français/française, Europe/européen/européenne, pays/nation/république, sécurité, monde, économie/économique, territoire... Well, that's not very surprising in a presidential election. This kind of word cloud may therefore not be the best strategy to highlight their differences. However, we can already spot some differences: the words "Fillon" and "Macron" appear in Le Pen's speeches, whereas no name appears among the most frequent words used by Macron. Indeed, Le Pen is well known for strongly criticizing her opponents. We will delve further into these differences. But first, let's make our word clouds a bit nicer. We can use the picture of each candidate as a mask for the word clouds.
## Generate mask (load and format image) (NB: background must be transparent) # Macron mac_im = Image.open("F:/Boulot/00-DataScience/Portfolio/Presidentielles2017/macronNB.png") mac_mask = Image.new("RGB", mac_im.size, (255,255,255)) mac_mask.paste(mac_im, mac_im) mac_mask = np.array(mac_mask) # Le Pen lp_im = Image.open("F:/Boulot/00-DataScience/Portfolio/Presidentielles2017/lepenBleu.png") lp_mask = Image.new("RGB", lp_im.size, (255,255,255)) lp_mask.paste(lp_im, lp_im) lp_mask = np.array(lp_mask) ## Generate word clouds with mask mac_mask = WordCloud(background_color="white", collocations= False, font_path='C:/Windows/Fonts/Verdana.ttf', mask=mac_mask) wordcloud_mac_mask = mac_mask.generate(str_mac) lp_mask = WordCloud(background_color="white", collocations= False, font_path='C:/Windows/Fonts/Verdana.ttf', mask=lp_mask) wordcloud_lp_mask = lp_mask.generate(str_lp) ## Show word clouds plt.figure() plt.imshow(wordcloud_mac_mask, interpolation='bilinear') plt.axis("off") plt.title("Macron") plt.show() plt.figure() plt.imshow(wordcloud_lp_mask, interpolation='bilinear') plt.axis("off") plt.title("Le Pen") plt.show()
_____no_output_____
MIT
2017-French-presidential-elections.ipynb
AurelieDaviaud/2017-French-presidential-elections
Now, let's see whether we can color the words using the colors of the French flag: blue, white and red.
import random def BBR_color_func(word, font_size, position, orientation, random_state=None, **kwargs): return "%s" % random.choice(["hsl(240, 100%, 25%)", "hsl(0, 0%, 100%)", "hsl(0, 100%, 50%)"]) ## Generate mask (load and format image) (NB: background must be transparent) # Macron mac_im = Image.open("F:/Boulot/00-DataScience/Portfolio/Presidentielles2017/macronNB.png") mac_mask = Image.new("RGB", mac_im.size, (255,255,255)) mac_mask.paste(mac_im, mac_im) mac_mask = np.array(mac_mask) # Le Pen lp_im = Image.open("F:/Boulot/00-DataScience/Portfolio/Presidentielles2017/lepenBleu.png") lp_mask = Image.new("RGB", lp_im.size, (255,255,255)) lp_mask.paste(lp_im, lp_im) lp_mask = np.array(lp_mask) ## Generate word clouds with mask and coloring from French flag mac = WordCloud(background_color="black", color_func=BBR_color_func, random_state=3, relative_scaling=0.5, collocations= False, font_path='C:/Windows/Fonts/Verdana.ttf', mask=mac_mask) wordcloud_mac = mac.generate(str_mac) lp = WordCloud(background_color="black", color_func=BBR_color_func, random_state=3, relative_scaling=0.5, collocations= False, font_path='C:/Windows/Fonts/Verdana.ttf', mask=lp_mask) wordcloud_lp = lp.generate(str_lp) ## Show word clouds plt.figure() plt.imshow(wordcloud_mac, interpolation='bilinear') plt.axis("off") plt.title("Macron") plt.show() plt.figure() plt.imshow(wordcloud_lp, interpolation='bilinear') plt.axis("off") plt.title("Le Pen") plt.show() ## Store to file mac.to_file("~/macron_wc.png") mac.to_file("macron_wcBBR.png") lp.to_file("~/lepen_wc.png") lp.to_file("lepen_wcBBR.png")
_____no_output_____
MIT
2017-French-presidential-elections.ipynb
AurelieDaviaud/2017-French-presidential-elections
Task1
from math import exp INF = 10 EPS = 1e-8 ITERATIONS_NUM = 1000 class Differentiable: def __init__(self, derivatives): self.derivatives = derivatives def __call__(self, x): return self.derivatives[0](x) def grad(self): if (len(self.derivatives) == 1): raise Exception("no derivatives were provided") return Differentiable(self.derivatives[1:]) class Polynom: def __init__(self, coefs): self.coefs = coefs self._degree = len(coefs) - 1 def __call__(self, x): res = 0 for i, coef in enumerate(self.coefs): res += (x ** i) * coef return res def get_degree(self): return self._degree def grad(self): grad_coefs = [0] * self._degree for i in range(1, self._degree + 1): grad_coefs[i - 1] = self.coefs[i] * i return Polynom(grad_coefs) def bisec(p, l, r): assert p(r) * p(l) < 0 sign = 1 if p(r) > 0 else -1 while (r - l > EPS): m = (r + l) / 2 if (p(m) * sign > 0): r = m else: l = m return l def newton(p): x = 1 p_grad = p.grad() for i in range(ITERATIONS_NUM): x = x - p(x) / p_grad(x) return x def get_polynom(k, a): coefs = [0] * (k + 1) coefs[k] = 1 coefs[0] = -a return Polynom(coefs) p = get_polynom(2, 2) print(f'bisec: {bisec(p, 0, INF)}') print(f'newton: {newton(p)}')
bisec: 1.414213553071022 newton: 1.414213562373095
MIT
beliakov-artem/HW1.ipynb
Malkovsky/co-mkn-hw-2021
Task2
def get_roots(p): if (p.get_degree() == 1): return [-p.coefs[0] / p.coefs[1]] a = [-INF] a += get_roots(p.grad()) a.append(INF) roots = [] for i in range(len(a) - 1): roots.append(bisec(p, a[i], a[i + 1])) return roots def get_polynom_by_roots(roots): coefs = [0] * (len(roots) + 1) for mask in range(1 << len(roots)): product = 1 bits = 0 for i in range(len(roots)): if ((mask >> i) & 1): product *= - roots[i] bits += 1 coefs[len(roots) - bits] += product return Polynom(coefs) get_roots(get_polynom_by_roots([1, 2, 3, 4, 5]))
_____no_output_____
MIT
beliakov-artem/HW1.ipynb
Malkovsky/co-mkn-hw-2021
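As a side note (my addition, not part of the original homework): `get_roots` always produces `deg(p)` candidate values, so coincident roots show up as near-duplicates. A minimal sketch that collapses them, assuming the candidates are sorted (they are, since the intervals passed to `bisec` are ordered) and using an ad hoc tolerance:

```python
def merge_close_roots(roots, tol=1e-6):
    """Collapse near-duplicate roots in a sorted list of candidates."""
    merged = []
    for r in roots:
        if not merged or abs(r - merged[-1]) > tol:
            merged.append(r)
    return merged
```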
Clearly, this code does not work in the most general formulation of the problem. At the very least, it always returns as many roots as the degree of the polynomial. It completely fails when there are repeated roots. The derivative can also have repeated roots, but I really did not want to handle all of those cases. So there it is. Task03
def get_differentiable(a, b, c, d): def f(x): return exp(a * x) + exp(- b * x) + c * ((x - d) ** 2) def f_grad(x): return a * exp(a * x) - b * exp(- b * x) + 2 * c * (x - d) def f_grad2(x): return (a ** 2) * exp(a * x) + (b ** 2) * exp(- b * x) + 2 * c return Differentiable([f, f_grad, f_grad2]) f = get_differentiable(1, 1, 1, 1) def ternary_search(f): l = -INF r = INF while(r - l > EPS): m1 = l + ((r - l) / 3) m2 = l + (2 * (r - l) / 3) if (f(m1) > f(m2)): l = m1 else: r = m2 return l print("bisec: ", bisec(f.grad(), -INF, INF)) print("newton: ", newton(f.grad())) print("ternary: ", ternary_search(f))
bisec: 0.49007306806743145 newton: 0.4900730684805478 ternary: 0.49007306428116904
MIT
beliakov-artem/HW1.ipynb
Malkovsky/co-mkn-hw-2021
Required initial setup
!ls !pip install -r drive/'My Drive'/'KoGPT2-FineTuning_pre'/requirements.txt import os import sys sys.path.append('drive/My Drive/KoGPT2-FineTuning_pre') logs_base_dir = "runs" from jupyter_main_auto import main ctx= 'cuda' cachedir='~/kogpt2/' load_path = './gdrive/My Drive/KoGPT2-FineTuning_pre/checkpoint/KoGPT2_checkpoint_640000.tar' # path of the checkpoint to resume training from save_path = './gdrive/My Drive/KoGPT2-FineTuning_pre/checkpoint/' # path where the fine-tuned model will be saved data_file_path = './gdrive/My Drive/KoGPT2-FineTuning_pre/dataset/dataset.csv' # path of the dataset to train on
_____no_output_____
Apache-2.0
Colab.ipynb
andrewlaikhT/KoGPT2
Start model training
# Test that saving to Google Drive works from google.colab import drive drive.mount('/content/gdrive') f = open(save_path + 'KoGPT2_checkpoint_' + str(142) + '.tar', 'w') f.write("가자") f.close() main(load_path = load_path, data_file_path = data_file_path, save_path = './gdrive/My Drive/KoGPT2-FineTuning_pre/checkpoint/', summary_url = './gdrive/My Drive/KoGPT2-FineTuning_pre/runs/2020-07-20/', text_size = 500, new = 1, batch_size = 1)
_____no_output_____
Apache-2.0
Colab.ipynb
andrewlaikhT/KoGPT2
Decision trees and random forests
import numpy as np import pandas as pd from sklearn import model_selection as ms, tree, ensemble %matplotlib inline WHITES_URL = 'http://archive.ics.uci.edu/ml/machine-learning-databases/wine-quality/winequality-white.csv'
_____no_output_____
CC-BY-4.0
12_decision_trees_and_random_forests/notebooks/02_solutions.ipynb
JoseHJBlanco/ga-data-science
Read in the Wine Quality dataset.
whites = pd.read_csv(WHITES_URL, sep=';')
_____no_output_____
CC-BY-4.0
12_decision_trees_and_random_forests/notebooks/02_solutions.ipynb
JoseHJBlanco/ga-data-science
Train a decision tree for 'quality' limiting the depth to 3, and the minimum number of samples per leaf to 50.
X = whites.drop('quality', axis=1) y = whites.quality tree1 = tree.DecisionTreeRegressor(max_depth=3, min_samples_leaf=50) tree1.fit(X, y)
_____no_output_____
CC-BY-4.0
12_decision_trees_and_random_forests/notebooks/02_solutions.ipynb
JoseHJBlanco/ga-data-science
Export the tree for plotting.
tree.export_graphviz(tree1, 'tree1.dot', feature_names=X.columns)
_____no_output_____
CC-BY-4.0
12_decision_trees_and_random_forests/notebooks/02_solutions.ipynb
JoseHJBlanco/ga-data-science
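To actually view the exported tree, the `.dot` file can be rendered with Graphviz. A sketch assuming the `graphviz` Python package and the Graphviz binaries are available (neither is imported in the original notebook):

```python
import graphviz

# Render tree1.dot to tree1.png (cleanup removes the intermediate source copy)
with open('tree1.dot') as f:
    graphviz.Source(f.read()).render('tree1', format='png', cleanup=True)
```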
Define folds for cross-validation.
ten_fold_cv = ms.KFold(n_splits=10, shuffle=True)
_____no_output_____
CC-BY-4.0
12_decision_trees_and_random_forests/notebooks/02_solutions.ipynb
JoseHJBlanco/ga-data-science
Compute average MSE across folds.
mses = ms.cross_val_score(tree.DecisionTreeRegressor(max_depth=3, min_samples_leaf=50), X, y, scoring='neg_mean_squared_error', cv=ten_fold_cv) np.mean(-mses)
_____no_output_____
CC-BY-4.0
12_decision_trees_and_random_forests/notebooks/02_solutions.ipynb
JoseHJBlanco/ga-data-science
Train a random forest with 20 decision trees.
rf1 = ensemble.RandomForestRegressor(n_estimators=20) rf1.fit(X, y)
_____no_output_____
CC-BY-4.0
12_decision_trees_and_random_forests/notebooks/02_solutions.ipynb
JoseHJBlanco/ga-data-science
Investigate importances of predictors.
rf1.feature_importances_
_____no_output_____
CC-BY-4.0
12_decision_trees_and_random_forests/notebooks/02_solutions.ipynb
JoseHJBlanco/ga-data-science
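The raw array is easier to interpret when paired with the column names. A small sketch reusing `rf1` and `X` from above:

```python
import pandas as pd

# Pair each importance with its feature name and sort, most important first
importances = pd.Series(rf1.feature_importances_, index=X.columns)
print(importances.sort_values(ascending=False))
```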
Evaluate performance through cross-validation.
mses = ms.cross_val_score(ensemble.RandomForestRegressor(n_estimators=20), X, y, scoring='neg_mean_squared_error', cv=ten_fold_cv) np.mean(-mses)
_____no_output_____
CC-BY-4.0
12_decision_trees_and_random_forests/notebooks/02_solutions.ipynb
JoseHJBlanco/ga-data-science
What happens when you increase the number of trees to 50?
mses = ms.cross_val_score(ensemble.RandomForestRegressor(n_estimators=50), X, y, scoring='neg_mean_squared_error', cv=ten_fold_cv) np.mean(-mses)
_____no_output_____
CC-BY-4.0
12_decision_trees_and_random_forests/notebooks/02_solutions.ipynb
JoseHJBlanco/ga-data-science
Normalization. Use what you've learned to normalize case in the following text and remove punctuation!
text = "The first time you see The Second Renaissance it may look boring. Look at it at least twice and definitely watch part 2. It will change your view of the matrix. Are the human people the ones who started the war ? Is AI a bad thing ?" print(text)
The first time you see The Second Renaissance it may look boring. Look at it at least twice and definitely watch part 2. It will change your view of the matrix. Are the human people the ones who started the war ? Is AI a bad thing ?
MIT
Text Processing/normalization_practice.ipynb
maitreytalware/Natural-Language-Processing-Pipelines
Case Normalization
# Convert to lowercase text = text.lower() print(text)
the first time you see the second renaissance it may look boring. look at it at least twice and definitely watch part 2. it will change your view of the matrix. are the human people the ones who started the war ? is ai a bad thing ?
MIT
Text Processing/normalization_practice.ipynb
maitreytalware/Natural-Language-Processing-Pipelines
Punctuation Removal. Use the `re` library to remove punctuation with a regular expression (regex). Feel free to refer back to the video or Google to get your regular expression. You can learn more about regex [here](https://docs.python.org/3/howto/regex.html).
# Remove punctuation characters import re text = re.sub(r"[^a-zA-Z0-9]"," ",text) print(text)
the first time you see the second renaissance it may look boring look at it at least twice and definitely watch part 2 it will change your view of the matrix are the human people the ones who started the war is ai a bad thing
MIT
Text Processing/normalization_practice.ipynb
maitreytalware/Natural-Language-Processing-Pipelines
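Putting the two steps together into a single helper (a small sketch; the function name is my own):

```python
import re

def normalize(text):
    """Lowercase the text and replace punctuation with spaces."""
    text = text.lower()
    return re.sub(r"[^a-zA-Z0-9]", " ", text)

print(normalize("Is AI a bad thing ?"))
```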
End of distribution Imputation ==> Feature-Engine What is Feature-Engine? Feature-Engine is an open-source Python package that I created at the back of this course. - Feature-Engine includes all the feature engineering techniques described in the course - Feature-Engine works like Scikit-learn, so it is easy to learn - Feature-Engine allows you to apply specific engineering steps to specific feature subsets - Feature-Engine can be integrated with the Scikit-learn pipeline, allowing for smooth model building - **Feature-Engine allows you to design and store a feature engineering pipeline with bespoke procedures for different variable groups.** ------------------------------------------------------------------- Feature-Engine can be installed via pip ==> pip install feature-engine - Make sure you have installed feature-engine before running this notebook. For more information visit my website. In this demo we will use Feature-Engine to perform end-of-distribution (end-of-tail) imputation using the Ames House Price dataset. - To download the dataset visit the lecture **Datasets** in **Section 1** of the course.
import pandas as pd import numpy as np import matplotlib.pyplot as plt # to split the datasets from sklearn.model_selection import train_test_split from sklearn.pipeline import Pipeline # from feature-engine from feature_engine import missing_data_imputers as mdi # let's load the dataset with a selected group of variables cols_to_use = [ 'BsmtQual', 'FireplaceQu', 'LotFrontage', 'MasVnrArea', 'GarageYrBlt', 'SalePrice' ] data = pd.read_csv('../houseprice.csv', usecols=cols_to_use) data.head() data.isnull().mean()
_____no_output_____
BSD-3-Clause
notebooks/feature-engineering/Section-04-Missing-Data-Imputation/04.18-End-Tail-Imputation-Feature-Engine.ipynb
sophiabrandt/udemy-feature-engineering
All the predictor variables contain missing data.
# let's separate into training and testing set # first drop the target from the feature list cols_to_use.remove('SalePrice') X_train, X_test, y_train, y_test = train_test_split(data[cols_to_use], data['SalePrice'], test_size=0.3, random_state=0) X_train.shape, X_test.shape
_____no_output_____
BSD-3-Clause
notebooks/feature-engineering/Section-04-Missing-Data-Imputation/04.18-End-Tail-Imputation-Feature-Engine.ipynb
sophiabrandt/udemy-feature-engineering
Feature-Engine captures the numerical variables automatically
# we call the imputer from feature-engine # we specify whether we want to find the values using # the gaussian approximation or the inter-quartile range # proximity rule. # in addition we need to specify if we want the values placed at # the left or right tail imputer = mdi.EndTailImputer(distribution='gaussian', tail='right') # we fit the imputer imputer.fit(X_train) # we see that the imputer found the numerical variables to # impute with the end of distribution value imputer.variables # here we can see the values that will be used # to replace NA for each variable imputer.imputer_dict_ # and this is how those values were calculated, # as we learnt in the first notebooks of # this section X_train[imputer.variables].mean() + 3 * X_train[imputer.variables].std() # feature-engine returns a dataframe tmp = imputer.transform(X_train) tmp.head() # let's check that the numerical variables don't # contain NA any more tmp[imputer.variables].isnull().mean()
_____no_output_____
BSD-3-Clause
notebooks/feature-engineering/Section-04-Missing-Data-Imputation/04.18-End-Tail-Imputation-Feature-Engine.ipynb
sophiabrandt/udemy-feature-engineering
Feature-engine allows you to specify variable groups easily
# let's do the imputation again, but this time # over 2 of the 3 numerical variables # and using the proximity rule on the left tail imputer = mdi.EndTailImputer(distribution='skewed', tail='left', variables=['LotFrontage', 'MasVnrArea']) imputer.fit(X_train) # now the imputer uses only the variables we indicated imputer.variables # and we can see the value assigned to each variable imputer.imputer_dict_ # feature-engine returns a dataframe tmp = imputer.transform(X_train) # let's check null values are gone tmp[imputer.variables].isnull().mean()
_____no_output_____
BSD-3-Clause
notebooks/feature-engineering/Section-04-Missing-Data-Imputation/04.18-End-Tail-Imputation-Feature-Engine.ipynb
sophiabrandt/udemy-feature-engineering
Feature-engine can be used with the Scikit-learn pipeline
# let's look at the distributions to determine the # end tail value selection method X_train.hist()
_____no_output_____
BSD-3-Clause
notebooks/feature-engineering/Section-04-Missing-Data-Imputation/04.18-End-Tail-Imputation-Feature-Engine.ipynb
sophiabrandt/udemy-feature-engineering
All variables are skewed. For this demo, I will use the proximity rule for GarageYrBlt and MasVnrArea, and the Gaussian approximation for LotFrontage.
pipe = Pipeline([ ('imputer_skewed', mdi.EndTailImputer(distribution='skewed', tail='right', variables=['GarageYrBlt', 'MasVnrArea'])), ('imputer_gaussian', mdi.EndTailImputer(distribution='gaussian', tail='right', variables=['LotFrontage'])), ]) pipe.fit(X_train) pipe.named_steps['imputer_skewed'].imputer_dict_ pipe.named_steps['imputer_gaussian'].imputer_dict_ # let's transform the data with the pipeline tmp = pipe.transform(X_train) # let's check null values are gone tmp.isnull().mean()
_____no_output_____
BSD-3-Clause
notebooks/feature-engineering/Section-04-Missing-Data-Imputation/04.18-End-Tail-Imputation-Feature-Engine.ipynb
sophiabrandt/udemy-feature-engineering
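The fitted pipeline can be applied to the test set in exactly the same way. A sketch assuming `pipe` and `X_test` from the cells above:

```python
# Apply the imputation values learned on the training set to the test set
tmp_test = pipe.transform(X_test)

# The imputed columns should no longer contain missing values
print(tmp_test[['GarageYrBlt', 'MasVnrArea', 'LotFrontage']].isnull().mean())
```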
[NumPyro](https://github.com/pyro-ppl/numpyro) is a probabilistic programming language built on top of JAX. It is very similar to [Pyro](https://pyro.ai/), which is built on top of PyTorch, but [tends to be faster](https://stackoverflow.com/questions/61846620/numpyro-vs-pyro-why-is-former-100x-faster-and-when-should-i-use-the-latter). (Both Pyro flavors are usually also [faster than PyMC3](https://www.kaggle.com/s903124/numpyro-speed-benchmark).) This colab gives a brief introduction (WIP). Installation
# Standard Python libraries from __future__ import absolute_import, division, print_function, unicode_literals import os import time #import numpy as np #np.set_printoptions(precision=3) import glob import matplotlib.pyplot as plt import PIL import imageio from IPython import display %matplotlib inline import sklearn import seaborn as sns; sns.set(style="ticks", color_codes=True) import pandas as pd pd.set_option('precision', 2) # 2 decimal places pd.set_option('display.max_rows', 20) pd.set_option('display.max_columns', 30) pd.set_option('display.width', 100) # wide windows import jax import jax.numpy as np import numpy as onp # original numpy print("jax version {}".format(jax.__version__)) print("jax backend {}".format(jax.lib.xla_bridge.get_backend().platform)) # https://github.com/pyro-ppl/numpyro !pip install numpyro # It seems that numpyro installs jaxlib for CPU #https://github.com/pyro-ppl/numpyro/issues/531 import jax import jax.numpy as np import numpy as onp # original numpy from jax import random print("jax version {}".format(jax.__version__)) print("jax backend {}".format(jax.lib.xla_bridge.get_backend().platform))
jax version 0.2.7 jax backend gpu
MIT
notebooks/numpyro_intro.ipynb
khanshehjad/pyprobml
Distributions
import numpyro import numpyro.distributions as dist from numpyro.diagnostics import hpdi from numpyro.distributions.transforms import AffineTransform from numpyro.infer import MCMC, NUTS, Predictive rng_key = random.PRNGKey(0) rng_key, rng_key_ = random.split(rng_key)
_____no_output_____
MIT
notebooks/numpyro_intro.ipynb
khanshehjad/pyprobml
1d Gaussian
# 2 independent 1d gaussians (ie 1 diagonal Gaussian) mu = 1.5 sigma = 2 d = dist.Normal(mu, sigma) dir(d) rng_key, rng_key_ = random.split(rng_key) nsamples = 1000 ys = d.sample(rng_key_, (nsamples,)) print(ys.shape) mu_hat = np.mean(ys,0) print(mu_hat) sigma_hat = np.std(ys, 0) print(sigma_hat)
(1000,) 1.5070927 2.0493808
MIT
notebooks/numpyro_intro.ipynb
khanshehjad/pyprobml
Multivariate Gaussian
mu = np.array([-1, 1]) sigma = np.array([1, 2]) Sigma = np.diag(sigma) d2 = dist.MultivariateNormal(mu, Sigma) #rng_key, rng_key_ = random.split(rng_key) nsamples = 1000 ys = d2.sample(rng_key_, (nsamples,)) print(ys.shape) mu_hat = np.mean(ys,0) print(mu_hat) Sigma_hat = np.cov(ys, rowvar=False) #jax.np.cov not implemented print(Sigma_hat)
(1000, 2) [-1.0127413 1.0091063] [[ 0.9770031 -0.00533966] [-0.00533966 1.9718108 ]]
MIT
notebooks/numpyro_intro.ipynb
khanshehjad/pyprobml
Shape semantics. Numpyro, [Pyro](https://pyro.ai/examples/tensor_shapes.html) and [TFP](https://www.tensorflow.org/probability/examples/Understanding_TensorFlow_Distributions_Shapes) all distinguish between 'event shape' and 'batch shape'. For a D-dimensional Gaussian, the event shape is (D,), and the batch shape will be (), meaning we have a single instance of this distribution. If the covariance is diagonal, we can instead view this as D independent 1d Gaussians stored along the batch dimension; this representation has event shape () but batch shape (D,). When we sample from a distribution, we also specify the sample_shape. Suppose we draw N samples from a single D-dim diagonal Gaussian, and N samples from D 1d Gaussians. These samples will have the same shape. However, the semantics of log_prob differ. We illustrate this below.
d2 = dist.MultivariateNormal(mu, Sigma) print(f'event shape {d2.event_shape}, batch shape {d2.batch_shape}') nsamples = 3 ys2 = d2.sample(rng_key_, (nsamples,)) print('samples, shape {}'.format(ys2.shape)) print(ys2) # 2 independent 1d gaussians (note: Normal takes scales, while Sigma's diagonal holds variances, so this is not numerically identical to d2) d3 = dist.Normal(mu, np.diag(Sigma)) print(f'event shape {d3.event_shape}, batch shape {d3.batch_shape}') ys3 = d3.sample(rng_key_, (nsamples,)) print('samples, shape {}'.format(ys3.shape)) print(ys3) print(np.allclose(ys2, ys3)) y = ys2[0,:] # 2 numbers print(d2.log_prob(y)) # log prob of a single 2d distribution on 2d input print(d3.log_prob(y)) # log prob of two 1d distributions on 2d input
-2.6185904 [-1.35307 -1.6120898]
MIT
notebooks/numpyro_intro.ipynb
khanshehjad/pyprobml
We can turn a set of independent distributions into a single productdistribution using the [Independent class](http://num.pyro.ai/en/stable/distributions.htmlindependent)
d4 = dist.Independent(d3, 1) # treat the first batch dimension as an event dimension print(d4.event_shape) print(d4.batch_shape) print(d4.log_prob(y))
(2,) () -2.96516
MIT
notebooks/numpyro_intro.ipynb
khanshehjad/pyprobml
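The relationship can be checked numerically: the log-density of the product distribution is just the sum of the per-dimension log-densities. A quick sketch reusing `d3`, `d4`, and `y` from above:

```python
# Independent sums the component log-probabilities over the reinterpreted dimension
print(np.allclose(d4.log_prob(y), d3.log_prob(y).sum()))  # expected: True
```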
Posterior inference with MCMC Example: 1d Gaussian with unknown mean. We use the simple example from the [Pyro intro](https://pyro.ai/examples/intro_part_ii.html#A-Simple-Example). The goal is to infer the weight $\theta$ of an object, given noisy measurements $y$. We assume the following model:$$\begin{align}\theta &\sim N(\mu=8.5, \tau^2=1.0)\\ y &\sim N(\theta, \sigma^2=0.75^2)\end{align}$$ where $\mu=8.5$ is the initial guess. By Bayes' rule for Gaussians, we know that the exact posterior, given a single observation $y=9.5$, is given by$$\begin{align}\theta|y &\sim N(m, s^2) \\ m &=\frac{\sigma^2 \mu + \tau^2 y}{\sigma^2 + \tau^2} = \frac{0.75^2 \times 8.5 + 1 \times 9.5}{0.75^2 + 1^2} = 9.14 \\ s^2 &= \frac{\sigma^2 \tau^2}{\sigma^2 + \tau^2} = \frac{0.75^2 \times 1^2}{0.75^2 + 1^2}= 0.6^2\end{align}$$
mu = 8.5; tau = 1.0; sigma = 0.75; y = 9.5 m = (sigma**2 * mu + tau**2 * y)/(sigma**2 + tau**2) s2 = (sigma**2 * tau**2)/(sigma**2 + tau**2) s = np.sqrt(s2) print(m) print(s) def model(prior_mean, prior_sd, obs_sd, measurement=None): theta = numpyro.sample("theta", dist.Normal(prior_mean, prior_sd)) return numpyro.sample("y", dist.Normal(theta, obs_sd), obs=measurement) nuts_kernel = NUTS(model) mcmc = MCMC(nuts_kernel, num_warmup=100, num_samples=1000) mcmc.run(rng_key_, mu, tau, sigma, y) mcmc.print_summary() samples = mcmc.get_samples()
_____no_output_____
MIT
notebooks/numpyro_intro.ipynb
khanshehjad/pyprobml
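As a quick sanity check (a sketch assuming `samples`, `m`, and `s` from the cells above), the MCMC posterior mean and standard deviation should be close to the analytic values:

```python
theta_samples = samples['theta']
print(theta_samples.mean(), theta_samples.std())  # should be close to m and s
print(m, s)                                       # analytic posterior: about 9.14 and 0.6
```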
Interactive Example 1. Run GaMMA in a terminal or use the QuakeFlow API. Note: Please only use the QuakeFlow API for debugging and testing on small datasets. Do not run large jobs using the QuakeFlow API; the computational cost can be high for us. ```bash uvicorn --app-dir=gamma app:app --reload --port 8001 ```
import requests import json import pandas as pd import os # GAMMA_API_URL = "http://127.0.0.1:8001" GAMMA_API_URL = "http://gamma.quakeflow.com"
_____no_output_____
MIT
docs/example_interactive.ipynb
wayneweiqiang/GMMA
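Since the uvicorn command above serves an ASGI app (typically FastAPI for the QuakeFlow services), a quick connectivity check can be done before sending real requests. This is a hypothetical sketch; consult the GaMMA documentation for the actual endpoints and payload schema:

```python
# Hypothetical connectivity check; the /docs path assumes a FastAPI app
resp = requests.get(GAMMA_API_URL + "/docs")
print(resp.status_code)  # 200 means the service is reachable
```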