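"""Streamlit demo: semantic similarity between two sentences.

Embeds each sentence with a fine-tuned DistilBERT masked-LM checkpoint
(mean-pooled last hidden states) and reports their cosine similarity.
Run with: streamlit run <this file>
"""
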
import streamlit as st
import torch
from sklearn.metrics.pairwise import cosine_similarity
from transformers import AutoModelForMaskedLM, AutoTokenizer

model_checkpoint = "vives/distilbert-base-uncased-finetuned-imdb-accelerate"

# output_hidden_states=True makes the model return every layer's activations; we
# mean-pool the last hidden layer into sentence embeddings (the MLM head is unused).
model = AutoModelForMaskedLM.from_pretrained(model_checkpoint, output_hidden_states=True)
tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)
text1 = st.text_area("Enter first sentence")
text2 = st.text_area("Enter second sentence")
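# Streamlit reruns this script top to bottom on every widget interaction;
# st.text_area returns the box's current contents (an empty string at first).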

def concat_tokens(t1, t2):
    """Tokenize both sentences and batch them into a single input dict."""
    tokens = {'input_ids': [], 'attention_mask': []}
    for sentence in [t1, t2]:
        # encode each sentence, padded/truncated to a fixed length of 128 tokens
        new_tokens = tokenizer.encode_plus(sentence, max_length=128,
                                           truncation=True, padding='max_length',
                                           return_tensors='pt')
        tokens['input_ids'].append(new_tokens['input_ids'][0])
        tokens['attention_mask'].append(new_tokens['attention_mask'][0])

    # stack the per-sentence tensors into single batched tensors
    tokens['input_ids'] = torch.stack(tokens['input_ids'])
    tokens['attention_mask'] = torch.stack(tokens['attention_mask'])
    return tokens
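
# concat_tokens returns tensors of shape (2, 128), one row per sentence, so the
# result can be unpacked straight into the model call below as model(**tokens).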

def pool_embeddings(out, tok):
    """Mean-pool the last hidden layer over real (non-padding) tokens."""
    embeddings = out["hidden_states"][-1]  # shape (batch, seq_len, hidden_dim)
    attention_mask = tok['attention_mask']
    # broadcast the mask across the hidden dimension so padding positions zero out
    mask = attention_mask.unsqueeze(-1).expand(embeddings.size()).float()
    masked_embeddings = embeddings * mask
    summed = torch.sum(masked_embeddings, 1)
    # clamp prevents division by zero if a row contains no real tokens
    summed_mask = torch.clamp(mask.sum(1), min=1e-9)
    mean_pooled = summed / summed_mask
    return mean_pooled
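
# Toy illustration of the pooling arithmetic: for token vectors
# [1, 1], [3, 3], [0, 0] with attention mask [1, 1, 0], the masked sum is
# [4, 4] over 2 real tokens, giving a mean-pooled embedding of [2, 2].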
  
if text1 and text2:
    tokens = concat_tokens(text1, text2)
    # inference only, so skip building the autograd graph
    with torch.no_grad():
        outputs = model(**tokens)
    mean_pooled = pool_embeddings(outputs, tokens).numpy()
    st.write(cosine_similarity(
        [mean_pooled[0]],
        mean_pooled[1:]
    ))
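
# cosine_similarity on one pair returns a (1, 1) array, so the app renders
# [[score]] with score in [-1, 1]; 1.0 means the two embeddings coincide.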