import streamlit as st
import torch
import torch.nn as nn
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM


class Net(nn.Module):
    """Simple MLP head mapping a 768-dim sentence embedding to 8 arXiv categories."""

    def __init__(self):
        super().__init__()
        self.layer = nn.Sequential(
            nn.Linear(768, 768),
            nn.ReLU(),
            nn.Linear(768, 768),
            nn.ReLU(),
            nn.Linear(768, 8),
        )

    def forward(self, x):
        return self.layer(x)


def get_hidden_states(encoded, model):
    """Push input IDs through the model, stack and sum the last four decoder
    hidden-state layers, then average over the token dimension to obtain a
    single embedding vector for the whole input."""
    with torch.no_grad():
        output = model(decoder_input_ids=encoded['input_ids'],
                       output_hidden_states=True, **encoded)
    layers = [-4, -3, -2, -1]
    states = output['decoder_hidden_states']
    summed = torch.stack([states[i] for i in layers]).sum(0).squeeze()
    return summed.mean(dim=0)


def get_word_vector(sent, tokenizer, model):
    encoded = tokenizer.encode_plus(sent, return_tensors="pt")
    return get_hidden_states(encoded, model)


# Load the trained classifier head on CPU.
model = Net()
model.load_state_dict(torch.load('dummy_model.txt', map_location=torch.device('cpu')))
model.eval()

labels_articles = {
    1: "Computer Science",
    2: "Economics",
    3: "Electrical Engineering And Systems Science",
    4: "Mathematics",
    5: "Physics",
    6: "Quantitative Biology",
    7: "Quantitative Finance",
    8: "Statistics",
}

# Pretrained arXiv title-generation model, used here only as a feature extractor.
tokenizer = AutoTokenizer.from_pretrained("Callidior/bert2bert-base-arxiv-titlegen")
model_emb = AutoModelForSeq2SeqLM.from_pretrained("Callidior/bert2bert-base-arxiv-titlegen")

title = st.text_area("Write the title of your article")
summary = st.text_area("Write a summary of your article, or leave it empty "
                       "(either way, press Ctrl + Enter to submit)")

text = title + '. ' + summary
embed = get_word_vector(text, tokenizer, model_emb)
logits = torch.nn.functional.softmax(model(embed), dim=0)
best_tags = torch.argsort(logits, descending=True)

# Collect the most likely tags until their cumulative probability exceeds 0.95.
cum_prob = 0
res = ''
for tag in best_tags:
    if cum_prob > 0.95:
        break
    cum_prob += logits[tag.item()]
    new_tag = labels_articles[tag.item() + 1]  # labels dict is 1-indexed
    res += new_tag + '\n'

st.write('best tags = \n', res)
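
# Usage note (a sketch, assuming this script is saved as app.py; the filename
# is not given in the source): Streamlit apps are launched from the shell, e.g.
#   streamlit run app.py
# The first run downloads the Callidior/bert2bert-base-arxiv-titlegen weights
# from the Hugging Face Hub and expects 'dummy_model.txt' (the trained
# classifier state dict) in the working directory.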