# Hugging Face Spaces demo: Russian text summarization with FRED-T5 + Gradio.
# (Header reconstructed from Spaces page residue — "Spaces: Sleeping" was UI text,
# not part of the program.)
import gradio as gr
import torch
from transformers import GPT2Tokenizer, T5ForConditionalGeneration

# FRED-T5 summarizer checkpoint; the model card specifies the GPT2 tokenizer
# with '</s>' as the end-of-sequence token.
tokenizer = GPT2Tokenizer.from_pretrained('RussianNLP/FRED-T5-Summarizer', eos_token='</s>')
model = T5ForConditionalGeneration.from_pretrained('RussianNLP/FRED-T5-Summarizer')

# CPU inference (Spaces free tier has no GPU).
device = 'cpu'
model.to(device)

# Task prefix prepended to every request ("<LM> Сократи текст." = "Shorten the text.").
input_text = "<LM> Сократи текст.\n "
def make_summarization(user_text):
    """Summarize Russian text with FRED-T5.

    Parameters
    ----------
    user_text : str
        Raw text to summarize; the module-level task prefix is prepended.

    Returns
    -------
    str
        The generated summary, decoded without special tokens.
    """
    processing_text = input_text + user_text
    input_ids = torch.tensor([tokenizer.encode(processing_text)]).to(device)
    outputs = model.generate(input_ids, eos_token_id=tokenizer.eos_token_id,
                             num_beams=3,
                             min_new_tokens=17,
                             max_new_tokens=200,
                             do_sample=True,    # NOTE(review): sampling makes output non-deterministic
                             no_repeat_ngram_size=4,
                             top_p=0.9)
    # Fix: the original `outputs[0][1:]` only dropped the leading decoder-start
    # token and left a trailing '</s>' in the user-facing summary;
    # skip_special_tokens removes both cleanly.
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
# Minimal Gradio UI: one text box in, one text box out.
demo = gr.Interface(fn=make_summarization, inputs="text", outputs="text")
# share=True exposes a public gradio.live link in addition to the local server.
demo.launch(share=True)