# ai-interface / app.py
# eloi-goncalves's picture
# Update app.py
# 03113eb
# raw
# history blame
# 1.93 kB
from transformers import AutoModelForQuestionAnswering, AutoModelForSeq2SeqLM, AutoTokenizer, PegasusForConditionalGeneration, PegasusTokenizer, pipeline
import gradio as grad
import ast
# Extractive QA model: RoBERTa fine-tuned on SQuAD 2.0.
mdl_name = "deepset/roberta-base-squad2"
my_pipeline = pipeline('question-answering', model=mdl_name, tokenizer=mdl_name)
# English -> Brazilian Portuguese translation: M2M100 fine-tuned on KDE4.
model_translate_name = 'danhsf/m2m100_418M-finetuned-kde4-en-to-pt_BR'
model_translate = AutoModelForSeq2SeqLM.from_pretrained(model_translate_name)
model_translate_token = AutoTokenizer.from_pretrained(model_translate_name)
# High-level translation pipeline; currently unused (translate() below drives
# the raw model/tokenizer instead), kept as an alternative.
translate_pipeline = pipeline('translation', model=model_translate_name)
def answer_question(question, context):
    """Answer *question* from *context* with the extractive QA pipeline.

    Returns the pipeline result dict (keys: ``score``, ``start``, ``end``,
    ``answer``).
    """
    # Pass the inputs as a real dict instead of the original approach of
    # string-concatenating a Python literal and round-tripping it through
    # ast.literal_eval — that crashed whenever the question or context
    # contained a quote/apostrophe character.
    response = my_pipeline({'question': question, 'context': context})
    print('response', response)
    return response
def translate(text):
    """Translate English *text* to Brazilian Portuguese.

    Returns the decoded translation as a single string.
    """
    # Fix: the tokenizer keyword is `return_tensors` (plural). The original
    # `return_tensor` typo was silently ignored, so generate() received
    # plain Python lists instead of PyTorch tensors.
    inputs = model_translate_token(text, return_tensors='pt')
    translate_output = model_translate.generate(**inputs)
    # Fix: decode the generated token ids. The original *called* the
    # tokenizer on the output ids (encoding them again) instead of
    # calling .decode().
    response = model_translate_token.decode(translate_output[0], skip_special_tokens=True)
    #response = translate_pipeline(text)
    return response
# Abstractive summarization: PEGASUS fine-tuned on XSum.
# NOTE: mdl_name is rebound here; the QA pipeline above was already built
# from the previous value, so the reuse is harmless (if confusing).
mdl_name = "google/pegasus-xsum"
pegasus_tkn = PegasusTokenizer.from_pretrained(mdl_name)
mdl = PegasusForConditionalGeneration.from_pretrained(mdl_name)
def summarize(text):
    """Summarize *text* with PEGASUS-XSum.

    Returns a list of 5 candidate summaries (beam search, 10 beams).
    """
    encoded = pegasus_tkn(
        text, truncation=True, padding="longest", return_tensors="pt"
    )
    generated = mdl.generate(
        **encoded,
        num_return_sequences=5,
        max_length=200,
        temperature=1.5,
        num_beams=10,
    )
    return pegasus_tkn.batch_decode(generated, skip_special_tokens=True)
# Gradio UI: only the summarizer interface is launched; the QA and
# translation interfaces below are kept as commented-out alternatives
# (uncomment exactly one .launch() call).
txt=grad.Textbox(lines=10, label="English", placeholder="English Text here")
out=grad.Textbox(lines=10, label="Summary")
grad.Interface(summarize, inputs=txt, outputs=out).launch()
#grad.Interface(answer_question, inputs=["text","text"], outputs="text").launch()
# grad.Interface(translate, inputs=['text',], outputs='text').launch()