shawon100's picture
Updated app.py with pretrained model
1416484
raw
history blame
602 Bytes
from transformers import AutoModelForQuestionAnswering, AutoTokenizer, pipeline
import gradio as gr
# Extractive question-answering pipeline; the same SQuAD2-tuned RoBERTa
# checkpoint supplies both the model weights and the tokenizer.
_CHECKPOINT = 'deepset/roberta-base-squad2'
nlp = pipeline('question-answering', model=_CHECKPOINT, tokenizer=_CHECKPOINT)
def qnamodel(ques, content):
    """Answer *ques* using *content* as the supporting context.

    Feeds the question/context pair to the module-level QA pipeline and
    returns only the extracted answer text (scores and spans are dropped).
    """
    prediction = nlp({'question': ques, 'context': content})
    return prediction['answer']
# Expose the QA function through a minimal two-textbox Gradio UI
# (question + context in, answer string out) and start the app.
interface = gr.Interface(
    fn=qnamodel,
    inputs=["text", "text"],
    outputs="text",
    title='Context Question Answering',
)
interface.launch(inline=False)