# app.py — Hugging Face Space by shawon100
# Updated with pretrained model (commit e492060, 1.01 kB)
from transformers import AutoModelForQuestionAnswering, AutoTokenizer, pipeline
import gradio as gr
# Extractive question-answering pipeline backed by a RoBERTa model
# fine-tuned on SQuAD 2.0 (same id for model weights and tokenizer).
_MODEL_ID = 'deepset/roberta-base-squad2'
nlp = pipeline('question-answering', model=_MODEL_ID, tokenizer=_MODEL_ID)
def qnamodel(question, context):
    """Answer *question* using only the information in *context*.

    Parameters
    ----------
    question : str
        Natural-language question to answer.
    context : str
        Paragraph the answer must be extracted from.

    Returns
    -------
    str
        The answer span extracted by the question-answering pipeline.
    """
    # Bug fix: the previous version overwrote both arguments with
    # hard-coded strings, so the values typed into the UI were ignored
    # and the app always returned the same answer.
    question_set = {'context': context, 'question': question}
    results = nlp(question_set)
    return results['answer']
# Default texts pre-filled in the UI; the user can edit both fields.
# (Bug fix: the original referenced undefined module-level names
# `context` and `question`, raising NameError at import time.)
DEFAULT_CONTEXT = "This procedure is used to guide technician to replace equipment EQ1. The procedure is valid in normal operation. There is different procedure for emergency situations. The replacement of EQ1 may be done by SE, MT, or MS."
DEFAULT_QUESTION = "Who can replace the equipment?"

# Gradio passes the input components to `fn` positionally.  The UI lists
# the context box first while qnamodel takes (question, context), so the
# lambda swaps the arguments into the order the function expects.
interface = gr.Interface(
    fn=lambda context, question: qnamodel(question, context),
    inputs=[gr.inputs.Textbox(lines=7, default=DEFAULT_CONTEXT, label="Context Paragraph"),
            gr.inputs.Textbox(lines=2, default=DEFAULT_QUESTION, label="Question")],
    outputs="text",
    title='Context Question Answering')
# inline=False: open the app in its own tab instead of embedding it.
interface.launch(inline=False)