import gradio as gr
import torch
# Import the tokenizer and question-answering model classes from transformers
from transformers import AutoTokenizer, AutoModelForQuestionAnswering
# Define model and tokenizer
model_name = "google-bert/bert-large-uncased-whole-word-masking-finetuned-squad"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForQuestionAnswering.from_pretrained(model_name)
def answer_question(context, question):
    # Encode the question and context together (question first, as expected by SQuAD-finetuned BERT)
    inputs = tokenizer(question, context, return_tensors="pt")
    # Run the model to get start/end logits for the answer span
    with torch.no_grad():
        outputs = model(**inputs)
    # Pick the most likely start and end token positions
    start_index = torch.argmax(outputs.start_logits)
    end_index = torch.argmax(outputs.end_logits) + 1
    # Decode the predicted answer span back into a string
    answer_ids = inputs["input_ids"][0][start_index:end_index]
    answer = tokenizer.decode(answer_ids, skip_special_tokens=True)
    return answer
# Define the Gradio interface
interface = gr.Interface(
    fn=answer_question,
    inputs=[gr.Textbox(label="Context"), gr.Textbox(label="Question")],
    outputs="text",
    title="Question Answering with BERT",
    description="Ask a question about the provided context and get an answer powered by a Google BERT model.",
)
# Launch the Gradio app
interface.launch()