# BERT question-answering demo (Hugging Face Space, Gradio app)
import gradio as gr
import torch

# Import model/tokenizer classes from transformers
from transformers import AutoTokenizer, AutoModelForQuestionAnswering
# Define model and tokenizer | |
model_name = "google-bert/bert-large-uncased-whole-word-masking-finetuned-squad" | |
tokenizer = AutoTokenizer.from_pretrained(model_name) | |
model = AutoModelForQuestionAnswering.from_pretrained(model_name) | |
def answer_question(context, question): | |
# Encode the context and question | |
inputs = tokenizer(context, question, return_tensors="pt") | |
# Get answer tokens and convert them to string | |
answer = tokenizer.convert_ids_to_tokens(inputs["input_ids"][0]) | |
answer = "".join(answer) | |
return answer | |
# Define the Gradio interface | |
interface = gr.Interface( | |
fn=answer_question, | |
inputs=[gr.Textbox("Context"), gr.Textbox("Question")], | |
outputs="text", | |
title="Question Answering with BERT", | |
description="Ask a question about the provided context and get an answer powered by Google BERT model.", | |
) | |
# Launch the Gradio app | |
interface.launch() | |