import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForQuestionAnswering

# Load the BERT model fine-tuned on SQuAD and its tokenizer
model_name = "google-bert/bert-large-uncased-whole-word-masking-finetuned-squad"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForQuestionAnswering.from_pretrained(model_name)


def answer_question(context, question):
    # Encode the question and context as a single sequence pair
    # (SQuAD-style models expect the question first)
    inputs = tokenizer(question, context, return_tensors="pt")

    # Run the model without tracking gradients (inference only)
    with torch.no_grad():
        outputs = model(**inputs)

    # The model scores every token as a candidate answer start/end
    start_scores, end_scores = outputs.start_logits, outputs.end_logits

    # Pick the most likely start and end token positions
    answer_start = torch.argmax(start_scores)
    answer_end = torch.argmax(end_scores) + 1

    # Decode only the span between the predicted positions
    answer = tokenizer.decode(
        inputs["input_ids"][0][answer_start:answer_end],
        skip_special_tokens=True,
    )
    return answer


# Define the Gradio interface
interface = gr.Interface(
    fn=answer_question,
    inputs=[gr.Textbox(label="Context"), gr.Textbox(label="Question")],
    outputs="text",
    title="Question Answering with BERT",
    description="Ask a question about the provided context and get an answer powered by a Google BERT model.",
)

# Launch the Gradio app
interface.launch()
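
# A quick sanity check of answer_question without the UI. This is a
# hypothetical example, not from the original script; the sample context,
# question, and expected answer are illustrative assumptions. Run it before
# interface.launch(), since launch() blocks the script.
#
#   answer = answer_question(
#       "The Eiffel Tower was completed in 1889 and stands in Paris.",
#       "When was the Eiffel Tower completed?",
#   )
#   print(answer)  # expected: something like "1889"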