import streamlit as st
import torch
from transformers import AutoTokenizer, AutoModelForQuestionAnswering
# Load the fine-tuned model and its tokenizer
model_path = 'trained_model'  # directory saved with save_pretrained() or a Hub model ID (name is illustrative)
tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModelForQuestionAnswering.from_pretrained(model_path)
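# Note: from_pretrained() expects a directory (or Hub model ID) written by
# save_pretrained(); a raw state-dict file such as 'trained.pt' is not enough.
# A hedged sketch of how such a directory might be produced after fine-tuning
# elsewhere (paths are assumptions, not part of this app):
#     model.save_pretrained('trained_model')
#     tokenizer.save_pretrained('trained_model')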
# Define a function that extracts the model's answer to a question from a context
def get_answer(context, question):
    encoding = tokenizer.encode_plus(question, context, return_tensors='pt')
    input_ids = encoding['input_ids']
    attention_mask = encoding['attention_mask']
    # Run the model without tracking gradients; the output holds start/end logits
    with torch.no_grad():
        outputs = model(input_ids, attention_mask=attention_mask)
    start_scores = outputs.start_logits
    end_scores = outputs.end_logits
    # Take the most likely start and end positions and decode the span between them
    start_index = torch.argmax(start_scores)
    end_index = torch.argmax(end_scores)
    answer_tokens = input_ids[0][start_index:end_index + 1]
    answer = tokenizer.decode(answer_tokens, skip_special_tokens=True)
    return answer
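# Example call (hypothetical values; the actual span returned depends on the
# fine-tuned model and tokenizer):
#     get_answer("The Eiffel Tower is located in Paris, France.",
#                "Where is the Eiffel Tower located?")
#     # -> a span such as "paris, france"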
# Set up the Streamlit app
st.title("Question Answering with Transformers")
# Prompt the user for a context and a question (placeholders keep the fields
# empty so the validation below works)
context = st.text_area("Context:", placeholder="Enter the context here...")
question = st.text_input("Question:", placeholder="Enter your question here...")
# When the user clicks the "Answer" button, get the answer and display it
if st.button("Answer"):
    if not context or not question:
        st.error("Please provide both a context and a question.")
    else:
        answer = get_answer(context, question)
        st.success(f"Answer: {answer}")
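# To launch the app locally (assuming this file is saved as app.py):
#     streamlit run app.py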