File size: 1,379 Bytes
c34c47c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
import streamlit as st
import torch
from transformers import AutoTokenizer, AutoModelForQuestionAnswering

# Load the trained model
# NOTE(review): AutoTokenizer/AutoModel.from_pretrained expect a model
# *directory* (created via save_pretrained) or a Hub model id, not a raw
# .pt checkpoint file — confirm 'trained.pt' is actually such a directory.
model_path = 'trained.pt'
tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModelForQuestionAnswering.from_pretrained(model_path)

# Define a function to get the model's answer to a question
def get_answer(context, question):
    """Extract the answer span for *question* from *context*.

    Args:
        context: Passage of text to search for the answer.
        question: Natural-language question about the context.

    Returns:
        The decoded answer string (may be empty if the predicted end
        index precedes the start index).
    """
    encoding = tokenizer.encode_plus(question, context, return_tensors='pt')
    input_ids = encoding['input_ids']
    attention_mask = encoding['attention_mask']
    # Inference only — disable gradient tracking to save memory/compute.
    with torch.no_grad():
        outputs = model(input_ids, attention_mask=attention_mask)
    # transformers v4+ returns a QuestionAnsweringModelOutput object by
    # default, not a (start, end) tuple — read the logits explicitly.
    start_index = torch.argmax(outputs.start_logits)
    end_index = torch.argmax(outputs.end_logits)
    answer_tokens = input_ids[0][start_index:end_index + 1]
    # skip_special_tokens keeps [CLS]/[SEP] out of the displayed answer.
    answer = tokenizer.decode(answer_tokens, skip_special_tokens=True)
    return answer

# Set up the Streamlit app
st.title("Question Answering with Transformers")

# Prompt the user for a context and a question.
# BUG FIX: the second positional argument of text_area/text_input is the
# *default value*, so the widgets were never empty and the empty-input
# guard below could never fire (the model would answer against the hint
# text itself). Pass the hint as placeholder= instead.
context = st.text_area("Context:", placeholder="Enter the context here...")
question = st.text_input("Question:", placeholder="Enter your question here...")

# When the user clicks the "Answer" button, get the answer and display it
if st.button("Answer"):
    if not context or not question:
        st.error("Please provide both a context and a question.")
    else:
        answer = get_answer(context, question)
        st.success(f"Answer: {answer}")