# Streamlit Space: Quora question-answering demo.
# (Scraped page metadata that previously sat here — Space status, file size,
# commit hashes, and a line-number gutter — was removed; it was not valid Python.)
import streamlit as st
from transformers import AutoModelForQuestionAnswering, AutoTokenizer
import torch
@st.cache_resource
def load_model():
    """Load the QA model and its tokenizer once per server process.

    `st.cache_resource` memoizes the pair, so reruns of the script reuse
    the already-loaded weights instead of hitting the Hub again.
    """
    repo_id = "YasirAbdali/roberta_qoura"  # Replace with your actual model path
    tok = AutoTokenizer.from_pretrained(repo_id)
    qa_model = AutoModelForQuestionAnswering.from_pretrained(repo_id)
    return qa_model, tok
def answer_question(question, model, tokenizer):
    """Run extractive question answering over the question text.

    NOTE(review): the model is given only the question — there is no separate
    context passage — so the "answer" span is extracted from the question's own
    tokens. Confirm this matches how the model was fine-tuned.

    Args:
        question: Raw question string typed by the user.
        model: A loaded ``AutoModelForQuestionAnswering`` (or compatible callable).
        tokenizer: The tokenizer matching ``model``.

    Returns:
        The decoded answer span, or ``""`` when the model predicts an
        inverted (end-before-start) span, i.e. no usable answer.
    """
    # No max_length padding: the attention mask already tells the model which
    # positions exist, and skipping padding avoids attending over up to 512
    # pad tokens per request.
    inputs = tokenizer(question, return_tensors="pt", max_length=512, truncation=True)

    with torch.no_grad():  # inference only — no gradients needed
        outputs = model(**inputs)

    # Most likely start/end token positions for the answer span.
    start_index = int(torch.argmax(outputs.start_logits))
    end_index = int(torch.argmax(outputs.end_logits))

    # Bug fix: the original sliced even when end < start, silently producing
    # an empty string. Make the "no valid span" case explicit.
    if end_index < start_index:
        return ""

    span_ids = inputs["input_ids"][0][start_index:end_index + 1]
    tokens = tokenizer.convert_ids_to_tokens(span_ids)
    return tokenizer.convert_tokens_to_string(tokens)
# --- Main page: title, model load, question form ---
st.title("Quora Question Answering")
model, tokenizer = load_model()
st.write("Enter a question, and the model will provide an answer based on its knowledge.")

question = st.text_area("Question")

if st.button("Get Answer"):
    # Guard clause: an empty text area means nothing to answer.
    if not question:
        st.write("Please provide a question.")
    else:
        st.write("Answer:", answer_question(question, model, tokenizer))
# Optional: Add some example questions
st.sidebar.header("Example Questions")
example_questions = [
    "What is the capital of France?",
    "Who wrote 'Romeo and Juliet'?",
    "What is the boiling point of water?",
    "What year did World War II end?",
]
for example in example_questions:
    if st.sidebar.button(example):
        # NOTE(review): this renders a NEW text_input in the main area rather
        # than populating the "Question" text_area above; pre-filling the form
        # would require st.session_state. Confirm the intended UX.
        # (A stray " |" scrape artifact was removed from the end of this line —
        # it was a syntax error.)
        st.text_input("Question", value=example)