# Streamlit Question Answering Web App
# (Hugging Face Spaces page residue removed from the top of this file.)
import streamlit as st
from transformers import AutoTokenizer, AutoModelForQuestionAnswering, pipeline
# Page header: title plus a short markdown description of what the app does.
st.title("Question Answering Web App")
st.write("""
### Powered by Hugging Face and Streamlit
This app uses a pre-trained NLP model from Hugging Face to answer questions based on the text you provide.
Try entering a context and a question to get an answer!
""")
# Load the tokenizer and model, cached so the (slow) download/initialization
# happens only once per server session rather than on every Streamlit rerun.
@st.cache_resource
def load_model():
    """Fetch and cache the CUAD-finetuned RoBERTa tokenizer and QA model.

    Returns:
        A ``(tokenizer, model)`` tuple ready to be handed to a pipeline.
    """
    qa_tokenizer = AutoTokenizer.from_pretrained("Rakib/roberta-base-on-cuad")
    qa_model = AutoModelForQuestionAnswering.from_pretrained("Rakib/roberta-base-on-cuad")
    return qa_tokenizer, qa_model


tokenizer, model = load_model()
# Define the question-answering pipeline, cached so it is built a single time
# and reused across Streamlit reruns.
@st.cache_resource
def get_qa_pipeline():
    """Return a cached Hugging Face QA pipeline wired to the loaded model."""
    qa = pipeline("question-answering", model=model, tokenizer=tokenizer)
    return qa


qa_pipeline = get_qa_pipeline()
# UI input for context and question
# The context is the passage the answer will be extracted from; the question
# is what the model will try to answer using only that passage.
context = st.text_area("Enter the context:", "Type the paragraph here where the answer will be extracted.")
question = st.text_input("Enter the question:", "What is being asked here?")
# Button to perform question answering.
# Runs the QA pipeline on the provided context/question and renders the
# extracted answer; warns instead if either input is empty.
if st.button("Answer Question"):
    if not context or not question:
        # Either field is blank — prompt the user rather than calling the model.
        st.warning("Please enter both context and question!")
    else:
        prediction = qa_pipeline(question=question, context=context)
        # Display the result
        st.subheader("Answer")
        st.write(f"**Answer:** {prediction['answer']}")
# Sidebar with About Information — static text describing the app and the model it uses.
st.sidebar.title("About")
st.sidebar.info("""
This app demonstrates the use of Hugging Face's NLP models with Streamlit.
It uses the `Rakib/roberta-base-on-cuad` model for question answering tasks.
""")