# Hugging Face Space app (page header residue removed: "Spaces: / Sleeping / Sleeping")
import os

import streamlit as st
from transformers import pipeline

# Hugging Face access token, read from the environment (Space secrets expose
# HF_TOKEN as an env var). May be None if unset — the hub client then falls
# back to anonymous requests, which fails only for gated/private repos.
access_token = os.getenv("HF_TOKEN")

# Hub repo id of the model this app serves.
MODEL_NAME = "Alaaeldin/pubmedBERT-demo"
@st.cache_resource
def load_pipeline():
    """Build and cache the text-generation pipeline for MODEL_NAME.

    `st.cache_resource` keeps one pipeline instance alive across Streamlit
    reruns, so the model is downloaded/loaded only once per server process
    instead of on every widget interaction.

    Returns:
        A `transformers` text-generation pipeline authenticated with the
        module-level `access_token` (anonymous when the token is None).
    """
    # `token=` replaces the deprecated `use_auth_token=` keyword.
    return pipeline(
        "text-generation",
        model=MODEL_NAME,
        tokenizer=MODEL_NAME,
        token=access_token,
    )
# Load the (cached) pipeline once at startup.
qa_pipeline = load_pipeline()

# --- Streamlit UI ---
st.title("PubMed BERT Q&A App")
st.write("Ask questions directly based on the model's training!")

# Free-text question from the user.
question = st.text_input("Enter your question:")

# Generate an answer only on explicit button press; guard against blank input.
if st.button("Get Answer"):
    if question.strip():
        with st.spinner("Generating the answer..."):
            result = qa_pipeline(question, max_length=100, num_return_sequences=1)
            # Pipeline returns a list of dicts; take the single requested sequence.
            st.success(f"Answer: {result[0]['generated_text']}")
    else:
        st.warning("Please enter a question.")

# Footer
st.markdown("---")
st.markdown("Powered by **PubMed BERT** fine-tuned by [Alaaeldin](https://huggingface.co/Alaaeldin).")