import streamlit as st
from transformers import pipeline
import os
# Load the Hugging Face access token from secrets
access_token = os.getenv("HF_TOKEN") # Token is securely retrieved from secrets
# Model name
MODEL_NAME = "Alaaeldin/pubmedBERT-demo"
@st.cache_resource
def load_pipeline():
    # Load the model and tokenizer with authentication
    # (`token` replaces the deprecated `use_auth_token` argument in recent transformers)
    return pipeline("text-generation", model=MODEL_NAME, tokenizer=MODEL_NAME, token=access_token)
# Load the pipeline
qa_pipeline = load_pipeline()
# Streamlit app UI
st.title("PubMed BERT Q&A App")
st.write("Ask questions directly based on the model's training!")
# User input for the question
question = st.text_input("Enter your question:")
# Button to get the answer
if st.button("Get Answer"):
    if question.strip():
        with st.spinner("Generating the answer..."):
            result = qa_pipeline(question, max_length=100, num_return_sequences=1)
            st.success(f"Answer: {result[0]['generated_text']}")
    else:
        st.warning("Please enter a question.")
# Footer
st.markdown("---")
st.markdown("Powered by **PubMed BERT** fine-tuned by [Alaaeldin](https://huggingface.co/Alaaeldin).")