import streamlit as st
import PyPDF2
from huggingface_hub import InferenceClient
# Initialize the Inference Client
client = InferenceClient("meta-llama/Llama-3.2-3B-Instruct")
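# Note: hosted inference for a gated model such as Llama 3.2 usually requires
# a Hugging Face access token, e.g. InferenceClient(model, token=...) or the
# HF_TOKEN environment variable.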
def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
    uploaded_pdf=None,
):
    messages = [{"role": "system", "content": system_message}]
    # Add previous conversation history to the messages
    for user_turn, assistant_turn in history:
        if user_turn:
            messages.append({"role": "user", "content": user_turn})
        if assistant_turn:
            messages.append({"role": "assistant", "content": assistant_turn})
    # Add the new user message to the request payload
    messages.append({"role": "user", "content": message})
    # If a PDF is uploaded, append its extracted text as additional context
    if uploaded_pdf is not None:
        file_content = extract_pdf_text(uploaded_pdf)
        if file_content:
            messages.append({"role": "user", "content": f"Document Content: {file_content}"})
    # Stream the response from the model, yielding the accumulated text
    response = ""
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content
        if token:  # the final stream chunk may carry no content
            response += token
            yield response
def extract_pdf_text(file):
    """Extract text from a PDF file."""
    try:
        reader = PyPDF2.PdfReader(file)
        text = ""
        for page in reader.pages:
            # extract_text() can return None for pages without a text layer
            text += page.extract_text() or ""
        return text
    except Exception as e:
        return f"Error extracting text from PDF: {e}"
# Streamlit UI
st.set_page_config(page_title="Health Assistant", layout="wide")
# Custom CSS for Streamlit app
st.markdown(
    """
    <style>
    body {
        background-color: #1e2a38; /* Dark blue background */
        color: #ffffff; /* White text for readability */
        font-family: 'Arial', sans-serif; /* Clean and modern font */
    }
    .stButton button {
        background-color: #42B3CE !important; /* Light blue button */
        color: #2e3b4e !important; /* Dark text for contrast */
        border: none !important;
        padding: 10px 20px !important;
        border-radius: 8px !important;
        font-size: 16px;
        font-weight: bold;
        transition: background-color 0.3s ease, transform 0.2s ease;
    }
    .stButton button:hover {
        background-color: #3189A2 !important; /* Darker blue on hover */
        transform: scale(1.05);
    }
    .stTextInput input {
        background-color: #2f3b4d;
        color: white;
        border: 2px solid #42B3CE;
        padding: 12px;
        border-radius: 8px;
        font-size: 16px;
        transition: border 0.3s ease;
    }
    .stTextInput input:focus {
        border-color: #3189A2;
    }
    </style>
    """,
    unsafe_allow_html=True,
)
# Title and description
st.title("Health Assistant Chat")
st.subheader("Chat with your health assistant and upload a document for analysis")
# System message for health-related responses
system_message = (
    "You are a virtual health assistant designed to provide accurate and reliable information "
    "related to health, wellness, and medical topics. Your primary goal is to assist users with "
    "their health-related queries, offer general guidance, and suggest when to consult a licensed "
    "medical professional. If a user asks a question that is unrelated to health, wellness, or medical "
    "topics, respond politely but firmly with: 'I'm sorry, I can't help with that because I am a virtual "
    "health assistant designed to assist with health-related needs. Please let me know if you have any health-related questions.'"
)
# Upload a PDF file
uploaded_pdf = st.file_uploader("Upload a PDF file (Optional)", type="pdf")
# User input message
message = st.text_input("Type your health-related question:")
# History for conversation tracking
if 'history' not in st.session_state:
    st.session_state['history'] = []
# Collect and display previous conversation history
history = st.session_state['history']
for user_message, assistant_message in history:
    st.markdown(f"**You:** {user_message}")
    st.markdown(f"**Assistant:** {assistant_message}")
# Max tokens, temperature, and top-p sliders
max_tokens = st.slider("Max new tokens", min_value=1, max_value=2048, value=512)
temperature = st.slider("Temperature", min_value=0.1, max_value=4.0, value=0.7, step=0.1)
top_p = st.slider("Top-p (nucleus sampling)", min_value=0.1, max_value=1.0, value=0.95, step=0.05)
# Button to generate response
if st.button("Generate Response"):
    if message:
        # Append the user's question to the conversation history
        st.session_state.history.append((message, ""))
        # Generate the response; pass the history *without* the just-appended
        # turn, since respond() adds the new message to the payload itself
        response = respond(
            message,
            st.session_state.history[:-1],
            system_message,
            max_tokens,
            temperature,
            top_p,
            uploaded_pdf,
        )
        # Stream the response into a single placeholder so the text updates
        # in place rather than printing one line per streamed chunk
        placeholder = st.empty()
        resp = ""
        for resp in response:
            placeholder.markdown(f"**Assistant:** {resp}")
        # Update the conversation history with the assistant's final response
        st.session_state.history[-1] = (message, resp)
    else:
        st.error("Please enter a question to proceed.")