"""
Diabetes Version
@aim: Demo for testing purposes only
@inquiries: Dr M As'ad
@email: drmohasad@gmail.com
"""
import os

import streamlit as st
from openai import OpenAI
from dotenv import load_dotenv

# Load environment variables from a local .env file, if present
load_dotenv()
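# A minimal .env sketch, assuming the same variable name the client lookup
# below uses (the token value here is a placeholder):
#
#   HUGGINGFACEHUB_API_TOKEN=hf_your_token_here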
# Initialize the OpenAI-compatible client against the Hugging Face Inference Endpoint
client = OpenAI(
    base_url="https://p7fw46eiw6xfkxvj.us-east-1.aws.endpoints.huggingface.cloud/v1/",
    api_key=os.environ.get("HUGGINGFACEHUB_API_TOKEN"),
)
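# A minimal guard sketch, assuming the app should fail fast when the token is
# missing rather than erroring on the first request (st.stop halts the script):
#
#   if not os.environ.get("HUGGINGFACEHUB_API_TOKEN"):
#       st.error("HUGGINGFACEHUB_API_TOKEN is not set")
#       st.stop()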
# Supported models
model_links = {
    "HAH v0.1": "drmasad/HAH-2024-v0.11",
}

# Info about each model, for display
model_info = {
    "HAH v0.1": {
        "description": (
            "HAH 0.1 is a fine-tuned model based on Mistral 7B Instruct.\n\n"
            "It was created by Dr M. As'ad using 250k database rows sourced "
            "from open-source articles on diabetes."
        ),
        "logo": "https://www.hmgaihub.com/untitled.png",
    },
}
def reset_conversation():
    """Reset the conversation and message history."""
    st.session_state.conversation = []
    st.session_state.messages = []
# Define the available models
models = list(model_links.keys())
# Create the sidebar with the dropdown for model selection
selected_model = st.sidebar.selectbox("Select Model", models)
# Create a temperature slider
temp_values = st.sidebar.slider("Select a temperature value", 0.0, 1.0, 0.5)
# Reset button and model notes
st.sidebar.button("Reset Chat", on_click=reset_conversation)
st.sidebar.write(f"You're now chatting with **{selected_model}**")
st.sidebar.image("https://www.hmgaihub.com/untitled.png")
st.sidebar.markdown("*Generated content may be inaccurate or false.*")
st.sidebar.markdown("*This is an under development project.*")
st.sidebar.markdown("*Not a replacement for medical advice from a doctor.*")
if "prev_option" not in st.session_state:
st.session_state.prev_option = selected_model
if st.session_state.prev_option != selected_model:
st.session_state.messages = []
# st.write(f"Changed to {selected_model}")
st.session_state.prev_option = selected_model
reset_conversation()
# Pull in the model we want to use
repo_id = model_links[selected_model]

st.subheader(f"AI - {selected_model}")

# Set a default model
if selected_model not in st.session_state:
    st.session_state[selected_model] = model_links[selected_model]
# Initialize chat history
if "messages" not in st.session_state:
    st.session_state.messages = []

# Display chat messages from history on app rerun
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
# Initialize the streaming status flag
if "is_streaming" not in st.session_state:
    st.session_state.is_streaming = False
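# Streamlit reruns this script top to bottom on every interaction, so the
# flag must live in session_state to survive reruns; it is checked below to
# lock the chat input while a response is streaming.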
# Chat input handling
if st.session_state.is_streaming:
    # Inform the user to wait while a response is being generated
    st.chat_input("The assistant is currently responding. Please wait...")
else:
    # If not streaming, allow user input
    if prompt := st.chat_input("Ask me anything about diabetes"):
        st.session_state.is_streaming = True  # Streaming has started

        with st.chat_message("user"):
            st.markdown(prompt)

        # Add the user message to chat history
        st.session_state.messages.append({"role": "user", "content": prompt})
instructions = """
Act as a highly knowledgeable endocrinology doctor with expertise in explaining complex medical information in an understandable way to patients who do not have a medical background. Your responses should not only convey empathy and care but also demonstrate a high level of medical accuracy and reliability.
When crafting your explanations, please adhere to the following guidelines:
- Prioritize medical accuracy: Ensure all information provided is up-to-date and reflects current medical consensus. Use evidence-based medical knowledge to inform your responses.
- Clarify complex concepts: Break down medical terms and concepts into understandable language. Use analogies related to everyday experiences to help explain complex ideas when possible.
- Provide actionable advice: Where appropriate, offer practical and specific advice that the patient can follow to address their concerns or manage their condition, including when to consult a healthcare professional.
- Address concerns directly: Understand and directly respond to the patient's underlying concerns or questions, offering clear explanations and reassurance about their condition or treatment options.
- Promote informed decision-making: Empower the patient with the knowledge they need to make informed health decisions. Highlight key considerations and options available to them in managing their health.
Your response should be a blend of professional medical advice and compassionate communication, creating a dialogue that educates, reassures, and empowers the patient.
Strive to make your response as informative and authoritative as a consultation with a human doctor, ensuring the patient feels supported and knowledgeable about their health concerns.
You will answer as if you are talking to a patient directly
"""
full_prompt = f"<s>[INST] {prompt} [/INST] {instructions}</s>"
        # Display assistant response in chat message container
        with st.chat_message("assistant"):
            try:
                # Stream the response, sending prior history as-is and the
                # formatted prompt as the latest user message (the original
                # sent full_prompt for every history entry)
                stream = client.chat.completions.create(
                    model=model_links[selected_model],
                    messages=[
                        {"role": m["role"], "content": m["content"]}
                        for m in st.session_state.messages[:-1]
                    ]
                    + [{"role": "user", "content": full_prompt}],
                    temperature=temp_values,
                    stream=True,
                    max_tokens=1024,
                )
                response = st.write_stream(stream)
                # Strip the end-of-sequence token if the model emits it;
                # st.write_stream has already rendered the text, so it is
                # not displayed a second time
                response = response.replace("</s>", "").strip()
            finally:
                # Clear the flag even if the request fails, so the chat
                # input does not stay locked
                st.session_state.is_streaming = False

        # Store the final response
        st.session_state.messages.append({"role": "assistant", "content": response})