""" | |
Diabetes Version | |
@aim: Demo for testing purposes only | |
@inquiries: Dr M As'ad | |
@email: [email protected] | |
""" | |
import streamlit as st
from openai import OpenAI
import os
from dotenv import load_dotenv

load_dotenv()
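# Expects HUGGINGFACEHUB_API_TOKEN to be defined in a local .env file
# (or already present in the environment).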
# Initialize the client
client = OpenAI(
    base_url="https://p7fw46eiw6xfkxvj.us-east-1.aws.endpoints.huggingface.cloud/v1/",
    api_key=os.environ.get('HUGGINGFACEHUB_API_TOKEN'),  # an "hf_xxx" access token
)
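# The base_url points at a dedicated Hugging Face Inference Endpoint that
# exposes an OpenAI-compatible /v1 chat-completions API, which is why the
# standard `openai` client can be reused here.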
# Create supported models
model_links = {
    "HAH v0.1": "drmasad/HAH-2024-v0.11",
    "Mistral": "mistralai/Mistral-7B-Instruct-v0.2",
}
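# model_links maps each display name in the sidebar to the Hugging Face
# repository id that is passed as the `model` field of each completion request.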
# Pull info about the model to display
model_info = {
    "HAH v0.1": {
        'description': (
            "HAH 0.1 is a fine-tuned model based on Mistral 7B Instruct.\n\n"
            "It was created by Dr M. As'ad using 250k database rows sourced "
            "from open-source articles on diabetes.\n"
        ),
        'logo': 'https://www.hmgaihub.com/untitled.png',
    },
    "Mistral": {
        'description': (
            "The Mistral model is a **Large Language Model (LLM)** that's able to "
            "have question-and-answer interactions.\n\n"
            "It was created by the [**Mistral AI**](https://mistral.ai/news/announcing-mistral-7b/) "
            "team and has over **7 billion parameters.**\n"
        ),
        'logo': 'https://mistral.ai/images/logo_hubc88c4ece131b91c7cb753f40e9e1cc5_2589_256x0_resize_q97_h2_lanczos_3.webp',
    },
}
def reset_conversation():
    '''
    Resets the conversation and message history.
    '''
    st.session_state.conversation = []
    st.session_state.messages = []
    return None
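# reset_conversation is wired to the sidebar "Reset Chat" button below via
# on_click, and is also called when the user switches models.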
# Define the available models
models = list(model_links.keys())

# Create the sidebar with the dropdown for model selection
selected_model = st.sidebar.selectbox("Select Model", models)

# Create a temperature slider
temp_values = st.sidebar.slider('Select a temperature value', 0.0, 1.0, 0.5)

# Reset button, model description, and disclaimers
st.sidebar.button("Reset Chat", on_click=reset_conversation)
st.sidebar.write(f"You're now chatting with **{selected_model}**")
st.sidebar.markdown(model_info[selected_model]['description'])
st.sidebar.image(model_info[selected_model]['logo'])
st.sidebar.markdown("*Generated content may be inaccurate or false.*")
st.sidebar.markdown("*This project is under development.*")
st.sidebar.markdown("*Not a replacement for medical advice from a doctor.*")
if "prev_option" not in st.session_state: | |
st.session_state.prev_option = selected_model | |
if st.session_state.prev_option != selected_model: | |
st.session_state.messages = [] | |
# st.write(f"Changed to {selected_model}") | |
st.session_state.prev_option = selected_model | |
reset_conversation() | |
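# Switching models in the sidebar dropdown resets the stored history so the
# newly selected model starts from a fresh conversation.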
# Pull in the model we want to use
repo_id = model_links[selected_model]

st.subheader(f'AI - {selected_model}')
# st.title(f'ChatBot Using {selected_model}')

# Set a default model
if selected_model not in st.session_state:
    st.session_state[selected_model] = model_links[selected_model]

# Initialize chat history
if "messages" not in st.session_state:
    st.session_state.messages = []

# Display chat messages from history on app rerun
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
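# Streamlit re-runs this script from top to bottom on every interaction, so the
# loop above replays the saved history to keep the full chat visible.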
# Accept user input
if prompt := st.chat_input(f"Hi, I'm {selected_model}, ask me a question"):

    # Display user message in chat message container
    with st.chat_message("user"):
        st.markdown(prompt)

    # Add user message to chat history
    st.session_state.messages.append({"role": "user", "content": prompt})

    # Display assistant response in chat message container
    with st.chat_message("assistant"):
        stream = client.chat.completions.create(
            model=model_links[selected_model],
            messages=[
                {"role": m["role"], "content": m["content"]}
                for m in st.session_state.messages
            ],
            temperature=temp_values,
            stream=True,
            max_tokens=3000,
        )
        response = st.write_stream(stream)

    # Add the streamed assistant reply to the chat history
    st.session_state.messages.append(
        {"role": "assistant", "content": response})