"""
Diabetes Version
@aim: Demo for testing purposes only
@inquiries: Dr M As'ad
@email: [email protected]
"""
import os

import streamlit as st
from dotenv import load_dotenv
from openai import OpenAI
load_dotenv()
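# load_dotenv() reads a local .env file (if present) so the Hugging Face token
# can be supplied via HUGGINGFACEHUB_API_TOKEN instead of being hard-coded.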
# Initialize the OpenAI-compatible client pointed at a Hugging Face
# Inference Endpoint
client = OpenAI(
    base_url="https://p7fw46eiw6xfkxvj.us-east-1.aws.endpoints.huggingface.cloud/v1/",
    api_key=os.environ.get('HUGGINGFACEHUB_API_TOKEN'),  # a token of the form "hf_xxx"
)
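# Optional guard (not in the original app): fail fast with a readable message
# if the token is missing, instead of an opaque authentication error later.
if not os.environ.get('HUGGINGFACEHUB_API_TOKEN'):
    st.error("HUGGINGFACEHUB_API_TOKEN is not set. Add it to your .env file.")
    st.stop()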
# Supported models: display name -> Hugging Face repo ID
model_links = {
    "HAH v0.1": "drmasad/HAH-2024-v0.11",
    "Mistral": "mistralai/Mistral-7B-Instruct-v0.2",
}
# Info about each model, displayed in the sidebar
model_info = {
    "HAH v0.1": {
        'description': "HAH 0.1 is a fine-tuned model based on Mistral 7B Instruct.\n\n"
                       "It was created by Dr M. As'ad using 250k database rows "
                       "sourced from open-source articles on diabetes.",
        'logo': 'https://www.hmgaihub.com/untitled.png',
    },
    "Mistral": {
        'description': "The Mistral model is a **Large Language Model (LLM)** that is able "
                       "to have question-and-answer interactions.\n\n"
                       "It was created by the [**Mistral AI**](https://mistral.ai/news/announcing-mistral-7b/) "
                       "team and has over **7 billion parameters**.",
        'logo': 'https://mistral.ai/images/logo_hubc88c4ece131b91c7cb753f40e9e1cc5_2589_256x0_resize_q97_h2_lanczos_3.webp',
    },
}
def reset_conversation():
    """Reset the conversation and clear the chat history."""
    st.session_state.conversation = []
    st.session_state.messages = []
    return None
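# reset_conversation is wired to the sidebar "Reset Chat" button below;
# Streamlit runs on_click callbacks before the script body reruns.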
# Define the available models
models = list(model_links.keys())
# Create the sidebar with the dropdown for model selection
selected_model = st.sidebar.selectbox("Select Model", models)
# Create a temperature slider
temp_values = st.sidebar.slider('Select a temperature value', 0.0, 1.0, 0.5)
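# Temperature controls sampling randomness: values near 0.0 make replies more
# deterministic, values near 1.0 make them more varied.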
# Sidebar: reset button, model info, and disclaimers
st.sidebar.button("Reset Chat", on_click=reset_conversation)
st.sidebar.write(f"You're now chatting with **{selected_model}**")
st.sidebar.image("https://www.hmgaihub.com/untitled.png")
st.sidebar.markdown("*Generated content may be inaccurate or false.*")
st.sidebar.markdown("*This is an under development project.*")
st.sidebar.markdown("*Not a replacement for medical advice from a doctor.*")
if "prev_option" not in st.session_state:
st.session_state.prev_option = selected_model
if st.session_state.prev_option != selected_model:
st.session_state.messages = []
# st.write(f"Changed to {selected_model}")
st.session_state.prev_option = selected_model
reset_conversation()
# Pull in the model we want to use
repo_id = model_links[selected_model]
st.subheader(f'AI - {selected_model}')
# st.title(f'ChatBot Using {selected_model}')
# Set a default model
if selected_model not in st.session_state:
    st.session_state[selected_model] = model_links[selected_model]
# Initialize chat history
if "messages" not in st.session_state:
st.session_state.messages = []
# Display chat messages from history on app rerun
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
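# (Streamlit reruns this script top-to-bottom on every interaction, so the
# saved history has to be redrawn each time.)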
# Accept user input
if prompt := st.chat_input(f"Hi, I'm {selected_model}. Ask me a question."):
    # Display the user message in the chat message container
    with st.chat_message("user"):
        st.markdown(prompt)
    # Add the user message to the chat history
    st.session_state.messages.append({"role": "user", "content": prompt})
    # Display the assistant response in the chat message container
    with st.chat_message("assistant"):
        stream = client.chat.completions.create(
            model=repo_id,  # the Hugging Face repo selected above
            messages=[
                {"role": m["role"], "content": m["content"]}
                for m in st.session_state.messages
            ],
            temperature=temp_values,
            stream=True,
            max_tokens=3000,
        )
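        # st.write_stream renders tokens as they arrive and returns the full
        # concatenated text once the stream finishes.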
        response = st.write_stream(stream)

    st.session_state.messages.append(
        {"role": "assistant", "content": response})