Runtime error

Update app.py
app.py
CHANGED
@@ -17,7 +17,7 @@ api_token = os.getenv("HUGGINGFACEHUB_API_TOKEN")
 if api_token:
     login(token=api_token)
 else:
-
+    st.error("API token is not set in the environment variables.")
 
 # Define model links
 model_links = {
@@ -27,6 +27,9 @@ model_links = {
 # Set selected model
 selected_model = "HAH-2024-v0.1"
 
+# Display welcome message
+st.title("Welcome to the Diabetes Management Assistant")
+
 # Sidebar setup
 temp_values = st.sidebar.slider("Select a temperature value", 0.0, 1.0, (0.5))
 def reset_conversation():
@@ -37,7 +40,9 @@ st.sidebar.button("Reset Chat", on_click=reset_conversation)
 st.sidebar.write(f"You're now chatting with **{selected_model}**")
 st.sidebar.image("https://www.hmgaihub.com/untitled.png")
 
+# Function to load model
 def load_model(selected_model_name):
+    st.info("Loading the model, please wait...")
     model_name = model_links[selected_model_name]
 
     # Set a specific device
@@ -79,10 +84,10 @@ def load_model(selected_model_name):
     tokenizer = AutoTokenizer.from_pretrained(
         "mistralai/Mistral-7B-Instruct-v0.2", trust_remote_code=True
     )
+    st.success("Model is ready. Now we are ready!")
 
     return model, tokenizer
 
-
 # Load model and tokenizer
 model, tokenizer = load_model(selected_model)
 
@@ -98,28 +103,16 @@ if prompt := st.chat_input("Ask me anything about diabetes"):
     with st.chat_message("user"):
         st.markdown(prompt)
 
-    # Append user prompt to session state
     st.session_state.messages.append({"role": "user", "content": prompt})
 
-    # Structured instructions as used in your Colab example
     instructions = """
     Act as a highly knowledgeable endocrinology doctor with expertise in explaining complex medical information in an understandable way to patients who do not have a medical background. Your responses should not only convey empathy and care but also demonstrate a high level of medical accuracy and reliability.
-
-    - Prioritize medical accuracy: Ensure all information provided is up-to-date and reflects current medical consensus. Use evidence-based medical knowledge to inform your responses.
-    - Clarify complex concepts: Break down medical terms and concepts into understandable language. Use analogies related to everyday experiences to help explain complex ideas when possible.
-    - Provide actionable advice: Where appropriate, offer practical and specific advice that the patient can follow to address their concerns or manage their condition, including when to consult a healthcare professional.
-    - Address concerns directly: Understand and directly respond to the patient's underlying concerns or questions, offering clear explanations and reassurance about their condition or treatment options.
-    - Promote informed decision-making: Empower the patient with the knowledge they need to make informed health decisions. Highlight key considerations and options available to them in managing their health.
-    Your response should be a blend of professional medical advice and compassionate communication, creating a dialogue that educates, reassures, and empowers the patient.
-    Strive to make your response as informative and authoritative as a consultation with a human doctor, ensuring the patient feels supported and knowledgeable about their health concerns.
-    You will answer as if you are talking to a patient directly
+    you will answer only what the need and in professional way. do not add extra unnecessary information. you can however chat with the patient casually
     """
 
-
-    full_prompt = f"<s>[INST] {prompt} [/INST] {instructions}</s>"
+    full_prompt = f"<s>[INST] {instructions} [/INST] {prompt}</s>"
 
     with st.chat_message("assistant"):
-        # Generate response using the pipeline with structured prompting
         result = pipeline(
             task="text-generation",
             model=model,
@@ -128,13 +121,9 @@ if prompt := st.chat_input("Ask me anything about diabetes"):
             temperature=temp_values
         )(full_prompt)
 
-        # Extract the answer from the generated text
         generated_text = result[0]['generated_text']
         response = generated_text.split("</s>")[-1].strip()
 
-        # Display the response
         st.markdown(response)
 
-        # Append assistant response to session state
         st.session_state.messages.append({"role": "assistant", "content": response})
-
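A note on the first hunk: the old `else:` branch was empty, which is a parse-time IndentationError in Python and a plausible cause of the Space's runtime error; the commit fills the branch with `st.error(...)`. Streamlit's `st.error` only renders a message, though, so the script keeps running and `load_model` is still called without credentials. A minimal sketch of one way to halt cleanly, assuming Streamlit's `st.stop()`:

import os
import streamlit as st
from huggingface_hub import login

api_token = os.getenv("HUGGINGFACEHUB_API_TOKEN")
if api_token:
    login(token=api_token)
else:
    st.error("API token is not set in the environment variables.")
    st.stop()  # end this script run so the gated model download below is never attempted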
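Separately, Streamlit reruns app.py top to bottom on every interaction, so `model, tokenizer = load_model(selected_model)` reloads the checkpoint (and replays the `st.info`/`st.success` banners) on each chat message. The usual mitigation is to memoize the loader; a sketch assuming `st.cache_resource` and an `AutoModelForCausalLM` loader, since the body of `load_model` is only partially visible in this diff:

import torch
import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer

@st.cache_resource(show_spinner="Loading the model, please wait...")
def load_model(selected_model_name):
    model_name = model_links[selected_model_name]  # model_links as defined earlier in app.py
    # torch_dtype here is an assumption; the original from_pretrained arguments are not shown
    model = AutoModelForCausalLM.from_pretrained(
        model_name, torch_dtype=torch.float16, trust_remote_code=True
    )
    tokenizer = AutoTokenizer.from_pretrained(
        "mistralai/Mistral-7B-Instruct-v0.2", trust_remote_code=True
    )
    return model, tokenizer  # cached across reruns; weights load once per process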
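On the prompt change itself: the commit swaps `{prompt}` and `{instructions}`, so the instructions now sit inside `[INST] ... [/INST]` and the user's question comes after the closing tag, followed by `</s>`. Mistral-7B-Instruct was trained with the entire user turn inside `[INST] ... [/INST]`, and `</s>` marks the end of a completed assistant turn, so text placed after `[/INST]` reads as an already-given answer. A hedged sketch that delegates the formatting to the tokenizer's chat template (transformers >= 4.34); this model has no system role, so the instructions are folded into the user turn:

# Fold the system-style instructions into the single user turn, then let the
# tokenizer's chat template emit the [INST] markup the model was trained on.
messages = [{"role": "user", "content": f"{instructions}\n\n{prompt}"}]
full_prompt = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
# -> "<s>[INST] <instructions and question> [/INST]", with generation continuing after [/INST]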
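Finally, the assistant block constructs a fresh `pipeline(...)` for every message and recovers the answer by splitting on `</s>`, which depends on the prompt surviving verbatim in the output. A sketch of a leaner variant using the same transformers text-generation pipeline: build it once, pass `tokenizer` explicitly (the pipeline cannot infer a tokenizer from an in-memory model object), and let `return_full_text=False` drop the prompt echo instead of string-splitting; `max_new_tokens=512` is an illustrative cap, not a value from the original:

from transformers import pipeline

generator = pipeline("text-generation", model=model, tokenizer=tokenizer)  # built once, reused across messages

with st.chat_message("assistant"):
    result = generator(
        full_prompt,
        do_sample=True,           # temperature only takes effect when sampling is enabled
        temperature=temp_values,
        max_new_tokens=512,       # illustrative cap; the diff sets none
        return_full_text=False,   # return just the completion, without the prompt
    )
    response = result[0]["generated_text"].strip()
    st.markdown(response)
    st.session_state.messages.append({"role": "assistant", "content": response})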