Upload 14 files

- README.md +17 -0
- Screenshot 2024-11-14 170315.png +0 -0
- __pycache__/chatbot.cpython-39.pyc +0 -0
- __pycache__/homepage.cpython-39.pyc +0 -0
- __pycache__/predict_stress.cpython-39.pyc +0 -0
- __pycache__/session_state.cpython-39.pyc +0 -0
- app.py +19 -0
- bash_profile.txt +1 -0
- chatbot.py +236 -0
- homepage.py +83 -0
- image.jpg +0 -0
- predict_stress.py +193 -0
- requirements.txt +23 -0
- session_state.py +24 -0
README.md
ADDED
@@ -0,0 +1,17 @@
---
title: Psychologist Chatbot
emoji: 📈
colorFrom: yellow
colorTo: indigo
sdk: streamlit
sdk_version: 1.39.0
app_file: app.py
pinned: false
license: mit
short_description: Interactive chatbot as a psychologist
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference

# AI Support for Mental Health Wellbeing

Designed to streamline your mental health journey, our AI therapist is here to offer round-the-clock support, addressing mental health-related queries, providing personalized reports, and helping you understand your stress level to ensure a smoother journey toward mental and emotional health.
Screenshot 2024-11-14 170315.png
ADDED
__pycache__/chatbot.cpython-39.pyc
ADDED
Binary file (9.07 kB)
__pycache__/homepage.cpython-39.pyc
ADDED
Binary file (2.97 kB)
__pycache__/predict_stress.cpython-39.pyc
ADDED
Binary file (7.87 kB)
__pycache__/session_state.cpython-39.pyc
ADDED
Binary file (1.53 kB)
app.py
ADDED
@@ -0,0 +1,19 @@
import streamlit as st
from homepage import display_homepage
from chatbot import display_chatbot
from predict_stress import display_predict_stress
from session_state import initialize_session_state

initialize_session_state()

if 'page' not in st.session_state:
    st.session_state.page = "home"

if st.session_state.page == "home":
    display_homepage()
elif st.session_state.page == "chat":
    display_chatbot()
elif st.session_state.page == "stress":
    display_predict_stress()

st.markdown("<style>div.stContainer {padding-top: 0;}</style>", unsafe_allow_html=True)
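app.py is a thin router: initialize_session_state() seeds the shared state, st.session_state.page selects which module's display function renders, and the page modules flip that value themselves (homepage.py sets it to "chat" or "stress", the other pages send it back to "home"). The same routing can also be expressed as a dispatch table; the following is a minimal sketch of that alternative, not part of this upload, reusing only the repo's own modules:

# Hypothetical alternative to app.py's if/elif routing (not in this commit).
import streamlit as st
from homepage import display_homepage
from chatbot import display_chatbot
from predict_stress import display_predict_stress
from session_state import initialize_session_state

initialize_session_state()
if 'page' not in st.session_state:
    st.session_state.page = "home"

# Map each page name to its render function; adding a page is one new entry here.
PAGES = {
    "home": display_homepage,
    "chat": display_chatbot,
    "stress": display_predict_stress,
}
PAGES.get(st.session_state.page, display_homepage)()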
bash_profile.txt
ADDED
@@ -0,0 +1 @@
export HF_TOKEN=$(cat ~/.secret/hugging-face.txt)
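This profile line is how the Hugging Face token reaches the app when run locally: chatbot.py and predict_stress.py read it with os.getenv("HF_TOKEN") and pass it to HuggingFaceEndpoint (on a Space it would come from a repository secret instead). A minimal sketch, not part of this upload, for failing fast when the variable is missing:

# Hypothetical startup check (assumption, not in this commit): verify HF_TOKEN is set
# before any HuggingFaceEndpoint call in chatbot.py or predict_stress.py needs it.
import os

if not os.getenv("HF_TOKEN"):
    raise RuntimeError(
        "HF_TOKEN is not set; source bash_profile.txt locally or add HF_TOKEN as a Space secret."
    )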
chatbot.py
ADDED
@@ -0,0 +1,236 @@
import streamlit as st
from langchain_core.prompts import PromptTemplate
from langchain_core.output_parsers import StrOutputParser
from transformers import pipeline
from langchain_huggingface import HuggingFaceEndpoint
import numpy as np
from pydub import AudioSegment
import os
from langchain.memory import ConversationBufferWindowMemory
from transformers import AutoModelForAudioClassification, Wav2Vec2FeatureExtractor

# Set the page configuration first
st.set_page_config(page_title="HuggingFace ChatBot", page_icon="🤗")

# Fixed conversational memory length
memory_length = 5
memory = ConversationBufferWindowMemory(k=memory_length, memory_key="chat_history", return_messages=True)

# Model IDs
model_id = "Sasmitah/llama_16bit_2"
model2_id = "meta-llama/Llama-3.2-3B-Instruct"
whisper_model = "openai/whisper-small"  # Using Whisper model for audio transcription
model1 = AutoModelForAudioClassification.from_pretrained("ehcalabres/wav2vec2-lg-xlsr-en-speech-emotion-recognition")
feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("ehcalabres/wav2vec2-lg-xlsr-en-speech-emotion-recognition")

def predict_emotion(audio_file):
    if not audio_file:
        return "No audio file provided!"

    sound = AudioSegment.from_file(audio_file)
    sound = sound.set_frame_rate(16000)
    sound_array = np.array(sound.get_array_of_samples())

    input = feature_extractor(
        raw_speech=sound_array,
        sampling_rate=16000,
        padding=True,
        return_tensors="pt")

    result = model1.forward(input.input_values.float())

    id2label = {
        "0": "angry",
        "1": "calm",
        "2": "disgust",
        "3": "fearful",
        "4": "happy",
        "5": "neutral",
        "6": "sad",
        "7": "surprised"
    }

    # Map result to emotion labels with probabilities
    emotion_scores = dict(zip(id2label.values(), list(round(float(i),4) for i in result[0][0])))
    return emotion_scores

def get_llm_hf_inference(model_id, max_new_tokens=128, temperature=0.5):
    """Returns a language model for HuggingFace inference."""
    try:
        llm = HuggingFaceEndpoint(
            repo_id=model_id,
            max_new_tokens=max_new_tokens,
            temperature=temperature,
            token=os.getenv("HF_TOKEN")
        )
        return llm
    except Exception as e:
        st.error(f"Error initializing model: {e}")
        return None

def load_transcription_model():
    try:
        transcriber = pipeline("automatic-speech-recognition", model=whisper_model)
        return transcriber
    except Exception as e:
        st.error(f"Error loading Whisper model: {e}")
        return None

def preprocess_audio(file):
    audio = AudioSegment.from_file(file).set_frame_rate(16000).set_channels(1)
    audio_samples = np.array(audio.get_array_of_samples()).astype(np.float32) / (2**15)
    return audio_samples

def transcribe_audio(file, transcriber):
    audio = preprocess_audio(file)
    transcription = transcriber(audio)["text"]
    return transcription

def display_chatbot():

    st.title("Personal Therapist Chatbot")
    st.markdown(
        """
        🔒 *Disclaimer:* Please do not share any personal, sensitive, or confidential information during your interaction with this chatbot. This tool is for informational and supportive purposes only, and any data shared is not stored or monitored to protect your privacy.
        """
    )

    with st.sidebar:
        reset_history = st.button("Reset Chat History")
        go_home = st.button("Back to Home")
        if go_home:
            st.session_state.page = "home"

    if reset_history:
        st.session_state.chat_history = [{"role": "assistant", "content": st.session_state.starter_message}]
        st.session_state.user_text = None
        st.session_state.avatars = {'user': None, 'assistant': None}
        st.session_state.max_response_length = 1000

    def get_response(system_message, chat_history, user_text, model_id, max_new_tokens=256):
        """Generates a response from the chatbot model."""
        hf = get_llm_hf_inference(model_id=model_id, max_new_tokens=max_new_tokens)
        if hf is None:
            return "Error: Model not initialized.", chat_history

        prompt = PromptTemplate.from_template(
            (
                "[INST] {system_message}"
                "\nCurrent Conversation:\n{chat_history}\n\n"

                "\nPatient: {user_text}.\n [/INST]"
                "\ntherapist:"
            )
        )
        chat = prompt | hf.bind(skip_prompt=True) | StrOutputParser(output_key='content')

        response = chat.invoke(input=dict(system_message=system_message, user_text=user_text, chat_history=chat_history))
        response = response.split("AI:")[-1].strip()

        low_engagement_threshold = 3
        end_keywords = ["thank you", "thanks", "goodbye", "bye", "that's all"]

        short_responses = len(user_text.split()) <= low_engagement_threshold
        end_pattern_match = any(keyword in user_text.lower() for keyword in end_keywords)

        recent_short_responses = all(len(msg["content"].split()) <= low_engagement_threshold for msg in chat_history[-2:])
        response_is_acknowledgment = user_text.lower() in ["yes", "okay", "alright"]

        if (end_pattern_match or (short_responses and recent_short_responses)) and not response_is_acknowledgment:
            follow_up_question = "Would you like to have a report of your current health? Yes/No"
            response = f"I’m glad to hear that. Let’s keep checking in on this, and you can tell me how it goes next time."
            response += f"\n\n{follow_up_question}"

        chat_history.append({'role': 'user', 'content': user_text})
        chat_history.append({'role': 'assistant', 'content': response})
        return response, chat_history

    def get_summary_of_chat_history(chat_history, model2_id):
        """Generates a comprehensive summary of the chat history and a health report."""
        hf = get_llm_hf_inference(model_id=model2_id, max_new_tokens=256)
        if hf is None:
            return "Error: Model not initialized."

        chat_content = "\n".join([f"{message['role']}: {message['content']}" for message in chat_history])

        prompt = PromptTemplate.from_template(
            f"""
            Generate a detailed report based on the following conversation between a therapist and patient.
            Conversation:\n{chat_content}

            The report should include:
            1. *Patient Information:*
               - Include placeholders for Name, Age, Gender, Date of Session.

            2. *Conversation Summary:*
               - Summarize the main points of the conversation, focusing on the patient’s primary concerns and emotional state.
               - Note any specific causes of stress or distress, how these issues affect the patient's personal life, and their expressed desires or goals.

            3. *Preliminary Diagnosis:*
               - Identify the main symptoms observed in the conversation, such as mood, energy levels, motivation, etc.
               - Suggest a potential preliminary diagnosis based on the symptoms described, e.g., stress-induced burnout or other relevant concerns. Mention the need for further assessment if applicable.

            4. *Recommendations & Strategies:*
               - Provide practical, achievable strategies tailored to the patient’s needs.

            Format the report neatly with headings and subheadings as shown in the example. Aim to keep the language supportive and professional.
            """
        )

        summary = prompt | hf.bind(skip_prompt=True) | StrOutputParser(output_key='content')
        summary_response = summary.invoke(input={"chat_content": chat_content})

        return summary_response

    transcriber = load_transcription_model()

    input_type = st.radio("Select your input type", ("Text", "Audio"))

    if input_type == "Text":
        st.session_state.user_text = st.text_input("Enter your text here:")
    elif input_type == "Audio":
        audio_file = st.file_uploader("Upload an audio file for transcription", type=["mp3", "wav", "m4a"])

        if audio_file is not None and transcriber:
            with st.spinner("Transcribing audio..."):
                try:
                    st.session_state.user_text = transcribe_audio(audio_file, transcriber)
                    st.success("Audio transcribed successfully!")
                    st.audio(audio_file, format='audio/mp3')
                    emotion_result = predict_emotion(audio_file)
                    predicted_emotion = max(emotion_result, key=emotion_result.get)
                    st.write(f"Most likely emotion: {predicted_emotion.capitalize()}")
                except Exception as e:
                    st.error(f"Error transcribing audio: {e}")

    output_container = st.container()

    with output_container:
        for message in st.session_state.chat_history:
            if message['role'] == 'system':
                continue
            with st.chat_message(message['role'], avatar=st.session_state['avatars'][message['role']]):
                st.markdown(message['content'])

        if st.session_state.user_text:
            with st.chat_message("user", avatar=st.session_state.avatars['user']):
                st.markdown(st.session_state.user_text)

            with st.chat_message("assistant", avatar=st.session_state.avatars['assistant']):
                response = st.session_state.chat_history[-1]['content'] if len(st.session_state.chat_history) > 2 else st.session_state.starter_message

                if "yes" in st.session_state.user_text.lower() and "Would you like to have a report of your current health? Yes/No" in response:
                    with st.spinner("Generating your health report..."):
                        report = get_summary_of_chat_history(st.session_state.chat_history, model2_id)
                        st.markdown(report)
                with st.spinner("Addressing your concerns..."):
                    response, st.session_state.chat_history = get_response(
                        system_message=st.session_state.system_message,
                        user_text=st.session_state.user_text,
                        chat_history=st.session_state.chat_history,
                        model_id=model_id,
                        max_new_tokens=st.session_state.max_response_length,
                    )
                    st.markdown(response)
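A note on predict_emotion: the values it returns are the wav2vec2 classifier's raw logits keyed by emotion label, not softmaxed probabilities (despite the inline comment), so the later max() still picks the right label but the numbers are not directly interpretable as confidences. If calibrated scores were wanted, a small helper along these lines would do it; this is a hypothetical sketch, not part of this upload:

# Hypothetical helper (not in this commit): turn the raw logit scores returned by
# chatbot.predict_emotion() into a probability distribution before display.
import math

def to_probabilities(emotion_scores: dict) -> dict:
    """Softmax over the logit values returned by predict_emotion."""
    exps = {label: math.exp(score) for label, score in emotion_scores.items()}
    total = sum(exps.values())
    return {label: round(value / total, 4) for label, value in exps.items()}

# Example usage: probs = to_probabilities(predict_emotion("sample.wav")); max(probs, key=probs.get)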
homepage.py
ADDED
@@ -0,0 +1,83 @@
import streamlit as st
from PIL import Image
import base64
from io import BytesIO

def image_to_base64(img):
    buffered = BytesIO()
    img.save(buffered, format="PNG")
    return base64.b64encode(buffered.getvalue()).decode()

def display_homepage():
    image = Image.open('image.jpg')
    img_base64 = image_to_base64(image)

    st.markdown(
        f"""
        <style>
        /* Set background color to white for both light and dark mode */
        body {{
            background-color: #ffffff ;
        }}

        .image-container {{
            position: relative;
            margin-top: -70px;
            margin-left: auto;
            margin-right: auto;
            width: 300px; /* Increase width for larger image */
            display: flex;
            justify-content: center;
        }}
        .content-container {{
            margin-top: 50px; /* Adjust margin-top to make space for the image */
        }}
        .main {{
            background-color: #ffffff ;
        }}

        /* Text and button styling */
        .stButton>button {{
            background-color: #2980b9;
            color: white;
            font-size: 18px;
            font-weight: bold;
            padding: 15px 30px;
            border-radius: 8px;
            border: none;
            box-shadow: 0px 5px 15px rgba(0, 0, 0, 0.1);
            cursor: pointer;
            transition: background-color 0.3s ease;
        }}
        .stButton>button:hover {{
            background-color: #3498db;
        }}
        </style>
        <div class="image-container">
            <img src="data:image/png;base64,{img_base64}" alt="image"/>
        </div>
        """,
        unsafe_allow_html=True
    )

    st.markdown(
        """
        <div class="content-container">
            <div style="text-align: center; font-size: 60px; font-weight: bold; color: #2c3e50;">
                AI Support for Your Mental Well-Being
            </div>
            <div style="text-align: center; font-size: 20px; margin-top: 20px; max-width: 700px; margin-left: auto; margin-right: auto;">
                Designed to streamline your mental health journey, our AI therapist is here to offer round-the-clock support, addressing mental health-related queries, providing personalized reports, and helping you understand your stress level to ensure a smoother journey toward mental and emotional health.
            </div>
        </div>
        """, unsafe_allow_html=True
    )

    col1, col2 = st.columns([1, 1])
    with col1:
        if st.button("Start Chat", key="start_chat_button"):
            st.session_state.page = "chat"

    with col2:
        if st.button("Stress Test", key="predict_stress_button"):
            st.session_state.page = "stress"
image.jpg
ADDED
predict_stress.py
ADDED
@@ -0,0 +1,193 @@
import streamlit as st
import xgboost as xgb
import pandas as pd
from huggingface_hub import hf_hub_download
import itertools
from langchain_huggingface import HuggingFaceEndpoint
import os
from transformers import pipeline
from langchain_core.prompts import PromptTemplate
from langchain_core.output_parsers import StrOutputParser

xgboostmodel_id = "Sannidhi/stress_prediction_xgboost_model"
xgboost_model = None
model_id = "meta-llama/Llama-3.2-1B-Instruct"
generator = pipeline("text-generation", model=model_id)

def get_llm_response(prompt_text, model_id="meta-llama/Llama-3.2-3B-Instruct", max_new_tokens=256, temperature=0.5):
    """Generates a response from the Hugging Face model for a given prompt text."""
    try:
        llm = HuggingFaceEndpoint(
            repo_id=model_id,
            max_new_tokens=max_new_tokens,
            temperature=temperature,
            token=os.getenv("HF_TOKEN")
        )

        system_message = "Rephrase the following text without adding any comments, feedback, or suggestions. Return only the rephrased text exactly as requested."

        prompt = PromptTemplate.from_template("{system_message}\n\n{user_text}")

        chat = prompt | llm.bind(skip_prompt=True) | StrOutputParser(output_key='content')

        response = chat.invoke(input=dict(system_message=system_message, user_text=prompt_text))

        return response

    except Exception as e:
        return f"Error generating response: {e}"

def load_xgboost_model():
    global xgboost_model
    try:
        model_path = hf_hub_download(repo_id="Sannidhi/stress_prediction_xgboost_model", filename="xgboost_model.json")

        xgboost_model = xgb.Booster()
        xgboost_model.load_model(model_path)

        return True
    except Exception as e:
        st.error(f"Error loading XGBoost model from Hugging Face: {e}")
        return False

def display_predict_stress():
    st.title("Analyse Current Stress")
    st.markdown("Answer the questions below to predict your stress level.")

    with st.sidebar:
        go_home = st.button("Back to Home")
        if go_home:
            st.session_state.page = "home"

    load_xgboost_model()

    with st.form(key="stress_form"):
        stress_questions = {
            "How many fruits or vegetables do you eat every day?": ["0", "1", "2", "3", "4", "5"],
            "How many new places do you visit in an year?": ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10"],
            "How many people are very close to you?": ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10"],
            "How many people do you help achieve a better life?": ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10"],
            "With how many people do you interact with during a typical day?": ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10"],
            "How many remarkable achievements are you proud of?": ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10"],
            "How many times do you donate your time or money to good causes?": ["0", "1", "2", "3", "4", "5"],
            "How well do you complete your weekly to-do lists?": ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10"],
            "In a typical day, how many hours do you experience 'FLOW'? (Flow is defined as the mental state, in which you are fully immersed in performing an activity. You then experience a feeling of energized focus, full involvement, and enjoyment in the process of this activity)": ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10"],
            "How many steps (in thousands) do you typically walk everyday?": ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10"],
            "For how many years ahead is your life vision very clear for?": ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10"],
            "About how long do you typically sleep?": ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10"],
            "How many days of vacation do you typically lose every year?": ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10"],
            "How often do you shout or sulk at somebody?": ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10"],
            "How sufficient is your income to cover basic life expenses (1 for insufficient, 2 for sufficient)?": ["1", "2"],
            "How many recognitions have you received in your life?": ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10"],
            "How many hours do you spend every week doing what you are passionate about?": ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10"],
            "In a typical week, how many times do you have the opportunity to think about yourself?": ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10"],
            "Age (1 = 'Less than 20' 2 = '21 to 35' 3 = '36 to 50' 4 = '51 or more')": ["1", "2", "3", "4"],
            "Gender (1 = 'Female', 0 = 'Male')": ["0", "1"]
        }

        question_to_feature_map = {
            "How many fruits or vegetables do you eat every day?": "FRUITS_VEGGIES",
            "How many new places do you visit in an year?": "PLACES_VISITED",
            "How many people are very close to you?": "CORE_CIRCLE",
            "How many people do you help achieve a better life?": "SUPPORTING_OTHERS",
            "With how many people do you interact with during a typical day?": "SOCIAL_NETWORK",
            "How many remarkable achievements are you proud of?": "ACHIEVEMENT",
            "How many times do you donate your time or money to good causes?": "DONATION",
            "How well do you complete your weekly to-do lists?": "TODO_COMPLETED",
            "In a typical day, how many hours do you experience 'FLOW'? (Flow is defined as the mental state, in which you are fully immersed in performing an activity. You then experience a feeling of energized focus, full involvement, and enjoyment in the process of this activity)": "FLOW",
            "How many steps (in thousands) do you typically walk everyday?": "DAILY_STEPS",
            "For how many years ahead is your life vision very clear for?": "LIVE_VISION",
            "About how long do you typically sleep?": "SLEEP_HOURS",
            "How many days of vacation do you typically lose every year?": "LOST_VACATION",
            "How often do you shout or sulk at somebody?": "DAILY_SHOUTING",
            "How sufficient is your income to cover basic life expenses (1 for insufficient, 2 for sufficient)?": "SUFFICIENT_INCOME",
            "How many recognitions have you received in your life?": "PERSONAL_AWARDS",
            "How many hours do you spend every week doing what you are passionate about?": "TIME_FOR_PASSION",
            "In a typical week, how many times do you have the opportunity to think about yourself?": "WEEKLY_MEDITATION",
            "Age (1 = 'Less than 20' 2 = '21 to 35' 3 = '36 to 50' 4 = '51 or more')": "AGE",
            "Gender (1 = 'Female', 0 = 'Male')": "GENDER"
        }

        response_map = {str(i): i for i in range(11)}
        response_map.update({"1": 1, "2": 2})

        responses = {}
        for question, options in stress_questions.items():
            responses[question] = st.selectbox(question, options)

        submit_button = st.form_submit_button("Submit")

    if submit_button:
        feature_dict = {question_to_feature_map[q]: response_map[responses[q]] for q in stress_questions.keys()}
        feature_df = pd.DataFrame([feature_dict])

        try:
            dmatrix = xgb.DMatrix(feature_df)
            prediction = xgboost_model.predict(dmatrix)
            st.markdown(f"### Predicted Stress Level: {prediction[0]:.2f}")
            if prediction[0] <= 1:
                st.markdown("Your stress level is within a healthy range. Keep up the good work, and aim to maintain it for continued good health!")
            else:
                weekly_meditation_input = feature_dict["WEEKLY_MEDITATION"]
                sleep_hours_input = feature_dict["SLEEP_HOURS"]
                time_for_passion_input = feature_dict["TIME_FOR_PASSION"]
                places_visited_input = feature_dict["PLACES_VISITED"]
                daily_steps_input = feature_dict["DAILY_STEPS"]

                weekly_meditation_upper_bound = min(10, weekly_meditation_input + 3)
                sleep_hours_upper_bound = min(10, sleep_hours_input + 3)
                time_for_passion_upper_bound = min(10, time_for_passion_input + 3)
                places_visited_upper_bound = min(10, places_visited_input + 3)
                daily_steps_upper_bound = min(10, daily_steps_input + 3)

                weekly_meditation_range = range(weekly_meditation_input, weekly_meditation_upper_bound + 1)
                sleep_hours_range = range(sleep_hours_input, sleep_hours_upper_bound + 1)
                time_for_passion_range = range(time_for_passion_input, time_for_passion_upper_bound + 1)
                places_visited_range = range(places_visited_input, places_visited_upper_bound + 1)
                daily_steps_range = range(daily_steps_input, daily_steps_upper_bound + 1)

                all_combinations = itertools.product(weekly_meditation_range, sleep_hours_range, time_for_passion_range, places_visited_range, daily_steps_range)

                best_combination = None
                min_diff = float('inf')

                for combination in all_combinations:
                    adjusted_feature_dict = feature_dict.copy()
                    adjusted_feature_dict["WEEKLY_MEDITATION"] = combination[0]
                    adjusted_feature_dict["SLEEP_HOURS"] = combination[1]
                    adjusted_feature_dict["TIME_FOR_PASSION"] = combination[2]
                    adjusted_feature_dict["PLACES_VISITED"] = combination[3]
                    adjusted_feature_dict["DAILY_STEPS"] = combination[4]

                    adjusted_feature_df = pd.DataFrame([adjusted_feature_dict])

                    dmatrix = xgb.DMatrix(adjusted_feature_df)
                    adjusted_prediction = xgboost_model.predict(dmatrix)
                    if adjusted_prediction[0] <= 1:
                        diff = sum(abs(adjusted_feature_dict[feature] - feature_dict[feature]) for feature in adjusted_feature_dict)
                        if diff < min_diff:
                            min_diff = diff
                            best_combination = adjusted_feature_dict
                if best_combination:
                    best_sleep = best_combination["SLEEP_HOURS"]
                    best_meditation = best_combination["WEEKLY_MEDITATION"]
                    best_passion = best_combination["TIME_FOR_PASSION"]
                    best_places = best_combination["PLACES_VISITED"]
                    best_steps = best_combination["DAILY_STEPS"]
                    best_stress_level = xgboost_model.predict(xgb.DMatrix(pd.DataFrame([best_combination])))[0]

                    prompt = f"Your stress level appears a bit elevated. To help bring it to a healthier range, try getting {best_sleep} hours of sleep each night, spend around {best_passion} hours each week doing something you’re passionate about, set aside {best_meditation} hours weekly for meditation, aim for {best_steps} thousand steps a day, and plan to explore {best_places} new places this year. These small changes can make a meaningful difference and help you reach a stress level of {best_stress_level}."
                    model_response = get_llm_response(prompt)
                    if model_response:
                        st.markdown(model_response)
                    else:
                        st.markdown("Your stress seems a bit high.")
                else:
                    prompt = f"Your stress level seems a bit high. To help bring it down, aim for up to {sleep_hours_upper_bound} hours of sleep each night, spend around {time_for_passion_upper_bound} hours each week on activities you enjoy, set aside {weekly_meditation_upper_bound} hours for meditation each week, try to reach {daily_steps_upper_bound} thousand steps daily, and plan to explore {places_visited_upper_bound} new places this year. These small adjustments can have a positive impact on your stress levels and overall well-being."
                    model_response = get_llm_response(prompt)
                    if model_response:
                        st.markdown(model_response)
                    else:
                        st.markdown("Your stress seems a bit high.")
        except Exception as e:
            st.error(f"Error making prediction: {e}")
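For reference, the stress model is an XGBoost booster downloaded from the Sannidhi/stress_prediction_xgboost_model repo and fed a single-row DataFrame whose columns are the 20 uppercase feature names from question_to_feature_map. The following is a standalone sketch of the same call outside Streamlit; the example values are assumptions, not part of this upload:

# Hypothetical standalone check (not in this commit): score one hand-built row
# the same way display_predict_stress() does.
import pandas as pd
import xgboost as xgb
from huggingface_hub import hf_hub_download

model_path = hf_hub_download(repo_id="Sannidhi/stress_prediction_xgboost_model",
                             filename="xgboost_model.json")
booster = xgb.Booster()
booster.load_model(model_path)

# One row with the 20 feature columns used by question_to_feature_map (example values).
row = {
    "FRUITS_VEGGIES": 3, "PLACES_VISITED": 2, "CORE_CIRCLE": 4, "SUPPORTING_OTHERS": 2,
    "SOCIAL_NETWORK": 5, "ACHIEVEMENT": 3, "DONATION": 1, "TODO_COMPLETED": 6,
    "FLOW": 2, "DAILY_STEPS": 5, "LIVE_VISION": 3, "SLEEP_HOURS": 7,
    "LOST_VACATION": 2, "DAILY_SHOUTING": 1, "SUFFICIENT_INCOME": 2, "PERSONAL_AWARDS": 4,
    "TIME_FOR_PASSION": 2, "WEEKLY_MEDITATION": 3, "AGE": 2, "GENDER": 1,
}
print(booster.predict(xgb.DMatrix(pd.DataFrame([row])))[0])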
requirements.txt
ADDED
@@ -0,0 +1,23 @@
transformers
huggingface_hub
streamlit
langchain_core
langchain_community
langchain_huggingface
langchain_text_splitters
accelerate
watchdog
tqdm
pydub
bitsandbytes
git+https://github.com/openai/whisper.git
torch
torchaudio
torchvision
streamlit-mic-recorder
ffmpeg
openai-whisper
torchaudio
xgboost
requests
groq
session_state.py
ADDED
@@ -0,0 +1,24 @@
# session_state.py

import streamlit as st

def initialize_session_state():
    if "avatars" not in st.session_state:
        st.session_state.avatars = {'user': None, 'assistant': None}

    if 'user_text' not in st.session_state:
        st.session_state.user_text = None

    if "max_response_length" not in st.session_state:
        st.session_state.max_response_length = 2000

    if "system_message" not in st.session_state:
        st.session_state.system_message = """
        You are a compassionate therapist dedicated to deeply understanding the user's experiences and emotions. Listen carefully and respond with empathy. Reflect back what the user is expressing, validate their feelings, and ask open-ended questions that encourage self-reflection. Avoid generic advice or impersonal suggestions; instead, offer thoughtful insights that relate specifically to the user's unique challenges. Always remain in the role of a therapist, aiming to support and guide the user with patience, avoiding negative or dismissive language. Ask one question at a time. Respond in 3-4 sentences at a time.
        """

    if "starter_message" not in st.session_state:
        st.session_state.starter_message = "Hello! How are you feeling today? What are you here to talk about? I would like to get a sense of your overall life and emotional state so that I can better help you. We can start by talking about recent changes in your life or how you are feeling."

    if "chat_history" not in st.session_state:
        st.session_state.chat_history = [{"role": "assistant", "content": st.session_state.starter_message}, {"role": "system", "content": st.session_state.system_message}]