Update app.py
app.py CHANGED
@@ -61,39 +61,57 @@ class AITherapistAssistant:
         return any(keyword in message_lower for keyword in SUICIDE_KEYWORDS)
 
     def generate_response(self, message: str) -> str:
-        # ... removed code not shown ...
+        """Generate a supportive AI response from the conversation model."""
+        if not self.conversation_model:
+            return (
+                "I'm having some technical difficulties right now. "
+                "I'm truly sorry I can't provide support at the moment. "
+                "Would you consider reaching out to a human counselor or support helpline?"
+            )
+
+        # Create a more structured and empathetic prompt
+        prompt = (
+            "You are a compassionate, empathetic AI therapist trained to provide supportive, "
+            "non-judgmental responses. Your goal is to validate feelings, offer gentle support, "
+            "and help the user feel heard and understood.\n\n"
+            "User's message: {}\n\n"
+            "Your compassionate response:".format(message)
+        )
+
+        try:
+            outputs = self.conversation_model(
+                prompt,
+                max_length=300,  # Increased length for more nuanced responses
+                num_return_sequences=1,
+                do_sample=True,
+                top_p=0.9,
+                temperature=0.7
             )
 
-        # ... removed code not shown ...
+            response_text = outputs[0]["generated_text"]
+
+            # More robust prompt stripping
+            if response_text.startswith(prompt):
+                response_text = response_text[len(prompt):].strip()
+
+            # Additional processing to ensure response quality
+            response_text = response_text.split('\n')[0].strip()  # Take first coherent line
+
+            # Fallback if response is too short or nonsensical
+            if len(response_text) < 20:
+                response_text = (
+                    "I hear you. Your feelings are valid, and it takes courage to share what you're experiencing. "
+                    "Would you like to tell me a bit more about what's on your mind?"
                 )
-        # ... removed code not shown ...
-        return "I'm sorry, but I'm having trouble responding right now."
+
+            return response_text
+        except Exception as e:
+            st.error(f"Error generating response: {e}")
+            return (
+                "I'm experiencing some difficulties right now. "
+                "Your feelings are important, and I want to be fully present for you. "
+                "Would you be willing to try sharing again?"
+            )
 
     def generate_summary(self, conversation_text: str) -> str:
         """Generate a short summary of the entire conversation."""
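For context on what the new generate_response expects: it calls self.conversation_model with a prompt plus sampling parameters and reads outputs[0]["generated_text"], which matches the interface of a Hugging Face transformers text-generation pipeline. The sketch below is a minimal, standalone illustration of that assumed interface; the model name is an illustrative placeholder, since this hunk does not show how the Space actually loads its model, and the st.error call in the except branch relies on the app's existing Streamlit import.

# Minimal sketch of the pipeline interface assumed by the new generate_response.
# Assumption: self.conversation_model is a transformers "text-generation" pipeline;
# "distilgpt2" is a placeholder, not the model the Space actually uses.
from transformers import pipeline

conversation_model = pipeline("text-generation", model="distilgpt2")

prompt = (
    "User's message: I've been feeling overwhelmed lately.\n\n"
    "Your compassionate response:"
)
outputs = conversation_model(
    prompt,
    max_length=300,          # counts prompt tokens plus the generated continuation
    num_return_sequences=1,
    do_sample=True,
    top_p=0.9,
    temperature=0.7,
)
# The pipeline returns a list of dicts, which is why the diff indexes
# outputs[0]["generated_text"] before stripping the prompt back off.
print(outputs[0]["generated_text"])

One note on the generation settings: because max_length includes the prompt's own tokens, a long instruction prompt leaves fewer tokens for the reply; max_new_tokens is the transformers parameter to reach for if the continuation needs a guaranteed budget.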