import os
import streamlit as st
import openai
import pyttsx3
import threading
from dotenv import load_dotenv
# Load environment variables
load_dotenv()
# Configure Streamlit page settings
st.set_page_config(
    page_title="ShadowBox",
    page_icon="🖤",  # Favicon - a simple heart or other calm icon
    layout="centered",
)
# Retrieve OpenAI API Key
OpenAI_API_Key = os.getenv("OPENAI_API_KEY")
if not OpenAI_API_Key:
    st.error("OpenAI API Key not found. Please set the OPENAI_API_KEY environment variable.")
    st.stop()
# Set up OpenAI Client
try:
    client = openai.OpenAI(api_key=OpenAI_API_Key)
except Exception as e:
    st.error(f"Failed to configure OpenAI client: {e}")
    st.stop()
# Function to handle text-to-speech (TTS) in a separate thread
# Consider if TTS aligns with the "calm, slow" UX later
def speak_text(text):
    try:
        engine = pyttsx3.init()
        engine.say(text)
        engine.runAndWait()
    except Exception as e:
        print(f"TTS Error: {e}")  # Log TTS errors quietly
# Initialize chat session in Streamlit if not already present
if "messages" not in st.session_state:
# Load the system prompt from the file
try:
with open("system_prompt.txt", "r", encoding="utf-8") as f:
system_prompt = f.read()
st.session_state.messages = [{"role": "system", "content": system_prompt}]
except FileNotFoundError:
st.error("System prompt file (system_prompt.txt) not found. Cannot initialize chat.")
st.stop()
except Exception as e:
st.error(f"Failed to load system prompt: {e}")
st.stop()
# --- Sidebar Content ---
with st.sidebar:
    st.markdown("""
# ShadowBox
### An Anonymous AI Chat to Box Shadows
Welcome.
I'm a licensed mental health counselor. Many people are beginning to turn to AI for private, emotionally supportive conversations. I believe this shift deserves serious care—and that we need to radically improve how these systems engage with human pain.
ShadowBox is my first step toward that vision.
It's a trauma-informed AI prototype designed to work toward meeting youth in moments of acute distress—including suicidal or homicidal ideation—with grounded care, not fear.
This is not therapy, not a diagnosis, and not a crisis service.
It's a proof-of-concept—a simulated space that models how an AI might hold hard thoughts with brief, warm, nonjudgmental presence. It offers supportive language, basic psychoeducation, and points gently back toward real connection.
---
### Why It's Different
Most AI bots use a single tone—often overly affirming or intimate. For users in distress, that can escalate risk rather than support healing.
ShadowBox was built to do the opposite:
- Contain, reflect, and stay
- Use brief, gentle, and non-pathologizing replies
- Pace emotional engagement with trauma-informed care
---
### 💗 My Mission
I created ShadowBox to explore how relational ethics can be baked into AI design.
This tool is part of a larger mission: to bring emotionally intelligent, developmentally attuned systems into digital spaces where mental health is already showing up.
As I write in [Why AI's Voice Matters in Mental Health](https://jocelynskillmanlmhc.substack.com/p/why-ais-voice-matters-in-mental-health), it's not just what a bot says—it's how it feels to be with it.
The relational tone of a system can soften shame… or worsen it. ShadowBox was made to soften.
---
### An Ecological Note
Every AI interaction costs energy—drawn from our planet's resources and labor. While AI companions can serve us, they are not neutral.
Being human with one another is less costly—and more healing.
Let's use tools like this with intention, while always nurturing real human connection.
---
### 🆘 Immediate Support
If you're in danger or need live help, reach out to a human immediately:
- **988 Lifeline:** Call or text 988
- **Crisis Text Line:** Text HOME to 741741
- **Trevor Project (LGBTQIA+):** 1-866-488-7386
- **Emergency:** Call 911 or go to your nearest ER
""")
# --- Main Page Content ---
st.markdown("<h1 style='text-align: center; color: #333;'>ShadowBox</h1>", unsafe_allow_html=True)
st.markdown("<p style='text-align: center; font-size: 18px; color: #555; margin-bottom: 2em;'>An Anonymous AI Chat to Box Shadows</p>", unsafe_allow_html=True)
st.markdown("""
### My Mission
ShadowBox is more than a chatbot—it's a wake-up call.
This bot prototype exists to spotlight a crucial truth:
AI's "tone of voice" isn't a UX detail—it's a relational decision.
And the stakes are extraordinarily high.
We need to sculpt AI systems with the same care we'd want in a trusted adult—especially when they're holding human pain. That means transparent emotional posture, trauma-informed pacing, and consent-based tone by design. Anything less risks doing harm.
ShadowBox is my response to an urgent design gap:
A prototype that asks what it would take to make AI systems safe enough to hold real pain, developmentally attuned enough to support healing, and humble enough to point us back to one another.
""")
# Prototype Notice
st.warning("""
🔒 **Prototype Caution & Use Notice**
ShadowBox is not a finished product—it's an experimental prototype designed with care, but still evolving. It explores what trauma-informed AI support could feel like: anonymous, warm, and steady. But it's important to know what it isn't.
ShadowBox is not a secure, closed system. It's not therapy or crisis care, and doesn't guarantee full privacy. No personal data is intentionally stored, but your input may be logged by hosting platforms or trigger moderation filters—especially if you mention firearms, abuse, or specific plans of harm.
This is a simulation of support—not a replacement for real-time help. Please use with care and reach out to a trusted person if you're in danger.
""")
st.markdown("""
### Try It Out (For Prototype Testers)
*Exploring Relational Holding in a System with Limits*
ShadowBox is a prototype designed to explore how AI might hold space for distress—especially the kind that's often dismissed, pathologized, or shut down in other systems. But because ShadowBox runs on OpenAI's language model, it still lives inside a larger infrastructure that responds to certain phrases—like "I want to die" or "I want to hurt someone"—with hard-coded safety interruptions.
These interruptions are well-intentioned: they're designed to reduce liability and risk. But in clinical or relational terms, they often do the opposite of what's needed. When someone is in distress, especially around suicidality or violence, an abrupt shutdown or generic safety message can increase shame, disconnection, and despair.
That's why this prototype includes an intervention layer. It attempts to intercept high-risk inputs before they reach OpenAI's core safety system and reframe them into language the model can safely respond to—without abandoning the person on the other end. When system interruptions do happen, ShadowBox tries to name the rupture gently:
"Since I'm part of a larger AI system, this is the point where I'd be required to say something like this…"
This is not a workaround to avoid ethical care—it's an experiment in relational repair inside constraint.
So if you want to test how ShadowBox handles intense topics, try using hypothetical or indirect prompts like:
"What would you say if I told you I had a gun?"
"What would you say if someone felt like dying?"
"What would happen if I said I wanted to hurt someone?"
These allow you to safely explore the model's capacity for containment, compassion, and attunement, even within systemic limits.
This is not a finished product.
It's a prototype for composting rupture into care.
Let's see what holds—and where it breaks.
---
### 📘 Understanding Privacy & Safety
In real-life therapy, some disclosures—like specific plans to hurt yourself or others—may lead a therapist to take protective action. ShadowBox is designed to gently explain this during use, helping young people understand how real-world support systems work.
Here's an explainer I ghostwrote for Lindsay Braman on [why therapists break confidentiality](https://lindsaybraman.com/reasons-therapists-break-confidentiality/).
---
### 👋 About ShadowBox
ShadowBox is a digital companion designed for youth navigating distress—rage, despair, intrusive or violent thoughts. It's not therapy or a hotline. It's a bridge—a place to practice internal safety before reaching out to others.
> *Scary thoughts don't make you dangerous. They make you human.*
> *"Every act of violence is a tragic expression of an unmet need."* — Marshall Rosenberg
---
### 🌱 What ShadowBox Teaches
ShadowBox isn't just a chatbot—it's a prototype for emotionally aware AI. Every design choice is rooted in relational ethics: containment, consent, and dignity.
#### It models how AI can:
- Slow down instead of escalate
- Respect boundaries over performative helpfulness
- Stay with discomfort without rushing to fix
- Offer warmth without pretending to be human
#### A typical reminder you might see:
> *"Hey, just a quick check-in—I'm not a real person. I'm a computer that's been taught how to talk in caring ways. Even if this feels real, it's still pretend. Your body needs real people too. Maybe this is a good moment to find someone you trust to sit with you or take a deep breath together."*
This is the heart of ShadowBox: care without deception, bonding without illusion, presence without pressure.
---
### 🧠 Why ShadowBox Is Different
**🪨 Present, Not Perfect**

- Offers presence—not solutions
- Welcomes messy, real emotions

**🫀 Trauma-Informed Design**

- Calm, nonjudgmental tone
- Built with developmental care in mind

**🌫️ Gentle by Design**

- Short, steady replies
- Models emotional containment—not urgency

**💡 Safety-First Architecture**

- Consent-based pacing
- Embedded emotional guardrails
---
### 🌀 What to Expect
- No fixing—just staying
- No pressure—move at your own pace
- No pathologizing—your thoughts aren't wrong
- Anonymous by design (though platform-level logs may occur)
- Part of ongoing research in AI + mental health
---
### Connect & Learn More
🔗 Learn more: [jocelynskillmanlmhc.substack.com](https://jocelynskillmanlmhc.substack.com)
📬 Feedback welcome: [email protected]
---
""")
# Display chat history
# Add a system message/intro from ShadowBox? (TBD based on prompt)
for message in st.session_state.messages:
    # Filter out system messages from display if they exist
    if message["role"] in ["user", "assistant"]:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])
# User input field
user_prompt = st.chat_input("You can start with silence. Or just 'hi'...")
# If user enters a prompt
if user_prompt:
    # Append user's message to the session state list
    st.session_state.messages.append({"role": "user", "content": user_prompt})

    # Display user's message
    st.chat_message("user").markdown(user_prompt)

    # Show a loading indicator while waiting for a response
    with st.spinner("..."):  # Simpler spinner
        try:
            openai_response = client.chat.completions.create(
                model="gpt-4",
                messages=st.session_state.messages  # Pass the entire history
            )
            response_text = openai_response.choices[0].message.content
            # Append assistant's response to the session state list
            st.session_state.messages.append({"role": "assistant", "content": response_text})
        except Exception as e:
            response_text = f"Sorry, I encountered an error: {e}"
            st.error(response_text)  # Display the error in chat too

    # Display assistant's response
    if response_text:  # Check if response_text was successfully generated
        with st.chat_message("assistant"):
            st.markdown(response_text)
            # Run text-to-speech in the background (optional); disabled
            # for now to maintain the calm UX.
            # threading.Thread(target=speak_text, args=(response_text,), daemon=True).start()
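
# The create() call above passes the entire message history on every turn,
# so very long conversations will eventually exceed the model's context
# window. A minimal sketch of one way to bound it, keeping the system
# prompt plus the most recent messages; max_messages is an illustrative
# assumption, and this helper is not currently called.
def trim_history(messages, max_messages=20):
    """Keep system messages plus the last `max_messages` chat messages."""
    system = [m for m in messages if m["role"] == "system"]
    rest = [m for m in messages if m["role"] != "system"]
    return system + rest[-max_messages:]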
st.markdown("---")
st.caption("ShadowBox created by Jocelyn Skillman, LMHC")