File size: 3,816 Bytes
fd64912
c017ee5
b4bd0de
d2a9917
9216a0a
c017ee5
 
 
f1985ef
d2a9917
8f34be7
9216a0a
 
8f34be7
 
9dfe35d
 
8f34be7
 
9216a0a
 
 
d2a9917
8f34be7
9216a0a
 
d2a9917
7645fdc
9216a0a
d2a9917
 
 
 
c017ee5
d2a9917
c017ee5
 
 
afcc215
d2a9917
c017ee5
 
 
30a16a1
afcc215
c17e34b
afcc215
 
c17e34b
 
 
 
 
 
 
c017ee5
d2a9917
3dfc83c
d2a9917
 
 
 
 
 
 
 
dad612d
d2a9917
dad612d
9216a0a
80d269f
f1985ef
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
import io

import streamlit as st
import torch
from gtts import gTTS
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load DialoGPT model and tokenizer from Hugging Face.
# Streamlit re-executes this entire script on every user interaction, so
# without caching the model would be re-instantiated (and potentially
# re-downloaded) on each rerun. st.cache_resource keeps one shared copy
# alive across reruns and sessions.
@st.cache_resource
def _load_dialogpt():
    """Return the (tokenizer, model) pair for microsoft/DialoGPT-medium."""
    tok = AutoTokenizer.from_pretrained("microsoft/DialoGPT-medium")
    mdl = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-medium")
    return tok, mdl

tokenizer, model = _load_dialogpt()

# Set up Streamlit page configuration.
# Note: set_page_config must be the first Streamlit call in the script.
st.set_page_config(page_title="Grief and Loss Support Bot", page_icon="๐ŸŒฟ", layout="centered")
# Inject custom CSS for a calm, muted color palette.
# NOTE(review): the .css-1d391kg / .css-ffhzg2 selectors are auto-generated
# Streamlit class names and are fragile across Streamlit versions — confirm
# they still match the deployed Streamlit release.
st.markdown("""
    <style>
    .css-1d391kg { background-color: #F3F7F6; }
    .css-ffhzg2 { font-size: 1.5em; font-weight: 500; color: #4C6D7D; }
    .stTextInput>div>div>input { background-color: #D8E3E2; }
    .stButton>button { background-color: #A9D0B6; color: white; border-radius: 5px; }
    .stButton>button:hover { background-color: #8FB79A; }
    .stTextInput>div>label { color: #4C6D7D; }
    </style>
""", unsafe_allow_html=True)

# Title and introduction to the bot.
st.title("Grief and Loss Support Bot ๐ŸŒฟ")
st.subheader("Your compassionate companion in tough times ๐Ÿ’š")

# Free-text user input, capped at 500 characters.
user_input = st.text_input("Share what's on your mind...", placeholder="Type here...", max_chars=500)

# Persist prior bot responses across Streamlit reruns for this session.
# (Currently only appended to; intended for repetition checks.)
if 'previous_responses' not in st.session_state:
    st.session_state.previous_responses = []

# Function to generate a more empathetic and focused response using DialoGPT.
def generate_response(user_input):
    """Generate an empathetic support message for *user_input*.

    Runs DialoGPT-medium to produce a short conversational reply, then
    appends hand-written supportive text plus keyword-triggered coping
    strategies ("hunger", "overwhelmed").

    Parameters
    ----------
    user_input : str
        The user's free-text message.

    Returns
    -------
    str
        The combined bot response (model output + supportive text).
    """
    # Encode the prompt; DialoGPT expects the EOS token appended to each turn.
    bot_input_ids = tokenizer.encode(user_input + tokenizer.eos_token, return_tensors='pt')

    # Bug fix: temperature/top_k only take effect with do_sample=True —
    # without it generate() uses greedy decoding and silently ignores them.
    # no_grad() skips autograd bookkeeping during inference.
    with torch.no_grad():
        chat_history_ids = model.generate(
            bot_input_ids,
            max_length=200,
            pad_token_id=tokenizer.eos_token_id,
            do_sample=True,
            temperature=0.7,
            top_k=50,
            repetition_penalty=1.2,
        )

    # Strip the echoed prompt so only the newly generated tokens remain.
    generated_ids = chat_history_ids[:, bot_input_ids.shape[-1]:]
    bot_output = tokenizer.decode(generated_ids[0], skip_special_tokens=True)

    # Generic empathetic framing. (Bug fix: the hunger-specific sentence was
    # previously appended unconditionally; it now lives in the hunger branch.)
    response = f"{bot_output}\n\nI'm really sorry you're feeling like this. It's completely okay to feel sad and overwhelmed. Your emotions are valid, and it's important to acknowledge them rather than push them aside."

    # Keyword-triggered coping strategies.
    lowered = user_input.lower()
    if "hunger" in lowered:
        response += "\n\nSometimes, hunger can make everything feel more intense, so taking care of your body is just as important as your emotional well-being. If possible, try to get something nourishing to eat. A healthy snack or meal can help soothe the physical discomfort you're feeling and also support your emotional resilience. Remember, it's okay to take breaks for your well-being."
    elif "overwhelmed" in lowered:
        response += "\n\nItโ€™s okay to feel like you're carrying a heavy load. Break down your tasks into small, manageable pieces and give yourself permission to take things one step at a time. You don't need to have everything figured out at once. Be gentle with yourself."

    # General supportive closing message, always appended.
    response += "\n\nYou're doing the best you can, and it's okay to ask for help when needed. Remember that it's okay to not be okay sometimes. Please be kind to yourself during this challenging time."

    return response

# Main interaction: runs on every Streamlit rerun when the input is non-empty.
if user_input:
    # Generate the empathetic response.
    response = generate_response(user_input)

    # Store and show the new response.
    st.session_state.previous_responses.append(response)
    st.text_area("Bot's Response:", response, height=250)

    # Text-to-speech output (optional). gTTS calls Google's TTS web API,
    # so it can raise on network failure — degrade gracefully instead of
    # crashing the whole app. Audio is kept in memory via BytesIO rather
    # than writing a response.mp3 file into the working directory.
    try:
        tts = gTTS(response, lang='en')
        audio_buffer = io.BytesIO()
        tts.write_to_fp(audio_buffer)
        audio_buffer.seek(0)  # rewind so st.audio reads from the start
        st.audio(audio_buffer, format="audio/mp3")
    except Exception as exc:
        # Best-effort feature: report and continue without audio.
        st.warning(f"Audio playback unavailable: {exc}")