import streamlit as st
from transformers import GPT2LMHeadModel, GPT2Tokenizer
from gtts import gTTS
import torch
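
# To run this app (assumed invocation; the file name may differ):
#   pip install streamlit transformers torch gtts
#   streamlit run <this_script>.py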

# Set up Streamlit page configuration (must be the first Streamlit call in the script)
st.set_page_config(page_title="Grief and Loss Support Bot", page_icon="🌿", layout="centered")

# Load the GPT-2 model and tokenizer from Hugging Face, cached so they are
# instantiated once per process rather than on every Streamlit rerun
@st.cache_resource
def load_model():
    tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
    model = GPT2LMHeadModel.from_pretrained("gpt2")
    return tokenizer, model

tokenizer, model = load_model()
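
# Note: the .css-* selectors below target Streamlit's auto-generated class names,
# which change between Streamlit releases, so this styling may need updating for
# other Streamlit versions.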
st.markdown("""
    <style>
    .css-1d391kg { background-color: #F3F7F6; }
    .css-ffhzg2 { font-size: 1.5em; font-weight: 500; color: #4C6D7D; }
    .stTextInput>div>div>input { background-color: #D8E3E2; }
    .stButton>button { background-color: #A9D0B6; color: white; border-radius: 5px; }
    .stButton>button:hover { background-color: #8FB79A; }
    .stTextInput>div>label { color: #4C6D7D; }
    </style>
""", unsafe_allow_html=True)

# Title and introduction to the bot
st.title("Grief and Loss Support Bot 🌿")
st.subheader("Your compassionate companion in tough times 💚")

# User input
user_input = st.text_input("Share what's on your mind...", placeholder="Type here...", max_chars=500)

# Store previous responses in session state so repetition can be detected (one possible check is sketched at the end of the script)
if 'previous_responses' not in st.session_state:
    st.session_state.previous_responses = []

# Function to generate a more empathetic and focused response
def generate_response(user_input):
    # Tokenize the input; sampling must be enabled (do_sample=True) for
    # temperature and top_k to have any effect
    encoded = tokenizer(user_input, return_tensors="pt")
    with torch.no_grad():  # inference only, no gradients needed
        outputs = model.generate(
            encoded["input_ids"],
            attention_mask=encoded["attention_mask"],
            max_length=200,
            do_sample=True,
            temperature=0.8,
            top_k=50,
            num_return_sequences=1,
            pad_token_id=tokenizer.eos_token_id
        )
    response_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
    
    # Add a suggestion for coping activity based on keywords in user input
    if "angry" in user_input.lower() or "frustrated" in user_input.lower():
        activity_suggestion = (
            "Sometimes, deep breathing exercises can help calm your mind. "
            "Try taking slow, deep breaths to regain a sense of calm and focus."
        )
    elif "sad" in user_input.lower() or "lonely" in user_input.lower():
        activity_suggestion = (
            "Writing about your feelings can be very therapeutic. "
            "Try journaling as a way to process and release some of your emotions."
        )
    else:
        activity_suggestion = (
            "Finding a creative outlet like drawing or painting can help. "
            "Art is a way to express feelings that might be difficult to put into words."
        )
    
    # Append the activity suggestion to the generated response
    response = f"{response_text}\n\nHere's something you could try to help cope with how you're feeling:\n{activity_suggestion}"
    
    return response

# Check if the user has typed something
if user_input:
    # Generate the empathetic response
    response = generate_response(user_input)
    
    # Store and show the new response
    st.session_state.previous_responses.append(response)
    st.text_area("Bot's Response:", response, height=250)

    # Text-to-speech output (optional; gTTS needs an internet connection to reach Google's TTS service)
    tts = gTTS(response, lang='en')
    audio_file = "response.mp3"
    tts.save(audio_file)
    st.audio(audio_file, format="audio/mp3")
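
# --- Optional repetition check (illustrative sketch, not part of the original app) ---
# The previous_responses list is stored above so repetition can be detected, but
# no check is actually performed. The lines below sketch one minimal, assumed
# approach: flag when the newest response is identical to an earlier one in this
# session. How to handle a repeat (regenerate, rephrase, or simply note it) is
# left open here.
history = st.session_state.previous_responses
if len(history) > 1 and history[-1] in history[:-1]:
    st.info("This response repeats an earlier one from this session.")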