# CompanAIon / app.py
import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer
from gtts import gTTS
import torch
# Load DialoGPT model and tokenizer from Hugging Face
tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-medium")
model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-medium")
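# A cached alternative (sketch, assuming Streamlit >= 1.18 for st.cache_resource): Streamlit
# reruns this script on every interaction, so wrapping the load in a cached function would keep
# a single copy of the model in memory instead of reloading it each time. Shown as a comment so
# the eager load above remains the active path:
#
# @st.cache_resource
# def load_dialogpt():
#     return (AutoTokenizer.from_pretrained("microsoft/DialoGPT-medium"),
#             AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-medium"))
#
# tokenizer, model = load_dialogpt()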
# Set up Streamlit page configuration
st.set_page_config(page_title="Grief and Loss Support Bot", page_icon="🌿", layout="centered")
st.markdown("""
<style>
.css-1d391kg { background-color: #F3F7F6; }
.css-ffhzg2 { font-size: 1.5em; font-weight: 500; color: #4C6D7D; }
.stTextInput>div>div>input { background-color: #D8E3E2; }
.stButton>button { background-color: #A9D0B6; color: white; border-radius: 5px; }
.stButton>button:hover { background-color: #8FB79A; }
.stTextInput>div>label { color: #4C6D7D; }
</style>
""", unsafe_allow_html=True)
# Title and introduction to the bot
st.title("Grief and Loss Support Bot 🌿")
st.subheader("Your compassionate companion in tough times 💚")
# User input
user_input = st.text_input("Share what's on your mind...", placeholder="Type here...", max_chars=500)
# Store previous responses to check for repetition
if 'previous_responses' not in st.session_state:
    st.session_state.previous_responses = []
# Function to generate a more empathetic and focused response using DialoGPT
def generate_response(user_input):
    # Encode the user's message and append the end-of-sequence token
    new_user_input_ids = tokenizer.encode(user_input + tokenizer.eos_token, return_tensors='pt')
    bot_input_ids = new_user_input_ids
    # Generate a reply; do_sample=True is needed so temperature/top_k actually take effect
    chat_history_ids = model.generate(bot_input_ids, max_length=200, pad_token_id=tokenizer.eos_token_id,
                                      do_sample=True, temperature=0.7, top_k=50, repetition_penalty=1.2)
    # Decode the response to text
    chat_history_ids = chat_history_ids[:, bot_input_ids.shape[-1]:]  # remove the input from the response
    bot_output = tokenizer.decode(chat_history_ids[0], skip_special_tokens=True)
    # Build a more empathetic and thoughtful response around the model output
    response = f"{bot_output}\n\nI'm really sorry you're feeling like this. It's completely okay to feel sad and overwhelmed. Your emotions are valid, and it's important to acknowledge them rather than push them aside."
    # Add coping strategies based on the situation
    if "hunger" in user_input.lower():
        response += "\n\nSometimes hunger can make everything feel more intense, so taking care of your body is just as important as your emotional well-being. If possible, try to get something nourishing to eat. A healthy snack or meal can help soothe the physical discomfort you're feeling and also support your emotional resilience. Remember, it's okay to take breaks for your well-being."
    elif "overwhelmed" in user_input.lower():
        response += "\n\nIt's okay to feel like you're carrying a heavy load. Break down your tasks into small, manageable pieces and give yourself permission to take things one step at a time. You don't need to have everything figured out at once. Be gentle with yourself."
    # Add a general supportive message
    response += "\n\nYou're doing the best you can, and it's okay to ask for help when needed. Remember that it's okay to not be okay sometimes. Please be kind to yourself during this challenging time."
    return response
# Check if the user has typed something
if user_input:
    # Generate the empathetic response
    response = generate_response(user_input)
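    # Minimal repetition check (an assumption, not part of the original logic): the
    # previous_responses list above is described as existing for this purpose, so if the exact
    # same reply has already been shown, fall back to a simple supportive line instead.
    if response in st.session_state.previous_responses:
        response = "I'm still here with you. Would you like to tell me a little more about how you're feeling?"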
    # Store and show the new response
    st.session_state.previous_responses.append(response)
    st.text_area("Bot's Response:", response, height=250)
    # Text-to-speech output (optional)
    tts = gTTS(response, lang='en')
    audio_file = "response.mp3"
    tts.save(audio_file)
    st.audio(audio_file, format="audio/mp3")