import streamlit as st
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
import pyttsx3

# Set up the page configuration for a welcoming appearance
st.set_page_config(page_title="Grief and Loss Support Bot", page_icon="πŸ•ŠοΈ", layout="centered")

# Customize the app style for a soothing and modern look.
# Note: the .css-* selectors below are Streamlit's auto-generated class names
# and may break when the Streamlit version changes.
st.markdown("""
    <style>
    .css-1d391kg {
        background-color: #F3F7F6;
    }
    .css-ffhzg2 {
        font-size: 1.5em;
        font-weight: 500;
        color: #4C6D7D;
    }
    .stTextInput>div>div>input {
        background-color: #D8E3E2;
    }
    .stButton>button {
        background-color: #A9D0B6;
        color: white;
        border-radius: 5px;
        border: none;
    }
    .stButton>button:hover {
        background-color: #8FB79A;
    }
    .stTextInput>div>label {
        color: #4C6D7D;
    }
    </style>
    """, unsafe_allow_html=True)

# Title and introduction
st.title("Grief and Loss Support Bot πŸ•ŠοΈ")
st.subheader("We are here for you πŸ’š your companion in tough times.")

# Load the model and tokenizer for text generation.
# st.cache_resource keeps a single pipeline per session instead of reloading the model on every rerun.
model_name = "microsoft/DialoGPT-medium"

@st.cache_resource
def load_generator(name):
    tokenizer = AutoTokenizer.from_pretrained(name)
    model = AutoModelForCausalLM.from_pretrained(name)
    return pipeline("text-generation", model=model, tokenizer=tokenizer, device=0 if torch.cuda.is_available() else -1)

try:
    text_gen_pipeline = load_generator(model_name)
except Exception as e:
    st.error(f"Error loading the conversational model: {e}")
    st.stop()  # The rest of the app cannot run without the model

# Initialize the TTS engine.
# Note: pyttsx3 plays audio on the machine running the app, so this only works
# when the app runs locally (not on a remote Streamlit server).
try:
    tts_engine = pyttsx3.init()
    tts_engine.setProperty('rate', 150)  # Adjust the speed of speech
    tts_engine.setProperty('voice', tts_engine.getProperty('voices')[0].id)  # Use the first available voice
except Exception as e:
    tts_engine = None  # Disable the audio feature if initialization fails
    st.error(f"Error initializing the TTS engine: {e}")

# User input for conversation
user_input = st.text_input("Share what's on your mind...", placeholder="Type here...", max_chars=500)

if user_input:
    # Generate a conversational response
    try:
        response = text_gen_pipeline(user_input, max_length=100, num_return_sequences=1)
        # The pipeline returns the prompt followed by the continuation, so strip the prompt
        response_text = response[0]['generated_text'][len(user_input):].strip()

        st.write("Bot's Response:")
        st.write(response_text)

        # Convert the response text to speech (skipped if the TTS engine is unavailable)
        if tts_engine is not None and st.button("Play Response Audio"):
            tts_engine.say(response_text)
            tts_engine.runAndWait()

    except Exception as e:
        st.error(f"Error generating response: {e}")