Bey007 committed
Commit d6c911d
Parent: 3812715

Update app.py

Files changed (1): app.py (+46, -56)
app.py CHANGED
@@ -1,18 +1,14 @@
  import streamlit as st
- from transformers import pipeline, GPT2LMHeadModel, GPT2Tokenizer
+ from transformers import AutoModelForCausalLM, AutoTokenizer
  from gtts import gTTS
- from pytube import Search
- import os
  import random
+ from youtubesearchpython import Search

- # Initialize GPT-2 model and tokenizer from Hugging Face
- tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
- model = GPT2LMHeadModel.from_pretrained("gpt2")
+ # Load DialoGPT model and tokenizer from Hugging Face
+ tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-medium")
+ model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-medium")

- # Create a text generation pipeline using GPT-2
- generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
-
- # Set up Streamlit page
+ # Set up Streamlit page configuration
  st.set_page_config(page_title="Grief and Loss Support Bot", page_icon="🌿", layout="centered")
  st.markdown("""
  <style>
@@ -25,61 +21,38 @@ st.markdown("""
  </style>
  """, unsafe_allow_html=True)

- # Title
+ # Title and introduction to the bot
  st.title("Grief and Loss Support Bot 🌿")
  st.subheader("Your compassionate companion in tough times 💚")

- # Get user input
+ # User input
  user_input = st.text_input("Share what's on your mind...", placeholder="Type here...", max_chars=500)

  # Store previous responses to check for repetition
  if 'previous_responses' not in st.session_state:
      st.session_state.previous_responses = []

- # Check if user has entered text
- if user_input:
-     # Refined prompt for better empathetic responses
-     prompt = f"User is feeling overwhelmed with emotional distress and is going through a tough time. Respond empathetically, offering support and understanding. The user's input: {user_input}"
-
-     # Run the text generation model to generate a response based on user input
-     generated_responses = generator(prompt, max_length=200, num_return_sequences=3, temperature=0.7)
-
-     # Filter out any responses that are too similar to previous responses or user input
-     new_responses = [response['generated_text'].strip() for response in generated_responses]
-     new_responses = [resp for resp in new_responses if resp.lower() not in [prev.lower() for prev in st.session_state.previous_responses] and resp.lower() != user_input.lower()]
-
-     # If there are valid new responses, pick one, otherwise fallback
-     if new_responses:
-         selected_response = random.choice(new_responses)
-     else:
-         # If no new response, fallback to a more generic empathetic message
-         fallback_responses = [
-             "I understand how you're feeling. You're not alone in this. I'm here to listen and help.",
-             "I'm really sorry you're going through this. Let's take one step at a time. I'm here for you.",
-             "It sounds really tough right now. It's okay to feel overwhelmed. You're doing your best, and that's enough."
-         ]
-         selected_response = random.choice(fallback_responses)
-
-     # Add extra empathetic phrases to the response
-     extra_empathy = [
-         "It’s completely normal to feel this way when things get tough. You're doing great by reaching out.",
-         "I know it can feel like a lot right now, but one step at a time. You're not alone in this.",
-         "Even in the toughest times, remember that there’s always support around you."
-     ]
-     selected_response += " " + random.choice(extra_empathy)
-
-     # Store the new response for future checks
-     st.session_state.previous_responses.append(selected_response)
-
-     # Display response
-     st.text_area("Bot's Response:", selected_response, height=250)
-
-     # Text-to-speech output
-     tts = gTTS(selected_response, lang='en')
-     audio_file = "response.mp3"
-     tts.save(audio_file)
-     st.audio(audio_file, format="audio/mp3")
-
+ # Function to generate a more empathetic and focused response using DialoGPT
+ def generate_response(user_input):
+     # Encode the input text and generate a response
+     new_user_input_ids = tokenizer.encode(user_input + tokenizer.eos_token, return_tensors='pt')
+     bot_input_ids = new_user_input_ids
+     chat_history_ids = model.generate(bot_input_ids, max_length=200, pad_token_id=tokenizer.eos_token_id, temperature=0.7, top_k=50, repetition_penalty=1.2)
+
+     # Decode the response to text
+     chat_history_ids = chat_history_ids[:, bot_input_ids.shape[-1]:]  # remove the input from the response
+     bot_output = tokenizer.decode(chat_history_ids[0], skip_special_tokens=True)
+
+     # Build a more empathetic and thoughtful response
+     response = f"I’m really sorry you're feeling like this. It’s completely normal to feel overwhelmed when you're facing a heavy workload. It’s important to acknowledge how you feel and not keep it bottled up. Sometimes, stress and emotional exhaustion can build up, and it’s okay to let yourself feel those emotions."
+
+     # Add coping strategies based on the situation
+     if "workload" in user_input.lower():
+         response += "\n\nWhen the workload feels too heavy, it can be helpful to break tasks down into smaller, more manageable steps. Focus on one thing at a time, and remember that it’s okay to take breaks when needed. Asking for support from colleagues or friends is also a good way to lighten the load."
+
+     # Add general supportive message
+     response += "\n\nYou're doing your best, and that’s all anyone can ask for. Please take care of yourself and know that it’s okay to take a step back when things feel too much. Your well-being is the most important thing."
+
      # Suggest a productive activity based on detected keywords
      if any(keyword in user_input.lower() for keyword in ["lonely", "lost", "sad", "overwhelmed", "academic", "exam"]):
          st.info("Here's a suggestion to help you cope:")
@@ -111,3 +84,20 @@ if user_input:
      if any(keyword in user_input.lower() for keyword in crisis_keywords):
          st.warning("It seems like you might be in distress. Please reach out to a crisis hotline or a trusted individual.")
          st.write("[Find emergency resources here](https://www.helpguide.org/find-help.htm)")
+
+     return response
+
+ # Check if the user has typed something
+ if user_input:
+     # Generate the empathetic response
+     response = generate_response(user_input)
+
+     # Store and show the new response
+     st.session_state.previous_responses.append(response)
+     st.text_area("Bot's Response:", response, height=250)
+
+     # Text-to-speech output (optional)
+     tts = gTTS(response, lang='en')
+     audio_file = "response.mp3"
+     tts.save(audio_file)
+     st.audio(audio_file, format="audio/mp3")
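
As a quick sanity check of the new generation path, the DialoGPT logic added in generate_response can be exercised outside Streamlit. The sketch below is an illustration only, not part of the commit: it reuses the model name and generation arguments from the diff, the helper name dialo_reply is hypothetical, and do_sample=True is an added assumption so that the temperature and top_k settings actually influence decoding.

# Minimal standalone sketch of the DialoGPT generation step (illustration, not part of the commit)
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-medium")
model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-medium")

def dialo_reply(user_text: str) -> str:
    # Encode the user text plus the end-of-sequence token, as in the diff
    input_ids = tokenizer.encode(user_text + tokenizer.eos_token, return_tensors="pt")
    # Generate a continuation; do_sample=True is an assumption added here so that
    # temperature/top_k take effect (they are ignored under greedy decoding)
    output_ids = model.generate(
        input_ids,
        max_length=200,
        pad_token_id=tokenizer.eos_token_id,
        do_sample=True,
        temperature=0.7,
        top_k=50,
        repetition_penalty=1.2,
    )
    # Keep only the newly generated tokens and decode them to text
    reply_ids = output_ids[:, input_ids.shape[-1]:]
    return tokenizer.decode(reply_ids[0], skip_special_tokens=True)

if __name__ == "__main__":
    print(dialo_reply("I feel overwhelmed by my workload."))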