Bey007 committed
Commit 7f87605 · verified · 1 Parent(s): a452a78

Update app.py

Files changed (1)
  1. app.py +25 -15
app.py CHANGED
@@ -3,6 +3,7 @@ from transformers import pipeline
 from gtts import gTTS
 from pytube import Search
 import os
+import random
 
 # Initialize conversational model for empathetic dialogue
 conversational_bot = pipeline("text-generation", model="microsoft/DialoGPT-medium")
@@ -30,33 +31,42 @@ st.subheader("Your compassionate companion in tough times 💚")
 # Get user input
 user_input = st.text_input("Share what's on your mind...", placeholder="Type here...", max_chars=500)
 
-# Store previous response to check for repetition
-if 'previous_response' not in st.session_state:
-    st.session_state.previous_response = ""
+# Store previous responses to check for repetition
+if 'previous_responses' not in st.session_state:
+    st.session_state.previous_responses = []
 
 # Check if user has entered text
 if user_input:
     # Run sentiment analysis to check for distress
    sentiment = sentiment_analysis(user_input)[0]
 
-    # Generate empathetic response with increased length and more context
-    response = conversational_bot(user_input, max_length=300, temperature=0.9, top_k=50, num_return_sequences=1)
+    # Generate multiple responses with increased randomness
+    responses = conversational_bot(user_input, max_length=300, temperature=1, top_k=50, num_return_sequences=3)
 
-    # Choose the response that does not repeat what the user said
-    best_response = response[0]['generated_text']
+    # Filter out any responses that are too similar to previous responses
+    new_responses = [response['generated_text'] for response in responses]
+    new_responses = [resp for resp in new_responses if resp.lower() not in [prev.lower() for prev in st.session_state.previous_responses]]
 
-    # Ensure the response is supportive and does not repeat the user's input
-    if user_input.lower() in best_response.lower():
-        best_response = "I understand how you're feeling. You're not alone in this. I'm here to listen and help."
-
-    # Store the response for future comparison
-    st.session_state.previous_response = best_response
+    # If there are valid new responses, pick one, otherwise fallback
+    if new_responses:
+        selected_response = random.choice(new_responses)
+    else:
+        # If no new response, fallback to a more generic empathetic message
+        fallback_responses = [
+            "I understand how you're feeling. You're not alone in this. I'm here to listen and help.",
+            "I'm really sorry you're going through this. Let's take one step at a time. I'm here for you.",
+            "It sounds really tough right now. It's okay to feel overwhelmed. You're doing your best, and that's enough."
+        ]
+        selected_response = random.choice(fallback_responses)
+
+    # Store the new response for future checks
+    st.session_state.previous_responses.append(selected_response)
 
     # Display response
-    st.text_area("Bot's Response:", best_response, height=250)
+    st.text_area("Bot's Response:", selected_response, height=250)
 
     # Text-to-speech output
-    tts = gTTS(best_response, lang='en')
+    tts = gTTS(selected_response, lang='en')
     audio_file = "response.mp3"
     tts.save(audio_file)
     st.audio(audio_file, format="audio/mp3")
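
For reference, the selection logic this commit introduces (generate several candidates, drop any that repeat earlier replies, otherwise fall back to a canned empathetic message) can be exercised outside Streamlit. A minimal sketch, assuming a hypothetical helper name pick_response and a plain Python list in place of st.session_state.previous_responses; neither name is part of the commit, and the candidate strings stand in for the generated_text fields returned by the text-generation pipeline:

import random

# Fallback messages mirror the ones added in this commit.
FALLBACK_RESPONSES = [
    "I understand how you're feeling. You're not alone in this. I'm here to listen and help.",
    "I'm really sorry you're going through this. Let's take one step at a time. I'm here for you.",
    "It sounds really tough right now. It's okay to feel overwhelmed. You're doing your best, and that's enough."
]

def pick_response(candidates, previous_responses):
    """Return a candidate not used before (case-insensitive); else a random fallback."""
    seen = {prev.lower() for prev in previous_responses}
    fresh = [c for c in candidates if c.lower() not in seen]
    selected = random.choice(fresh) if fresh else random.choice(FALLBACK_RESPONSES)
    # Record the choice so later calls can filter it out, as the app does.
    previous_responses.append(selected)
    return selected

# Example: the second call filters out the already-used candidate.
history = []
print(pick_response(["I'm here for you.", "Tell me more."], history))
print(pick_response(["I'm here for you."], history))  # falls back, since the only candidate was used

Because the filter is an exact case-insensitive match, near-duplicates still pass; the commit's comment says "too similar", but the code as written only catches verbatim repeats.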