Ganesh89 committed on
Commit ec43d7b · verified · 1 Parent(s): 8ea7224

removed speech feature

Files changed (1)
  1. app.py +122 -166
app.py CHANGED
@@ -1,166 +1,122 @@
- import streamlit as st
-
- page_bg_img="""
- <style>
- [data-testid="stAppViewContainer"] {
- background-image: url("https://i.pinimg.com/originals/d4/d7/2f/d4d72f71231ae5995e425b7a813d87f6.webp");
- background-size: cover;
- }
-
- [data-testid="stAppViewContainer"]::before {
- content: "";
- position: absolute;
- top: 0;
- left: 0;
- right: 0;
- bottom: 0;
- background: rgba(0, 0, 0, 0.5);
- pointer-events: none;
- }
-
-
- [data-testid="stToolbar"] {
- right: 2rem;
- }
-
- [data-testid="stSidebar"] {
- background-image: url("https://i.pinimg.com/originals/cb/74/8b/cb748be384b8ccc3e757fceb3820f9d4.jpg");
- background-size: 220%;
- background-position: center top;
- }
-
- [data-testid="stSidebar"]::before {
- background-image: url("https://i.pinimg.com/originals/cb/74/8b/cb748be384b8ccc3e757fceb3820f9d4.jpg");
- background-size: 220%;
- background-position: center top;
- content: "";
- position: absolute;
- top: 0;
- left: 0;
- right: 0;
- bottom: 0;
- background: rgba(0, 0, 0, 0.4);
- pointer-events: none;
- }
-
-
- </style>
- """
- # imports
-
- import streamlit as st
- import os
- from huggingface_hub import InferenceClient
- from textblob import TextBlob
- from langchain.prompts import PromptTemplate
- from gtts import gTTS
- from dotenv import load_dotenv
- import tempfile
- from playsound import playsound
-
- # Load environment variables
- load_dotenv()
-
- # Configure Hugging Face API
- client = InferenceClient(
-     "microsoft/Phi-3-mini-4k-instruct",
-     token=os.getenv("HF_API_KEY"),
- )
-
- # Define System Prompts
- SYSTEM_PROMPT_GENERAL = """Answer the following question in a comforting and supportive manner.
- If the user expresses negative sentiment, prioritize empathetic responses and open-ended questions."""
-
- # Define LangChain Prompt Template
- prompt_template = PromptTemplate(
-     input_variables=["system_prompt", "user_input"],
-     template="{system_prompt}\n\nUser: {user_input}\nAssistant:"
- )
-
- st.markdown(page_bg_img, unsafe_allow_html=True)
- st.title("What's on your mind today?")
- # Define the desired navy blue color in hex code
- navy_blue = "#edf7fc"
-
- st.sidebar.markdown("")
- st.sidebar.markdown(f"""<h1 style="color: {navy_blue}; ">Feel Ashley like your BestFriend!. she will support you and helps you!</h1>""", unsafe_allow_html=True)
-
- if "messages" not in st.session_state:
-     st.session_state["messages"] = [
-         {"role": "assistant", "content": "Hi there! I'm Ashley, your best friend. How can I support you today?"}
-     ]
-
- if "playback_states" not in st.session_state:
-     st.session_state["playback_states"] = {}
-
- # Function to speak text
- def speak_text(text, message_index):
-     # Check if the message index is in playback states and is being played
-     if st.session_state["playback_states"].get(message_index, False):
-         return
-
-     # Set playback state for the message index
-     st.session_state["playback_states"][message_index] = True
-
-     try:
-         tts = gTTS(text, lang='en', slow=False) # `slow=False` speeds up the speech rate
-         with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as fp:
-             temp_path = fp.name
-             tts.save(temp_path)
-
-         # Play audio using playsound
-         playsound(temp_path)
-         os.remove(temp_path)
-     except Exception as e:
-         st.error(f"Error playing audio: {e}")
-     finally:
-         # Reset playback state for the message index
-         st.session_state["playback_states"][message_index] = False
-
- # Display previous messages
- for i, msg in enumerate(st.session_state.messages):
-     st.chat_message(msg["role"]).write(msg["content"])
-     if msg["role"] == "assistant":
-         if st.button(f"🔊", key=f"voice_button_{i}"):
-             speak_text(msg["content"], i)
-
- # Chat input and processing
- if prompt := st.chat_input():
-     # Append user message to the session state
-     st.session_state.messages.append({"role": "user", "content": prompt})
-     st.chat_message("user").write(prompt)
-
-     # Sentiment Analysis
-     user_sentiment = TextBlob(prompt).sentiment.polarity
-
-     # Craft System Prompt based on sentiment
-     system_prompt = SYSTEM_PROMPT_GENERAL
-     if user_sentiment < 0: # User expresses negative sentiment
-         system_prompt = f"""{system_prompt}
- The user seems to be feeling down. Prioritize empathetic responses and open-ended questions."""
-
-     # Format prompt using LangChain's PromptTemplate
-     formatted_prompt = prompt_template.format(
-         system_prompt=system_prompt,
-         user_input=prompt
-     )
-
-     # Generate a response using Hugging Face API
-     response = ""
-     for message in client.chat_completion(
-         messages=[{"role": "user", "content": formatted_prompt}],
-         max_tokens=500,
-         stream=True,
-     ):
-         response += message.choices[0].delta.content
-
-     # Append assistant message to the session state
-     st.session_state.messages.append({"role": "assistant", "content": response.strip()})
-     st.chat_message("assistant").write(response.strip())
-     st.button(f"🔊", key=f"voice_button_{len(st.session_state.messages)-2}", on_click=speak_text, args=(response.strip(), len(st.session_state.messages)-1))
-
-
- ## https://wallpapercave.com/wp/wp9668133.jpg
- ## https://wallpapercave.com/wp/wp14059461.jpg
- ## https://wallpapercave.com/wp/wp8219187.jpg
- ## https://wallpapercave.com/uwp/uwp4189566.png
 
+ import streamlit as st
+ import os
+ from huggingface_hub import InferenceClient
+ from textblob import TextBlob
+ from langchain.prompts import PromptTemplate
+ from dotenv import load_dotenv
+
+ # Load environment variables
+ load_dotenv()
+
+ # Configure Hugging Face API
+ client = InferenceClient(
+     "microsoft/Phi-3-mini-4k-instruct",
+     token=os.getenv("HF_API_KEY"),
+ )
+
+ # Define System Prompts
+ SYSTEM_PROMPT_GENERAL = """Answer the following question in a comforting and supportive manner.
+ If the user expresses negative sentiment, prioritize empathetic responses and open-ended questions."""
+
+ # Define LangChain Prompt Template
+ prompt_template = PromptTemplate(
+     input_variables=["system_prompt", "user_input"],
+     template="{system_prompt}\n\nUser: {user_input}\nAssistant:"
+ )
+
+ page_bg_img="""
+ <style>
+ [data-testid="stAppViewContainer"] {
+ background-image: url("https://i.pinimg.com/originals/d4/d7/2f/d4d72f71231ae5995e425b7a813d87f6.webp");
+ background-size: cover;
+ }
+
+ [data-testid="stAppViewContainer"]::before {
+ content: "";
+ position: absolute;
+ top: 0;
+ left: 0;
+ right: 0;
+ bottom: 0;
+ background: rgba(0, 0, 0, 0.5);
+ pointer-events: none;
+ }
+
+ [data-testid="stToolbar"] {
+ right: 2rem;
+ }
+
+ [data-testid="stSidebar"] {
+ background-image: url("https://i.pinimg.com/originals/cb/74/8b/cb748be384b8ccc3e757fceb3820f9d4.jpg");
+ background-size: 220%;
+ background-position: center top;
+ }
+
+ [data-testid="stSidebar"]::before {
+ background-image: url("https://i.pinimg.com/originals/cb/74/8b/cb748be384b8ccc3e757fceb3820f9d4.jpg");
+ background-size: 220%;
+ background-position: center top;
+ content: "";
+ position: absolute;
+ top: 0;
+ left: 0;
+ right: 0;
+ bottom: 0;
+ background: rgba(0, 0, 0, 0.4);
+ pointer-events: none;
+ }
+ </style>
+ """
+
+ # Streamlit app layout
+ st.markdown(page_bg_img, unsafe_allow_html=True)
+ st.title("What's on your mind today?")
+
+ # Define the desired navy blue color in hex code
+ navy_blue = "#edf7fc"
+
+ st.sidebar.markdown("")
+ st.sidebar.markdown(f"""<h1 style="color: {navy_blue}; ">Feel Ashley like your BestFriend!. she will support you and helps you!</h1>""", unsafe_allow_html=True)
+
+ if "messages" not in st.session_state:
+     st.session_state["messages"] = [
+         {"role": "assistant", "content": "Hi there! I'm Ashley, your best friend. How can I support you today?"}
+     ]
+
+ # Display previous messages
+ for msg in st.session_state.messages:
+     st.chat_message(msg["role"]).write(msg["content"])
+
+ # Chat input and processing
+ if prompt := st.chat_input():
+     # Append user message to the session state
+     st.session_state.messages.append({"role": "user", "content": prompt})
+     st.chat_message("user").write(prompt)
+
+     # Sentiment Analysis
+     user_sentiment = TextBlob(prompt).sentiment.polarity
+
+     # Craft System Prompt based on sentiment
+     system_prompt = SYSTEM_PROMPT_GENERAL
+     if user_sentiment < 0: # User expresses negative sentiment
+         system_prompt = f"""{system_prompt}
+ The user seems to be feeling down. Prioritize empathetic responses and open-ended questions."""
+
+     # Format prompt using LangChain's PromptTemplate
+     formatted_prompt = prompt_template.format(
+         system_prompt=system_prompt,
+         user_input=prompt
+     )
+
+     # Generate a response using Hugging Face API
+     response = ""
+     for message in client.chat_completion(
+         messages=[{"role": "user", "content": formatted_prompt}],
+         max_tokens=500,
+         stream=True,
+     ):
+         response += message.choices[0].delta.content
+
+     # Append assistant message to the session state
+     st.session_state.messages.append({"role": "assistant", "content": response.strip()})
+     st.chat_message("assistant").write(response.strip())
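
Note on the streaming loop retained in the new app.py: with OpenAI-style streamed chat completions, the per-chunk `delta.content` field may be empty or `None` on some chunks, and concatenating it unchecked can raise a `TypeError`. A minimal defensive sketch of that accumulation step is shown below; it is not part of this commit, reuses the `client` and `formatted_prompt` names defined in app.py above, and otherwise assumes the same chunk shape.

# Sketch only (assumption, not part of the committed file): defensive accumulation
# of streamed chunks, reusing `client` and `formatted_prompt` from app.py above.
response = ""
for chunk in client.chat_completion(
    messages=[{"role": "user", "content": formatted_prompt}],
    max_tokens=500,
    stream=True,
):
    delta = chunk.choices[0].delta.content
    if delta:  # skip chunks that carry no text (e.g. content is None)
        response += delta
response = response.strip()

The rest of the flow (appending the reply to `st.session_state.messages` and writing it with `st.chat_message`) would stay as in the committed file.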