karar-shah committed
Commit 38bf7e6 · 1 Parent(s): 4cf38db

Update app.py

Files changed (1)
  1. app.py +180 -56
app.py CHANGED
@@ -1,32 +1,54 @@
- # in new conda env install below pakages
- # pip install tensorflow==2.13.0
- # pip install -U "tensorflow-text==2.13.*"
- # pip install -q streamlit==1.26.0
- # pip install openai==0.28.0
  import os
  import openai
  import streamlit as st
  import tensorflow as tf
  import tensorflow_text
- import numpy as np

- ###############################################
- # Setting up styles for app
- ###############################################
  # Set page title and icon
- # st.set_page_config(page_title="Bard ChatBot",
- #                    page_icon=":robot_face:",
- #                    initial_sidebar_state="collapsed",)

- # # Custom css styles
- # with open('style.css') as f:
- #     st.markdown(f'<style>{f.read()}</style>', unsafe_allow_html=True)

- # st.title("ChatGPT-like clone")

- openai.api_key = os.environ['API_TOKEN']
- reloaded_model = tf.saved_model.load('one_2')

  emotion_categories = {
      0: 'anger',
@@ -38,63 +60,165 @@ emotion_categories = {
      6: 'surprise'
  }

- if "openai_model" not in st.session_state:
-     st.session_state["openai_model"] = "gpt-3.5-turbo"

- if "messages" not in st.session_state:
-     st.session_state.messages = []

- # col1, col2 = st.columns([4, 3])

- # with col1:
- #     st.markdown('helow')
- #     for message in st.session_state.messages:
- #         with st.chat_message(message["role"]):
- #             st.markdown(message["content"])

- # with col2:
- #     a = st.session_state.messages[-1]['content']
- #     st.markdown(len(st.session_state.messages) > 1)
- #     if len(st.session_state.messages) != 0:
- #         st.markdown('prediction:')
- #         st.markdown(st.session_state.messages[-2])
- #         q = st.session_state.messages[-2]['content']
- #         emotion = reloaded_model([q])
- #         true_classes = np.argmax(emotion, axis=1)
- #         emotion_category = emotion_categories.get(int(true_classes))
- #         st.markdown(emotion_category)

- st.markdown('Chat Bot')

  if prompt := st.chat_input("What is up?"):
-     st.session_state.messages.append({"role": "user", "content": prompt})
      # USER
      with st.chat_message("user"):
          st.markdown(prompt)

      # EMOTION
-     with st.chat_message("Emotion", avatar='😢'):
-         emotion = reloaded_model([prompt])
-         true_classes = np.argmax(emotion, axis=1)
-         emotion_category = emotion_categories.get(int(true_classes))
-         st.write("Emotion: {}".format(emotion_category))

      # AI BOT
      with st.chat_message("assistant"):
          message_placeholder = st.empty()
          full_response = ""
          for response in openai.ChatCompletion.create(
-             model=st.session_state["openai_model"],
-             messages=[
-                 {"role": m["role"], "content": m["content"]}
-                 for m in st.session_state.messages
-             ],
-             stream=True,
-         ):
              full_response += response.choices[0].delta.get("content", "")
              message_placeholder.markdown(full_response + "▌")
          message_placeholder.markdown(full_response)
-         st.session_state.messages.append(
              {"role": "assistant", "content": full_response})
  import os
+ import time
  import openai
+ import numpy as np
  import streamlit as st
  import tensorflow as tf
  import tensorflow_text
+ # import plotly.graph_objects as go
+ # from dotenv import load_dotenv
+ from langchain.llms import OpenAI
+ from langchain.memory import ConversationSummaryMemory, ChatMessageHistory
+ from llm import sys_instruction
+
+
+ ##############
+ # PAGE STYLES

  # Set page title and icon
+ st.set_page_config(page_title="EmoInsight",
+                    page_icon=":robot_face:",
+                    initial_sidebar_state="expanded",)
+
+ # Custom css styles
+ with open('style.css') as f:
+     st.markdown(f'<style>{f.read()}</style>', unsafe_allow_html=True)
+

+ # Load variables from .env file
+ # load_dotenv()

+ # Load large model

+ @st.cache_resource  # Decorator to cache non-data objects
+ def Loading_sentiment_analysis_model():
+     model = tf.saved_model.load('one_2')
+     return model
+
+
+ senti_model = Loading_sentiment_analysis_model()
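+ # NOTE: st.cache_resource keeps the loaded SavedModel in memory across
+ # Streamlit reruns, so it is not reloaded on every user interaction.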
+
+
+ emoji_mapping = {
+     "sadness": "😒",
+     "neutral": "😐",
+     "joy": "😄",
+     "anger": "😡",
+     "fear": "😨",
+     "love": "❤️",
+     "surprise": "😲",
+ }

  emotion_categories = {
      0: 'anger',

      6: 'surprise'
  }


+ ##################
+ # STATE VARIABLES

+ # set api key
+ if 'key' not in st.session_state:
+     st.session_state.key = os.environ["API_TOKEN"]

+ openai.api_key = st.session_state.key

+ # gpt llm
+ if 'llm' not in st.session_state:
+     st.session_state.llm = OpenAI(
+         temperature=0.2, openai_api_key=st.session_state.key)

+ # model name
+ if "openai_model" not in st.session_state:
+     st.session_state["openai_model"] = "gpt-3.5-turbo"

+ # st chat history
+ if "message_history" not in st.session_state:
+     st.session_state.message_history = []
+
+ # set instruction for gpt response
+ if 'sys_inst' not in st.session_state:
+     st.session_state.sys_inst = sys_instruction()
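+ # sys_instruction() (from the local llm module) is expected to return a
+ # prompt template with a {history} placeholder; it is filled with the
+ # running conversation summary before each ChatCompletion call below.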
+
+ # dict to store user question emotion
+ if 'emotion_counts' not in st.session_state:
+     st.session_state.emotion_counts = {
+         'anger': 0,
+         'fear': 0,
+         'joy': 0,
+         'love': 0,
+         'neutral': 0,
+         'sadness': 0,
+         'surprise': 0
+     }
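+ # NOTE: these per-emotion counts are presumably what the separate
+ # `Sentiment Plot` page mentioned in the sidebar below visualises.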
+
+
+ #######################
+ # LANG-CHAIN VARIABLES
+
+ # storing chat history
+ if 'old_summary' not in st.session_state:
+     st.session_state.old_summary = 'User came to psychological assistant chatbot'
+
+ # LangChain msg history
+ if 'lg_msg_history' not in st.session_state:
+     st.session_state.lg_msg_history = ChatMessageHistory()
+
+ # summarize old conversation
+ if 'memory' not in st.session_state:
+     st.session_state.memory = ConversationSummaryMemory.from_messages(
+         llm=st.session_state.llm,
+         buffer=st.session_state.old_summary,
+         return_messages=True,
+         chat_memory=st.session_state.lg_msg_history)
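+ # ConversationSummaryMemory uses the LLM to condense the messages held in
+ # lg_msg_history into a running summary, seeded here with old_summary.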
+
+
+ #############################################
+ # MAIN APP                                   #
+ #############################################
+ st.sidebar.markdown('')
+ st.sidebar.markdown('')
+ st.sidebar.markdown('')
+ st.sidebar.success("Select `Sentiment Plot` button to see the Emotion Graph")
+ st.sidebar.markdown('')
+ clear_chats = st.sidebar.button('Clear Chat')
+
+ if clear_chats:
+     st.session_state.lg_msg_history.clear()
+     st.session_state.old_summary = 'User came to psychological assistant chatbot'
+     st.session_state.message_history = []
+     alert = st.sidebar.warning('Chat cleared', icon='🚨')
+     time.sleep(2)  # Wait for 2 seconds
+     alert.empty()  # Clear the alert
+
+
+ st.markdown("<h1><center>EmoInsight</center></h1>",
+             unsafe_allow_html=True)
+
+
+ # greetings
+ if len(st.session_state.message_history) == 0:
+     # add to st history
+     st.session_state.message_history.append(
+         {"role": "assistant", "content": "How can I help you?"})
+     # add to lg history
+     # st.session_state.lg_msg_history.add_ai_message("How can I help you?")
+
+ # HISTORY
+ for message in st.session_state.message_history:
+     if message['role'] == 'system':
+         with st.chat_message("Emotion", avatar=emoji_mapping.get(message["content"])):
+             a = "Sentiment: {}".format(message["content"])
+             st.markdown(a)
+     else:
+         with st.chat_message(message["role"]):
+             st.markdown(message["content"])
+
+ # CHAT BOT
  if prompt := st.chat_input("What is up?"):
      # USER
      with st.chat_message("user"):
          st.markdown(prompt)
+         # add to st history
+         st.session_state.message_history.append(
+             {"role": "user", "content": prompt})
+         # add to lg history
+         st.session_state.lg_msg_history.add_user_message(prompt)
+
+     # SENTIMENT PREDICTION
+     emotion = senti_model([prompt])
+     true_classes = np.argmax(emotion, axis=1)
+     emotion_category = emotion_categories.get(int(true_classes))
+     st.session_state.emotion_counts[emotion_category] += 1
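+     # senti_model returns one score row per input; argmax gives the predicted
+     # class index, which emotion_categories maps to a readable label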

      # EMOTION
+     with st.chat_message("Emotion", avatar=emoji_mapping.get(emotion_category)):
+         st.write("Sentiment: {}".format(emotion_category))
+         st.session_state.message_history.append(
+             {"role": "system", "content": emotion_category})

      # AI BOT
      with st.chat_message("assistant"):
          message_placeholder = st.empty()
          full_response = ""
+
+         # get response
          for response in openai.ChatCompletion.create(
+             model=st.session_state["openai_model"],
+             messages=[
+                 {"role": "system", "content": st.session_state.sys_inst.format(
+                     history=st.session_state.old_summary)},
+                 {"role": "user", "content": prompt}
+             ],  # pass old chat history
+             stream=True):
+
+             # render gpt response in realtime
              full_response += response.choices[0].delta.get("content", "")
              message_placeholder.markdown(full_response + "▌")
          message_placeholder.markdown(full_response)
+
+         # add to st history
+         st.session_state.message_history.append(
              {"role": "assistant", "content": full_response})
+         # add to lg history
+         st.session_state.lg_msg_history.add_ai_message(full_response)
+
+     # Clear old chat after 4 dialogs
+     # And update old summary with new summary
+     chat_len = len(st.session_state.lg_msg_history.messages)
+     if (chat_len >= 4) and (chat_len % 4 == 0):
+
+         # get new summary of chat
+         st.session_state.old_summary = st.session_state.memory.predict_new_summary(
+             messages=st.session_state.lg_msg_history.messages,
+             existing_summary=st.session_state.old_summary)
+
+         # flush old lg-chat history
+         st.session_state.lg_msg_history.clear()
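+         # NOTE: predict_new_summary folds the last few turns into old_summary,
+         # so later requests send a short summary to the model instead of the
+         # full transcript.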