haohsiang committed on
Commit 23ce61e · Parent(s): 5e5d77e

use gr.State() to replace global variables

Files changed (2):
  1. README.md +0 -1
  2. app.py +39 -63
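The commit replaces module-level globals with `gr.State`, which keeps one value per browser session instead of one value shared by every visitor to the Space. A minimal sketch of the pattern, with illustrative names rather than the ones in app.py:

```python
import gradio as gr

with gr.Blocks() as demo:
    # gr.State must be created inside the Blocks context;
    # every session gets its own copy of the default value.
    counter = gr.State(value=0)
    out = gr.Textbox(show_label=False)
    btn = gr.Button("increment")

    def increment(count):
        count += 1
        # The new value is written back into the gr.State
        # component listed in outputs.
        return count, f"clicked {count} times"

    btn.click(increment, inputs=[counter], outputs=[counter, out])

demo.launch()
```

Handlers receive the current state as an argument and must return the updated value in the same position it occupies in `outputs`; that is the contract every change below follows.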
README.md CHANGED
@@ -8,7 +8,6 @@ colorTo: blue
 app_file: app.py
 pinned: false
 short_description: A self-healing ChatBot to comfort yourself.
-header: mini
 models:
 - stabilityai/stable-diffusion-xl-base-1.0
 ---
app.py CHANGED
@@ -1,5 +1,7 @@
 import spaces
 import os
+import jieba
+import pandas as pd
 import gradio as gr
 import torch
 import numpy as np
@@ -11,6 +13,7 @@ from openai import OpenAI
 from diffusers import StableDiffusionXLPipeline, StableDiffusionXLImg2ImgPipeline, EulerAncestralDiscreteScheduler
 from peft import PeftModel
 from transformers import AutoModel
+import asyncio
 
 # OpenAI API
 api_key = os.getenv('OPENAI_API_KEY')
@@ -27,24 +30,12 @@ lora_path = "./tbh368-sdxl.safetensors"
 pipe_t2i.load_lora_weights(lora_path, adapter_name="milton-glaser")
 pipe_t2i.set_adapters(["milton-glaser"], adapter_weights=[1.0])
 
-import jieba
-import numpy as np
-import pandas as pd
-
-valence_scores = []
-arousal_scores = []
-conversation_times = 0
-
 # Load the CVAW Corpus data
 cvaw_data = pd.read_csv('./CVAW_all_SD.csv', delimiter='\t')
 cvaw_dict = dict(zip(cvaw_data['Word'], zip(cvaw_data['Valence_Mean'], cvaw_data['Arousal_Mean'])))
 
-def analyze_sentiment_corpus(text):
+def analyze_sentiment_corpus(text, conversation_times, valence_scores, arousal_scores):
     words = jieba.cut(text)
-    global conversation_times
-    global valence_scores
-    global arousal_scores
-
     conversation_times += 1
 
     for word in words:
@@ -57,11 +48,11 @@ def analyze_sentiment_corpus(text):
     arousal_scores = arousal_scores[-3:]
 
     if conversation_times < 4: # with fewer than 4 turns, return 10 so relaxation mode is not triggered
-        return 10, 10
+        return 10, 10, conversation_times, valence_scores, arousal_scores
     else:
         avg_valence = np.mean(valence_scores)
         avg_arousal = np.mean(arousal_scores)
-        return avg_valence, avg_arousal
+        return avg_valence, avg_arousal, conversation_times, valence_scores, arousal_scores
 
 def call_gpt(input_text, history):
     messages = [{"role":"system", "content":"對話請以繁體中文進行:你是一位熟悉現象學的諮商實習生,擅長引導使用者描述他當下的所知覺到的事物。回答問題的時候必須有同理心,請同理使用者說的內容,再繼續回答,且不要超過20個字。"}]
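With the globals gone, `analyze_sentiment_corpus` takes its counters as arguments and hands them back alongside the scores. A standalone sketch of the same rolling-window idea, with a toy lexicon standing in for the CVAW corpus; the per-word lookup is an assumption, since the loop body is elided from this hunk:

```python
import numpy as np

def score_text(words, times, valence, arousal, lexicon):
    """Toy rolling-window valence/arousal scoring (lookup logic assumed)."""
    times += 1
    for w in words:
        if w in lexicon:                  # assumed: words outside the lexicon are skipped
            v, a = lexicon[w]
            valence.append(v)
            arousal.append(a)
    valence, arousal = valence[-3:], arousal[-3:]   # keep only the last three scores
    if times < 4:                         # sentinel 10 keeps relaxation mode off early on
        return 10, 10, times, valence, arousal
    return np.mean(valence), np.mean(arousal), times, valence, arousal

lex = {"sad": (2.5, 3.6), "calm": (6.8, 2.1)}   # hypothetical scores
print(score_text(["sad"], 0, [], [], lex))      # -> (10, 10, 1, [2.5], [3.6])
```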
@@ -80,33 +71,29 @@ def call_gpt(input_text, history):
 
     return chat_reply.choices[0].message.content
 
-# Testing meditation function
-meditation_flag = True # decides whether to enter relaxation mode
-
-def chat_with_bot(input_text, history):
-    global meditation_flag
+def chat_with_bot(input_text, history, conversation_times, valence_scores, arousal_scores, meditation_flag):
     response = ""
     med_confirm_layout = False # whether to show the relaxation options
     jump2med_btn = True # whether to allow jumping to the relaxation interface
 
     # run sentiment analysis
-    valence, arousal = analyze_sentiment_corpus(input_text)
+    valence, arousal, conversation_times, valence_scores, arousal_scores = analyze_sentiment_corpus(input_text, conversation_times, valence_scores, arousal_scores)
 
     # decide whether to suggest a relaxation exercise
     if 3 <= arousal <= 4 and meditation_flag is True: # ask whether to do the relaxation exercise
         response = "我知道你的處境了\n我有一個建議,我們來進行一個可以讓自己放鬆的呼吸覺察練習好嗎?"
         history.append((input_text, response))
         med_confirm_layout = True
-        return history, med_confirm_layout, jump2med_btn
+        return history, med_confirm_layout, jump2med_btn, conversation_times, valence_scores, arousal_scores, meditation_flag
     elif meditation_flag is False: # already relaxed once; hide the jump button
         response = call_gpt(input_text, history)
         history.append((input_text, response))
         jump2med_btn = False
-        return history, med_confirm_layout, jump2med_btn
+        return history, med_confirm_layout, jump2med_btn, conversation_times, valence_scores, arousal_scores, meditation_flag
     else: # continue the conversation
         response = call_gpt(input_text, history)
         history.append((input_text, response))
-        return history, med_confirm_layout, jump2med_btn
+        return history, med_confirm_layout, jump2med_btn, conversation_times, valence_scores, arousal_scores, meditation_flag
 
 def translate_to_english(text):
     character = "You are a professional text to image prompt generator, please use the following text to generate prompt in English. It's very important to summarize it in 70 tokens."
@@ -120,16 +107,10 @@ def translate_to_english(text):
 
     return chat_reply.choices[0].message.content
 
-generated_images = None
-last_genimg_times = 0
-
 @spaces.GPU(duration=120)
-def generate_images(history):
-    global generated_images # use globals to cache the images
-    global last_genimg_times
-
+def generate_images(history, conversation_times, last_genimg_times, generated_images):
     if generated_images is not None and last_genimg_times == conversation_times:
-        return generated_images # if the images were already generated, return them directly
+        return conversation_times, last_genimg_times, *generated_images # if the images were already generated, return them directly
 
     user_story = " ".join([h[0] for h in history])
     prompt = translate_to_english(user_story)
@@ -152,7 +133,7 @@ def generate_images(history):
         images.append(img)
 
     generated_images = images # cache the generated images
-    return images
+    return conversation_times, last_genimg_times, *images
 
 def select_image(choice, img1, img2, img3, img4):
     index = int(choice.split()[-1]) - 1
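`generate_images` now gets its cache through state as well: when the conversation counter has not moved since the last render, the SDXL call is skipped entirely. A reduced sketch of that guard, with a placeholder in place of the diffusion pipeline and assuming the marker is refreshed in the elided middle of the function:

```python
def generate_or_reuse(conversation_times, last_genimg_times, cached):
    # Reuse the cached images when nothing new has been said.
    if cached is not None and last_genimg_times == conversation_times:
        return conversation_times, last_genimg_times, *cached

    images = [f"img-{conversation_times}-{i}" for i in range(4)]  # stand-in for the SDXL loop
    last_genimg_times = conversation_times                        # assumed refresh of the marker
    return conversation_times, last_genimg_times, *images
```

The starred return (`return a, b, *images`) needs Python 3.8 or newer; it flattens the four images so each one lands on its own output component.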
@@ -190,8 +171,8 @@ def chat_about_image(input_text, history, selected_image):
 audio_file = "./meditation_v2.mp3"
 
 # UI handle functions
-def handle_chat(input_text, history):
-    updated_history, meditation, jump2med_btn = chat_with_bot(input_text, history)
+def handle_chat(input_text, history, conversation_times, valence_scores, arousal_scores, meditation_flag):
+    updated_history, meditation, jump2med_btn, conversation_times, valence_scores, arousal_scores, meditation_flag = chat_with_bot(input_text, history, conversation_times, valence_scores, arousal_scores, meditation_flag)
     if meditation:
         return (
             updated_history,
@@ -199,7 +180,8 @@ def handle_chat(input_text, history):
             gr.update(visible=False), # submit
             gr.update(visible=False), # jump_to_med
             gr.update(visible=True), # meditation_buttons
-            gr.update(visible=False) # clear
+            gr.update(visible=False), # clear
+            conversation_times, valence_scores, arousal_scores, meditation_flag
         )
     elif jump2med_btn is False:
         return (
@@ -208,7 +190,8 @@ def handle_chat(input_text, history):
             gr.update(visible=True), # submit
             gr.update(visible=False), # jump_to_med
             gr.update(visible=False), # meditation_buttons
-            gr.update(visible=True) # clear
+            gr.update(visible=True), # clear
+            conversation_times, valence_scores, arousal_scores, meditation_flag
         )
     else:
         return (
@@ -217,15 +200,14 @@ def handle_chat(input_text, history):
             gr.update(visible=True), # submit
             gr.update(visible=True), # jump_to_med
             gr.update(visible=False), # meditation_buttons
-            gr.update(visible=True) # clear
+            gr.update(visible=True), # clear
+            conversation_times, valence_scores, arousal_scores, meditation_flag
         )
 
-def start_meditation(history):
-    global meditation_flag
+def start_meditation(meditation_flag):
     meditation_flag = False
-    audio = audio_file
     return (
-        audio
+        audio_file, meditation_flag
     )
 
 def continue_chat():
@@ -252,8 +234,6 @@ def return_to_chat():
         gr.update(visible=False) # jump_to_med
     )
 
-import asyncio
-
 async def show_loading():
     # show the loading message
     yield (gr.update(visible=True), # loading_message
  yield (gr.update(visible=True), # loading_message
@@ -268,7 +248,6 @@ async def show_loading():
268
  gr.update(visible=False), # main_interface
269
  gr.update(visible=True)) # audio_interface
270
 
271
- # Testing meditation function
272
  theme = gr.themes.Base(
273
  primary_hue="amber",
274
  secondary_hue="sky",
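`show_loading` (in the two hunks above) is an async generator, so each `yield` pushes one complete batch of UI updates: the loading message appears first, then the view flips to the audio interface. A minimal sketch of the pattern; the pause length is a guess, since the timing lives in lines these hunks do not show:

```python
import asyncio
import gradio as gr

async def staged_updates():
    # First batch: show the loading message, keep the target view hidden.
    yield gr.update(visible=True), gr.update(visible=False)
    await asyncio.sleep(3)  # assumed pause; the real duration is not in this hunk
    # Second batch: hide the loading message, reveal the target view.
    yield gr.update(visible=False), gr.update(visible=True)
```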
@@ -303,6 +282,13 @@ css = """
 """
 
 with gr.Blocks(theme=theme, css=css) as demo:
+    generated_images = gr.State(value=None)
+    meditation_flag = gr.State(value=True)
+    last_genimg_times = gr.State(value=0)
+    conversation_times = gr.State(value=0)
+    valence_scores = gr.State(value=[])
+    arousal_scores = gr.State(value=[])
+
     loading_message = gr.Textbox(visible=False, show_label=False)
     with gr.Column(visible=False) as audio_interface:
         audio_player = gr.Audio(label="呼吸覺察練習", show_download_button=False, interactive=False)
@@ -314,8 +300,8 @@ with gr.Blocks(theme=theme, css=css) as demo:
         msg = gr.Textbox(show_label=False, placeholder="今天想要跟我分享什麼呢?", autofocus=True, scale=2)
         with gr.Row():
             submit = gr.Button("送出", variant="primary", scale=2)
-            jump_to_med = gr.Button("我想要現在進行呼吸覺察練習", scale=2)
-            gen_other_img = gr.Button("結合聯想生成更多圖像", scale=2, visible=False)
+            jump_to_med = gr.Button("我想要現在進行呼吸覺察練習", variant="secondary", scale=2)
+            gen_other_img = gr.Button("結合聯想生成更多圖像", variant="secondary", scale=2, visible=False)
             clear = gr.Button("清除對話紀錄", scale=1)
         with gr.Row(visible=False) as meditation_buttons:
             relax_yes = gr.Button("好", variant="primary")
@@ -329,19 +315,19 @@ with gr.Blocks(theme=theme, css=css) as demo:
         image_chat_button = gr.Button("與聊天機器人分享", variant="primary")
 
     # chatbot events handle
-    submit.click(handle_chat, [msg, chatbot], [chatbot, msg, submit, jump_to_med, meditation_buttons, clear])
-    msg.submit(handle_chat, [msg, chatbot], [chatbot, msg, submit, jump_to_med, meditation_buttons, clear])
+    submit.click(handle_chat, [msg, chatbot, conversation_times, valence_scores, arousal_scores, meditation_flag], [chatbot, msg, submit, jump_to_med, meditation_buttons, clear, conversation_times, valence_scores, arousal_scores, meditation_flag])
+    msg.submit(handle_chat, [msg, chatbot, conversation_times, valence_scores, arousal_scores, meditation_flag], [chatbot, msg, submit, jump_to_med, meditation_buttons, clear, conversation_times, valence_scores, arousal_scores, meditation_flag])
     clear.click(lambda: None, None, chatbot, queue=False)
 
     # going to meditation events handle
-    jump_to_med.click(start_meditation, [chatbot], audio_player)
-    relax_yes.click(start_meditation, [chatbot], audio_player)
+    jump_to_med.click(start_meditation, meditation_flag, [audio_player, meditation_flag])
+    relax_yes.click(start_meditation, meditation_flag, [audio_player, meditation_flag])
     relax_no.click(continue_chat, None, [msg, submit, jump_to_med, meditation_buttons, clear])
 
     # meditation events handle
     jump_to_med.click(show_loading, None, [loading_message, loading_message, main_interface, audio_interface])
     relax_yes.click(show_loading, None, [loading_message, loading_message, main_interface, audio_interface])
-    audio_player.play(generate_images, [chatbot], image_outputs)
+    audio_player.play(generate_images, [chatbot, conversation_times, last_genimg_times, generated_images], [conversation_times, last_genimg_times] + image_outputs)
     audio_player.stop(return_to_chat, None, [audio_player, main_interface, chatbot_interface, selected_image_interface, audio_interface, msg, submit, clear, meditation_buttons, gen_other_img, jump_to_med])
     back_to_chat.click(return_to_chat, None, [audio_player, main_interface, chatbot_interface, selected_image_interface, audio_interface, msg, submit, clear, meditation_buttons, gen_other_img, jump_to_med])
 
@@ -351,18 +337,8 @@ with gr.Blocks(theme=theme, css=css) as demo:
     image_chat_button.click(chat_about_image, [image_chat_input, chatbot, selected_image], [chatbot, chatbot, chatbot_interface]).then(lambda: None, None, image_chat_input, queue=False)
 
     # generate other images event handle
-    gen_other_img.click(generate_images, [chatbot], image_outputs)
+    gen_other_img.click(generate_images, [chatbot, conversation_times, last_genimg_times, generated_images], [conversation_times, last_genimg_times] + image_outputs)
 
 if __name__ == "__main__":
-    # initialize the variables
-    torch.cuda.empty_cache()
-    generated_images = None
-    meditation_flag = True
-    last_genimg_times = 0
-    conversation_times = 0
-    valence_scores = []
-    arousal_scores = []
-    print("variables initialized")
-
     demo.queue(max_size=20)
     demo.launch(show_api=False)