MZhao committed · Commit 7014951
Parent(s): ca1bf30
Optimize "reduce token"

ChuanhuChatbot.py  +65 -68

ChuanhuChatbot.py
CHANGED
@@ -5,105 +5,102 @@ import markdown
 my_api_key = "" # input your api_key
 initial_prompt = "You are a helpful assistant."
 
-        statistics = f'本次对话Tokens用量【{self.response["usage"]["total_tokens"]} / 4096】 ( 提问+上文 {self.response["usage"]["prompt_tokens"]},回答 {self.response["usage"]["completion_tokens"]} )'
-        message = self.response["choices"][0]["message"]["content"]
 
         message_with_stats = f'{message}\n\n================\n\n{statistics}'
         message_with_stats = markdown.markdown(message_with_stats)
 
         return message, message_with_stats
 
-        return
 
-        chatbot = chatbot[:-1]
-        self.context = self.context[:-2]
-        return chatbot
-
-    def reduce_token(self, chatbot):
-        self.context.append({"role": "user", "content": "请帮我总结一下上述对话的内容,实现减少tokens的同时,保证对话的质量。在总结中不要加入这一句话。"})
-        message, message_with_stats = self.get_response(self.context)
-        self.system = {"role": "system", "content": f"You are a helpful assistant. The content that the Assistant and the User discussed in the previous self.context is: {message}."}
-
-        statistics = f'本次对话Tokens用量【{self.response["usage"]["completion_tokens"]+23} / 4096】'
-        optmz_str = markdown.markdown( f"System prompt已经更新, 请继续对话\n\n================\n\n{statistics}" )
-        chatbot.append(("请帮我总结一下上述对话的内容,实现减少tokens的同时,保证对话的质量。", optmz_str))
-
-        self.context = []
-        return chatbot, self.system["content"]
 
 def reset_state():
-    return []
 
 
 with gr.Blocks() as demo:
     chatbot = gr.Chatbot().style(color_map=("#1D51EE", "#585A5B"))
 
     with gr.Row():
-        with gr.Column(scale=
-            txt = gr.Textbox(show_label=False, placeholder="
         with gr.Column(min_width=50, scale=1):
-            submitBtn = gr.Button("
     with gr.Row():
         emptyBtn = gr.Button("🧹 新的对话")
-        retryBtn = gr.Button("
-        delLastBtn = gr.Button("
         reduceTokenBtn = gr.Button("♻️ 优化Tokens")
 
-    txt.submit(
     txt.submit(lambda :"", None, txt)
-    submitBtn.click(
     submitBtn.click(lambda :"", None, txt)
-    emptyBtn.click(reset_state, outputs=[chatbot])
 
 demo.launch()
 my_api_key = "" # input your api_key
 initial_prompt = "You are a helpful assistant."
 
+openai.api_key = my_api_key
+
+def get_response(system, context, raw = False):
+    response = openai.ChatCompletion.create(
+        model="gpt-3.5-turbo",
+        messages=[system, *context],
+    )
+    if raw:
+        return response
+    else:
+        statistics = f'本次对话Tokens用量【{response["usage"]["total_tokens"]} / 4096】 ( 提问+上文 {response["usage"]["prompt_tokens"]},回答 {response["usage"]["completion_tokens"]} )'
+        message = response["choices"][0]["message"]["content"]
 
         message_with_stats = f'{message}\n\n================\n\n{statistics}'
         message_with_stats = markdown.markdown(message_with_stats)
 
         return message, message_with_stats
 
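For reference, a minimal usage sketch of the new module-level get_response (the system/context values here are illustrative and not part of the commit; a valid my_api_key is required for the calls to succeed):

# Hypothetical call, mirroring how predict() and reduce_token() below use get_response().
system = {"role": "system", "content": initial_prompt}
context = [{"role": "user", "content": "你好"}]

# Default mode: the plain reply plus an HTML string carrying the token statistics.
message, message_with_stats = get_response(system, context)

# raw=True mode: the full OpenAI response, for callers that need response["usage"] themselves.
raw_response = get_response(system, context, raw=True)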
+def predict(chatbot, input_sentence, system, context):
+    if len(input_sentence) == 0:
+        return []
+    context.append({"role": "user", "content": f"{input_sentence}"})
 
+    message, message_with_stats = get_response(system, context)
 
+    context.append({"role": "assistant", "content": message})
 
+    chatbot.append((input_sentence, message_with_stats))
 
+    return chatbot, context
 
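predict now threads both the Chatbot display history and the raw API context through each turn instead of mutating object attributes. Illustrative only, with the API reply stubbed out:

# What one call to predict() appends per turn (stub values; no API call made here).
context = []
chatbot = []

user_msg = "你好"
stub_reply = "你好,有什么可以帮你?"                      # stands in for the model's message
stub_reply_with_stats = stub_reply + " …token stats…"      # stands in for the markdown-rendered string

context.append({"role": "user", "content": user_msg})
context.append({"role": "assistant", "content": stub_reply})
chatbot.append((user_msg, stub_reply_with_stats))
# context feeds the next API call; chatbot is what the Gradio UI renders.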
+def retry(chatbot, system, context):
+    if len(context) == 0:
+        return [], []
+    message, message_with_stats = get_response(system, context[:-1])
+    context[-1] = {"role": "assistant", "content": message}
 
+    chatbot[-1] = (context[-2]["content"], message_with_stats)
+    return chatbot, context
 
+def delete_last_conversation(chatbot, context):
+    if len(context) == 0:
+        return [], []
+    chatbot = chatbot[:-1]
+    context = context[:-2]
+    return chatbot, context
 
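Both helpers lean on the fact that context always alternates user/assistant entries. A self-contained sketch of the slicing, with illustrative data:

# Illustrative only: the list slicing retry() and delete_last_conversation() rely on.
context = [
    {"role": "user", "content": "Q1"},
    {"role": "assistant", "content": "A1"},
    {"role": "user", "content": "Q2"},
    {"role": "assistant", "content": "A2"},
]
chatbot = [("Q1", "A1"), ("Q2", "A2")]

# retry(): resend everything except the last assistant reply, then overwrite that reply in place.
resend = context[:-1]                      # ends with the user's "Q2"
context[-1] = {"role": "assistant", "content": "A2 (regenerated)"}

# delete_last_conversation(): drop the last user/assistant pair and the last chat row.
context = context[:-2]
chatbot = chatbot[:-1]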
+def reduce_token(chatbot, system, context):
+    context.append({"role": "user", "content": "请帮我总结一下上述对话的内容,实现减少tokens的同时,保证对话的质量。在总结中不要加入这一句话。"})
+    response = get_response(system, context, raw=True)
 
+    statistics = f'本次对话Tokens用量【{response["usage"]["completion_tokens"]+12+12+8} / 4096】'
+    optmz_str = markdown.markdown( f'好的,我们之前聊了:{response["choices"][0]["message"]["content"]}\n\n================\n\n{statistics}' )
+    chatbot.append(("请帮我总结一下上述对话的内容,实现减少tokens的同时,保证对话的质量。", optmz_str))
+
+    context = []
+    context.append({"role": "user", "content": "我们之前聊了什么?"})
+    context.append({"role": "assistant", "content": f'我们之前聊了:{response["choices"][0]["message"]["content"]}'})
+    return chatbot, context
 
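This is the core of the commit: the removed reduce_token folded the model-written summary into a brand-new system prompt and emptied the context, while the rewritten version leaves the system prompt alone and reseeds the context with a fixed question/answer pair carrying the summary (the +12+12+8 in the statistics line appears to be a rough allowance for those reseeded strings, though the commit does not document it). An illustrative sketch of the state each version leaves behind:

# Illustrative only: conversation state after optimizing tokens, old vs. new.
summary = "……"   # stands in for the model-generated summary of the chat so far

# Removed version: summary moved into a replacement system prompt, context emptied.
old_system = {"role": "system",
              "content": f"You are a helpful assistant. The content that the Assistant and the User discussed in the previous self.context is: {summary}."}
old_context = []

# New version: system prompt untouched; the summary rides along as a short Q/A pair.
new_context = [
    {"role": "user", "content": "我们之前聊了什么?"},
    {"role": "assistant", "content": f"我们之前聊了:{summary}"},
]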
 def reset_state():
+    return [], []
 
+def update_system(new_system_prompt):
+    return {"role": "system", "content": new_system_prompt}
 
 
 with gr.Blocks() as demo:
     chatbot = gr.Chatbot().style(color_map=("#1D51EE", "#585A5B"))
+    context = gr.State([])
+    systemPrompt = gr.State(update_system(initial_prompt))
 
     with gr.Row():
+        with gr.Column(scale=12):
+            txt = gr.Textbox(show_label=False, placeholder="在这里输入").style(container=False)
         with gr.Column(min_width=50, scale=1):
+            submitBtn = gr.Button("🚀", variant="primary")
     with gr.Row():
         emptyBtn = gr.Button("🧹 新的对话")
+        retryBtn = gr.Button("🔄 重新生成")
+        delLastBtn = gr.Button("🗑️ 删除上条对话")
         reduceTokenBtn = gr.Button("♻️ 优化Tokens")
 
+    newSystemPrompt = gr.Textbox(show_label=True, placeholder=f"在这里输入新的System Prompt...", label="更改 System prompt").style(container=True)
+    systemPromptDisplay = gr.Textbox(show_label=True, value=initial_prompt, interactive=False, label="目前的 System prompt").style(container=True)
 
+    txt.submit(predict, [chatbot, txt, systemPrompt, context], [chatbot, context], show_progress=True)
     txt.submit(lambda :"", None, txt)
+    submitBtn.click(predict, [chatbot, txt, systemPrompt, context], [chatbot, context], show_progress=True)
     submitBtn.click(lambda :"", None, txt)
+    emptyBtn.click(reset_state, outputs=[chatbot, context])
+    newSystemPrompt.submit(update_system, newSystemPrompt, systemPrompt)
+    newSystemPrompt.submit(lambda x: x, newSystemPrompt, systemPromptDisplay)
+    newSystemPrompt.submit(lambda :"", None, newSystemPrompt)
+    retryBtn.click(retry, [chatbot, systemPrompt, context], [chatbot, context], show_progress=True)
+    delLastBtn.click(delete_last_conversation, [chatbot, context], [chatbot, context], show_progress=True)
+    reduceTokenBtn.click(reduce_token, [chatbot, systemPrompt, context], [chatbot, context], show_progress=True)
 
 demo.launch()
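The UI wiring above replaces the old class attributes (self.context, self.system) with per-session gr.State values that are passed into and returned from every event handler. A minimal sketch of that pattern in isolation, assuming Gradio 3.x semantics (the names add_item, box and out are illustrative, not from the commit):

# Minimal gr.State sketch: a list state threaded through an event handler, like `context` above.
import gradio as gr

def add_item(items, new_item):
    # State values arrive as plain Python objects and are replaced by whatever is returned.
    items.append(new_item)
    return items, str(items)

with gr.Blocks() as sketch:
    items = gr.State([])                  # per-session value, analogous to `context`
    box = gr.Textbox()
    out = gr.Textbox(interactive=False)
    box.submit(add_item, [items, box], [items, out])

# sketch.launch()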