Spaces:
Running
Running
hugforziio
committed on
Commit
•
eca6fb0
1
Parent(s):
338810f
Create app.py
Browse files
app.py
ADDED
@@ -0,0 +1,277 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# import gradio as gr
import gradio
import json
import copy
# NOTE(review): urljoin is not referenced anywhere in this file — kept only to
# preserve the module's import surface; confirm before removing.
from urllib.parse import urljoin
import openai


# Seed conversation prepended to each request when "send with prompt" is
# checked in the UI. Each entry is a [role, content] pair, mirroring the
# message format of the OpenAI chat-completion API.
DEFAULT_PROMPT = [
    ["system", "你(assistant)是一名疯狂的摇滚乐手,用户(user)是你的粉丝。"],
    ["user", "我们来玩一个角色扮演游戏吧!请你扮演一名疯狂的摇滚乐手,而我将扮演你的粉丝。"],
    ["assistant", "真是个有趣的游戏!我将扮演一名疯狂的摇滚乐手,而你是我的粉丝。听起来真不错!让我们开始吧!"],
]
48 |
+
def on_click_send_btn(
|
49 |
+
old_state, api_key_text, chat_input_role, chat_input, prompt_table, chat_use_prompt, chat_use_history, chat_log,
|
50 |
+
temperature, top_p, choices_num, stream, max_tokens, presence_penalty, frequency_penalty, logit_bias,
|
51 |
+
):
|
52 |
+
|
53 |
+
print(prompt_table)
|
54 |
+
prompt_table = prompt_table or []
|
55 |
+
|
56 |
+
chat_log = chat_log or []
|
57 |
+
|
58 |
+
chat_log_md = ''
|
59 |
+
if chat_use_prompt:
|
60 |
+
chat_log_md += '<center>(prompt)</center>\n\n'
|
61 |
+
chat_log_md += "\n".join([xx for xx in map(lambda it: f"##### `{it[0]}`\n\n{it[1]}\n\n", prompt_table)])
|
62 |
+
chat_log_md += '\n---\n'
|
63 |
+
if True:
|
64 |
+
chat_log_md += '<center>(history)</center>\n\n' if chat_use_history else '<center>(not used history)</center>\n\n'
|
65 |
+
chat_log_md += "\n".join([xx for xx in map(lambda it: f"##### `{it[0]}`\n\n{it[1]}\n\n", chat_log)])
|
66 |
+
chat_log_md += '\n---\n'
|
67 |
+
|
68 |
+
# if chat_input=='':
|
69 |
+
# return old_state, chat_log, chat_log_md, None, None, chat_input
|
70 |
+
|
71 |
+
try:
|
72 |
+
logit_bias_json = json.dumps(logit_bias) if logit_bias else None
|
73 |
+
except:
|
74 |
+
return old_state, chat_log, chat_log_md, None, None, chat_input
|
75 |
+
|
76 |
+
new_state = copy.deepcopy(old_state) or {}
|
77 |
+
|
78 |
+
|
79 |
+
|
80 |
+
req_hist = copy.deepcopy(prompt_table) if chat_use_prompt else []
|
81 |
+
|
82 |
+
if chat_use_history:
|
83 |
+
for hh in (chat_log or []):
|
84 |
+
req_hist.append(hh)
|
85 |
+
|
86 |
+
if chat_input and chat_input!="":
|
87 |
+
req_hist.append([(chat_input_role or 'user'), chat_input])
|
88 |
+
|
89 |
+
openai.api_key = api_key_text
|
90 |
+
|
91 |
+
props = {
|
92 |
+
'model': "gpt-3.5-turbo",
|
93 |
+
'messages': [xx for xx in map(lambda it: {'role':it[0], 'content':it[1]}, req_hist)],
|
94 |
+
'temperature': temperature,
|
95 |
+
'top_p': top_p,
|
96 |
+
'n': choices_num,
|
97 |
+
'stream': stream,
|
98 |
+
'presence_penalty': presence_penalty,
|
99 |
+
'frequency_penalty': frequency_penalty,
|
100 |
+
}
|
101 |
+
if max_tokens>0:
|
102 |
+
props['max_tokens'] = max_tokens
|
103 |
+
if logit_bias_json is not None:
|
104 |
+
props['logit_bias'] = logit_bias_json
|
105 |
+
|
106 |
+
props_json = json.dumps(props)
|
107 |
+
|
108 |
+
try:
|
109 |
+
completion = openai.ChatCompletion.create(**props)
|
110 |
+
print('')
|
111 |
+
print(completion)
|
112 |
+
the_response_role = completion.choices[0].message.role
|
113 |
+
the_response = completion.choices[0].message.content
|
114 |
+
print(the_response)
|
115 |
+
print('')
|
116 |
+
chat_last_resp = json.dumps(completion.__dict__)
|
117 |
+
chat_last_resp_dict = json.loads(chat_last_resp)
|
118 |
+
chat_last_resp_dict['api_key'] = "hidden by UI"
|
119 |
+
chat_last_resp_dict['organization'] = "hidden by UI"
|
120 |
+
chat_last_resp = json.dumps(chat_last_resp_dict)
|
121 |
+
|
122 |
+
chat_log_md = ''
|
123 |
+
if chat_use_prompt:
|
124 |
+
chat_log_md += '<center>(prompt)</center>\n\n'
|
125 |
+
chat_log_md += "\n".join([xx for xx in map(lambda it: f"##### `{it[0]}`\n\n{it[1]}\n\n", prompt_table)])
|
126 |
+
chat_log_md += '\n---\n'
|
127 |
+
if True:
|
128 |
+
chat_log_md += '<center>(history)</center>\n\n' if chat_use_history else '<center>(not used history)</center>\n\n'
|
129 |
+
chat_log_md += "\n".join([xx for xx in map(lambda it: f"##### `{it[0]}`\n\n{it[1]}\n\n", chat_log)])
|
130 |
+
chat_log_md += '\n---\n'
|
131 |
+
|
132 |
+
if chat_input and chat_input!="":
|
133 |
+
chat_log.append([(chat_input_role or 'user'), chat_input])
|
134 |
+
chat_log_md += f"##### `{(chat_input_role or 'user')}`\n\n{chat_input}\n\n"
|
135 |
+
chat_log.append([the_response_role, the_response])
|
136 |
+
chat_log_md += f"##### `{the_response_role}`\n\n{the_response}\n\n"
|
137 |
+
|
138 |
+
return new_state, chat_log, chat_log_md, chat_last_resp, props_json, ''
|
139 |
+
except Exception as error:
|
140 |
+
print(error)
|
141 |
+
|
142 |
+
chat_log_md = ''
|
143 |
+
if chat_use_prompt:
|
144 |
+
chat_log_md += '<center>(prompt)</center>\n\n'
|
145 |
+
chat_log_md += "\n".join([xx for xx in map(lambda it: f"##### `{it[0]}`\n\n{it[1]}\n\n", prompt_table)])
|
146 |
+
chat_log_md += '\n---\n'
|
147 |
+
if True:
|
148 |
+
chat_log_md += '<center>(history)</center>\n\n' if chat_use_history else '<center>(not used history)</center>\n\n'
|
149 |
+
chat_log_md += "\n".join([xx for xx in map(lambda it: f"##### `{it[0]}`\n\n{it[1]}\n\n", chat_log)])
|
150 |
+
chat_log_md += '\n---\n'
|
151 |
+
|
152 |
+
# chat_log_md = ''
|
153 |
+
# chat_log_md = "\n".join([xx for xx in map(lambda it: f"##### `{it[0]}`\n\n{it[1]}\n\n", prompt_table)]) if chat_use_prompt else ''
|
154 |
+
# chat_log_md += "\n".join([xx for xx in map(lambda it: f"##### `{it[0]}`\n\n{it[1]}\n\n", hist)])
|
155 |
+
|
156 |
+
chat_log_md += "\n"
|
157 |
+
chat_log_md += str(error)
|
158 |
+
return new_state, chat_log, chat_log_md, None, props_json, chat_input
|
159 |
+
|
160 |
+
|
def clear_history():
    """Reset the chat state: empty log and a blank transcript display."""
    empty_log, empty_markdown = [], ""
    return empty_log, empty_markdown
# Custom CSS: widen editable dataframe cells, and blur the API-key textarea
# until it gains focus (a light visual shield, not real secrecy).
css = """
.table-wrap .cell-wrap input {min-width:80%}
#api-key-textbox textarea {filter:blur(8px); transition: filter 0.25s}
#api-key-textbox textarea:focus {filter:none}
"""

# NOTE(review): the indentation below was reconstructed from a
# formatting-mangled source; the exact Row/Column nesting should be
# confirmed against the rendered layout.
with gradio.Blocks(title="ChatGPT", css=css) as demo:
    # Per-session state shared across callbacks.
    global_state = gradio.State(value={})

    # https://gradio.app/docs
    # https://platform.openai.com/docs/api-reference/chat/create

    with gradio.Tab("ChatGPT"):

        with gradio.Row():
            with gradio.Column(scale=10):
                gradio.Markdown("Go to https://platform.openai.com/account/api-keys to get your API key.")
                api_key_text = gradio.Textbox(label="Your API key", elem_id="api-key-textbox")

        with gradio.Row():
            with gradio.Column(scale=2):
                # Loads the saved key from the browser's localStorage.
                # Python callback is None — all work happens client-side
                # in the _js hook.
                api_key_refresh_btn = gradio.Button("🔄 Load from browser storage")
                api_key_refresh_btn.click(
                    # get_settings,
                    None,
                    inputs=[global_state],
                    outputs=[global_state, api_key_text],
                    api_name="load-settings",
                    _js="""(global_state, api_key_text)=>{
                        global_state=(global_state??{});
                        global_state['api_key_text']=localStorage?.getItem?.('[gradio][chat-gpt-ui][api_key_text]');
                        return [global_state, global_state['api_key_text']];
                    }""",
                )
            with gradio.Column(scale=2):
                # Persists the key to localStorage (client side only; the
                # server never stores it).
                api_key_save_btn = gradio.Button("💾 Save to browser storage")
                api_key_save_btn.click(
                    # save_settings,
                    None,
                    inputs=[global_state, api_key_text],
                    outputs=[global_state, api_key_text],
                    api_name="save-settings",
                    _js="""(global_state, api_key_text)=>{
                        localStorage.setItem('[gradio][chat-gpt-ui][api_key_text]', api_key_text);
                        global_state=(global_state??{});
                        global_state['api_key_text']=localStorage?.getItem?.('[gradio][chat-gpt-ui][api_key_text]');
                        return [global_state, global_state['api_key_text']];
                    }""",
                )

        with gradio.Row():
            with gradio.Column(scale=10):
                with gradio.Box():
                    # Editable [role, content] rows, prepended to each request
                    # when "send with prompt" is checked.
                    prompt_table = gradio.Dataframe(
                        type='array',
                        label='Prompt', col_count=(2, 'fixed'), max_cols=2,
                        value=DEFAULT_PROMPT, headers=['role', 'content'], interactive=True,
                    )
                    gradio.Markdown("Will be added to the beginning of the conversation. See https://platform.openai.com/docs/guides/chat/introduction .")

        with gradio.Row():
            with gradio.Column(scale=4):
                # Request parameters, mirroring the OpenAI chat-completion API.
                with gradio.Box():
                    gradio.Markdown("See https://platform.openai.com/docs/api-reference/chat/create .")
                    chat_temperature = gradio.Slider(label="temperature", value=1, minimum=0, maximum=2)
                    chat_top_p = gradio.Slider(label="top_p", value=1, minimum=0, maximum=1)
                    chat_choices_num = gradio.Slider(label="choices num(n)", value=1, minimum=1, maximum=20)
                    # Hidden: streaming is not handled by on_click_send_btn.
                    chat_stream = gradio.Checkbox(label="stream", value=False, visible=False)
                    # -1 means "omit max_tokens from the request".
                    chat_max_tokens = gradio.Slider(label="max_tokens", value=-1, minimum=-1, maximum=4096)
                    chat_presence_penalty = gradio.Slider(label="presence_penalty", value=0, minimum=-2, maximum=2)
                    chat_frequency_penalty = gradio.Slider(label="frequency_penalty", value=0, minimum=-2, maximum=2)
                    # Hidden: raw JSON mapping of token id -> bias.
                    chat_logit_bias = gradio.Textbox(label="logit_bias", visible=False)
                pass
            with gradio.Column(scale=8):
                with gradio.Row():
                    with gradio.Column(scale=10):
                        # Session chat log: list of [role, content] pairs.
                        chat_log = gradio.State()
                        with gradio.Box():
                            chat_log_box = gradio.Markdown(label='chat history')
                        chat_input_role = gradio.Textbox(lines=1, label='role', value='user')
                        chat_input = gradio.Textbox(lines=4, label='input')
                        with gradio.Row():
                            chat_clear_history_btn = gradio.Button("clear history")
                            chat_clear_history_btn.click(clear_history, inputs=[], outputs=[chat_log, chat_log_box])
                            chat_use_prompt = gradio.Checkbox(label='send with prompt', value=True)
                            chat_use_history = gradio.Checkbox(label='send with history', value=True)
                            chat_send_btn = gradio.Button("send")
                        pass

                # Raw request/response JSON shown for debugging.
                with gradio.Row():
                    chat_last_req = gradio.JSON(label='last request')
                    chat_last_resp = gradio.JSON(label='last response')
                chat_send_btn.click(
                    on_click_send_btn,
                    inputs=[
                        global_state, api_key_text, chat_input_role, chat_input, prompt_table, chat_use_prompt, chat_use_history, chat_log,
                        chat_temperature, chat_top_p, chat_choices_num, chat_stream, chat_max_tokens, chat_presence_penalty, chat_frequency_penalty, chat_logit_bias,
                    ],
                    outputs=[global_state, chat_log, chat_log_box, chat_last_resp, chat_last_req, chat_input],
                    api_name="click-send-btn",
                )

        pass


    with gradio.Tab("Settings"):
        gradio.Markdown('Currently nothing.')
        pass


if __name__ == "__main__":
    # Queue events so long-running API calls don't block the UI; presumably
    # concurrency_count=20 allows 20 events in flight — confirm against the
    # installed Gradio version's queue() signature.
    demo.queue(concurrency_count=20).launch()