ai: Enable gradio default stop button.
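Adds a per-session asyncio.Event so an in-flight reply can be cancelled from the UI: respond_async now validates the multimodal input up front, streams the assistant reply in small buffered chunks while swapping the textbox's submit button for Gradio's built-in stop button, and a new stop_response handler wired to msg.stop sets the event and resets the session.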
jarvis.py
CHANGED
```diff
@@ -61,10 +61,15 @@ class SessionWithID(requests.Session):
     def __init__(self):
         super().__init__()
         self.session_id = str(uuid.uuid4())
+        self.stop_event = asyncio.Event()
 
 def create_session():
     return SessionWithID()
 
+def ensure_stop_event(sess):
+    if not hasattr(sess, "stop_event"):
+        sess.stop_event = asyncio.Event()
+
 def get_available_items(items, marked):
     a = [i for i in items if i not in marked]
     random.shuffle(a)
@@ -199,7 +204,7 @@ def extract_file_content(fp):
         return f"{fp}: {e}"
 
 async def fetch_response_async(host, key, model, msgs, cfg, sid):
-    for t in [
+    for t in [1, 2]:
         try:
             async with httpx.AsyncClient(timeout=t) as client:
                 r = await client.post(host, json={"model": model, "messages": msgs, **cfg, "session_id": sid}, headers={"Authorization": f"Bearer {key}"})
@@ -223,6 +228,7 @@ async def chat_with_model_async(history, user_input, model_display, sess, custom
         return RESPONSES["RESPONSE_3"]
     if not hasattr(sess, "session_id"):
         sess.session_id = str(uuid.uuid4())
+        sess.stop_event = asyncio.Event()
     model_key = get_model_key(model_display)
     cfg = MODEL_CONFIG.get(model_key, DEFAULT_CONFIG)
     msgs = [{"role": "user", "content": u} for u, _ in history] + [{"role": "assistant", "content": a} for _, a in history if a]
@@ -250,54 +256,73 @@ async def chat_with_model_async(history, user_input, model_display, sess, custom
         return RESPONSES["RESPONSE_2"]
 
 async def respond_async(multi, history, model_display, sess, custom_prompt):
-
-
-
+    if not hasattr(sess, "stop_event"):
+        sess.stop_event = asyncio.Event()
+    sess.stop_event.clear()
+    msg_input = {"text": multi.get("text", "").strip(), "files": multi.get("files", [])}
+    if not msg_input["text"] and not msg_input["files"]:
+        yield history, gr.MultimodalTextbox(value="", interactive=True, submit_btn=True, stop_btn=False), sess
         return
     inp = ""
-    for f in
+    for f in msg_input["files"]:
         if isinstance(f, dict):
             fp = f.get("data", f.get("name", ""))
         else:
             fp = f
         inp += f"{Path(fp).name}\n\n{extract_file_content(fp)}\n\n"
-    if
-    inp +=
-    history.append([inp, ""])
+    if msg_input["text"]:
+        inp += msg_input["text"]
+    history.append([inp, RESPONSES["RESPONSE_8"]])
+    yield history, gr.MultimodalTextbox(value="", interactive=False, submit_btn=False, stop_btn=True), sess
     ai = await chat_with_model_async(history, inp, model_display, sess, custom_prompt)
     history[-1][1] = ""
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    buffer = []
+    last_update = asyncio.get_event_loop().time()
+    for char in ai:
+        if sess.stop_event.is_set():
+            history[-1][1] = RESPONSES["RESPONSE_1"]
+            yield history, gr.MultimodalTextbox(value="", interactive=True, submit_btn=True, stop_btn=False), sess
+            sess.stop_event.clear()
+            return
+        buffer.append(char)
+        current_time = asyncio.get_event_loop().time()
+        if len(buffer) >= 8 or (current_time - last_update) > 0.04:
+            history[-1][1] += "".join(buffer)
+            buffer.clear()
+            last_update = current_time
+            yield history, gr.MultimodalTextbox(value="", interactive=False, submit_btn=False, stop_btn=True), sess
+            await asyncio.sleep(0.016)
+    if buffer:
+        history[-1][1] += "".join(buffer)
+        yield history, gr.MultimodalTextbox(value="", interactive=False, submit_btn=False, stop_btn=True), sess
+    yield history, gr.MultimodalTextbox(value="", interactive=True, submit_btn=True, stop_btn=False), sess
 
 def change_model(new):
     visible = new != MODEL_CHOICES[0]
     default = SYSTEM_PROMPT_MAPPING.get(get_model_key(new), SYSTEM_PROMPT_DEFAULT)
     return [], create_session(), new, default, gr.update(value=default, visible=visible)
 
+def stop_response(history, sess):
+    if not hasattr(sess, "stop_event"):
+        sess.stop_event = asyncio.Event()
+    sess.stop_event.set()
+    if history:
+        history[-1][1] = RESPONSES["RESPONSE_1"]
+    new_session = create_session()
+    return history, gr.MultimodalTextbox(value="", interactive=True, submit_btn=True, stop_btn=False), new_session
+
 with gr.Blocks(fill_height=True, fill_width=True, title=AI_TYPES["AI_TYPE_4"], head=META_TAGS) as jarvis:
     user_history = gr.State([])
     user_session = gr.State(create_session())
     selected_model = gr.State(MODEL_CHOICES[0] if MODEL_CHOICES else "")
     custom_prompt_state = gr.State("")
     chatbot = gr.Chatbot(label=AI_TYPES["AI_TYPE_1"], show_copy_button=True, scale=1, elem_id=AI_TYPES["AI_TYPE_2"])
-
-    msg = gr.MultimodalTextbox(show_label=False, placeholder=RESPONSES["RESPONSE_5"], interactive=True, file_count="single", file_types=ALLOWED_EXTENSIONS)
+    msg = gr.MultimodalTextbox(show_label=False, placeholder=RESPONSES["RESPONSE_5"], interactive=True, file_count="single", file_types=ALLOWED_EXTENSIONS)
     with gr.Accordion(AI_TYPES["AI_TYPE_6"], open=False):
         model_dropdown = gr.Dropdown(show_label=False, choices=MODEL_CHOICES, value=MODEL_CHOICES[0])
         system_prompt = gr.Textbox(label=AI_TYPES["AI_TYPE_7"], lines=2, interactive=True, visible=False)
     model_dropdown.change(fn=change_model, inputs=[model_dropdown], outputs=[user_history, user_session, selected_model, custom_prompt_state, system_prompt])
     system_prompt.change(fn=lambda x: x, inputs=[system_prompt], outputs=[custom_prompt_state])
     msg.submit(fn=respond_async, inputs=[msg, user_history, selected_model, user_session, custom_prompt_state], outputs=[chatbot, msg, user_session], api_name=INTERNAL_AI_GET_SERVER)
+    msg.stop(fn=stop_response, inputs=[user_history, user_session], outputs=[chatbot, msg, user_session])
 jarvis.launch(max_file_size="1mb")
```
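For context, the pattern this commit relies on, reduced to a minimal runnable sketch. It assumes Gradio ≥ 4, where `gr.MultimodalTextbox` accepts `submit_btn`/`stop_btn` and fires a `.stop()` event when the stop button is clicked (both used verbatim in the diff above); `Session`, `respond`, and `stop_now` are illustrative names, not the app's real identifiers:

```python
import asyncio
import gradio as gr

class Session:
    # Hypothetical stand-in for the app's SessionWithID: one cancel flag per user.
    def __init__(self):
        self.stop_event = asyncio.Event()

async def respond(multi, history, sess):
    sess.stop_event.clear()
    text = (multi or {}).get("text", "").strip()
    if not text:
        yield history, gr.MultimodalTextbox(value="", interactive=True, submit_btn=True, stop_btn=False), sess
        return
    history = history + [[text, ""]]
    # While streaming, lock the box and show Gradio's built-in stop button.
    yield history, gr.MultimodalTextbox(value="", interactive=False, submit_btn=False, stop_btn=True), sess
    for ch in f"Echo: {text}":  # stand-in for a real token stream
        if sess.stop_event.is_set():  # flipped by the .stop() handler below
            break
        history[-1][1] += ch
        yield history, gr.MultimodalTextbox(value="", interactive=False, submit_btn=False, stop_btn=True), sess
        await asyncio.sleep(0.02)
    # Hand the submit button back when finished or cancelled.
    yield history, gr.MultimodalTextbox(value="", interactive=True, submit_btn=True, stop_btn=False), sess

def stop_now(sess):
    sess.stop_event.set()
    return gr.MultimodalTextbox(value="", interactive=True, submit_btn=True, stop_btn=False), sess

with gr.Blocks() as demo:
    session = gr.State(Session())
    chat = gr.Chatbot()
    box = gr.MultimodalTextbox(show_label=False, submit_btn=True, stop_btn=False)
    box.submit(respond, [box, chat, session], [chat, box, session])
    box.stop(stop_now, [session], [box, session])

demo.launch()
```

Keeping the event on the session object stored in `gr.State` scopes cancellation to a single user, which is why the commit clears the event at the start of every turn and resets it after a stop is handled.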