HaveAI committed on
Commit
f2c1196
·
verified ·
1 Parent(s): b986c5c

Update app.py

Files changed (1)
  1. app.py +33 -1034
app.py CHANGED
@@ -1,1046 +1,45 @@
1
- import os
2
- import uuid
3
- import json
4
  import gradio as gr
5
- import modelscope_studio.components.antd as antd
6
- import modelscope_studio.components.antdx as antdx
7
- import modelscope_studio.components.base as ms
8
- from openai import OpenAI
9
  import requests
10
- from typing import Generator, Dict, Any
11
- import logging
12
- import time
13
-
14
- # =========== Configuration
15
- # MODEL NAME
16
- model = os.getenv("MODEL_NAME")
17
- # Proxy server configuration
18
- PROXY_BASE_URL = os.getenv("PROXY_API_BASE", "http://localhost:8000")
19
- PROXY_TIMEOUT = int(os.getenv("PROXY_TIMEOUT", 30))
20
- MAX_RETRIES = int(os.getenv("MAX_RETRIES", 5))
21
- # Save history
22
- save_history = True
23
-
24
- # =========== Configuration
25
-
26
- # Configure logging
27
- logging.basicConfig(level=logging.INFO)
28
- logger = logging.getLogger(__name__)
29
-
30
- class DeltaObject:
31
- """模拟OpenAI Delta对象"""
32
- def __init__(self, data: dict):
33
- self.content = data.get('content')
34
- self.role = data.get('role')
35
-
36
- class ChoiceObject:
37
- """模拟OpenAI Choice对象"""
38
- def __init__(self, choice_data: dict):
39
- delta_data = choice_data.get('delta', {})
40
- self.delta = DeltaObject(delta_data)
41
- self.finish_reason = choice_data.get('finish_reason')
42
- self.index = choice_data.get('index', 0)
43
-
44
- class ChunkObject:
45
- """模拟OpenAI Chunk对象"""
46
- def __init__(self, chunk_data: dict):
47
- choices_data = chunk_data.get('choices', [])
48
- self.choices = [ChoiceObject(choice) for choice in choices_data]
49
- self.id = chunk_data.get('id', '')
50
- self.object = chunk_data.get('object', 'chat.completion.chunk')
51
- self.created = chunk_data.get('created', 0)
52
- self.model = chunk_data.get('model', '')
53
-
54
- class ProxyClient:
55
- """代理客户端,用于与中间服务通信"""
56
-
57
- def __init__(self, base_url: str, timeout: int = 30):
58
- self.base_url = base_url.rstrip('/')
59
- self.timeout = timeout
60
- self.session = requests.Session()
61
-
62
- def chat_completions_create(self, model: str, messages: list, stream: bool = True, **kwargs):
63
- """创建聊天完成请求"""
64
- url = f"{self.base_url}/chat/completions"
65
-
66
- payload = {
67
- "model": model,
68
- "messages": messages,
69
- "stream": stream,
70
- **kwargs
71
- }
72
-
73
- try:
74
- response = self.session.post(
75
- url,
76
- json=payload,
77
- stream=stream,
78
- timeout=self.timeout,
79
- headers={"Content-Type": "application/json"}
80
- )
81
- response.raise_for_status()
82
-
83
- if stream:
84
- return self._parse_stream_response(response)
85
- else:
86
- return response.json()
87
-
88
- except requests.exceptions.RequestException as e:
89
- logger.error(f"Request failed: {str(e)}")
90
- raise Exception(f"Failed to connect to proxy server: {str(e)}")
91
-
92
- def _parse_stream_response(self, response) -> Generator[ChunkObject, None, None]:
93
- """解析流式响应"""
94
- try:
95
- # Make sure the response encoding is correct
96
- response.encoding = 'utf-8'
97
-
98
- for line in response.iter_lines(decode_unicode=True):
99
- if not line:
100
- continue
101
-
102
- line = line.strip()
103
- if line.startswith('data: '):
104
- data = line[6:]  # Strip the 'data: ' prefix
105
-
106
- if data == '[DONE]':
107
- break
108
-
109
- try:
110
- chunk_data = json.loads(data)
111
-
112
- # Check whether this is an error response
113
- if 'error' in chunk_data:
114
- raise Exception(f"Stream error: {chunk_data.get('detail', chunk_data['error'])}")
115
-
116
- # Build a response object compatible with the OpenAI client
117
- yield ChunkObject(chunk_data)
118
-
119
- except json.JSONDecodeError as e:
120
- logger.warning(f"Failed to parse JSON: {data}, error: {str(e)}")
121
- continue
122
-
123
- except Exception as e:
124
- logger.error(f"Error parsing stream response: {str(e)}")
125
- raise
126
-
127
- def health_check(self) -> dict:
128
- """健康检查"""
129
- try:
130
- url = f"{self.base_url}/health"
131
- response = self.session.get(url, timeout=self.timeout)
132
- response.raise_for_status()
133
- return response.json()
134
- except Exception as e:
135
- logger.error(f"Health check failed: {str(e)}")
136
- return {"status": "unhealthy", "error": str(e)}
137
-
138
- # Initialize the proxy client
139
- client = ProxyClient(PROXY_BASE_URL, PROXY_TIMEOUT)
140
-
141
- def chat_with_retry(history_messages, max_retries=MAX_RETRIES):
142
- """带重试机制的聊天函数"""
143
- last_exception = None
144
-
145
- for attempt in range(max_retries):
146
- try:
147
- logger.info(f"Chat attempt {attempt + 1}/{max_retries}")
148
-
149
- # Check the proxy service's health status
150
- health = client.health_check()
151
- if health.get("status") != "healthy":
152
- raise Exception(f"Proxy service unhealthy: {health}")
153
-
154
- response = client.chat_completions_create(
155
- model=model,
156
- messages=history_messages,
157
- stream=True,
158
- temperature = 0.7, top_p = 0.8
159
- )
160
-
161
- return response
162
-
163
- except Exception as e:
164
- last_exception = e
165
- logger.warning(f"Attempt {attempt + 1} failed: {str(e)}")
166
-
167
- if attempt < max_retries - 1:
168
- # Exponential backoff
169
- wait_time = min(2 ** attempt, 4)
170
- logger.info(f"Retrying in {wait_time} seconds...")
171
- time.sleep(wait_time)
172
- else:
173
- logger.error(f"All {max_retries} attempts failed")
174
-
175
- raise last_exception
176
-
177
-
178
- is_modelscope_studio = os.getenv('MODELSCOPE_ENVIRONMENT') == 'studio'
179
- def get_text(text: str, cn_text: str):
180
- if is_modelscope_studio:
181
- return cn_text
182
- return text
183
-
184
- logo_img = os.path.join(os.path.dirname(__file__), "rednote_hilab.png")
185
-
186
- DEFAULT_PROMPTS = [{
187
- "category":
188
- "🖋 Make a plan",
189
- "prompts": [
190
- "Help me with a plan to start a business",
191
- "Help me with a plan to achieve my goals",
192
- "Help me with a plan for a successful interview"
193
- ]
194
- }, {
195
- "category":
196
- "📅 Help me write",
197
- "prompts": [
198
- "Help me write a story with a twist ending",
199
- "Help me write a blog post on mental health",
200
- "Help me write a letter to my future self"
201
- ]
202
- }]
203
-
204
- DEFAULT_SUGGESTIONS = [{
205
- "label":
206
- 'Make a plan',
207
- "value":
208
- "Make a plan",
209
- "children": [{
210
- "label": "Start a business",
211
- "value": "Help me with a plan to start a business"
212
- }, {
213
- "label": "Achieve my goals",
214
- "value": "Help me with a plan to achieve my goals"
215
- }, {
216
- "label": "Successful interview",
217
- "value": "Help me with a plan for a successful interview"
218
- }]
219
- }, {
220
- "label":
221
- 'Help me write',
222
- "value":
223
- "Help me write",
224
- "children": [{
225
- "label": "Story with a twist ending",
226
- "value": "Help me write a story with a twist ending"
227
- }, {
228
- "label": "Blog post on mental health",
229
- "value": "Help me write a blog post on mental health"
230
- }, {
231
- "label": "Letter to my future self",
232
- "value": "Help me write a letter to my future self"
233
- }]
234
- }]
235
-
236
- DEFAULT_CONVERSATIONS_HISTORY = [{"role": "placeholder"}]
237
-
238
- DEFAULT_LOCALE = 'zh_CN' if is_modelscope_studio else 'en_US'
239
-
240
- DEFAULT_THEME = {
241
- "token": {
242
- "colorPrimary": "#6A57FF",
243
- }
244
- }
245
-
246
-
247
- def format_history(history):
248
- messages = [{
249
- "role": "system",
250
- "content": "You are a helpful assistant.",
251
- }]
252
- for item in history:
253
- if item["role"] == "user":
254
- messages.append({"role": "user", "content": item["content"]})
255
- elif item["role"] == "assistant":
256
- messages.append({"role": "assistant", "content": item["content"]})
257
- return messages
258
-
259
-
260
- class Gradio_Events:
261
-
262
- @staticmethod
263
- def _submit(state_value):
264
- history = state_value["conversations_history"][
265
- state_value["conversation_id"]]
266
- # submit
267
- history_messages = format_history(history)
268
-
269
- history.append({
270
- "role": "assistant",
271
- "content": "",
272
- "key": str(uuid.uuid4()),
273
- "meta": {
274
- "reason_content": ""
275
- },
276
- "loading": True,
277
- })
278
-
279
- yield {
280
- chatbot: gr.update(items=history),
281
- state: gr.update(value=state_value),
282
- }
283
- try:
284
- response = chat_with_retry(history_messages)
285
-
286
- thought_done = False
287
- for chunk in response:
288
- # Safely access chunk attributes
289
- if chunk.choices and len(chunk.choices) > 0:
290
- content = chunk.choices[0].delta.content
291
- else:
292
- content = None
293
- raise ValueError('Content is None')
294
-
295
- history[-1]["loading"] = False
296
-
297
- if content and not thought_done:
298
- thought_done = True
299
- history[-1]["content"] = ""
300
-
301
- if content:
302
- history[-1]["content"] += content
303
-
304
- yield {
305
- chatbot: gr.update(items=history),
306
- state: gr.update(value=state_value)
307
- }
308
-
309
- history[-1]["meta"]["end"] = True
310
- print("Answer: ", history[-1]["content"])
311
-
312
- except Exception as e:
313
- history[-1]["loading"] = False
314
- history[-1]["meta"]["end"] = True
315
- history[-1]["meta"]["error"] = True
316
- history[-1]["content"] = "Failed to respond, please try again."
317
- yield {
318
- chatbot: gr.update(items=history),
319
- state: gr.update(value=state_value)
320
- }
321
- print('Error: ',e)
322
- raise e
323
-
324
-
325
- @staticmethod
326
- def submit(sender_value, state_value):
327
- if not state_value["conversation_id"]:
328
- random_id = str(uuid.uuid4())
329
- history = []
330
- state_value["conversation_id"] = random_id
331
- state_value["conversations_history"][random_id] = history
332
- state_value["conversations"].append({
333
- "label": sender_value,
334
- "key": random_id
335
- })
336
-
337
- history = state_value["conversations_history"][
338
- state_value["conversation_id"]]
339
- history.append({
340
- "role": "user",
341
- "meta": {},
342
- "key": str(uuid.uuid4()),
343
- "content": sender_value
344
- })
345
-
346
- # preprocess submit
347
- yield Gradio_Events.preprocess_submit()(state_value)
348
- try:
349
- for chunk in Gradio_Events._submit(state_value):
350
- yield chunk
351
- except Exception as e:
352
- raise e
353
- finally:
354
- # postprocess submit
355
- yield Gradio_Events.postprocess_submit(state_value)
356
-
357
- @staticmethod
358
- def regenerate_message(state_value, e: gr.EventData):
359
- conversation_key = e._data["component"]["conversationKey"]
360
- history = state_value["conversations_history"][
361
- state_value["conversation_id"]]
362
- index = -1
363
- for i, conversation in enumerate(history):
364
- if conversation["key"] == conversation_key:
365
- index = i
366
- break
367
- if index == -1:
368
- yield gr.skip()
369
- history = history[:index]
370
- state_value["conversations_history"][
371
- state_value["conversation_id"]] = history
372
-
373
- yield {
374
- chatbot:gr.update(items=history),
375
- state: gr.update(value=state_value)
376
- }
377
-
378
- # preprocess submit
379
- yield Gradio_Events.preprocess_submit(clear_input=False)(state_value)
380
- try:
381
- for chunk in Gradio_Events._submit(state_value):
382
- yield chunk
383
- except Exception as e:
384
- raise e
385
- finally:
386
- # postprocess submit
387
- yield Gradio_Events.postprocess_submit(state_value)
388
-
389
-
390
- @staticmethod
391
- def preprocess_submit(clear_input=True):
392
-
393
- def preprocess_submit_handler(state_value):
394
- history = state_value["conversations_history"][
395
- state_value["conversation_id"]]
396
- for conversation in history:
397
- if "meta" in conversation:
398
- conversation["meta"]["disabled"] = True
399
- return {
400
- sender: gr.update(value=None, loading=True) if clear_input else gr.update(loading=True),
401
- conversations:
402
- gr.update(active_key=state_value["conversation_id"],
403
- items=list(
404
- map(
405
- lambda item: {
406
- **item,
407
- "disabled":
408
- True if item["key"] != state_value[
409
- "conversation_id"] else False,
410
- }, state_value["conversations"]))),
411
- add_conversation_btn:
412
- gr.update(disabled=True),
413
- clear_btn:
414
- gr.update(disabled=True),
415
- conversation_delete_menu_item:
416
- gr.update(disabled=True),
417
- chatbot:
418
- gr.update(items=history),
419
- state:
420
- gr.update(value=state_value),
421
- }
422
-
423
- return preprocess_submit_handler
424
-
425
- @staticmethod
426
- def postprocess_submit(state_value):
427
- history = state_value["conversations_history"][
428
- state_value["conversation_id"]]
429
- for conversation in history:
430
- if "meta" in conversation:
431
- conversation["meta"]["disabled"] = False
432
- return {
433
- sender: gr.update(loading=False),
434
- conversation_delete_menu_item: gr.update(disabled=False),
435
- clear_btn: gr.update(disabled=False),
436
- conversations: gr.update(items=state_value["conversations"]),
437
- add_conversation_btn: gr.update(disabled=False),
438
- chatbot: gr.update(items=history),
439
- state: gr.update(value=state_value),
440
- }
441
-
442
- @staticmethod
443
- def cancel(state_value):
444
- history = state_value["conversations_history"][
445
- state_value["conversation_id"]]
446
- history[-1]["loading"] = False
447
- history[-1]["meta"]["end"] = True
448
- history[-1]["meta"]["canceled"] = True
449
- return Gradio_Events.postprocess_submit(state_value)
450
-
451
- @staticmethod
452
- def delete_message(state_value, e: gr.EventData):
453
- conversation_key = e._data["component"]["conversationKey"]
454
- history = state_value["conversations_history"][
455
- state_value["conversation_id"]]
456
- history = [item for item in history if item["key"] != conversation_key]
457
- state_value["conversations_history"][
458
- state_value["conversation_id"]] = history
459
-
460
- return gr.update(items=history if len(history) >
461
- 0 else DEFAULT_CONVERSATIONS_HISTORY), gr.update(
462
- value=state_value)
463
 
 
 
 
 
464
 
 
465
 
466
- @staticmethod
467
- def edit_message(state_value, e: gr.EventData):
468
- conversation_key = e._data["component"]["conversationKey"]
469
- history = state_value["conversations_history"][
470
- state_value["conversation_id"]]
471
- index = -1
472
- for i, conversation in enumerate(history):
473
- if conversation["key"] == conversation_key:
474
- index = i
475
- break
476
- if index == -1:
477
- return gr.skip()
478
- state_value["editing_message_index"] = index
479
- text = ''
480
- if isinstance(history[index]["content"], str):
481
- text = history[index]["content"]
482
- else:
483
- text = history[index]["content"]["text"]
484
- return gr.update(value=text), gr.update(value=state_value)
485
 
486
- @staticmethod
487
- def confirm_edit_message(edit_textarea_value, state_value):
488
- history = state_value["conversations_history"][
489
- state_value["conversation_id"]]
490
- message = history[state_value["editing_message_index"]]
491
- if isinstance(message["content"], str):
492
- message["content"] = edit_textarea_value
493
  else:
494
- message["content"]["text"] = edit_textarea_value
495
- return gr.update(items=history), gr.update(value=state_value)
496
-
497
- @staticmethod
498
- def select_suggestion(sender_value, e: gr.EventData):
499
- return gr.update(value=sender_value[:-1] + e._data["payload"][0])
500
-
501
- @staticmethod
502
- def apply_prompt(e: gr.EventData):
503
- return gr.update(value=e._data["payload"][0]["data"]["description"])
504
-
505
- @staticmethod
506
- def new_chat(state_value):
507
- if not state_value["conversation_id"]:
508
- return gr.skip()
509
- state_value["conversation_id"] = ""
510
- return gr.update(active_key=state_value["conversation_id"]), gr.update(
511
- items=DEFAULT_CONVERSATIONS_HISTORY), gr.update(value=state_value)
512
-
513
- @staticmethod
514
- def select_conversation(state_value, e: gr.EventData):
515
- active_key = e._data["payload"][0]
516
- if state_value["conversation_id"] == active_key or (
517
- active_key not in state_value["conversations_history"]):
518
- return gr.skip()
519
- state_value["conversation_id"] = active_key
520
- return gr.update(active_key=active_key), gr.update(
521
- items=state_value["conversations_history"][active_key]), gr.update(
522
- value=state_value)
523
-
524
- @staticmethod
525
- def click_conversation_menu(state_value, e: gr.EventData):
526
- conversation_id = e._data["payload"][0]["key"]
527
- operation = e._data["payload"][1]["key"]
528
- if operation == "delete":
529
- del state_value["conversations_history"][conversation_id]
530
-
531
- state_value["conversations"] = [
532
- item for item in state_value["conversations"]
533
- if item["key"] != conversation_id
534
- ]
535
-
536
- if state_value["conversation_id"] == conversation_id:
537
- state_value["conversation_id"] = ""
538
- return gr.update(
539
- items=state_value["conversations"],
540
- active_key=state_value["conversation_id"]), gr.update(
541
- items=DEFAULT_CONVERSATIONS_HISTORY), gr.update(
542
- value=state_value)
543
- else:
544
- return gr.update(
545
- items=state_value["conversations"]), gr.skip(), gr.update(
546
- value=state_value)
547
- return gr.skip()
548
-
549
- @staticmethod
550
- def clear_conversation_history(state_value):
551
- if not state_value["conversation_id"]:
552
- return gr.skip()
553
- state_value["conversations_history"][
554
- state_value["conversation_id"]] = []
555
- return gr.update(items=DEFAULT_CONVERSATIONS_HISTORY), gr.update(
556
- value=state_value)
557
-
558
- @staticmethod
559
- def close_modal():
560
- return gr.update(open=False)
561
-
562
- @staticmethod
563
- def open_modal():
564
- return gr.update(open=True)
565
-
566
- @staticmethod
567
- def update_browser_state(state_value):
568
-
569
- return gr.update(value=dict(
570
- conversations=state_value["conversations"],
571
- conversations_history=state_value["conversations_history"]))
572
-
573
- @staticmethod
574
- def apply_browser_state(browser_state_value, state_value):
575
- state_value["conversations"] = browser_state_value["conversations"]
576
- state_value["conversations_history"] = browser_state_value[
577
- "conversations_history"]
578
- return gr.update(
579
- items=browser_state_value["conversations"]), gr.update(
580
- value=state_value)
581
-
582
-
583
- css = """
584
- .gradio-container {
585
- padding: 0 !important;
586
- }
587
- .gradio-container > main.fillable {
588
- padding: 0 !important;
589
- }
590
- #chatbot {
591
- height: calc(100vh - 21px - 16px);
592
- }
593
- #chatbot .chatbot-conversations {
594
- height: 100%;
595
- background-color: var(--ms-gr-ant-color-bg-layout);
596
- }
597
- #chatbot .chatbot-conversations .chatbot-conversations-list {
598
- padding-left: 0;
599
- padding-right: 0;
600
- }
601
- #chatbot .chatbot-chat {
602
- padding: 32px;
603
- height: 100%;
604
- }
605
- @media (max-width: 768px) {
606
- #chatbot .chatbot-chat {
607
- padding: 0;
608
- }
609
- }
610
- #chatbot .chatbot-chat .chatbot-chat-messages {
611
- flex: 1;
612
- }
613
- #chatbot .chatbot-chat .chatbot-chat-messages .chatbot-chat-message .chatbot-chat-message-footer {
614
- visibility: hidden;
615
- opacity: 0;
616
- transition: opacity 0.2s;
617
- }
618
- #chatbot .chatbot-chat .chatbot-chat-messages .chatbot-chat-message:last-child .chatbot-chat-message-footer {
619
- visibility: visible;
620
- opacity: 1;
621
- }
622
- #chatbot .chatbot-chat .chatbot-chat-messages .chatbot-chat-message:hover .chatbot-chat-message-footer {
623
- visibility: visible;
624
- opacity: 1;
625
- }
626
- """
627
-
628
-
629
- def logo():
630
- with antd.Typography.Title(level=1,
631
- elem_style=dict(fontSize=24,
632
- padding=8,
633
- margin=0)):
634
- with antd.Flex(align="center", gap="small", justify="center"):
635
- antd.Image(logo_img,
636
- preview=False,
637
- alt="logo",
638
- width=24,
639
- height=24)
640
- ms.Span("dots.llm1.inst")
641
-
642
-
643
- with gr.Blocks(css=css, fill_width=True) as demo:
644
- state = gr.State({
645
- "conversations_history": {},
646
- "conversations": [],
647
- "conversation_id": "",
648
- "editing_message_index": -1,
649
- })
650
-
651
- with ms.Application(), antdx.XProvider(
652
- theme=DEFAULT_THEME, locale=DEFAULT_LOCALE), ms.AutoLoading():
653
- with antd.Row(gutter=[20, 20], wrap=False, elem_id="chatbot"):
654
- # Left Column
655
- with antd.Col(md=dict(flex="0 0 260px", span=24, order=0),
656
- span=0,
657
- order=1,
658
- elem_classes="chatbot-conversations",
659
- elem_style=dict(
660
- maxWidth="260px",
661
- minWidth="260px",
662
- overflow="hidden")):
663
- with antd.Flex(vertical=True,
664
- gap="small",
665
- elem_style=dict(height="100%", width="100%", minWidth="0")):
666
- # Logo
667
- logo()
668
-
669
- # New Conversation Button
670
- with antd.Button(value=None,
671
- color="primary",
672
- variant="filled",
673
- block=True, elem_style=dict(maxWidth="100%")) as add_conversation_btn:
674
- ms.Text(get_text("New Conversation", "新建对话"))
675
- with ms.Slot("icon"):
676
- antd.Icon("PlusOutlined")
677
-
678
- # Conversations List
679
- with antdx.Conversations(
680
- elem_classes="chatbot-conversations-list",
681
- elem_style=dict(
682
- width="100%",
683
- minWidth="0",
684
- overflow="hidden",
685
- flex="1"
686
- )
687
- ) as conversations:
688
- with ms.Slot('menu.items'):
689
- with antd.Menu.Item(
690
- label="Delete", key="delete", danger=True
691
- ) as conversation_delete_menu_item:
692
- with ms.Slot("icon"):
693
- antd.Icon("DeleteOutlined")
694
- # Right Column
695
- with antd.Col(flex=1, elem_style=dict(height="100%")):
696
- with antd.Flex(vertical=True,
697
- gap="middle",
698
- elem_classes="chatbot-chat"):
699
- # Chatbot
700
- with antdx.Bubble.List(
701
- items=DEFAULT_CONVERSATIONS_HISTORY,
702
- elem_classes="chatbot-chat-messages") as chatbot:
703
- # Define Chatbot Roles
704
- with ms.Slot("roles"):
705
- # Placeholder Role
706
- with antdx.Bubble.List.Role(
707
- role="placeholder",
708
- styles=dict(content=dict(width="100%")),
709
- variant="borderless"):
710
- with ms.Slot("messageRender"):
711
- with antd.Space(
712
- direction="vertical",
713
- size=16,
714
- elem_style=dict(width="100%")):
715
- with antdx.Welcome(
716
- styles=dict(icon=dict(
717
- flexShrink=0)),
718
- variant="borderless",
719
- title=get_text(
720
- "Hello, I'm dots.",
721
- "你好,我是 dots."),
722
- description=get_text(
723
- "You can type text to get started.",
724
- "你可以输入文本开始对话。"),
725
- ):
726
- with ms.Slot("icon"):
727
- antd.Image(logo_img,
728
- preview=False)
729
- with antdx.Prompts(title=get_text(
730
- "How can I help you today?",
731
- "有什么我能帮助你的吗?"),
732
- styles={
733
- "list": {
734
- "width":
735
- '100%',
736
- },
737
- "item": {
738
- "flex": 1,
739
- },
740
- }) as prompts:
741
- for item in DEFAULT_PROMPTS:
742
- with antdx.Prompts.Item(
743
- label=item["category"]
744
- ):
745
- for prompt in item[
746
- "prompts"]:
747
- antdx.Prompts.Item(
748
- description=prompt,
749
- )
750
-
751
- # User Role
752
- with antdx.Bubble.List.Role(
753
- role="user",
754
- placement="end",
755
- elem_classes="chatbot-chat-message",
756
- class_names=dict(
757
- footer="chatbot-chat-message-footer"),
758
- styles=dict(content=dict(
759
- maxWidth="100%",
760
- overflow='auto',
761
- ))):
762
- with ms.Slot(
763
- "messageRender",
764
- params_mapping="(content) => content"):
765
-
766
- ms.Markdown()
767
- with ms.Slot("footer",
768
- params_mapping="""(bubble) => {
769
- return {
770
- copy_btn: {
771
- copyable: { text: typeof bubble.content === 'string' ? bubble.content : bubble.content?.text, tooltips: false },
772
- },
773
- edit_btn: { conversationKey: bubble.key, disabled: bubble.meta.disabled },
774
- delete_btn: { conversationKey: bubble.key, disabled: bubble.meta.disabled },
775
- };
776
- }"""):
777
- with antd.Typography.Text(
778
- copyable=dict(tooltips=False),
779
- as_item="copy_btn"):
780
- with ms.Slot("copyable.icon"):
781
- with antd.Button(value=None,
782
- size="small",
783
- color="default",
784
- variant="text"):
785
- with ms.Slot("icon"):
786
- antd.Icon("CopyOutlined")
787
- with antd.Button(value=None,
788
- size="small",
789
- color="default",
790
- variant="text"):
791
- with ms.Slot("icon"):
792
- antd.Icon("CheckOutlined")
793
- with antd.Button(value=None,
794
- size="small",
795
- color="default",
796
- variant="text",
797
- as_item="edit_btn"
798
- ) as user_edit_btn:
799
- with ms.Slot("icon"):
800
- antd.Icon("EditOutlined")
801
- with antd.Popconfirm(
802
- title="Delete the message",
803
- description=
804
- "Are you sure to delete this message?",
805
- ok_button_props=dict(danger=True),
806
- as_item="delete_btn"
807
- ) as user_delete_popconfirm:
808
- with antd.Button(value=None,
809
- size="small",
810
- color="default",
811
- variant="text",
812
- as_item="delete_btn"):
813
- with ms.Slot("icon"):
814
- antd.Icon("DeleteOutlined")
815
-
816
- # Chatbot Role
817
- with antdx.Bubble.List.Role(
818
- role="assistant",
819
- placement="start",
820
- elem_classes="chatbot-chat-message",
821
- class_names=dict(
822
- footer="chatbot-chat-message-footer"),
823
- styles=dict(content=dict(
824
- maxWidth="100%", overflow='auto'))):
825
- with ms.Slot("avatar"):
826
- antd.Avatar(
827
- os.path.join(os.path.dirname(__file__),
828
- "rednote_hilab.png"))
829
- with ms.Slot(
830
- "messageRender",
831
- params_mapping="""(content, bubble) => {
832
- const has_error = bubble?.meta?.error
833
- return {
834
- answer: {
835
- value: content
836
- },
837
- canceled: bubble.meta?.canceled ? undefined : { style: { display: 'none' } }
838
- }
839
- }"""):
840
- # Render the answer content directly; there are no thinking-related components anymore
841
- ms.Markdown(
842
- as_item="answer",
843
- elem_classes="answer-content")
844
-
845
- antd.Divider(as_item="canceled")
846
- antd.Typography.Text(get_text(
847
- "Chat completion paused.", "聊天已暂停。"),
848
- as_item="canceled",
849
- type="warning")
850
-
851
- with ms.Slot("footer",
852
- params_mapping="""(bubble) => {
853
- if (bubble?.meta?.end) {
854
- return {
855
- copy_btn: {
856
- copyable: { text: bubble.content, tooltips: false },
857
- },
858
- regenerate_btn: { conversationKey: bubble.key, disabled: bubble.meta.disabled },
859
- delete_btn: { conversationKey: bubble.key, disabled: bubble.meta.disabled },
860
- edit_btn: { conversationKey: bubble.key, disabled: bubble.meta.disabled },
861
- };
862
- }
863
- return { actions_container: { style: { display: 'none' } } };
864
- }"""):
865
- with ms.Div(as_item="actions_container"):
866
- with antd.Typography.Text(
867
- copyable=dict(tooltips=False),
868
- as_item="copy_btn"):
869
- with ms.Slot("copyable.icon"):
870
- with antd.Button(
871
- value=None,
872
- size="small",
873
- color="default",
874
- variant="text"):
875
- with ms.Slot("icon"):
876
- antd.Icon(
877
- "CopyOutlined")
878
- with antd.Button(
879
- value=None,
880
- size="small",
881
- color="default",
882
- variant="text"):
883
- with ms.Slot("icon"):
884
- antd.Icon(
885
- "CheckOutlined")
886
-
887
- with antd.Popconfirm(
888
- title=get_text(
889
- "Regenerate the message",
890
- "重新生成消息"),
891
- description=get_text(
892
- "Regenerate the message will also delete all subsequent messages.",
893
- "重新生成消息将会删除所有的后续消息。"),
894
- ok_button_props=dict(
895
- danger=True),
896
- as_item="regenerate_btn"
897
- ) as chatbot_regenerate_popconfirm:
898
- with antd.Button(
899
- value=None,
900
- size="small",
901
- color="default",
902
- variant="text",
903
- as_item="regenerate_btn",
904
- ):
905
- with ms.Slot("icon"):
906
- antd.Icon("SyncOutlined")
907
- with antd.Button(value=None,
908
- size="small",
909
- color="default",
910
- variant="text",
911
- as_item="edit_btn"
912
- ) as chatbot_edit_btn:
913
- with ms.Slot("icon"):
914
- antd.Icon("EditOutlined")
915
- with antd.Popconfirm(
916
- title=get_text("Delete the message", "删除消息"),
917
- description=get_text(
918
- "Are you sure to delete this message?",
919
- "确定要删除这条消息吗?"),
920
- ok_button_props=dict(
921
- danger=True),
922
- as_item="delete_btn"
923
- ) as chatbot_delete_popconfirm:
924
- with antd.Button(
925
- value=None,
926
- size="small",
927
- color="default",
928
- variant="text",
929
- as_item="delete_btn"):
930
- with ms.Slot("icon"):
931
- antd.Icon("DeleteOutlined")
932
-
933
-
934
- # Sender
935
- with antdx.Suggestion(
936
- items=DEFAULT_SUGGESTIONS,
937
- # onKeyDown Handler in Javascript
938
- should_trigger="""(e, { onTrigger, onKeyDown }) => {
939
- switch(e.key) {
940
- case '/':
941
- onTrigger()
942
- break
943
- case 'ArrowRight':
944
- case 'ArrowLeft':
945
- case 'ArrowUp':
946
- case 'ArrowDown':
947
- break;
948
- default:
949
- onTrigger(false)
950
- }
951
- onKeyDown(e)
952
- }""") as suggestion:
953
- with ms.Slot("children"):
954
- with antdx.Sender(placeholder=get_text(
955
- "Enter / to get suggestions",
956
- "输入 / 获取建议"), ) as sender:
957
- with ms.Slot("prefix"):
958
- # Clear Button
959
- with antd.Tooltip(title=get_text(
960
- "Clear Conversation History",
961
- "清空对话历史"), ):
962
- with antd.Button(
963
- value=None,
964
- type="text") as clear_btn:
965
- with ms.Slot("icon"):
966
- antd.Icon("ClearOutlined")
967
-
968
- # Modals
969
- with antd.Modal(title=get_text("Edit Message", "编辑消息"),
970
- open=False,
971
- centered=True,
972
- width="60%") as edit_modal:
973
- edit_textarea = antd.Input.Textarea(auto_size=dict(minRows=2,
974
- maxRows=6),
975
- elem_style=dict(width="100%"))
976
- # Events Handler
977
- if save_history:
978
- browser_state = gr.BrowserState(
979
- {
980
- "conversations_history": {},
981
- "conversations": [],
982
- },
983
- storage_key="dots_chatbot_storage")
984
- state.change(fn=Gradio_Events.update_browser_state,
985
- inputs=[state],
986
- outputs=[browser_state])
987
-
988
- demo.load(fn=Gradio_Events.apply_browser_state,
989
- inputs=[browser_state, state],
990
- outputs=[conversations, state])
991
-
992
- add_conversation_btn.click(fn=Gradio_Events.new_chat,
993
- inputs=[state],
994
- outputs=[conversations, chatbot, state])
995
- conversations.active_change(fn=Gradio_Events.select_conversation,
996
- inputs=[state],
997
- outputs=[conversations, chatbot, state])
998
- conversations.menu_click(fn=Gradio_Events.click_conversation_menu,
999
- inputs=[state],
1000
- outputs=[conversations, chatbot, state])
1001
- prompts.item_click(fn=Gradio_Events.apply_prompt, outputs=[sender])
1002
-
1003
- clear_btn.click(fn=Gradio_Events.clear_conversation_history,
1004
- inputs=[state],
1005
- outputs=[chatbot, state])
1006
 
1007
- suggestion.select(fn=Gradio_Events.select_suggestion,
1008
- inputs=[sender],
1009
- outputs=[sender])
 
 
 
1010
 
1011
- gr.on(triggers=[user_edit_btn.click, chatbot_edit_btn.click],
1012
- fn=Gradio_Events.edit_message,
1013
- inputs=[state],
1014
- outputs=[edit_textarea, state]).then(fn=Gradio_Events.open_modal,
1015
- outputs=[edit_modal])
1016
- edit_modal.ok(fn=Gradio_Events.confirm_edit_message,
1017
- inputs=[edit_textarea, state],
1018
- outputs=[chatbot, state]).then(fn=Gradio_Events.close_modal,
1019
- outputs=[edit_modal])
1020
- edit_modal.cancel(fn=Gradio_Events.close_modal, outputs=[edit_modal])
1021
- gr.on(triggers=[
1022
- chatbot_delete_popconfirm.confirm, user_delete_popconfirm.confirm
1023
- ],
1024
- fn=Gradio_Events.delete_message,
1025
- inputs=[state],
1026
- outputs=[chatbot, state])
1027
 
1028
- regenerating_event = chatbot_regenerate_popconfirm.confirm(
1029
- fn=Gradio_Events.regenerate_message,
1030
- inputs=[state],
1031
- outputs=[sender, clear_btn, conversation_delete_menu_item, add_conversation_btn, conversations, chatbot, state])
1032
 
1033
- submit_event = sender.submit(fn=Gradio_Events.submit,
1034
- inputs=[sender, state],
1035
- outputs=[sender, clear_btn, conversation_delete_menu_item,
1036
- add_conversation_btn, conversations,chatbot, state])
1037
- sender.cancel(fn=None, cancels=[submit_event, regenerating_event])
1038
- sender.cancel(fn=Gradio_Events.cancel,
1039
- inputs=[state],
1040
- outputs=[
1041
- sender, conversation_delete_menu_item, clear_btn,
1042
- conversations, add_conversation_btn, chatbot, state
1043
- ])
1044
 
1045
- if __name__ == "__main__":
1046
- demo.queue(default_concurrency_limit=200).launch(ssr_mode=False, max_threads=200)
1
  import gradio as gr
2
  import requests
3
+ import os
4
 
5
+ # Read the environment variables
6
+ API_TOKEN = os.getenv("HF_API_TOKEN")
7
+ MODEL_NAME = os.getenv("MODEL_NAME", "google/flan-t5-small")
8
+ API_URL = f"https://api-inference.huggingface.co/models/{MODEL_NAME}"
9
 
10
+ headers = {"Authorization": f"Bearer {API_TOKEN}"}
11
 
12
+ def query_huggingface(prompt):
13
+ payload = {"inputs": prompt}
14
+ response = requests.post(API_URL, headers=headers, json=payload)
15
 
16
+ try:
17
+ result = response.json()
18
+ if isinstance(result, list) and "generated_text" in result[0]:
19
+ return result[0]["generated_text"]
20
+ elif isinstance(result, dict) and "generated_text" in result:
21
+ return result["generated_text"]
 
22
  else:
23
+ return f"⚠️ Ответ не распознан: {result}"
24
+ except Exception as e:
25
+ return f"❌ Ошибка: {str(e)}"
26
 
27
+ def chat(user_input, history):
28
+ history = history or []
29
+ prompt = f"Q: {user_input}\nA:"
30
+ output = query_huggingface(prompt)
31
+ history.append((user_input, output))
32
+ return history, history
33
 
34
+ with gr.Blocks() as demo:
35
+ gr.Markdown("## 🤖 Chat with a Hugging Face model")
36
+ chatbot = gr.Chatbot()
37
+ msg = gr.Textbox(placeholder="Type a message and press Enter...")
38
+ clear = gr.Button("Clear")
39
 
40
+ state = gr.State([])
41
 
42
+ msg.submit(chat, [msg, state], [chatbot, state])
43
+ clear.click(lambda: ([], []), None, [chatbot, state])
44
 
45
+ demo.launch()
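
For reference, a minimal standalone sketch of the request that the new query_huggingface() sends to the Hugging Face Inference API. It reads the same HF_API_TOKEN and MODEL_NAME environment variables as the updated app.py; the prompt text is illustrative only.

import os
import requests

# Same configuration the updated app.py reads from the environment.
API_TOKEN = os.getenv("HF_API_TOKEN")
MODEL_NAME = os.getenv("MODEL_NAME", "google/flan-t5-small")
API_URL = f"https://api-inference.huggingface.co/models/{MODEL_NAME}"

# One non-streaming text-generation request, mirroring query_huggingface().
response = requests.post(
    API_URL,
    headers={"Authorization": f"Bearer {API_TOKEN}"},
    json={"inputs": "Q: What is the capital of France?\nA:"},  # illustrative prompt
    timeout=30,
)
result = response.json()
# The API returns either a list of generations or an error dict.
print(result[0]["generated_text"] if isinstance(result, list) else result)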