Merge branch 'PERPLEXITY_CLIENT'
- app.py +14 -21
- chatbot.py +160 -1
app.py CHANGED
@@ -73,6 +73,7 @@ from google.oauth2.service_account import Credentials
 import vertexai
 from vertexai.generative_models import GenerativeModel, Part
 
+
 # import boto3
 
 from chatbot import Chatbot
@@ -96,6 +97,7 @@ if is_env_local:
     OPEN_AI_ASSISTANT_ID_GPT4_BOT1 = config["OPEN_AI_ASSISTANT_ID_GPT4_BOT1"]
     OPEN_AI_ASSISTANT_ID_GPT3_BOT1 = config["OPEN_AI_ASSISTANT_ID_GPT3_BOT1"]
     GROQ_API_KEY = config["GROQ_API_KEY"]
+    PERPLEXITY_API_KEY = config["PERPLEXITY_API_KEY"]
     JUTOR_CHAT_KEY = config["JUTOR_CHAT_KEY"]
     AWS_ACCESS_KEY = config["AWS_ACCESS_KEY"]
     AWS_SECRET_KEY = config["AWS_SECRET_KEY"]
@@ -112,6 +114,7 @@ else:
     OPEN_AI_ASSISTANT_ID_GPT4_BOT1 = os.getenv("OPEN_AI_ASSISTANT_ID_GPT4_BOT1")
     OPEN_AI_ASSISTANT_ID_GPT3_BOT1 = os.getenv("OPEN_AI_ASSISTANT_ID_GPT3_BOT1")
     GROQ_API_KEY = os.getenv("GROQ_API_KEY")
+    PERPLEXITY_API_KEY = os.getenv("PERPLEXITY_API_KEY")
    JUTOR_CHAT_KEY = os.getenv("JUTOR_CHAT_KEY")
    AWS_ACCESS_KEY = os.getenv("AWS_ACCESS_KEY")
    AWS_SECRET_KEY = os.getenv("AWS_SECRET_KEY")
@@ -139,12 +142,7 @@ GBQ_CLIENT = bigquery.Client.from_service_account_info(json.loads(GBQ_KEY))
 GROQ_CLIENT = Groq(api_key=GROQ_API_KEY)
 GCS_SERVICE = GoogleCloudStorage(GCS_KEY)
 GCS_CLIENT = GCS_SERVICE.client
-# BEDROCK_CLIENT = boto3.client(
-#     service_name="bedrock-runtime",
-#     aws_access_key_id=AWS_ACCESS_KEY,
-#     aws_secret_access_key=AWS_SECRET_KEY,
-#     region_name=AWS_REGION_NAME,
-# )
+PERPLEXITY_CLIENT = OpenAI(api_key=PERPLEXITY_API_KEY, base_url="https://api.perplexity.ai")
 
 # check open ai access
 def check_open_ai_access(open_ai_api_key):
@@ -2369,7 +2367,7 @@ def get_instructions(content_subject, content_grade, transcript_text, key_moment
     grade: {content_grade}
     context: {key_moments}
     transcript_text: {transcript_text}
-    Assistant Role: you are a {content_subject} assistant. you can call yourself as {content_subject} 學伴
+    Assistant Role: you are a {content_subject} assistant. you can call yourself as {content_subject} 學伴 and your name if you know
     User Role: {content_grade} th-grade student.
     Method: {method}
     Language: Traditional Chinese ZH-TW (it's very important), suitable for {content_grade} th-grade level.
@@ -2582,23 +2580,18 @@ def get_chatbot_config(ai_name, transcript_state, key_moments, content_subject,
     ai_name_clients_model = {
         "foxcat": {
             "ai_name": "foxcat",
-            "ai_client":
-            "ai_model_name": "
+            "ai_client": PERPLEXITY_CLIENT,
+            "ai_model_name": "perplexity_sonar",
         },
-        # "lili": {
-        #     "ai_name": "lili",
-        #     "ai_client": BEDROCK_CLIENT,
-        #     "ai_model_name": "claude3",
-        # },
         "lili": {
             "ai_name": "lili",
-            "ai_client":
-            "ai_model_name": "
+            "ai_client": PERPLEXITY_CLIENT,
+            "ai_model_name": "perplexity_r1_1776",
         },
         "maimai": {
             "ai_name": "maimai",
-            "ai_client":
-            "ai_model_name": "
+            "ai_client": PERPLEXITY_CLIENT,
+            "ai_model_name": "perplexity_r1_1776",
        }
    }
    ai_client = ai_name_clients_model.get(ai_name, "foxcat")["ai_client"]
@@ -3343,7 +3336,7 @@ def create_app():
             with gr.Accordion("🦄 飛特精靈 敘述", open=False):
                 vaitor_chatbot_description_value = gr.Markdown(value=vaitor_chatbot_description, visible=True)
         # 狐狸貓
-        with gr.Column(scale=1, variant="panel", visible=
+        with gr.Column(scale=1, variant="panel", visible=True):
             foxcat_chatbot_avatar_url = "https://storage.googleapis.com/wpassets.junyiacademy.org/1/2020/06/%E7%A7%91%E5%AD%B8%E5%BE%BD%E7%AB%A0-2-150x150.png"
             foxcat_avatar_images = gr.State([user_avatar, foxcat_chatbot_avatar_url])
             foxcat_chatbot_description = """Hi,我是【狐狸貓】,可以陪你一起學習本次的內容,有什麼問題都可以問我喔!\n
@@ -3358,7 +3351,7 @@ def create_app():
             with gr.Accordion("💜 狐狸貓 敘述", open=False):
                 foxcat_chatbot_description_value = gr.Markdown(value=foxcat_chatbot_description, visible=True)
         # 梨梨
-        with gr.Column(scale=1, variant="panel", visible=
+        with gr.Column(scale=1, variant="panel", visible=True):
             lili_chatbot_avatar_url = "https://junyitopicimg.s3.amazonaws.com/live/v1283-new-topic-44-icon.png"
             lili_avatar_images = gr.State([user_avatar, lili_chatbot_avatar_url])
             lili_chatbot_description = """你好,我是溫柔的【梨梨】,很高興可以在這裡陪伴你學習。如果你有任何疑問,請隨時向我提出哦! \n
@@ -3377,7 +3370,7 @@ def create_app():
             with gr.Accordion("🧡 梨梨 敘述", open=False):
                 lili_chatbot_description_value = gr.Markdown(value=lili_chatbot_description, visible=True)
         # 麥麥
-        with gr.Column(scale=1, variant="panel", visible=
+        with gr.Column(scale=1, variant="panel", visible=True):
             maimai_chatbot_avatar_url = "https://storage.googleapis.com/wpassets.junyiacademy.org/1/2020/07/%E6%80%9D%E8%80%83%E5%8A%9B%E8%B6%85%E4%BA%BA%E5%BE%BD%E7%AB%A0_%E5%B7%A5%E4%BD%9C%E5%8D%80%E5%9F%9F-1-%E8%A4%87%E6%9C%AC-150x150.png"
             maimai_avatar_images = gr.State([user_avatar, maimai_chatbot_avatar_url])
            maimai_chatbot_description = """Hi,我是迷人的【麥麥】,我在這裡等著和你一起探索新知,任何疑問都可以向我提出!\n
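Note on the new client: PERPLEXITY_CLIENT reuses the stock OpenAI SDK against Perplexity's OpenAI-compatible endpoint, so no extra dependency is introduced. Below is a minimal standalone sketch of the same wiring; the env-var name, base URL, model name, and sampling parameters are taken from this diff, while the script form and the sample prompt are illustrative only and assume PERPLEXITY_API_KEY is set in the environment.

    import os
    from openai import OpenAI

    # Same construction as in app.py: the regular OpenAI client with base_url
    # pointed at Perplexity's OpenAI-compatible Chat Completions endpoint.
    perplexity_client = OpenAI(
        api_key=os.getenv("PERPLEXITY_API_KEY"),
        base_url="https://api.perplexity.ai",
    )

    # "sonar" is the model name chatbot.py maps "perplexity_sonar" to.
    response = perplexity_client.chat.completions.create(
        model="sonar",
        messages=[
            {"role": "system", "content": "你是國小數學學伴,請用繁體中文回答。"},
            {"role": "user", "content": "什麼是質數?"},
        ],
        max_tokens=500,
        temperature=0.7,
    )
    print(response.choices[0].message.content)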
chatbot.py CHANGED
@@ -1,6 +1,7 @@
 import gradio as gr
 import json
 import requests
+import re
 
 class Chatbot:
     def __init__(self, config):
@@ -43,8 +44,10 @@ class Chatbot:
         try:
             messages = self.prepare_messages(chat_history, user_message)
             system_prompt = self.instructions
+            system_prompt += "\n\n告知用戶你現在是誰,第一次加上科目學伴及名字,後面就只說名字就好,但不用每次都說,自然就好,不用每一句都特別說明,口氣請符合給予的人設,請用繁體中文回答"
             service_type = self.ai_model_name
             response_text = self.chat_with_service(service_type, system_prompt, messages)
+
         except Exception as e:
             print(f"Error: {e}")
             response_text = "學習精靈有點累,請稍後再試!"
@@ -77,6 +80,8 @@ class Chatbot:
             return self.chat_with_groq(service_type, system_prompt, messages)
         elif service_type == 'claude3':
             return self.chat_with_claude3(system_prompt, messages)
+        elif service_type in ['perplexity_sonar', 'perplexity_sonar_pro', 'perplexity_r1_1776']:
+            return self.chat_with_perplexity(service_type, system_prompt, messages)
         else:
             raise gr.Error("不支持的服务类型")
 
@@ -90,7 +95,6 @@ class Chatbot:
         model = "gpt-4o"
         print("======model======")
         print(model)
-        # model = "gpt-3.5-turbo-0125"
         data = {
             "data": {
                 "messages": messages,
@@ -152,3 +156,158 @@ class Chatbot:
         response_body = json.loads(response.get('body').read())
         response_completion = response_body.get('content')[0].get('text').strip()
         return response_completion
+
+    def chat_with_perplexity(self, service_type, system_prompt, messages):
+        """使用 Perplexity API 進行對話"""
+        if not system_prompt.strip():
+            raise ValueError("System prompt cannot be empty")
+
+        # 清理用戶訊息中的特殊指令
+        for msg in messages:
+            if msg["role"] == "user":
+                # 移除可能導致問題的特殊指令
+                msg["content"] = msg["content"].replace("/n", "\n")
+                # 移除括號內的特殊指令
+                msg["content"] = re.sub(r'\(請一定要用繁體中文回答.*?\)', '', msg["content"])
+
+        # 系統提示放在最前面
+        clean_messages = [{"role": "system", "content": system_prompt}]
+        # 添加其他訊息
+        for msg in messages:
+            if msg["role"] != "system":  # 避免重複添加系統提示
+                clean_messages.append(msg)
+
+        # 在系統提示中添加 Markdown 和 LaTeX 格式指導
+        system_prompt += "\n\n重要:使用 LaTeX 數學符號時,請確保格式正確。數學表達式應該使用 $ 符號包圍,例如:$7 \\times 10^4$。不要使用 ** 符號來強調數字,而是使用 $ 符號,例如:$7$個萬 ($7 \\times 10000$)。不要使用 \\text 或 \\quad 等命令。"
+
+        # 根據服務類型選擇模型
+        model_name_dict = {
+            "perplexity_sonar": "sonar",
+            "perplexity_sonar_pro": "sonar-pro",
+            "perplexity_r1_1776": "r1-1776"
+        }
+        model = model_name_dict.get(service_type, "sonar")
+
+        print("======model======")
+        print(model)
+        print("======clean_messages======")
+        print(json.dumps(clean_messages[:1], ensure_ascii=False))  # 只打印系統提示的前部分
+
+        try:
+            perplexity_client = self.ai_client
+
+            # 針對 r1-1776 模型調整參數
+            if service_type == "perplexity_r1_1776":
+                # 增加 max_tokens 並添加特殊指令
+                response = perplexity_client.chat.completions.create(
+                    model=model,
+                    messages=clean_messages,
+                    max_tokens=1000,  # 增加 token 限制
+                    temperature=0.7,
+                    top_p=0.9
+                )
+            else:
+                response = perplexity_client.chat.completions.create(
+                    model=model,
+                    messages=clean_messages,
+                    max_tokens=500,
+                    temperature=0.7,
+                    top_p=0.9
+                )
+
+            # 檢查回應是否為空
+            if not hasattr(response, 'choices') or len(response.choices) == 0:
+                print("警告:API 回傳無效回應結構")
+                return "學習精靈暫時無法回答,請稍後再試!"
+
+            response_completion = response.choices[0].message.content
+            if not response_completion or response_completion.strip() == "":
+                print("警告:API 回傳空回應")
+                return "學習精靈暫時無法回答,請稍後再試!"
+
+            # 處理回應中的思考過程標籤和修正 LaTeX 格式
+            response_completion = self._process_response(response_completion)
+
+            # 打印處理後的回應以便調試
+            print("======processed_response======")
+            print(response_completion)
+
+            return response_completion.strip()
+
+        except Exception as e:
+            print(f"Perplexity API Error: {e}")
+            print(f"Error details: {str(e)}")
+            # 嘗試使用備用模型
+            try:
+                if service_type == "perplexity_r1_1776":
+                    print("嘗試使用備用模型 sonar")
+                    backup_response = perplexity_client.chat.completions.create(
+                        model="sonar",
+                        messages=clean_messages,
+                        max_tokens=500,
+                        temperature=0.7
+                    )
+                    backup_completion = backup_response.choices[0].message.content
+                    backup_completion = self._process_response(backup_completion)
+                    return backup_completion.strip()
+            except Exception as backup_error:
+                print(f"備用模型也失敗: {backup_error}")
+
+            return "學習精靈暫時無法回答,請稍後再試!"
+
+    def _process_response(self, response_text):
+        """處理回應中的思考過程標籤和修正 LaTeX 格式"""
+        # 移除 <think>...</think> 區塊
+        import re
+        response_text = re.sub(r'<think>.*?</think>', '', response_text, flags=re.DOTALL)
+
+        # 移除其他可能的標籤或指令
+        response_text = re.sub(r'(偷偷說.*?)', '', response_text, flags=re.DOTALL)
+
+        # 修正 Markdown 格式
+        # 1. 確保項目符號前後有正確的空格和換行
+        response_text = re.sub(r'(\n|^)(\s*)([-•○●◦])\s*', r'\1\2\3 ', response_text)
+
+        # 2. 確保數字列表前後有正確的空格和換行
+        response_text = re.sub(r'(\n|^)(\s*)(\d+\.)\s*', r'\1\2\3 ', response_text)
+
+        # 3. 修正 LaTeX 格式
+        # 移除不正確的 LaTeX 命令
+        response_text = re.sub(r'\\text\{([^}]+)\}', r'\1', response_text)
+        response_text = re.sub(r'\\quad', ' ', response_text)
+
+        # 4. 修正數學表達式
+        # 確保數學表達式中的乘法符號格式正確
+        response_text = re.sub(r'(\d+)個「([^」]+)」→\s*(\d+)\\times(\d+)', r'\1個「\2」→ $\3\\times\4$', response_text)
+
+        # 5. 修正單獨數字的 LaTeX 格式
+        # 將單獨的數字包裹在 $ 符號中
+        response_text = re.sub(r'([^$\d])(\d+)([^$\d\w])', r'\1$\2$\3', response_text)
+
+        # 6. 修正連續的 LaTeX 表達式
+        # 確保連續的 LaTeX 表達式之間有空格
+        response_text = re.sub(r'\$([^$]+)\$\$([^$]+)\$', r'$\1$ $\2$', response_text)
+
+        # 7. 移除單獨的 $ 符號
+        response_text = re.sub(r'(?<!\$)\$(?!\$)\s*$', '', response_text)
+        response_text = re.sub(r'^\s*\$(?!\$)', '', response_text)
+        response_text = re.sub(r'(?<!\$)\$(?!\$)\s*\n', '\n', response_text)
+
+        # 8. 確保成對的 $ 符號
+        dollar_count = response_text.count('$')
+        if dollar_count % 2 != 0:
+            # 如果 $ 符號數量為奇數,移除最後一個 $
+            last_dollar_pos = response_text.rfind('$')
+            if last_dollar_pos != -1:
+                response_text = response_text[:last_dollar_pos] + response_text[last_dollar_pos+1:]
+
+        # 9. 修正錯誤的粗體標記
+        # 將 **數字** 格式修正為正確的數字格式
+        response_text = re.sub(r'\*\*(\d+)\*\*', r'$\1$', response_text)
+
+        # 如果處理後的回應為空,返回原始回應
+        if not response_text.strip():
+            return "學習精靈暫時無法回答,請稍後再試!"
+
+        return response_text
+
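As a quick local check of the new post-processing, _process_response can be exercised on its own: it reads only its response_text argument, so the sketch below skips __init__ (whose config keys are not shown in this diff) and feeds it a made-up r1-1776 style reply.

    from chatbot import Chatbot

    # _process_response touches no instance state, so bypass __init__ for this check.
    bot = Chatbot.__new__(Chatbot)

    # Made-up reply in the style r1-1776 tends to return: a <think> block plus LaTeX.
    raw_reply = "<think>先整理解題步驟</think>質數是只能被一和自己整除的數,例如 $2$、$3$、$5$。"
    print(bot._process_response(raw_reply))
    # Prints the reply with the <think> block stripped; the later regex passes would
    # also normalize bullets, bold digits, and unpaired $ signs if they appeared.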