Spaces:
Sleeping
Sleeping
generate_supporting_sentences
Browse files
app.py
CHANGED
@@ -457,35 +457,51 @@ def update_topic_sentence_input(topic_sentences_json, selected_topic_sentence):
|
|
457 |
|
458 |
def generate_supporting_sentences(model, max_tokens, sys_content, scenario, eng_level, topic, points, topic_sentence, user_generate_supporting_sentences_prompt):
|
459 |
"""
|
460 |
-
根据系统提示和用户输入的情境、主题、要点、主题句,调用
|
461 |
"""
|
462 |
-
|
463 |
-
|
464 |
-
|
465 |
-
|
466 |
-
|
467 |
-
|
468 |
-
|
469 |
-
|
470 |
-
|
471 |
-
|
472 |
-
|
473 |
-
|
474 |
-
|
|
|
|
|
475 |
|
476 |
-
|
477 |
-
"
|
478 |
-
|
479 |
-
|
480 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
481 |
|
482 |
-
try:
|
483 |
-
response = OPEN_AI_CLIENT.chat.completions.create(**request_payload)
|
484 |
-
content = response.choices[0].message.content.strip()
|
485 |
gr_update = gr.update(choices=[content], visible=True)
|
|
|
486 |
except Exception as e:
|
487 |
-
print(f"An error occurred while generating supporting sentences: {e}")
|
488 |
-
|
|
|
|
|
|
|
|
|
|
|
489 |
|
490 |
return gr_update
|
491 |
|
|
|
def generate_supporting_sentences(model, max_tokens, sys_content, scenario, eng_level, topic, points, topic_sentence, user_generate_supporting_sentences_prompt):
    """
    Generate supporting sentences for an essay paragraph via an LLM.

    Builds a user prompt from the scenario, English level, topic, points and
    topic sentence, dispatches it to the provider implied by ``model``
    (Gemini vs. OpenAI), and returns a Gradio update that shows the generated
    text as the single choice of a (now visible) component.

    Args:
        model: Model identifier; a name containing "gemini" (case-insensitive)
            routes to GeminiProvider, anything else to OpenAIProvider.
        max_tokens: Token limit forwarded to the LLM request.
        sys_content: System prompt content.
        scenario, eng_level, topic, points, topic_sentence: User-supplied
            essay context interpolated into the prompt.
        user_generate_supporting_sentences_prompt: Task-specific instruction
            appended after the context block.

    Returns:
        gr.update(choices=[content], visible=True) on success.

    Raises:
        gr.Error: with a user-facing (Chinese) message on any failure;
            rate-limit and invalid-request errors get specific messages.
    """
    try:
        user_content = f"""
        scenario is: {scenario}
        english level is: {eng_level}
        topic is: {topic}
        points is: {points}
        topic sentence is: {topic_sentence}
        ---
        {user_generate_supporting_sentences_prompt}
        """

        messages = [
            {"role": "system", "content": sys_content},
            {"role": "user", "content": user_content}
        ]

        # Decide the route once so the provider choice and the prompt-shape
        # choice below can never disagree.
        is_gemini = "gemini" in model.lower()

        # Select the provider based on the model name.
        if is_gemini:
            print("====gemini====")
            provider = GeminiProvider()
        else:
            print("====openai====")
            provider = OpenAIProvider(OPEN_AI_CLIENT)

        # Delegate the request to LLMService. Gemini takes a single flat
        # prompt string; OpenAI-style providers consume the messages list.
        llm_service = LLMService(provider)
        content = llm_service.chat(
            prompt=f"{sys_content}\n{user_content}" if is_gemini else None,
            messages=messages,
            model=model,
            max_tokens=max_tokens
        )

        gr_update = gr.update(choices=[content], visible=True)

    except Exception as e:
        print(f"An error occurred while generating supporting sentences: {str(e)}")
        # Map known error signatures to friendlier user-facing messages.
        error_msg = "網路塞車,請重新嘗試一次!"
        if "rate limit" in str(e).lower():
            error_msg = "請求過於頻繁,請稍後再試"
        elif "invalid_request_error" in str(e).lower():
            error_msg = "請求格式錯誤,請檢查輸入"
        raise gr.Error(error_msg)

    return gr_update