Spaces:
Sleeping
Sleeping
generate_refine_paragraph
Browse files
app.py
CHANGED
@@ -785,34 +785,44 @@ def update_paragraph_correct_grammatical_spelling_errors_input(paragraph):
|
|
785 |
|
786 |
def generate_refine_paragraph(model, sys_content, eng_level, paragraph, user_refine_paragraph_prompt):
|
787 |
"""
|
788 |
-
根据用户输入的段落,调用
|
789 |
"""
|
790 |
-
|
791 |
-
|
792 |
-
|
793 |
-
|
794 |
-
|
795 |
-
|
796 |
-
|
797 |
-
|
798 |
-
|
799 |
-
|
|
|
|
|
800 |
|
801 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
802 |
|
803 |
-
|
804 |
-
|
805 |
-
|
806 |
-
|
807 |
-
|
808 |
-
|
|
|
|
|
|
|
809 |
|
810 |
-
|
811 |
-
|
812 |
-
|
|
|
813 |
try:
|
814 |
-
response = OPEN_AI_CLIENT.chat.completions.create(**request_payload)
|
815 |
-
content = response.choices[0].message.content
|
816 |
data = json.loads(content)
|
817 |
headers = ["原文", "建議", "解釋"]
|
818 |
table_data = [
|
@@ -821,15 +831,21 @@ def generate_refine_paragraph(model, sys_content, eng_level, paragraph, user_ref
|
|
821 |
]
|
822 |
|
823 |
refine_paragraph_gr_update = gr.update(value=table_data, headers=headers, visible=True)
|
824 |
-
revised_paragraph_gr_update = gr.update(value=data["Revised Paragraph"],visible=False)
|
825 |
-
|
826 |
-
except Exception as e:
|
827 |
-
print(f"An error occurred while generating refine paragraph: {e}")
|
828 |
-
attempt += 1
|
829 |
-
if attempt == max_attempts:
|
830 |
-
raise gr.Error("網路塞車,請重新嘗試一次!")
|
831 |
|
832 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
833 |
|
834 |
def update_paragraph_refine_input(text):
|
835 |
return text
|
|
|
785 |
|
786 |
def generate_refine_paragraph(model, sys_content, eng_level, paragraph, user_refine_paragraph_prompt):
|
787 |
"""
|
788 |
+
根据用户输入的段落,调用 LLM API 生成相关的段落改善建議。
|
789 |
"""
|
790 |
+
try:
|
791 |
+
user_content = f"""
|
792 |
+
eng_level is: {eng_level}
|
793 |
+
paragraph is: {paragraph}
|
794 |
+
---
|
795 |
+
{user_refine_paragraph_prompt}
|
796 |
+
"""
|
797 |
+
|
798 |
+
messages = [
|
799 |
+
{"role": "system", "content": sys_content},
|
800 |
+
{"role": "user", "content": user_content}
|
801 |
+
]
|
802 |
|
803 |
+
# 根據模型選擇 provider
|
804 |
+
if "gemini" in model.lower():
|
805 |
+
print("====gemini====")
|
806 |
+
provider = GeminiProvider()
|
807 |
+
else:
|
808 |
+
print("====openai====")
|
809 |
+
provider = OpenAIProvider(OPEN_AI_CLIENT)
|
810 |
|
811 |
+
# 使用 LLMService 處理請求
|
812 |
+
llm_service = LLMService(provider)
|
813 |
+
content = llm_service.chat(
|
814 |
+
prompt=f"{sys_content}\n{user_content}" if "gemini" in model.lower() else None,
|
815 |
+
messages=messages,
|
816 |
+
model=model,
|
817 |
+
max_tokens=4000,
|
818 |
+
response_format={"type": "json_object"}
|
819 |
+
)
|
820 |
|
821 |
+
# 處理回應格式
|
822 |
+
if isinstance(content, str) and "```json" in content:
|
823 |
+
content = content.replace("```json", "").replace("```", "")
|
824 |
+
|
825 |
try:
|
|
|
|
|
826 |
data = json.loads(content)
|
827 |
headers = ["原文", "建議", "解釋"]
|
828 |
table_data = [
|
|
|
831 |
]
|
832 |
|
833 |
refine_paragraph_gr_update = gr.update(value=table_data, headers=headers, visible=True)
|
834 |
+
revised_paragraph_gr_update = gr.update(value=data["Revised Paragraph"], visible=False)
|
835 |
+
return refine_paragraph_gr_update, revised_paragraph_gr_update
|
|
|
|
|
|
|
|
|
|
|
836 |
|
837 |
+
except (json.JSONDecodeError, KeyError, ValueError) as e:
|
838 |
+
print(f"Error parsing refine suggestions: {e}")
|
839 |
+
raise gr.Error("無法解析改善建議,請重新嘗試")
|
840 |
+
|
841 |
+
except Exception as e:
|
842 |
+
print(f"An error occurred while generating refine paragraph: {str(e)}")
|
843 |
+
error_msg = "網路塞車,請重新嘗試一次!"
|
844 |
+
if "rate limit" in str(e).lower():
|
845 |
+
error_msg = "請求過於頻繁,請稍後再試"
|
846 |
+
elif "invalid_request_error" in str(e).lower():
|
847 |
+
error_msg = "請求格式錯誤,請檢查輸入"
|
848 |
+
raise gr.Error(error_msg)
|
849 |
|
850 |
def update_paragraph_refine_input(text):
    """Forward *text* unchanged.

    Identity passthrough used as a Gradio callback to propagate the
    paragraph text into the refine-paragraph input component.

    Args:
        text: The paragraph text to propagate.

    Returns:
        The same ``text`` value, untouched.
    """
    return text
|