generate_topic_sentence_feedback
app.py
CHANGED
@@ -286,89 +286,148 @@ def update_points_input(points):
 
 def generate_topic_sentences(model, max_tokens, sys_content, scenario, eng_level, topic, points, user_generate_topic_sentences_prompt):
     """
-    Based on the system prompt and the user's scenario and points, calls …
     """
-    …
-        ---
-        {user_generate_topic_sentences_prompt}
-    """
-    messages = [
-        {"role": "system", "content": sys_content},
-        {"role": "user", "content": user_content}
-    ]
-    response_format = { "type": "json_object" }
-    …
 
-    try:
-        response = OPEN_AI_CLIENT.chat.completions.create(**request_payload)
-        response_content = json.loads(response.choices[0].message.content)
-        json_content = response_content["results"]
-        topic_sentences_list = [item["topic-sentence"] for item in json_content]
-        random.shuffle(topic_sentences_list)
-
-        gr_update_json = gr.update(value=json_content)
-        gr_update_radio = gr.update(choices=topic_sentences_list, visible=True)
     except Exception as e:
         print(f"An error occurred while generating topic sentences: {e}")
-        …
 
 def generate_topic_sentence_feedback(model, max_tokens, sys_content, scenario, eng_level, topic, points, topic_sentence, user_generate_topic_sentence_feedback_prompt):
     """
-    Based on the system prompt and the user's scenario, topic, points, and topic sentence, calls …
-    """
-    user_content = f"""
-        scenario is: {scenario}
-        english level is: {eng_level}
-        topic is: {topic}
-        points is: {points}
-        ---
-        my written topic sentence is: {topic_sentence}
-        ---
-        {user_generate_topic_sentence_feedback_prompt}
     """
-    …
 
-    try:
-        response = OPEN_AI_CLIENT.chat.completions.create(**request_payload)
-        content = response.choices[0].message.content.strip()
         gr_update = gr.update(value=content, visible=True)
     except Exception as e:
-        print(f"An error occurred while generating topic sentence feedback: {e}")
-        …
 
     return gr_update
 
 def generate_topic_sentences(model, max_tokens, sys_content, scenario, eng_level, topic, points, user_generate_topic_sentences_prompt):
     """
+    Based on the system prompt and the user's scenario and points, calls the LLM API to generate relevant topic sentences together with explanations of why each one is appropriate.
     """
+    try:
+        exam_history_prompt = ""
+        if eng_level == "台灣學科能力測驗等級":
+            exam_history = get_exam_history()
+            exam_history_prompt = f"""
+            Please refer to a topic scenario from the following exam history:
+            {exam_history}
+            Give a similar topic scenario and level of English, but don't use the same topic scenario.
+            """
 
+        user_content = f"""
+        scenario is: {scenario}
+        english level is: {eng_level}
+        topic is: {topic}
+        points is: {points}
+        ---
+        exam_history_prompt: {exam_history_prompt}
+        ---
+        {user_generate_topic_sentences_prompt}
         """
+
+        messages = [
+            {"role": "system", "content": sys_content},
+            {"role": "user", "content": user_content}
+        ]
 
+        # Pick the provider based on the model name
+        if "gemini" in model.lower():
+            print("====gemini====")
+            provider = GeminiProvider()
+        else:
+            print("====openai====")
+            provider = OpenAIProvider(OPEN_AI_CLIENT)
 
+        # Route the request through LLMService
+        llm_service = LLMService(provider)
+        content = llm_service.chat(
+            prompt=f"{sys_content}\n{user_content}" if "gemini" in model.lower() else None,
+            messages=messages,
+            model=model,
+            max_tokens=max_tokens,
+            response_format={"type": "json_object"}
+        )
+
+        print(f"====generate_topic_sentences====")
+        print(content)
+
+        # Normalize the response format
+        try:
+            # If the response contains several JSON blocks, try to find the right one
+            if isinstance(content, str):
+                # Remove any markdown formatting
+                if "```json" in content:
+                    json_blocks = content.split("```json")
+                    for block in json_blocks:
+                        if "```" in block:
+                            content = block.split("```")[0].strip()
+                        else:
+                            content = block.strip()
+
+                # Clean the content to make sure it is valid JSON
+                content = content.strip()
+                if content.startswith("```") and content.endswith("```"):
+                    content = content[3:-3].strip()
+
+                print(f"Cleaned content: {content}")
+                response_content = json.loads(content)
+            else:
+                response_content = content
+
+            json_content = response_content["results"]
+            topic_sentences_list = [item["topic-sentence"] for item in json_content]
+            random.shuffle(topic_sentences_list)
+
+            gr_update_json = gr.update(value=json_content)
+            gr_update_radio = gr.update(choices=topic_sentences_list, visible=True)
+            return gr_update_json, gr_update_radio
+
+        except (json.JSONDecodeError, KeyError, ValueError) as e:
+            print(f"Error parsing topic sentences: {e}")
+            print(f"Content causing error: {content}")
+            raise gr.Error("無法解析主題句,請重新嘗試")
 
     except Exception as e:
         print(f"An error occurred while generating topic sentences: {e}")
+        error_msg = "網路塞車,請重新嘗試一次!"
+        if "rate limit" in str(e).lower():
+            error_msg = "請求過於頻繁,請稍後再試"
+        elif "invalid_request_error" in str(e).lower():
+            error_msg = "請求格式錯誤,請檢查輸入"
+        raise gr.Error(error_msg)
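
Note on the parsing block above: both branches assume the model returns a JSON object shaped like {"results": [{"topic-sentence": ...}, ...]}, possibly wrapped in markdown fences. As a sketch only, the fence-stripping could be isolated into one small testable helper; parse_llm_json is a hypothetical name, not part of app.py, and unlike the committed loop (whose last-visited block wins) it keeps the first fenced block:

import json

def parse_llm_json(content):
    # Hypothetical helper (not in app.py): tolerate ```json fences.
    if not isinstance(content, str):
        return content  # already a parsed object
    text = content.strip()
    if "```json" in text:
        # Keep only the body of the first fenced block.
        text = text.split("```json", 1)[1].split("```", 1)[0].strip()
    elif text.startswith("```") and text.endswith("```"):
        text = text[3:-3].strip()
    return json.loads(text)

parse_llm_json('```json\n{"results": [{"topic-sentence": "A"}]}\n```')
# -> {"results": [{"topic-sentence": "A"}]}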
 
 def generate_topic_sentence_feedback(model, max_tokens, sys_content, scenario, eng_level, topic, points, topic_sentence, user_generate_topic_sentence_feedback_prompt):
     """
+    Based on the system prompt and the user's scenario, topic, points, and topic sentence, calls the LLM API to generate feedback on the written topic sentence.
     """
+    try:
+        user_content = f"""
+        scenario is: {scenario}
+        english level is: {eng_level}
+        topic is: {topic}
+        points is: {points}
+        ---
+        my written topic sentence is: {topic_sentence}
+        ---
+        {user_generate_topic_sentence_feedback_prompt}
+        """
+
+        messages = [
+            {"role": "system", "content": sys_content},
+            {"role": "user", "content": user_content}
+        ]
 
+        # Pick the provider based on the model name
+        if "gemini" in model.lower():
+            print("====gemini====")
+            provider = GeminiProvider()
+        else:
+            print("====openai====")
+            provider = OpenAIProvider(OPEN_AI_CLIENT)
+
+        # Route the request through LLMService
+        llm_service = LLMService(provider)
+        content = llm_service.chat(
+            prompt=f"{sys_content}\n{user_content}" if "gemini" in model.lower() else None,
+            messages=messages,
+            model=model,
+            max_tokens=max_tokens
+        )
 
         gr_update = gr.update(value=content, visible=True)
+
     except Exception as e:
+        print(f"An error occurred while generating topic sentence feedback: {str(e)}")
+        error_msg = "網路塞車,請重新嘗試一次!"
+        if "rate limit" in str(e).lower():
+            error_msg = "請求過於頻繁,請稍後再試"
+        elif "invalid_request_error" in str(e).lower():
+            error_msg = "請求格式錯誤,請檢查輸入"
+        raise gr.Error(error_msg)
 
     return gr_update
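
GeminiProvider, OpenAIProvider, and LLMService are defined elsewhere in the repo and are not shown in this diff; judging only from the call sites, the interface they assume looks roughly like the sketch below (class and parameter names inferred, not confirmed). The prompt argument is populated only for Gemini, which suggests that backend takes one flattened string while the OpenAI backend consumes the role-structured messages list:

from typing import Any, Optional, Protocol

class ChatProvider(Protocol):
    # Inferred from the llm_service.chat(...) call sites above.
    def chat(self, prompt: Optional[str] = None, messages: Optional[list] = None,
             model: Optional[str] = None, max_tokens: Optional[int] = None,
             response_format: Optional[dict] = None) -> Any: ...

class LLMService:
    # Sketch of the dispatcher the diff relies on; the real class lives elsewhere.
    def __init__(self, provider: ChatProvider):
        self.provider = provider

    def chat(self, **kwargs) -> Any:
        # Delegate to the backend chosen from the model name.
        return self.provider.chat(**kwargs)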
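
The two except blocks carry an identical exception-to-message mapping; a shared helper (hypothetical, not in app.py) would keep them in sync:

def map_error_message(e: Exception) -> str:
    # Mirrors the mapping used in both except blocks above.
    msg = str(e).lower()
    if "rate limit" in msg:
        return "請求過於頻繁,請稍後再試"    # rate limited, retry later
    if "invalid_request_error" in msg:
        return "請求格式錯誤,請檢查輸入"    # malformed request, check input
    return "網路塞車,請重新嘗試一次!"      # generic: network congestion, try again

Each handler would then reduce to raise gr.Error(map_error_message(e)).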