youngtsai committed on
Commit
505e6ac
·
1 Parent(s): d3c4560

generate_topic_sentence_feedback

Browse files
Files changed (1) hide show
  1. app.py +126 -67
app.py CHANGED
@@ -286,89 +286,148 @@ def update_points_input(points):
286
 
287
def generate_topic_sentences(model, max_tokens, sys_content, scenario, eng_level, topic, points, user_generate_topic_sentences_prompt):
    """
    Generate candidate topic sentences (with plausibility explanations)
    for the given scenario/level/topic/points via the OpenAI chat API.

    Returns:
        tuple[gr.update, gr.update]: an update carrying the parsed JSON
        results, and a radio-group update listing the shuffled sentences.

    Raises:
        gr.Error: with a generic retry message when the API call or the
        response parsing fails.
    """
    # GSAT level: anchor the request on historical exam scenarios so the
    # output matches that exam's difficulty without repeating a past topic.
    if eng_level == "台灣學科能力測驗等級":
        exam_history = get_exam_history()
        exam_history_prompt = f"""
        Please refer a topic scenario from the following exam history:
        {exam_history}
        give similar topic scenario and level of English. But don't use the same topic scenario.
        """
    else:
        exam_history_prompt = ""

    user_content = f"""
    scenario is: {scenario}
    english level is: {eng_level}
    topic is: {topic}
    points is: {points}
    ---
    exam_history_prompt: {exam_history_prompt}
    ---
    {user_generate_topic_sentences_prompt}
    """
    messages = [
        {"role": "system", "content": sys_content},
        {"role": "user", "content": user_content}
    ]

    request_payload = {
        "model": model,
        "messages": messages,
        "max_tokens": max_tokens,
        # Force the model to emit a JSON object so it can be parsed below.
        "response_format": {"type": "json_object"}
    }

    try:
        response = OPEN_AI_CLIENT.chat.completions.create(**request_payload)
        response_content = json.loads(response.choices[0].message.content)
        json_content = response_content["results"]
        topic_sentences_list = [item["topic-sentence"] for item in json_content]
        # Shuffle so the correct/most-plausible sentence has no fixed position.
        random.shuffle(topic_sentences_list)

        gr_update_json = gr.update(value=json_content)
        gr_update_radio = gr.update(choices=topic_sentences_list, visible=True)
    except Exception as e:
        print(f"An error occurred while generating topic sentences: {e}")
        raise gr.Error("網路塞車,請重新嘗試一次!")

    return gr_update_json, gr_update_radio
 
 
 
339
 
340
def generate_topic_sentence_feedback(model, max_tokens, sys_content, scenario, eng_level, topic, points, topic_sentence, user_generate_topic_sentence_feedback_prompt):
    """
    Request feedback on the user's written topic sentence from the OpenAI
    chat API, given the scenario, English level, topic and points.

    Returns:
        gr.update: a visible component update containing the feedback text.

    Raises:
        gr.Error: with a generic retry message when the API call fails.
    """
    user_content = f"""
    scenario is: {scenario}
    english level is: {eng_level}
    topic is: {topic}
    points is: {points}
    ---
    my written topic sentence is: {topic_sentence}
    ---
    {user_generate_topic_sentence_feedback_prompt}
    """
    messages = [
        {"role": "system", "content": sys_content},
        {"role": "user", "content": user_content}
    ]

    # Plain-text completion — no JSON response_format here, unlike
    # generate_topic_sentences.
    request_payload = {
        "model": model,
        "messages": messages,
        "max_tokens": max_tokens,
    }

    try:
        response = OPEN_AI_CLIENT.chat.completions.create(**request_payload)
        content = response.choices[0].message.content.strip()
        gr_update = gr.update(value=content, visible=True)
    except Exception as e:
        print(f"An error occurred while generating topic sentence feedback: {e}")
        raise gr.Error("網路塞車,請重新嘗試一次!")

    return gr_update
374
 
 
286
 
287
def _extract_json_payload(text):
    """Strip markdown code fences from an LLM reply, returning the JSON text.

    Prefers the first ```json fenced block; otherwise removes bare ```
    fences wrapping the whole reply. (The previous implementation looped
    over every split segment and overwrote the result each pass, so prose
    after the JSON block could clobber the extracted JSON.)
    """
    text = text.strip()
    if "```json" in text:
        # Take everything after the first ```json fence, up to the closing fence.
        block = text.split("```json", 1)[1]
        return block.split("```", 1)[0].strip()
    if text.startswith("```") and text.endswith("```"):
        return text[3:-3].strip()
    return text


def generate_topic_sentences(model, max_tokens, sys_content, scenario, eng_level, topic, points, user_generate_topic_sentences_prompt):
    """
    Generate candidate topic sentences (with plausibility explanations)
    via the configured LLM provider, chosen by model name.

    Returns:
        tuple[gr.update, gr.update]: an update carrying the parsed JSON
        results, and a visible radio-group update with the shuffled
        topic sentences.

    Raises:
        gr.Error: with a user-facing message describing the failure
        (parse error, rate limit, invalid request, or generic retry).
    """
    try:
        # GSAT level: anchor the request on historical exam scenarios so the
        # output matches that difficulty without repeating a past topic.
        exam_history_prompt = ""
        if eng_level == "台灣學科能力測驗等級":
            exam_history = get_exam_history()
            exam_history_prompt = f"""
            Please refer a topic scenario from the following exam history:
            {exam_history}
            give similar topic scenario and level of English. But don't use the same topic scenario.
            """

        user_content = f"""
        scenario is: {scenario}
        english level is: {eng_level}
        topic is: {topic}
        points is: {points}
        ---
        exam_history_prompt: {exam_history_prompt}
        ---
        {user_generate_topic_sentences_prompt}
        """

        messages = [
            {"role": "system", "content": sys_content},
            {"role": "user", "content": user_content}
        ]

        # Select the provider from the model name.
        if "gemini" in model.lower():
            print("====gemini====")
            provider = GeminiProvider()
        else:
            print("====openai====")
            provider = OpenAIProvider(OPEN_AI_CLIENT)

        # Route the request through the provider-agnostic LLM service.
        # Gemini takes a single flattened prompt; OpenAI uses `messages`.
        llm_service = LLMService(provider)
        content = llm_service.chat(
            prompt=f"{sys_content}\n{user_content}" if "gemini" in model.lower() else None,
            messages=messages,
            model=model,
            max_tokens=max_tokens,
            response_format={"type": "json_object"}
        )

        print(f"====generate_topic_sentences====")
        print(content)

        try:
            if isinstance(content, str):
                # Remove any markdown fencing before parsing.
                content = _extract_json_payload(content)
                print(f"Cleaned content: {content}")
                response_content = json.loads(content)
            else:
                # Some providers may already return a parsed object.
                response_content = content

            json_content = response_content["results"]
            topic_sentences_list = [item["topic-sentence"] for item in json_content]
            # Shuffle so the best sentence has no fixed position in the quiz.
            random.shuffle(topic_sentences_list)

            gr_update_json = gr.update(value=json_content)
            gr_update_radio = gr.update(choices=topic_sentences_list, visible=True)
            return gr_update_json, gr_update_radio

        except (json.JSONDecodeError, KeyError, ValueError) as e:
            print(f"Error parsing topic sentences: {e}")
            print(f"Content causing error: {content}")
            raise gr.Error("無法解析主題句,請重新嘗試")

    except gr.Error:
        # Bug fix: let the specific user-facing error above propagate
        # unchanged instead of being swallowed by the generic handler below.
        raise
    except Exception as e:
        print(f"An error occurred while generating topic sentences: {e}")
        error_msg = "網路塞車,請重新嘗試一次!"
        if "rate limit" in str(e).lower():
            error_msg = "請求過於頻繁,請稍後再試"
        elif "invalid_request_error" in str(e).lower():
            error_msg = "請求格式錯誤,請檢查輸入"
        raise gr.Error(error_msg)
382
 
383
def generate_topic_sentence_feedback(model, max_tokens, sys_content, scenario, eng_level, topic, points, topic_sentence, user_generate_topic_sentence_feedback_prompt):
    """
    Request feedback on the user's written topic sentence from the
    configured LLM provider (selected by model name).

    Returns:
        gr.update: a visible component update containing the feedback text.

    Raises:
        gr.Error: with a user-facing message (rate limit, invalid request,
        or generic retry) when the LLM call fails.
    """
    try:
        user_content = f"""
        scenario is: {scenario}
        english level is: {eng_level}
        topic is: {topic}
        points is: {points}
        ---
        my written topic sentence is: {topic_sentence}
        ---
        {user_generate_topic_sentence_feedback_prompt}
        """

        messages = [
            {"role": "system", "content": sys_content},
            {"role": "user", "content": user_content}
        ]

        # Select the provider from the model name.
        if "gemini" in model.lower():
            print("====gemini====")
            provider = GeminiProvider()
        else:
            print("====openai====")
            provider = OpenAIProvider(OPEN_AI_CLIENT)

        # Route the request through the provider-agnostic LLM service.
        # Gemini takes a single flattened prompt; OpenAI uses `messages`.
        llm_service = LLMService(provider)
        content = llm_service.chat(
            prompt=f"{sys_content}\n{user_content}" if "gemini" in model.lower() else None,
            messages=messages,
            model=model,
            max_tokens=max_tokens
        )

        gr_update = gr.update(value=content, visible=True)

    except Exception as e:
        print(f"An error occurred while generating topic sentence feedback: {str(e)}")
        error_msg = "網路塞車,請重新嘗試一次!"
        if "rate limit" in str(e).lower():
            error_msg = "請求過於頻繁,請稍後再試"
        elif "invalid_request_error" in str(e).lower():
            error_msg = "請求格式錯誤,請檢查輸入"
        raise gr.Error(error_msg)

    return gr_update
433