youngtsai committed on
Commit
611fc85
·
1 Parent(s): 505e6ac

generate_supporting_sentences

Browse files
Files changed (1) hide show
  1. app.py +40 -24
app.py CHANGED
@@ -457,35 +457,51 @@ def update_topic_sentence_input(topic_sentences_json, selected_topic_sentence):
457
 
458
def generate_supporting_sentences(model, max_tokens, sys_content, scenario, eng_level, topic, points, topic_sentence, user_generate_supporting_sentences_prompt):
    """
    Generate supporting sentences for an essay via the OpenAI chat API.

    Builds a user prompt from the writing scenario, English level, topic,
    points, and chosen topic sentence, sends it together with the system
    prompt, and returns a Gradio update that exposes the generated text
    as the only selectable choice.

    Args:
        model: OpenAI model name, passed straight through to the API.
        max_tokens: Completion token limit for the API call.
        sys_content: System-role prompt content.
        scenario: Writing scenario interpolated into the user prompt.
        eng_level: Target English level interpolated into the user prompt.
        topic: Essay topic interpolated into the user prompt.
        points: Essay points interpolated into the user prompt.
        topic_sentence: Topic sentence the supporting sentences must follow.
        user_generate_supporting_sentences_prompt: Task instructions
            appended after the context fields.

    Returns:
        A ``gr.update`` making the generated content visible as a choice.

    Raises:
        gr.Error: User-facing error (in Traditional Chinese) when the
            API call fails; the underlying exception is printed first.
    """
    # NOTE(review): reconstructed from a diff view — the exact leading
    # whitespace inside this f-string block is not recoverable; confirm
    # against the deployed file.
    user_content = f"""
        scenario is: {scenario}
        english level is: {eng_level}
        topic is: {topic}
        points is: {points}
        topic sentence is: {topic_sentence}
        ---
        {user_generate_supporting_sentences_prompt}
    """
    messages = [
        {"role": "system", "content": sys_content},
        {"role": "user", "content": user_content},
    ]

    request_payload = {
        "model": model,
        "messages": messages,
        "max_tokens": max_tokens,
    }

    try:
        response = OPEN_AI_CLIENT.chat.completions.create(**request_payload)
        content = response.choices[0].message.content.strip()
    except Exception as e:
        # Log the real cause for operators, then surface a friendly,
        # user-facing message through Gradio.
        print(f"An error occurred while generating supporting sentences: {e}")
        raise gr.Error("網路塞車,請重新嘗試一次!")

    # Built outside the try so a UI-layer failure is not misreported
    # as a network error.
    return gr.update(choices=[content], visible=True)
457
 
458
def generate_supporting_sentences(model, max_tokens, sys_content, scenario, eng_level, topic, points, topic_sentence, user_generate_supporting_sentences_prompt):
    """
    Generate supporting sentences via the provider-abstracted LLM service.

    Builds a user prompt from the writing scenario, English level, topic,
    points, and chosen topic sentence, routes the request to either the
    Gemini or the OpenAI provider based on the model name, and returns a
    Gradio update exposing the generated text as the only choice.

    Args:
        model: Model name; if it contains "gemini" (case-insensitive) the
            Gemini provider is used, otherwise the OpenAI provider.
        max_tokens: Completion token limit for the request.
        sys_content: System-role prompt content.
        scenario: Writing scenario interpolated into the user prompt.
        eng_level: Target English level interpolated into the user prompt.
        topic: Essay topic interpolated into the user prompt.
        points: Essay points interpolated into the user prompt.
        topic_sentence: Topic sentence the supporting sentences must follow.
        user_generate_supporting_sentences_prompt: Task instructions
            appended after the context fields.

    Returns:
        A ``gr.update`` making the generated content visible as a choice.

    Raises:
        gr.Error: User-facing error (in Traditional Chinese), with the
            message specialized for rate-limit and invalid-request
            failures; the underlying exception is printed first.
    """
    # Decide the provider once, up front: the same flag also controls the
    # prompt shape below, so the two can never diverge.
    is_gemini = "gemini" in model.lower()

    try:
        # NOTE(review): reconstructed from a diff view — the exact leading
        # whitespace inside this f-string block is not recoverable; confirm
        # against the deployed file.
        user_content = f"""
            scenario is: {scenario}
            english level is: {eng_level}
            topic is: {topic}
            points is: {points}
            topic sentence is: {topic_sentence}
            ---
            {user_generate_supporting_sentences_prompt}
        """

        messages = [
            {"role": "system", "content": sys_content},
            {"role": "user", "content": user_content},
        ]

        # Select the backend from the model name.
        if is_gemini:
            print("====gemini====")
            provider = GeminiProvider()
        else:
            print("====openai====")
            provider = OpenAIProvider(OPEN_AI_CLIENT)

        # Route the request through the shared LLM service. The Gemini
        # path takes one flattened prompt string; the OpenAI path uses
        # the structured messages list.
        llm_service = LLMService(provider)
        content = llm_service.chat(
            prompt=f"{sys_content}\n{user_content}" if is_gemini else None,
            messages=messages,
            model=model,
            max_tokens=max_tokens,
        )

        gr_update = gr.update(choices=[content], visible=True)

    except Exception as e:
        print(f"An error occurred while generating supporting sentences: {str(e)}")
        # Map known failure signatures (matched on the lowercased message)
        # to friendlier user-facing texts; anything else gets the generic
        # "network congestion" message.
        error_text = str(e).lower()
        error_msg = "網路塞車,請重新嘗試一次!"
        if "rate limit" in error_text:
            error_msg = "請求過於頻繁,請稍後再試"
        elif "invalid_request_error" in error_text:
            error_msg = "請求格式錯誤,請檢查輸入"
        raise gr.Error(error_msg)

    return gr_update