youngtsai commited on
Commit
ec43651
·
1 Parent(s): 3845141

raise gr.Error("網路塞車,請重新嘗試一次!")

Browse files
Files changed (1) hide show
  1. app.py +109 -126
app.py CHANGED
@@ -152,14 +152,17 @@ def generate_topics(model, max_tokens, sys_content, scenario, eng_level, user_ge
152
  "response_format": { "type": "json_object" }
153
  }
154
 
155
- response = OPEN_AI_CLIENT.chat.completions.create(**request_payload)
156
- content = response.choices[0].message.content
157
- topics = json.loads(content)["topics"]
158
-
159
- print(f"====generate_topics====")
160
- print(topics)
161
-
162
- gr_update = gr.update(choices=topics, visible=True)
 
 
 
163
 
164
  return gr_update
165
 
@@ -188,10 +191,14 @@ def generate_points(model, max_tokens, sys_content, scenario, eng_level, topic,
188
  "max_tokens": max_tokens,
189
  }
190
 
191
- response = OPEN_AI_CLIENT.chat.completions.create(**request_payload)
192
- content = response.choices[0].message.content
193
- points = json.loads(content)["points"]
194
- gr_update = gr.update(choices=points, visible=True)
 
 
 
 
195
 
196
  return gr_update
197
 
@@ -236,14 +243,18 @@ def generate_topic_sentences(model, max_tokens, sys_content, scenario, eng_level
236
  "response_format": response_format
237
  }
238
 
239
- response = OPEN_AI_CLIENT.chat.completions.create(**request_payload)
240
- response_content = json.loads(response.choices[0].message.content)
241
- json_content = response_content["results"]
242
- topic_sentences_list = [item["topic-sentence"] for item in json_content]
243
- random.shuffle(topic_sentences_list)
244
-
245
- gr_update_json = gr.update(value=json_content)
246
- gr_update_radio = gr.update(choices=topic_sentences_list, visible=True)
 
 
 
 
247
 
248
  return gr_update_json, gr_update_radio
249
 
@@ -272,9 +283,13 @@ def generate_topic_sentence_feedback(model, max_tokens, sys_content, scenario, e
272
  "max_tokens": max_tokens,
273
  }
274
 
275
- response = OPEN_AI_CLIENT.chat.completions.create(**request_payload)
276
- content = response.choices[0].message.content.strip()
277
- gr_update = gr.update(value=content, visible=True)
 
 
 
 
278
 
279
  return gr_update
280
 
@@ -326,9 +341,13 @@ def generate_supporting_sentences(model, max_tokens, sys_content, scenario, eng_
326
  "max_tokens": max_tokens,
327
  }
328
 
329
- response = OPEN_AI_CLIENT.chat.completions.create(**request_payload)
330
- content = response.choices[0].message.content.strip()
331
- gr_update = gr.update(choices=[content], visible=True)
 
 
 
 
332
 
333
  return gr_update
334
 
@@ -360,10 +379,14 @@ def generate_conclusion_sentences(model, max_tokens, sys_content, scenario, eng_
360
  "response_format": { "type": "json_object" }
361
  }
362
 
363
- response = OPEN_AI_CLIENT.chat.completions.create(**request_payload)
364
- response_content = json.loads(response.choices[0].message.content)
365
- json_content = response_content["results"]
366
- gr_update = gr.update(choices=[json_content], visible=True)
 
 
 
 
367
 
368
  return gr_update
369
 
@@ -400,22 +423,26 @@ def generate_paragraph_evaluate(model, sys_content, paragraph, user_generate_par
400
  "response_format": response_format
401
  }
402
 
403
- response = OPEN_AI_CLIENT.chat.completions.create(**request_payload)
404
- content = response.choices[0].message.content
 
405
 
406
- print(f"====generate_paragraph_evaluate====")
407
- print(content)
408
 
409
- data = json.loads(content)
410
- table_data = [
411
- ["學測架構|內容(Content)", data['content']['level'], data['content']['explanation']],
412
- ["學測架構|組織(Organization)", data['organization']['level'], data['organization']['explanation']],
413
- ["學測架構|文法、句構(Grammar/Sentence Structure)", data['grammar_and_usage']['level'], data['grammar_and_usage']['explanation']],
414
- ["學測架構|字彙、拼字(Vocabulary/Spelling)", data['vocabulary']['level'], data['vocabulary']['explanation']],
415
- ["JUTOR 架構|連貫性和連接詞(Coherence and Cohesion)", data['coherence_and_cohesion']['level'], data['coherence_and_cohesion']['explanation']]
416
- ]
417
- headers = ["架構", "評分", "解釋"]
418
- gr_update = gr.update(value=table_data, headers=headers, visible=True)
 
 
 
419
 
420
  return gr_update
421
 
@@ -441,19 +468,23 @@ def generate_correct_grammatical_spelling_errors(model, sys_content, eng_level,
441
  "response_format": response_format
442
  }
443
 
444
- response = OPEN_AI_CLIENT.chat.completions.create(**request_payload)
445
- content = response.choices[0].message.content
446
- data = json.loads(content)
447
- print(f"data: {data}")
448
-
449
- corrections_list = [
450
- [item['original'], item['correction'], item['explanation']]
451
- for item in data['Corrections and Explanations']
452
- ]
453
- headers = ["原文", "建議", "解釋"]
 
454
 
455
- corrections_list_gr_update = gr.update(value=corrections_list, headers=headers, wrap=True, visible=True)
456
- reverse_paragraph_gr_update = gr.update(value=data["Revised Paragraph"], visible=False)
 
 
 
457
 
458
  return corrections_list_gr_update, reverse_paragraph_gr_update
459
 
@@ -507,17 +538,21 @@ def generate_refine_paragraph(model, sys_content, eng_level, paragraph, user_ref
507
  "response_format": response_format
508
  }
509
 
510
- response = OPEN_AI_CLIENT.chat.completions.create(**request_payload)
511
- content = response.choices[0].message.content
512
- data = json.loads(content)
513
- headers = ["原文", "建議", "解釋"]
514
- table_data = [
515
- [item['origin'], item['suggestion'], item['explanation']]
516
- for item in data['Suggestions and Explanations']
517
- ]
 
518
 
519
- refine_paragraph_gr_update = gr.update(value=table_data, headers=headers, visible=True)
520
- revised_paragraph_gr_update = gr.update(value=data["Revised Paragraph"],visible=False)
 
 
 
521
 
522
  return refine_paragraph_gr_update, revised_paragraph_gr_update
523
 
@@ -627,7 +662,6 @@ def get_logs_sessions(user_data, log_type):
627
  else:
628
  file_names = []
629
 
630
-
631
  # file_names sort by timestamp DESC
632
  file_names.sort(reverse=True)
633
  choices = [
@@ -1068,10 +1102,6 @@ def generate_content_by_open_ai_assistant(user_content, thread_id=None, model_na
1068
  tools=[{"type": "file_search"}],
1069
  )
1070
 
1071
- # Wait for the response
1072
- # run_status = poll_run_status(run.id, thread.id, timeout=30)
1073
- # print(f"Run status: {run_status}")
1074
-
1075
  if run.status == "completed":
1076
  print("==============completed====================")
1077
  print(f"Thread ID: {thread.id}")
@@ -1081,62 +1111,11 @@ def generate_content_by_open_ai_assistant(user_content, thread_id=None, model_na
1081
  response_text = messages.data[0].content[0].text.value
1082
  print(f"Response: {response_text}")
1083
  except Exception as e:
1084
- print(f"Error: {e}")
1085
- response_text = " 出現錯誤,請稍後再試!"
1086
 
1087
  return response_text
1088
 
1089
- def poll_run_status(run_id, thread_id, timeout=600, poll_interval=5):
1090
- """
1091
- Polls the status of a Run and handles different statuses appropriately.
1092
-
1093
- :param run_id: The ID of the Run to poll.
1094
- :param thread_id: The ID of the Thread associated with the Run.
1095
- :param timeout: Maximum time to wait for the Run to complete, in seconds.
1096
- :param poll_interval: Time to wait between each poll, in seconds.
1097
- """
1098
- client = OPEN_AI_CLIENT
1099
- start_time = time.time()
1100
- while time.time() - start_time < timeout:
1101
- run = client.beta.threads.runs.retrieve(thread_id=thread_id, run_id=run_id)
1102
-
1103
- if run.status in ["completed", "cancelled", "failed"]:
1104
- print(f"Run completed with status: {run.status}")
1105
- break
1106
- elif run.status == "requires_action":
1107
- print("Run requires action. Performing required action...")
1108
- # Here, you would perform the required action, e.g., running functions
1109
- # and then submitting the outputs. This is simplified for this example.
1110
- # After performing the required action, you'd complete the action:
1111
- # OPEN_AI_CLIENT.beta.threads.runs.complete_required_action(...)
1112
- elif run.status == "expired":
1113
- print("Run expired. Exiting...")
1114
- break
1115
- else:
1116
- print(f"Run status is {run.status}. Waiting for updates...")
1117
-
1118
- time.sleep(poll_interval)
1119
- else:
1120
- print("Timeout reached. Run did not complete in the expected time.")
1121
-
1122
- # Once the Run is completed, handle the result accordingly
1123
- if run.status == "completed":
1124
- # Retrieve and handle messages or run steps as needed
1125
- messages = client.beta.threads.messages.list(thread_id=thread_id)
1126
- for message in messages.data:
1127
- if message.role == "assistant":
1128
- print(f"Assistant response: {message.content}")
1129
- elif run.status in ["cancelled", "failed"]:
1130
- # Handle cancellation or failure
1131
- print(f"Run ended with status: {run.status}")
1132
- elif run.status == "expired":
1133
- # Handle expired run
1134
- print("Run expired without completion.")
1135
-
1136
- return run.status
1137
-
1138
-
1139
-
1140
  # 小工具
1141
  def show_elements():
1142
  return gr.update(visible=True)
@@ -1165,8 +1144,12 @@ def generate_chinese_essay_idea(model, user_prompt, chinese_essay_title_input):
1165
  "max_tokens": 2000,
1166
  }
1167
 
1168
- response = OPEN_AI_CLIENT.chat.completions.create(**request_payload)
1169
- content = response.choices[0].message.content.strip()
 
 
 
 
1170
 
1171
  return content
1172
 
 
152
  "response_format": { "type": "json_object" }
153
  }
154
 
155
+ try:
156
+ response = OPEN_AI_CLIENT.chat.completions.create(**request_payload)
157
+ content = response.choices[0].message.content
158
+ topics = json.loads(content)["topics"]
159
+ topics = json.loads(content)["error"]
160
+ print(f"====generate_topics====")
161
+ print(topics)
162
+ gr_update = gr.update(choices=topics, visible=True)
163
+ except Exception as e:
164
+ print(f"An error occurred while generating topics: {e}")
165
+ raise gr.Error("網路塞車,請重新嘗試一次!")
166
 
167
  return gr_update
168
 
 
191
  "max_tokens": max_tokens,
192
  }
193
 
194
+ try:
195
+ response = OPEN_AI_CLIENT.chat.completions.create(**request_payload)
196
+ content = response.choices[0].message.content
197
+ points = json.loads(content)["points"]
198
+ gr_update = gr.update(choices=points, visible=True)
199
+ except Exception as e:
200
+ print(f"An error occurred while generating points: {e}")
201
+ raise gr.Error("網路塞車,請重新嘗試一次!")
202
 
203
  return gr_update
204
 
 
243
  "response_format": response_format
244
  }
245
 
246
+ try:
247
+ response = OPEN_AI_CLIENT.chat.completions.create(**request_payload)
248
+ response_content = json.loads(response.choices[0].message.content)
249
+ json_content = response_content["results"]
250
+ topic_sentences_list = [item["topic-sentence"] for item in json_content]
251
+ random.shuffle(topic_sentences_list)
252
+
253
+ gr_update_json = gr.update(value=json_content)
254
+ gr_update_radio = gr.update(choices=topic_sentences_list, visible=True)
255
+ except Exception as e:
256
+ print(f"An error occurred while generating topic sentences: {e}")
257
+ raise gr.Error("網路塞車,請重新嘗試一次!")
258
 
259
  return gr_update_json, gr_update_radio
260
 
 
283
  "max_tokens": max_tokens,
284
  }
285
 
286
+ try:
287
+ response = OPEN_AI_CLIENT.chat.completions.create(**request_payload)
288
+ content = response.choices[0].message.content.strip()
289
+ gr_update = gr.update(value=content, visible=True)
290
+ except Exception as e:
291
+ print(f"An error occurred while generating topic sentence feedback: {e}")
292
+ raise gr.Error("網路塞車,請重新嘗試一次!")
293
 
294
  return gr_update
295
 
 
341
  "max_tokens": max_tokens,
342
  }
343
 
344
+ try:
345
+ response = OPEN_AI_CLIENT.chat.completions.create(**request_payload)
346
+ content = response.choices[0].message.content.strip()
347
+ gr_update = gr.update(choices=[content], visible=True)
348
+ except Exception as e:
349
+ print(f"An error occurred while generating supporting sentences: {e}")
350
+ raise gr.Error("網路塞車,請重新嘗試一次!")
351
 
352
  return gr_update
353
 
 
379
  "response_format": { "type": "json_object" }
380
  }
381
 
382
+ try:
383
+ response = OPEN_AI_CLIENT.chat.completions.create(**request_payload)
384
+ response_content = json.loads(response.choices[0].message.content)
385
+ json_content = response_content["results"]
386
+ gr_update = gr.update(choices=[json_content], visible=True)
387
+ except Exception as e:
388
+ print(f"An error occurred while generating conclusion sentences: {e}")
389
+ raise gr.Error("網路塞車,請重新嘗試一次!")
390
 
391
  return gr_update
392
 
 
423
  "response_format": response_format
424
  }
425
 
426
+ try:
427
+ response = OPEN_AI_CLIENT.chat.completions.create(**request_payload)
428
+ content = response.choices[0].message.content
429
 
430
+ print(f"====generate_paragraph_evaluate====")
431
+ print(content)
432
 
433
+ data = json.loads(content)
434
+ table_data = [
435
+ ["學測架構|內容(Content)", data['content']['level'], data['content']['explanation']],
436
+ ["學測架構|組織(Organization)", data['organization']['level'], data['organization']['explanation']],
437
+ ["學測架構|文法、句構(Grammar/Sentence Structure)", data['grammar_and_usage']['level'], data['grammar_and_usage']['explanation']],
438
+ ["學測架構|字彙、拼字(Vocabulary/Spelling)", data['vocabulary']['level'], data['vocabulary']['explanation']],
439
+ ["JUTOR 架構|連貫性和連接詞(Coherence and Cohesion)", data['coherence_and_cohesion']['level'], data['coherence_and_cohesion']['explanation']]
440
+ ]
441
+ headers = ["架構", "評分", "解釋"]
442
+ gr_update = gr.update(value=table_data, headers=headers, visible=True)
443
+ except Exception as e:
444
+ print(f"An error occurred while generating paragraph evaluation: {e}")
445
+ raise gr.Error("網路塞車,請重新嘗試一次!")
446
 
447
  return gr_update
448
 
 
468
  "response_format": response_format
469
  }
470
 
471
+ try:
472
+ response = OPEN_AI_CLIENT.chat.completions.create(**request_payload)
473
+ content = response.choices[0].message.content
474
+ data = json.loads(content)
475
+ print(f"data: {data}")
476
+
477
+ corrections_list = [
478
+ [item['original'], item['correction'], item['explanation']]
479
+ for item in data['Corrections and Explanations']
480
+ ]
481
+ headers = ["原文", "建議", "解釋"]
482
 
483
+ corrections_list_gr_update = gr.update(value=corrections_list, headers=headers, wrap=True, visible=True)
484
+ reverse_paragraph_gr_update = gr.update(value=data["Revised Paragraph"], visible=False)
485
+ except Exception as e:
486
+ print(f"An error occurred while generating correct grammatical and spelling errors: {e}")
487
+ raise gr.Error("網路塞車,請重新嘗試一次!")
488
 
489
  return corrections_list_gr_update, reverse_paragraph_gr_update
490
 
 
538
  "response_format": response_format
539
  }
540
 
541
+ try:
542
+ response = OPEN_AI_CLIENT.chat.completions.create(**request_payload)
543
+ content = response.choices[0].message.content
544
+ data = json.loads(content)
545
+ headers = ["原文", "建議", "解釋"]
546
+ table_data = [
547
+ [item['origin'], item['suggestion'], item['explanation']]
548
+ for item in data['Suggestions and Explanations']
549
+ ]
550
 
551
+ refine_paragraph_gr_update = gr.update(value=table_data, headers=headers, visible=True)
552
+ revised_paragraph_gr_update = gr.update(value=data["Revised Paragraph"],visible=False)
553
+ except Exception as e:
554
+ print(f"An error occurred while generating refine paragraph: {e}")
555
+ raise gr.Error("網路塞車,請重新嘗試一次!")
556
 
557
  return refine_paragraph_gr_update, revised_paragraph_gr_update
558
 
 
662
  else:
663
  file_names = []
664
 
 
665
  # file_names sort by timestamp DESC
666
  file_names.sort(reverse=True)
667
  choices = [
 
1102
  tools=[{"type": "file_search"}],
1103
  )
1104
 
 
 
 
 
1105
  if run.status == "completed":
1106
  print("==============completed====================")
1107
  print(f"Thread ID: {thread.id}")
 
1111
  response_text = messages.data[0].content[0].text.value
1112
  print(f"Response: {response_text}")
1113
  except Exception as e:
1114
+ print(f"An error occurred while generating content by OpenAI Assistant: {e}")
1115
+ raise gr.Error("網路塞車,請重新嘗試一次!")
1116
 
1117
  return response_text
1118
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1119
  # 小工具
1120
  def show_elements():
1121
  return gr.update(visible=True)
 
1144
  "max_tokens": 2000,
1145
  }
1146
 
1147
+ try:
1148
+ response = OPEN_AI_CLIENT.chat.completions.create(**request_payload)
1149
+ content = response.choices[0].message.content.strip()
1150
+ except Exception as e:
1151
+ print(f"An error occurred while generating Chinese essay idea: {e}")
1152
+ raise gr.Error("網路塞車,請重新嘗試一次!")
1153
 
1154
  return content
1155