segment.text
app.py CHANGED
@@ -417,10 +417,10 @@ def generate_transcription_by_whisper(video_id):
 
         # Adjusting the timestamps for the chunk based on its position in the full audio
         adjusted_segments = [{
-            'text': segment['text'],
-            'start': math.ceil(segment['start'] + start_time / 1000.0),  # Converting milliseconds to seconds
-            'end': math.ceil(segment['end'] + start_time / 1000.0),
-            'duration': math.ceil(segment['end'] - segment['start'])
+            'text': segment.text,  # use the .text attribute instead of ['text']
+            'start': math.ceil(segment.start + start_time / 1000.0),  # Converting milliseconds to seconds
+            'end': math.ceil(segment.end + start_time / 1000.0),
+            'duration': math.ceil(segment.end - segment.start)
         } for segment in response.segments]
 
         transcription.extend(adjusted_segments)
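The hunk above switches the Whisper segments from dict-style access (segment['text']) to attribute access (segment.text) and shifts each segment's timestamps by the chunk's offset before rounding. A minimal sketch of that pattern, assuming the OpenAI Python SDK v1.x with response_format="verbose_json"; transcribe_chunk, chunk_path and the standalone start_time parameter are hypothetical stand-ins for the surrounding generate_transcription_by_whisper code, which the hunk does not show in full:

```python
import math

from openai import OpenAI  # assumes the v1.x SDK, whose responses are objects, not dicts

client = OpenAI()

def transcribe_chunk(chunk_path, start_time):
    """Transcribe one audio chunk and shift its timestamps by the chunk's
    offset within the full audio (start_time is in milliseconds)."""
    with open(chunk_path, "rb") as audio_file:
        response = client.audio.transcriptions.create(
            model="whisper-1",
            file=audio_file,
            response_format="verbose_json",  # needed to get per-segment timestamps
        )
    # Segments come back as objects, so fields are attributes: .text, .start, .end
    return [{
        'text': segment.text,
        'start': math.ceil(segment.start + start_time / 1000.0),  # ms offset -> seconds
        'end': math.ceil(segment.end + start_time / 1000.0),
        'duration': math.ceil(segment.end - segment.start),
    } for segment in response.segments]
```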
@@ -1030,7 +1030,7 @@ def generate_summarise(df_string, metadata=None, LLM_model=None):
     all_content = []
 
     for segment in segments:
-        sys_content = "你是一個擅長資料分析跟影片教學的老師,user
+        sys_content = "你是一個擅長資料分析跟影片教學的老師,user 為學生,請精讀資料文本,自行判斷資料的種類,使用 zh-TW"
         user_content = f"""
         課程名稱:{title}
         科目:{subject}
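The second hunk only rewrites the per-segment system prompt in generate_summarise. A minimal sketch of how such a prompt pair could be sent to a chat model, assuming an OpenAI-style chat completion call; summarise_segments, the default model name and the 文本 line are placeholders, since the hunk shows neither the rest of the user prompt nor the actual LLM call:

```python
from openai import OpenAI

client = OpenAI()

def summarise_segments(segments, title, subject, model="gpt-4o-mini"):
    """Sketch only: summarise each text segment with the zh-TW teacher prompt."""
    all_content = []
    for segment in segments:
        sys_content = ("你是一個擅長資料分析跟影片教學的老師,user 為學生,"
                       "請精讀資料文本,自行判斷資料的種類,使用 zh-TW")
        # The real user prompt is longer; the 文本 line below stands in for the
        # part truncated in the hunk.
        user_content = f"""
        課程名稱:{title}
        科目:{subject}
        文本:{segment}
        """
        response = client.chat.completions.create(
            model=model,  # placeholder; app.py's generate_summarise takes its own LLM_model
            messages=[
                {"role": "system", "content": sys_content},
                {"role": "user", "content": user_content},
            ],
        )
        all_content.append(response.choices[0].message.content)
    return all_content
```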