Create app.py
app.py
ADDED
@@ -0,0 +1,216 @@
import gradio as gr
import openai
from pydub import AudioSegment
import os
import re

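The imports above imply the Space's dependency list. A minimal requirements.txt, inferred from these imports rather than taken from the repository (the actual file is not shown here), might look like the sketch below; note that pydub shells out to an ffmpeg binary, which on Hugging Face Spaces is usually supplied via a packages.txt containing the single line ffmpeg.

# requirements.txt (a sketch inferred from the imports; not part of app.py)
gradio
openai
pydub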
# Transcribe an audio file using the OpenAI Whisper model
def transcribe(filename, api_key):
    client = openai.OpenAI(api_key=api_key)
    with open(filename, "rb") as audio_file:
        transcript_txt = client.audio.transcriptions.create(
            model="whisper-1",
            file=audio_file,
            response_format="text"
        )
    return transcript_txt

# Check the file size and, if the file is too large, split it and transcribe the segments
def transcribe_large_audio(filename, api_key, segment_length_ms=30 * 60 * 1000):
    def get_file_size_in_mb(file_path):
        return os.path.getsize(file_path) / (1024 * 1024)

    def split_audio_file(file_path, segment_length_ms=30 * 60 * 1000):
        audio = AudioSegment.from_file(file_path, format="mp3")
        segment_filenames = []
        for i in range(0, len(audio), segment_length_ms):
            end = min(i + segment_length_ms, len(audio))
            segment = audio[i:end]
            segment_filename = f"{file_path}_part{len(segment_filenames) + 1}.mp3"
            segment.export(segment_filename, format="mp3", bitrate="36k")
            segment_filenames.append(segment_filename)
        return segment_filenames

    transcript_txt = ""

    # Files over 25 MB exceed the Whisper upload limit, so transcribe them chunk by chunk
    if get_file_size_in_mb(filename) > 25:
        audio_chunks = split_audio_file(filename, segment_length_ms)
        for chunk_filename in audio_chunks:
            transcript_txt += transcribe(chunk_filename, api_key)
            os.remove(chunk_filename)
    else:
        transcript_txt = transcribe(filename, api_key)

    return transcript_txt

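As a rough sanity check on the 25 MB upload limit that this chunking works around, the worst-case size of a single exported segment follows directly from the 36 kbps export bitrate and the 30-minute segment length (a back-of-the-envelope sketch, not part of app.py):

# Rough upper bound on one exported chunk's size
bitrate_bits_per_s = 36_000          # the "36k" export bitrate used above
segment_seconds = 30 * 60            # the default segment_length_ms, in seconds
chunk_mb = bitrate_bits_per_s * segment_seconds / 8 / (1024 * 1024)
print(f"{chunk_mb:.1f} MB")          # ~7.7 MB, comfortably below the 25 MB limit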
# Character count helper
def count_words(text):
    # Strip all whitespace, then count the remaining characters
    cleaned_text = re.sub(r'\s+', '', text)
    return len(cleaned_text)

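count_words counts non-whitespace characters rather than space-delimited words, which is the natural convention for Chinese text; a quick illustration with a hypothetical mixed-language string:

# Chinese characters and Latin letters each count as one character
count_words("大家好 hello world")  # -> 13 (3 Chinese characters + 10 Latin letters)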
# Automatic sentence segmentation and punctuation
def auto_punctuate(text, api_key):
    openai.api_key = api_key
    prompt = """請幫我將以下逐字稿加入適當的標點符號和段落分隔,使文本更容易閱讀:
原文:
""" + text

    completion = openai.chat.completions.create(
        model="gpt-4",
        messages=[{"role": "user", "content": prompt}]
    )
    return completion.choices[0].message.content

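The chat-completion helpers here and below set the module-level openai.api_key and call openai.chat.completions.create, whereas transcribe() constructs an explicit client; both styles are supported by the 1.x SDK. For consistency, an equivalent per-request client version of the call above would look like this (a sketch only, not a change to the file):

# Equivalent call using an explicit client, matching the style of transcribe()
client = openai.OpenAI(api_key=api_key)
completion = client.chat.completions.create(
    model="gpt-4",
    messages=[{"role": "user", "content": prompt}]
)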
# Translate text with the OpenAI API
def openai_translate_text(text, target_lang, api_key):
    openai.api_key = api_key
    language_mapping = {
        "繁體中文": "繁體中文",
        "英文": "英文",
        "日文": "日文",
        "韓文": "韓文",
        "法文": "法文",
        "德文": "德文",
        "西班牙文": "西班牙文"
    }

    prompt = f"請將以下文本翻譯成{language_mapping[target_lang]},保持原文的語氣和風格:\n\n{text}"
    completion = openai.chat.completions.create(
        model="gpt-4",
        messages=[{"role": "user", "content": prompt}]
    )
    return completion.choices[0].message.content

# Generate a summary with the OpenAI API, tailored to the selected audience
def openai_generate_summary(text, audience_type, api_key):
    openai.api_key = api_key
    audience_prompts = {
        "學生": "請將以下會議內容整理成適合學生閱讀的摘要,重點放在學習價值和知識傳遞:",
        "老師": "請將以下會議內容整理成適合教師參考的摘要,重點放在教學應用和教育意義:",
        "會議": "請將以下會議內容整理成正式的會議摘要,重點放在決策、行動項目和重要討論:",
        "主管": "請將以下會議內容整理成適合管理層閱讀的摘要,重點放在策略決策和關鍵績效:",
        "技術人員": "請將以下會議內容整理成適合技術團隊閱讀的摘要,重點放在技術細節和實作方向:",
        "行銷人員": "請將以下會議內容整理成適合行銷團隊閱讀的摘要,重點放在市場策略和推廣重點:",
        "一般員工": "請將以下會議內容整理成適合一般員工閱讀的摘要,重點放在執行重點和日常工作相關內容:",
        "客戶": "請將以下會議內容整理成適合客戶閱讀的摘要,重點放在服務優化和價值傳遞:",
        "投資者": "請將以下會議內容整理成適合投資者閱讀的摘要,重點放在財務表現和未來展望:",
        "研究人員": "請將以下會議內容整理成適合研究人員閱讀的摘要,重點放在研究方法和數據分析:"
    }

    prompt = f"{audience_prompts[audience_type]}\n\n{text}"
    completion = openai.chat.completions.create(
        model="gpt-4",
        messages=[{"role": "user", "content": prompt}]
    )
    return completion.choices[0].message.content

# Handle audio-to-text processing
def process_audio(audio_file, api_key):
    if not audio_file or not api_key:
        return "請確保上傳音檔和輸入API金鑰", ""

    try:
        # Transcribe the audio
        transcript = transcribe_large_audio(audio_file, api_key)
        # Count the characters; return two values to match the two Gradio outputs
        word_count = count_words(transcript)
        return transcript, f"字數統計:{word_count} 字"
    except Exception as e:
        return f"處理失敗:{str(e)}", ""

# Handle punctuation and paragraphing
def process_punctuation(text, api_key):
    if not text or not api_key:
        return "請確保有文本內容和API金鑰"
    try:
        return auto_punctuate(text, api_key)
    except Exception as e:
        return f"標點處理失敗:{str(e)}"

# Handle translation
def process_translation(text, target_lang, api_key):
    if not all([text, target_lang, api_key]):
        return "請確保所有必要欄位都已填寫"
    try:
        return openai_translate_text(text, target_lang, api_key)
    except Exception as e:
        return f"翻譯失敗:{str(e)}"

# Handle summary generation
def process_summary(text, audience_type, api_key):
    if not all([text, audience_type, api_key]):
        return "請確保所有必要欄位都已填寫"
    try:
        return openai_generate_summary(text, audience_type, api_key)
    except Exception as e:
        return f"摘要生成失敗:{str(e)}"

# Build the Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("會議音檔轉文字處理系統")

    with gr.Row():
        audio_file_input = gr.Audio(type="filepath", label="上傳音檔")
        api_key_input = gr.Textbox(label="輸入 OpenAI API 金鑰", type="password")

    with gr.Row():
        transcript_output = gr.Textbox(label="原始逐字稿", lines=5)
        word_count_output = gr.Textbox(label="字數統計")

    with gr.Row():
        punctuated_output = gr.Textbox(label="加入標點符號後的文本", lines=5)

    with gr.Row():
        target_lang_input = gr.Dropdown(
            choices=["繁體中文", "英文", "日文", "韓文", "法文", "德文", "西班牙文"],
            label="選擇目標語言",
            value="繁體中文"
        )
        translated_output = gr.Textbox(label="翻譯結果", lines=5)

    with gr.Row():
        audience_type_input = gr.Dropdown(
            choices=[
                "學生", "老師", "會議", "主管", "技術人員",
                "行銷人員", "一般員工", "客戶", "投資者", "研究人員"
            ],
            label="選擇摘要類型",
            value="會議"
        )
        summary_output = gr.Textbox(label="客製化摘要", lines=5)

    with gr.Row():
        transcribe_button = gr.Button("1. 開始轉譯")
        punctuate_button = gr.Button("2. 添加標點符號")
        translate_button = gr.Button("3. 翻譯文本")
        summary_button = gr.Button("4. 生成摘要")

    # Wire the buttons to their handler functions
    transcribe_button.click(
        process_audio,
        inputs=[audio_file_input, api_key_input],
        outputs=[transcript_output, word_count_output]
    )

    punctuate_button.click(
        process_punctuation,
        inputs=[transcript_output, api_key_input],
        outputs=punctuated_output
    )

    translate_button.click(
        process_translation,
        inputs=[punctuated_output, target_lang_input, api_key_input],
        outputs=translated_output
    )

    summary_button.click(
        process_summary,
        inputs=[translated_output, audience_type_input, api_key_input],
        outputs=summary_output
    )

# Launch the interface
if __name__ == "__main__":
    demo.launch()
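On a Hugging Face Space the app starts automatically; locally it can be run with python app.py, after which the four buttons are intended to be used in order: transcribe, add punctuation, translate, then summarize. When running locally, launch() can optionally expose a temporary public link (an optional tweak, not part of the original file):

# Optional: create a temporary public URL when running outside of Spaces
demo.launch(share=True)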