Thao Pham committed on
Commit
a63917b
·
1 Parent(s): 301565c

update upload/clear logic

Browse files
Files changed (2) hide show
  1. app.py +15 -10
  2. video_utils.py +1 -3
app.py CHANGED
@@ -87,8 +87,6 @@ def check_exist_before_upsert(index, video_path):
87
  def chat(message, history):
88
  image_input_path = None
89
 
90
- # print(message['files'])
91
-
92
  video_name, video_input_path = None, None
93
  if len(message['files']) > 0:
94
  assert len(message['files']) == 1
@@ -105,6 +103,12 @@ def chat(message, history):
105
  history = []
106
 
107
  if video_name is not None:
 
 
 
 
 
 
108
  # Check metadata
109
  history.append((None, f"✅ Video uploaded succesfully! Your video's title is {video_name}..."))
110
  yield history
@@ -151,14 +155,15 @@ def chat(message, history):
151
  embed.indexing(INDEX, MODEL_STACK, metadatas_path)
152
 
153
  # summarizing video
154
- video_summary = rag.summarize_video(metadatas_path)
155
- with open(os.path.join(output_folder_path, "summary.txt"), "w") as f:
156
- f.write(video_summary)
 
157
 
158
- history.append((None, f"Video processing complete! You can now ask me questions about the video {video_name}!"))
159
  yield history
160
 
161
- global global_video_name
162
  global_video_name = video_name
163
  else:
164
  history.append((message, None))
@@ -169,7 +174,7 @@ def chat(message, history):
169
  yield history
170
  return
171
 
172
- output_folder_path = f"{UPLOAD_FOLDER}/{video_name}"
173
  metadatas_path = os.path.join(output_folder_path, 'metadatas.json')
174
 
175
  video_summary = ''
@@ -206,8 +211,8 @@ def main():
206
  clear = gr.Button("Clear") # Clear button
207
  # Clear chat history when clear button is clicked
208
  clear.click(clear_chat, [], chatbot)
209
- global video_name
210
- video_name = None
211
 
212
  demo.launch()
213
 
 
87
  def chat(message, history):
88
  image_input_path = None
89
 
 
 
90
  video_name, video_input_path = None, None
91
  if len(message['files']) > 0:
92
  assert len(message['files']) == 1
 
103
  history = []
104
 
105
  if video_name is not None:
106
+ global global_video_name
107
+ if global_video_name is not None:
108
+ history.append((None, "A video has already been uploaded. Please ask questions about the video or click `Clear` to work with another video."))
109
+ yield history
110
+ return
111
+
112
  # Check metadata
113
  history.append((None, f"✅ Video uploaded succesfully! Your video's title is {video_name}..."))
114
  yield history
 
155
  embed.indexing(INDEX, MODEL_STACK, metadatas_path)
156
 
157
  # summarizing video
158
+ if not os.path.exists(os.path.join(output_folder_path, "summary.txt")):
159
+ video_summary = rag.summarize_video(metadatas_path)
160
+ with open(os.path.join(output_folder_path, "summary.txt"), "w") as f:
161
+ f.write(video_summary)
162
 
163
+ history.append((None, f"Video processing complete! You can now ask me questions about the video {video_name}! Please remove the video from your input."))
164
  yield history
165
 
166
+ # global global_video_name
167
  global_video_name = video_name
168
  else:
169
  history.append((message, None))
 
174
  yield history
175
  return
176
 
177
+ output_folder_path = f"{UPLOAD_FOLDER}/{global_video_name}"
178
  metadatas_path = os.path.join(output_folder_path, 'metadatas.json')
179
 
180
  video_summary = ''
 
211
  clear = gr.Button("Clear") # Clear button
212
  # Clear chat history when clear button is clicked
213
  clear.click(clear_chat, [], chatbot)
214
+ global global_video_name
215
+ global_video_name = None
216
 
217
  demo.launch()
218
 
video_utils.py CHANGED
@@ -39,9 +39,7 @@ def transcribe_video(path_to_extracted_audio_file, output_folder, whisper_model=
39
  results = whisper_model.transcribe(path_to_extracted_audio_file, **options)
40
 
41
  vtt = getSubs(results["segments"], "vtt")
42
- # path to save generated transcript of video1
43
- video_name = os.path.basename(path_to_video).replace('.mp4', '')
44
- path_to_generated_transcript = os.path.join(output_folder, f'{video_name}.vtt')
45
 
46
  # write transcription to file
47
  with open(path_to_generated_transcript, 'w') as f:
 
39
  results = whisper_model.transcribe(path_to_extracted_audio_file, **options)
40
 
41
  vtt = getSubs(results["segments"], "vtt")
42
+ path_to_generated_transcript = os.path.join(output_folder, 'transcipt.vtt')
 
 
43
 
44
  # write transcription to file
45
  with open(path_to_generated_transcript, 'w') as f: