mhamilton723 committed
Commit 1ed55a8 · verified · 1 Parent(s): 7782384

Update app.py

Files changed (1): app.py (+37 -14)
app.py CHANGED
@@ -15,13 +15,12 @@ from denseav.plotting import plot_attention_video, plot_2head_attention_video, p
 from denseav.shared import norm, crop_to_divisor, blur_dim
 from os.path import join
 
-
 if __name__ == "__main__":
 
-    os.environ['TORCH_HOME'] = '/tmp/.cache'
-    os.environ['GRADIO_EXAMPLES_CACHE'] = '/tmp/gradio_cache'
-    sample_images_dir = "/tmp/samples"
-    # sample_videos_dir = "samples"
+    # os.environ['TORCH_HOME'] = '/tmp/.cache'
+    # os.environ['GRADIO_EXAMPLES_CACHE'] = '/tmp/gradio_cache'
+    # sample_images_dir = "/tmp/samples"
+    sample_videos_dir = "samples"
 
 
     def download_video(url, save_path):
@@ -33,6 +32,10 @@ if __name__ == "__main__":
     base_url = "https://marhamilresearch4.blob.core.windows.net/denseav-public/samples/"
     sample_videos_urls = {
         "puppies.mp4": base_url + "puppies.mp4",
+        "peppers.mp4": base_url + "peppers.mp4",
+        "boat.mp4": base_url + "boat.mp4",
+        "elephant2.mp4": base_url + "elephant2.mp4",
+
     }
 
     # Ensure the directory for sample videos exists
@@ -49,7 +52,7 @@
            print(f"{filename} already exists. Skipping download.")
 
    csv.field_size_limit(100000000)
-    options = ['language', "sound", "sound_and_language"]
+    options = ['language', "sound_and_language", "sound"]
    load_size = 224
    plot_size = 224
 
@@ -145,21 +148,41 @@
        )
        return temp_video_path_1, temp_video_path_2, temp_video_path_3, temp_video_path_4
 
+        return temp_video_path_1, temp_video_path_2, temp_video_path_3
+
 
    with gr.Blocks() as demo:
        with gr.Column():
-            video_input.render()
-            model_option.render()
+            gr.Markdown("## Visualizing Sound and Language with DenseAV")
+            gr.Markdown(
+                "This demo allows you to explore the inner attention maps of DenseAV's dense multi-head contrastive operator.")
+            with gr.Row():
+                with gr.Column(scale=1):
+                    model_option.render()
+                with gr.Column(scale=3):
+                    video_input.render()
+            with gr.Row():
+                submit_button = gr.Button("Submit")
+            with gr.Row():
+                gr.Examples(
+                    examples=[
+                        [join(sample_videos_dir, "puppies.mp4"), "sound_and_language"],
+                        [join(sample_videos_dir, "peppers.mp4"), "language"],
+                        [join(sample_videos_dir, "elephant2.mp4"), "language"],
+                        [join(sample_videos_dir, "boat.mp4"), "language"]
+
+                    ],
+                    inputs=[video_input, model_option]
+                )
            with gr.Row():
                video_output1.render()
                video_output2.render()
-            with gr.Row():
                video_output3.render()
-                video_output4.render()
 
-    demo.examples = [
-        [join(sample_videos_dir, "puppies.mp4"), "language"],
-    ]
+            submit_button.click(fn=process_video, inputs=[video_input, model_option],
+                                outputs=[video_output1, video_output2])
 
    # demo.launch(server_name="0.0.0.0", server_port=6006, debug=True)
-    demo.launch(server_name="0.0.0.0", server_port=7860, debug=True)
+
+    demo.launch(server_name="0.0.0.0", server_port=6006, debug=True)
+    # demo.launch(server_name="0.0.0.0", server_port=7860, debug=True)
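Note: the hunks above reference a `download_video(url, save_path)` helper and a download-if-missing loop over `sample_videos_urls`, but their bodies fall outside the diff context. Below is a minimal sketch of that pattern, assuming a plain `requests` streaming download; only the names `download_video`, `sample_videos_dir`, `sample_videos_urls`, and the blob-storage base URL come from the diff, the rest is illustrative.

```python
import os

import requests  # assumption: any HTTP client works; the real app.py may differ


def download_video(url, save_path):
    # Stream the file to disk so large videos never sit fully in memory.
    response = requests.get(url, stream=True)
    response.raise_for_status()
    with open(save_path, "wb") as f:
        for chunk in response.iter_content(chunk_size=8192):
            f.write(chunk)


if __name__ == "__main__":
    sample_videos_dir = "samples"
    base_url = "https://marhamilresearch4.blob.core.windows.net/denseav-public/samples/"
    sample_videos_urls = {
        name: base_url + name
        for name in ["puppies.mp4", "peppers.mp4", "boat.mp4", "elephant2.mp4"]
    }

    # Ensure the directory for sample videos exists, then fetch anything missing.
    os.makedirs(sample_videos_dir, exist_ok=True)
    for filename, url in sample_videos_urls.items():
        save_path = os.path.join(sample_videos_dir, filename)
        if not os.path.exists(save_path):
            download_video(url, save_path)
        else:
            print(f"{filename} already exists. Skipping download.")
```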
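Note: the final hunk rebuilds the `gr.Blocks` layout: a Markdown header, a row holding the model selector and the video input, a Submit button, a `gr.Examples` gallery that pre-fills both inputs, the output video players, and a `submit_button.click(...)` event that runs `process_video`. The self-contained sketch below mirrors that wiring under stated assumptions: the component constructors, labels, and the stub `process_video` are placeholders, since the real definitions live elsewhere in app.py and are not part of this diff.

```python
import gradio as gr


def process_video(video_path, model_type):
    # Placeholder for DenseAV's attention-visualization pipeline: echo the
    # input video into both output slots so the event wiring can be exercised.
    return video_path, video_path


with gr.Blocks() as demo:
    with gr.Column():
        gr.Markdown("## Visualizing Sound and Language with DenseAV")
        with gr.Row():
            with gr.Column(scale=1):
                # Choices mirror the `options` list in the diff.
                model_option = gr.Radio(["language", "sound_and_language", "sound"],
                                        value="language", label="Model")
            with gr.Column(scale=3):
                video_input = gr.Video(label="Input video")
        with gr.Row():
            submit_button = gr.Button("Submit")
        with gr.Row():
            # Example rows pre-fill both inputs; the path assumes the sample
            # video has already been downloaded into ./samples.
            gr.Examples(examples=[["samples/puppies.mp4", "sound_and_language"]],
                        inputs=[video_input, model_option])
        with gr.Row():
            video_output1 = gr.Video(label="Output 1")
            video_output2 = gr.Video(label="Output 2")

    # Event listeners must be registered inside the Blocks context.
    submit_button.click(fn=process_video, inputs=[video_input, model_option],
                        outputs=[video_output1, video_output2])

demo.launch(server_name="0.0.0.0", server_port=7860, debug=True)
```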