Spaces:
Sleeping
Sleeping
File size: 4,623 Bytes
79acef0 8ddb418 79acef0 8ddb418 79acef0 8ddb418 79acef0 8ddb418 79acef0 8ddb418 79acef0 73de835 79acef0 8ddb418 2c72cc4 8ddb418 2c72cc4 8ddb418 c426221 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 |
import gradio as gr
import os
# Number of example videos displayed per row in the examples grid below.
VIDEOS_PER_ROW = 3
# Directory (relative to the app's working directory) holding the example .mp4 files.
VIDEO_EXAMPLES_PATH = "src/example_videos"
def grid_row_count(item_count, items_per_row):
    """Return the number of grid rows needed for ``item_count`` items laid out
    ``items_per_row`` per row (ceiling division).

    Returns 0 for zero items, so no empty trailing row is created.
    """
    return (item_count + items_per_row - 1) // items_per_row


def build_video_to_camvideo(CAM_METHODS, ALL_CLASSES, gradcam_video):
    """Build the "Video to GradCAM-Video" section of the Gradio UI.

    Args:
        CAM_METHODS: available CAM method names.
            NOTE(review): currently unused — the radio below hard-codes
            ["GradCAM", "GradCAM++"]; confirm whether it should be driven
            by this parameter instead.
        ALL_CLASSES: class names offered in the "Animal" dropdown (prefixed
            with a "Predicted Class" sentinel entry).
        gradcam_video: callback ``(video, alpha, cam_method, layer, animal)``
            returning the GradCAM-overlaid output video; wired to the button.
    """
    with gr.Row():
        with gr.Column(scale=2):
            gr.Markdown("### Video to GradCAM-Video")
            gr.Markdown("Here you can upload a video and visualize the GradCAM.")
            # NOTE(review): the sentence below appears truncated ("maximum of
            # 1000" — 1000 what?); confirm the intended limit with the author.
            # The string is kept as-is because it is user-facing text.
            gr.Markdown("Please note that this can take a while. Also currently only a maximum of 70 frames can be processed. The video will be cut to 70 frames if it is longer. Furthermore, the video can only consist of a maximum of 1000.")
            gr.Markdown("The more frames and fps the video has, the longer it takes to process and the result will be more choppy.")
            video_cam_method = gr.Radio(
                ["GradCAM", "GradCAM++"],
                label="GradCAM Method",
                value="GradCAM",
                interactive=True,
                scale=2,
            )
            # .description / .description_place are custom attributes consumed
            # elsewhere in the project (not part of the Gradio component API).
            video_cam_method.description = "Here you can choose the GradCAM method."
            video_cam_method.description_place = "left"
            video_alpha = gr.Slider(
                minimum=.1,
                maximum=.9,
                value=0.5,
                interactive=True,
                step=.1,
                label="Alpha",
                scale=1,
            )
            video_alpha.description = "Here you can choose the alpha value."
            video_alpha.description_place = "left"
            video_layer = gr.Radio(
                ["layer1", "layer2", "layer3", "layer4", "all"],
                label="Layer",
                value="layer4",
                interactive=True,
                scale=2,
            )
            video_layer.description = "Here you can choose the layer to visualize."
            video_layer.description_place = "left"
            video_animal_to_explain = gr.Dropdown(
                choices=["Predicted Class"] + ALL_CLASSES,
                label="Animal",
                value="Predicted Class",
                interactive=True,
                scale=2,
            )
            video_animal_to_explain.description = "Here you can choose the animal to explain. If you choose 'Predicted Class' the method will explain the predicted class."
            video_animal_to_explain.description_place = "center"
        with gr.Column(scale=1):
            with gr.Column():
                video_in = gr.Video(autoplay=False, include_audio=False)
                video_out = gr.Video(autoplay=False, include_audio=False)
            gif_cam_mode_button = gr.Button(value="Show GradCAM-Video", label="GradCAM", scale=1)
            gif_cam_mode_button.click(fn=gradcam_video, inputs=[video_in, video_alpha, video_cam_method, video_layer, video_animal_to_explain], outputs=[video_out], queue=True)
    with gr.Row():
        with gr.Column():
            gr.Markdown("## Examples", elem_id="video-examples-header")
            gr.Markdown("Here you can choose an example video to visualize the GradCAM. Just click play and the video will be loaded as input above. Then you can click the button above to visualize the GradCAM.")
            # Collect example clips. Sorted for a deterministic display order:
            # os.listdir order is arbitrary, which made the "video N" labels
            # non-reproducible across runs/platforms.
            videos = sorted(
                os.path.join(VIDEO_EXAMPLES_PATH, name)
                for name in os.listdir(VIDEO_EXAMPLES_PATH)
                if name.endswith(".mp4")
            )
            # Ceiling division fixes the original off-by-one
            # (len // PER_ROW + 1), which produced an empty trailing row
            # whenever the count was an exact multiple of VIDEOS_PER_ROW.
            rows = grid_row_count(len(videos), VIDEOS_PER_ROW)
            loaded_videos = []
            for row in range(rows):
                start = row * VIDEOS_PER_ROW
                with gr.Row(elem_classes=["row-example-videos"], equal_height=False):
                    # Slicing bounds the inner loop, so no index check/break needed.
                    for idx, video in enumerate(videos[start:start + VIDEOS_PER_ROW], start=start):
                        loaded_videos.append(
                            gr.Video(
                                value=video,
                                interactive=False,
                                label=f"video {idx + 1}",
                                include_audio=False,
                                autoplay=False,
                                elem_classes=["selectable_videos"],
                            )
                        )
            # Playing an example copies it into the input slot above.
            # inputs=[video] is evaluated per iteration, so there is no
            # late-binding closure problem here.
            for video in loaded_videos:
                video.play(fn=lambda x: x, inputs=[video], outputs=[video_in], scroll_to_output=True, queue=True, show_progress='full', max_batch_size=1)