import gradio as gr
import os

# Layout: how many example videos are shown per gallery row.
VIDEOS_PER_ROW = 3
# Directory (relative to the app's working directory) holding .mp4 example clips.
VIDEO_EXAMPLES_PATH = "src/example_videos"


def build_video_to_camvideo(CAM_METHODS, CV2_COLORMAPS, LAYERS, ALL_CLASSES, gradcam_video, language):
    """Build the "Video to GradCAM-Video" tab of the Gradio UI.

    Must be called inside an active ``gr.Blocks`` context: every component
    created here is registered as a side effect on the surrounding layout.

    Args:
        CAM_METHODS: unused here (kept for signature compatibility with the
            sibling tab builders — TODO confirm against callers).
        CV2_COLORMAPS: mapping of colormap display-name -> OpenCV colormap;
            only the keys are used, to populate the "Colormap" dropdown.
        LAYERS: unused here (kept for signature compatibility — TODO confirm).
        ALL_CLASSES: class names offered in the "Animal" dropdown, after the
            "Predicted Class" default entry.
        gradcam_video: callback invoked when the "Show GradCAM-Video" button
            is clicked; receives the input video plus all control values and
            the language component, returns the rendered output video.
        language: a Gradio component holding the UI language, forwarded as the
            last input of ``gradcam_video``.

    Returns:
        None. The function only wires up components.
    """
    with gr.Row():
        # Left column: all GradCAM controls.
        with gr.Column(scale=2):
            gr.Markdown("### Video to GradCAM-Video")
            gr.Markdown("Here you can upload a video and visualize the GradCAM.")
            # NOTE(review): the sentence below ends with "a maximum of 1000."
            # — 1000 *what* is unclear (frames? pixels?). Left byte-identical;
            # confirm the intended wording with the author.
            gr.Markdown("Please note that this can take a while. Also currently only a maximum of 60 frames can be processed. The video will be cut to 60 frames if it is longer. Furthermore, the video can only consist of a maximum of 1000.")
            gr.Markdown("The more frames and fps the video has, the longer it takes to process and the result will be more choppy.")
            video_cam_method = gr.Radio(
                ["GradCAM", "GradCAM++"],
                label="GradCAM Method",
                value="GradCAM",
                interactive=True,
                scale=2,
            )
            video_alpha = gr.Slider(
                minimum=0.1,
                maximum=0.9,
                value=0.5,
                interactive=True,
                step=0.1,
                label="Alpha",
                scale=1,
            )
            video_layer = gr.Radio(
                [f"layer{i}" for i in range(1, 5)],
                label="Layer",
                value="layer4",
                interactive=True,
                scale=2,
            )
            with gr.Row():
                video_animal_to_explain = gr.Dropdown(
                    choices=["Predicted Class"] + ALL_CLASSES,
                    label="Animal",
                    value="Predicted Class",
                    interactive=True,
                    scale=4,
                )
                show_predicted_class = gr.Checkbox(
                    label="Show Predicted Class",
                    value=False,
                    interactive=True,
                    scale=1,
                )
            with gr.Row():
                colormap = gr.Dropdown(
                    choices=list(CV2_COLORMAPS.keys()),
                    label="Colormap",
                    value="Inferno",
                    interactive=True,
                    scale=2,
                )
                bw_highlight = gr.Checkbox(
                    label="BW Highlight",
                    value=False,
                    interactive=True,
                    scale=1,
                )
            with gr.Row():
                use_eigen_smooth = gr.Checkbox(
                    label="Eigen Smooth",
                    value=False,
                    interactive=True,
                    scale=1,
                )
        # Right column: input video, rendered output, and the trigger button.
        with gr.Column(scale=1):
            with gr.Column():
                video_in = gr.Video(autoplay=False, include_audio=False, label="Input Video")
                video_out = gr.Video(autoplay=False, include_audio=False, show_label=False)
                gif_cam_mode_button = gr.Button(value="Show GradCAM-Video", label="GradCAM", scale=1)
                gif_cam_mode_button.click(
                    fn=gradcam_video,
                    inputs=[video_in, colormap, use_eigen_smooth, bw_highlight, video_alpha, video_cam_method, video_layer, video_animal_to_explain, show_predicted_class, language],
                    outputs=[video_out],
                    queue=True,
                )
    # Example gallery: clicking play on any example loads it into `video_in`.
    with gr.Row():
        with gr.Column():
            gr.Markdown("## Examples", elem_id="video-examples-header")
            gr.Markdown("Here you can choose an example video to visualize the GradCAM. Just click play and the video will be loaded as input above. Then you can click the button above to visualize the GradCAM.")
            # sorted() makes the gallery order (and the "video N" labels)
            # deterministic — os.listdir order is filesystem-dependent.
            videos = sorted(
                os.path.join(VIDEO_EXAMPLES_PATH, name)
                for name in os.listdir(VIDEO_EXAMPLES_PATH)
                if name.endswith(".mp4")
            )
            loaded_videos = []
            # Step over the list in row-sized slices. Unlike the previous
            # `len // VIDEOS_PER_ROW + 1` row count, this never renders a
            # trailing empty gr.Row when the count is an exact multiple of
            # VIDEOS_PER_ROW.
            for start in range(0, len(videos), VIDEOS_PER_ROW):
                with gr.Row(elem_classes=["row-example-videos"], equal_height=False):
                    for offset, video in enumerate(videos[start:start + VIDEOS_PER_ROW]):
                        loaded_videos.append(
                            gr.Video(
                                value=video,
                                interactive=False,
                                label=f"video {start + offset + 1}",
                                include_audio=False,
                                autoplay=False,
                                elem_classes=["selectable_videos"],
                            )
                        )
            # Bind each example's play event to copy itself into the input
            # slot. `inputs=[video]` is evaluated per iteration, so each
            # handler is correctly bound to its own component.
            for video in loaded_videos:
                video.play(
                    fn=lambda x: x,
                    inputs=[video],
                    outputs=[video_in],
                    scroll_to_output=True,
                    queue=True,
                    show_progress='full',
                    max_batch_size=1,
                )