osanseviero committed
Commit 6e4fb57 · 1 Parent(s): 60d58ef

Files changed (1)
app.py (+11 -6)
app.py CHANGED

@@ -8,7 +8,7 @@ import torch.nn as nn
 import torchvision
 import matplotlib.pyplot as plt
 
-torch.hub.download_url_to_file('http://images.cocodataset.org/val2017/000000039769.jpg', 'cats.jpg')
+torch.hub.download_url_to_file('https://viratdata.org/video/VIRAT_S_010204_05_000856_000890.mp4', 'video.mp4')
 
 def get_attention_maps(pixel_values, attentions, nh):
     threshold = 0.6
@@ -47,19 +47,24 @@ def get_attention_maps(pixel_values, attentions, nh):
     return attention_maps
 
 
-def visualize_attention(image):
+def visualize_attention(video):
+    return video
+    """
     # normalize channels
     pixel_values = feature_extractor(images=image, return_tensors="pt").pixel_values
+
     # forward pass
     outputs = model(pixel_values, output_attentions=True, interpolate_pos_encoding=True)
+
     # get attentions of last layer
     attentions = outputs.attentions[-1]
     nh = attentions.shape[1] # number of heads
+
     # we keep only the output patch attention
     attentions = attentions[0, :, 0, 1:].reshape(nh, -1)
     attention_maps = get_attention_maps(pixel_values, attentions, nh)
 
-    return attention_maps
+    return attention_maps"""
 
 feature_extractor = ViTFeatureExtractor.from_pretrained("facebook/dino-vits8", do_resize=False)
 model = ViTModel.from_pretrained("facebook/dino-vits8", add_pooling_layer=False)
@@ -67,10 +72,10 @@ model = ViTModel.from_pretrained("facebook/dino-vits8", add_pooling_layer=False)
 title = "Interactive demo: DINO"
 description = "Demo for Facebook AI's DINO, a new method for self-supervised training of Vision Transformers. Using this method, they are capable of segmenting objects within an image without having ever been trained to do so. This can be observed by displaying the self-attention of the heads from the last layer for the [CLS] token query. This demo uses a ViT-S/8 trained with DINO. To use it, simply upload an image or use the example image below. Results will show up in a few seconds."
 article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2104.14294'>Emerging Properties in Self-Supervised Vision Transformers</a> | <a href='https://github.com/facebookresearch/dino'>Github Repo</a></p>"
-examples =[['cats.jpg']]
+examples =[['video.mp4']]
 iface = gr.Interface(fn=visualize_attention,
-                     inputs=gr.inputs.Image(shape=(480, 480), type="pil"),
-                     outputs=[gr.outputs.Image(type='file', label=f'attention_head_{i}') for i in range(6)],
+                     inputs=gr.inputs.Video(gr.inputs.Video()),
+                     outputs=[gr.outputs.Video(label=f'result_video')],
                      title=title,
                      description=description,
                      article=article,
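The new visualize_attention just echoes the uploaded video back and parks the old image pipeline inside a string literal, so the DINO model is still loaded but never called. For reference, here is a minimal sketch of how that commented-out logic could be applied per frame once video support lands; the attention_maps_for_video name, the torchvision.io.read_video decoding, and the max_frames cap are illustrative assumptions, not part of this commit:

import torchvision

def attention_maps_for_video(path, max_frames=16):
    # Decode the clip to a (T, H, W, C) uint8 tensor (assumed helper, not in app.py).
    frames, _, _ = torchvision.io.read_video(path, pts_unit="sec")
    per_frame_maps = []
    for frame in frames[:max_frames]:
        # (H, W, C) -> (C, H, W): the feature extractor expects channels first.
        pixel_values = feature_extractor(images=frame.permute(2, 0, 1),
                                         return_tensors="pt").pixel_values
        outputs = model(pixel_values, output_attentions=True, interpolate_pos_encoding=True)
        attentions = outputs.attentions[-1]  # last layer: (1, num_heads, seq_len, seq_len)
        nh = attentions.shape[1]             # number of heads
        # Keep only the [CLS] query's attention over the patch tokens.
        attentions = attentions[0, :, 0, 1:].reshape(nh, -1)
        per_frame_maps.append(get_attention_maps(pixel_values, attentions, nh))
    return per_frame_maps

feature_extractor, model, and get_attention_maps are used exactly as defined in app.py; only the frame loop around them is new.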
 
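Two details in the new Gradio wiring are worth flagging: gr.inputs.Video(gr.inputs.Video()) passes a whole Video component as the constructor's first positional argument, where the Gradio 2.x API takes its usual configuration arguments, and f'result_video' is an f-string with nothing to interpolate. A corrected sketch, assuming the same Gradio 2.x gr.inputs/gr.outputs style the rest of the file uses (the exact keyword arguments here are my reading of that API, not taken from the commit):

iface = gr.Interface(fn=visualize_attention,
                     inputs=gr.inputs.Video(label="input video"),     # a single component, not nested
                     outputs=gr.outputs.Video(label="result_video"),  # plain string label
                     title=title,
                     description=description,
                     article=article,
                     examples=examples)
iface.launch()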