File size: 1,417 Bytes
282dd93
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
bb8a0ae
282dd93
 
bae8433
 
282dd93
e7594ac
282dd93
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
from transformers import AutoImageProcessor, SuperPointForKeypointDetection
import torch
import matplotlib.pyplot as plt
import uuid
import gradio as gr


# Load the SuperPoint checkpoint once at import time so every request reuses
# the same weights. Both objects are module-level and read by `infer` below.
processor = AutoImageProcessor.from_pretrained("magic-leap-community/superpoint")
model = SuperPointForKeypointDetection.from_pretrained("magic-leap-community/superpoint")

def infer(image):
  """Run SuperPoint keypoint detection on an image and visualise the result.

  Args:
    image: PIL image as delivered by the Gradio ``gr.Image(type="pil")`` input.

  Returns:
    str: Path of a PNG file showing the image with detected keypoints
    scattered on top (marker size scales with detection confidence).
  """
  inputs = processor(image, return_tensors="pt").to(model.device, model.dtype)
  # Inference only — disable autograd to avoid building a gradient graph.
  with torch.no_grad():
    model_outputs = model(**inputs)
  # post-processing expects (height, width); PIL's .size is (width, height).
  image_sizes = [(image.size[1], image.size[0])]
  outputs = processor.post_process_keypoint_detection(model_outputs, image_sizes)
  # .cpu() so this also works when the model lives on a GPU.
  keypoints = outputs[0]["keypoints"].detach().cpu().numpy()
  scores = outputs[0]["scores"].detach().cpu().numpy()

  # Draw on an explicit figure rather than pyplot's implicit global one so
  # concurrent Gradio requests cannot clobber each other's plotting state.
  fig, ax = plt.subplots()
  ax.axis('off')
  ax.imshow(image)
  ax.scatter(
      keypoints[:, 0],
      keypoints[:, 1],
      s=scores * 100,
      c='cyan',
      alpha=0.4
  )
  # Unique filename per request so parallel calls never overwrite each other.
  path = "./" + uuid.uuid4().hex + ".png"
  fig.savefig(path)
  plt.close(fig)
  return path

# Gradio wiring: a single image-in / image-out demo around `infer`.
title = "SuperPoint"
description = "Try [SuperPoint](https://huggingface.co/docs/transformers/en/model_doc/superpoint) in this demo, foundation model for keypoint detection supported in 🤗 transformers. Simply upload an image or try the example. "

iface = gr.Interface(
    fn=infer,
    inputs=gr.Image(type="pil"),
    outputs=gr.Image(),
    title=title,
    description=description,
    examples=["./bee.jpg"],
)

iface.launch()