merve (HF staff) committed
Commit 282dd93 · verified · 1 Parent(s): fdf1a17

Create app.py

Files changed (1)
  1. app.py +46 -0
app.py ADDED
@@ -0,0 +1,46 @@
+ from transformers import AutoImageProcessor, SuperPointForKeypointDetection
+ import torch
+ import matplotlib.pyplot as plt
+ import uuid
+ import gradio as gr
+
+
+ # Load the SuperPoint keypoint detector and its image processor from the Hub
+ processor = AutoImageProcessor.from_pretrained("magic-leap-community/superpoint")
+ model = SuperPointForKeypointDetection.from_pretrained("magic-leap-community/superpoint")
+
+ def infer(image):
+     # Preprocess the image and run SuperPoint
+     inputs = processor(image, return_tensors="pt").to(model.device, model.dtype)
+     with torch.no_grad():
+         model_outputs = model(**inputs)
+     # Post-process to keypoints in original image coordinates; sizes are (height, width)
+     image_sizes = [(image.size[1], image.size[0])]
+     outputs = processor.post_process_keypoint_detection(model_outputs, image_sizes)
+     keypoints = outputs[0]["keypoints"].detach().cpu().numpy()
+     scores = outputs[0]["scores"].detach().cpu().numpy()
+
+     # Overlay the keypoints on the image, sized by detection score
+     plt.figure()
+     plt.axis("off")
+     plt.imshow(image)
+     plt.scatter(
+         keypoints[:, 0],
+         keypoints[:, 1],
+         s=scores * 100,
+         c="cyan",
+         alpha=0.4
+     )
+     # Save the plot under a unique filename and return its path
+     path = "./" + uuid.uuid4().hex + ".png"
+     plt.savefig(path)
+     plt.close()
+     return path
+
+ title = "SuperPoint"
+ description = "Try SuperPoint in this demo, a foundation model for keypoint detection supported in 🤗 Transformers. Simply upload an image or try the example."
+ iface = gr.Interface(fn=infer, inputs=gr.Image(type="pil"),
+                      outputs=gr.Image(), title=title, description=description,
+                      examples=["./bee.jpg"])
+
+ iface.launch()
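
For reference, the same checkpoint can be exercised outside the Space. The snippet below is a minimal, hypothetical standalone test that mirrors the pre- and post-processing done by infer() above; it assumes a local bee.jpg (the example image the interface references) and CPU inference.

from PIL import Image
import torch
from transformers import AutoImageProcessor, SuperPointForKeypointDetection

processor = AutoImageProcessor.from_pretrained("magic-leap-community/superpoint")
model = SuperPointForKeypointDetection.from_pretrained("magic-leap-community/superpoint")

# Same steps as infer() in app.py, without the plotting
image = Image.open("bee.jpg")
inputs = processor(image, return_tensors="pt")
with torch.no_grad():
    model_outputs = model(**inputs)

# post_process_keypoint_detection takes (height, width) per image and returns one
# dict per image with "keypoints", "scores" and "descriptors" tensors
outputs = processor.post_process_keypoint_detection(model_outputs, [(image.size[1], image.size[0])])
keypoints = outputs[0]["keypoints"]
scores = outputs[0]["scores"]
print(f"{len(keypoints)} keypoints, best score {scores.max().item():.3f}")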