yoon6173 committed on
Commit 808bedf · 1 Parent(s): 6a0870f
Files changed (1)
  1. app.py +8 -8
app.py CHANGED
@@ -7,34 +7,34 @@ import numpy as np
 
 
 def greet(url):
-
+    # load Mask2Former fine-tuned on Cityscapes semantic segmentation
     processor = AutoImageProcessor.from_pretrained("facebook/mask2former-swin-large-cityscapes-semantic")
     model = Mask2FormerForUniversalSegmentation.from_pretrained("facebook/mask2former-swin-large-cityscapes-semantic")
 
     image = Image.open(requests.get(url, stream=True).raw)
+
     inputs = processor(images=image, return_tensors="pt")
 
     with torch.no_grad():
         outputs = model(**inputs)
+
     # model predicts class_queries_logits of shape `(batch_size, num_queries)`
     # and masks_queries_logits of shape `(batch_size, num_queries, height, width)`
     class_queries_logits = outputs.class_queries_logits
     masks_queries_logits = outputs.masks_queries_logits
 
     # you can pass them to processor for postprocessing
-    predicted_semantic_map = processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size])[0]
-    predicted_semantic_map = predicted_semantic_map.numpy()
-
-    return predicted_semantic_map
-
+    predicted_semantic_map = processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]
+    # we refer to the demo notebooks for visualization (see "Resources" section in the Mask2Former docs)
 
-url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+url = "http://www.apparelnews.co.kr/upfiles/manage/202302/5d5f694177b26fc86e5db623bf7ae4b7.jpg"
 #greet(url)
 
 iface = gr.Interface(
     fn=greet,
     inputs=gr.Image(value=url),
-    outputs="image"
+    outputs=gr.Image(),
+    live=True
 )
 
 iface.launch(debug = True)
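
A note on the updated greet: post_process_semantic_segmentation expects each entry of target_sizes as (height, width), while PIL's image.size is (width, height), which is what the [::-1] added in this commit corrects. The commit also drops the old return predicted_semantic_map, so greet now returns None and the gr.Image() output has nothing to render, and with gr.Image's default type the Gradio input hands the function a NumPy array rather than a URL string. The following is a minimal sketch, not the committed code, of a variant that accounts for both; the function name segment is hypothetical, and the grayscale rendering of the class-id map is just one simple way to make the result displayable.

import numpy as np
import torch
from PIL import Image
from transformers import AutoImageProcessor, Mask2FormerForUniversalSegmentation

# loading once at module level is a choice of this sketch; the app loads inside greet
processor = AutoImageProcessor.from_pretrained("facebook/mask2former-swin-large-cityscapes-semantic")
model = Mask2FormerForUniversalSegmentation.from_pretrained("facebook/mask2former-swin-large-cityscapes-semantic")

def segment(image_array):
    # hypothetical replacement for greet: gr.Image passes a NumPy array by default
    image = Image.fromarray(image_array).convert("RGB")
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)

    # target_sizes takes (height, width); PIL's image.size is (width, height)
    predicted_semantic_map = processor.post_process_semantic_segmentation(
        outputs, target_sizes=[image.size[::-1]]
    )[0]

    # scale the class ids into 0-255 so gr.Image() can display them as a grayscale map
    seg = predicted_semantic_map.cpu().numpy()
    num_labels = max(len(model.config.id2label), 1)
    return (seg * (255 // num_labels)).astype(np.uint8)

With a function like this, the interface added in the commit would be wired as gr.Interface(fn=segment, inputs=gr.Image(value=url), outputs=gr.Image(), live=True); loading the processor and model once at import time also avoids re-instantiating them on every request, which is a design choice of the sketch rather than a change made in this commit.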