#!/usr/bin/env python
# coding: utf-8

# In[6]:


# OpenCV for image I/O, PaddleHub for the pretrained U^2-Net module,
# Gradio for the web demo, urllib for downloading the sample image
import cv2
import paddlehub as hub
import gradio as gr
import urllib.request


# In[7]:


# Download a sample image for the demo and load the pretrained U^2-Net
# salient object segmentation module from PaddleHub
img_url = "http://claireye.com.tw/img/230212a.jpg"
urllib.request.urlretrieve(img_url, "pose.jpg")
model = hub.Module(name='U2Net')
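

# In[ ]:


# Optional sanity check -- a minimal sketch that is not part of the original
# notebook. It assumes the download above succeeded and calls the same
# Segmentation() API the Gradio handler below uses, without writing
# visualization files to disk.
sample = cv2.imread("pose.jpg")
print("Sample image shape:", sample.shape)
check = model.Segmentation(
    images=[sample],
    paths=None,
    batch_size=1,
    input_size=320,
    output_dir='output',
    visualization=False)
print("Result keys:", list(check[0].keys()))  # the handler below uses 'front' and 'mask'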


# In[8]:


def infer(webcam, img, option):
    # Use the webcam frame or the uploaded image, depending on the selected input type
    source = webcam if option == "webcam" else img
    source.save('temp.jpg')
    result = model.Segmentation(
        images=[cv2.imread("temp.jpg")],
        paths=None,
        batch_size=1,
        input_size=320,
        output_dir='output',
        visualization=True)
    # 'front' comes back in BGR (OpenCV) channel order; reverse it to RGB for display
    return result[0]['front'][:, :, ::-1], result[0]['mask']
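

# In[ ]:


# A minimal sketch (not in the original notebook) showing how infer() can be
# called directly, without the Gradio UI. It assumes pose.jpg was downloaded
# by the cell above; the webcam argument is unused on the "Image" path.
from PIL import Image

front, mask = infer(None, Image.open("pose.jpg"), "Image")
print("Front shape:", front.shape, "Mask shape:", mask.shape)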


# In[9]:


# Gradio UI components (legacy gr.inputs / gr.outputs API)
inputs = [
    gr.inputs.Image(source="webcam", label="Webcam", type="pil", optional=True),
    gr.inputs.Image(source="upload", label="Input Image", type="pil", optional=True),
    gr.inputs.Radio(choices=["webcam", "Image"], type="value", default="Image", label="Input Type"),
]
outputs = [
    gr.outputs.Image(type="numpy", label="Front"),
    gr.outputs.Image(type="numpy", label="Mask"),
]

title = "U^2-Net"
description = "Demo for U^2-Net salient object detection. To use it, upload an image or click the example below to load it. Read more at the link below."
article = "<p style='text-align: center'><a href='http://claireye.com.tw'>Claireye</a> | 2023</p>"
examples = [
  ['pose.jpg','pose.jpg','Image'],
]


# In[10]:


gr.Interface(
    infer,
    inputs,
    outputs,
    title=title,
    description=description,
    article=article,
    examples=examples,
).launch()


# In[ ]: