aijack committed
Commit 9a72e69 · 1 Parent(s): ce74b65

Upload 2 files
Files changed (2)
  1. app.py            +76 -0
  2. requirements.txt   +9 -0
app.py ADDED
@@ -0,0 +1,76 @@
+ #!/usr/bin/env python
+ # coding: utf-8
+
+ import os
+ import cv2
+ import paddlehub as hub
+ import gradio as gr
+ import torch
+ import urllib.request
+
+ # Fetch a sample image used as the Gradio example input.
+ img_url = "http://claireye.com.tw/img/230212a.jpg"
+ urllib.request.urlretrieve(img_url, "pose.jpg")
+
+ # Load the U^2-Net salient object segmentation model from PaddleHub.
+ model = hub.Module(name='U2Net')
+
+
+ def infer(webcam, img, option):
+     # Use the webcam frame or the uploaded image, depending on the radio choice.
+     source = webcam if option == "webcam" else img
+     source.save('temp.jpg')
+     result = model.Segmentation(
+         images=[cv2.imread("temp.jpg")],
+         paths=None,
+         batch_size=1,
+         input_size=320,
+         output_dir='output',
+         visualization=True)
+     # Return the cut-out foreground (BGR -> RGB) and the predicted mask.
+     return result[0]['front'][:, :, ::-1], result[0]['mask']
+
+
+ inputs = [
+     gr.inputs.Image(source="webcam", label="Webcam", type="pil", optional=True),
+     gr.inputs.Image(source="upload", label="Input Image", type="pil", optional=True),
+     gr.inputs.Radio(choices=["webcam", "Image"], type="value", default="Image", label="Input Type"),
+ ]
+ outputs = [
+     gr.outputs.Image(type="numpy", label="Front"),
+     gr.outputs.Image(type="numpy", label="Mask"),
+ ]
+
+ title = "U^2-Net"
+ description = "Demo for U^2-Net. To use it, simply upload your image or click the example to load it. Read more at the links below."
+ article = "<p style='text-align: center'><a href='http://claireye.com.tw'>Claireye</a> | 2023</p>"
+ examples = [
+     ['pose.jpg', 'pose.jpg', 'Image'],
+ ]
+
+ gr.Interface(infer, inputs, outputs, title=title, description=description,
+              article=article, examples=examples).launch()
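For reference, the segmentation call used by app.py can also be exercised outside the Gradio UI. The sketch below is a minimal smoke test, assuming the same PaddleHub U2Net module and the pose.jpg file that app.py downloads; the output filenames (front.png, mask.png) are illustrative, not part of the original app.

# Minimal sketch: run the same U2Net Segmentation call on pose.jpg without Gradio.
# Assumes paddlepaddle, paddlehub and opencv are installed and pose.jpg exists
# (app.py downloads it); output filenames are illustrative.
import cv2
import paddlehub as hub

model = hub.Module(name='U2Net')
result = model.Segmentation(
    images=[cv2.imread("pose.jpg")],
    paths=None,
    batch_size=1,
    input_size=320,
    output_dir='output',
    visualization=True)

# 'front' is the cut-out foreground, 'mask' the predicted saliency mask.
cv2.imwrite("front.png", result[0]['front'])
cv2.imwrite("mask.png", result[0]['mask'])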
requirements.txt ADDED
@@ -0,0 +1,9 @@
+ numpy
+ scikit-image
+ torch
+ torchvision
+ pillow
+ opencv-python-headless
+ paddlepaddle
+ paddlehub
+ gradio
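Note that app.py relies on the legacy gr.inputs / gr.outputs namespaces, which newer Gradio releases have removed; since requirements.txt leaves gradio unpinned, an older Gradio release (a 3.x version, for example) is assumed for this code to run unchanged.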