aijack committed
Commit 5cc46fa · Parent: 1b80989

Upload 2 files

Files changed (2)
  1. app.py +188 -0
  2. requirements.txt +4 -0
app.py ADDED
@@ -0,0 +1,188 @@
+ #!/usr/bin/env python
+ # coding: utf-8
+
+ # In[1]:
+
+
+ import urllib.request
+
+ import cv2
+ import mediapipe as mp
+ import numpy as np
+
+
+ # In[3]:
+
+
+ # MediaPipe drawing helpers and the solutions this demo uses
+ mp_drawing = mp.solutions.drawing_utils
+ mp_drawing_styles = mp.solutions.drawing_styles
+ mp_pose = mp.solutions.pose
+ mp_face_mesh = mp.solutions.face_mesh
+
+
+ # In[4]:
+
+
+ # Fetch the example face image used in the demo examples
+ face_url = "http://claireye.com.tw/img/20230222.jpg"
+ urllib.request.urlretrieve(face_url, "face_image.jpg")
+
+
+ # In[5]:
+
+
+ # Fetch the example image for pose and segmentation analysis
+ img_url = "http://claireye.com.tw/img/230212a.jpg"
+ urllib.request.urlretrieve(img_url, "pose.jpg")
+
+
+ # In[6]:
+
+
+ import gradio as gr
+
+
+ # In[7]:
+
+
+ mp_selfie = mp.solutions.selfie_segmentation
+
+
+ # In[8]:
+
+
+ def segment(image):
+     """Blur the background of a BGR image, keeping the segmented person sharp."""
+     with mp_selfie.SelfieSegmentation(model_selection=0) as model:
+         # MediaPipe expects RGB input; cv2.imread returns BGR
+         res = model.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
+         # Broadcast the single-channel mask to 3 channels and threshold it
+         mask = np.stack((res.segmentation_mask,) * 3, axis=-1) > 0.5
+         # Keep the person where the mask is True, blur everything else
+         return np.where(mask, image, cv2.blur(image, (40, 40)))
+
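+ # A minimal local check for segment(); a sketch assuming the "pose.jpg"
+ # example fetched above (commented out so the app itself is unchanged):
+ #
+ #     img = cv2.imread("pose.jpg")        # BGR array, as the app passes in
+ #     out = segment(img)                  # person kept sharp, background blurred
+ #     cv2.imwrite("seg_check.jpg", out)   # hypothetical output filename
+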
+ # In[9]:
+
+
+ def facego(image):
+     """Draw face-mesh contours and iris landmarks on a BGR image."""
+     with mp_face_mesh.FaceMesh(
+             static_image_mode=True,
+             max_num_faces=1,
+             refine_landmarks=True,
+             min_detection_confidence=0.5) as face_mesh:
+
+         # Convert from BGR (OpenCV) to RGB (MediaPipe) before processing
+         results = face_mesh.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
+
+         annotated_image = image.copy()
+         # multi_face_landmarks is None when no face is detected
+         if not results.multi_face_landmarks:
+             return annotated_image
+
+         for face_landmarks in results.multi_face_landmarks:
+             mp_drawing.draw_landmarks(
+                 image=annotated_image,
+                 landmark_list=face_landmarks,
+                 connections=mp_face_mesh.FACEMESH_CONTOURS,
+                 landmark_drawing_spec=None,
+                 connection_drawing_spec=mp_drawing_styles
+                 .get_default_face_mesh_contours_style())
+
+             mp_drawing.draw_landmarks(
+                 image=annotated_image,
+                 landmark_list=face_landmarks,
+                 connections=mp_face_mesh.FACEMESH_IRISES,
+                 landmark_drawing_spec=None,
+                 connection_drawing_spec=mp_drawing_styles
+                 .get_default_face_mesh_iris_connections_style())
+
+         return annotated_image
+
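+ # Note: drawing FACEMESH_IRISES relies on refine_landmarks=True above, which
+ # extends the 468 base face-mesh landmarks with 10 iris landmarks.
+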
+ # In[10]:
+
+
+ def posego(image):
+     """Draw pose landmarks on a BGR image."""
+     # Create a MediaPipe `Pose` object for single-image inference
+     with mp_pose.Pose(static_image_mode=True,
+                       model_complexity=2,
+                       enable_segmentation=True) as pose:
+
+         results = pose.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
+
+         # Copy the image
+         annotated_image = image.copy()
+
+         # Draw the pose landmarks with the default drawing style
+         # (draw_landmarks is a no-op when no pose is detected)
+         mp_drawing.draw_landmarks(annotated_image,
+                                   results.pose_landmarks,
+                                   mp_pose.POSE_CONNECTIONS,
+                                   landmark_drawing_spec=mp_drawing_styles.get_default_pose_landmarks_style())
+
+         return annotated_image
+
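+ # Note: enable_segmentation=True also makes results carry a segmentation_mask
+ # that this demo never reads; it could be disabled to save some compute.
+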
+ # In[11]:
+
+
+ def inference(img, version):
+     """Dispatch the uploaded image to the pipeline selected in the UI."""
+     # Gradio passes the upload as a filepath; read it as a BGR array
+     img2 = cv2.imread(img)
+     if version == 'face':
+         img1 = facego(img2)
+     elif version == 'pose':
+         img1 = posego(img2)
+     else:
+         img1 = segment(img2)
+     # Save the annotated BGR image for download, then convert to RGB for display
+     save_path = 'out.jpg'
+     cv2.imwrite(save_path, img1)
+     img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2RGB)
+     return img1, save_path
+
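+ # inference() can also be exercised without the UI; a minimal sketch, assuming
+ # the example images fetched above (commented out so the app is unchanged):
+ #
+ #     rgb, path = inference("face_image.jpg", "face")
+ #     # rgb is an RGB array for display; path points at the saved "out.jpg"
+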
+
+ # In[12]:
+
+
+ title = "pose-style"
+ description = "Gradio demo for pose-style. To use it, simply upload your image or click one of the examples to load it. Read more at the link below."
+ article = "<p style='text-align: center'><a href='http://claireye.com.tw'>Claireye</a> | 2023</p>"
+
+
+ # In[13]:
+
+
+ # Gradio 4 removed the gr.inputs/gr.outputs namespaces, so the components
+ # are used directly from the top-level gr module
+ gr.Interface(
+     inference, [
+         gr.Image(type="filepath", label="Input"),
+         gr.Radio(['face', 'pose', 'seg'], type="value", value='pose', label='mode')
+     ], [
+         gr.Image(type="numpy", label="Output (the whole image)"),
+         gr.File(label="Download the output image")
+     ],
+     title=title,
+     description=description,
+     article=article,
+     examples=[['face_image.jpg', 'face'], ['pose.jpg', 'pose'],
+               ['pose.jpg', 'seg']]).launch()
+
+
+ # In[ ]:
+
+
requirements.txt ADDED
@@ -0,0 +1,4 @@
+ numpy
+ opencv-python-headless
+ mediapipe
+ gradio