# MediaPipe demo: face mesh, pose landmarks, and selfie segmentation,
# wrapped in a Gradio web interface.
import cv2
import mediapipe as mp
import urllib.request
import numpy as np
import gradio as gr

# Shorthand handles for the MediaPipe drawing helpers and the solutions used below.
mp_drawing = mp.solutions.drawing_utils
mp_drawing_styles = mp.solutions.drawing_styles
mp_pose = mp.solutions.pose
mp_face_mesh = mp.solutions.face_mesh
mp_selfie = mp.solutions.selfie_segmentation

# Download the example images referenced by the demo's Examples gallery.
face_url = "http://claireye.com.tw/img/20230222.jpg"
urllib.request.urlretrieve(face_url, "face_image.jpg")

img_url = "http://claireye.com.tw/img/230212a.jpg"
urllib.request.urlretrieve(img_url, "pose.jpg")

def segment(image):
    """Blur the background, keeping the segmented person sharp."""
    with mp_selfie.SelfieSegmentation(model_selection=0) as model:
        # MediaPipe expects RGB input; OpenCV images are BGR.
        res = model.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
    # Expand the single-channel confidence map to 3 channels and threshold it.
    mask = np.stack((res.segmentation_mask,) * 3, axis=-1) > 0.5
    # Keep foreground pixels; replace background pixels with a blurred copy.
    return np.where(mask, image, cv2.blur(image, (40, 40)))
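
# Optional local sanity check, a sketch using the "pose.jpg" downloaded above
# ("seg_test.jpg" is an arbitrary output name); uncomment to run:
# cv2.imwrite("seg_test.jpg", segment(cv2.imread("pose.jpg")))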

def facego(image):
    """Draw face-mesh contours and irises on a copy of the input image."""
    with mp_face_mesh.FaceMesh(
            static_image_mode=True,
            max_num_faces=1,
            refine_landmarks=True,
            min_detection_confidence=0.5) as face_mesh:
        # MediaPipe expects RGB input; OpenCV images are BGR.
        results = face_mesh.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))

    annotated_image = image.copy()
    # Guard against images where no face was detected.
    if not results.multi_face_landmarks:
        return annotated_image
    for face_landmarks in results.multi_face_landmarks:
        # Face contours (oval, eyes, eyebrows, lips).
        mp_drawing.draw_landmarks(
            image=annotated_image,
            landmark_list=face_landmarks,
            connections=mp_face_mesh.FACEMESH_CONTOURS,
            landmark_drawing_spec=None,
            connection_drawing_spec=mp_drawing_styles
            .get_default_face_mesh_contours_style())
        # Iris connections (available because refine_landmarks=True).
        mp_drawing.draw_landmarks(
            image=annotated_image,
            landmark_list=face_landmarks,
            connections=mp_face_mesh.FACEMESH_IRISES,
            landmark_drawing_spec=None,
            connection_drawing_spec=mp_drawing_styles
            .get_default_face_mesh_iris_connections_style())
    return annotated_image
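
# Optional local check, a sketch using the downloaded face example
# ("face_test.jpg" is an arbitrary output name); uncomment to run:
# cv2.imwrite("face_test.jpg", facego(cv2.imread("face_image.jpg")))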

def posego(image):
    """Draw pose landmarks and skeleton connections on a copy of the image."""
    with mp_pose.Pose(static_image_mode=True,
                      model_complexity=2,
                      enable_segmentation=True) as pose:
        # MediaPipe expects RGB input; OpenCV images are BGR.
        results = pose.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))

    annotated_image = image.copy()
    # Guard against images where no person was detected.
    if results.pose_landmarks:
        mp_drawing.draw_landmarks(
            annotated_image,
            results.pose_landmarks,
            mp_pose.POSE_CONNECTIONS,
            landmark_drawing_spec=mp_drawing_styles.get_default_pose_landmarks_style())
    return annotated_image
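
# Optional local check: with enable_segmentation=True the model also produces
# results.segmentation_mask, though only the landmarks are drawn here.
# Uncomment to run ("pose_test.jpg" is an arbitrary output name):
# cv2.imwrite("pose_test.jpg", posego(cv2.imread("pose.jpg")))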

def inference(img, version):
    """Dispatch to the selected annotator and return (RGB image, file path)."""
    # Gradio passes the upload as a file path (type="filepath"); load it as BGR.
    img2 = cv2.imread(img)
    if version == 'face':
        img1 = facego(img2)
    elif version == 'pose':
        img1 = posego(img2)
    else:
        img1 = segment(img2)
    # Save a BGR copy for the file download, then convert to RGB for display.
    save_path = 'out.jpg'
    cv2.imwrite(save_path, img1)
    img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2RGB)
    return img1, save_path
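
# inference() can also be called directly, outside Gradio, e.g.:
# rgb, path = inference("pose.jpg", "seg")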

# UI text for the Gradio interface.
title = "pose-style"
description = "Gradio demo for pose-style. To use it, upload your own image or click one of the examples to load it. Read more at the link below."
article = "<p style='text-align: center'><a href='http://claireye.com.tw'>Claireye</a> | 2023</p>"

# Build and launch the interface using the Gradio 3+ component API
# (the older gr.inputs/gr.outputs namespaces are deprecated).
gr.Interface(
    inference,
    [
        gr.Image(type="filepath", label="Input"),
        gr.Radio(['face', 'pose', 'seg'], value='pose', label='mode')
    ],
    [
        gr.Image(type="numpy", label="Output (The whole image)"),
        gr.File(label="Download the output image")
    ],
    title=title,
    description=description,
    article=article,
    examples=[['face_image.jpg', 'face'], ['pose.jpg', 'pose'],
              ['pose.jpg', 'seg']]).launch()
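
# Note: to expose a temporary public URL (e.g. when running in Colab),
# pass share=True to launch().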