#!/usr/bin/env python
# coding: utf-8

# In[1]:


import cv2
import mediapipe as mp
import urllib.request
import numpy as np
import pickle
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib import animation


# In[2]:


# In[3]:


mp_drawing = mp.solutions.drawing_utils
mp_drawing_styles = mp.solutions.drawing_styles
mp_holistic = mp.solutions.holistic
mp_pose = mp.solutions.pose
mp_face_mesh = mp.solutions.face_mesh


# In[4]:


# Fetch a sample face image
face_url = "http://claireye.com.tw/img/20230222.jpg"
urllib.request.urlretrieve(face_url, "face_image.jpg")


# In[5]:


# Fetch a sample image for pose analysis
img_url = "http://claireye.com.tw/img/230212a.jpg"
urllib.request.urlretrieve(img_url, "pose.jpg")


# In[6]:


import gradio as gr


# In[7]:


mp_selfie = mp.solutions.selfie_segmentation


# In[8]:


def segment(image):
    # Run selfie segmentation; MediaPipe expects RGB, so convert from
    # OpenCV's BGR before processing.
    with mp_selfie.SelfieSegmentation(model_selection=0) as model:
        res = model.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
    # Keep the person sharp and blur everything outside the mask.
    mask = np.stack((res.segmentation_mask,) * 3, axis=-1) > 0.5
    return np.where(mask, image, cv2.blur(image, (40, 40)))


# In[9]:


def facego(image):
    with mp_face_mesh.FaceMesh(
            static_image_mode=True,
            max_num_faces=1,
            refine_landmarks=True,
            min_detection_confidence=0.5) as face_mesh:
        # cv2 reads images as BGR; convert to RGB for MediaPipe
        results = face_mesh.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
        annotated_image = image.copy()
        if not results.multi_face_landmarks:
            # No face detected; return the input unchanged.
            return annotated_image
        for face_landmarks in results.multi_face_landmarks:
            mp_drawing.draw_landmarks(
                image=annotated_image,
                landmark_list=face_landmarks,
                connections=mp_face_mesh.FACEMESH_CONTOURS,
                landmark_drawing_spec=None,
                connection_drawing_spec=mp_drawing_styles
                .get_default_face_mesh_contours_style())
            mp_drawing.draw_landmarks(
                image=annotated_image,
                landmark_list=face_landmarks,
                connections=mp_face_mesh.FACEMESH_IRISES,
                landmark_drawing_spec=None,
                connection_drawing_spec=mp_drawing_styles
                .get_default_face_mesh_iris_connections_style())
        return annotated_image


# In[10]:


def posego(image):
    # Create a MediaPipe `Pose` object
    with mp_pose.Pose(static_image_mode=True,
                      model_complexity=2,
                      enable_segmentation=True) as pose:
        results = pose.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
        # Copy the image
        annotated_image = image.copy()
        # Draw the pose landmarks on the image with drawing specification defaults.
        mp_drawing.draw_landmarks(
            annotated_image,
            results.pose_landmarks,
            mp_pose.POSE_CONNECTIONS,
            landmark_drawing_spec=mp_drawing_styles.get_default_pose_landmarks_style())
        return annotated_image


# In[11]:


def inference(img, version):
    # Gradio passes the input as a file path; read it as a BGR array.
    img2 = cv2.imread(img)
    if version == 'face':
        img1 = facego(img2)
    elif version == 'pose':
        img1 = posego(img2)
    else:
        img1 = segment(img2)
    # Save the annotated image so it can be offered as a download.
    save_path = 'out.jpg'
    cv2.imwrite(save_path, img1)
    # Convert to RGB for the Gradio image output.
    img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2RGB)
    return img1, save_path


# In[12]:


title = "pose-style"
description = "Gradio demo for pose-style. To use it, simply upload your image, or click one of the examples to load it. Read more at the links below."
article = "Claireye | 2023"
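

# In[ ]:


# Quick sanity check (an added sketch, not part of the original demo): run
# each of the three modes once on the sample images downloaded above, save
# the results, and preview the pose output inline with matplotlib (which is
# imported at the top but otherwise unused) before launching the Gradio
# interface below. Assumes both downloads succeeded; the "check_*.jpg"
# file names are illustrative.
for path, mode in [('face_image.jpg', 'face'),
                   ('pose.jpg', 'pose'),
                   ('pose.jpg', 'seg')]:
    rgb_out, _ = inference(path, mode)
    # inference() returns RGB for display; convert back to BGR for imwrite.
    cv2.imwrite(f'check_{mode}.jpg', cv2.cvtColor(rgb_out, cv2.COLOR_RGB2BGR))

plt.imshow(cv2.cvtColor(cv2.imread('check_pose.jpg'), cv2.COLOR_BGR2RGB))
plt.axis('off')
plt.show()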

# In[13]:


gr.Interface(
    inference,
    [
        gr.inputs.Image(type="filepath", label="Input"),
        gr.inputs.Radio(['face', 'pose', 'seg'], type="value", default='pose', label='mode')
    ],
    [
        gr.outputs.Image(type="numpy", label="Output (The whole image)"),
        gr.outputs.File(label="Download the output image")
    ],
    title=title,
    description=description,
    article=article,
    examples=[['face_image.jpg', 'face'],
              ['pose.jpg', 'pose'],
              ['pose.jpg', 'seg']]).launch()