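# Gradio app: detect a face in an uploaded photo and repaint it with two AnimeGAN2 models.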
import os
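# install dlib at startup (presumably needed by the face_detection helper imported below)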
os.system("pip install dlib")
import sys
import face_detection
from PIL import Image, ImageOps, ImageFile
import numpy as np
import cv2 as cv
import torch
import gradio as gr
torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"
model = torch.hub.load("bryandlee/animegan2-pytorch:main", "generator", device=device).eval()
model2 = torch.hub.load("AK391/animegan2-pytorch:main", "generator", pretrained="face_paint_512_v1", device=device).eval()
face2paint = torch.hub.load("bryandlee/animegan2-pytorch:main", "face2paint", device=device)
image_format = "png" #@param ["jpeg", "png"]
def unsharp_mask(image, kernel_size=(5, 5), sigma=1.0, amount=2.0, threshold=0):
    """Return a sharpened version of the image, using an unsharp mask."""
    blurred = cv.GaussianBlur(image, kernel_size, sigma)
    sharpened = float(amount + 1) * image - float(amount) * blurred
    # clip to the valid 0-255 range and convert back to uint8
    sharpened = np.maximum(sharpened, np.zeros(sharpened.shape))
    sharpened = np.minimum(sharpened, 255 * np.ones(sharpened.shape))
    sharpened = sharpened.round().astype(np.uint8)
    if threshold > 0:
        # keep the original pixels where local contrast is below the threshold
        low_contrast_mask = np.absolute(image - blurred) < threshold
        np.copyto(sharpened, image, where=low_contrast_mask)
    return sharpened
def normPRED(d):
    """Normalize an array to the 0..1 range."""
    ma = np.max(d)
    mi = np.min(d)
    dn = (d - mi) / (ma - mi)
    return dn

def array_to_np(array_in):
    """Scale a (1, C, H, W) array to 0..255 and return it as an H x W x C array."""
    array_in = normPRED(array_in)
    array_in = np.squeeze(255.0 * array_in)
    array_in = np.transpose(array_in, (1, 2, 0))
    return array_in

def array_to_image(array_in):
    """Same as array_to_np, but return a PIL Image."""
    array_in = normPRED(array_in)
    array_in = np.squeeze(255.0 * array_in)
    array_in = np.transpose(array_in, (1, 2, 0))
    im = Image.fromarray(array_in.astype(np.uint8))
    return im
def image_as_array(image_in):
    """Convert a PIL image to a normalized (1, 3, H, W) float array (ImageNet mean/std)."""
    image_in = np.array(image_in, np.float32)
    tmpImg = np.zeros((image_in.shape[0], image_in.shape[1], 3))
    image_in = image_in / np.max(image_in)
    if image_in.shape[2] == 1:
        # single-channel input: replicate the channel, normalizing with the red-channel stats
        tmpImg[:, :, 0] = (image_in[:, :, 0] - 0.485) / 0.229
        tmpImg[:, :, 1] = (image_in[:, :, 0] - 0.485) / 0.229
        tmpImg[:, :, 2] = (image_in[:, :, 0] - 0.485) / 0.229
    else:
        tmpImg[:, :, 0] = (image_in[:, :, 0] - 0.485) / 0.229
        tmpImg[:, :, 1] = (image_in[:, :, 1] - 0.456) / 0.224
        tmpImg[:, :, 2] = (image_in[:, :, 2] - 0.406) / 0.225
    tmpImg = tmpImg.transpose((2, 0, 1))
    image_out = np.expand_dims(tmpImg, 0)
    return image_out
# detect and align a face with the bundled face_detection helper
def find_aligned_face(image_in, size=400):
    aligned_image, n_faces, quad = face_detection.align(image_in, face_index=0, output_size=size)
    return aligned_image, n_faces, quad
# crop and align the first face; fall back to resizing the whole image when no face is found
def align_first_face(image_in, size=400):
    aligned_image, n_faces, quad = find_aligned_face(image_in, size=size)
    if n_faces == 0:
        try:
            image_in = ImageOps.exif_transpose(image_in)
        except Exception:
            print("exif problem, not rotating")
        image_in = image_in.resize((size, size))
        im_array = image_as_array(image_in)
    else:
        im_array = image_as_array(aligned_image)
    return im_array
def img_concat_h(im1, im2):
    """Concatenate two images horizontally."""
    dst = Image.new('RGB', (im1.width + im2.width, im1.height))
    dst.paste(im1, (0, 0))
    dst.paste(im2, (im1.width, 0))
    return dst
def paintface(img: Image.Image, size: int) -> Image.Image:
    aligned_img = align_first_face(img, size)
    if aligned_img is None:
        output = None
    else:
        # run both generators on the aligned face and show the results side by side
        im_in = array_to_image(aligned_img).convert("RGB")
        im_out1 = face2paint(model, im_in, side_by_side=False)
        im_out2 = face2paint(model2, im_in, side_by_side=False)
        output = img_concat_h(im_out1, im_out2)
    return output
def generate(img):
    out = paintface(img, 400)
    return out
title = "Face from Photo into Paint"
description = "Upload a photo and the app will detect the face and repaint it in a cartoon/anime style. If no face is detected, try the edit button on the right side of the image. Well suited for avatar-sized pictures. The output shows two results from different models."
article = "Examples are from the Internet"
Example = [['Example01.jpg'], ['Example02.jpg'], ['Example03.jpg']]
demo = gr.Interface(
    generate,
    inputs=[gr.Image(type="pil", label="Upload a photo")],
    outputs=[gr.Image(type="pil", label="Output")],
    title=title,
    description=description,
    article=article,
    examples=Example,
    allow_flagging='never'
)
demo.launch()