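# Hugging Face Space "Apocalyptify": runs an uploaded face through a fine-tuned
# pixel2style2pixel TorchScript model and shows the result next to the aligned input.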
import os

# Install dlib at startup (used by the bundled face_detection helper).
os.system("pip install --upgrade pip")
os.system("pip install dlib")
import sys
import face_detection
import PIL
from PIL import Image, ImageOps
import numpy as np
import torch

torch.set_grad_enabled(False)

net = torch.jit.load('apocalyptify_p2s2p_torchscript_cpu.pt')
net.eval()
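
# Convert a CHW float tensor in [-1, 1] back to an 8-bit RGB PIL image.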
def tensor2im(var):
    var = var.cpu().detach().transpose(0, 2).transpose(0, 1).numpy()
    var = (var + 1) / 2
    var[var < 0] = 0
    var[var > 1] = 1
    var = var * 255
    return Image.fromarray(var.astype('uint8'))
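
# Convert a PIL image to a 1xCxHxW float32 array scaled to [-1, 1].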
def image_as_array(image_in):
    im_array = np.array(image_in, np.float32)
    im_array = (im_array / 255) * 2 - 1
    im_array = np.transpose(im_array, (2, 0, 1))
    im_array = np.expand_dims(im_array, 0)
    return im_array
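
# Detect and crop-align the first face in the image via the face_detection helper.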
def find_aligned_face(image_in, size=256):
    aligned_image, n_faces, quad = face_detection.align(image_in, face_index=0, output_size=size)
    return aligned_image, n_faces, quad
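
# Align the first detected face; if no face is found, fall back to resizing the
# (EXIF-rotated) whole image to the requested size.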
def align_first_face(image_in, size=256):
    aligned_image, n_faces, quad = find_aligned_face(image_in, size=size)
    if n_faces == 0:
        try:
            image_in = ImageOps.exif_transpose(image_in)
        except Exception:
            print("exif problem, not rotating")
        image_in = image_in.resize((size, size))
        im_array = image_as_array(image_in)
    else:
        im_array = image_as_array(aligned_image)
    return im_array
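
# Paste two images side by side (used for the before/after comparison).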
def img_concat_h(im1, im2):
    dst = Image.new('RGB', (im1.width + im2.width, im1.height))
    dst.paste(im1, (0, 0))
    dst.paste(im2, (im1.width, 0))
    return dst
import gradio as gr
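
# Run the aligned face through the TorchScript network and return the input and
# the transformed output concatenated horizontally.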
def face2drag(
    img: Image.Image,
    size: int
) -> Image.Image:
    aligned_img = align_first_face(img, size=size)
    if aligned_img is None:
        output = None
    else:
        input_tensor = torch.Tensor(aligned_img)
        output = net(input_tensor)
        output = tensor2im(output[0])
        output = img_concat_h(tensor2im(torch.Tensor(aligned_img)[0]), output)
    return output
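
# Gradio callback: apply the model to the uploaded image at 256x256.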
def inference(img):
    out = face2drag(img, 256)
    return out
title = "Apocalyptify"
description = "How will your face look after the Apocalypse? Will you become a Zombie? A Ghoul? A person who fights them? Upload an image with a face, or click one of the examples below. If a face cannot be detected, a face will still be created based on the features of the input."
article = "<p style='text-align: center'><a href='https://github.com/justinpinkney/pixel2style2pixel/tree/nw' target='_blank'>Github Repo</a></p><p style='text-align: center'>samples: <img src='https://hf.space/gradioiframe/Norod78/Apocalyptify/file/Sample00001.jpg' alt='Sample00001'/><img src='https://hf.space/gradioiframe/Norod78/Apocalyptify/file/Sample00002.jpg' alt='Sample00002'/><img src='https://hf.space/gradioiframe/Norod78/Apocalyptify/file/Sample00003.jpg' alt='Sample00003'/><img src='https://hf.space/gradioiframe/Norod78/Apocalyptify/file/Sample00004.jpg' alt='Sample00004'/><img src='https://hf.space/gradioiframe/Norod78/Apocalyptify/file/Sample00005.jpg' alt='Sample00005'/></p><p>The Apocalypse model was fine-tuned from a pre-trained Pixel2Style2Pixel model by Doron Adler</p>"
examples = [['Example00001.jpg'], ['Example00002.jpg'], ['Example00003.jpg'], ['Example00004.jpg'], ['Example00005.jpg'], ['Example00006.jpg']]
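
# Build and launch the Gradio interface (legacy gr.inputs / gr.outputs API).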
gr.Interface(
    inference,
    gr.inputs.Image(type="pil", shape=(256, 256)),
    gr.outputs.Image(type="pil"),
    title=title,
    description=description,
    article=article,
    examples=examples,
    enable_queue=True
).launch()