import os
import sys
import collections
from pathlib import Path
from typing import Union, List

import numpy as np
import onnxruntime as rt
import PIL
from PIL import Image, ImageOps, ImageFile
import requests
import scipy.ndimage

MODEL_FILE = "ffhqu2vintage512_pix2pixHD_v1E11-inp2inst-simp.onnx"

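# Create the ONNX Runtime session for the vintage-style generator, capping both
# inter-op (graph-level) and intra-op (operator-level) parallelism at 4 threads.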
so = rt.SessionOptions()
so.inter_op_num_threads = 4
so.intra_op_num_threads = 4
session = rt.InferenceSession(MODEL_FILE, sess_options=so)
input_name = session.get_inputs()[0].name
print("input_name = " + str(input_name))
output_name = session.get_outputs()[0].name
print("output_name = " + str(output_name))

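# dlib is required by the face_detection helper module; install it at startup
# since it is assumed not to be preinstalled in the runtime environment.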
os.system("pip install dlib")
import face_detection

def array_to_image(array_in):
    """Convert a [-1, 1] NCHW model output array back to a PIL image."""
    array_in = np.squeeze(255 * (array_in + 1) / 2)
    array_in = np.transpose(array_in, (1, 2, 0))
    im = Image.fromarray(array_in.astype(np.uint8))
    return im

def image_as_array(image_in):
    """Convert a PIL image to a [-1, 1] NCHW float32 array for the model."""
    im_array = np.array(image_in, np.float32)
    im_array = (im_array / 255) * 2 - 1
    im_array = np.transpose(im_array, (2, 0, 1))
    im_array = np.expand_dims(im_array, 0)
    return im_array

def find_aligned_face(image_in, size=512):
    """Detect the first face in the image and return (aligned_image, n_faces, quad)."""
    aligned_image, n_faces, quad = face_detection.align(image_in, face_index=0, output_size=size)
    return aligned_image, n_faces, quad

def align_first_face(image_in, size=512):
    """Align the first detected face; if no face is found, fall back to a plain resize."""
    aligned_image, n_faces, quad = find_aligned_face(image_in, size=size)
    if n_faces == 0:
        try:
            image_in = ImageOps.exif_transpose(image_in)
        except Exception:
            print("exif problem, not rotating")
        image_in = image_in.resize((size, size))
        im_array = image_as_array(image_in)
    else:
        im_array = image_as_array(aligned_image)

    return im_array

def img_concat_h(im1, im2):
    """Concatenate two images horizontally (im1 on the left, im2 on the right)."""
    dst = Image.new('RGB', (im1.width + im2.width, im1.height))
    dst.paste(im1, (0, 0))
    dst.paste(im2, (im1.width, 0))
    return dst

import gradio as gr

def face2vintage(
    img: Image.Image,
    size: int
) -> Image.Image:
    """Align the face, run the ONNX generator, and return the aligned input and
    the stylized output concatenated side by side."""
    aligned_img = align_first_face(img, size=size)
    if aligned_img is None:
        output = None
    else:
        output = session.run([output_name], {input_name: aligned_img})[0]
        output = array_to_image(output)
        aligned_img = array_to_image(aligned_img).resize((output.width, output.height))
        output = img_concat_h(aligned_img, output)

    return output

def inference(img):
    out = face2vintage(img, 512)
    return out

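# Example (assumed usage, not part of the original app): run the pipeline on a
# local file without the Gradio UI. "portrait.jpg" is a hypothetical path.
#
#   im = Image.open("portrait.jpg").convert("RGB")
#   result = inference(im)
#   result.save("portrait_vintage.jpg")
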
title = "<span style='color: #191970;'>Aiconvert.online</span>"
description = "Style a face to look more \"Vintage\". Upload an image with a face, or click on one of the examples below. If no face is detected, an image will still be created."
article = ""

examples = [['Example00001.jpg'], ['Example00002.jpg'], ['Example00003.jpg'], ['Example00004.jpg'], ['Example00005.jpg'], ['Example00006.jpg']]
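# Note: gr.inputs / gr.outputs and the enable_queue / allow_flagging keyword
# arguments follow the legacy Gradio 2.x Interface API; newer Gradio releases
# replace them with gr.Image and different queueing/flagging options.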
gr.Interface(
    inference,
    gr.inputs.Image(type="pil", label="Input"),
    gr.outputs.Image(type="pil", label="Output"),
    title=title,
    description=description,
    article=article,
    css=".gradio-container {background-color: #FFF8DC;} footer{display:none !important;}",
    examples=examples,
    enable_queue=True,
    allow_flagging=False
).launch()