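# Pixera: a Gradio app that turns uploaded images and GIFs into pixel art.
# Pipeline: AnimeGAN2 ("face2paint") stylization -> optional U2Net foreground
# segmentation -> pixelation via the project's pixL/combine helpers.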
import os
import cv2
import torch
import warnings
import numpy as np
import gradio as gr
import paddlehub as hub
from PIL import Image
from methods.img2pixl import pixL
from examples.pixelArt.combine import combine
from methods.media import Media
warnings.filterwarnings("ignore")
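# Load the U2Net segmentation module from PaddleHub and the AnimeGAN2
# "face2paint" stylizer and generator from torch.hub; prefer GPU when available.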
U2Net = hub.Module(name='U2Net')
device = "cuda" if torch.cuda.is_available() else "cpu"
face2paint = torch.hub.load("bryandlee/animegan2-pytorch:main", "face2paint", device=device, size=512)
model = torch.hub.load("bryandlee/animegan2-pytorch", "generator", device=device).eval()
def initilize(media, pixel_size, checkbox1):
    # Author: Alican Akca
    if media.name.endswith('.gif'):
        # Split the GIF into frames and pixelate them
        return Media().split(media.name, pixel_size, 'gif')
    elif media.name.endswith('.mp4'):
        # Video processing is currently suspended
        return None  # Media().split(media.name, pixel_size, "video")
    else:
        media = Image.open(media.name).convert("RGB")
        # Stylize with AnimeGAN2, then convert the PIL output to an OpenCV array
        media = cv2.cvtColor(np.asarray(face2paint(model, media)), cv2.COLOR_BGR2RGB)
        if checkbox1:
            # Object-oriented inference: segment the foreground with U2Net,
            # pixelate the foreground and its mask, then recombine with the background
            result = U2Net.Segmentation(images=[media],
                                        paths=None,
                                        batch_size=1,
                                        input_size=320,
                                        output_dir='output',
                                        visualization=True)
            result = combine().combiner(images=pixL().toThePixL([result[0]['front'][:, :, ::-1], result[0]['mask']],
                                                                pixel_size),
                                        background_image=media)
        else:
            # Pixelate the whole image
            result = pixL().toThePixL([media], pixel_size)
        result = Image.fromarray(result)
        result.save('cache.png')
        return [None, result, 'cache.png']
inputs = [gr.File(label="Media"),
          gr.Slider(4, 100, value=12, step=2, label="Pixel Size"),
          gr.Checkbox(label="Object-Oriented Inference", value=False)]
outputs = [gr.Video(label="Pixed Media"),
           gr.Image(label="Pixed Media"),
           gr.File(label="Download")]
title = "Pixera: Create your own Pixel Art"
description = """Object-Oriented Inference is currently only available for images. Also, video processing is currently suspended."""
gr.Interface(fn=initilize,
             inputs=inputs,
             outputs=outputs,
             title=title,
             description=description).launch()