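"""Gradio demo bundling five image-to-image tools behind one UI: art style
transfer (TF Hub arbitrary-image-stylization), object erasing
(simple-lama-inpainting), in-painting (PowerPaint), background removal (rembg),
and image upscaling (Real-ESRGAN). Each handler saves its inputs and results as
timestamped files under ./tmp and returns the result path to Gradio."""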
import os
# os.environ["CUDA_VISIBLE_DEVICES"] = "2"
import tensorflow as tf
import gradio as gr
import tensorflow_hub as hub
import random
import time
import PIL.Image
from PIL import Image
import numpy as np
import requests
from io import BytesIO
# from diffusers import StableDiffusionUpscalePipeline
from simple_lama_inpainting import SimpleLama
import torch
from shutil import copyfile
from PowerPaint import app
import argparse

def pil_to_binary_mask(pil_image, threshold=0):
    np_image = np.array(pil_image)
    grayscale_image = Image.fromarray(np_image).convert("L")
    binary_mask = np.array(grayscale_image) > threshold
    mask = np.zeros(binary_mask.shape, dtype=np.uint8)
    for i in range(binary_mask.shape[0]):
        for j in range(binary_mask.shape[1]):
            if binary_mask[i, j]:
                mask[i, j] = 1
    mask = (mask * 255).astype(np.uint8)
    output_mask = Image.fromarray(mask)
    return output_mask

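# Note on pil_to_binary_mask: the per-pixel loop should be equivalent to a
# single vectorized expression, e.g.
#   mask = ((np.array(grayscale_image) > threshold) * 255).astype(np.uint8)
# which is considerably faster on large masks.
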
def tensor_to_image(tensor):
    tensor = tensor * 255
    tensor = np.array(tensor, dtype=np.uint8)
    if np.ndim(tensor) > 3:
        assert tensor.shape[0] == 1
        tensor = tensor[0]
    return PIL.Image.fromarray(tensor)

def load_img(path_to_img):
    max_dim = 512
    img = tf.io.read_file(path_to_img)
    img = tf.image.decode_image(img, channels=3)
    img = tf.image.convert_image_dtype(img, tf.float32)
    shape = tf.cast(tf.shape(img)[:-1], tf.float32)
    long_dim = max(shape)
    scale = max_dim / long_dim
    new_shape = tf.cast(shape * scale, tf.int32)
    img = tf.image.resize(img, new_shape)
    img = img[tf.newaxis, :]
    return img

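# Note on load_img: this is the standard TF style-transfer preprocessing: decode
# the file, convert to float32, resize so the longer side is at most 512 px
# (preserving aspect ratio), and add a batch dimension so the hub model receives
# a [1, H, W, 3] tensor.
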
# Main stylization logic (simple version)
def start_stylize_simple(img, style_img):
    # global hub_model
    hub_model = hub.load('https://tfhub.dev/google/magenta/arbitrary-image-stylization-v1-256/2')
    # Save to disk, using the current timestamp as an ID to avoid filename collisions
    ID = int(time.time())
    img.save(filepath + f'/tmp/tmp_image-{ID}.jpg')
    style_img.save(filepath + f'/tmp/tmp_style_image-{ID}.jpg')
    # Load the input images.
    content_image = load_img(filepath + f'/tmp/tmp_image-{ID}.jpg')
    style_image = load_img(filepath + f'/tmp/tmp_style_image-{ID}.jpg')
    stylized_image = hub_model(tf.constant(content_image), tf.constant(style_image))[0]
    tensor_to_image(stylized_image).save(filepath + f'/tmp/result-{ID}.jpg')
    return filepath + f'/tmp/result-{ID}.jpg'

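# Note on start_stylize_simple: because the `global hub_model` line is commented
# out, the TF Hub model is re-loaded on every request. TF Hub keeps a local
# download cache, so only the first call pays the download cost, but caching the
# model in a module-level variable would avoid the repeated load entirely.
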
def background_remove(img):
    from rembg import new_session
    from rembg import remove
    session = new_session('isnet-general-use')
    # Save to disk, using the current timestamp as an ID to avoid filename collisions
    ID = int(time.time())
    img.save(filepath + f'/tmp/tmp_image-{ID}.jpg')
    with open(filepath + f'/tmp/tmp_image-{ID}.jpg', 'rb') as i:
        with open(filepath + f'/tmp/tmp_result-{ID}.jpg', 'wb') as o:
            input_bytes = i.read()
            output_bytes = remove(input_bytes, session=session)
            o.write(output_bytes)
    return filepath + f'/tmp/tmp_result-{ID}.jpg'

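# Note on background_remove: rembg's remove() normally returns PNG-encoded bytes
# with an alpha channel, so the file written above contains PNG data despite its
# .jpg name; PIL and Gradio still open it correctly because the format is
# detected from the file header, not the extension.
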
def object_remove(imgs):
    ts = int(time.time())
    os.mkdir(filepath + f'/tmp/tmp_image-{ts}')
    os.mkdir(filepath + f'/tmp/tmp_mask-{ts}')
    os.mkdir(filepath + f'/tmp/tmp_output-{ts}')
    img = imgs["background"].convert("RGB")
    mask = pil_to_binary_mask(imgs['layers'][-1].convert("RGB"))
    img.save(filepath + f'/tmp/tmp_image-{ts}/image.png')
    mask.save(filepath + f'/tmp/tmp_mask-{ts}/image.png')
    simple_lama = SimpleLama()
    img_path = filepath + f'/tmp/tmp_image-{ts}/image.png'
    mask_path = filepath + f'/tmp/tmp_mask-{ts}/image.png'
    image = Image.open(img_path)
    mask = Image.open(mask_path).convert('L')
    result = simple_lama(image, mask)
    result.save(f"{filepath}/tmp/tmp_output-{ts}/image.png")
    # os.system(f'simple_lama {filepath}/tmp/tmp_image-{ts}/image.png {filepath}/tmp/tmp_mask-{ts}/image.png {filepath}/tmp/tmp_output-{ts}/image.png')
    # os.system(f'iopaint run --model=lama --device=cuda --image={filepath}/tmp/tmp_image-{ts} --mask={filepath}/tmp/tmp_mask-{ts} --output={filepath}/tmp/tmp_output-{ts}')
    # filename = os.listdir(filepath + f'/tmp/tmp_output-{ts}')[0]
    return filepath + f'/tmp/tmp_output-{ts}/image.png'

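# Note on object_remove: the gr.ImageEditor value is a dict whose 'background'
# entry is the uploaded image and whose 'layers' entries are the strokes drawn
# on top. The last layer becomes the binary mask, and SimpleLama inpaints the
# white (non-zero) mask pixels.
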
def upscale(img):  # , prompt, upscale_radio):
    # Save to disk, using the current timestamp as an ID to avoid filename collisions
    ID = int(time.time())
    img.save(filepath + f'/tmp/tmp_image-{ID}.jpg')
    if False:  # upscale_radio == 'Stable Diffusion x4 upscaler':
        # Disabled branch: needs the commented-out diffusers import and a prompt argument.
        # load model and scheduler
        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipeline = StableDiffusionUpscalePipeline.from_pretrained(model_id, torch_dtype=torch.float16)
        pipeline = pipeline.to("cuda")
        # let's download an image
        # url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale/low_res_cat.png"
        # response = requests.get(url)
        low_res_img = Image.open(filepath + f'/tmp/tmp_image-{ID}.jpg')
        width, height = low_res_img.size
        low_res_img = low_res_img.resize((128, 128))
        # prompt = "a white cat"
        upscaled_image = pipeline(prompt=prompt, image=low_res_img).images[0]
        upscaled_image.resize((width, height)).save(filepath + f'/tmp/tmp_result-{ID}.jpg')
        # Image.open(filepath + f'/tmp/tmp_result-{ID}.jpg').resize((width, height))
    else:
        os.system(f'python3 {filepath}/Real-ESRGAN/inference_realesrgan.py -n RealESRGAN_x4plus -i {filepath}/tmp/tmp_image-{ID}.jpg')
        copyfile(f'{filepath}/results/tmp_image-{ID}_out.jpg', f'{filepath}/tmp/tmp_result-{ID}.jpg')
    return filepath + f'/tmp/tmp_result-{ID}.jpg'

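# Caveat for upscale (assumption about Real-ESRGAN defaults): without an output
# flag, inference_realesrgan.py writes to a results/ directory relative to the
# current working directory, so the copyfile above only finds
# {filepath}/results/... when the app is launched from the repository root.
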
def in_painting(*args):
    ID = int(time.time())
    global flag
    global controller
    if flag == 0:
        # Lazily build the PowerPaint controller on the first request, falling
        # back to the alternative configuration if the first attempt fails.
        try:
            controller = app.PowerPaintController(weight_dtype, "./checkpoints/ppt-v1", True, "ppt-v1")
        except Exception:
            controller = app.PowerPaintController(weight_dtype, "./checkpoints/ppt-v1", False, "ppt-v1")
        flag += 1  # mark as initialized in either case so the pipeline is not rebuilt
    result = controller.infer(*args)[0][0]
    result.save(f'{filepath}/tmp/tmp_result-{ID}.jpg')
    return f'{filepath}/tmp/tmp_result-{ID}.jpg'

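# Note on in_painting: `flag` and `controller` are module-level state defined in
# the __main__ block below, so the heavyweight PowerPaint pipeline is built once
# on the first request and reused for subsequent ones.
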
def radio_click(choice):
    # Show only the block that matches the selected functionality; the order of
    # the updates matches the outputs list passed to radio.change() below.
    if choice == "Art style transfer":
        return [gr.update(visible=True), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False)]
    elif choice == "Object erasing":
        return [gr.update(visible=False), gr.update(visible=True), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False)]
    elif choice == "In painting":
        return [gr.update(visible=False), gr.update(visible=False), gr.update(visible=True), gr.update(visible=False), gr.update(visible=False)]
    elif choice == "Background removal":
        return [gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)]
    elif choice == "Image upscaling":
        return [gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=True)]
    else:
        return [gr.update(visible=True), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False)]

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("--weight_dtype", type=str, default="float16")
    parser.add_argument("--checkpoint_dir", type=str, default="./checkpoints/ppt-v1")
    parser.add_argument("--version", type=str, default="ppt-v1")
    parser.add_argument("--share", action="store_true")
    parser.add_argument(
        "--local_files_only", action="store_true", help="use cached files without requesting them from the hub"
    )
    parser.add_argument("--port", type=int, default=7860)
    args = parser.parse_args()

    # Weight dtype for the PowerPaint controller (built lazily in in_painting).
    weight_dtype = torch.float16 if args.weight_dtype == "float16" else torch.float32
    flag = 0
    filepath = os.path.dirname(os.path.abspath(__file__))

    # Let TensorFlow grow GPU memory on demand instead of reserving it all up front.
    physical_devices = tf.config.experimental.list_physical_devices('GPU')
    for i in physical_devices:
        tf.config.experimental.set_memory_growth(i, True)
    os.environ['TFHUB_MODEL_LOAD_FORMAT'] = 'COMPRESSED'
    # hub_model = hub.load('https://tfhub.dev/google/magenta/arbitrary-image-stylization-v1-256/2')
    os.environ['GRADIO_TEMP_DIR'] = "/home/gradio_demos/tmp"
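    # Note: the handlers write to filepath + '/tmp', which is assumed to already
    # exist next to this script (it is never created here), and GRADIO_TEMP_DIR
    # is hard-coded to /home/gradio_demos/tmp.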
    with gr.Blocks() as demo:
        gr.Markdown("# Image2Image Demos")
        radio = gr.Radio(["Art style transfer", "Object erasing", "In painting", "Background removal", "Image upscaling"], value="Art style transfer", label="Choose functionality")
        with gr.Column(visible=True) as art_style_transfer_block:
            gr.Markdown("## Art style transfer")
            gr.Markdown("### Using [arbitrary-image-stylization-v1](https://www.kaggle.com/models/google/arbitrary-image-stylization-v1/tensorFlow1/256/2) model")
            with gr.Row():
                with gr.Column():
                    img = gr.Image(sources='upload', type="pil", label='Image to apply art style')
                    img_list = os.listdir(filepath + "/images")
                    img_list_path = [os.path.join(filepath + "/images", image) for image in img_list]
                    example = gr.Examples(
                        inputs=img,
                        examples_per_page=6,
                        examples=img_list_path
                    )
                with gr.Column():
                    style_img = gr.Image(label="Art style image", sources='upload', type="pil")
                    style_list = os.listdir(filepath + "/style_images")
                    style_list_path = [os.path.join(filepath + "/style_images", style_image) for style_image in style_list]
                    example = gr.Examples(
                        inputs=style_img,
                        examples_per_page=6,
                        examples=style_list_path
                    )
                with gr.Column():
                    # image_out = gr.Image(label="Output", elem_id="output-img", height=400)
                    image_out = gr.Image(label="Stylized image", elem_id="output-img", show_share_button=False, type='filepath')
                    stylize_button = gr.Button(value="Stylize")
        with gr.Column(visible=False) as object_erasing_block:
            gr.Markdown("## Object erasing")
            gr.Markdown("### Using [lama](https://github.com/enesmsahin/simple-lama-inpainting) model")
            with gr.Row():
                with gr.Column():
                    imgs4 = gr.ImageEditor(sources='upload', type="pil", label='Image to erase object', interactive=True)
                    img_list = os.listdir(filepath + "/images4")
                    img_list_path = [os.path.join(filepath + "/images4", image) for image in img_list]
                    example = gr.Examples(
                        inputs=imgs4,
                        examples_per_page=6,
                        examples=img_list_path
                    )
                with gr.Column():
                    image_out4 = gr.Image(label="Object removed image", show_share_button=False, type='filepath')
                    object_remove_button = gr.Button(value="Remove object")
        with gr.Column(visible=False) as in_painting_block:
            gr.Markdown("## In painting")
            gr.Markdown("### Using [Powerpaint](https://github.com/open-mmlab/PowerPaint) model")
            with gr.Row():
                with gr.Column():
                    # gr.Markdown("### Input image and draw mask")
                    input_image = gr.ImageEditor(sources="upload", type="pil", label='Image to in-paint', interactive=True)
                    img_list = os.listdir(filepath + "/images4")
                    img_list_path = [os.path.join(filepath + "/images4", image) for image in img_list]
                    example = gr.Examples(
                        inputs=input_image,
                        examples_per_page=6,
                        examples=img_list_path
                    )
                    task = gr.Radio(
                        ["text-guided", "object-removal", "shape-guided", "image-outpainting"],
                        show_label=False,
                        visible=False,
                    )
                    # Text-guided object inpainting
                    with gr.Tab("Text-guided object inpainting") as tab_text_guided:
                        enable_text_guided = gr.Checkbox(
                            label="Enable text-guided object inpainting", value=True, interactive=False, visible=False
                        )
                        text_guided_prompt = gr.Textbox(label="Prompt")
                        text_guided_negative_prompt = gr.Textbox(label="Negative prompt")
                        tab_text_guided.select(fn=app.select_tab_text_guided, inputs=None, outputs=task)
                        # currently, we only support controlnet in PowerPaint-v1
                        if args.version == "ppt-v1":
                            # gr.Markdown("### Controlnet setting")
                            enable_control = gr.Checkbox(
                                label="Enable controlnet", info="Enable this if you want to use controlnet", visible=False
                            )
                            controlnet_conditioning_scale = gr.Slider(
                                label="controlnet conditioning scale",
                                minimum=0,
                                maximum=1,
                                step=0.05,
                                value=0.5,
                                visible=False,
                            )
                            control_type = gr.Radio(["canny", "pose", "depth", "hed"], label="Control type", visible=False)
                            input_control_image = gr.ImageEditor(sources="upload", type="pil", visible=False)
                    # Object removal inpainting
                    with gr.Tab("Object removal inpainting", visible=False) as tab_object_removal:
                        enable_object_removal = gr.Checkbox(
                            label="Enable object removal inpainting",
                            value=True,
                            info="The recommended configuration for the Guidance Scale is 10 or higher. "
                                 "If undesired objects appear in the masked area, "
                                 "you can address this by specifically increasing the Guidance Scale.",
                            interactive=False,
                        )
                        removal_prompt = gr.Textbox(label="Prompt")
                        removal_negative_prompt = gr.Textbox(label="Negative prompt")
                        tab_object_removal.select(fn=app.select_tab_object_removal, inputs=None, outputs=task)
                    # Image outpainting
                    with gr.Tab("Image outpainting", visible=False) as tab_image_outpainting:
                        enable_object_removal = gr.Checkbox(
                            label="Enable image outpainting",
                            value=True,
                            info="The recommended configuration for the Guidance Scale is 10 or higher. "
                                 "If unwanted random objects appear in the extended image region, "
                                 "you can enhance the cleanliness of the extension area by increasing the Guidance Scale.",
                            interactive=False,
                        )
                        outpaint_prompt = gr.Textbox(label="Outpainting prompt")
                        outpaint_negative_prompt = gr.Textbox(label="Outpainting negative prompt")
                        horizontal_expansion_ratio = gr.Slider(
                            label="horizontal expansion ratio",
                            minimum=1,
                            maximum=4,
                            step=0.05,
                            value=1,
                        )
                        vertical_expansion_ratio = gr.Slider(
                            label="vertical expansion ratio",
                            minimum=1,
                            maximum=4,
                            step=0.05,
                            value=1,
                        )
                        tab_image_outpainting.select(fn=app.select_tab_image_outpainting, inputs=None, outputs=task)
                    # Shape-guided object inpainting
                    with gr.Tab("Shape-guided object inpainting", visible=False) as tab_shape_guided:
                        enable_shape_guided = gr.Checkbox(
                            label="Enable shape-guided object inpainting", value=True, interactive=False
                        )
                        shape_guided_prompt = gr.Textbox(label="Shape-guided prompt")
                        shape_guided_negative_prompt = gr.Textbox(label="Shape-guided negative prompt")
                        fitting_degree = gr.Slider(
                            label="fitting degree",
                            minimum=0,
                            maximum=1,
                            step=0.05,
                            value=1,
                        )
                        tab_shape_guided.select(fn=app.select_tab_shape_guided, inputs=None, outputs=task)
                    seed = gr.Slider(
                        label="Seed",
                        minimum=0,
                        maximum=2147483647,
                        step=1,
                        randomize=True,
                    )
                    with gr.Accordion("Advanced options", open=False, visible=False):
                        ddim_steps = gr.Slider(label="Steps", minimum=1, maximum=50, value=45, step=1)
                        scale = gr.Slider(
                            label="Guidance Scale",
                            info="For object removal and image outpainting, it is recommended to set the value at 10 or above.",
                            minimum=0.1,
                            maximum=30.0,
                            value=7.5,
                            step=0.1,
                        )
                with gr.Column():
                    # gr.Markdown("### Inpainting result")
                    # inpaint_result = gr.Gallery(label="Generated image", show_label=True, columns=1)
                    inpaint_result = gr.Image(label="Generated image", elem_id="output-img", show_share_button=False, type='filepath')
                    # gr.Markdown("### Mask")
                    gallery = gr.Gallery(label="Generated masks", show_label=False, columns=2, visible=False)
                    run_button = gr.Button(value="In-paint")
                    run_button.click(
                        fn=in_painting,  # controller.infer
                        inputs=[
                            input_image,
                            text_guided_prompt,
                            text_guided_negative_prompt,
                            shape_guided_prompt,
                            shape_guided_negative_prompt,
                            fitting_degree,
                            ddim_steps,
                            scale,
                            seed,
                            task,
                            vertical_expansion_ratio,
                            horizontal_expansion_ratio,
                            outpaint_prompt,
                            outpaint_negative_prompt,
                            removal_prompt,
                            removal_negative_prompt,
                            enable_control,
                            input_control_image,
                            control_type,
                            controlnet_conditioning_scale,
                        ],
                        outputs=[inpaint_result],  # [inpaint_result, gallery]
                    )
        with gr.Column(visible=False) as background_removal_block:
            gr.Markdown("## Background removal")
            gr.Markdown("### Using [rembg](https://pypi.org/project/rembg/) model")
            with gr.Row():
                with gr.Column():
                    img2 = gr.Image(sources='upload', type="pil", label='Image to remove background')
                    img_list = os.listdir(filepath + "/images2")
                    img_list_path = [os.path.join(filepath + "/images2", image) for image in img_list]
                    example = gr.Examples(
                        inputs=img2,
                        examples_per_page=6,
                        examples=img_list_path
                    )
                with gr.Column():
                    # image_out = gr.Image(label="Output", elem_id="output-img", height=400)
                    image_out2 = gr.Image(label="Background removed image", elem_id="output-img", show_share_button=False, type='filepath')
                    background_remove_button = gr.Button(value="Remove background")
        with gr.Column(visible=False) as image_upscaling_block:
            gr.Markdown("## Image upscaling")
            # gr.Markdown("### Using [Stable Diffusion x4 upscaler](https://huggingface.co/stabilityai/stable-diffusion-x4-upscaler) or [Real-ESRGAN](https://github.com/xinntao/Real-ESRGAN) model")
            gr.Markdown("### Using [Real-ESRGAN](https://github.com/xinntao/Real-ESRGAN) model")
            with gr.Row():
                with gr.Column():
                    img3 = gr.Image(sources='upload', type="pil", label='Image to upscale')
                    img_list = os.listdir(filepath + "/images3")
                    img_list_path = [os.path.join(filepath + "/images3", image) for image in img_list]
                    example = gr.Examples(
                        inputs=img3,
                        examples_per_page=6,
                        examples=img_list_path
                    )
                    # prompt = gr.Textbox(label="Prompt")
                    # upscale_radio = gr.Radio(["Stable Diffusion x4 upscaler", "Real-ESRGAN"], value="Stable Diffusion x4 upscaler", label="Choose a model")
                with gr.Column():
                    # image_out = gr.Image(label="Output", elem_id="output-img", height=400)
                    image_out3 = gr.Image(label="Upscaled image", elem_id="output-img", show_share_button=False, type='filepath')
                    upscale_button = gr.Button(value="Upscale")
        # Wire the buttons to their handlers and the selector to the visibility toggles.
        stylize_button.click(fn=start_stylize_simple, inputs=[img, style_img], outputs=[image_out], api_name='stylize')
        background_remove_button.click(fn=background_remove, inputs=[img2], outputs=[image_out2], api_name='background_removal')
        object_remove_button.click(fn=object_remove, inputs=[imgs4], outputs=[image_out4], api_name='object_removal')
        upscale_button.click(fn=upscale, inputs=[img3], outputs=[image_out3], api_name='upscale')
        radio.change(radio_click, radio, [art_style_transfer_block, object_erasing_block, in_painting_block, background_removal_block, image_upscaling_block])

    # Honor the --share and --port flags; their defaults match the previously hard-coded values.
    demo.launch(share=args.share, server_port=args.port, server_name="0.0.0.0", ssl_verify=False)
    # demo.launch(share=True)
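# The button handlers expose named endpoints (api_name='stylize', 'upscale', ...),
# so the demo can also be driven programmatically. A rough sketch with
# gradio_client (the exact call signature depends on the installed Gradio
# version, and the file names are placeholders):
#   from gradio_client import Client, handle_file
#   client = Client("http://localhost:7860")
#   result = client.predict(handle_file("content.jpg"), handle_file("style.jpg"), api_name="/stylize")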