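# Gradio Space: BiRefNet demo for subject extraction (background removal).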
import os
import cv2
import numpy as np
import torch
import gradio as gr
import spaces
from glob import glob
from typing import Tuple
from PIL import Image
from gradio_imageslider import ImageSlider
from transformers import AutoModelForImageSegmentation
from torchvision import transforms
import requests
from io import BytesIO
import zipfile
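
# Runtime setup: allow TF32 matmuls for speed, monkey-patch torch.jit.script to a no-op
# (a common workaround when scripted modules in remote model code fail in this environment),
# and run on GPU when available.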
torch.set_float32_matmul_precision('high')
torch.jit.script = lambda f: f
device = "cuda" if torch.cuda.is_available() else "cpu"
### image_proc.py
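# Blur-fusion foreground estimation (see the Photoroom repository referenced below):
# given the RGB image and the predicted alpha mask, estimate clean foreground colors
# so the cut-out has less background color bleeding along soft edges.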
def refine_foreground(image, mask, r=90):
    if mask.size != image.size:
        mask = mask.resize(image.size)
    image = np.array(image) / 255.0
    mask = np.array(mask) / 255.0
    estimated_foreground = FB_blur_fusion_foreground_estimator_2(image, mask, r=r)
    image_masked = Image.fromarray((estimated_foreground * 255.0).astype(np.uint8))
    return image_masked
def FB_blur_fusion_foreground_estimator_2(image, alpha, r=90):
    # Thanks to the source: https://github.com/Photoroom/fast-foreground-estimation
    alpha = alpha[:, :, None]
    F, blur_B = FB_blur_fusion_foreground_estimator(image, image, image, alpha, r)
    return FB_blur_fusion_foreground_estimator(image, F, blur_B, alpha, r=6)[0]
def FB_blur_fusion_foreground_estimator(image, F, B, alpha, r=90):
    if isinstance(image, Image.Image):
        image = np.array(image) / 255.0
    blurred_alpha = cv2.blur(alpha, (r, r))[:, :, None]

    blurred_FA = cv2.blur(F * alpha, (r, r))
    blurred_F = blurred_FA / (blurred_alpha + 1e-5)

    blurred_B1A = cv2.blur(B * (1 - alpha), (r, r))
    blurred_B = blurred_B1A / ((1 - blurred_alpha) + 1e-5)
    F = blurred_F + alpha * (image - alpha * blurred_F - (1 - alpha) * blurred_B)
    F = np.clip(F, 0, 1)
    return F, blurred_B
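
# Preprocessing: resize to the working resolution (1024x1024 by default, matching the
# training resolution mentioned in the description) and normalize with ImageNet statistics.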
class ImagePreprocessor():
    def __init__(self, resolution: Tuple[int, int] = (1024, 1024)) -> None:
        self.transform_image = transforms.Compose([
            transforms.Resize(resolution),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
        ])

    def proc(self, image: Image.Image) -> torch.Tensor:
        image = self.transform_image(image)
        return image
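
# Mapping from use-case names to BiRefNet weight variants on the Hub. The weight selector
# in the UI below is commented out, so only the lite weights are actually loaded here.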
usage_to_weights_file = {
    'General': 'BiRefNet',
    'General-Lite': 'BiRefNet_lite',
    'General-Lite-2K': 'BiRefNet_lite-2K',
    'Matting': 'BiRefNet-matting',
    'Portrait': 'BiRefNet-portrait',
    'DIS': 'BiRefNet-DIS5K',
    'HRSOD': 'BiRefNet-HRSOD',
    'COD': 'BiRefNet-COD',
    'DIS-TR_TEs': 'BiRefNet-DIS5K-TR_TEs',
    'General-legacy': 'BiRefNet-legacy'
}
birefnet = AutoModelForImageSegmentation.from_pretrained('zhengpeng7/BiRefNet_lite', trust_remote_code=True)
birefnet.to(device)
birefnet.eval()
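
# Inference entry point. Accepts a single image (numpy array, file path, or URL) or a list
# of paths/URLs for batch mode; returns (cut-out with alpha, original) for the slider, or
# the saved file paths plus a zip archive in batch mode.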
@spaces.GPU
def predict(images):
    assert (images is not None), 'AssertionError: images cannot be None.'

    global birefnet
    # Load BiRefNet with chosen weights
    _weights_file = 'zhengpeng7/BiRefNet_lite'
    print('Using weights: {}.'.format(_weights_file))
    birefnet = AutoModelForImageSegmentation.from_pretrained(_weights_file, trust_remote_code=True)
    birefnet.to(device)
    birefnet.eval()

    # try:
    #     resolution = [int(int(reso)//32*32) for reso in resolution.strip().split('x')]
    # except:
    #     resolution = (1024, 1024) if weights_file not in ['General-Lite-2K'] else (2560, 1440)
    #     print('Invalid resolution input. Automatically changed to 1024x1024 or 2K.')

    if isinstance(images, list):
        # For tab_batch
        save_paths = []
        save_dir = 'preds-BiRefNet'
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        tab_is_batch = True
    else:
        images = [images]
        tab_is_batch = False

    for idx_image, image_src in enumerate(images):
        if isinstance(image_src, str):
            if os.path.isfile(image_src):
                image_ori = Image.open(image_src)
            else:
                response = requests.get(image_src)
                image_data = BytesIO(response.content)
                image_ori = Image.open(image_data)
        else:
            image_ori = Image.fromarray(image_src)

        image = image_ori.convert('RGB')
        # Preprocess the image
        image_preprocessor = ImagePreprocessor()  # (resolution=tuple(resolution))
        image_proc = image_preprocessor.proc(image)
        image_proc = image_proc.unsqueeze(0)

        # Prediction
        with torch.no_grad():
            preds = birefnet(image_proc.to(device))[-1].sigmoid().cpu()
        pred = preds[0].squeeze()

        # Show Results
        pred_pil = transforms.ToPILImage()(pred)
        image_masked = refine_foreground(image, pred_pil)
        image_masked.putalpha(pred_pil.resize(image.size))

        torch.cuda.empty_cache()

        if tab_is_batch:
            save_file_path = os.path.join(save_dir, "{}.png".format(os.path.splitext(os.path.basename(image_src))[0]))
            image_masked.save(save_file_path)
            save_paths.append(save_file_path)

    if tab_is_batch:
        zip_file_path = os.path.join(save_dir, "{}.zip".format(save_dir))
        with zipfile.ZipFile(zip_file_path, 'w') as zipf:
            for file in save_paths:
                zipf.write(file, os.path.basename(file))
        return save_paths, zip_file_path
    else:
        return (image_masked, image_ori)
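
# Example inputs: local files from examples/ plus a remote URL; each entry also carries a
# resolution string intended for the (currently disabled) resolution textbox.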
examples = [[_] for _ in glob('examples/*')][:]

# Add the option of resolution in a text box.
for idx_example, example in enumerate(examples):
    examples[idx_example].append('1024x1024')
examples.append(examples[-1].copy())
examples[-1][1] = '512x512'
examples_url = [
    ['https://hips.hearstapps.com/hmg-prod/images/gettyimages-1229892983-square.jpg'],
]
for idx_example_url, example_url in enumerate(examples_url):
    examples_url[idx_example_url].append('1024x1024')
descriptions = ('Upload a picture, and our model will extract a highly accurate segmentation of the subject in it.\n'
                ' The resolution used in our training was `1024x1024`, which is therefore the suggested resolution for good results!\n'
                ' Our code can be found at https://github.com/ZhengPeng7/BiRefNet.\n'
                ' We also maintain the HF model of BiRefNet at https://huggingface.co/ZhengPeng7/BiRefNet for easier access.')
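
# UI: a single-image tab whose output is an ImageSlider comparing the prediction with the original input.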
tab_image = gr.Interface(
    fn=predict,
    inputs=[
        gr.Image(label='Upload an image'),
        # gr.Textbox(lines=1, placeholder="Type the resolution (`WxH`) you want, e.g., `1024x1024`.", label="Resolution"),
        # gr.Radio(list(usage_to_weights_file.keys()), value='General', label="Weights", info="Choose the weights you want.")
    ],
    outputs=ImageSlider(label="BiRefNet's prediction", type="pil"),
    # examples=examples,
    api_name="image",
    description=descriptions,
)
demo = gr.TabbedInterface(
    [tab_image],
    ['image'],
    title="BiRefNet demo for subject extraction.",
)

if __name__ == "__main__":
    demo.launch(debug=True)
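
# A minimal client-side sketch for calling this app's "image" endpoint with gradio_client.
# The Space name is a placeholder, and the exact shape of the returned slider value may vary
# with the gradio/gradio_imageslider versions, so treat this as an illustration only:
#
#     from gradio_client import Client, handle_file
#
#     client = Client("<owner>/<space-name>")  # hypothetical Space id
#     result = client.predict(handle_file("input.jpg"), api_name="/image")
#     print(result)  # typically a pair: the cut-out prediction and the original image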