# bg_remover_v3 / app.py
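# Gradio demo that removes the background from an uploaded image or an image URL
# using BRIA AI's RMBG-1.4 segmentation model.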
import numpy as np
import torch
import torch.nn.functional as F
from torchvision.transforms.functional import normalize
import gradio as gr
from briarmbg import BriaRMBG
from PIL import Image
import requests
from io import BytesIO
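# Load the RMBG-1.4 model once at startup and move it to the GPU when available.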
net = BriaRMBG.from_pretrained("briaai/RMBG-1.4")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
net.to(device)
net.eval()
def resize_image(image):
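    # RMBG-1.4 expects a fixed 1024x1024 RGB input.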
image = image.convert('RGB')
model_input_size = (1024, 1024)
image = image.resize(model_input_size, Image.BILINEAR)
return image
def get_url_image(url):
    headers = {'User-Agent': 'gradio-app'}
    response = requests.get(url, headers=headers, timeout=30)
    response.raise_for_status()
    return BytesIO(response.content)
def load_image(image_array, image_url):
    # Prefer the URL field when it is filled in; otherwise use the uploaded array
    if image_url:
        print(f"Loading image from URL: {image_url}")
        image = Image.open(get_url_image(image_url))
    else:
        print("Loading image from file upload")
        image = Image.fromarray(image_array)
    return image
def process(image_array, image_url):
    try:
        # Load and prepare the input image
        orig_image = load_image(image_array, image_url)
        w, h = orig_image.size
image = resize_image(orig_image)
im_np = np.array(image)
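        # HWC uint8 -> NCHW float: scale to [0, 1], then shift to [-0.5, 0.5] (mean 0.5, std 1.0)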
im_tensor = torch.tensor(im_np, dtype=torch.float32).permute(2, 0, 1)
im_tensor = torch.unsqueeze(im_tensor, 0)
im_tensor = torch.divide(im_tensor, 255.0)
im_tensor = normalize(im_tensor, [0.5, 0.5, 0.5], [1.0, 1.0, 1.0])
        im_tensor = im_tensor.to(device)
        # Inference
        with torch.no_grad():
            result = net(im_tensor)
# Post-process
result = torch.squeeze(F.interpolate(result[0][0], size=(h, w), mode='bilinear'), 0)
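        # Min-max normalize the predicted mask to [0, 1]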
ma = torch.max(result)
mi = torch.min(result)
result = (result - mi) / (ma - mi)
# Image to PIL
        im_array = (result * 255).cpu().numpy().astype(np.uint8)
pil_im = Image.fromarray(np.squeeze(im_array))
# Paste the mask on the original image
new_im = Image.new("RGBA", pil_im.size, (0, 0, 0, 0))
new_im.paste(orig_image, mask=pil_im)
return new_im
except Exception as e:
print(f"Error during processing: {e}")
return None
title = "Background Removal"
description = r"""Background removal model developed by <a href='https://BRIA.AI' target='_blank'><b>BRIA.AI</b></a>, trained on a carefully curated dataset and released as an open-source model for non-commercial use.<br>
To try it, upload an image or paste an image URL and wait a moment. Read more in the model card: <a href='https://huggingface.co/briaai/RMBG-1.4' target='_blank'><b>briaai/RMBG-1.4</b></a>.<br>
"""
examples = [["./input.jpg", ""]]
demo = gr.Interface(
fn=process,
inputs=[
gr.Image(type="numpy", label="Upload Image"),
        gr.Textbox(label="Image URL (optional, takes priority over the upload)")
],
outputs="image",
examples=examples,
title=title,
description=description
)
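
# A minimal sketch of calling `process` directly, bypassing the UI. It assumes an
# `input.jpg` sits next to this file (the path is illustrative); uncomment to try:
#
#   cutout = process(np.array(Image.open("input.jpg")), "")
#   if cutout is not None:
#       cutout.save("output.png")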
if __name__ == "__main__":
demo.launch(share=False)