import numpy as np
import torch
import torch.nn.functional as F
from torchvision.transforms.functional import normalize
from huggingface_hub import hf_hub_download
import gradio as gr
from briarmbg import BriaRMBG
from PIL import Image

# Load the RMBG-1.4 weights from the Hugging Face Hub and put the model
# into inference mode on the available device.
net = BriaRMBG()
model_path = hf_hub_download("briaai/RMBG-1.4", "model.pth")
if torch.cuda.is_available():
    net.load_state_dict(torch.load(model_path))
    net = net.cuda()
else:
    net.load_state_dict(torch.load(model_path, map_location="cpu"))
net.eval()


def resize_image(image):
    """Resize the input to the 1024x1024 resolution expected by the model."""
    image = image.convert("RGB")
    model_input_size = (1024, 1024)
    image = image.resize(model_input_size, Image.BILINEAR)
    return image


def process(image):
    """Predict an alpha matte for `image` (an H x W x 3 numpy array) and return
    the original image composited onto a transparent background."""
    # Preprocess: resize to the model input size, scale to [0, 1] and normalize.
    orig_image = Image.fromarray(image)
    w, h = orig_image.size
    image = resize_image(orig_image)
    im_np = np.array(image)
    im_tensor = torch.tensor(im_np, dtype=torch.float32).permute(2, 0, 1)
    im_tensor = torch.unsqueeze(im_tensor, 0)
    im_tensor = torch.divide(im_tensor, 255.0)
    im_tensor = normalize(im_tensor, [0.5, 0.5, 0.5], [1.0, 1.0, 1.0])
    if torch.cuda.is_available():
        im_tensor = im_tensor.cuda()

    # Inference: the network returns nested outputs; result[0][0] is the mask,
    # which is resized back to the original image dimensions.
    with torch.no_grad():
        result = net(im_tensor)
    result = torch.squeeze(F.interpolate(result[0][0], size=(h, w), mode="bilinear"), 0)

    # Min-max normalize the mask to [0, 255] and use it as an alpha channel.
    ma = torch.max(result)
    mi = torch.min(result)
    result = (result - mi) / (ma - mi)
    im_array = (result * 255).cpu().numpy().astype(np.uint8)
    pil_im = Image.fromarray(np.squeeze(im_array))
    new_im = Image.new("RGBA", pil_im.size, (0, 0, 0, 0))
    new_im.paste(orig_image, mask=pil_im)
    return new_im
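
# A minimal local usage sketch (not executed by the Space itself; the filename
# "example.jpg" is only illustrative and assumed to exist next to this script):
#
#     from PIL import Image
#     import numpy as np
#
#     img = np.array(Image.open("example.jpg").convert("RGB"))
#     cutout = process(img)            # returns an RGBA PIL image
#     cutout.save("example_rgba.png")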
css = """
body {
font-family: 'Arial', sans-serif;
margin: 0;
padding: 0;
background-color: #f0f2f5;
color: #333;
}
h1 {
color: #0000ff;
}
p {
color: #000000;
}
.gradio-app, .gradio-content {
background-color: #ffffff;
border-radius: 8px;
border: 1px solid #ccc;
box-shadow: 0 10px 25px 0 rgba(0,0,0,0.1);
padding: 20px;
}
button {
border: none;
color: white;
padding: 10px 20px;
margin: 10px 0;
cursor: pointer;
border-radius: 5px;
background-image: linear-gradient(to right, #6a11cb 0%, #2575fc 100%);
box-shadow: 0 4px 6px rgba(0, 0, 0, 0.2);
transition: all 0.2s ease-in-out;
}
button:hover {
box-shadow: 0 6px 8px rgba(0, 0, 0, 0.3);
}
input, textarea {
border: 2px solid #2575fc;
border-radius: 4px;
padding: 10px;
margin: 10px 0;
width: 100%;
box-sizing: border-box;
box-shadow: inset 0 2px 4px rgba(0, 0, 0, 0.1);
}
.gradio-toolbar {
background-color: #f0f2f5;
}
footer {
visibility: hidden;
}
"""
title = "Background Removal"
description = """
This is a demo for BRIA RMBG 1.4 using the BRIA RMBG-1.4 image matting model as a backbone.<br>
Background removal model developed by <a href='https://BRIA.AI' target='_blank'><b>BRIA.AI</b></a>, trained on a carefully selected dataset and is available as an open-source model for non-commercial use.<br>
For a test, upload your image and wait. Read more at the model card <a href='https://huggingface.co/briaai/RMBG-1.4' target='_blank'><b>briaai/RMBG-1.4</b></a>.<br>
"""

demo = gr.Interface(
    fn=process,
    # `process` expects a numpy array (it calls Image.fromarray), so the input
    # type is "numpy"; the output is a PIL image with an alpha channel.
    inputs=gr.Image(type="numpy"),
    outputs=gr.Image(type="pil"),
    title=title,
    description=description,
    css=css,
)

if __name__ == "__main__":
    demo.launch(share=False)