Update app.py
app.py
CHANGED
@@ -9,6 +9,8 @@ from briarmbg import BriaRMBG
 import PIL
 from PIL import Image
 from typing import Tuple
+import requests
+from io import BytesIO
 
 net = BriaRMBG.from_pretrained("briaai/RMBG-1.4")
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
@@ -20,97 +22,46 @@ def get_url_im(url):
     response = requests.get(url, headers=user_agent)
     return BytesIO(response.content)
 
-def resize_image(
-
-    image = Image.open(
+def resize_image(image_url):
+    image_data = get_url_im(image_url)
+    image = Image.open(image_data)
     image = image.convert('RGB')
     model_input_size = (1024, 1024)
     image = image.resize(model_input_size, Image.BILINEAR)
     return image
 
-
-
+def process(image_url):
     # prepare input
-    orig_image =
-    w,h = orig_im_size = orig_image.size
-
-
-    im_tensor = torch.
-    im_tensor = torch.
-    im_tensor =
-    im_tensor = normalize(im_tensor,[0.5,0.5,0.5],[1.0,1.0,1.0])
+    orig_image = resize_image(image_url)
+    w, h = orig_im_size = orig_image.size
+    im_np = np.array(orig_image)
+    im_tensor = torch.tensor(im_np, dtype=torch.float32).permute(2, 0, 1)
+    im_tensor = torch.unsqueeze(im_tensor, 0)
+    im_tensor = torch.divide(im_tensor, 255.0)
+    im_tensor = normalize(im_tensor, [0.5, 0.5, 0.5], [1.0, 1.0, 1.0])
     if torch.cuda.is_available():
-        im_tensor=im_tensor.cuda()
+        im_tensor = im_tensor.cuda()
 
-    #inference
-    result=net(im_tensor)
+    # inference
+    result = net(im_tensor)
     # post process
-    result = torch.squeeze(F.interpolate(result[0][0], size=(h,w), mode='bilinear')
+    result = torch.squeeze(F.interpolate(result[0][0], size=(h, w), mode='bilinear'), 0)
     ma = torch.max(result)
     mi = torch.min(result)
-    result = (result-mi)/(ma-mi)
+    result = (result - mi) / (ma - mi)
     # image to pil
-    im_array = (result*255).cpu().data.numpy().astype(np.uint8)
+    im_array = (result * 255).cpu().data.numpy().astype(np.uint8)
     pil_im = Image.fromarray(np.squeeze(im_array))
     # paste the mask on the original image
-    new_im = Image.new("RGBA", pil_im.size, (0,0,0,0))
+    new_im = Image.new("RGBA", pil_im.size, (0, 0, 0, 0))
     new_im.paste(orig_image, mask=pil_im)
-
-
+
     return new_im
-    # return [new_orig_image, new_im]'''
-
-def process(image):
-
-    # prepare input
-    orig_image = Image.fromarray(image)
-    w,h = orig_im_size = orig_image.size
-
-    return orig_image
-    # return [new_orig_image, new_im]
-
-
-# block = gr.Blocks().queue()
-
-# with block:
-# gr.Markdown("## BRIA RMBG 1.4")
-# gr.HTML('''
-# <p style="margin-bottom: 10px; font-size: 94%">
-# This is a demo for BRIA RMBG 1.4 that using
-# <a href="https://huggingface.co/briaai/RMBG-1.4" target="_blank">BRIA RMBG-1.4 image matting model</a> as backbone.
-# </p>
-# ''')
-# with gr.Row():
-# with gr.Column():
-# input_image = gr.Image(sources=None, type="pil") # None for upload, ctrl+v and webcam
-# # input_image = gr.Image(sources=None, type="numpy") # None for upload, ctrl+v and webcam
-# run_button = gr.Button(value="Run")
-
-# with gr.Column():
-# result_gallery = gr.Gallery(label='Output', show_label=False, elem_id="gallery", columns=[1], height='auto')
-# ips = [input_image]
-# run_button.click(fn=process, inputs=ips, outputs=[result_gallery])
-
-# block.launch(debug = True)
-
-# block = gr.Blocks().queue()
-
-gr.Markdown("## BRIA RMBG 1.4")
-gr.HTML('''
-<p style="margin-bottom: 10px; font-size: 94%">
-This is a demo for BRIA RMBG 1.4 that using
-<a href="https://huggingface.co/briaai/RMBG-1.4" target="_blank">BRIA RMBG-1.4 image matting model</a> as backbone.
-</p>
-''')
-title = "Background Removal"
-description = r"""Background removal model developed by <a href='https://BRIA.AI' target='_blank'><b>BRIA.AI</b></a>, trained on a carefully selected dataset and is available as an open-source model for non-commercial use.<br>
-For test upload your image and wait. Read more at model card <a href='https://huggingface.co/briaai/RMBG-1.4' target='_blank'><b>briaai/RMBG-1.4</b></a>.<br>
-"""
-#examples = [['./input.jpg'],]
-# output = ImageSlider(position=0.5,label='Image without background', type="pil", show_download_button=True)
-# demo = gr.Interface(fn=process,inputs="image", outputs=output, examples=examples, title=title, description=description)
 
-
+iface = gr.Interface(
+    fn=process,
+    inputs=gr.inputs.Textbox(label="Text or Image URL"),
+    outputs=gr.outputs.Image(type="pil", label="Output Image"),
+)
 
-
-demo.launch(share=False)
+iface.launch()
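Note: the added `gr.Interface` call relies on the legacy `gr.inputs` / `gr.outputs` namespaces, which were deprecated in Gradio 3 and removed in Gradio 4. A minimal sketch of the same wiring against a current Gradio release is shown below; it assumes the `process(image_url)` function from the diff above is available in scope, and it is not part of the committed code.

```python
# Minimal sketch, assuming Gradio >= 4, where gr.inputs/gr.outputs no longer exist.
# Components are passed directly to gr.Interface; behavior otherwise matches the diff above.
# Assumes process(image_url) from app.py is defined in this scope.
import gradio as gr

iface = gr.Interface(
    fn=process,                                    # takes an image URL, returns an RGBA PIL image
    inputs=gr.Textbox(label="Image URL"),
    outputs=gr.Image(type="pil", label="Output Image"),
    title="Background Removal",
)

if __name__ == "__main__":
    iface.launch()
```

Since `process` returns an RGBA `PIL.Image`, `gr.Image(type="pil")` can display the cut-out result directly without further conversion.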