##########################################################
# 0. Environment setup and library imports
##########################################################
import os
import cv2
import numpy as np
import torch
import gradio as gr
import spaces
from glob import glob
from typing import Tuple
from PIL import Image
from gradio_imageslider import ImageSlider
from torchvision import transforms
import requests
from io import BytesIO
import zipfile
from safetensors.torch import load_file  # reads .safetensors weight files
# Transformers
from transformers import (
AutoConfig,
AutoModelForImageSegmentation,
)
# 1) Load the config first to avoid the tie_weights conflict
config = AutoConfig.from_pretrained(
    "zhengpeng7/BiRefNet",  # the Hugging Face model repo to pull from
trust_remote_code=True
)
# 2) Attach a dummy get_text_config method to the config (tie_word_embeddings=False)
def dummy_get_text_config(decoder=True):
return type("DummyTextConfig", (), {"tie_word_embeddings": False})()
config.get_text_config = dummy_get_text_config
# 3) ๋ชจ๋ธ ๊ตฌ์กฐ๋งŒ ๋งŒ๋“ค๊ธฐ (from_config) -> tie_weights ์ž๋™ ํ˜ธ์ถœ ์•ˆ ๋จ
birefnet = AutoModelForImageSegmentation.from_config(config, trust_remote_code=True)
birefnet.eval()
device = "cuda" if torch.cuda.is_available() else "cpu"
birefnet.to(device)
if device == "cuda":
    birefnet.half()  # run in fp16 on GPU only; half-precision convolutions are unreliable on CPU
# 4) Load the state_dict (weights) - example using a local file
# In practice, download "model.safetensors" first via hf_hub_download / snapshot_download, then load it
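# A minimal download sketch (assumes the `huggingface_hub` package is installed;
# the repo id and filename match the links used elsewhere in this file):
#   from huggingface_hub import hf_hub_download
#   weights_path = hf_hub_download(repo_id="ZhengPeng7/BiRefNet", filename="model.safetensors")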
print("Loading BiRefNet weights from local file: model.safetensors")
state_dict = load_file("model.safetensors")  # .safetensors files cannot be opened with torch.load()
missing, unexpected = birefnet.load_state_dict(state_dict, strict=False)
print("[Info] Missing keys:", missing)
print("[Info] Unexpected keys:", unexpected)
torch.cuda.empty_cache()
##########################################################
# 1. Image post-processing functions
##########################################################
def refine_foreground(image, mask, r=90):
    """Estimate a clean foreground image from the input and its predicted alpha mask."""
if mask.size != image.size:
mask = mask.resize(image.size)
image_np = np.array(image) / 255.0
mask_np = np.array(mask) / 255.0
estimated_foreground = FB_blur_fusion_foreground_estimator_2(image_np, mask_np, r=r)
image_masked = Image.fromarray((estimated_foreground * 255.0).astype(np.uint8))
return image_masked
def FB_blur_fusion_foreground_estimator_2(image, alpha, r=90):
    """Two-pass blur-fusion foreground estimation: a coarse pass, then a refinement pass with a small radius."""
alpha = alpha[:, :, None]
F, blur_B = FB_blur_fusion_foreground_estimator(image, image, image, alpha, r)
return FB_blur_fusion_foreground_estimator(image, F, blur_B, alpha, r=6)[0]
def FB_blur_fusion_foreground_estimator(image, F, B, alpha, r=90):
    """Single blur-fusion pass: estimate foreground F and background B from blurred, alpha-weighted averages."""
if isinstance(image, Image.Image):
image = np.array(image) / 255.0
blurred_alpha = cv2.blur(alpha, (r, r))[:, :, None]
blurred_FA = cv2.blur(F * alpha, (r, r))
blurred_F = blurred_FA / (blurred_alpha + 1e-5)
blurred_B1A = cv2.blur(B * (1 - alpha), (r, r))
blurred_B = blurred_B1A / ((1 - blurred_alpha) + 1e-5)
F = blurred_F + alpha * (image - alpha * blurred_F - (1 - alpha) * blurred_B)
F = np.clip(F, 0, 1)
return F, blurred_B
class ImagePreprocessor:
def __init__(self, resolution: Tuple[int, int] = (1024, 1024)) -> None:
self.transform_image = transforms.Compose([
transforms.Resize(resolution),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
])
def proc(self, image: Image.Image) -> torch.Tensor:
image = self.transform_image(image)
return image
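# Usage sketch for the preprocessor: turn a PIL image into the normalized
# (1, 3, H, W) tensor the model expects ("cat.jpg" is a hypothetical path):
#   tensor = ImagePreprocessor((1024, 1024)).proc(Image.open("cat.jpg").convert("RGB"))
#   tensor = tensor.unsqueeze(0).to(device)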
##########################################################
# 2. Example setup and utilities
##########################################################
usage_to_weights_file = {
'General': 'BiRefNet',
'General-HR': 'BiRefNet_HR',
'General-Lite': 'BiRefNet_lite',
'General-Lite-2K': 'BiRefNet_lite-2K',
'Matting': 'BiRefNet-matting',
'Portrait': 'BiRefNet-portrait',
'DIS': 'BiRefNet-DIS5K',
'HRSOD': 'BiRefNet-HRSOD',
'COD': 'BiRefNet-COD',
'DIS-TR_TEs': 'BiRefNet-DIS5K-TR_TEs',
'General-legacy': 'BiRefNet-legacy'
}
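# The mapping above resolves a UI choice to a weights repo name; a sketch of turning
# it into a full Hub repo id (assuming the ZhengPeng7 namespace, as in the links below):
#   repo_id = f"ZhengPeng7/{usage_to_weights_file['Matting']}"  # -> "ZhengPeng7/BiRefNet-matting"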
examples_image = [[path, "1024x1024", "General"] for path in glob('examples/*')]
examples_text = [[url, "1024x1024", "General"] for url in [
"https://hips.hearstapps.com/hmg-prod/images/gettyimages-1229892983-square.jpg"
]]
# Each batch example is a one-element file list, since file_input uses file_count="multiple"
examples_batch = [[[file], "1024x1024", "General"] for file in glob('examples/*')]
descriptions = (
    "Upload a picture, and our model will extract a highly accurate segmentation of its subject.\n"
    "The resolution used in our training was `1024x1024`, which is suggested for good results! "
    "`2048x2048` is suggested for BiRefNet_HR.\n"
    "Our code can be found at https://github.com/ZhengPeng7/BiRefNet.\n"
    "We also maintain the HF model of BiRefNet at https://huggingface.co/ZhengPeng7/BiRefNet for easier access."
)
##########################################################
# 3. Inference function (uses the already-loaded birefnet model)
##########################################################
@spaces.GPU
def predict(images, resolution, weights_file):
"""
์—ฌ๊ธฐ์„œ๋Š”, ๋‹จ์ผ birefnet ๋ชจ๋ธ๋งŒ ์œ ์ง€ํ•˜๊ณ  ์žˆ์œผ๋ฉฐ,
weight_file์„ ๋ฐ”๊พธ๋”๋ผ๋„ ์‹ค์ œ๋กœ๋Š” ์ด๋ฏธ ๋กœ๋“œ๋œ 'birefnet' ๋ชจ๋ธ๋งŒ ์‚ฌ์šฉ.
(๋งŒ์•ฝ ๋‹ค๋ฅธ ๊ฐ€์ค‘์น˜๋ฅผ ๋กœ๋“œํ•˜๊ณ  ์‹ถ๋‹ค๋ฉด, ์•„๋ž˜์ฒ˜๋Ÿผ ๋กœ์ปฌ state_dict ๊ต์ฒด ๋ฐฉ์‹ ์ถ”๊ฐ€ ๊ฐ€๋Šฅ.)
"""
assert images is not None, 'Images cannot be None.'
    # Parse the requested resolution
    try:
        w, h = resolution.strip().split('x')
        # Snap both sides down to multiples of 32, as the model expects
        w, h = int(int(w) // 32 * 32), int(int(h) // 32 * 32)
        resolution_hw = (h, w)  # torchvision's Resize expects (height, width)
    except (ValueError, AttributeError):
        print('[WARN] Invalid resolution input. Falling back to 1024x1024.')
        resolution_hw = (1024, 1024)
    # Inputs may be a single image or a batch, so normalize to a list
if isinstance(images, list):
is_batch = True
outputs, save_paths = [], []
save_dir = 'preds-BiRefNet'
os.makedirs(save_dir, exist_ok=True)
else:
images = [images]
is_batch = False
for idx, image_src in enumerate(images):
# str์ด๋ฉด ํŒŒ์ผ ๊ฒฝ๋กœ ํ˜น์€ URL
if isinstance(image_src, str):
if os.path.isfile(image_src):
image_ori = Image.open(image_src)
else:
resp = requests.get(image_src)
image_ori = Image.open(BytesIO(resp.content))
        # Convert numpy arrays to PIL
elif isinstance(image_src, np.ndarray):
image_ori = Image.fromarray(image_src)
else:
image_ori = image_src.convert('RGB')
image = image_ori.convert('RGB')
        preproc = ImagePreprocessor(resolution_hw)
        image_proc = preproc.proc(image).unsqueeze(0).to(device)
        if device == "cuda":
            image_proc = image_proc.half()  # match the fp16 model weights on GPU
        # Run inference
with torch.inference_mode():
            # The last decoder output is the final prediction
preds = birefnet(image_proc)[-1].sigmoid().cpu()
pred_mask = preds[0].squeeze()
        # Post-process
pred_pil = transforms.ToPILImage()(pred_mask)
image_masked = refine_foreground(image, pred_pil)
image_masked.putalpha(pred_pil.resize(image.size))
if is_batch:
file_name = (
os.path.splitext(os.path.basename(image_src))[0]
if isinstance(image_src, str)
else f"img_{idx}"
)
out_path = os.path.join(save_dir, f"{file_name}.png")
image_masked.save(out_path)
save_paths.append(out_path)
outputs.append(image_masked)
else:
outputs = [image_masked, image_ori]
torch.cuda.empty_cache()
# ๋ฐฐ์น˜๋ผ๋ฉด ๊ฐค๋Ÿฌ๋ฆฌ + ZIP ๋ฐ˜ํ™˜
if is_batch:
zip_path = os.path.join(save_dir, f"{save_dir}.zip")
with zipfile.ZipFile(zip_path, 'w') as zipf:
for fpath in save_paths:
zipf.write(fpath, os.path.basename(fpath))
return (save_paths, zip_path)
else:
return outputs
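# Standalone usage sketch (bypassing the UI; the URL is the example defined above):
#   masked, original = predict(
#       "https://hips.hearstapps.com/hmg-prod/images/gettyimages-1229892983-square.jpg",
#       "1024x1024", "General",
#   )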
##########################################################
# 4. Gradio UI
##########################################################
# Custom CSS
css = """
body {
background: linear-gradient(135deg, #667eea, #764ba2);
font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif;
color: #333;
margin: 0;
padding: 0;
}
.gradio-container {
background: rgba(255, 255, 255, 0.95);
border-radius: 15px;
padding: 30px 40px;
box-shadow: 0 8px 30px rgba(0, 0, 0, 0.3);
margin: 40px auto;
max-width: 1200px;
}
.gradio-container h1 {
color: #333;
text-shadow: 1px 1px 2px rgba(0, 0, 0, 0.2);
}
.fillable {
width: 95% !important;
max-width: unset !important;
}
#examples_container {
margin: auto;
width: 90%;
}
#examples_row {
justify-content: center;
}
.sidebar {
background: rgba(255, 255, 255, 0.98);
border-radius: 10px;
padding: 20px;
box-shadow: 0 4px 15px rgba(0, 0, 0, 0.2);
}
button, .btn {
background: linear-gradient(90deg, #ff8a00, #e52e71);
border: none;
color: #fff;
padding: 12px 24px;
text-transform: uppercase;
font-weight: bold;
letter-spacing: 1px;
border-radius: 5px;
cursor: pointer;
transition: transform 0.2s ease-in-out;
}
button:hover, .btn:hover {
transform: scale(1.05);
}
"""
title_html = """
<h1 align="center" style="margin-bottom: 0.2em;">BiRefNet Demo (No Tie-Weights Crash)</h1>
<p align="center" style="font-size:1.1em; color:#555;">
Using <code>from_config()</code> + local <code>state_dict</code> to bypass tie_weights issues
</p>
"""
with gr.Blocks(css=css, title="BiRefNet Demo") as demo:
    gr.Markdown(title_html)
    gr.Markdown(descriptions)
with gr.Tabs():
        # Tab 1: Image
with gr.Tab("Image"):
with gr.Row():
with gr.Column(scale=1):
image_input = gr.Image(type='pil', label='Upload an Image')
resolution_input = gr.Textbox(lines=1, placeholder="e.g., 1024x1024", label="Resolution")
weights_radio = gr.Radio(list(usage_to_weights_file.keys()), value="General", label="Weights")
predict_btn = gr.Button("Predict")
with gr.Column(scale=2):
output_slider = ImageSlider(label="Result", type="pil")
gr.Examples(
examples=examples_image,
inputs=[image_input, resolution_input, weights_radio],
label="Examples"
)
        # Tab 2: Text (URL)
with gr.Tab("Text"):
with gr.Row():
with gr.Column(scale=1):
image_url = gr.Textbox(label="Paste an Image URL")
resolution_input_text = gr.Textbox(lines=1, placeholder="e.g., 1024x1024", label="Resolution")
weights_radio_text = gr.Radio(list(usage_to_weights_file.keys()), value="General", label="Weights")
predict_btn_text = gr.Button("Predict")
with gr.Column(scale=2):
output_slider_text = ImageSlider(label="Result", type="pil")
gr.Examples(
examples=examples_text,
inputs=[image_url, resolution_input_text, weights_radio_text],
label="Examples"
)
        # Tab 3: Batch
with gr.Tab("Batch"):
with gr.Row():
with gr.Column(scale=1):
file_input = gr.File(
label="Upload Multiple Images",
type="filepath",
file_count="multiple"
)
resolution_input_batch = gr.Textbox(lines=1, placeholder="e.g., 1024x1024", label="Resolution")
weights_radio_batch = gr.Radio(list(usage_to_weights_file.keys()), value="General", label="Weights")
predict_btn_batch = gr.Button("Predict")
with gr.Column(scale=2):
output_gallery = gr.Gallery(label="Results", scale=1)
zip_output = gr.File(label="Zip Download")
gr.Examples(
examples=examples_batch,
inputs=[file_input, resolution_input_batch, weights_radio_batch],
label="Examples"
)
gr.Markdown("<p align='center'>Model by <a href='https://huggingface.co/ZhengPeng7/BiRefNet'>ZhengPeng7/BiRefNet</a></p>")
    # Wire up button events
predict_btn.click(
fn=predict,
inputs=[image_input, resolution_input, weights_radio],
outputs=output_slider
)
predict_btn_text.click(
fn=predict,
inputs=[image_url, resolution_input_text, weights_radio_text],
outputs=output_slider_text
)
predict_btn_batch.click(
fn=predict,
inputs=[file_input, resolution_input_batch, weights_radio_batch],
outputs=[output_gallery, zip_output]
)
if __name__ == "__main__":
demo.launch(share=False, debug=True)