Spaces: Running on Zero
fix seg
Browse files

- app.py +11 -9
- flow/utils.py +6 -5
app.py
CHANGED
@@ -68,14 +68,17 @@ def get_random_seed(randomize_seed, seed):
 
 # process image
 @spaces.GPU(duration=10)
-def process_image(…
-    …
+def process_image(image):
+    image = np.array(image) # uint8
+    image = cv2.resize(image, (518, 518), interpolation=cv2.INTER_AREA)
     # bg removal if there is no alpha channel
-    if …
-    …
-    mask = …
-    image = recenter_foreground(…
-    image = …
+    if image.shape[-1] == 3:
+        image = rembg.remove(image, session=bg_remover) # [H, W, 4]
+    mask = image[..., -1] > 0
+    image = recenter_foreground(image, mask, border_ratio=0.1)
+    image = image.astype(np.float32) / 255.0
+    image = image[..., :3] * image[..., 3:4] + (1 - image[..., 3:4]) # white background
+    image = (image * 255).astype(np.uint8)
     return image
 
 # process generation
@@ -91,7 +94,6 @@ def process_3d(input_image, num_steps=50, cfg_scale=7, grid_res=384, seed=42, si
 
     # input image (assume processed to RGBA uint8)
     image = input_image.astype(np.float32) / 255.0
-    image = image[..., :3] * image[..., 3:4] + (1 - image[..., 3:4]) # white background
     image_tensor = torch.from_numpy(image).permute(2, 0, 1).contiguous().unsqueeze(0).float().cuda()
 
     data = {"cond_images": image_tensor}
@@ -162,7 +164,7 @@ with block:
     with gr.Row():
         # input image
         input_image = gr.Image(label="Input Image", type="numpy")
-        seg_image = gr.Image(label="Segmentation Result", type="numpy", …
+        seg_image = gr.Image(label="Segmentation Result", type="numpy", interactive=False)
         with gr.Accordion("Settings", open=True):
            # inference steps
            num_steps = gr.Slider(label="Inference steps", minimum=1, maximum=100, step=1, value=50)
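
After this change, resizing, background removal, recentering, and white-background compositing all happen once in process_image, so process_3d receives an image whose alpha has already been baked in; that is why the compositing line is dropped from process_3d in the second hunk. Below is a minimal, self-contained sketch of just the compositing step. The helper name composite_on_white is illustrative, not from the repository.

import numpy as np

def composite_on_white(rgba_uint8: np.ndarray) -> np.ndarray:
    # Mirrors the app.py line
    #   image[..., :3] * image[..., 3:4] + (1 - image[..., 3:4])
    # i.e. alpha-blends the RGB channels over a pure white background.
    img = rgba_uint8.astype(np.float32) / 255.0
    rgb = img[..., :3] * img[..., 3:4] + (1.0 - img[..., 3:4])
    return (rgb * 255).astype(np.uint8)

# Quick check: an opaque red pixel stays red, a fully transparent pixel becomes white.
demo = np.zeros((2, 2, 4), dtype=np.uint8)
demo[0, 0] = [255, 0, 0, 255]
out = composite_on_white(demo)
print(out[0, 0], out[1, 1])  # [255 0 0] [255 255 255]
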
flow/utils.py
CHANGED
@@ -40,11 +40,12 @@ def recenter_foreground(image, mask, border_ratio: float = 0.1):
     H, W, C = image.shape
     size = max(H, W)
 
-    # default to white bg
-    …
-    …
-    …
-    …
+    # default to white bg
+    result = np.ones((size, size, C), dtype=np.float32)
+
+    # if rgba, set alpha to 0
+    if C == 4:
+        result[..., -1] = 0
 
     coords = np.nonzero(mask)
     x_min, x_max = coords[0].min(), coords[0].max()
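
The recenter_foreground hunk only changes the canvas initialization; the bounding-box lines that follow are existing context, and the cropping, border scaling, and pasting that presumably come next are not shown in this commit. The sketch below fills in those later steps with a typical recentering routine purely for illustration. The name recenter_foreground_sketch and everything past the bounding box are assumptions; only the lines marked "from the diff" are actually in the file.

import cv2
import numpy as np

def recenter_foreground_sketch(image: np.ndarray, mask: np.ndarray, border_ratio: float = 0.1) -> np.ndarray:
    image = image.astype(np.float32) / 255.0  # assume uint8 input, work in [0, 1]
    H, W, C = image.shape
    size = max(H, W)

    # default to white bg (from the diff)
    result = np.ones((size, size, C), dtype=np.float32)

    # if rgba, set alpha to 0 (from the diff)
    if C == 4:
        result[..., -1] = 0

    # foreground bounding box (the first two lines are context in the diff)
    coords = np.nonzero(mask)
    x_min, x_max = coords[0].min(), coords[0].max()
    y_min, y_max = coords[1].min(), coords[1].max()

    # assumed: rescale the crop so it fills the canvas minus the border
    h, w = x_max - x_min + 1, y_max - y_min + 1
    scale = size * (1 - 2 * border_ratio) / max(h, w)
    h2, w2 = int(h * scale), int(w * scale)
    crop = cv2.resize(image[x_min:x_max + 1, y_min:y_max + 1], (w2, h2), interpolation=cv2.INTER_AREA)

    # assumed: paste the resized crop centered on the square canvas
    x0, y0 = (size - h2) // 2, (size - w2) // 2
    result[x0:x0 + h2, y0:y0 + w2] = crop
    return (result * 255).astype(np.uint8)

Under these assumptions, the call recenter_foreground(image, mask, border_ratio=0.1) in process_image returns a square uint8 image with the object centered and roughly a 10% margin on each side.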