Commit f73d0f9 · Alex Hortua committed
Parent(s): c8475b4

Adding basic 3d to enhance image

Files changed:
- requirements.txt +1 -0
- src/app.py +23 -13
- src/utils.py +38 -0
requirements.txt CHANGED
@@ -5,3 +5,4 @@ datasets
 opencv-python
 gradio
 numpy
+scikit-image
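For local runs, the new dependency installs the same way as the existing, unpinned entries in this file:

pip install scikit-image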
src/app.py CHANGED
@@ -1,7 +1,7 @@
 import gradio as gr
 import numpy as np
 from PIL import Image
-from utils import load_model, segment_person, resize_image
+from utils import load_model, segment_person, resize_image, split_stereo_image
 
 # Load model and processor once
 processor, model = load_model()
@@ -15,29 +15,38 @@ default_bg = Image.new("RGB", (512, 512), color=(95, 147, 89))
 
 def generate_3d_outputs(person_img, background_img=None, shift_pixels=10, person_size=100):
     # Resize images to match
-    image =
-
-    if background_img is None:
-        background = default_bg.resize(image.size)
-    else:
-        background = Image.fromarray(background_img).convert("RGB").resize(image.size)
+    image = person_img
+    background_img = background_img if background_img is not None else default_bg
+
+    # Split background image into left and right halves
+    leftBackground, rightBackground = split_stereo_image(Image.fromarray(background_img))
+
+    # Resize image to match background dimensions
+    image = Image.fromarray(np.array(image)).resize((leftBackground.shape[1], leftBackground.shape[0]))
 
     # Step 1: Segment person
     mask = segment_person(image, processor, model)
 
     image_np = np.array(image)
+    leftBackground_np = np.array(leftBackground)
+    rightBackground_np = np.array(rightBackground)
 
     person_only = image_np * mask
+    leftBackground_only = leftBackground_np * (1 - mask)
+    rightBackground_only = rightBackground_np * (1 - mask)
 
     # Step 2: Create stereo pair
     person_left = np.roll(person_only, shift=-shift_pixels, axis=1)
     person_right = np.roll(person_only, shift=shift_pixels, axis=1)
 
-    left_eye = np.clip(
-    right_eye = np.clip(
+    left_eye = np.clip(person_right + leftBackground_only, 0, 255).astype(np.uint8)
+    right_eye = np.clip(person_left + rightBackground_only, 0, 255).astype(np.uint8)
+    person_segmentation = np.clip(person_only, 0, 255).astype(np.uint8)
 
     # --- Combine left and right images side by side ---
     stereo_pair = np.concatenate([left_eye, right_eye], axis=1)
@@ -54,7 +63,7 @@ def generate_3d_outputs(person_img, background_img=None, shift_pixels=10, perso
     left_img = Image.fromarray(left_eye)
     right_img = Image.fromarray(right_eye)
 
-    return
+    return person_segmentation, stereo_image, anaglyph_img
 
 # Gradio Interface
 demo = gr.Interface(
@@ -67,8 +76,9 @@ demo = gr.Interface(
 
     ],
     outputs=[
-        gr.Image(label="
+        gr.Image(label="segmentation mask"),
         gr.Image(label="Stereo_pair"),
+        gr.Image(label="3D Anaglyph Image")
     ],
     title="3D Person Segmentation Viewer",
    description="Upload a person photo and optionally a background image. Outputs anaglyph and stereo views."
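The updated generate_3d_outputs builds each eye view by shifting the segmented person layer horizontally and pasting it over the matching background half. Below is a minimal standalone sketch of that compositing step, assuming a uint8 RGB image and a {0, 1} mask of shape (H, W, 1); the helper name compose_eye and the flat test data are illustrative only, not part of the commit.

import numpy as np

def compose_eye(person_rgb, background_rgb, mask, shift_pixels):
    # Keep only the person pixels, punch a person-shaped hole out of the
    # background, shift the person layer horizontally for parallax, then
    # recombine. Widening to int16 before the add avoids uint8 wrap-around
    # where the shifted person overlaps unmasked background.
    person_only = person_rgb * mask
    background_only = background_rgb * (1 - mask)
    person_shifted = np.roll(person_only, shift=shift_pixels, axis=1)
    combined = person_shifted.astype(np.int16) + background_only.astype(np.int16)
    return np.clip(combined, 0, 255).astype(np.uint8)

# Illustrative data: a flat background and a rectangular "person" mask.
h, w = 256, 256
person = np.full((h, w, 3), 200, dtype=np.uint8)
background = np.full((h, w, 3), 60, dtype=np.uint8)
mask = np.zeros((h, w, 1), dtype=np.uint8)
mask[64:192, 96:160] = 1

shift_pixels = 10
left_eye = compose_eye(person, background, mask, +shift_pixels)   # person shifted right
right_eye = compose_eye(person, background, mask, -shift_pixels)  # person shifted left
stereo_pair = np.concatenate([left_eye, right_eye], axis=1)       # side-by-side view

As in the commit, the left eye uses the person layer rolled by +shift_pixels and the right eye by -shift_pixels, giving the horizontal disparity that produces the 3D effect.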
src/utils.py CHANGED
@@ -3,6 +3,7 @@ import numpy as np
 from PIL import Image
 import cv2
 from transformers import AutoImageProcessor, SegformerForSemanticSegmentation
+from imagehash import average_hash
 
 def load_model():
     processor = AutoImageProcessor.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")
@@ -56,3 +57,40 @@ def resize_image(image, size_percent):
     resized_image.paste(scaled_content, (x, y))
 
     return resized_image
+
+
+# Check if two images are similar
+def check_image_similarity(image1, image2):
+    hash1 = average_hash(Image.fromarray(image1))
+    hash2 = average_hash(Image.fromarray(image2))
+    return hash1 - hash2 < 10
+
+
+def split_stereo_image(image):
+    """
+    Splits an image into left and right halves for stereographic viewing.
+
+    Args:
+        image: PIL Image or numpy array
+
+    Returns:
+        tuple: (left_half, right_half) as numpy arrays
+    """
+    # Convert to numpy array if PIL Image
+    if isinstance(image, Image.Image):
+        image = np.array(image)
+
+    # Get width and calculate split point
+    width = image.shape[1]
+    split_point = width // 2
+
+    # Split into left and right halves
+    left_half = image[:, :split_point]
+    right_half = image[:, split_point:]
+
+    # If a stereo image is provided, return left and right halves
+    if check_image_similarity(left_half, right_half):
+        return left_half, right_half
+    else:
+        return image, resize_image(image, 99)
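split_stereo_image decides whether its input is already a side-by-side stereo photo by comparing the two halves with imagehash: hash1 - hash2 is the Hamming distance between the 8x8 average-hash fingerprints (0 for identical halves, up to 64 for unrelated ones), and distances under 10 are treated as the same scene. A small usage sketch, assuming imagehash and Pillow are installed and the script runs next to src/utils.py; the solid-colour test images are illustrative only.

import numpy as np
from PIL import Image
from imagehash import average_hash

from utils import split_stereo_image  # helper added in this commit

# Two near-identical halves hash to (almost) the same 64-bit fingerprint,
# so their Hamming distance stays well below the similarity threshold of 10.
left = Image.new("RGB", (256, 256), color=(120, 60, 30))
right = Image.new("RGB", (256, 256), color=(118, 62, 28))
print(average_hash(left) - average_hash(right))  # small integer, e.g. 0

# A side-by-side composite is detected as a stereo pair and split in two;
# a plain photo would instead fall back to (original, slightly resized copy).
side_by_side = np.hstack([np.array(left), np.array(right)])
left_half, right_half = split_stereo_image(side_by_side)
print(left_half.shape, right_half.shape)  # (256, 256, 3) (256, 256, 3)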