#2 by Soodoo - opened
- Makefile +9 -5
- hf.sh +45 -0
- src/app.py +59 -6
- src/assets/examples/test_4.jpg +3 -0
- src/assets/examples/test_5.jpg +3 -0
- src/modal_app.py +47 -29
- src/tools.py +133 -40
- src/utils.py +0 -32
Makefile
CHANGED
@@ -1,20 +1,24 @@
-.PHONY: test
+.PHONY: test deploy run dev install hf
 
 install:
-	pip install uv && \
+	pip install -U uv && \
 	uv venv && \
 	source .venv/bin/activate && \
 	uv sync && \
 	modal setup
 
-
+deploy:
 	modal deploy src/modal_app.py
 
 test_modal:
 	uv run test/test_modal.py
 
-run:
+run: deploy
 	uv run src/app.py
 
 dev:
 	gradio src/app.py
+
+hf:
+	chmod 777 hf.sh
+	./hf.sh
hf.sh
ADDED
@@ -0,0 +1,45 @@
+#!/bin/bash
+
+# Set variables
+REPO_URL="https://github.com/mahan-ym/ImageAlfred"
+REPO_DIR="ImageAlfred"
+TEMP_DIR="./tmp"
+SRC_DIR="src"
+REQUIREMENTS_FILE="requirements.txt"
+
+echo "🚀 Starting Huggingface Space update script..."
+
+# Clone or update the repository
+ORIGINAL_DIR=$(pwd)
+if [ -d "$TEMP_DIR" ]; then
+    echo "📥 Updating repository..."
+    cd "$TEMP_DIR" && git pull
+else
+    echo "📥 Cloning repository..."
+    mkdir -p "$TEMP_DIR" && cd "$TEMP_DIR" && git clone "$REPO_URL"
+fi
+cd "$ORIGINAL_DIR" # Return to original directory
+
+# Copy src directory to current directory
+echo "📁 Updating source code..."
+if [ ! -d "$TEMP_DIR/$REPO_DIR/$SRC_DIR" ]; then
+    echo "❌ Source directory not found in the repository!"
+    exit 1
+fi
+
+if [ -d "$SRC_DIR" ]; then
+    rm -rf "$SRC_DIR"
+fi
+cp -r "$TEMP_DIR/$REPO_DIR/$SRC_DIR" .
+mv "$TEMP_DIR/$REPO_DIR/Makefile" .
+
+# Check if copy was successful
+if [ $? -eq 0 ]; then
+    rm -rf "$TEMP_DIR"
+    echo "✅ Source code updated successfully!"
+else
+    echo "❌ Failed to copy source code!"
+    exit 1
+fi
+
+echo "🎉 Update completed! Source code and requirements are now up to date."
src/app.py
CHANGED
@@ -12,20 +12,25 @@ gr.set_static_paths(paths=[Path.cwd().absolute() / "assets"])
 
 icon = """<link rel="icon" type="image/x-icon" href="https://raw.githubusercontent.com/mahan-ym/ImageAlfred/main/src/assets/icons/ImageAlfredIcon.png">"""
 
-title = """Image Alfred - Recolor and Privacy Preserving Image Tools
+title = """Image Alfred - Recolor and Privacy Preserving Image MCP Tools
 <img src="https://raw.githubusercontent.com/mahan-ym/ImageAlfred/main/src/assets/icons/ImageAlfredIcon.png" alt="Image Alfred Logo" style="width: 120px; height: auto; margin: 0 auto;">
-"""
+<h4 style="text-align: center;"></h4>
+"""  # noqa: E501
 
 hsv_df_input = gr.Dataframe(
     headers=["Object", "Hue", "Saturation Scale"],
     datatype=["str", "number", "number"],
+    col_count=(3, "fixed"),
+    show_row_numbers=True,
     label="Target Objects and New Settings",
     type="array",
+    # row_count=(1, "dynamic"),
 )
 
 lab_df_input = gr.Dataframe(
     headers=["Object", "New A", "New B"],
     datatype=["str", "number", "number"],
+    col_count=(3, "fixed"),
     label="Target Objects and New Settings",
     type="array",
 )
@@ -38,7 +43,23 @@ change_color_objects_hsv_tool = gr.Interface(
     ],
     outputs=gr.Image(label="Output Image"),
     title="Image Recolor Tool (HSV)",
-    description="
+    description="""
+    This tool allows you to recolor objects in an image using the HSV color space.
+    You can specify the hue and saturation scale for each object.""",  # noqa: E501
+    examples=[
+        [
+            "https://raw.githubusercontent.com/mahan-ym/ImageAlfred/main/src/assets/examples/test_1.jpg",
+            [["pants", 128, 1]],
+        ],
+        [
+            "https://raw.githubusercontent.com/mahan-ym/ImageAlfred/main/src/assets/examples/test_4.jpg",
+            [["desk", 15, 0.5], ["left cup", 40, 1.1]],
+        ],
+        [
+            "https://raw.githubusercontent.com/mahan-ym/ImageAlfred/main/src/assets/examples/test_5.jpg",
+            [["suits", 60, 1.5], ["pants", 10, 0.8]],
+        ],
+    ],
 )
 
 change_color_objects_lab_tool = gr.Interface(
@@ -49,7 +70,24 @@ change_color_objects_lab_tool = gr.Interface(
     ],
     outputs=gr.Image(label="Output Image"),
     title="Image Recolor Tool (LAB)",
-    description="
+    description="""
+    Recolor an image based on user input using the LAB color space.
+    You can specify the new A and new B values for each object.
+    """,  # noqa: E501
+    examples=[
+        [
+            "https://raw.githubusercontent.com/mahan-ym/ImageAlfred/main/src/assets/examples/test_1.jpg",
+            [["pants", 128, 1]],
+        ],
+        [
+            "https://raw.githubusercontent.com/mahan-ym/ImageAlfred/main/src/assets/examples/test_4.jpg",
+            [["desk", 15, 0.5], ["left cup", 40, 1.1]],
+        ],
+        [
+            "https://raw.githubusercontent.com/mahan-ym/ImageAlfred/main/src/assets/examples/test_5.jpg",
+            [["suits", 60, 1.5], ["pants", 10, 0.8]],
+        ],
+    ],
 )
 
 privacy_preserve_tool = gr.Interface(
@@ -59,11 +97,26 @@
         gr.Textbox(
             label="Objects to Mask (dot-separated)",
             placeholder="e.g., person. car. license plate",
         ),
+        gr.Slider(
+            label="Privacy Strength",
+            minimum=1,
+            maximum=50,
+            value=15,
+            step=1,
+            info="Higher values result in stronger blurring.",
+        ),
     ],
     outputs=gr.Image(label="Output Image"),
     title="Privacy Preserving Tool",
     description="Upload an image and provide a prompt for the object to enforce privacy. The tool will use blurring to obscure the specified objects in the image.",  # noqa: E501
+    examples=[
+        [
+            "https://raw.githubusercontent.com/mahan-ym/ImageAlfred/main/src/assets/examples/test_3.jpg",
+            "license plate.",
+            10,
+        ],
+    ],
 )
 
 demo = gr.TabbedInterface(
@@ -88,4 +141,4 @@ demo = gr.TabbedInterface(
     # tabs_interface.render()
 
 if __name__ == "__main__":
-    demo.launch(mcp_server=True, max_file_size="
+    demo.launch(mcp_server=True, max_file_size="15mb")
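Note on the new Slider wiring: Gradio passes Interface inputs to the handler positionally, so the Privacy Strength value arrives as the third argument of privacy_preserve_image, matching the new privacy_strength parameter in src/tools.py. A minimal sketch of that wiring, with a local stub standing in for the Modal-backed tool (the stub body is illustrative, not the real implementation):

import gradio as gr

def privacy_preserve_image(input_img, input_prompt, privacy_strength=15):
    # Stub for the real tool in src/tools.py, which forwards these
    # arguments to the deployed Modal function.
    print(f"prompt={input_prompt!r}, strength={privacy_strength}")
    return input_img

demo = gr.Interface(
    fn=privacy_preserve_image,
    inputs=[
        gr.Image(type="pil"),
        gr.Textbox(label="Objects to Mask (dot-separated)"),
        gr.Slider(label="Privacy Strength", minimum=1, maximum=50, value=15, step=1),
    ],
    outputs=gr.Image(label="Output Image"),
)

if __name__ == "__main__":
    demo.launch()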
src/assets/examples/test_4.jpg
ADDED (binary image, tracked with Git LFS)

src/assets/examples/test_5.jpg
ADDED (binary image, tracked with Git LFS)
src/modal_app.py
CHANGED
@@ -55,6 +55,8 @@ image = (
     gpu="A10G",
     image=image,
     volumes={volume_path: volume},
+    # min_containers=1,
+    timeout=60 * 3,
 )
 def lang_sam_segment(
     image_pil: Image.Image,
@@ -67,11 +69,6 @@ def lang_sam_segment(
     """  # noqa: E501
     from lang_sam import LangSAM  # type: ignore
 
-    os.environ["TORCH_HOME"] = TORCH_HOME
-    os.environ["HF_HOME"] = HF_HOME
-    os.makedirs(HF_HOME, exist_ok=True)
-    os.makedirs(TORCH_HOME, exist_ok=True)
-
     model = LangSAM(sam_type="sam2.1_hiera_large")
     langsam_results = model.predict(
         images_pil=[image_pil],
@@ -79,6 +76,14 @@ def lang_sam_segment(
         box_threshold=box_threshold,
         text_threshold=text_threshold,
     )
+    if len(langsam_results[0]["labels"]) == 0:
+        print("No masks found for the given prompt.")
+        return None
+
+    print(f"found {len(langsam_results[0]['labels'])} masks for prompt: {prompt}")
+    print("labels:", langsam_results[0]["labels"])
+    print("scores:", langsam_results[0]["scores"])
+    print("masks scores:", langsam_results[0].get("mask_scores", "No mask scores available"))  # noqa: E501
 
     return langsam_results
 
@@ -87,6 +92,7 @@ def lang_sam_segment(
     gpu="T4",
     image=image,
     volumes={volume_path: volume},
+    timeout=60 * 3,
 )
 def change_image_objects_hsv(
     image_pil: Image.Image,
@@ -111,15 +117,13 @@ def change_image_objects_hsv(
         raise ValueError(
             "targets_config must be a list of lists, each containing [target_name, hue, saturation_scale]."  # noqa: E501
         )
-
+    print("Change image objects hsv targets config:", targets_config)
     prompts = ". ".join(target[0] for target in targets_config)
 
-    os.environ["TORCH_HOME"] = TORCH_HOME
-    os.environ["HF_HOME"] = HF_HOME
-    os.makedirs(HF_HOME, exist_ok=True)
-    os.makedirs(TORCH_HOME, exist_ok=True)
-
     langsam_results = lang_sam_segment.remote(image_pil=image_pil, prompt=prompts)
+    if not langsam_results:
+        return image_pil
+
     labels = langsam_results[0]["labels"]
     scores = langsam_results[0]["scores"]
 
@@ -157,6 +161,7 @@ def change_image_objects_hsv(
     gpu="T4",
     image=image,
     volumes={volume_path: volume},
+    timeout=60 * 3,
 )
 def change_image_objects_lab(
     image_pil: Image.Image,
@@ -182,17 +187,17 @@ def change_image_objects_lab(
             "targets_config must be a list of lists, each containing [target_name, new_a, new_b]."  # noqa: E501
         )
 
-
+    print("change image objects lab targets config:", targets_config)
 
-
-    os.environ["HF_HOME"] = HF_HOME
-    os.makedirs(HF_HOME, exist_ok=True)
-    os.makedirs(TORCH_HOME, exist_ok=True)
+    prompts = ". ".join(target[0] for target in targets_config)
 
     langsam_results = lang_sam_segment.remote(
         image_pil=image_pil,
         prompt=prompts,
     )
+    if not langsam_results:
+        return image_pil
+
     labels = langsam_results[0]["labels"]
     scores = langsam_results[0]["scores"]
     img_array = np.array(image_pil)
@@ -226,10 +231,19 @@ def change_image_objects_lab(
     gpu="T4",
     image=image,
     volumes={volume_path: volume},
+    timeout=60 * 3,
 )
-def apply_mosaic_with_bool_mask(image, mask, intensity: int = 50):
+def apply_mosaic_with_bool_mask(
+    image: np.ndarray,
+    mask: np.ndarray,
+    privacy_strength: int,
+) -> np.ndarray:
     h, w = image.shape[:2]
-
+    image_size_factor = min(h, w) / 1000
+    block_size = int(max(1, (privacy_strength * image_size_factor)))
+
+    # Ensure block_size is at least 1 and doesn't exceed half of image dimensions
+    block_size = max(1, min(block_size, min(h, w) // 2))
 
     small = cv2.resize(
         image, (w // block_size, h // block_size), interpolation=cv2.INTER_LINEAR
@@ -245,15 +259,17 @@ def apply_mosaic_with_bool_mask(image, mask, intensity: int = 50):
     gpu="T4",
     image=image,
     volumes={volume_path: volume},
+    timeout=60 * 3,
 )
 def preserve_privacy(
     image_pil: Image.Image,
     prompt: str,
+    privacy_strength: int = 15,
 ) -> Image.Image:
-
-
-
-
+    """
+    Preserves privacy in an image by applying a mosaic effect to specified objects.
+    """
+    print(f"Preserving privacy for prompt: {prompt} with strength {privacy_strength}")
 
     langsam_results = lang_sam_segment.remote(
         image_pil=image_pil,
@@ -261,15 +277,14 @@ def preserve_privacy(
         box_threshold=0.35,
         text_threshold=0.40,
     )
+    if not langsam_results:
+        return image_pil
 
     img_array = np.array(image_pil)
 
     for result in langsam_results:
-        print(f"Found {len(result['masks'])} masks for label: {result['labels']}")
-        if len(result["masks"]) == 0:
-            print("No masks found for the given prompt.")
-            return image_pil
         print(f"result: {result}")
+
         for i, mask in enumerate(result["masks"]):
             if "mask_scores" in result:
                 if (
@@ -282,12 +297,15 @@ def preserve_privacy(
             if mask_score < 0.6:
                 print(f"Skipping mask {i + 1}/{len(result['masks'])} -> low score.")
                 continue
-            print(
+            print(
+                f"Processing mask {i + 1}/{len(result['masks'])} Mask score: {mask_score}"  # noqa: E501
+            )
 
             mask_bool = mask.astype(bool)
 
-            img_array = apply_mosaic_with_bool_mask.remote(
+            img_array = apply_mosaic_with_bool_mask.remote(
+                img_array, mask_bool, privacy_strength
+            )
 
     output_image_pil = Image.fromarray(img_array)
 
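The reworked apply_mosaic_with_bool_mask scales its mosaic block size with image resolution, so a given Privacy Strength produces a similar visual effect on small and large images. The diff view truncates the body after the downscale; a self-contained sketch of the presumed routine, where the nearest-neighbour upscale and the masked copy-back are assumptions based on the function's name and visible code:

import cv2
import numpy as np

def apply_mosaic_with_bool_mask(image: np.ndarray, mask: np.ndarray, privacy_strength: int) -> np.ndarray:
    h, w = image.shape[:2]
    image_size_factor = min(h, w) / 1000
    block_size = int(max(1, privacy_strength * image_size_factor))
    # Clamp so the downscaled image keeps at least 2x2 pixels.
    block_size = max(1, min(block_size, min(h, w) // 2))

    # Downscale, then upscale with nearest-neighbour to form square blocks.
    small = cv2.resize(image, (w // block_size, h // block_size), interpolation=cv2.INTER_LINEAR)
    mosaic = cv2.resize(small, (w, h), interpolation=cv2.INTER_NEAREST)  # assumed step

    out = image.copy()
    out[mask] = mosaic[mask]  # assumed step: pixelate only the masked region
    return out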
src/tools.py
CHANGED
@@ -1,39 +1,55 @@
+import re
 from pathlib import Path
 
+import gradio as gr
 import modal
 import numpy as np
 from PIL import Image
 
-from utils import upload_image_to_tmpfiles
-
 modal_app_name = "ImageAlfred"
 
 
 def privacy_preserve_image(
     input_img,
     input_prompt,
+    privacy_strength: int = 15,
 ) -> np.ndarray | Image.Image | str | Path | None:
     """
-
+    Obscures specified objects in the input image based on a natural language prompt, using a privacy-preserving blur or distortion effect.
 
-
-
-        input_prompt (list): List of [object:str].
+    This function segments the image to detect objects described in the `input_prompt` and applies a pixelation effect to those regions. It is useful in scenarios where sensitive content (e.g., faces, license plates, logos,
+    personal belongings) needs to be hidden before sharing or publishing images.
 
+    Args:
+        input_img: Input image or can be URL string of the image or base64 string. Cannot be None.
+        input_prompt (str): Object to obscure in the image has to be a dot-separated string. It can be a single word or multiple words, e.g., "left person face", "license plate" but it must be as short as possible and avoid using symbols or punctuation. Also you have to use single form of the word, e.g., "person" instead of "people", "face" instead of "faces". e.g. input_prompt = "face. right car. blue shirt."
+        privacy_strength (int): Strength of the privacy preservation effect. Higher values result in stronger blurring. Default is 15.
     Returns:
         bytes: Binary image data of the modified image.
 
     example:
        input_prompt = ["face", "license plate"]
-    """
+    """  # noqa: E501
+    if not input_img:
+        raise gr.Error("Input image cannot be None or empty.")
+    valid_pattern = re.compile(r"^[a-zA-Z\s.]+$")
+    if not input_prompt or input_prompt.strip() == "":
+        raise gr.Error("Input prompt cannot be None or empty.")
+    if not valid_pattern.match(input_prompt):
+        raise gr.Error("Input prompt must contain only letters, spaces, and dots.")
+
     func = modal.Function.from_name("ImageAlfred", "preserve_privacy")
-    output_pil = func.remote(
+    output_pil = func.remote(
+        image_pil=input_img,
+        prompt=input_prompt,
+        privacy_strength=privacy_strength,
+    )
 
     if output_pil is None:
-        raise
+        raise gr.Error("Received None from server.")
     if not isinstance(output_pil, Image.Image):
-        raise
-            f"Expected Image.Image from
+        raise gr.Error(
+            f"Expected Image.Image from server function, got {type(output_pil)}"
         )
 
     return output_pil
@@ -43,17 +59,32 @@ def change_color_objects_hsv(
     input_img,
     user_input,
 ) -> np.ndarray | Image.Image | str | Path | None:
     """
+    Changes the hue and saturation of specified objects in an image using the HSV color space.
 
-    Segments objects based on text prompts and alters their color in the HSV
-    color space. The HSV color space uses OpenCV ranges: H (0-179), S (0-255),
-    V (0-255). Common color examples include Green (hue=60), Red (hue=0),
-    Blue (hue=120), Yellow (hue=30), and Purple (hue=150), all with
-    saturation=255.
+    This function segments objects in the image based on a user-provided text prompt, then
+    modifies their hue and saturation in the HSV (Hue, Saturation, Value) space. HSV is intuitive
+    for color manipulation where users think in terms of basic color categories and intensity,
+    making it useful for broad, vivid color shifts.
+
+    Use this method when:
+    - Performing broad color changes or visual effects (e.g., turning a shirt from red to blue).
+    - Needing intuitive control over color categories (e.g., shifting everything that's red to purple).
+    - Saturation and vibrancy manipulation are more important than accurate perceptual matching.
+
+    OpenCV HSV Ranges:
+    - H: 0-179 (Hue angle on color wheel, where 0 = red, 60 = green, 120 = blue, etc.)
+    - S: 0-255 (Saturation)
+    - V: 0-255 (Brightness)
+
+    Common HSV color references:
+    - Red: (Hue≈0), Green: (Hue≈60), Blue: (Hue≈120), Yellow: (Hue≈30), Purple: (Hue≈150)
+    - Typically used with Saturation=255 for vivid colors.
 
     Args:
-
-
+        input_img: Input image or can be URL string of the image or base64 string. Cannot be None.
+        user_input : A list of target specifications for color transformation. Each inner list must contain exactly three elements in the following order: 1. target_object (str) - A short, human-readable description of the object to be modified.Multi-word descriptions are allowed for disambiguation (e.g., "right person shirt"), but they must be at most three words and concise and free of punctuation, symbols, or special characters.2. hue (int) - Desired hue value in the HSV color space, ranging from 0 to 179. Represents the color angle on the HSV color wheel (e.g., 0 = red, 60 = green, 120 = blue)3. saturation_scale (float) - A multiplicative scale factor applied to the current saturation of the object (must be > 0). For example, 1.0 preserves current saturation, 1.2 increases vibrancy, and 0.8 slightly desaturates. Each target object must be uniquely defined in the list to avoid conflicting transformations.Example: [["hair", 30, 1.2], ["right person shirt", 60, 1.0]]
 
     Returns:
         Base64-encoded string.
@@ -62,23 +93,45 @@ def change_color_objects_hsv(
         ValueError: If user_input format is invalid, hue values are outside [0, 179] range, saturation_scale is not positive, or image format is invalid or corrupted.
         TypeError: If input_img is not a supported type or modal function returns unexpected type.
     """  # noqa: E501
+    if len(user_input) == 0 or not isinstance(user_input, list):
+        raise gr.Error(
+            "user input must be a list of lists, each containing [object, hue, saturation_scale]."  # noqa: E501
+        )
+    if not input_img:
+        raise gr.Error("input img cannot be None or empty.")
+
     print("before processing input:", user_input)
-
+    valid_pattern = re.compile(r"^[a-zA-Z\s]+$")
     for item in user_input:
         if len(item) != 3:
-            raise
+            raise gr.Error(
                 "Each item in user_input must be a list of [object, hue, saturation_scale]"  # noqa: E501
             )
+        if not item[0] or not valid_pattern.match(item[0]):
+            raise gr.Error(
+                "Object name must contain only letters and spaces and cannot be empty."
+            )
+
         if not isinstance(item[0], str):
            item[0] = str(item[0])
+        if not item[1]:
+            raise gr.Error("Hue must be set and cannot be empty.")
         if not isinstance(item[1], (int, float)):
-
+            try:
+                item[1] = int(item[1])
+            except ValueError:
+                raise gr.Error("Hue must be an integer.")
         if item[1] < 0 or item[1] > 179:
-            raise
+            raise gr.Error("Hue must be in the range [0, 179]")
+        if not item[2]:
+            raise gr.Error("Saturation scale must be set and cannot be empty.")
         if not isinstance(item[2], (int, float)):
-
+            try:
+                item[2] = float(item[2])
+            except ValueError:
+                raise gr.Error("Saturation scale must be a float number.")
         if item[2] <= 0:
-            raise
+            raise gr.Error("Saturation scale must be greater than 0")
 
     print("after processing input:", user_input)
 
@@ -91,7 +144,6 @@ def change_color_objects_hsv(
         raise TypeError(
             f"Expected Image.Image from modal remote function, got {type(output_pil)}"
         )
-    # img_link = upload_image_to_tmpfiles(output_pil)
 
     return output_pil
 
@@ -100,17 +152,36 @@ def change_color_objects_lab(
     input_img,
     user_input,
 ) -> np.ndarray | Image.Image | str | Path | None:
     """
-
-
-
-
-
-
+    Changes the color of specified objects in an image using the LAB color space.
+
+    This function segments image regions based on a user-provided text prompt and applies
+    color transformations in the LAB color space. LAB separates luminance (L) from color
+    components (A for green-red, B for blue-yellow), making it more perceptually uniform
+    and closer to how humans perceive color differences.
+
+    Use this method when:
+    - Precise perceptual color control is needed (e.g., subtle shifts in tone or matching
+      specific brand colors).
+    - Working in lighting-sensitive tasks where separating lightness from chroma improves quality.
+    - You want color transformations that are less influenced by lighting conditions or
+      brightness variations.
+
+    OpenCV LAB Ranges:
+    - L: 0-255 (lightness)
+    - A: 0-255 (green-red, 128 = neutral)
+    - B: 0-255 (blue-yellow, 128 = neutral)
+
+    Common LAB color references:
+    - Green: (L=?, A≈80, B≈128)
+    - Red: (L=?, A≈180, B≈160)
+    - Blue: (L=?, A≈128, B≈80)
+    - Yellow: (L=?, A≈120, B≈180)
+    - Purple: (L=?, A≈180, B≈100)
 
     Args:
-        user_input
-        input_img
+        user_input: A list of color transformation instructions, each as a three-element list:[object_name (str), new_a (int, 0-255), new_b (int, 0-255)].- object_name: A short, unique identifier for the object to be recolored. Multi-word names are allowed for specificity (e.g., "right person shirt") but must be 3 words or fewer and free of punctuation or special symbols.- new_a: The desired 'a' channel value in LAB space (green-red axis, 0-255, with 128 as neutral).- new_b: The desired 'b' channel value in LAB space (blue-yellow axis, 0-255, with 128 as neutral).Each object must appear only once in the list. Example:[["hair", 80, 128], ["right person shirt", 180, 160]]
+        input_img : Input image can be URL string of the image. Cannot be None.
 
     Returns:
         Base64-encoded string
@@ -119,22 +190,44 @@ def change_color_objects_lab(
         ValueError: If user_input format is invalid, a/b values are outside [0, 255] range, or image format is invalid or corrupted.
         TypeError: If input_img is not a supported type or modal function returns unexpected type.
     """  # noqa: E501
+    if len(user_input) == 0 or not isinstance(user_input, list):
+        raise gr.Error(
+            "user input must be a list of lists, each containing [object, new_a, new_b]."  # noqa: E501
+        )
+    if not input_img:
+        raise gr.Error("input img cannot be None or empty.")
+    valid_pattern = re.compile(r"^[a-zA-Z\s]+$")
     print("before processing input:", user_input)
+
     for item in user_input:
         if len(item) != 3:
-            raise
+            raise gr.Error(
                 "Each item in user_input must be a list of [object, new_a, new_b]"
             )
+        if not item[0] or not valid_pattern.match(item[0]):
+            raise gr.Error(
+                "Object name must contain only letters and spaces and cannot be empty."
+            )
         if not isinstance(item[0], str):
            item[0] = str(item[0])
+        if not item[1]:
+            raise gr.Error("new A must be set and cannot be empty.")
         if not isinstance(item[1], int):
-
+            try:
+                item[1] = int(item[1])
+            except ValueError:
+                raise gr.Error("new A must be an integer.")
         if item[1] < 0 or item[1] > 255:
-            raise
+            raise gr.Error("new A must be in the range [0, 255]")
+        if not item[2]:
+            raise gr.Error("new B must be set and cannot be empty.")
         if not isinstance(item[2], int):
-
+            try:
+                item[2] = int(item[2])
+            except ValueError:
+                raise gr.Error("new B must be an integer.")
         if item[2] < 0 or item[2] > 255:
-            raise
+            raise gr.Error("new B must be in the range [0, 255]")
 
     print("after processing input:", user_input)
     func = modal.Function.from_name("ImageAlfred", "change_image_objects_lab")
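src/tools.py resolves the deployed Modal functions by name at call time rather than importing the GPU code, which keeps the Gradio frontend deployable on its own. A minimal sketch of that call pattern (assumes `modal setup` has been run, the ImageAlfred app has been deployed via `make deploy`, and a local test image exists at the hypothetical path below):

import modal
from PIL import Image

# Resolve the deployed function by (app name, function name), as tools.py does.
func = modal.Function.from_name("ImageAlfred", "preserve_privacy")

img = Image.open("test_3.jpg")  # hypothetical local file
result = func.remote(image_pil=img, prompt="license plate.", privacy_strength=10)
result.save("output.png")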
src/utils.py
DELETED
@@ -1,32 +0,0 @@
-import base64
-from enum import Enum
-from io import BytesIO
-
-import requests
-from PIL import Image
-
-
-def upload_image_to_tmpfiles(image):
-    """
-    Upload an image to tmpfiles.org and return the URL.
-    """
-
-    img_byte_arr = BytesIO()
-    image.save(img_byte_arr, format="PNG")
-    img_byte_arr.seek(0)
-
-    files = {"file": ("image.png", img_byte_arr, "image/png")}
-    response = requests.post("https://tmpfiles.org/api/v1/upload", files=files)
-
-    if response.status_code != 200:
-        raise ValueError(f"Upload failed: Status {response.status_code}")
-
-    data = response.json()
-    if "data" in data and "url" in data["data"]:
-        url = data["data"]["url"]
-        if not url:
-            raise ValueError("Invalid URL in response")
-        print(f"Uploaded image URL: {url}")
-        return url
-    else:
-        raise ValueError(f"Invalid response: {data}")