import re
from pathlib import Path

import gradio as gr
import modal
import numpy as np
from PIL import Image

modal_app_name = "ImageAlfred"


def remove_background(
    input_img,
) -> np.ndarray | Image.Image | str | Path | None:
    """
    Remove the background of the image.

    Args:
        input_img: Input image. May be a PIL image, a URL string, or a base64-encoded string. Cannot be None.

    Returns:
        Image.Image: The modified image with the background removed.
    """  # noqa: E501
    if input_img is None:
        raise gr.Error("Input image cannot be None.")

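    # Look up the deployed Modal function by app and function name, then invoke
    # it remotely; .remote() blocks until the server returns the processed image.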
    func = modal.Function.from_name(modal_app_name, "remove_background")
    output_pil = func.remote(
        image_pil=input_img,
    )

    if output_pil is None:
        raise gr.Error("Received None from server.")
    if not isinstance(output_pil, Image.Image):
        raise gr.Error(
            f"Expected Image.Image from server function, got {type(output_pil)}"
        )

    return output_pil


def privacy_preserve_image(
    input_img,
    input_prompt,
    privacy_strength: int = 15,
    threshold: float = 0.2,
) -> np.ndarray | Image.Image | str | Path | None:
    """
    Obscures specified objects in the input image based on a natural language prompt, using a privacy-preserving blur or distortion effect.

    This function segments the image to detect objects described in the `input_prompt` and applies a pixelation effect to those regions. It is useful in scenarios where sensitive content (e.g., faces, license plates, logos,
    personal belongings) needs to be hidden before sharing or publishing images.

    Args:
        input_img: Input image. May be a PIL image, a URL string, or a base64-encoded string. Cannot be None.
        input_prompt (str): Objects to obscure, given as a dot-separated string of short phrases. Each phrase may be a single word or a few words (e.g., "left person face", "license plate"); keep phrases as short as possible and avoid symbols or punctuation other than the separating dots.
        privacy_strength (int): Strength of the privacy-preserving effect. Higher values result in stronger pixelation. Default is 15.
        threshold (float): Model threshold for detecting objects, between 0.01 and 0.99. Default is 0.2. A lower threshold is recommended for detecting smaller objects, small regions, or faces.

    Returns:
        Image.Image: The modified image with the specified regions obscured.

    Example:
        input_prompt = "face. right car. blue shirt."
    """  # noqa: E501
    if input_img is None:
        raise gr.Error("Input image cannot be None.")
    if not input_prompt or input_prompt.strip() == "":
        raise gr.Error("Input prompt cannot be None or empty.")
    if threshold < 0.01 or threshold > 0.99:
        raise gr.Error("Threshold must be between 0.01 and 0.99.")
    prompts = [
        prompt.strip() for prompt in input_prompt.split(".") if prompt.strip()
    ]

    func = modal.Function.from_name(modal_app_name, "preserve_privacy")
    output_pil = func.remote(
        image_pil=input_img,
        prompts=prompts,
        privacy_strength=privacy_strength,
        threshold=threshold,
    )

    if output_pil is None:
        raise gr.Error("Received None from server.")
    if not isinstance(output_pil, Image.Image):
        raise gr.Error(
            f"Expected Image.Image from server function, got {type(output_pil)}"
        )

    return output_pil


def change_color_objects_hsv(
    input_img,
    user_input,
) -> np.ndarray | Image.Image | str | Path | None:
    """
    Changes the hue and saturation of specified objects in an image using the HSV color space.
    This function segments image regions based on a user-provided text prompt and applies
    color transformations in the HSV color space. HSV separates chromatic content (hue) from
    intensity (value), making it more intuitive for color manipulation tasks.
    Use this method when:
    - You want to change the color of objects based on their hue and saturation.
    - You want to apply color transformations that are less influenced by lighting conditions or brightness variations.

    Args:
        input_img: Input image. May be a PIL image, a URL string, or a base64-encoded string. Cannot be None.
        user_input: A list of target specifications for color transformation. Each inner list must contain exactly four elements, in order:
            1. target_object (str): A short, human-readable description of the object to be modified. Multi-word descriptions are allowed for disambiguation (e.g., "right person shirt"), but they must be concise and free of punctuation, symbols, or special characters.
            2. Red (int): Desired red value in RGB color space, 0 to 255.
            3. Green (int): Desired green value in RGB color space, 0 to 255.
            4. Blue (int): Desired blue value in RGB color space, 0 to 255.
            Example: user_input = [["hair", 30, 55, 255], ["shirt", 70, 0, 157]]

    Returns:
        Image.Image: The modified image with the requested color changes applied.

    Raises:
        gr.Error: If user_input or input_img is missing or malformed.
        ValueError: If the modal remote function returns None.
        TypeError: If the modal remote function returns an unexpected type.
    """  # noqa: E501
    if not isinstance(user_input, list) or len(user_input) == 0:
        raise gr.Error(
            "user_input must be a list of lists, each containing [object, red, green, blue]."  # noqa: E501
        )
    if input_img is None:
        raise gr.Error("input_img cannot be None.")

    print("before processing input:", user_input)
    valid_pattern = re.compile(r"^[a-zA-Z\s]+$")
    for item in user_input:
        if len(item) != 4:
            raise gr.Error(
                "Each item in user_input must be a list of [object, red, green, blue]"  # noqa: E501
            )
        if not isinstance(item[0], str):
            item[0] = str(item[0])
        if not item[0] or not valid_pattern.match(item[0]):
            raise gr.Error(
                "Object name must contain only letters and spaces and cannot be empty."
            )

        try:
            item[1] = int(item[1])
        except (TypeError, ValueError):
            raise gr.Error("Red must be an integer.")
        if item[1] < 0 or item[1] > 255:
            raise gr.Error("Red must be in the range [0, 255]")

        try:
            item[2] = int(item[2])
        except (TypeError, ValueError):
            raise gr.Error("Green must be an integer.")
        if item[2] < 0 or item[2] > 255:
            raise gr.Error("Green must be in the range [0, 255]")

        try:
            item[3] = int(item[3])
        except (TypeError, ValueError):
            raise gr.Error("Blue must be an integer.")
        if item[3] < 0 or item[3] > 255:
            raise gr.Error("Blue must be in the range [0, 255]")

    print("after processing input:", user_input)

    func = modal.Function.from_name(modal_app_name, "change_image_objects_hsv")
    output_pil = func.remote(image_pil=input_img, targets_config=user_input)

    if output_pil is None:
        raise ValueError("Received None from modal remote function.")
    if not isinstance(output_pil, Image.Image):
        raise TypeError(
            f"Expected Image.Image from modal remote function, got {type(output_pil)}"
        )

    return output_pil


def change_color_objects_lab(
    input_img,
    user_input,
) -> np.ndarray | Image.Image | str | Path | None:
    """
    Changes the color of specified objects in an image using the LAB color space.

    This function segments image regions based on a user-provided text prompt and applies
    color transformations in the LAB color space. LAB separates luminance (L) from color
    components (A for green-red, B for blue-yellow), making it more perceptually uniform
    and closer to how humans perceive color differences.

    Use this method when:
    - Precise perceptual color control is needed (e.g., subtle shifts in tone or matching
      specific brand colors).
    - Working in lighting-sensitive tasks where separating lightness from chroma improves quality.
    - You want color transformations that are less influenced by lighting conditions or
      brightness variations.

    OpenCV LAB Ranges:
        - L: 0-255 (lightness)
        - A: 0-255 (green-red, 128 = neutral)
        - B: 0-255 (blue-yellow, 128 = neutral)

    Common LAB color references:
        - Green: (L=?, A≈80, B≈128)
        - Red: (L=?, A≈180, B≈160)
        - Blue: (L=?, A≈128, B≈80)
        - Yellow: (L=?, A≈120, B≈180)
        - Purple: (L=?, A≈180, B≈100)

    Args:
        input_img: Input image. May be a PIL image, a URL string, or a base64-encoded string. Cannot be None.
        user_input: A list of color transformation instructions, each a three-element list [object_name (str), new_a (int, 0-255), new_b (int, 0-255)]:
            - object_name: A short, unique identifier for the object to be recolored. Multi-word names are allowed for specificity (e.g., "right person shirt") but must be free of punctuation or special symbols.
            - new_a: The desired 'a' channel value in LAB space (green-red axis, 0-255, with 128 as neutral).
            - new_b: The desired 'b' channel value in LAB space (blue-yellow axis, 0-255, with 128 as neutral).
            Each object must appear only once in the list. Example: [["hair", 80, 128], ["right person shirt", 180, 160]]

    Returns:
        Image.Image: The modified image with the requested color changes applied.

    Raises:
        gr.Error: If user_input or input_img is missing or malformed, or a/b values are outside the [0, 255] range.
        ValueError: If the modal remote function returns None.
        TypeError: If the modal remote function returns an unexpected type.
    """  # noqa: E501
    if not isinstance(user_input, list) or len(user_input) == 0:
        raise gr.Error(
            "user_input must be a list of lists, each containing [object, new_a, new_b]."  # noqa: E501
        )
    if input_img is None:
        raise gr.Error("input_img cannot be None.")
    valid_pattern = re.compile(r"^[a-zA-Z\s]+$")
    print("before processing input:", user_input)

    for item in user_input:
        if len(item) != 3:
            raise gr.Error(
                "Each item in user_input must be a list of [object, new_a, new_b]"
            )
        if not isinstance(item[0], str):
            item[0] = str(item[0])
        if not item[0] or not valid_pattern.match(item[0]):
            raise gr.Error(
                "Object name must contain only letters and spaces and cannot be empty."
            )
        if item[1] is None or item[1] == "":
            raise gr.Error("new A must be set and cannot be empty.")
        try:
            item[1] = int(item[1])
        except (TypeError, ValueError):
            raise gr.Error("new A must be an integer.")
        if item[1] < 0 or item[1] > 255:
            raise gr.Error("new A must be in the range [0, 255]")
        if item[2] is None or item[2] == "":
            raise gr.Error("new B must be set and cannot be empty.")
        try:
            item[2] = int(item[2])
        except (TypeError, ValueError):
            raise gr.Error("new B must be an integer.")
        if item[2] < 0 or item[2] > 255:
            raise gr.Error("new B must be in the range [0, 255]")

    print("after processing input:", user_input)
    func = modal.Function.from_name(modal_app_name, "change_image_objects_lab")
    output_pil = func.remote(image_pil=input_img, targets_config=user_input)
    if output_pil is None:
        raise ValueError("Received None from modal remote function.")
    if not isinstance(output_pil, Image.Image):
        raise TypeError(
            f"Expected Image.Image from modal remote function, got {type(output_pil)}"
        )

    return output_pil
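

# --- Example usage (sketch) --------------------------------------------------
# A minimal local smoke test, assuming the Modal app "ImageAlfred" is already
# deployed with the remote functions referenced above and that Modal
# credentials are configured locally. "input.jpg" and the output file names
# are placeholders for any local paths.
if __name__ == "__main__":
    sample = Image.open("input.jpg")  # hypothetical local image

    # Background removal needs no extra parameters.
    remove_background(sample).save("no_background.png")

    # Dot-separated prompt: one short phrase per object to obscure.
    privacy_preserve_image(
        sample, "face. license plate.", privacy_strength=20, threshold=0.2
    ).save("privacy.png")

    # HSV recolor: one [object, red, green, blue] entry per target.
    change_color_objects_hsv(sample, [["hair", 30, 55, 255]]).save("hsv.png")

    # LAB recolor: one [object, new_a, new_b] entry per target (128 = neutral).
    change_color_objects_lab(sample, [["shirt", 180, 160]]).save("lab.png")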