File size: 6,310 Bytes
748425c cc8f276 748425c cc8f276 748425c 9c43fab 748425c 9c43fab 748425c cc8f276 748425c cc8f276 748425c cc8f276 748425c cc8f276 748425c cc8f276 748425c cc8f276 748425c |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 |
from pathlib import Path
import modal
import numpy as np
from PIL import Image
from utils import upload_image_to_tmpfiles
modal_app_name = "ImageAlfred"
def privacy_preserve_image(
    input_img,
    input_prompt,
) -> "Image.Image":
    """Obscure the objects named in ``input_prompt`` within the input image.

    Args:
        input_img (Image.Image): Input image to process.
        input_prompt (list[str]): Names of objects to obscure,
            e.g. ``["face", "license plate"]``.

    Returns:
        Image.Image: The modified image with the requested objects obscured.

    Raises:
        ValueError: If the remote function returns ``None``.
        TypeError: If the remote function returns a non-image result.
    """
    # Resolve the deployed Modal function by app/function name; use the
    # module-level constant so the app name is defined in exactly one place.
    func = modal.Function.from_name(modal_app_name, "preserve_privacy")
    # All heavy lifting (segmentation + blurring) happens server-side.
    output_pil = func.remote(image_pil=input_img, prompt=input_prompt)
    if output_pil is None:
        raise ValueError("Received None from modal remote function.")
    if not isinstance(output_pil, Image.Image):
        raise TypeError(
            f"Expected Image.Image from modal remote function, got {type(output_pil)}"
        )
    return output_pil
def change_color_objects_hsv(
    input_img,
    user_input,
) -> "Image.Image":
    """Changes the hue and saturation of specified objects in an image.

    Segments objects based on text prompts and alters their color in the HSV
    color space. The HSV color space uses OpenCV ranges: H (0-179), S (0-255),
    V (0-255). Common color examples include Green (hue=60), Red (hue=0),
    Blue (hue=120), Yellow (hue=30), and Purple (hue=150), all with
    saturation=255.

    Args:
        input_img: Input image (PIL image, Base64-encoded string, or URL
            string). Cannot be None.
        user_input (list[list[str | int | float]]): Each inner list is
            [target object name (str), hue (0-179), saturation scale (> 0)].
            Example: [["hair", 30, 1.2], ["shirt", 60, 1.0]].

    Returns:
        Image.Image: The recolored image returned by the remote function.

    Raises:
        ValueError: If user_input format is invalid, a hue value is outside
            [0, 179], a saturation scale is not positive, or the remote call
            returns None.
        TypeError: If the remote function returns a non-image result.
    """
    print("before processing input:", user_input)
    # Normalize and validate each [object, hue, saturation_scale] triple in
    # place before shipping the config to the remote worker, so malformed
    # requests fail fast and cheaply on the client side.
    for item in user_input:
        if len(item) != 3:
            raise ValueError(
                "Each item in user_input must be a list of [object, hue, saturation_scale]"  # noqa: E501
            )
        if not isinstance(item[0], str):
            item[0] = str(item[0])
        if not isinstance(item[1], (int, float)):
            item[1] = float(item[1])
        if not 0 <= item[1] <= 179:
            raise ValueError("Hue must be in the range [0, 179]")
        if not isinstance(item[2], (int, float)):
            item[2] = float(item[2])
        if item[2] <= 0:
            raise ValueError("Saturation scale must be greater than 0")
    print("after processing input:", user_input)
    # Use the module-level app-name constant instead of a hard-coded string.
    func = modal.Function.from_name(modal_app_name, "change_image_objects_hsv")
    output_pil = func.remote(image_pil=input_img, targets_config=user_input)
    if output_pil is None:
        raise ValueError("Received None from modal remote function.")
    if not isinstance(output_pil, Image.Image):
        raise TypeError(
            f"Expected Image.Image from modal remote function, got {type(output_pil)}"
        )
    return output_pil
def change_color_objects_lab(
    input_img,
    user_input,
) -> "Image.Image":
    """Changes the color of specified objects in an image using LAB color space.

    Segments objects based on text prompts and alters their color in the LAB
    color space. The LAB color space uses OpenCV ranges: L (0-255, lightness),
    A (0-255, green-red, 128 is neutral), B (0-255, blue-yellow, 128 is neutral).
    Common color examples include Green (a=80, b=128), Red (a=180, b=160),
    Blue (a=128, b=80), Yellow (a=120, b=180), and Purple (a=180, b=100).

    Args:
        input_img: Input image (PIL image, Base64-encoded string, or URL
            string). Cannot be None.
        user_input (list[list[str | int | float]]): Each inner list is
            [target object name (str), new_a (0-255), new_b (0-255)].
            Example: [["hair", 80, 128], ["shirt", 180, 160]].

    Returns:
        Image.Image: The recolored image returned by the remote function.

    Raises:
        ValueError: If user_input format is invalid, an a/b value is outside
            [0, 255], or the remote call returns None.
        TypeError: If the remote function returns a non-image result.
    """
    print("before processing input:", user_input)
    # Normalize and validate each [object, new_a, new_b] triple in place
    # before shipping the config to the remote worker, so malformed requests
    # fail fast and cheaply on the client side.
    for item in user_input:
        if len(item) != 3:
            raise ValueError(
                "Each item in user_input must be a list of [object, new_a, new_b]"
            )
        if not isinstance(item[0], str):
            item[0] = str(item[0])
        if not isinstance(item[1], int):
            item[1] = int(item[1])
        if not 0 <= item[1] <= 255:
            raise ValueError("new A must be in the range [0, 255]")
        if not isinstance(item[2], int):
            item[2] = int(item[2])
        if not 0 <= item[2] <= 255:
            raise ValueError("new B must be in the range [0, 255]")
    print("after processing input:", user_input)
    # Use the module-level app-name constant instead of a hard-coded string.
    func = modal.Function.from_name(modal_app_name, "change_image_objects_lab")
    output_pil = func.remote(image_pil=input_img, targets_config=user_input)
    if output_pil is None:
        raise ValueError("Received None from modal remote function.")
    if not isinstance(output_pil, Image.Image):
        raise TypeError(
            f"Expected Image.Image from modal remote function, got {type(output_pil)}"
        )
    return output_pil
if __name__ == "__main__":
    # Manual smoke test: recolor hair and shirt in the bundled sample image.
    sample_img = Image.open("./src/assets/test_image.jpg")
    targets = [["hair", 30, 1.2], ["shirt", 60, 1.0]]
    change_color_objects_hsv(input_img=sample_img, user_input=targets)
|