sachin committed
Commit · b8546a6
1 Parent(s): 19259ed
inpaint-mask
runway.py CHANGED
@@ -110,23 +110,39 @@ def fit_image_to_mask(original_image: Image, reference_image: Image, mask_x1: in
 @app.post("/inpaint/")
 async def inpaint_image(
     image: UploadFile = File(...),
-    ...
-    mask_x1: int = 100,
-    mask_y1: int = 100,
-    mask_x2: int = 200,
-    mask_y2: int = 200
+    mask: UploadFile = File(...),
+    prompt: str = "Fill the masked area with appropriate content."
 ):
     """
-    Endpoint for image inpainting using a text prompt and ...
+    Endpoint for image inpainting using a text prompt and an uploaded mask.
+    - `image`: Original image file (PNG/JPG).
+    - `mask`: Mask file indicating areas to inpaint (white for masked areas, black for unmasked).
+    - `prompt`: Text prompt describing the desired output.
+
+    Returns:
+    - The inpainted image as a PNG file.
     """
     try:
+        # Load the uploaded image and mask
         image_bytes = await image.read()
+        mask_bytes = await mask.read()
+
         original_image = Image.open(io.BytesIO(image_bytes)).convert("RGB")
-        mask_image = ...
+        mask_image = Image.open(io.BytesIO(mask_bytes)).convert("L")
+
+        # Ensure dimensions match between image and mask
+        if original_image.size != mask_image.size:
+            raise HTTPException(status_code=400, detail="Image and mask dimensions must match.")
+
+        # Perform inpainting using the pipeline
         result = pipe(prompt=prompt, image=original_image, mask_image=mask_image).images[0]
+
+        # Convert result to bytes for response
         result_bytes = io.BytesIO()
         result.save(result_bytes, format="PNG")
         result_bytes.seek(0)
+
+        # Return the image as a streaming response
         return StreamingResponse(
             result_bytes,
             media_type="image/png",
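The changed code calls a module-level `pipe` object with `prompt`, `image`, and `mask_image`; its construction is not part of this diff. A minimal sketch of how such a pipeline could be set up, assuming the diffusers `StableDiffusionInpaintPipeline` and the `runwayml/stable-diffusion-inpainting` checkpoint (the checkpoint name is only a guess based on the file name `runway.py`):

import torch
from diffusers import StableDiffusionInpaintPipeline

# Assumed setup, not shown in the diff: load an inpainting pipeline whose
# call signature matches pipe(prompt=..., image=..., mask_image=...).
pipe = StableDiffusionInpaintPipeline.from_pretrained(
    "runwayml/stable-diffusion-inpainting",  # checkpoint name is an assumption
    torch_dtype=torch.float16,
)
pipe = pipe.to("cuda")  # fall back to CPU (and float32) if no GPU is available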
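With the new contract, a caller uploads both the image and a grayscale mask (white where content should be regenerated, black where it should be kept) alongside the prompt. A hypothetical client sketch using `requests`; the server URL, port, and file names are assumptions, and `prompt` is sent as a query parameter because the endpoint declares it as a plain string default rather than a form field:

import io

import requests
from PIL import Image, ImageDraw

# Build a mask in the format the docstring describes: white = inpaint, black = keep.
original = Image.open("photo.png").convert("RGB")                # hypothetical input file
mask = Image.new("L", original.size, 0)                          # start fully black (keep everything)
ImageDraw.Draw(mask).rectangle((100, 100, 200, 200), fill=255)   # white box marks the region to inpaint

mask_buf = io.BytesIO()
mask.save(mask_buf, format="PNG")
mask_buf.seek(0)

resp = requests.post(
    "http://localhost:8000/inpaint/",                            # assumed local uvicorn address
    params={"prompt": "a wooden bench in a park"},
    files={
        "image": ("photo.png", open("photo.png", "rb"), "image/png"),
        "mask": ("mask.png", mask_buf, "image/png"),
    },
)
resp.raise_for_status()
Image.open(io.BytesIO(resp.content)).save("inpainted.png")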
@@ -146,7 +162,7 @@ async def inpaint_with_reference(
     mask_y2: int = 200
 ):
     """
-    Endpoint for replacing masked areas with reference image content, refined to look natural.
+    Endpoint for replacing masked areas with reference image content, refined to look natural, using an autogenerated mask.
     """
     try:
         image_bytes = await image.read()
@@ -190,7 +206,7 @@ async def fit_image_to_mask(
     mask_y2: int = 200
 ):
     """
-    Endpoint for fitting a reference image into a masked region of the original image, refined to look natural.
+    Endpoint for fitting a reference image into a masked region of the original image, refined to look natural, using an autogenerated mask.
     """
     try:
         # Load the uploaded images
@@ -202,7 +218,7 @@ async def fit_image_to_mask(
         # Fit the reference image into the masked region
         result = fit_image_to_mask(original_image, reference_image, mask_x1, mask_y1, mask_x2, mask_y2)
         if not isinstance(result, tuple) or len(result) != 2:
-            raise ValueError(f"Expected tuple of (guided_image, mask_image), got {type(result)}")
+            raise ValueError(f"Expected tuple of (guided_image, mask_image), got {type(result)}: {result}")
         guided_image, mask_image = result

         # Soften the mask for smoother transitions
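This hunk only extends the error message at the call site; `fit_image_to_mask` itself is defined earlier in runway.py and is not shown in this change. As a rough, illustrative sketch of a helper with that call signature and the `(guided_image, mask_image)` return shape (the resizing and compositing choices below are assumptions, not the Space's actual implementation):

from PIL import Image

def fit_image_to_mask_sketch(original_image: Image.Image, reference_image: Image.Image,
                             mask_x1: int, mask_y1: int, mask_x2: int, mask_y2: int):
    """Illustrative only: paste the reference into the mask box and return (guided_image, mask_image)."""
    box_w, box_h = mask_x2 - mask_x1, mask_y2 - mask_y1

    # Resize the reference to fill the masked rectangle (aspect-ratio handling is an assumption).
    fitted = reference_image.resize((box_w, box_h))

    # Guided image: the original with the reference pasted into the masked region.
    guided_image = original_image.copy()
    guided_image.paste(fitted, (mask_x1, mask_y1))

    # Mask: white where the pipeline should refine or regenerate, black elsewhere.
    mask_image = Image.new("L", original_image.size, 0)
    mask_image.paste(255, (mask_x1, mask_y1, mask_x2, mask_y2))

    return guided_image, mask_image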