sachin committed · 2b9ad07 · add-blend
Parent(s): dd59a2b

runway.py
CHANGED
@@ -26,34 +26,48 @@ async def root():
     """
     return {"message": "InstructPix2Pix API is running. Use POST /inpaint/ or /inpaint-with-reference/ to edit images."}
 
-def
+def prepare_guided_image(original_image: Image, reference_image: Image, mask_image: Image) -> Image:
     """
-
+    Prepare an initial image by softly blending the reference image into the masked area.
-    - Unmasked areas (white in mask)
+    - Unmasked areas (white in mask, 255) remain fully from the original image.
-    - Masked areas (black in mask) take
+    - Masked areas (black in mask, 0) take content from the reference image with soft blending.
 
     Args:
         original_image (Image): The original image (RGB).
-        reference_image (Image): The reference image to
+        reference_image (Image): The reference image to copy from (RGB).
         mask_image (Image): The mask image (grayscale, L mode).
 
     Returns:
-        Image: The blended image.
+        Image: The blended image to guide inpainting.
     """
     # Convert images to numpy arrays
     original_array = np.array(original_image)
     reference_array = np.array(reference_image)
-    mask_array = np.array(mask_image) / 255.0  # Normalize
+    mask_array = np.array(mask_image) / 255.0  # Normalize to [0, 1] for soft blending
 
-    #
+    # Expand mask to RGB channels
     mask_array = mask_array[:, :, np.newaxis]
 
-    #
+    # Softly blend: unmasked areas (1) keep original, masked areas (0) use reference
     blended_array = original_array * mask_array + reference_array * (1 - mask_array)
     blended_array = blended_array.astype(np.uint8)
 
     return Image.fromarray(blended_array)
 
+def soften_mask(mask_image: Image, softness: int = 5) -> Image:
+    """
+    Soften the edges of the mask for smoother transitions.
+
+    Args:
+        mask_image (Image): The original mask (grayscale, L mode).
+        softness (int): Size of the Gaussian blur kernel for softening edges.
+
+    Returns:
+        Image: The softened mask.
+    """
+    from PIL import ImageFilter
+    return mask_image.filter(ImageFilter.GaussianBlur(radius=softness))
+
 @app.post("/inpaint/")
 async def inpaint_image(
     image: UploadFile = File(...),
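
Both helpers added above depend only on Pillow and NumPy, so the blend convention is easy to check in isolation. The snippet below is a small sketch (not part of the commit; all values illustrative) that reproduces the same arithmetic: white (255) in the mask keeps the original pixel, black (0) takes the reference pixel, and a Gaussian blur like soften_mask() turns the hard seam into a gradual transition.

# Standalone sketch of the blend/soften behaviour (illustrative values only).
import numpy as np
from PIL import Image, ImageFilter

original = Image.new("RGB", (64, 64), (255, 0, 0))   # red stand-in for the original
reference = Image.new("RGB", (64, 64), (0, 0, 255))  # blue stand-in for the reference
mask = Image.new("L", (64, 64), 255)                 # white = keep original
mask.paste(0, (0, 0, 32, 64))                        # black left half = take reference

softened = mask.filter(ImageFilter.GaussianBlur(radius=5))  # same idea as soften_mask()

m = np.array(softened)[:, :, np.newaxis] / 255.0
blended = (np.array(original) * m + np.array(reference) * (1 - m)).astype(np.uint8)
print(blended[0, 0], blended[0, 63])  # ~[0 0 255] at the left edge, ~[255 0 0] at the right
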
@@ -104,17 +118,17 @@ async def inpaint_with_reference(
     image: UploadFile = File(...),
     mask: UploadFile = File(...),
     reference_image: UploadFile = File(...),
-    prompt: str = "
+    prompt: str = "Integrate the reference content naturally into the masked area, matching style and lighting."
 ):
     """
-    Endpoint for
+    Endpoint for replacing masked areas with reference image content, refined to look natural.
     - `image`: Original image file (PNG/JPG).
-    - `mask`: Mask file
+    - `mask`: Mask file (black for areas to replace, white for areas to keep).
-    - `reference_image`: Reference image to guide the
+    - `reference_image`: Reference image to guide the replacement (PNG/JPG).
-    - `prompt`: Text prompt
+    - `prompt`: Text prompt for inpainting refinement.
 
     Returns:
-    - The
+    - The resulting image as a PNG file.
     """
     try:
         # Load the uploaded image, mask, and reference image
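
For reference, a hypothetical client call against this endpoint (not part of the diff; the host, port, and filenames are assumptions). The three images go up as multipart uploads; because `prompt` is declared as a plain `str` with a default rather than a `Form(...)` field, FastAPI reads it from the query string.

# Hypothetical client for /inpaint-with-reference/ (URL and paths are assumptions).
import requests

with open("room.png", "rb") as img, open("mask.png", "rb") as msk, open("sofa.png", "rb") as ref:
    resp = requests.post(
        "http://localhost:8000/inpaint-with-reference/",
        files={"image": img, "mask": msk, "reference_image": ref},
        params={"prompt": "Blend the reference object naturally into the scene"},
        timeout=300,
    )
resp.raise_for_status()
with open("result.png", "wb") as out:
    out.write(resp.content)  # the endpoint streams back a PNG
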
@@ -132,11 +146,20 @@ async def inpaint_with_reference(
         if original_image.size != reference_image.size:
             reference_image = reference_image.resize(original_image.size, Image.Resampling.LANCZOS)
 
-        #
-
+        # Soften the mask for smoother transitions
+        softened_mask = soften_mask(mask_image, softness=5)
 
-        #
-
+        # Prepare the initial guided image by blending reference content into the masked area
+        guided_image = prepare_guided_image(original_image, reference_image, softened_mask)
+
+        # Perform inpainting to refine the result and make it look natural
+        result = pipe(
+            prompt=prompt,
+            image=guided_image,
+            mask_image=softened_mask,  # Use softened mask for inpainting
+            strength=0.75,  # Control how much inpainting modifies the image (0.0 to 1.0)
+            guidance_scale=7.5  # Control how closely the prompt is followed
+        ).images[0]
 
         # Convert result to bytes for response
         result_bytes = io.BytesIO()
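
The refinement step calls a module-level `pipe` that is defined earlier in runway.py and does not appear in this diff. As a rough sketch only, a diffusers inpainting pipeline with this call signature could be set up as below; the model id, dtype, and device are assumptions, not taken from the repo.

# Assumed setup for `pipe` (not in this diff); model id, dtype, and device are guesses.
import torch
from diffusers import StableDiffusionInpaintPipeline

pipe = StableDiffusionInpaintPipeline.from_pretrained(
    "runwayml/stable-diffusion-inpainting",
    torch_dtype=torch.float16,
).to("cuda")

# The endpoint then uses it as:
# pipe(prompt=..., image=guided_image, mask_image=softened_mask,
#      strength=0.75, guidance_scale=7.5).images[0]

One caveat worth noting: diffusers inpainting pipelines repaint the white region of `mask_image`, while the docstrings above treat black as the area to replace, so depending on what `pipe` actually is, the mask may need to be inverted before this call.
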
@@ -147,11 +170,11 @@ async def inpaint_with_reference(
         return StreamingResponse(
             result_bytes,
             media_type="image/png",
-            headers={"Content-Disposition": "attachment; filename=
+            headers={"Content-Disposition": "attachment; filename=natural_inpaint_image.png"}
         )
 
     except Exception as e:
-        raise HTTPException(status_code=500, detail=f"Error during inpainting
+        raise HTTPException(status_code=500, detail=f"Error during natural inpainting: {e}")
 
 if __name__ == "__main__":
     import uvicorn
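
The last hunk ends at the `import uvicorn` line, so the actual server start-up call is not visible in this diff. A typical entry point would look like the sketch below; the host and port are assumptions.

# Assumed entry point; the real host/port are not shown in this diff.
if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8000)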