ciover2024 committed: Update app.py
app.py CHANGED
@@ -32,9 +32,10 @@ import shutil
 from gradio_client import Client, handle_file

 # Load the model once globally to avoid repeated loading
+"""
 def load_inpainting_model():
     # Load pipeline
-    model_path = "urpmv13Inpainting.safetensors"
+    #model_path = "urpmv13Inpainting.safetensors"
     #model_path = "uberRealisticPornMerge_v23Inpainting.safetensors"
     #model_path = "pornmasterFantasy_v4-inpainting.safetensors"
     #model_path = "pornmasterAmateur_v6Vae-inpainting.safetensors"
@@ -45,7 +46,7 @@ def load_inpainting_model():
         safety_checker=None
     ).to(device)
     return pipe
-
+"""
 """
 # Load the model once globally to avoid repeated loading
 def load_upscaling_model():
@@ -65,7 +66,7 @@ def load_upscaling_model():
 """

 # Preload the model once
-inpaint_pipeline = load_inpainting_model()
+#inpaint_pipeline = load_inpainting_model()
 # Preload the model once
 #upscale_pipeline = load_upscaling_model()

@@ -95,7 +96,7 @@ def resize_to_match(input_image, output_image):

 # Function to generate the mask using Florence SAM Masking API (Replicate)
 def generate_mask(image_path, text_prompt="clothing"):
-    client_sam = Client("SkalskiP/florence-sam
+    client_sam = Client("SkalskiP/florence-sam")
     mask_result = client_sam.predict(
         image_input=handle_file(image_path),  # Provide your image path here
         text_input=text_prompt,  # Use "clothing" as the prompt
@@ -121,11 +122,11 @@ def inpaint_image(input_image, mask_image):
     blurred_mask_image = inpaint_pipeline.mask_processor.blur(initial_mask_image,blur_factor=10)
     result = inpaint_pipeline(prompt=prompt, negative_prompt=negative_prompt, height=IMAGE_SIZE[0], width=IMAGE_SIZE[0], image=initial_input_image, mask_image=blurred_mask_image, padding_mask_crop=32)
     """
-    blurred_mask_image = inpaint_pipeline.mask_processor.blur(mask_image,blur_factor=10)
-    result = inpaint_pipeline(prompt=prompt, negative_prompt=negative_prompt, image=input_image, mask_image=blurred_mask_image, padding_mask_crop=10)
-    inpainted_image = result.images[0]
+    #blurred_mask_image = inpaint_pipeline.mask_processor.blur(mask_image,blur_factor=10)
+    #result = inpaint_pipeline(prompt=prompt, negative_prompt=negative_prompt, image=input_image, mask_image=blurred_mask_image, padding_mask_crop=10)
+    #inpainted_image = result.images[0]
     #inpainted_image = resize_to_match(input_image, inpainted_image)
-    return
+    return mask_image

 # Function to process input image and mask
 def process_image(input_image):
@@ -135,7 +136,8 @@ def process_image(input_image):

     # Generate the mask using Florence SAM API
     mask_local_path = generate_mask(image_path=input_image_path)
-
+    mask_local_path1 = mask_local_path[0]
+
     # Save the generated mask
     mask_image_path = "generated_mask.png"
     save_mask(mask_local_path, save_path=mask_image_path)