Spaces:
Paused
Wrapping with a decorator call: change `@spaces.GPU` to `@spaces.GPU()`
Browse files
app.py
CHANGED
@@ -20,7 +20,7 @@ def delete_model(model):
|
|
20 |
del model
|
21 |
torch.cuda.empty_cache()
|
22 |
|
23 |
-
@spaces.GPU
|
24 |
def run_language_model(edit_prompt, device):
|
25 |
language_model_id = "Qwen/Qwen1.5-0.5B-Chat"
|
26 |
language_model = AutoModelForCausalLM.from_pretrained(
|
@@ -55,7 +55,7 @@ def run_language_model(edit_prompt, device):
|
|
55 |
delete_model(language_model)
|
56 |
return (to_replace, replace_with)
|
57 |
|
58 |
-
@spaces.GPU
|
59 |
def run_image_captioner(image, device):
|
60 |
caption_model_id = "Salesforce/blip-image-captioning-base"
|
61 |
caption_model = BlipForConditionalGeneration.from_pretrained(caption_model_id).to(
|
@@ -70,7 +70,7 @@ def run_image_captioner(image, device):
|
|
70 |
delete_model(caption_model)
|
71 |
return caption
|
72 |
|
73 |
-
@spaces.GPU
|
74 |
def run_segmentation(image, object_to_segment, device):
|
75 |
# OWL-ViT for object detection
|
76 |
owl_vit_model_id = "google/owlvit-base-patch32"
|
@@ -106,7 +106,7 @@ def run_segmentation(image, object_to_segment, device):
|
|
106 |
delete_model(seg_model)
|
107 |
return masks
|
108 |
|
109 |
-
@spaces.GPU
|
110 |
def run_inpainting(image, replaced_caption, masks, device):
|
111 |
pipeline = AutoPipelineForInpainting.from_pretrained(
|
112 |
"diffusers/stable-diffusion-xl-1.0-inpainting-0.1",
|
|
|
20 |
del model
|
21 |
torch.cuda.empty_cache()
|
22 |
|
23 |
+
@spaces.GPU()
|
24 |
def run_language_model(edit_prompt, device):
|
25 |
language_model_id = "Qwen/Qwen1.5-0.5B-Chat"
|
26 |
language_model = AutoModelForCausalLM.from_pretrained(
|
|
|
55 |
delete_model(language_model)
|
56 |
return (to_replace, replace_with)
|
57 |
|
58 |
+
@spaces.GPU()
|
59 |
def run_image_captioner(image, device):
|
60 |
caption_model_id = "Salesforce/blip-image-captioning-base"
|
61 |
caption_model = BlipForConditionalGeneration.from_pretrained(caption_model_id).to(
|
|
|
70 |
delete_model(caption_model)
|
71 |
return caption
|
72 |
|
73 |
+
@spaces.GPU()
|
74 |
def run_segmentation(image, object_to_segment, device):
|
75 |
# OWL-ViT for object detection
|
76 |
owl_vit_model_id = "google/owlvit-base-patch32"
|
|
|
106 |
delete_model(seg_model)
|
107 |
return masks
|
108 |
|
109 |
+
@spaces.GPU()
|
110 |
def run_inpainting(image, replaced_caption, masks, device):
|
111 |
pipeline = AutoPipelineForInpainting.from_pretrained(
|
112 |
"diffusers/stable-diffusion-xl-1.0-inpainting-0.1",
|