Update app.py
Browse files
app.py
CHANGED
@@ -10,7 +10,8 @@ dmodel = DetrForObjectDetection.from_pretrained('facebook/detr-resnet-50')
|
|
10 |
model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
|
11 |
processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
|
12 |
|
13 |
-
|
|
|
14 |
o1 = gr.outputs.Image()
|
15 |
o2 = gr.outputs.Textbox()
|
16 |
|
@@ -67,6 +68,6 @@ def extract_image(image, text, num=1):
|
|
67 |
|
68 |
title = "ClipnCrop"
|
69 |
description = "Extract sections of images from your image by using OpenAI's CLIP and Facebooks Detr implemented on HuggingFace Transformers"
|
70 |
-
examples=[['ex1.jpg'],['ex2.jpg']]
|
71 |
article = "<p style='text-align: center'>"
|
72 |
-
gr.Interface(fn=extract_image, inputs=
|
|
|
# --- ClipnCrop Gradio app (top-level wiring; reconstructed from diff view) ---
# Loads OpenAI CLIP for text-image matching; the DETR detector (dmodel) is
# loaded earlier in the file. Both downloads happen at import time.
model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

# Gradio I/O components: an image plus a free-text query in, and the best
# matching cropped region plus a textbox (e.g. score/label) out.
# NOTE(review): gr.inputs / gr.outputs / enable_queue are the legacy
# pre-3.x Gradio API — confirm the pinned gradio version before upgrading.
i1 = gr.inputs.Image()
i2 = gr.inputs.Textbox()
o1 = gr.outputs.Image()
o2 = gr.outputs.Textbox()

# (def extract_image(image, text, num=1): ... is defined between these
# sections; elided in this diff view.)

# UI copy and worked examples shown on the demo page.
title = "ClipnCrop"
description = "Extract sections of images from your image by using OpenAI's CLIP and Facebooks Detr implemented on HuggingFace Transformers"
examples = [['ex1.jpg', 'woman in green dress'], ['ex2.jpg', 'man in red dress']]
article = "<p style='text-align: center'>"

# Wire everything together and serve; enable_queue serializes requests so
# concurrent users don't contend for the models.
gr.Interface(
    fn=extract_image,
    inputs=[i1, i2],
    outputs=[o1, o2],
    title=title,
    description=description,
    article=article,
    examples=examples,
    enable_queue=True,
).launch()
|