Spaces: Runtime error

Commit 8feee7f · committed by hysts · Parent(s): d95697d

Add title and examples
.gitattributes CHANGED

@@ -32,3 +32,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+*.jpg filter=lfs diff=lfs merge=lfs -text
app.py CHANGED

@@ -1,3 +1,5 @@
+import pathlib
+
 import gradio as gr
 import open_clip
 import torch

@@ -8,14 +10,23 @@ model, _, transform = open_clip.create_model_and_transforms(
     "coca_ViT-L-14",
     pretrained="mscoco_finetuned_laion2B-s13B-b90k"
 )
-
 model.to(device)

+
 def output_generate(image):
     im = transform(image).unsqueeze(0).to(device)
     with torch.no_grad(), torch.cuda.amp.autocast():
         generated = model.generate(im, seq_len=20)
     return open_clip.decode(generated[0].detach()).split("<end_of_text>")[0].replace("<start_of_text>", "")

-
-
+
+paths = sorted(pathlib.Path("images").glob("*.jpg"))
+
+iface = gr.Interface(
+    fn=output_generate,
+    inputs=gr.Image(label="Input image", type="pil"),
+    outputs=gr.Text(label="Caption output"),
+    title="CoCa: Contrastive Captioners are Image-Text Foundation Models",
+    examples=[path.as_posix() for path in paths],
+)
+iface.launch()
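For reference, below is a minimal sketch of what app.py looks like after this commit, assembled from the two hunks above. The diff does not show the file's original lines 4-7, so the device-selection line is an assumption rather than part of the commit; everything else is taken directly from the hunks.

import pathlib

import gradio as gr
import open_clip
import torch

# Assumption: the device setup is not visible in the diff hunks; this line is
# a guess at what the Space likely does.
device = "cuda" if torch.cuda.is_available() else "cpu"

# Load the CoCa captioning model and its preprocessing transform.
model, _, transform = open_clip.create_model_and_transforms(
    "coca_ViT-L-14",
    pretrained="mscoco_finetuned_laion2B-s13B-b90k"
)
model.to(device)


def output_generate(image):
    # Preprocess the PIL image, add a batch dimension, and move it to the device.
    im = transform(image).unsqueeze(0).to(device)
    with torch.no_grad(), torch.cuda.amp.autocast():
        generated = model.generate(im, seq_len=20)
    # Decode the generated token ids and strip the special start/end tokens.
    return open_clip.decode(generated[0].detach()).split("<end_of_text>")[0].replace("<start_of_text>", "")


# The three JPGs added in this commit are picked up here as Gradio examples.
paths = sorted(pathlib.Path("images").glob("*.jpg"))

iface = gr.Interface(
    fn=output_generate,
    inputs=gr.Image(label="Input image", type="pil"),
    outputs=gr.Text(label="Caption output"),
    title="CoCa: Contrastive Captioners are Image-Text Foundation Models",
    examples=[path.as_posix() for path in paths],
)
iface.launch()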
images/pexels-andrea-piacquadio-3756616.jpg ADDED (Git LFS)
images/pexels-george-milton-7034219.jpg ADDED (Git LFS)
images/pexels-katarzyna-modrzejewska-1314550.jpg ADDED (Git LFS)