Upload 6 files
- .gitattributes +1 -0
- README.md +14 -12
- app.py +59 -0
- baklava.jpg +0 -0
- cat.png +3 -0
- cheetah.jpg +0 -0
- requirements.txt +3 -0
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+cat.png filter=lfs diff=lfs merge=lfs -text
README.md
CHANGED
@@ -1,12 +1,14 @@
----
-title: Test
-emoji:
-colorFrom:
-colorTo:
-sdk: gradio
-sdk_version: 4.44.0
-app_file: app.py
-pinned: false
----
-
-
+---
+title: Clip Test
+emoji: π
+colorFrom: blue
+colorTo: pink
+sdk: gradio
+sdk_version: 4.44.0
+app_file: app.py
+pinned: false
+license: apache-2.0
+short_description: test zero-shot image classification models
+---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py
ADDED
@@ -0,0 +1,59 @@
+from transformers import pipeline
+import gradio as gr
+
+
+clip_models = [
+    "zer0int/CLIP-GmP-ViT-L-14",
+    "John6666/zer0int_CLIP-GmP-ViT-L-14",
+    "openai/clip-vit-large-patch14",
+    "laion/CLIP-ViT-bigG-14-laion2B-39B-b160k",
+]
+
+clip_checkpoint = clip_models[0]
+clip_detector = pipeline(model=clip_checkpoint, task="zero-shot-image-classification")
+
+def postprocess(output):
+    return {out["label"]: float(out["score"]) for out in output}
+
+
+def infer(image, candidate_labels):
+    candidate_labels = [label.lstrip(" ") for label in candidate_labels.split(",")]
+    clip_out = clip_detector(image, candidate_labels=candidate_labels)
+    return postprocess(clip_out)
+
+
+def load_clip_model(modelname):
+    global clip_detector
+    try:
+        clip_detector = pipeline(model=modelname, task="zero-shot-image-classification")
+    except Exception as e:
+        raise gr.Error(f"Model load error: {modelname} {e}")
+    return modelname
+
+
+with gr.Blocks() as demo:
+    gr.Markdown("# Test CLIP")
+    with gr.Row():
+        with gr.Column():
+            image_input = gr.Image(type="pil")
+            text_input = gr.Textbox(label="Input a list of labels")
+            model_input = gr.Dropdown(label="CLIP model", choices=clip_models, value=clip_models[0], allow_custom_value=True, interactive=True)
+            run_button = gr.Button("Run", visible=True)
+
+        with gr.Column():
+            clip_output = gr.Label(label="CLIP Output", num_top_classes=3)
+
+    examples = [["./baklava.jpg", "baklava, souffle, tiramisu"], ["./cheetah.jpg", "cat, dog"], ["./cat.png", "cat, dog"]]
+    gr.Examples(
+        examples=examples,
+        inputs=[image_input, text_input],
+        outputs=[clip_output],
+        fn=infer,
+        cache_examples=True
+    )
+    run_button.click(fn=infer,
+                     inputs=[image_input, text_input],
+                     outputs=[clip_output])
+    model_input.change(load_clip_model, [model_input], [model_input])
+
+demo.launch()
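For context on the zero-shot-image-classification pipeline the app wraps, here is a minimal standalone sketch (not part of the commit) of what clip_detector(...) returns and why postprocess() reshapes it for gr.Label. It reuses the openai/clip-vit-large-patch14 checkpoint and the cat.png example image already present in this Space.

# Minimal sketch: inspect the raw pipeline output for one example image.
from PIL import Image
from transformers import pipeline

detector = pipeline(model="openai/clip-vit-large-patch14",
                    task="zero-shot-image-classification")
result = detector(Image.open("cat.png"), candidate_labels=["cat", "dog"])
# `result` is a list of {"score": float, "label": str} dicts sorted by score, e.g.
# [{"score": 0.98, "label": "cat"}, {"score": 0.02, "label": "dog"}]
# postprocess() turns this into the {label: score} mapping that gr.Label expects:
print({out["label"]: float(out["score"]) for out in result})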
baklava.jpg
ADDED
cat.png
ADDED
(stored via Git LFS)
cheetah.jpg
ADDED
requirements.txt
ADDED
@@ -0,0 +1,3 @@
+git+https://github.com/huggingface/transformers.git
+sentencepiece
+torch
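A note on the dependency list (an observation, not part of the commit): transformers is installed from its GitHub main branch rather than a PyPI release, and gradio itself is not listed because, on Spaces, the sdk: gradio / sdk_version: 4.44.0 fields in README.md determine the Gradio version; torch supplies the backend the pipeline runs on.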