sflindrs committed · verified
Commit 0fb4283 · 1 parent: 5279ec4

Create app.py

Files changed (1)
app.py +83 -0
app.py ADDED
@@ -0,0 +1,83 @@
+import gradio as gr
+from transformers import pipeline
+import os
+import spaces
+
+# Define some pre-populated vision models.
+PREDEFINED_MODELS = {
+    "ViT Base (google/vit-base-patch16-224)": "google/vit-base-patch16-224",
+    "DeiT Base (facebook/deit-base-distilled-patch16-224)": "facebook/deit-base-distilled-patch16-224",
+    "CLIP ViT Base (openai/clip-vit-base-patch32)": "openai/clip-vit-base-patch32"
+}
+
+@spaces.GPU
+def compare_vision_models(image, model1_choice, model1_custom, model2_choice, model2_custom):
+    """
+    For each model selection, use the pre-defined model identifier unless the user selects "Custom" and enters an identifier.
+    Then create an image-classification pipeline for each model and run inference on the provided image.
+    """
+    # Determine the model names to use:
+    model1_name = (
+        PREDEFINED_MODELS.get(model1_choice, model1_custom)
+        if model1_choice != "Custom" else model1_custom
+    )
+    model2_name = (
+        PREDEFINED_MODELS.get(model2_choice, model2_custom)
+        if model2_choice != "Custom" else model2_custom
+    )
+
+    # Optionally, if you deploy on a GPU-enabled Space (e.g. using ZeroGPU), you can set device=0.
+    # Here, we check an environment variable "USE_GPU" (set it to "1" in your Space's settings if needed).
+    device = 0 if os.environ.get("USE_GPU", "0") == "1" else -1
+
+    # Create pipelines. In this example we assume the models support image classification.
+    classifier1 = pipeline("image-classification", model=model1_name, device=device)
+    classifier2 = pipeline("image-classification", model=model2_name, device=device)
+
+    # Run inference
+    preds1 = classifier1(image)
+    preds2 = classifier2(image)
+
+    # Format the predictions as text (each line shows the predicted label and its confidence score)
+    result1 = "\n".join([f"{pred['label']}: {pred['score']:.3f}" for pred in preds1])
+    result2 = "\n".join([f"{pred['label']}: {pred['score']:.3f}" for pred in preds2])
+
+    return result1, result2
+
+# Build the Gradio interface using Blocks.
+with gr.Blocks(title="Vision Model Comparison Tool") as demo:
+    gr.Markdown("## Vision Model Comparison Tool\nSelect two Hugging Face vision models to compare their outputs side-by-side!")
+    with gr.Row():
+        with gr.Column():
+            gr.Markdown("### Model 1")
+            model1_choice = gr.Dropdown(
+                choices=list(PREDEFINED_MODELS.keys()) + ["Custom"],
+                label="Select a pre-defined model or 'Custom'"
+            )
+            model1_custom = gr.Textbox(
+                label="Custom Hugging Face Model",
+                placeholder="e.g., username/model_name"
+            )
+        with gr.Column():
+            gr.Markdown("### Model 2")
+            model2_choice = gr.Dropdown(
+                choices=list(PREDEFINED_MODELS.keys()) + ["Custom"],
+                label="Select a pre-defined model or 'Custom'"
+            )
+            model2_custom = gr.Textbox(
+                label="Custom Hugging Face Model",
+                placeholder="e.g., username/model_name"
+            )
+    image_input = gr.Image(label="Input Image", type="pil")
+    compare_btn = gr.Button("Compare Models")
+    with gr.Row():
+        output1 = gr.Textbox(label="Model 1 Output")
+        output2 = gr.Textbox(label="Model 2 Output")
+
+    compare_btn.click(
+        fn=compare_vision_models,
+        inputs=[image_input, model1_choice, model1_custom, model2_choice, model2_custom],
+        outputs=[output1, output2]
+    )
+
+demo.launch()
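
Note that this commit adds only app.py; for the Space to build, a requirements.txt covering the imports would also be needed. A minimal sketch, with the package list inferred from the imports above (it is not part of this commit):

# requirements.txt (hypothetical; inferred from app.py's imports)
gradio
transformers
torch        # transformers pipelines need a backend such as PyTorch
spaces       # provides the @spaces.GPU decorator on Hugging Face Spaces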
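One caveat the code itself hedges ("we assume the models support image classification"): the bundled openai/clip-vit-base-patch32 is a zero-shot model, and loading it under the plain "image-classification" task will fail. A sketch of a task-aware loader that would handle the CLIP entry; the substring check and the candidate label list are illustrative assumptions, not robust logic:

def build_classifier(model_name, device):
    # CLIP-style checkpoints are served by the zero-shot pipeline and need
    # candidate labels at call time; the name check below is a hypothetical
    # heuristic for this sketch only.
    if "clip" in model_name.lower():
        zs = pipeline("zero-shot-image-classification", model=model_name, device=device)
        return lambda img: zs(img, candidate_labels=["cat", "dog", "car", "person"])
    return pipeline("image-classification", model=model_name, device=device)

Both branches return predictions as a list of dicts with 'label' and 'score' keys, so the result-formatting lines above would work unchanged.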
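Separately, compare_vision_models rebuilds both pipelines on every click, so each comparison pays the full download and initialization cost again. A standard-library cache around a loader function would reuse them; this is a sketch, and its interaction with ZeroGPU's device handling is untested here:

from functools import lru_cache

@lru_cache(maxsize=4)
def get_classifier(model_name, device):
    # Construct each (model, device) pipeline once and reuse it
    # across button clicks.
    return pipeline("image-classification", model=model_name, device=device)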