from transformers import ViTFeatureExtractor, ViTForImageClassification
import torch
import gradio as gr
from PIL import Image

# Load the ImageNet-1k pretrained ViT-Base/16 checkpoint and its preprocessor.
# (ViTFeatureExtractor is the older name for what newer transformers releases call ViTImageProcessor.)
feature_extractor = ViTFeatureExtractor.from_pretrained('google/vit-base-patch16-224')
model = ViTForImageClassification.from_pretrained('google/vit-base-patch16-224')


def classify_image(image):
    """Return a {label: probability} dict for the top-10 ImageNet classes."""
    model.eval()
    with torch.no_grad():
        inputs = feature_extractor(images=image, return_tensors="pt")
        outputs = model(**inputs)
        logits = outputs.logits
        prob = torch.nn.functional.softmax(logits, dim=1)

        top10_prob, top10_indices = torch.topk(prob, 10)
        top10_confidences = {}
        for i in range(10):
            label = model.config.id2label[int(top10_indices[0][i])]
            top10_confidences[label] = float(top10_prob[0][i])

    return top10_confidences


with gr.Blocks(title="ViT ImageNet Classification - ClassCat",
               css=".gradio-container {background:mintcream;}") as demo:
    gr.HTML("""
    ViT - ImageNet Classification
    """)

    with gr.Row():
        # Gradio (3.x `shape` argument) crops/resizes the uploaded image to 224x224.
        input_image = gr.Image(type="pil", image_mode="RGB", shape=(224, 224))
        # The Label component displays only the 3 highest-scoring classes.
        output_label = gr.Label(label="Probabilities", num_top_classes=3)

    send_btn = gr.Button("Infer")
    send_btn.click(fn=classify_image, inputs=input_image, outputs=output_label)

# demo.queue(concurrency_count=3)
demo.launch(debug=True)