import os
import torch
from PIL import Image
from torchvision import transforms
import gradio as gr
#https://huggingface.co/spaces/yuhe6/final_project/blob/main/Net_Rotate9.pth
#os.system("wget https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt")
#model = torch.hub.load('huawei-noah/ghostnet', 'ghostnet_1x', pretrained=True)
#model = torch.jit.load('https://huggingface.co/spaces/yuhe6/final_project/blob/main/Net_Rotate9.pth').eval().to(device)
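# Load the TorchScript-compiled classifier on CPU and switch it to inference mode.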
model = torch.jit.load('Net2_Flip_jit.pt', map_location=torch.device('cpu'))
model.eval()
#torch.hub.download_url_to_file('https://huggingface.co/spaces/yuhe6/final_project/blob/main/Net_Rotate9.pth', '/tmp/temporary_file')
#model = torch.hub.load('/tmp', 'temporary_file', pretrained=True)
#model.eval()
# Download an example image from the pytorch website
torch.hub.download_url_to_file("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
def inference(input_image):
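    """Classify a PIL image as cat vs. dog and return a {label: probability} dict."""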
    preprocess = transforms.Compose([
        transforms.Resize(size=(256, 256)),  # fixed (256, 256) resize instead of transforms.Resize(256)
        #transforms.CenterCrop(224),
        transforms.ToTensor(),
        #transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])
    # (Input and batch shapes were checked during development with print statements,
    # e.g. print(input_tensor.shape).)
    input_tensor = preprocess(input_image)
    input_batch = input_tensor.unsqueeze(0)  # create a mini-batch as expected by the model
    # Move the input and model to GPU for speed if available
    if torch.cuda.is_available():
        input_batch = input_batch.to('cuda')
        model.to('cuda')
    with torch.no_grad():
        output = model(input_batch)  # the model expects a batch dimension
    # The output has unnormalized scores. To get probabilities, run a softmax on it.
    probabilities = torch.nn.functional.softmax(output[0], dim=0)
    # Class labels (hard-coded here rather than read from dog_cat.txt)
    #with open("dog_cat.txt", "r") as f:
    #    categories = [s.strip() for s in f.readlines()]
    categories = ["cat", "dog"]
    # Report the top categories for the image
    top_probs, top_catids = torch.topk(probabilities, 2)
    result = {}
    for i in range(top_probs.size(0)):
        result[categories[top_catids[i]]] = top_probs[i].item()
    return result
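
# Hypothetical local sanity check (not part of the original app): uncomment to
# classify the downloaded example image directly, bypassing the Gradio UI.
#if __name__ == "__main__":
#    print(inference(Image.open("dog.jpg")))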
inputs = gr.inputs.Image(type='pil')
outputs = gr.outputs.Label(type="confidences", num_top_classes=2)
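# NOTE: gr.inputs / gr.outputs are the legacy Gradio 2.x component namespaces this
# Space was written against; Gradio 3+ exposes gr.Image and gr.Label directly.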
title = "GHOSTNET"
description = "Gradio demo for GhostNet, an efficient network that generates more features from cheap operations. To use it, simply upload your image or click the example to load it. Read more at the links below."
article = "<p style='text-align: center'><a href='https://arxiv.org/abs/1911.11907'>GhostNet: More Features from Cheap Operations</a> | <a href='https://github.com/huawei-noah/CV-Backbones'>Github Repo</a></p>"
examples = [
    ['dog.jpg']
]
gr.Interface(
    inference, inputs, outputs,
    title=title, description=description,
    article=article, examples=examples,
    analytics_enabled=False).launch(
    #debug=True  # Enabled debug mode to see the stacktrace on Google Colab.
)