smy503 akhaliq HF Staff committed on
Commit 09079e6 · 0 Parent(s)

Duplicate from pytorch/EfficientNet

Co-authored-by: AK <[email protected]>

Files changed (5)
  1. .gitattributes +27 -0
  2. README.md +12 -0
  3. app.py +45 -0
  4. food.jpeg +0 -0
  5. requirements.txt +3 -0
.gitattributes ADDED
@@ -0,0 +1,27 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,12 @@
+ ---
+ title: EfficientNet
+ emoji: 😻
+ colorFrom: blue
+ colorTo: yellow
+ sdk: gradio
+ app_file: app.py
+ pinned: false
+ duplicated_from: pytorch/EfficientNet
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
app.py ADDED
@@ -0,0 +1,45 @@
+ import torch
+ import gradio as gr
+ import torchvision.transforms as transforms
+
+ device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
+
+ efficientnet = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_efficientnet_b0', pretrained=True)
+ utils = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_convnets_processing_utils')
+
+ efficientnet.eval().to(device)
+
+ def inference(img):
+
+     img_transforms = transforms.Compose(
+         [transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor()]
+     )
+
+     img = img_transforms(img)
+     with torch.no_grad():
+         # mean and std are not multiplied by 255 as they are in the training script;
+         # the torch dataloader reads data as bytes, whereas loading directly
+         # through PIL creates a tensor with floats in the [0, 1] range
+         mean = torch.tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1)
+         std = torch.tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1)
+         img = img.float()
+         img = img.unsqueeze(0).sub_(mean).div_(std)
+
+     batch = torch.cat(
+         [img]
+     ).to(device)
+     with torch.no_grad():
+         output = torch.nn.functional.softmax(efficientnet(batch), dim=1)
+
+
+     results = utils.pick_n_best(predictions=output, n=5)
+
+     return results
+
+ title = "EfficientNet"
+ description = "Gradio demo for EfficientNet. EfficientNets are a family of image classification models that achieve state-of-the-art accuracy while being an order of magnitude smaller and faster. Trained with mixed precision using Tensor Cores. To use the demo, simply upload your image or click on one of the examples below. Read more at the links below."
+
+ article = "<p style='text-align: center'><a href='https://arxiv.org/abs/1905.11946'>EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks</a> | <a href='https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/Classification/ConvNets/efficientnet'>GitHub Repo</a></p>"
+
+ examples = [['food.jpeg']]
+ gr.Interface(inference, gr.inputs.Image(type="pil"), "text", title=title, description=description, article=article, examples=examples).launch()
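
For quick local testing outside the Space, a minimal sketch along these lines should reproduce the same predictions without launching the Gradio UI. It is an assumption-laden illustration, not part of the commit: it assumes the packages from requirements.txt are installed, that torch.hub can download the NVIDIA weights, and it uses `food.jpeg`, the example image added in this commit. The preprocessing mirrors `inference()` in app.py, with `transforms.Normalize` replacing the manual mean/std arithmetic.

```python
# Minimal local-test sketch (not part of the commit): run the same
# EfficientNet pipeline on the bundled example image without Gradio.
import torch
import torchvision.transforms as transforms
from PIL import Image

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Same torch.hub entry points used by app.py above.
model = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub',
                       'nvidia_efficientnet_b0', pretrained=True).eval().to(device)
utils = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub',
                       'nvidia_convnets_processing_utils')

preprocess = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    # same ImageNet mean/std that app.py applies manually
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])

# "food.jpeg" is the example image added in this commit.
batch = preprocess(Image.open("food.jpeg").convert("RGB")).unsqueeze(0).to(device)

with torch.no_grad():
    probs = torch.nn.functional.softmax(model(batch), dim=1)

# Top-5 labels, as in app.py.
print(utils.pick_n_best(predictions=probs, n=5))
```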
food.jpeg ADDED
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ torch
+ torchvision
+ Pillow