aliabd (HF staff) committed on
Commit 54a8a4f
1 Parent(s): 4ca19cf

Upload with huggingface_hub

Files changed (3):
  1. DESCRIPTION.md +1 -0
  2. README.md +1 -1
  3. app.py +0 -9
DESCRIPTION.md ADDED
@@ -0,0 +1 @@
+Simple image classification in Pytorch with Gradio's Image input and Label output.
README.md CHANGED
@@ -1,7 +1,7 @@
 
 ---
 title: image_classification
-emoji: 🤗
+emoji: 🔥
 colorFrom: indigo
 colorTo: indigo
 sdk: gradio
app.py CHANGED
@@ -1,19 +1,12 @@
-# URL: https://huggingface.co/spaces/abidlabs/image_classification
-# DESCRIPTION: Simple image classification in Pytorch with Gradio's Image input and Label output.
-# imports
 import gradio as gr
 import torch
 import requests
 from torchvision import transforms
 
-# load the model
 model = torch.hub.load('pytorch/vision:v0.6.0', 'resnet18', pretrained=True).eval()
-
-# download human-readable labels for ImageNet.
 response = requests.get("https://git.io/JJkYN")
 labels = response.text.split("\n")
 
-# define core function
 def predict(inp):
     inp = transforms.ToTensor()(inp).unsqueeze(0)
     with torch.no_grad():
@@ -21,12 +14,10 @@ def predict(inp):
     confidences = {labels[i]: float(prediction[i]) for i in range(1000)}
     return confidences
 
-# define the interface
 demo = gr.Interface(fn=predict,
                     inputs=gr.inputs.Image(type="pil"),
                     outputs=gr.outputs.Label(num_top_classes=3),
                     examples=[["cheetah.jpg"]],
                     )
 
-# launch
 demo.launch()
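
Usage note (not part of the commit): once the Space is running, the demo defined in app.py can also be queried programmatically. Below is a minimal sketch using the gradio_client package; it assumes the Space name "abidlabs/image_classification" taken from the URL comment removed above, a Gradio version recent enough to expose the client API, and the default "/predict" endpoint of a gr.Interface.

# Minimal sketch, not part of this commit. Assumptions: gradio_client is installed,
# the Space "abidlabs/image_classification" (from the removed URL comment) is live,
# and the gr.Interface keeps its default "/predict" endpoint.
from gradio_client import Client, handle_file

client = Client("abidlabs/image_classification")   # connect to the hosted Space
result = client.predict(
    handle_file("cheetah.jpg"),                    # a local image file; cheetah.jpg matches the Space's example
    api_name="/predict",
)
print(result)                                      # Label output: top classes with confidences

The predict function can also be called directly in a Python session with a PIL image; it returns a dict mapping the 1000 ImageNet labels to confidences.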
 
 
 
 