yuragoithf commited on
Commit
436c7ec
·
1 Parent(s): 4c0f5e0

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +35 -17
app.py CHANGED
@@ -1,36 +1,54 @@
1
  import gradio as gr
2
- import requests
3
- import base64
4
  from PIL import Image
 
 
5
 
 
 
6
 
7
 
8
import os  # stdlib; used to read the API token from the environment at startup

# Hosted inference endpoint for the DETR panoptic segmentation model.
API_URL = "https://api-inference.huggingface.co/models/facebook/detr-resnet-50-panoptic"
# SECURITY: an org API token was previously hard-coded on this line and must be
# considered leaked — revoke it, then supply a fresh token via HF_API_TOKEN.
headers = {"Authorization": f"Bearer {os.environ.get('HF_API_TOKEN', '')}"}
 
 
 
 
 
 
 
10
 
 
 
 
 
 
11
 
12
# Gradio input widget: accepts an uploaded image and hands it to the fn as a PIL.Image.
inputs = gr.inputs.Image(type="pil", label="Upload an image")
# output = query("cats.jpg")
 
 
 
14
 
15
 
16
# Perform image segmentation for multi-class output via the hosted inference API.
def query(inputs):
    """POST a PIL image to the HF Inference API and return the parsed JSON reply.

    Fix: ``base64.b64encode`` requires a bytes-like object; the Gradio widget
    supplies a ``PIL.Image``, so the image is serialized to PNG bytes first
    (the original passed the Image object directly, raising TypeError).
    """
    import io  # local import keeps this fix self-contained

    buffer = io.BytesIO()
    inputs.save(buffer, format="PNG")
    response = requests.post(API_URL, headers=headers, data=base64.b64encode(buffer.getvalue()))
    return response.json()
22
 
23
 
24
# --- Gradio UI wiring (segmentation demo, HTML output) ---
# Input widget: uploaded image delivered to `query` as a PIL.Image.
inputs = gr.inputs.Image(type="pil", label="Upload an image")
# outputs = gr.outputs.HTML() #uncomment for single class output
#outputs = query(inputs)

# Page title/description rendered by Gradio (title accepts raw HTML here).
title = "<h1 style='text-align: center;'>Image Segmentation</h1>"
description = "Upload an image and get the segmentation result."

# Build and launch the app; example images are bundled alongside app.py.
gr.Interface(fn=query,
             inputs=inputs,
             outputs=gr.outputs.HTML(),
             title=title,
             examples=[["00_plane.jpg"], ["01_car.jpg"], ["02_bird.jpg"], ["03_cat.jpg"], ["04_deer.jpg"]],
             description=description).launch()
 
1
  import gradio as gr
 
 
2
  from PIL import Image
3
+ from transformers import AutoFeatureExtractor, AutoModelForImageSegmentation
4
+ import tensorflow as tf
5
 
6
# Load the paired preprocessor and segmentation model once at import time
# (weights are downloaded/cached by transformers on first run).
extractor = AutoFeatureExtractor.from_pretrained("facebook/detr-resnet-50-panoptic")
model = AutoModelForImageSegmentation.from_pretrained("facebook/detr-resnet-50-panoptic")
8
 
9
 
10
+ # Perform image classification for single class output
11
+ # def predict_class(image):
12
+ # img = tf.cast(image, tf.float32)
13
+ # img = tf.image.resize(img, [input_shape[0], input_shape[1]])
14
+ # img = tf.expand_dims(img, axis=0)
15
+ # prediction = model.predict(img)
16
+ # class_index = tf.argmax(prediction[0]).numpy()
17
+ # predicted_class = labels[class_index]
18
+ # return predicted_class
19
 
20
# Perform image classification for multi-class output.
def predict_class(image):
    """Run the DETR panoptic model on a PIL image and return the raw model outputs.

    Fixes the original, which called ``model.predict(tf.cast(image, tf.float32))``:
    transformers PyTorch models have no ``.predict`` method (that is a Keras API),
    and the paired ``extractor`` was loaded but never used. Preprocess with the
    feature extractor and invoke the model directly instead.

    NOTE(review): the returned object is the model's raw output structure;
    downstream `gr.outputs.Label` may need post-processing into a
    {label: confidence} dict — confirm against the Gradio output component.
    """
    # Resize/normalize to the model's expected input spec and batch it.
    encoding = extractor(images=image, return_tensors="pt")
    prediction = model(**encoding)
    return prediction
25
 
26
+ # UI Design for single class output
27
+ # def classify_image(image):
28
+ # predicted_class = predict_class(image)
29
+ # output = f"<h2>Predicted Class: <span style='text-transform:uppercase';>{predicted_class}</span></h2>"
30
+ # return output
31
 
32
 
33
# UI handler for multi-class output.
def classify_image(image):
    """Gradio callback: delegate to predict_class and return its result unchanged."""
    return predict_class(image)
 
38
 
39
 
40
# --- Gradio UI wiring (multi-class output) ---
# Input: uploaded image delivered to the callback as a PIL.Image.
inputs = gr.inputs.Image(type="pil", label="Upload an image")
# outputs = gr.outputs.HTML() #uncomment for single class output
outputs = gr.outputs.Label(num_top_classes=4)

# Page chrome; the title accepts raw HTML.
title = "<h1 style='text-align: center;'>Image Classifier</h1>"
description = "Upload an image and get the predicted class."
# css_code='body{background-image:url("file=wave.mp4");}'

# Build the app and start serving; example images ship alongside app.py.
demo = gr.Interface(
    fn=classify_image,
    inputs=inputs,
    outputs=outputs,
    title=title,
    examples=[
        ["00_plane.jpg"],
        ["01_car.jpg"],
        ["02_bird.jpg"],
        ["03_cat.jpg"],
        ["04_deer.jpg"],
    ],
    # css=css_code,
    description=description,
)
demo.launch()