mennamostafa55555 committed
Commit c30c3c6 (parent: b6f2026)

Update app.py

Files changed (1): app.py (+81, -10)
app.py CHANGED
@@ -40,7 +40,7 @@ def yolov8_inference(
 
 
     return annotated_image
-
+'''
 image_input = gr.inputs.Image() # Adjust the shape according to your requirements
 
 inputs = [
@@ -53,16 +53,87 @@ inputs = [
 
 outputs = gr.Image(type="filepath", label="Output Image")
 title = "Amazon Products Demo"
+'''
 import os
 examples = [["ex1.png", 0.5, 0.45],
             ["tu3.jpg", 0.5, 0.45],
             ]
-demo_app = gr.Interface(examples=examples,
-                        fn=yolov8_inference,
-                        inputs=inputs,
-                        outputs=outputs,
-                        title=title,
-                        cache_examples=True,
-                        theme="default",
-                        )
-demo_app.launch(debug=False, enable_queue=True)
+outputs_images = [
+    ["1.jpg"],  # First example: an output image for the cat example
+    ["2.jpg"]   # Second example: an output image for the dog example
+]
+
+readme_html = """
+<html>
+<head>
+<style>
+.description {
+    margin: 20px;
+    padding: 10px;
+    border: 1px solid #ccc;
+}
+</style>
+</head>
+<body>
+<div class="description">
+<p><strong>More details:</strong></p>
+<p>We present a demo for performing object segmentation using a model trained on Amazon's ARMBench dataset. The model was trained on over 37,000 training images and validated on 4,425 images.</p>
+<p><strong>Usage:</strong></p>
+<p>You can use our demo by uploading your product image, and it will provide you with a segmented image.</p>
+<p><strong>Dataset:</strong></p>
+<p>The model was trained on the ARMBench segmentation dataset, which comprises more than 50,000 images.</p>
+<ul>
+<li>Paper: ARMBench: An object-centric benchmark dataset for robotic manipulation</li>
+<li>Authors: Chaitanya Mitash, Fan Wang, Shiyang Lu, Vikedo Terhuja, Tyler Garaas, Felipe Polido, Manikantan Nambi</li>
+</ul>
+<p><strong>License:</strong> This dataset is made available under the Creative Commons Attribution 4.0 International License (CC BY 4.0).</p>
+<p>To access and download this dataset, please follow this link: <a href="https://www.amazon.science/blog/amazon-releases-largest-dataset-for-training-pick-and-place-robots" target="_blank">Dataset Download</a></p>
+</div>
+
+</body>
+</html>
+"""
+with gr.Blocks() as demo:
+    gr.Markdown(
+        """
+        <div style="text-align: center;">
+        <h1>Amazon Products Demo</h1>
+        Powered by <a href="https://Tuba.ai">Tuba</a>
+        </div>
+        """
+    )
+
+
+    # Define the input components and add them to the layout
+    with gr.Row():
+        image_input = gr.inputs.Image()
+
+
+    outputs = gr.Image(type="filepath", label="Output Image")
+
+    # Define the threshold sliders and add them to the layout
+    with gr.Row():
+        conf_slider = gr.Slider(minimum=0.0, maximum=1.0, value=0.25, step=0.05, label="Confidence Threshold")
+    with gr.Row():
+        IOU_Slider = gr.Slider(minimum=0.0, maximum=1.0, value=0.45, step=0.05, label="IOU Threshold")
+
+
+
+
+    button = gr.Button("Run")
+
+
+    # Define the event listener that connects the input and output components and triggers the function
+    button.click(fn=yolov8_inference, inputs=[image_input, conf_slider, IOU_Slider], outputs=outputs, api_name="yolov8_inference")
+
+    gr.Examples(
+        fn=yolov8_inference,
+        examples=examples,
+        inputs=[image_input, conf_slider, IOU_Slider],
+        outputs=[outputs]
+    )
+    # gr.Examples(inputs=examples, outputs=outputs_images)
+    # Add the description below the layout
+    gr.Markdown(readme_html)
+# Launch the app
+demo.launch(share=False)
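
Note that the commit does not delete the old gr.Interface code: it disables it by wrapping it in a module-level ''' ... ''' string literal. Note also that gr.inputs.Image() belongs to Gradio's legacy 2.x namespace, which later releases removed; in Gradio 3.x/4.x the same component is created with gr.Image(), and queueing moved from launch(enable_queue=True) to demo.queue(). Below is a minimal sketch of the same button-driven Blocks wiring on the current API; the yolov8_inference stub is a placeholder standing in for the Space's real model call, not code from this commit:

import gradio as gr

def yolov8_inference(image, conf_threshold, iou_threshold):
    # Placeholder only: the real function runs YOLOv8 segmentation on
    # `image` with the given thresholds and returns an annotated image.
    return image

with gr.Blocks() as demo:
    gr.Markdown("<div style='text-align: center;'><h1>Amazon Products Demo</h1></div>")
    with gr.Row():
        image_input = gr.Image()  # replaces the legacy gr.inputs.Image()
        output_image = gr.Image(type="filepath", label="Output Image")
    conf_slider = gr.Slider(minimum=0.0, maximum=1.0, value=0.25, step=0.05, label="Confidence Threshold")
    iou_slider = gr.Slider(minimum=0.0, maximum=1.0, value=0.45, step=0.05, label="IOU Threshold")
    button = gr.Button("Run")
    # Same wiring as the commit: the button triggers inference and the
    # result lands in the output image component.
    button.click(
        fn=yolov8_inference,
        inputs=[image_input, conf_slider, iou_slider],
        outputs=output_image,
        api_name="yolov8_inference",
    )

demo.launch(share=False)

Placing the input and output images in one gr.Row() gives a side-by-side layout; the commit instead creates the output component outside any row, which stacks it vertically below the input.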