mennamostafa55555 committed
Commit d6d3399 · 1 Parent(s): e29aa01

Update app.py

Files changed (1): app.py (+89 -2)
app.py CHANGED
@@ -45,7 +45,7 @@ def yolov8_inference(
 
 
     return annotated_image
-
+'''
 image_input = gr.inputs.Image() # Adjust the shape according to your requirements
 
 inputs = [
@@ -58,12 +58,19 @@ inputs = [
 
 outputs = gr.Image(type="filepath", label="Output Image")
 title = "Wheel Segmentation Demo"
+'''
 import os
 examples = [
     ["wh1.jpg", 0.6, 0.45],
     ["wh2.jpg", 0.25, 0.45],
     ["wh3.jpg", 0.25, 0.45],
 ]
+outputs_images = [
+    ["1.jpg"],  # output image for the first example
+    ["2.jpg"],  # output image for the second example
+    ["3.jpg"],  # output image for the third example
+]
+'''
 demo_app = gr.Interface(examples=examples,
                         fn=yolov8_inference,
                         inputs=inputs,
@@ -72,4 +79,84 @@ demo_app = gr.Interface(examples=examples,
                         cache_examples=True,
                         theme="default",
                         )
-demo_app.launch(debug=False, enable_queue=True)
+'''
+readme_html = """
+<html>
+<head>
+<style>
+.description {
+    margin: 20px;
+    padding: 10px;
+    border: 1px solid #ccc;
+}
+</style>
+</head>
+<body>
+<div class="description">
+<p><strong>More details:</strong></p>
+<p>We present a demo for object segmentation built by training a YOLOv8-seg model on a wheel image dataset. The model was trained on 696 images and validated on 199 images.</p>
+<p><strong>Usage:</strong></p>
+<p>You can upload wheel images, and the demo will return the segmented image.</p>
+<p><strong>Dataset:</strong></p>
+<p>This dataset comprises a total of 994 images, which are divided into three distinct sets:</p>
+<ul>
+<li><strong>Training Set:</strong> 696 images used to train the model.</li>
+<li><strong>Validation Set:</strong> 199 images used to tune model parameters during development.</li>
+<li><strong>Test Set:</strong> 99 images held out to evaluate the trained model.</li>
+</ul>
+<p><strong>License:</strong> This dataset is made available under the Creative Commons Attribution 4.0 International License (CC BY 4.0).</p>
+<p>To access and download this dataset, please follow this link: <a href="https://universe.roboflow.com/project-wce7s/1000_seg_wheel" target="_blank">Dataset Download</a></p>
+</div>
+
+</body>
+</html>
+"""
+
+
+
+
+
+with gr.Blocks() as demo:
+    gr.Markdown(
+        """
+        <div style="text-align: center;">
+        <h1>Wheel Segmentation Demo</h1>
+        Powered by <a href="https://Tuba.ai">Tuba</a>
+        </div>
+        """
+    )
+
+
+    # Define the input component and add it to the layout
+    with gr.Row():
+        image_input = gr.inputs.Image()
+
+
+    outputs = gr.Image(type="filepath", label="Output Image")
+
+    # Define the threshold sliders and add them to the layout
+    with gr.Row():
+        conf_slider = gr.Slider(minimum=0.0, maximum=1.0, value=0.25, step=0.05, label="Confidence Threshold")
+    with gr.Row():
+        IOU_Slider = gr.Slider(minimum=0.0, maximum=1.0, value=0.45, step=0.05, label="IOU Threshold")
+
+
+
+
+    button = gr.Button("Run")
+
+
+    # Define the event listener that connects the inputs and output and triggers the inference function
+    button.click(fn=yolov8_inference, inputs=[image_input, conf_slider, IOU_Slider], outputs=outputs, api_name="yolov8_inference")
+
+    gr.Examples(
+        fn=yolov8_inference,
+        examples=examples,
+        inputs=[image_input, conf_slider, IOU_Slider],
+        outputs=[outputs],
+    )
+    # gr.Examples(inputs=examples, outputs=outputs_images)
+    # Add the description below the layout
+    gr.Markdown(readme_html)
+# Launch the app
+demo.launch(share=False)
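
The diff wires yolov8_inference to the new Blocks UI but only shows its closing return annotated_image line. For context, a minimal sketch of what such a function could look like is given below; the ultralytics package, the "best.pt" checkpoint, and the parameter names are assumptions, not something this commit shows. The result is written to a temporary file because the output component is declared with type="filepath".

# Hypothetical sketch of the inference function wired to the Blocks UI above.
# The ultralytics dependency and the "best.pt" weights are assumptions; treat
# this as an illustration, not the Space's actual implementation.
import tempfile

import cv2
from ultralytics import YOLO

model = YOLO("best.pt")  # assumed YOLOv8-seg checkpoint

def yolov8_inference(image, conf_threshold=0.25, iou_threshold=0.45):
    # Run segmentation with the confidence / IoU values coming from the sliders.
    results = model(image, conf=conf_threshold, iou=iou_threshold)
    # Draw the predicted masks and boxes onto the input image (BGR array).
    annotated_image = results[0].plot()
    # The output component uses type="filepath", so save the result and return its path.
    out_path = tempfile.NamedTemporaryFile(suffix=".jpg", delete=False).name
    cv2.imwrite(out_path, annotated_image)
    return out_path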
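
Note that the new layout still creates the input component through gr.inputs.Image(), a deprecated Gradio 3.x alias that is not available in newer major releases. If the Space were moved to a recent Gradio version (an assumption; the pinned version is not shown in this diff), the same components would be created from the top-level API, roughly as sketched here.

# Sketch of the equivalent components using the top-level Gradio API
# (assumes a recent gradio release; this commit does not pin one).
import gradio as gr

with gr.Blocks() as demo:
    image_input = gr.Image(label="Input Image")  # replaces gr.inputs.Image()
    output_image = gr.Image(type="filepath", label="Output Image")
    conf_slider = gr.Slider(0.0, 1.0, value=0.25, step=0.05, label="Confidence Threshold")
    iou_slider = gr.Slider(0.0, 1.0, value=0.45, step=0.05, label="IOU Threshold")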