mennamostafa55555 committed
Commit 75a58f1 · 1 Parent(s): eb5b07e

Update app.py

Files changed (1)
  1. app.py +89 -10
app.py CHANGED
@@ -45,7 +45,7 @@ def yolov8_inference(


    return annotated_image
-
+'''
image_input = gr.inputs.Image()  # Adjust the shape according to your requirements

inputs = [
@@ -58,18 +58,97 @@ inputs = [

outputs = gr.Image(type="filepath", label="Output Image")
title = "Tennis Court Demo"
+'''
import os
examples = [
    ["t1.jpg", 0.6, 0.45],
    ["t2.jpg", 0.25, 0.45],
    ["t3.jpg", 0.25, 0.45],
]
-demo_app = gr.Interface(examples=examples,
-                        fn=yolov8_inference,
-                        inputs=inputs,
-                        outputs=outputs,
-                        title=title,
-                        cache_examples=True,
-                        theme="default",
-                        )
-demo_app.launch(debug=True, enable_queue=True)
+outputs_images = [
+    ["1.jpg"],  # Expected output for the first example image
+    ["2.jpg"],  # Expected output for the second example image
+    ["3.jpg"],  # Expected output for the third example image
+]
+
+readme_html = """
+<html>
+<head>
+<style>
+  .description {
+    margin: 20px;
+    padding: 10px;
+    border: 1px solid #ccc;
+  }
+</style>
+</head>
+<body>
+<div class="description">
+  <p><strong>More details:</strong></p>
+  <p>We present a demo for performing object segmentation by training YOLOv8-seg on a wheel image dataset. The model was trained on 696 training images and validated on 199 images.</p>
+  <p><strong>Usage:</strong></p>
+  <p>You can upload wheel images, and the demo will return the segmented image.</p>
+  <p><strong>Dataset:</strong></p>
+  <p>The dataset contains 3,146 images and is formatted in COCO style. To facilitate usage with YOLOv8-seg, we have converted it into YOLOv8 format.</p>
+  <ul>
+    <li><strong>Training Set:</strong> It includes 2,649 images and is intended for training the model.</li>
+    <li><strong>Validation Set:</strong> There are 250 images in the validation set, which is used for optimizing model parameters during development.</li>
+    <li><strong>Test Set:</strong> This set consists of 247 images and serves as a separate evaluation dataset to assess the performance of trained models.</li>
+  </ul>
+  <p><strong>License:</strong> This dataset is made available under the Creative Commons Attribution 4.0 International License (CC BY 4.0).</p>
+  <p>To access and download this dataset, please follow this link: <a href="https://universe.roboflow.com/tenniscourtworkspace/teslasegmentation" target="_blank">Dataset Download</a></p>
+</div>
+</body>
+</html>
+"""
+
+with gr.Blocks() as demo:
+    gr.Markdown(
+        """
+        <div style="text-align: center;">
+            <h1>Tennis Court Demo</h1>
+            Powered by <a href="https://Tuba.ai">Tuba</a>
+        </div>
+        """
+    )
+
+    # Define the input component and add it to the layout
+    with gr.Row():
+        image_input = gr.inputs.Image()
+
+    # Define the output component and add it to the layout
+    outputs = gr.Image(type="filepath", label="Output Image")
+
+    # Define the threshold sliders and add them to the layout
+    with gr.Row():
+        conf_slider = gr.Slider(minimum=0.0, maximum=1.0, value=0.25, step=0.05, label="Confidence Threshold")
+    with gr.Row():
+        IOU_Slider = gr.Slider(minimum=0.0, maximum=1.0, value=0.45, step=0.05, label="IOU Threshold")
+
+    button = gr.Button("Run")
+
+    # Define the event listener that connects the input and output components and triggers the function
+    button.click(fn=yolov8_inference, inputs=[image_input, conf_slider, IOU_Slider], outputs=outputs, api_name="yolov8_inference")
+
+    gr.Examples(
+        fn=yolov8_inference,
+        examples=examples,
+        inputs=[image_input, conf_slider, IOU_Slider],
+        outputs=[outputs],
+    )
+    # gr.Examples(inputs=examples, outputs=outputs_images)
+
+    # Add the description below the layout
+    gr.Markdown(readme_html)
+
+# Launch the app
+demo.launch(share=False)
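
Note: the hunk above begins at the tail of yolov8_inference (its return annotated_image line), so the body of that function is not part of this diff. As a rough, hypothetical sketch only, assuming the Space uses the Ultralytics YOLOv8 API with a local segmentation checkpoint named best.pt (neither is shown in this commit), the function wired to button.click could look like:

from ultralytics import YOLO
import cv2
import tempfile

model = YOLO("best.pt")  # assumed checkpoint name; not part of this commit

def yolov8_inference(image, conf_threshold=0.25, iou_threshold=0.45):
    # Run segmentation inference with the thresholds chosen via the sliders
    results = model.predict(source=image, conf=conf_threshold, iou=iou_threshold)
    # Draw the predicted masks and boxes onto the input image (returns a numpy array)
    annotated_image = results[0].plot()
    # Save to a temporary file so the gr.Image(type="filepath") output can display it
    out_path = tempfile.NamedTemporaryFile(suffix=".jpg", delete=False).name
    cv2.imwrite(out_path, annotated_image)
    return out_path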