mennamostafa55555 committed
Commit 95410e0 · 1 Parent(s): 2439f1e

Update app.py

Files changed (1)
  1. app.py +82 -10
app.py CHANGED
@@ -42,7 +42,7 @@ def yolov8_inference(
 
 
     return annotated_image
-
+'''
 image_input = gr.inputs.Image()  # Adjust the shape according to your requirements
 
 inputs = [
@@ -55,18 +55,90 @@ inputs = [
 
 outputs = gr.Image(type="filepath", label="Output Image")
 title = "Materials-Demo"
+'''
 import os
 examples = [
     ["m1.jpg", 0.6, 0.45],
     ["m2.jpg", 0.25, 0.45],
     ["m3.jpg", 0.25, 0.45],
 ]
-demo_app = gr.Interface(examples=examples,
-                        fn=yolov8_inference,
-                        inputs=inputs,
-                        outputs=outputs,
-                        title=title,
-                        cache_examples=True,
-                        theme="default",
-                        )
-demo_app.launch(debug=False, enable_queue=True)
+outputs_images = [
+    ["1.jpg"],  # Output image for the first example
+    ["2.jpg"],  # Output image for the second example
+    ["3.jpg"],  # Output image for the third example
+]
+readme_html = """
+<html>
+<head>
+<style>
+.description {
+    margin: 20px;
+    padding: 10px;
+    border: 1px solid #ccc;
+}
+</style>
+</head>
+<body>
+<div class="description">
+<p><strong>More details:</strong></p>
+<p>This demo performs object segmentation with a YOLOv8-seg model trained on the Materials dataset. The model was trained on 4,424 images and validated on 464 images.</p>
+<p><strong>Usage:</strong></p>
+<p>Upload an image, and the demo will return your segmented image.</p>
+<p><strong>Dataset:</strong></p>
+<p>The dataset contains 6,365 images in COCO format. To facilitate use with YOLOv8-seg, we have converted it into YOLOv8 format.</p>
+<ul>
+<li><strong>Training Set:</strong> 4,424 images, used to train the model.</li>
+<li><strong>Validation Set:</strong> 464 images, used to tune model parameters during development.</li>
+<li><strong>Test Set:</strong> 1,477 images, held out to evaluate the performance of trained models.</li>
+</ul>
+<p><strong>License:</strong> This dataset is made available under the Creative Commons Attribution 4.0 International License (CC BY 4.0).</p>
+<p>To access and download the dataset, follow this link: <a href="https://universe.roboflow.com/expand-ai/materials-semantic" target="_blank">Dataset Download</a></p>
+</div>
+</body>
+</html>
+"""
+with gr.Blocks() as demo:
+    gr.Markdown(
+        """
+        <div style="text-align: center;">
+        <h1>Materials-Demo</h1>
+        Powered by <a href="https://Tuba.ai">Tuba</a>
+        </div>
+        """
+    )
+
+    # Input component
+    with gr.Row():
+        image_input = gr.inputs.Image()
+
+    # Output component
+    outputs = gr.Image(type="filepath", label="Output Image")
+
+    # Threshold sliders
+    with gr.Row():
+        conf_slider = gr.Slider(minimum=0.0, maximum=1.0, value=0.25, step=0.05, label="Confidence Threshold")
+    with gr.Row():
+        IOU_Slider = gr.Slider(minimum=0.0, maximum=1.0, value=0.45, step=0.05, label="IOU Threshold")
+
+    button = gr.Button("Run")
+
+    # Event listener that connects the inputs and output to the inference function
+    button.click(fn=yolov8_inference, inputs=[image_input, conf_slider, IOU_Slider], outputs=outputs, api_name="yolov8_inference")
+
+    gr.Examples(
+        fn=yolov8_inference,
+        examples=examples,
+        inputs=[image_input, conf_slider, IOU_Slider],
+        outputs=[outputs],
+    )
+    # gr.Examples(inputs=examples, outputs=outputs_images)
+
+    # Add the description below the layout
+    gr.Markdown(readme_html)
+
+# Launch the app
+demo.launch(share=False)
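
For context, the diff never shows the body of yolov8_inference, only that it takes an image plus confidence and IOU thresholds and returns an annotated image. A minimal sketch of what it presumably looks like, assuming the standard ultralytics API and a hypothetical best.pt checkpoint (neither appears in the diff):

from ultralytics import YOLO

# Hypothetical checkpoint name; the actual weights file is not shown in the diff.
model = YOLO("best.pt")

def yolov8_inference(image, conf_threshold=0.25, iou_threshold=0.45):
    # Run YOLOv8-seg prediction with the thresholds supplied by the sliders.
    results = model.predict(source=image, conf=conf_threshold, iou=iou_threshold)
    # Draw the predicted segmentation masks onto the input image and return it.
    annotated_image = results[0].plot()
    return annotated_image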