import gradio as gr
import numpy as np
import supervision as sv
from sahi.utils.file import download_from_url
from ultralytics import YOLO
# Download the example wheel images that back the gr.Examples rows below
download_from_url(
    "https://transform.roboflow.com/bViBvBXkjUWzz4lYXwtoVTE2gpO2/210fe71d15bb416b0dfde415686da572/thumb.jpg",
    "wh1.jpg",
)
download_from_url(
    "https://transform.roboflow.com/bViBvBXkjUWzz4lYXwtoVTE2gpO2/6731f1ac3e966e90ccc0057c86b42c74/thumb.jpg",
    "wh2.jpg",
)
download_from_url(
    "https://transform.roboflow.com/bViBvBXkjUWzz4lYXwtoVTE2gpO2/ba9fc3cc24849c0408d5e2ddd4a4a4ed/thumb.jpg",
    "wh3.jpg",
)
annotator_bbox = sv.BoxAnnotator()
annotator_mask = sv.MaskAnnotator()
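# Note: annotator_mask is instantiated but never used below. Since best.pt is a
# -seg model, a mask overlay could be drawn the same way (sketch):
#   annotated = annotator_mask.annotate(scene=image, detections=detections)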
# Load the segmentation weights once at startup instead of on every request.
# best.pt ships with this Space, so a local path ("best.pt") should also work here.
model = YOLO("https://huggingface.co/spaces/devisionx/Fourth_demo/blob/main/best.pt")

def yolov8_inference(
    image: np.ndarray = None,
    conf_threshold: float = 0.25,
    iou_threshold: float = 0.45,
):
    # Gradio supplies RGB; ultralytics treats numpy arrays as BGR, so flip channels
    image = image[:, :, ::-1].astype(np.uint8)
    # Pass the slider values through; the original code accepted them but never used them
    results = model(image, imgsz=640, conf=conf_threshold, iou=iou_threshold)[0]
    # Flip back to RGB so the annotated output displays with correct colors
    image = image[:, :, ::-1].astype(np.uint8)
    # from_yolov8 is the connector in older supervision releases
    # (renamed to from_ultralytics in newer ones)
    detections = sv.Detections.from_yolov8(results)
    annotated_image = annotator_bbox.annotate(scene=image, detections=detections)
    return annotated_image
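# Quick local sanity check (sketch; assumes the downloads above succeeded and
# opencv-python is installed, which this app does not otherwise require):
#   import cv2
#   rgb = cv2.imread("wh1.jpg")[:, :, ::-1]  # cv2 loads BGR; the function expects RGB
#   out = yolov8_inference(rgb, 0.25, 0.45)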
examples = [
    # Each row is [image_path, conf_threshold, iou_threshold]
    ["wh1.jpg", 0.6, 0.45],
    ["wh2.jpg", 0.25, 0.45],
    ["wh3.jpg", 0.25, 0.45],
]
outputs_images = [
    ["1.jpg"],  # pre-rendered output for the first wheel example
    ["2.jpg"],  # pre-rendered output for the second wheel example
    ["3.jpg"],  # pre-rendered output for the third wheel example
]
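# These rows feed gr.Examples at the bottom of the Blocks layout; passing
# cache_examples=True there would precompute the three outputs at startup
# (an option, not what this Space currently does).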
readme_html = """
<html>
<head>
<style>
.description {
  margin: 20px;
  padding: 10px;
  border: 1px solid #ccc;
}
</style>
</head>
<body>
<div class="description">
<p><strong>More details:</strong></p>
<p>This demo performs object segmentation with a YOLOv8-seg model trained on a wheel image dataset. The model was trained on 696 images and validated on 199 images.</p>
<p><strong>Usage:</strong></p>
<p>Upload a wheel image and the demo will return the segmented image.</p>
<p><strong>Dataset:</strong></p>
<p>The dataset comprises 994 images, divided into three distinct sets:</p>
<ul>
  <li><strong>Training set:</strong> 696 images used to train the model.</li>
  <li><strong>Validation set:</strong> 199 images used to tune model parameters during development.</li>
  <li><strong>Test set:</strong> 99 images held out to evaluate the trained model.</li>
</ul>
<p><strong>License:</strong> This dataset is made available under the Creative Commons Attribution 4.0 International License (CC BY 4.0).</p>
<p>To access and download this dataset, please follow this link: <a href="https://universe.roboflow.com/project-wce7s/1000_seg_wheel" target="_blank">Dataset Download</a></p>
</div>
</body>
</html>
"""
with gr.Blocks() as demo:
    gr.Markdown(
        """
        <div style="text-align: center;">
        <h1>Wheel Segmentation Demo</h1>
        Powered by <a href="https://Tuba.ai">Tuba</a>
        </div>
        """
    )
    # Input image and annotated output, side by side
    with gr.Row():
        image_input = gr.Image(label="Input Image")
        # type="numpy" because yolov8_inference returns a numpy array
        outputs = gr.Image(type="numpy", label="Output Image")
    # Threshold sliders for confidence and IOU
    with gr.Row():
        conf_slider = gr.Slider(minimum=0.0, maximum=1.0, value=0.25, step=0.05, label="Confidence Threshold")
    with gr.Row():
        iou_slider = gr.Slider(minimum=0.0, maximum=1.0, value=0.45, step=0.05, label="IOU Threshold")
    button = gr.Button("Run")
    # Run inference when the button is clicked
    button.click(fn=yolov8_inference, inputs=[image_input, conf_slider, iou_slider], outputs=outputs, api_name="yolov8_inference")
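    # Because api_name is set, the same endpoint can be called programmatically,
    # e.g. with the gradio_client package (sketch; assumes this Space's id):
    #   from gradio_client import Client
    #   client = Client("devisionx/Fourth_demo")
    #   result = client.predict("wh1.jpg", 0.25, 0.45, api_name="/yolov8_inference")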
    gr.Examples(
        fn=yolov8_inference,
        examples=examples,
        inputs=[image_input, conf_slider, iou_slider],
        outputs=[outputs],
    )
    # gr.Examples(inputs=examples, outputs=outputs_images)
    # Add the description below the layout
    gr.Markdown(readme_html)

# Launch the app
demo.launch(share=False)
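# Note: share=False keeps the demo on this host only; share=True would create a
# temporary public gradio.live link when running outside a hosted Space.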