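# Gradio demo: object segmentation on the Materials dataset with a YOLOv8-seg
# model, with results drawn by supervision's mask and box annotators.
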
import supervision as sv
import gradio as gr
from ultralytics import YOLO
import sahi
import numpy as np


# Download the two example material images used by gr.Examples below
sahi.utils.file.download_from_url(
    "https://transform.roboflow.com/aHClLv0V9gWdgkEi3TZOcyGv4zZ2/2b24b3f5ef9330424b9fda06ad38f98a/thumb.jpg",
    "m1.jpg",
)
sahi.utils.file.download_from_url(
    "https://transform.roboflow.com/aHClLv0V9gWdgkEi3TZOcyGv4zZ2/751a6fca76be162856174c24048b293d/thumb.jpg",
    "m2.jpg",
)


# Load the segmentation weights once at import time instead of on every request
model = YOLO("https://huggingface.co/spaces/devisionx/Final_demo/blob/main/best_weights.pt")

# Annotators for drawing segmentation masks and bounding boxes
annotator_bbox = sv.BoxAnnotator()
annotator_mask = sv.MaskAnnotator()


def yolov8_inference(
    image: np.ndarray = None,
    conf_threshold: float = 0.5,
    iou_threshold: float = 0.45,
):
    # Gradio supplies an RGB array; flip to BGR for inference, then back for display
    image = image[:, :, ::-1].astype(np.uint8)
    results = model(image, imgsz=360, conf=conf_threshold, iou=iou_threshold)[0]
    image = image[:, :, ::-1].astype(np.uint8)
    # from_yolov8 matches older supervision releases; newer ones use from_ultralytics
    detections = sv.Detections.from_yolov8(results)
    annotated_image = annotator_mask.annotate(scene=image, detections=detections)
    annotated_image = annotator_bbox.annotate(scene=annotated_image, detections=detections)
    return annotated_image
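
# A minimal local sanity check (hypothetical usage, not executed by the app):
#   from PIL import Image as PILImage
#   img = np.asarray(PILImage.open("m1.jpg").convert("RGB"))
#   annotated = yolov8_inference(img, conf_threshold=0.25, iou_threshold=0.45)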
# Pre-filled example inputs: [image path, confidence threshold, IOU threshold]
examples = [
    ["m1.jpg", 0.25, 0.45],
    ["m2.jpg", 0.25, 0.45],
]
readme_html = """
<html>
<head>
    <style>
        .description {
            margin: 20px;
            padding: 10px;
            border: 1px solid #ccc;
        }
    </style>
</head>
<body>
    <div class="description">
        <p><strong>More details:</strong></p>
        <p>We present a demo for object segmentation using a YOLOv8-seg model trained on the Materials dataset. The model was trained on 4,424 images and validated on 464 images.</p>
        <p><strong>Usage:</strong></p>
        <p>Upload a material image, and the demo will return the segmented result.</p>
        <p><strong>Dataset:</strong></p>
        <p>The dataset contains 6,365 images and is formatted in COCO style. To facilitate usage with YOLOv8-seg, we have converted it into YOLOv8 format.</p>
        <ul>
            <li><strong>Training Set:</strong> 4,424 images, used to train the model.</li>
            <li><strong>Validation Set:</strong> 464 images, used to tune model parameters during development.</li>
            <li><strong>Test Set:</strong> 1,477 images, held out to evaluate the performance of trained models.</li>
        </ul>
        <p><strong>License:</strong> This dataset is made available under the Creative Commons Attribution 4.0 International License (CC BY 4.0).</p>
        <p>To access and download this dataset, please follow this link: <a href="https://universe.roboflow.com/expand-ai/materials-semantic" target="_blank">Dataset Download</a></p>
        
        
    </div>
</body>
</html>
"""
with gr.Blocks() as demo:
    gr.Markdown(
        """
        <div style="text-align: center;">
            <h1>Materials-Demo</h1>
            Powered by <a href="https://Tuba.ai">Tuba</a>
        </div>
        """
    )


    # Input and output images side by side
    with gr.Row():
        image_input = gr.Image(label="Input Image")
        # yolov8_inference returns a numpy array, so the output type must be "numpy"
        outputs = gr.Image(type="numpy", label="Output Image")

    # Threshold sliders, one per row
    with gr.Row():
        conf_slider = gr.Slider(minimum=0.0, maximum=1.0, value=0.25, step=0.05, label="Confidence Threshold")
    with gr.Row():
        iou_slider = gr.Slider(minimum=0.0, maximum=1.0, value=0.45, step=0.05, label="IOU Threshold")


    button = gr.Button("Run")
    
        
    # Run inference when the button is clicked
    button.click(
        fn=yolov8_inference,
        inputs=[image_input, conf_slider, iou_slider],
        outputs=outputs,
        api_name="yolov8_inference",
    )

    gr.Examples(
        fn=yolov8_inference,
        examples=examples,
        inputs=[image_input, conf_slider, iou_slider],
        outputs=[outputs],
    )
    # Add the description below the layout
    gr.Markdown(readme_html)
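
# Note: share=False serves the app locally (or on its hosting Space); setting
# share=True would also create a temporary public gradio.live link.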
# Launch the app
demo.launch(share=False)