import gradio as gr
import numpy as np
# Import the submodule explicitly so sahi.utils.file.download_from_url is available.
import sahi.utils.file
import supervision as sv
from ultralytics import YOLO
# Download the example images used by the demo.
sahi.utils.file.download_from_url(
    "https://transform.roboflow.com/zZuu207UOVOOJKuuCpmV/3512b3839afacecec643949bef398e99/thumb.jpg",
    "tu1.jpg",
)
sahi.utils.file.download_from_url(
    "https://transform.roboflow.com/zZuu207UOVOOJKuuCpmV/5b8b940fae2f9e4952395bcced0688aa/thumb.jpg",
    "tu2.jpg",
)
sahi.utils.file.download_from_url(
    "https://raw.githubusercontent.com/mensss/vvvvv/main/MRI_of_Human_Brain.jpg",
    "tu3.jpg",
)
# Annotators that draw predictions: segmentation masks first, then bounding boxes on top.
annotator_bbox = sv.BoxAnnotator()
annotator_mask = sv.MaskAnnotator()
# Load the segmentation weights once at startup instead of on every request.
# Note the resolve/ URL, which serves the raw .pt file (a blob/ URL returns an HTML page).
model = YOLO("https://huggingface.co/spaces/devisionx/Fifth_demo/resolve/main/best_weigh.pt")


def yolov8_inference(
    image: np.ndarray = None,
    conf_threshold: float = 0.25,
    iou_threshold: float = 0.45,
):
    # Gradio supplies RGB arrays; flip to BGR for inference.
    image = image[:, :, ::-1].astype(np.uint8)
    # Ultralytics rounds imgsz up to the nearest multiple of the model stride (384 here).
    results = model(image, imgsz=360, conf=conf_threshold, iou=iou_threshold)[0]
    # Flip back to RGB for annotation and display.
    image = image[:, :, ::-1].astype(np.uint8)
    # Note: newer supervision releases renamed this method to Detections.from_ultralytics.
    detections = sv.Detections.from_yolov8(results)
    annotated_image = annotator_mask.annotate(scene=image, detections=detections)
    annotated_image = annotator_bbox.annotate(scene=annotated_image, detections=detections)
    return annotated_image
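
# Quick local sanity check (a sketch, assuming OpenCV is installed and the
# example images above have been downloaded; not part of the Space itself):
#
#   import cv2
#   img = cv2.imread("tu1.jpg")[:, :, ::-1]  # BGR -> RGB, matching what Gradio passes in
#   out = yolov8_inference(img, conf_threshold=0.25, iou_threshold=0.45)
#   cv2.imwrite("annotated.jpg", out[:, :, ::-1])  # back to BGR for imwrite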
# Example inputs shown in the UI: (image path, confidence threshold, IoU threshold).
examples = [
    ["tu1.jpg", 0.6, 0.45],
    ["tu2.jpg", 0.25, 0.45],
    ["tu3.jpg", 0.25, 0.45],
]
readme_html = """
<html>
<head>
<style>
.description {
margin: 20px;
padding: 10px;
border: 1px solid #ccc;
}
</style>
</head>
<body>
<div class="description">
<p><strong>More details:</strong></p>
<p>We present a demo for performing object segmentation with training a Yolov8-seg on Brain tumor dataset. The model was trained on 236 training images and validated on 28 images.</p>
<p><strong>Usage:</strong></p>
<p>You can upload Brain tumor images, and the demo will provide you with your segmented image.</p>
<p><strong>Dataset:</strong></p>
<p>This dataset comprises a total of 278 images, which are divided into three distinct sets for various purposes:</p>
<ul>
<li><strong>Training Set:</strong> It includes 236 images and is intended for training the model.</li>
<li><strong>Validation Set:</strong> There are 28 images in the validation set, which is used for optimizing model parameters during development.</li>
<li><strong>Test Set:</strong> This set consists of 14 images and serves as a separate evaluation dataset to assess the performance of trained models.</li>
</ul>
<p><strong>License:</strong> This dataset is made available under the Creative Commons Attribution 4.0 International License (CC BY 4.0).</p>
<p>To access and download this dataset, please follow this link: <a href="https://universe.roboflow.com/detection-qskiw/segmnetation" target="_blank">Dataset Download</a></p>
</body>
</html>
"""
with gr.Blocks() as demo:
    gr.Markdown(
        """
        <div style="text-align: center;">
        <h1>Brain Tumor Segmentation Demo</h1>
        Powered by <a href="https://Tuba.ai">Tuba</a>
        </div>
        """
    )
    # Input and output images, side by side.
    with gr.Row():
        image_input = gr.Image(label="Input Image")
        output_image = gr.Image(type="numpy", label="Output Image")
    # Threshold sliders.
    with gr.Row():
        conf_slider = gr.Slider(
            minimum=0.0, maximum=1.0, value=0.25, step=0.05, label="Confidence Threshold"
        )
    with gr.Row():
        iou_slider = gr.Slider(
            minimum=0.0, maximum=1.0, value=0.45, step=0.05, label="IOU Threshold"
        )
    button = gr.Button("Run")
    # Run inference when the button is clicked; api_name also exposes the
    # endpoint through the Gradio API.
    button.click(
        fn=yolov8_inference,
        inputs=[image_input, conf_slider, iou_slider],
        outputs=output_image,
        api_name="yolov8_inference",
    )
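    # With api_name set, the endpoint can also be called programmatically, e.g.
    # (a sketch assuming the gradio_client package and a locally running server;
    # exact image-argument handling varies across gradio_client versions):
    #
    #   from gradio_client import Client
    #   client = Client("http://127.0.0.1:7860/")
    #   result = client.predict("tu1.jpg", 0.25, 0.45, api_name="/yolov8_inference")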
    gr.Examples(
        fn=yolov8_inference,
        examples=examples,
        inputs=[image_input, conf_slider, iou_slider],
        outputs=[output_image],
    )
    # Show the description below the layout.
    gr.Markdown(readme_html)

# Launch the app.
demo.launch(share=False)