Spaces:
Running
Running
File size: 1,846 Bytes
1763d5f 7ca0c88 f028f54 7ca0c88 1763d5f ce00d99 1763d5f ce00d99 1763d5f ce00d99 7ca0c88 ce00d99 1763d5f ce00d99 7ca0c88 1763d5f 7ca0c88 1763d5f 7ca0c88 1763d5f 7ca0c88 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 |
import gradio as gr
from ultralytics import YOLO
from PIL import Image
from ultralytics.utils.plotting import Annotator, colors
import glob
# Load model and data
# Weights file must sit next to this script; YOLO() raises if it is missing.
model = YOLO('Dental_model.pt')
# Example images for the Gradio gallery — every .jpg in the working directory.
pic_files = glob.glob('*.jpg')
# Mapping of class index -> class name, taken from the loaded model.
names = model.model.names
# Embedded third-party widget rendered below the outputs via gr.HTML.
cloud_sdk = '<iframe src="https://cloudhand-sdk-lmpc.vercel.app/" width="100%" height="600px" style="border:none;"></iframe>'
def detect_objects(image):
    """Run the dental YOLO model on *image* and draw its detections.

    Returns a pair of PIL images: the first with class-name labels on each
    box, the second with the same boxes but no labels.
    """
    # Keep an untouched copy so we can produce a label-free variant too.
    unlabeled_copy = image.copy()
    prediction = model.predict(image)[0]
    detected_classes = prediction.boxes.cls.cpu().tolist()
    detected_boxes = prediction.boxes.xyxy.cpu()
    labeled_ann = Annotator(image, line_width=3)
    plain_ann = Annotator(unlabeled_copy, line_width=3)
    for xyxy, cls_val in zip(detected_boxes, detected_classes):
        cls_id = int(cls_val)
        labeled_ann.box_label(xyxy, label=names[cls_id], color=colors(cls_id))
        plain_ann.box_label(xyxy, label=None, color=colors(cls_id))
    return (Image.fromarray(labeled_ann.result()),
            Image.fromarray(plain_ann.result()))
# Gradio Blocks App
# NOTE(review): the original paste lost all indentation; the nesting below is
# reconstructed from Gradio conventions (Row > two Columns). In Blocks, the
# order components are created is the order they appear on the page.
with gr.Blocks() as demo:
    gr.Markdown("## Dental Analysis")
    gr.Markdown("Analyze your Dental XRAY image with our AI object Detection model")
    with gr.Row():
        with gr.Column():
            # Left column: upload widget, trigger button, example gallery.
            image_input = gr.Image(type="pil", label="Upload Image")
            run_button = gr.Button("Run Detection")
            example_images = gr.Examples(
                examples=pic_files,
                inputs=image_input,
                label="Examples"
            )
        with gr.Column():
            # Right column: labeled and unlabeled detection results.
            image_output_1 = gr.Image(type="pil", label="Dental Analysis")
            image_output_2 = gr.Image(type="pil", label="Without Labels")
            # Embedded iframe widget — placed after the outputs in source
            # order; presumably intended for this column — TODO confirm.
            html_output = gr.HTML(cloud_sdk)
    # Wire the button: detect_objects returns (labeled, unlabeled) images.
    run_button.click(fn=detect_objects,
                     inputs=image_input,
                     outputs=[image_output_1, image_output_2])
if __name__ == "__main__":
    demo.launch()
|