Spaces:
Running
Running
Update app.py
Browse files
app.py
CHANGED
@@ -1,13 +1,14 @@
|
|
1 |
import gradio as gr
|
2 |
from ultralytics import YOLO
|
3 |
from PIL import Image
|
4 |
-
from ultralytics import
|
5 |
-
|
6 |
-
|
7 |
-
|
8 |
-
model = YOLO('Dental_model.pt')
|
9 |
-
pic_files=glob.glob('*.jpg')
|
10 |
-
names=model.model.names
|
|
|
11 |
def detect_objects(image):
|
12 |
image1 = image.copy()
|
13 |
results = model.predict(image)
|
@@ -15,24 +16,36 @@ def detect_objects(image):
|
|
15 |
boxes = results[0].boxes.xyxy.cpu()
|
16 |
annotator = Annotator(image, line_width=3)
|
17 |
annotator1 = Annotator(image1, line_width=3)
|
18 |
-
cloud_sdk='<iframe src="https://cloudhand-sdk-xlsh.vercel.app/" width="100%" height="600px" style="border:none;"></iframe>'
|
|
|
19 |
for box, cls in zip(boxes, classes):
|
20 |
annotator.box_label(box, label=names[int(cls)], color=colors(int(cls)))
|
21 |
annotator1.box_label(box, label=None, color=colors(int(cls)))
|
22 |
-
|
23 |
-
return Image.fromarray(annotator.result()), Image.fromarray(annotator1.result()),cloud_sdk
|
24 |
|
|
|
|
|
|
|
|
|
|
|
|
|
25 |
|
26 |
-
|
27 |
-
|
28 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
29 |
|
30 |
-
|
31 |
-
inputs=
|
32 |
-
outputs=[
|
33 |
-
gr.Image(type='pil', label="Dental Analysis"),
|
34 |
-
gr.HTML() ],
|
35 |
-
examples=pic_files)
|
36 |
|
37 |
-
if __name__=="__main__":
|
38 |
-
|
|
|
# Dental X-ray object-detection demo.
# Model weights and example images are loaded once, at import time.
import glob

import gradio as gr
from PIL import Image
from ultralytics import YOLO
from ultralytics.utils.plotting import Annotator, colors

model = YOLO('Dental_model.pt')     # trained weights, expected next to this script
pic_files = glob.glob('*.jpg')      # JPEGs in the working dir double as Gradio examples
names = model.model.names           # class-index -> human-readable label mapping

def detect_objects(image):
    """Run the dental YOLO model on *image* and return two annotated copies.

    Args:
        image: input image from the Gradio image component (type="pil").

    Returns:
        (labeled, unlabeled): two PIL images — the first with class-name
        labels drawn on each detection box, the second with boxes only.
    """
    image1 = image.copy()
    results = model.predict(image)
    # NOTE(review): this line was elided in the diff context (new line 15);
    # restored with the standard ultralytics accessor — confirm against the
    # full file.
    classes = results[0].boxes.cls.cpu()
    boxes = results[0].boxes.xyxy.cpu()
    annotator = Annotator(image, line_width=3)
    annotator1 = Annotator(image1, line_width=3)
    for box, cls in zip(boxes, classes):
        annotator.box_label(box, label=names[int(cls)], color=colors(int(cls)))
        annotator1.box_label(box, label=None, color=colors(int(cls)))
    # Bug fix: dropped the dead local `cloud_sdk` — it was built on every call
    # but never used after the return was trimmed to two values (the iframe is
    # rendered by the UI at module level instead).
    return Image.fromarray(annotator.result()), Image.fromarray(annotator1.result())
|
# Companion widget markup. Bug fix: the original passed `cloud_sdk` to
# gr.HTML(), but that name is a *local* variable inside detect_objects — the
# Blocks context is built at import time, so this raised NameError. The iframe
# string now lives at module scope.
CLOUD_SDK_IFRAME = '<iframe src="https://cloudhand-sdk-xlsh.vercel.app/" width="100%" height="600px" style="border:none;"></iframe>'

# Gradio Blocks App: upload/examples on the left, annotated results on the right.
with gr.Blocks() as demo:
    gr.Markdown("## Dental Analysis")
    gr.Markdown("Analyze your Dental XRAY image with our AI object Detection model")

    with gr.Row():
        with gr.Column():
            image_input = gr.Image(type="pil", label="Upload Image")
            run_button = gr.Button("Run Detection")
            example_images = gr.Examples(
                examples=pic_files,
                inputs=image_input,
                label="Examples"
            )
        with gr.Column():
            image_output_1 = gr.Image(type="pil", label="Dental Analysis")
            image_output_2 = gr.Image(type="pil", label="Without Labels")
            html_output = gr.HTML(CLOUD_SDK_IFRAME)

    # Wire the button to the detector; it returns exactly the two images
    # listed in `outputs`.
    run_button.click(fn=detect_objects,
                     inputs=image_input,
                     outputs=[image_output_1, image_output_2])
|
|
|
|
|
|
|
# Entry point: start the Gradio server only when run as a script, not on import.
if __name__ == "__main__":
    demo.launch()
|