alibidaran committed
Commit 7ca0c88 · verified · Parent(s): fe9123f

Update app.py

Files changed (1)
app.py +34 -21
app.py CHANGED
@@ -1,13 +1,14 @@
 import gradio as gr
 from ultralytics import YOLO
 from PIL import Image
-from ultralytics import YOLO
-from PIL import Image
-from ultralytics.utils.plotting import Annotator,colors
-import glob
-model = YOLO('Dental_model.pt') # Replace 'yolov8n.pt' with your model file if using a custom one
-pic_files=glob.glob('*.jpg')
-names=model.model.names
+from ultralytics.utils.plotting import Annotator, colors
+import glob
+
+# Load model and data
+model = YOLO('Dental_model.pt')
+pic_files = glob.glob('*.jpg')
+names = model.model.names
+
 def detect_objects(image):
     image1 = image.copy()
     results = model.predict(image)
@@ -15,24 +16,36 @@ def detect_objects(image):
     boxes = results[0].boxes.xyxy.cpu()
     annotator = Annotator(image, line_width=3)
     annotator1 = Annotator(image1, line_width=3)
-    cloud_sdk='<iframe src="https://cloudhand-sdk-xlsh.vercel.app/" width="100%" height="600px" style="border:none;"></iframe>'
+    cloud_sdk = '<iframe src="https://cloudhand-sdk-xlsh.vercel.app/" width="100%" height="600px" style="border:none;"></iframe>'
+
     for box, cls in zip(boxes, classes):
         annotator.box_label(box, label=names[int(cls)], color=colors(int(cls)))
         annotator1.box_label(box, label=None, color=colors(int(cls)))
-
-    return Image.fromarray(annotator.result()), Image.fromarray(annotator1.result()),cloud_sdk
 
+    return Image.fromarray(annotator.result()), Image.fromarray(annotator1.result())
+
+# Gradio Blocks App
+with gr.Blocks() as demo:
+    gr.Markdown("## Dental Analysis")
+    gr.Markdown("Analyze your Dental XRAY image with our AI object Detection model")
 
-# Gradio Interface
-title = "YOLOv8 Object Detection"
-description = "Upload an image to detect objects using a YOLOv8 model."
+    with gr.Row():
+        with gr.Column():
+            image_input = gr.Image(type="pil", label="Upload Image")
+            run_button = gr.Button("Run Detection")
+            example_images = gr.Examples(
+                examples=pic_files,
+                inputs=image_input,
+                label="Examples"
+            )
+        with gr.Column():
+            image_output_1 = gr.Image(type="pil", label="Dental Analysis")
+            image_output_2 = gr.Image(type="pil", label="Without Labels")
+            html_output = gr.HTML(cloud_sdk)
 
-gradio_app =gr.Interface(fn=detect_objects,
-                         inputs=gr.Image(type="pil"),
-                         outputs=[gr.Image(type='pil', label="Dental Analysis"),
-                                  gr.Image(type='pil', label="Dental Analysis"),
-                                  gr.HTML() ],
-                         examples=pic_files)
+    run_button.click(fn=detect_objects,
+                     inputs=image_input,
+                     outputs=[image_output_1, image_output_2])
 
-if __name__=="__main__":
-    gradio_app.launch()
+if __name__ == "__main__":
+    demo.launch()
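
Note on the new layout: as committed, cloud_sdk is assigned inside detect_objects, but gr.HTML(cloud_sdk) is evaluated at module level while the Blocks UI is being built, so that name would not be defined there and Python would raise a NameError. A minimal sketch of one way to resolve this (not part of the commit, reusing the iframe URL already present in the diff) is to hoist the markup to module scope:

# Sketch only, not part of the commit: define the iframe markup at module
# scope so gr.HTML(cloud_sdk) can resolve the name when the Blocks UI is built.
cloud_sdk = (
    '<iframe src="https://cloudhand-sdk-xlsh.vercel.app/" '
    'width="100%" height="600px" style="border:none;"></iframe>'
)

with gr.Blocks() as demo:
    # ... inputs and image outputs as in the commit ...
    html_output = gr.HTML(cloud_sdk)

With that change, detect_objects no longer needs its local cloud_sdk assignment, which matches its two-value return and the two image outputs wired to run_button.click.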