Nguyen Thai Thao Uyen committed
Commit 14cfc9a · 1 Parent(s): b9b2d60

Add application file

Files changed (1)
  1. app.py +80 -0
app.py ADDED
@@ -0,0 +1,80 @@
+ import gradio as gr
+ import torch
+ from ultralyticsplus import YOLO, render_result
+
+
+ torch.hub.download_url_to_file(
+     'https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg',
+     'one.jpg')
+ torch.hub.download_url_to_file(
+     'https://www.state.gov/wp-content/uploads/2022/01/shutterstock_248799484-scaled.jpg',
+     'two.jpg')
+ torch.hub.download_url_to_file(
+     'https://cdn.theatlantic.com/thumbor/xoh2WVVSx4F2uboG9xbT5BDprtM=/0x0:4939x2778/960x540/media/img/mt/2023/11/LON68717_copy/original.jpg',
+     'three.jpg')
+
+
+ def yoloV8_func(image: gr.inputs.Image = None,
+                 image_size: gr.inputs.Slider = 640,
+                 conf_threshold: gr.inputs.Slider = 0.4,
+                 iou_threshold: gr.inputs.Slider = 0.50):
+     """Perform YOLOv8 object detection on the given image.
+
+     Args:
+         image (gr.inputs.Image, optional): Input image to detect objects on. Defaults to None.
+         image_size (gr.inputs.Slider, optional): Desired image size for the model. Defaults to 640.
+         conf_threshold (gr.inputs.Slider, optional): Confidence threshold for object detection. Defaults to 0.4.
+         iou_threshold (gr.inputs.Slider, optional): Intersection over Union threshold for object detection. Defaults to 0.50.
+     """
+     # Load the YOLOv8 model from the 'YOLO-best.pt' checkpoint
+     model_path = "YOLO-best.pt"
+     model = YOLO(model_path)
+
+     # Perform object detection on the input image using the YOLOv8 model
+     results = model.predict(image,
+                             conf=conf_threshold,
+                             iou=iou_threshold,
+                             imgsz=image_size)
+
+     # Print the detected objects' information (class, coordinates, and probability)
+     box = results[0].boxes
+     print("Object type:", box.cls)
+     print("Coordinates:", box.xyxy)
+     print("Probability:", box.conf)
+
+     # Render the output image with bounding boxes around detected objects
+     render = render_result(model=model, image=image, result=results[0])
+     return render
+
+
+ inputs = [
+     gr.inputs.Image(type="filepath", label="Input Image"),
+     gr.inputs.Slider(minimum=320, maximum=1280, default=640,
+                      step=32, label="Image Size"),
+     gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.25,
+                      step=0.05, label="Confidence Threshold"),
+     gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.45,
+                      step=0.05, label="IOU Threshold"),
+ ]
+
+
+ outputs = gr.outputs.Image(type="filepath", label="Output Image")
+
+ title = "YOLOv8 101: Custom Object Detection on Objects in Big Cities"
+
+
+ examples = [['one.jpg', 640, 0.5, 0.7],
+             ['two.jpg', 800, 0.5, 0.6],
+             ['three.jpg', 900, 0.5, 0.8]]
+
+ yolo_app = gr.Interface(
+     fn=yoloV8_func,
+     inputs=inputs,
+     outputs=outputs,
+     title=title,
+     examples=examples,
+     cache_examples=True,
+ )
+
+ # Launch the Gradio interface in debug mode with queue enabled
+ yolo_app.launch(debug=True, enable_queue=True)