rgautroncgiar commited on
Commit
b6ad7e1
·
0 Parent(s):

Initial commit

Browse files
.gitattributes ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ *.jpeg filter=lfs diff=lfs merge=lfs -text
37
+ *.jpg filter=lfs diff=lfs merge=lfs -text
38
+ images/annotated_1688033955437_.jpg filter=lfs diff=lfs merge=lfs -text
39
+ images/1688033955437_.jpg filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ **/_*
2
+ _*
README.md ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ tags:
3
+ - coffee
4
+ - cherry count
5
+ - yield estimate
6
+ - ultralyticsplus
7
+ - yolov8
8
+ - ultralytics
9
+ - yolo
10
+ - vision
11
+ - object-detection
12
+ - pytorch
13
+
14
+ library_name: ultralytics
15
+ library_version: 8.0.75
16
+ inference: false
17
+
18
+ datasets:
19
+ - rgautron/croppie_coffee
20
+
21
+ model-index:
22
+ - name: rgautron/croppie_coffee
23
+ results:
24
+ - task:
25
+ type: object-detection
26
+
27
+ dataset:
28
+ type: rgautron/croppie_coffee
29
+ name: croppie_coffee
30
+ split: val
31
+
32
+ metrics:
33
+ - type: precision # substitute for [email protected]
34
+ value: 0.691
35
+ name: [email protected](box)
36
+ ---
37
+ ### General description
38
+ Ultralytics' YOLOv8 medium model fine-tuned for coffee cherry detection using the [Croppie coffee dataset](https://huggingface.co/datasets/rgautroncgiar/croppie_coffee_split).
39
+
40
+ ![](images/annotated_1688033955437_.jpg)
41
+
42
+ **Note: the low visibility/unsure class was not used for model fine tuning**
43
+
44
+ The predicted numerical classes correspond to the following cherry types:
45
+ ```
46
+ {0: "dark_brown_cherry", 1: "green_cherry", 2: "red_cherry", 3: "yellow_cherry"}
47
+ ```
48
+
49
+ ### Demonstration
50
+ Assuming you are in the ```scripts``` folder, you can run ```python3 test_script.py```. This script saves the annotated image in ```../images/annotated_1688033955437.jpg```.
51
+
52
+ Make sure that the Python packages found in ```requirements.txt``` are installed. In case they are not, simply run ```pip3 install -r requirements.txt```.
images/1688033955437.jpg ADDED

Git LFS Details

  • SHA256: 9a58c4b5276d52c1f981c665ea66f92483384780e41117000deb21dab22db463
  • Pointer size: 132 Bytes
  • Size of remote file: 2.53 MB
images/annotated_1688033955437_.jpg ADDED

Git LFS Details

  • SHA256: 6e33021cbfb6cefe6dd50805701ec7f0d35985847a819735766b8c4e40418ab2
  • Pointer size: 131 Bytes
  • Size of remote file: 942 kB
model_v3_202402021.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3180ebfc4422978e38d18722ba23840debaf531867770aaaddebba2f6431b988
3
+ size 52013835
scripts/render_results.py ADDED
@@ -0,0 +1,71 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import cv2
2
+ import matplotlib.pyplot as plt
3
+ from PIL import ImageColor
4
+ from pathlib import Path
5
+ import os
6
+
7
def annotate_image_prediction(image_path, yolo_boxes, class_dic, saving_folder, hex_class_colors=None, show=False, true_count=False, saving_image_name=None, put_title=True, box_thickness=3, font_scale=1, font_thickness=5):
    """
    Function to annotate an individual image with YOLO predictions.

    Args:
        image_path (str): path to the image to label
        yolo_boxes: YOLO predicted boxes (each item exposes .xyxy and .cls, per ultralytics Boxes)
        class_dic (dict): dictionary with predicted class index as key and corresponding label as value
        saving_folder (str): folder where to save the annotated image
        hex_class_colors (dict, optional): HEX color string (e.g. '#FF0000') per class label. Defaults to None (red for every class).
        show (bool, optional): if you want a window of the annotated image to pop up. Defaults to False.
        true_count (int, optional): true total count of cherries to display in the title. Defaults to False (not displayed).
        saving_image_name (str, optional): name of the annotated image to save. Defaults to None ('annotated_<original file name>').
        put_title (bool, optional): if you want a count title drawn on the image. Defaults to True.
        box_thickness (int, optional): thickness of the bounding boxes to plot. Defaults to 3.
        font_scale (int, optional): font scale of the text of counts to be displayed. Defaults to 1.
        font_thickness (int, optional): font thickness of the text of counts to be displayed. Defaults to 5.

    Returns:
        str | None: saving path of the annotated image, or None if image_path does not exist.
    """
    # Guard clause instead of wrapping the whole body in `if os.path.isfile(...)`.
    if not os.path.isfile(image_path):
        print(f'WARNING: {image_path} does not exist')
        return None

    Path(saving_folder).mkdir(parents=True, exist_ok=True)
    image_file = os.path.basename(image_path)
    if not hex_class_colors:
        # Bug fix: the previous default used RGB tuples (255, 0, 0), which
        # crash in ImageColor.getcolor below — it expects a color *string*.
        hex_class_colors = {class_name: '#FF0000' for class_name in class_dic.values()}
    # Map class index -> RGB tuple for cv2 drawing.
    color_map = {key: ImageColor.getcolor(hex_class_colors[class_dic[key]], 'RGB') for key in class_dic}

    img = cv2.imread(image_path)
    # cv2 loads BGR; convert to RGB so matplotlib displays/saves correct colors.
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    dh, dw, _ = img.shape

    for yolo_box in yolo_boxes:
        x1, y1, x2, y2 = (int(v) for v in yolo_box.xyxy[0])
        c = int(yolo_box.cls[0])
        cv2.rectangle(img, (x1, y1), (x2, y2), color_map[c], box_thickness)

    if show:
        plt.imshow(img)
        plt.show()
    img_copy = img.copy()
    if put_title:
        if true_count:
            title = f'Predicted count: {len(yolo_boxes)}, true count: {true_count}, delta: {len(yolo_boxes) - true_count}'
        else:
            title = f'Predicted count: {len(yolo_boxes)}'
        cv2.putText(
            img=img_copy,
            text=title,
            org=(int(0.1 * dw), int(0.1 * dh)),  # anchor the title 10% in from the top-left corner
            fontFace=cv2.FONT_HERSHEY_SIMPLEX,
            fontScale=font_scale,
            thickness=font_thickness,
            color=(255, 251, 5),
        )

    if not saving_image_name:
        saving_image_name = f'annotated_{image_file}'
    # Note: the duplicate mkdir call that was here has been removed (done above).
    full_saving_path = os.path.join(saving_folder, saving_image_name)
    plt.imsave(full_saving_path, img_copy)
    return full_saving_path
scripts/requirements.txt ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ torch==2.1.2
2
+ ultralytics==8.0.75
3
+ Pillow==9.3.0
4
+ matplotlib==3.7.1
5
+ opencv-python==4.7.0.72
scripts/test_script.py ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import ultralytics
ultralytics.checks()
from render_results import annotate_image_prediction
from PIL import Image

if __name__ == '__main__':
    # load model (fine-tuned YOLOv8 weights shipped with this repo)
    model = ultralytics.YOLO('../model_v3_202402021.pt')

    # set parameters
    image_path = '../images/1688033955437.jpg'
    saving_folder = '../images'

    # infer — ultralytics accepts the image path directly.
    # Bug fix: the previous `Image.open(image_path)` result was never used
    # and leaked an open file handle.
    results = model.predict(image_path)

    # plot: one HEX color per class label
    # (low_visibility_unsure kept for completeness; it is not in class_dic
    # because that class was not used for fine-tuning)
    hex_class_colors = {'green_cherry': '#9CF09A',
                        'yellow_cherry': '#F3C63D',
                        'red_cherry': '#F44336',
                        'dark_brown_cherry': '#C36105',
                        'low_visibility_unsure': '#02D5FA'}
    # predicted numeric class index -> label
    class_dic = {1: 'green_cherry', 3: 'yellow_cherry', 2: 'red_cherry', 0: 'dark_brown_cherry'}
    render = annotate_image_prediction(
        image_path=image_path,
        yolo_boxes=results[0].boxes,
        class_dic=class_dic,
        saving_folder=saving_folder,
        hex_class_colors=hex_class_colors,
        show=True,
        font_scale=5,
        font_thickness=10,
    )