Modalities: Image
Formats: parquet
Languages: English
Libraries: Datasets, Dask
jpodivin committed
Commit bdae278 (1 parent: 3d18b90)

New semantic masks


Signed-off-by: Jiri Podivin <[email protected]>

.gitattributes CHANGED
@@ -64,3 +64,4 @@ metadata filter=lfs diff=lfs merge=lfs -text
 metadata_semantic_test.csv filter=lfs diff=lfs merge=lfs -text
 metadata_semantic_train.csv filter=lfs diff=lfs merge=lfs -text
 metadata_test.csv filter=lfs diff=lfs merge=lfs -text
+semantic_masks.tar.gz filter=lfs diff=lfs merge=lfs -text
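
Note: like the other large files above, the new semantic_masks.tar.gz is tracked with Git LFS, so a plain checkout may contain only a small pointer file (the three-line "version / oid / size" records visible in the hunks below) rather than the archive itself. A minimal pointer-check sketch; the helper name is hypothetical and not part of this commit:

def is_lfs_pointer(path):
    # Git LFS pointer files start with this version line; the real
    # payload (a gzipped tar) does not.
    with open(path, 'rb') as f:
        return f.read(34).startswith(b'version https://git-lfs.github.com')
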
masks.tar.00 DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:1b4875906c4fc60c1646b37b301d2b4aca7b64436b27bfe06d5683f7c98efe9e
-size 2662328320

masks.tar.01 DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:cf7f6a6db277892459bfb4f709fe621fd1c5a851b256ee05e0b38d7e06de8edd
-size 2402375680

metadata_semantic_test.csv DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:a0f94ce4ddaefe6cd6881698ae9b375907a9a44ee26ee6e8b226f7fc4843e0ac
-size 9839152

metadata_semantic_train.csv DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:dce5995456cc88209702d3e324299c99121db8f0d6819919aee8d600b5e4d47c
-size 22904232

semantic_masks.tar.01 DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:a027e3447f65c326b972ce1c1eeec102338f411a78e5d261d7f9fce405013515
-size 512757760

semantic_masks.tar.00 → semantic_masks.tar.gz RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:41d2d9638b7f12a5284ced53e1d9a4591b11c3227476feed328ca48947b744a5
-size 582758400
+oid sha256:67d089fdb940fae15dc5d9cae0af139a007888221d8821b6cd33ab4d49ee571a
+size 287311415
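
Since the split archive (semantic_masks.tar.00/.01) is replaced here by a single gzipped tarball, downstream users no longer need to concatenate parts before unpacking. A minimal extraction sketch, assuming the LFS object has been fetched (e.g. via git lfs pull) and the archive is trusted; the target directory name is illustrative:

import tarfile

# Unpack the semantic masks archive next to the checkout.
with tarfile.open('semantic_masks.tar.gz', 'r:gz') as archive:
    archive.extractall('semantic_masks')
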
semantic_metadata.csv ADDED
The diff for this file is too large to render.
 
utils/convert_masks.py ADDED
@@ -0,0 +1,202 @@
+#!/bin/env python
+
+import argparse
+import glob
+import json
+import os
+import tempfile
+from datetime import datetime
+from multiprocessing import Pool, cpu_count
+from urllib.parse import unquote
+
+import numpy as np
+import pandas as pd
+from PIL import Image
+
+
+class InputStream:
+    """Sequential reader over a string of bits ('0'/'1' characters)."""
+
+    def __init__(self, data):
+        self.data = data
+        self.i = 0
+
+    def read(self, size):
+        out = self.data[self.i : self.i + size]
+        self.i += size
+        return int(out, 2)
+
+
+def access_bit(data, num):
+    """Get the bit at position num of a bytes array."""
+    base = int(num // 8)
+    shift = 7 - int(num % 8)
+    return (data[base] & (1 << shift)) >> shift
+
+
+def bytes2bit(data):
+    """Get a bit string from bytes data."""
+    return ''.join([str(access_bit(data, i)) for i in range(len(data) * 8)])
+
+
+def decode_rle(rle, print_params: bool = False):
+    """Decode Label Studio RLE bytes into a flat numpy uint8 array.
+
+    The caller is expected to reshape the result into
+    [height, width, channel].
+
+    Args:
+        print_params (bool, optional): If True, print the decoded RLE
+            header parameters.
+    """
+    stream = InputStream(bytes2bit(rle))
+    num = stream.read(32)
+    word_size = stream.read(5) + 1
+    rle_sizes = [stream.read(4) + 1 for _ in range(4)]
+
+    if print_params:
+        print(
+            'RLE params:', num, 'values', word_size, 'word_size', rle_sizes, 'rle_sizes'
+        )
+
+    i = 0
+    out = np.zeros(num, dtype=np.uint8)
+    while i < num:
+        x = stream.read(1)
+        j = i + 1 + stream.read(rle_sizes[stream.read(2)])
+        if x:
+            # Run of a single repeated value.
+            val = stream.read(word_size)
+            out[i:j] = val
+            i = j
+        else:
+            # Run of literal values.
+            while i < j:
+                val = stream.read(word_size)
+                out[i] = val
+                i += 1
+    return out
+
+
+def log(message):
+    timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
+    print(f"[{timestamp}] {message}")
+
+
+def save_image(mask_image: Image.Image, save_path: str):
+    mask_image.save(save_path, format='PNG')
+    log(f'Saved mask: {save_path}')
+
+
+def process_files_in_parallel(files_to_process, masks_save_directory, source_files):
+    with Pool(processes=cpu_count() // 2) as pool:
+        results = pool.starmap(
+            process_file,
+            [(file, masks_save_directory, source_files) for file in files_to_process])
+
+    # Flatten the per-file metadata lists into a single list.
+    return [e for r in results for e in r]
+
+
+def process_file(file_path, masks_save_directory, source_files):
+    """Decode every brush annotation in one Label Studio task export
+    and save each as a binary PNG mask."""
+    log(f"Opening file: {file_path}")
+    total_metadata = []
+    try:
+        with open(file_path, 'r') as file:
+            data = json.load(file)
+    except Exception as e:
+        log(f'Error reading file {file_path}: {e}')
+        return total_metadata
+
+    image_name = data['task']['data']['image'].split('/')[-1]
+
+    if image_name not in source_files:
+        log(f"Requested file {image_name} does not exist in source data!")
+        return total_metadata
+
+    image_name_prefix = unquote(image_name.rsplit('.', 1)[0])
+    log(f"Processing image: {image_name_prefix}")
+    label_counts = {}
+
+    for result in data['result']:
+        if 'rle' not in result['value']:
+            log(f"No 'rle' key found in result: {result.get('id', 'Unknown ID')}")
+            continue
+
+        # The export stores the RLE payload as a list of byte values.
+        rle_bytes = bytes(result['value']['rle'])
+        mask = decode_rle(rle_bytes)
+
+        original_height = result['original_height']
+        original_width = result['original_width']
+        mask = mask.reshape((original_height, original_width, 4))
+
+        # A pixel belongs to the mask wherever the alpha channel is opaque.
+        alpha_channel = mask[:, :, 3]
+        mask_image = np.zeros((original_height, original_width, 3), dtype=np.uint8)
+        mask_image[alpha_channel == 255] = [255, 255, 255]
+
+        if 'brushlabels' in result['value']:
+            for label in result['value']['brushlabels']:
+                label_counts[label] = label_counts.get(label, 0) + 1
+                save_path = os.path.join(
+                    masks_save_directory,
+                    f"{image_name_prefix}-{label}-{label_counts[label]}.png")
+                save_image(Image.fromarray(mask_image).convert('L'), save_path)
+                metadata = {
+                    "original_height": result['original_height'],
+                    "original_width": result['original_width'],
+                    "image": os.path.join(
+                        'sourcedata/labeled/',
+                        os.path.basename(data['task']['data']['image'])),
+                    "score": result.get('score', 0),
+                    "mask": save_path,
+                    "class": label,
+                }
+                total_metadata.append(metadata)
+
+    return total_metadata
+
+
+def merge_file_masks(mask_info, target_mask_dir, label2id, img):
+    """Merge all single-class masks of one image into one semantic mask
+    whose pixel values are class ids."""
+    final_mask = np.zeros(
+        np.asarray(Image.open(mask_info['mask'].iloc[0])).shape, dtype=np.uint8)
+    for _, r in mask_info.iterrows():
+        mask = np.asarray(Image.open(r['mask']))
+        # Later masks overwrite earlier ones wherever they are nonzero.
+        final_mask = np.where(mask == 0, final_mask, label2id[r['class']])
+
+    mask_path = os.path.join(
+        target_mask_dir, f"{os.path.basename(img).split('.')[0]}_mask.png")
+    Image.fromarray(final_mask).convert('L').save(mask_path, format='PNG')
+    return {
+        'mask': mask_path,
+        'image': img,
+        'original_height': r['original_height'],
+        'original_width': r['original_width']
+    }
+
+
+def merge_masks(mask_metadata, target_mask_dir, label2id):
+    imgs = [
+        (
+            mask_metadata[mask_metadata['image'] == img],
+            target_mask_dir,
+            label2id,
+            img
+        ) for img in mask_metadata['image'].unique()]
+
+    with Pool(processes=cpu_count() // 2) as pool:
+        new_metadata = pool.starmap(merge_file_masks, imgs)
+
+    return new_metadata
+
+
+def main():
+    parser = argparse.ArgumentParser('maskconvert')
+    parser.add_argument('dataset_root')
+
+    arguments = parser.parse_args()
+    annotations_folder_path = os.path.join(arguments.dataset_root, 'labels_raw')
+    tmp_mask_path = tempfile.mkdtemp('masks')
+
+    files_to_process = glob.glob(f"{annotations_folder_path}/*")
+
+    # Sanity check: keep only annotations whose source image exists.
+    # recursive=True is needed for the '**' pattern to match subdirectories.
+    source_files = [
+        os.path.basename(name)
+        for name in glob.glob("sourcedata/**/*.jpg", recursive=True)]
+
+    metadata = pd.DataFrame(
+        process_files_in_parallel(files_to_process, tmp_mask_path, source_files))
+
+    id2label = dict(enumerate(['void', 'Fruit', 'Leaf', 'Flower', 'Stem']))
+    label2id = {v: k for k, v in id2label.items()}
+
+    semantic_masks_dir = os.path.join(arguments.dataset_root, 'semantic_masks')
+    os.makedirs(semantic_masks_dir, exist_ok=True)
+
+    result = merge_masks(metadata, semantic_masks_dir, label2id)
+    result = pd.DataFrame(result).drop_duplicates()
+    result.to_csv(
+        os.path.join(arguments.dataset_root, 'semantic_metadata.csv'),
+        index=False)
+
+
+if __name__ == '__main__':
+    main()
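
For downstream use, a minimal sketch of reading the generated artifacts, assuming the script was run from the repository root with dataset_root='.' so that semantic_metadata.csv and semantic_masks/ sit beside it; the column names and the id2label mapping are taken from convert_masks.py above:

import numpy as np
import pandas as pd
from PIL import Image

id2label = dict(enumerate(['void', 'Fruit', 'Leaf', 'Flower', 'Stem']))

metadata = pd.read_csv('semantic_metadata.csv')
row = metadata.iloc[0]

# Each mask is a single-channel PNG whose pixel values are class ids.
mask = np.asarray(Image.open(row['mask']))
assert mask.shape == (row['original_height'], row['original_width'])

# List the classes present in this mask with their pixel counts.
for class_id, count in zip(*np.unique(mask, return_counts=True)):
    print(f"{id2label[int(class_id)]}: {count} px")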