Datasets:

Modalities:
Image
License:
DehydratedWater42 committed on
Commit
f0ceea2
·
verified ·
1 Parent(s): 204925b

Uploaded subsets of the HaGRID dataset with the top X images per category

Browse files
extract_subset_from_hagrid.py ADDED
@@ -0,0 +1,117 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # %% Cell 1
2
+
3
+ import os
4
+ import json
5
+ from typing import Any
6
+ from zipfile import ZipFile
7
+
8
def export_subset_of_hagrid(output_name: str, count_to_export: int = 1000,
                            archive_path: str = 'hagridv2_512.zip'):
    """Copy up to ``count_to_export`` .jpg images per gesture category out of
    the HaGRID zip archive into ``output_name/<category>/``.

    The stems of the exported images (file name without the ".jpg" suffix)
    are recorded per category in ``exported_images.json`` in the current
    working directory, so the annotation-extraction step can match them.

    Args:
        output_name: directory to extract the per-category image subsets into.
        count_to_export: maximum number of images to keep per category.
        archive_path: source zip archive (defaults to the HaGRID v2 512px dump).
    """
    image_subset_by_sign: dict[str, list[str]] = {}

    with ZipFile(archive_path) as zf:
        for member in zf.namelist():
            if not member.endswith('.jpg'):
                continue
            # Archive layout is <root>/<sign_category>/<image>.jpg — TODO
            # confirm this holds for every member of the archive.
            sign_category = member.split("/")[1]
            exported = image_subset_by_sign.setdefault(sign_category, [])
            if len(exported) >= count_to_export:
                continue  # this category already has enough images
            # Create the target directory lazily, only when actually exporting.
            os.makedirs(f"{output_name}/{sign_category}", exist_ok=True)
            file_name = member.split('/')[-1]
            exported.append(file_name[:-4])  # stem without the ".jpg" suffix
            export_file_name = f"{output_name}/{sign_category}/{file_name}"
            with open(export_file_name, 'wb') as img_file:
                img_file.write(zf.read(member))

    # Persist the manifest of exported image stems for the next pipeline step.
    with open("exported_images.json", 'w') as manifest:
        json.dump(image_subset_by_sign, manifest)

# export_subset_of_hagrid('hagrid-10', count_to_export=10)
31
+
32
+ # %% Cell 2
33
+
34
+ import json
35
+
36
def extract_connected_annotations(output_name: str):
    """Collect the HaGRID annotations matching the previously exported subset.

    Reads ``exported_images.json`` (written by ``export_subset_of_hagrid``)
    and ``annotations.zip`` from the current working directory, and writes two
    JSON files there: ``annotations.json`` (full annotation records) and
    ``minimal-annotations.json`` (only labels/bboxes/meta per image).

    Args:
        output_name: kept for interface compatibility; currently unused.
    """
    with ZipFile("annotations.zip") as fa:
        with open("exported_images.json", 'r') as manifest:
            exported_examples = json.load(manifest)

        annotations_for_exported_subset: dict[str, dict[str, Any]] = {}
        minimal_annotations_for_exported_subset: dict[str, dict[str, Any]] = {}

        for member in fa.namelist():
            print(member)
            if not member.endswith('.json'):
                continue
            category = member.split("/")[-1][:-5]  # strip the ".json" suffix
            # Register every category seen, so the output lists them all even
            # when no images of that category were exported (empty dicts).
            if category not in annotations_for_exported_subset:
                annotations_for_exported_subset[category] = {}
                minimal_annotations_for_exported_subset[category] = {}
            if category not in exported_examples:
                continue

            with fa.open(member) as json_file:
                labels = json.load(json_file)

            # Annotations are keyed by image stem; keep only exported images.
            # Compute the intersection once instead of once per use.
            overlapping = set(labels.keys()).intersection(exported_examples[category])
            print(category)
            print(overlapping)

            for image_id in overlapping:
                annotation = labels[image_id]
                annotations_for_exported_subset[category][image_id] = annotation
                minimal_annotations_for_exported_subset[category][image_id] = {
                    "labels": annotation["labels"],
                    "bboxes": annotation["bboxes"],
                    "meta": annotation["meta"],
                }

    with open("minimal-annotations.json", 'w') as f:
        json.dump(minimal_annotations_for_exported_subset, f, indent=4)

    with open("annotations.json", 'w') as f:
        json.dump(annotations_for_exported_subset, f, indent=4)

# extract_connected_annotations('hagrid-10')
79
+ # %% Cell 3
80
def run_pipeline(output_name: str, count_exported: int):
    """Run the full export: extract images, match annotations, bundle both
    into ``<output_name>_<count_exported>_images.zip``, then delete the
    intermediate image directory and JSON files.

    Args:
        output_name: working directory name used for the extracted images.
        count_exported: maximum number of images per gesture category.
    """
    export_subset_of_hagrid(output_name, count_exported)
    extract_connected_annotations(output_name)

    # JSON side-products that the two steps above write to the current
    # working directory (NOT inside `output_name`).
    annotation_files = [
        "minimal-annotations.json",
        "annotations.json",
        "exported_images.json",
    ]

    output_zip_name = f"{output_name}_{count_exported}_images.zip"
    with ZipFile(output_zip_name, 'w') as archive:
        # Images: stored relative to the export dir (<category>/<image>.jpg).
        for root, dirs, files in os.walk(output_name):
            for file in files:
                file_path = os.path.join(root, file)
                archive.write(file_path, arcname=os.path.relpath(file_path, start=output_name))

        # BUG FIX: the JSON files live in the current working directory, not
        # under `output_name`, so the old lookup via
        # os.path.join(output_name, file_name) never matched and the
        # annotations were silently left out of the archive.
        for file_name in annotation_files:
            if os.path.exists(file_name):
                archive.write(file_name, arcname=file_name)

    # Delete the extracted image tree bottom-up, then the root dir itself.
    for root, dirs, files in os.walk(output_name, topdown=False):
        for name in files:
            os.remove(os.path.join(root, name))
        for name in dirs:
            os.rmdir(os.path.join(root, name))
    os.rmdir(output_name)

    # Remove the intermediate JSON files from the working directory.
    for file_name in annotation_files:
        print(file_name)
        if os.path.exists(file_name):
            os.remove(file_name)

run_pipeline('hagrid-export', 100)
run_pipeline('hagrid-export', 500)
run_pipeline('hagrid-export', 1000)
hagrid-export_1000_images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:43505fb19cc383478d34c7242c3032beb957896bedfe9cfd4a354d848f8db92d
3
+ size 4056532453
hagrid-export_100_images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:72ada2959c117d23e98c8c4da2c9009580554d12163a6db8f78de50312cbdc47
3
+ size 402272913
hagrid-export_500_images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f9ce69320543d29f1e15c750b25a421019b2ded7798e6c850b3b203d3ddb0436
3
+ size 2028554073
hagrid-export_5_images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:615edd12fbe0f56cdaf8573a82b4c1b6ef4c5cf9a8a4c9a258bc4d5bc2948ffd
3
+ size 20336296