Loading script update
Signed-off-by: Jiri Podivin <[email protected]>
- plantorgans.py +57 -15
plantorgans.py
CHANGED
@@ -1,6 +1,7 @@
 import datasets
-import
-import
+import pandas as pd
+import glob
+from pathlib import Path

 _DESCRIPTION = """Photos of various plants with their major, above ground organs labeled. Includes labels for stem, leafs, fruits and flowers."""

@@ -20,9 +21,11 @@ _NAMES = [
 _BASE_URL = "https://huggingface.co/datasets/jpodivin/plantorgans/resolve/main/"
 _TRAIN_URLS = [_BASE_URL + f"sourcedata_labeled.tar.{i:02}" for i in range(0, 8)]
 _TEST_URLS = [_BASE_URL + f"sourcedata_labeled.tar.{i:02}" for i in range(8, 12)]
+_MASKS_URLS = [_BASE_URL + f"masks.tar.0{i}" for i in range(0, 2)]
+
 _METADATA_URLS = {
-    'train': 'https://huggingface.co/datasets/jpodivin/plantorgans/resolve/main/
-    'test': 'https://huggingface.co/datasets/jpodivin/plantorgans/resolve/main/
+    'train': 'https://huggingface.co/datasets/jpodivin/plantorgans/resolve/main/metadata_train.csv',
+    'test': 'https://huggingface.co/datasets/jpodivin/plantorgans/resolve/main/metadata_test.csv'
 }


@@ -61,7 +64,7 @@ class PlantOrgans(datasets.GeneratorBasedBuilder):
             features=datasets.Features(
                 {
                     "image": datasets.Image(),
-                    "
+                    "mask": datasets.Image(),
                 }
             ),
             supervised_keys=("image", "annotation"),
@@ -72,29 +75,68 @@ class PlantOrgans(datasets.GeneratorBasedBuilder):


     def _split_generators(self, dl_manager):
-
-
-
+
+        train_archives_paths = dl_manager.download_and_extract(_TRAIN_URLS)
+        test_archives_paths = dl_manager.download_and_extract(_TEST_URLS)
+
+        train_paths = []
+        test_paths = []
+
+        for p in train_archives_paths:
+            train_paths.extend(glob.glob(str(p)+'/sourcedata/labeled/**.jpg'))
+        for p in test_archives_paths:
+            test_paths.extend(glob.glob(str(p)+'/sourcedata/labeled/**.jpg'))
         split_metadata_paths = dl_manager.download(_METADATA_URLS)
+
+        mask_archives_paths = dl_manager.download_and_extract(_MASKS_URLS)
+
+        mask_paths = []
+        for p in mask_archives_paths:
+            mask_paths.extend(glob.glob(str(p)+'/masks/**.png'))
+
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 gen_kwargs={
-                    "images":
+                    "images": train_paths,
                     "metadata_path": split_metadata_paths["train"],
+                    "masks_path": mask_paths,
                 },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
                 gen_kwargs={
-                    "images":
+                    "images": test_paths,
                     "metadata_path": split_metadata_paths["test"],
+                    "masks_path": mask_paths,
                 },
             ),
         ]
-
+
+
+    def _generate_examples(self, images, metadata_path, masks_path):
+        """
+        images: path to image directory
+        metadata_path: path to metadata csv
+        """
+
+        # Get local image paths
+        image_paths = pd.DataFrame(
+            [(str(Path(*Path(e).parts[-3:])), e) for e in images], columns=['image', 'image_path'])
+
+        # Get local mask paths
+        masks_paths = pd.DataFrame(
+            [(str(Path(*Path(e).parts[-2:])), e) for e in masks_path], columns=['mask', 'mask_path'])

-
-
-
-
+        # Get all common about images and masks from csv
+        metadata = pd.read_csv(metadata_path)
+
+        # Merge dataframes
+        metadata = metadata.merge(masks_paths, on='mask', how='inner')
+        metadata = metadata.merge(image_paths, on='image', how='inner')
+
+        # Make examples and yield
+        for i, r in metadata.iterrows():
+
+            # Each example must contain path to image and list of annotations under object key
+            yield i, {'mask': r['mask_path'], 'image': r['image_path']}