Basic setup for pulling
Signed-off-by: Jiri Podivin <[email protected]>
- .gitattributes +3 -0
- .gitignore +1 -0
- labels.csv +2 -2
- labels_raw.json → labels_raw.json.tar.gz +2 -2
- labels_raw.tar.gz +3 -0
- plantorgans.py +41 -26
.gitattributes CHANGED
@@ -56,3 +56,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 # Repo specific
 labels.csv filter=lfs diff=lfs merge=lfs -text
 labels_raw.json filter=lfs diff=lfs merge=lfs -text
+labels_test.csv filter=lfs diff=lfs merge=lfs -text
+labels_train.csv filter=lfs diff=lfs merge=lfs -text
+labels_raw filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1 @@
+test*
labels.csv CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:a49b0dfbc6791a093ca4f5ea494bf9ce7c0b38ec7dc3dd528f3cb215c2ccc941
+size 3989805491
labels_raw.json → labels_raw.json.tar.gz RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:9b73aa96821ec151fd2da7ef66ee93c1218d2052e536f30c56eb62b7f266f31e
+size 471437296
labels_raw.tar.gz ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fdfccc23b41dc268ab608d08662b9f90115fcb600423dad13dd87956063de5db
+size 424277514
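
The labels files above are stored as Git LFS pointers, so a plain clone without LFS fetches only the pointer text. A minimal sketch of pulling one of the metadata files through the Hub API; the repo id and filename come from this commit, the rest is standard huggingface_hub usage and not part of the change:

from huggingface_hub import hf_hub_download

# Download one LFS-backed metadata file from the dataset repository.
labels_path = hf_hub_download(
    repo_id="jpodivin/plantorgans",
    filename="labels_train.csv",
    repo_type="dataset",
)
print(labels_path)  # local cache path of the resolved CSV, not the LFS pointer
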
plantorgans.py CHANGED
@@ -1,4 +1,6 @@
 import datasets
+import os
+import json
 
 _DESCRIPTION = """Photos of various plants with their major, above ground organs labeled. Includes labels for stem, leafs, fruits and flowers."""
 
@@ -15,20 +17,41 @@ _NAMES = [
     'Fruit',
 ]
 
-_BASE_URL = "https://huggingface.co/datasets/jpodivin/plantorgans/
+_BASE_URL = "https://huggingface.co/datasets/jpodivin/plantorgans/resolve/main/"
+_TRAIN_URLS = [_BASE_URL + f"sourcedata_labeled.tar.{i:02}" for i in range(0, 8)]
+_TEST_URLS = [_BASE_URL + f"sourcedata_labeled.tar.{i:02}" for i in range(8, 12)]
+_METADATA_URLS = {
+    'train': 'https://huggingface.co/datasets/jpodivin/plantorgans/resolve/main/labels_train.csv',
+    'test': 'https://huggingface.co/datasets/jpodivin/plantorgans/resolve/main/labels_test.csv'
+}
 
-_DOWNLOAD_SUFFIX = "download?true"
-_METADATA_URLS = [
-    ""
-]
+
+class PlantOrgansConfig(datasets.BuilderConfig):
+    """Builder Config for PlantOrgans"""
+
+    def __init__(self, data_url, metadata_urls, splits, **kwargs):
+        """BuilderConfig for PlantOrgans.
+        Args:
+            data_url: `string`, url to download the zip file from.
+            metadata_urls: dictionary with keys 'train' and 'validation' containing the archive metadata URLs
+            **kwargs: keyword arguments forwarded to super.
+        """
+        super().__init__(version=datasets.Version("1.0.0"), **kwargs)
+        self.data_url = data_url
+        self.metadata_urls = metadata_urls
+        self.splits = splits
+
 
 class PlantOrgans(datasets.GeneratorBasedBuilder):
     """Plantorgans dataset
     """
     BUILDER_CONFIGS = [
         PlantOrgansConfig(
-            name="
-            description="This configuration contains segmentation masks."
+            name="semantic_segmentation_full",
+            description="This configuration contains segmentation masks.",
+            data_url=_BASE_URL,
+            metadata_urls=_METADATA_URLS,
+            splits=['train', 'test'],
         ),
     ]
 
@@ -49,37 +72,29 @@ class PlantOrgans(datasets.GeneratorBasedBuilder):
 
 
     def _split_generators(self, dl_manager):
-
+        train_archive_path = dl_manager.download_and_extract(_TRAIN_URLS)
+        test_archive_path = dl_manager.download_and_extract(_TEST_URLS)
+
         split_metadata_paths = dl_manager.download(_METADATA_URLS)
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 gen_kwargs={
-                    "images": dl_manager.iter_archive(
+                    "images": dl_manager.iter_archive(os.path.join(train_archive_path, 'sourcedata/labeled')),
                     "metadata_path": split_metadata_paths["train"],
                 },
             ),
             datasets.SplitGenerator(
-                name=datasets.Split.
+                name=datasets.Split.TEST,
                 gen_kwargs={
-                    "images": dl_manager.iter_archive(
+                    "images": dl_manager.iter_archive(os.path.join(test_archive_path, 'sourcedata/labeled')),
                     "metadata_path": split_metadata_paths["test"],
                 },
            ),
         ]
     def _generate_examples(self, images, metadata_path):
-
-
-
-
-
-    def __init__(self, data_url, metadata_urls, **kwargs):
-        """BuilderConfig for PlantOrgans.
-        Args:
-            data_url: `string`, url to download the zip file from.
-            metadata_urls: dictionary with keys 'train' and 'validation' containing the archive metadata URLs
-            **kwargs: keyword arguments forwarded to super.
-        """
-        super().__init__(version=datasets.Version("1.0.0"), **kwargs)
-        self.data_url = data_url
-        self.metadata_urls = metadata_urls
+
+        with open(metadata_path, 'w', encoding='utf-8') as fp:
+            metadata = json.load(fp)
+        images = metadata['image']
+        annotations = metadata['annotations']
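
The committed _generate_examples is still a stub: it opens the metadata file in write mode, parses it with json.load even though the metadata URLs point at labels_train.csv and labels_test.csv, and yields nothing. A minimal sketch of how the body might be completed under the assumption that the CSV maps each image path inside the archives to a mask path; the column names 'image' and 'annotation' and the yielded feature layout are assumptions, not part of this commit:

import csv

def _generate_examples(self, images, metadata_path):
    # Read the CSV metadata in read mode; the 'image' and 'annotation'
    # column names are hypothetical placeholders.
    with open(metadata_path, 'r', encoding='utf-8', newline='') as fp:
        masks = {row['image']: row['annotation'] for row in csv.DictReader(fp)}
    # `images` is the iterator produced by dl_manager.iter_archive(),
    # which yields (path inside archive, file object) pairs.
    for idx, (path, file_obj) in enumerate(images):
        if path in masks:
            yield idx, {
                "image": {"path": path, "bytes": file_obj.read()},
                "annotation": masks[path],
            }
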
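Once the loader script and the sourcedata_labeled.tar.* shards referenced by _TRAIN_URLS and _TEST_URLS are in place, the configuration defined above should be loadable in the usual way; this is standard datasets usage, untested against this revision:

from datasets import load_dataset

# Downloads the tar shards and CSV metadata listed in the script, then builds the splits.
dataset = load_dataset("jpodivin/plantorgans", "semantic_segmentation_full")
print(dataset)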