Datasets:

Modalities:
Image
Formats:
parquet
Languages:
English
DOI:
Libraries:
Datasets
Dask
License:
jpodivin commited on
Commit
35ec7f3
1 Parent(s): 47645d8

Basic setup for pulling

Browse files

Signed-off-by: Jiri Podivin <[email protected]>

.gitattributes CHANGED
@@ -56,3 +56,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
56
  # Repo specific
57
  labels.csv filter=lfs diff=lfs merge=lfs -text
58
  labels_raw.json filter=lfs diff=lfs merge=lfs -text
 
 
 
 
56
  # Repo specific
57
  labels.csv filter=lfs diff=lfs merge=lfs -text
58
  labels_raw.json filter=lfs diff=lfs merge=lfs -text
59
+ labels_test.csv filter=lfs diff=lfs merge=lfs -text
60
+ labels_train.csv filter=lfs diff=lfs merge=lfs -text
61
+ labels_raw filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1 @@
 
 
1
+ test*
labels.csv CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:27866cd0317906ce4f17b5a25609d62d7dee21dffe3183de3cfc8ec0b8ec8105
3
- size 3595889996
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a49b0dfbc6791a093ca4f5ea494bf9ce7c0b38ec7dc3dd528f3cb215c2ccc941
3
+ size 3989805491
labels_raw.json → labels_raw.json.tar.gz RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:168e4571e61a489453f8262108557b8fe3d07b4e2b341fbef5340181b1ff663d
3
- size 3993693859
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9b73aa96821ec151fd2da7ef66ee93c1218d2052e536f30c56eb62b7f266f31e
3
+ size 471437296
labels_raw.tar.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fdfccc23b41dc268ab608d08662b9f90115fcb600423dad13dd87956063de5db
3
+ size 424277514
plantorgans.py CHANGED
@@ -1,4 +1,6 @@
1
  import datasets
 
 
2
 
3
  _DESCRIPTION = """Photos of various plants with their major, above ground organs labeled. Includes labels for stem, leafs, fruits and flowers."""
4
 
@@ -15,20 +17,41 @@ _NAMES = [
15
  'Fruit',
16
  ]
17
 
18
- _BASE_URL = "https://huggingface.co/datasets/jpodivin/plantorgans/blob/main/sourcedata/"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
19
 
20
- _DOWNLOAD_SUFFIX = "download?true"
21
- _METADATA_URLS = [
22
- ""
23
- ]
24
 
25
  class PlantOrgans(datasets.GeneratorBasedBuilder):
26
  """Plantorgans dataset
27
  """
28
  BUILDER_CONFIGS = [
29
  PlantOrgansConfig(
30
- name="semantic_segmentation",
31
- description="This configuration contains segmentation masks."
 
 
 
32
  ),
33
  ]
34
 
@@ -49,37 +72,29 @@ class PlantOrgans(datasets.GeneratorBasedBuilder):
49
 
50
 
51
  def _split_generators(self, dl_manager):
52
- archive_path = dl_manager.download(_BASE_URL)
 
 
53
  split_metadata_paths = dl_manager.download(_METADATA_URLS)
54
  return [
55
  datasets.SplitGenerator(
56
  name=datasets.Split.TRAIN,
57
  gen_kwargs={
58
- "images": dl_manager.iter_archive(archive_path),
59
  "metadata_path": split_metadata_paths["train"],
60
  },
61
  ),
62
  datasets.SplitGenerator(
63
- name=datasets.Split.VALIDATION,
64
  gen_kwargs={
65
- "images": dl_manager.iter_archive(archive_path),
66
  "metadata_path": split_metadata_paths["test"],
67
  },
68
  ),
69
  ]
70
  def _generate_examples(self, images, metadata_path):
71
- ...
72
-
73
- class PlantOrgansConfig(datasets.BuilderConfig):
74
- """Builder Config for PlantOrgans"""
75
-
76
- def __init__(self, data_url, metadata_urls, **kwargs):
77
- """BuilderConfig for PlantOrgans.
78
- Args:
79
- data_url: `string`, url to download the zip file from.
80
- metadata_urls: dictionary with keys 'train' and 'validation' containing the archive metadata URLs
81
- **kwargs: keyword arguments forwarded to super.
82
- """
83
- super().__init__(version=datasets.Version("1.0.0"), **kwargs)
84
- self.data_url = data_url
85
- self.metadata_urls = metadata_urls
 
1
  import datasets
2
+ import os
3
+ import json
4
 
5
  _DESCRIPTION = """Photos of various plants with their major, above ground organs labeled. Includes labels for stem, leafs, fruits and flowers."""
6
 
 
17
  'Fruit',
18
  ]
19
 
20
+ _BASE_URL = "https://huggingface.co/datasets/jpodivin/plantorgans/resolve/main/"
21
+ _TRAIN_URLS = [_BASE_URL + f"sourcedata_labeled.tar.{i:02}" for i in range(0, 8)]
22
+ _TEST_URLS = [_BASE_URL + f"sourcedata_labeled.tar.{i:02}" for i in range(8, 12)]
23
+ _METADATA_URLS = {
24
+ 'train': 'https://huggingface.co/datasets/jpodivin/plantorgans/resolve/main/labels_train.csv',
25
+ 'test': 'https://huggingface.co/datasets/jpodivin/plantorgans/resolve/main/labels_test.csv'
26
+ }
27
+
28
+
29
+ class PlantOrgansConfig(datasets.BuilderConfig):
30
+ """Builder Config for PlantOrgans"""
31
+
32
+ def __init__(self, data_url, metadata_urls, splits, **kwargs):
33
+ """BuilderConfig for PlantOrgans.
34
+ Args:
35
+ data_url: `string`, base url to download the dataset tar archives from.
36
+ metadata_urls: dictionary with keys 'train' and 'test' containing the archive metadata URLs
37
+ **kwargs: keyword arguments forwarded to super.
38
+ """
39
+ super().__init__(version=datasets.Version("1.0.0"), **kwargs)
40
+ self.data_url = data_url
41
+ self.metadata_urls = metadata_urls
42
+ self.splits = splits
43
 
 
 
 
 
44
 
45
  class PlantOrgans(datasets.GeneratorBasedBuilder):
46
  """Plantorgans dataset
47
  """
48
  BUILDER_CONFIGS = [
49
  PlantOrgansConfig(
50
+ name="semantic_segmentation_full",
51
+ description="This configuration contains segmentation masks.",
52
+ data_url=_BASE_URL,
53
+ metadata_urls=_METADATA_URLS,
54
+ splits=['train', 'test'],
55
  ),
56
  ]
57
 
 
72
 
73
 
74
  def _split_generators(self, dl_manager):
75
+ train_archive_path = dl_manager.download_and_extract(_TRAIN_URLS)
76
+ test_archive_path = dl_manager.download_and_extract(_TEST_URLS)
77
+
78
  split_metadata_paths = dl_manager.download(_METADATA_URLS)
79
  return [
80
  datasets.SplitGenerator(
81
  name=datasets.Split.TRAIN,
82
  gen_kwargs={
83
+ "images": dl_manager.iter_archive(os.path.join(train_archive_path, 'sourcedata/labeled')),
84
  "metadata_path": split_metadata_paths["train"],
85
  },
86
  ),
87
  datasets.SplitGenerator(
88
+ name=datasets.Split.TEST,
89
  gen_kwargs={
90
+ "images": dl_manager.iter_archive(os.path.join(test_archive_path, 'sourcedata/labeled')),
91
  "metadata_path": split_metadata_paths["test"],
92
  },
93
  ),
94
  ]
95
  def _generate_examples(self, images, metadata_path):
96
+
97
+ with open(metadata_path, 'w', encoding='utf-8') as fp:
98
+ metadata = json.load(fp)
99
+ images = metadata['image']
100
+ annotations = metadata['annotations']