Update dataset script on the Hub
Boat_dataset.py CHANGED (+4 -41)
@@ -1,3 +1,4 @@
+# Source: https://github.com/huggingface/datasets/blob/main/templates/new_dataset_script.py
 # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -48,7 +49,7 @@ _LICENSE = ""
 # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
 # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
 _URLS = {
-    "Boat_dataset": "ftp://arg.lab.nycu.edu.tw/arg-projectfile-download/detr/dataset/
+    "Boat_dataset": "ftp://arg.lab.nycu.edu.tw/arg-projectfile-download/detr/dataset/annotations.zip",
 }
 
 
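Note: the builder resolves this entry as `_URLS[self.config.name]` and hands it to the download manager (see the `_split_generators` hunk below). Loading the dataset through this script would look roughly like the sketch below; the repo id `user/Boat_dataset` is a placeholder, not the actual Hub path.

from datasets import load_dataset

# Placeholder repo id; substitute the Hub repository that hosts Boat_dataset.py.
# trust_remote_code is needed for script-based datasets in recent `datasets` releases.
ds = load_dataset("user/Boat_dataset", trust_remote_code=True)
print(ds)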
@@ -82,7 +83,7 @@ class BoatDataset(datasets.GeneratorBasedBuilder):
         features=datasets.Features({
             'image_id': datasets.Value('int32'),
             # 'image': datasets.Image(), # This is commented out because you can't directly store PIL images in the dataset.
-            '
+            'image_path': datasets.Value('string'), # Store the path to the image file instead.
             'width': datasets.Value('int32'),
             'height': datasets.Value('int32'),
             'objects': datasets.Features({
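For reference, a record matching this schema would look roughly like the following. Values are illustrative; the dtypes of `id`, `area`, and `bbox` are not visible in this hunk, so the floats and the [x, y, w, h] box layout are assumptions.

example = {
    "image_id": 1,
    "image_path": "images/train/000001.png",  # path string instead of an embedded image
    "width": 1920,
    "height": 1080,
    "objects": {  # field names taken from the commented-out code this commit removes below
        "id": [0, 1],
        "area": [1024.0, 512.0],
        "bbox": [[10.0, 20.0, 32.0, 32.0], [50.0, 60.0, 16.0, 32.0]],
        "category": [3, 7],  # int32 per the schema
    },
}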
@@ -92,8 +93,6 @@ class BoatDataset(datasets.GeneratorBasedBuilder):
                 'category': datasets.Sequence(datasets.Value('int32'))
             }),
         })
-
-        print(features)
 
         return datasets.DatasetInfo(
             # This is the description that will appear on the datasets page.
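The `datasets.DatasetInfo(...)` call is truncated in this view; following the template the script cites as its source, it most likely forwards the module-level constants:

return datasets.DatasetInfo(
    description=_DESCRIPTION,  # shown on the dataset page
    features=features,         # the schema defined above
    homepage=_HOMEPAGE,
    license=_LICENSE,
    citation=_CITATION,
)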
@@ -120,7 +119,6 @@ class BoatDataset(datasets.GeneratorBasedBuilder):
         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
         urls = _URLS[self.config.name]
         data_dir = dl_manager.download_and_extract(urls)
-        data_dir = os.path.join(data_dir, "images")
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
@@ -146,14 +144,6 @@ class BoatDataset(datasets.GeneratorBasedBuilder):
                     "split": "val_real",
                 },
             ),
-            # datasets.SplitGenerator(
-            #     name=datasets.Split.TEST,
-            #     # These kwargs will be passed to _generate_examples
-            #     gen_kwargs={
-            #         "filepath": os.path.join(data_dir, "test.jsonl"),
-            #         "split": "test"
-            #     },
-            # ),
         ]
 
     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
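Stitching the two `_split_generators` hunks together, the method plausibly reads as follows. The annotation filenames (`train.jsonl`, `val_real.jsonl`) are assumptions inferred from the `"split"` kwargs and the deleted `test.jsonl` line; the diff does not confirm them.

def _split_generators(self, dl_manager):
    # download_and_extract returns the cache path of the extracted archive
    urls = _URLS[self.config.name]
    data_dir = dl_manager.download_and_extract(urls)
    return [
        datasets.SplitGenerator(
            name=datasets.Split.TRAIN,
            gen_kwargs={
                "filepath": os.path.join(data_dir, "train.jsonl"),  # assumed filename
                "split": "train",
            },
        ),
        datasets.SplitGenerator(
            name=datasets.Split.VALIDATION,
            gen_kwargs={
                "filepath": os.path.join(data_dir, "val_real.jsonl"),  # assumed filename
                "split": "val_real",
            },
        ),
    ]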
@@ -165,7 +155,7 @@ class BoatDataset(datasets.GeneratorBasedBuilder):
                     # Proceed to use 'data' for generating examples
                     yield key, {
                         "image_id": data["image_id"],
-                        "
+                        "image_path": data["image_path"],
                         "width": data["width"],
                         "height": data["height"],
                         "objects": data["objects"],
@@ -173,30 +163,3 @@ class BoatDataset(datasets.GeneratorBasedBuilder):
                 except json.JSONDecodeError:
                     print(f"Skipping invalid JSON at line {key + 1}: {row}")
                     continue
-
-    # def _generate_examples(self, filepath, split):
-    #     # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
-    #     # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
-    #     with open(filepath, encoding="utf-8") as f:
-    #         for key, row in enumerate(f, 1): # Start enumeration at 1 for line numbers
-    #             try:
-    #                 data = json.loads(row)
-    #                 # Your existing processing logic here
-    #             except json.JSONDecodeError:
-    #                 print(f"Skipping invalid JSON at line {key}")
-    #                 continue
-    #     # for key, row in enumerate(f):
-    #     #     data = json.loads(row)
-
-    #     yield key, {
-    #         "image_id": data["image_id"],
-    #         "file_name": data["file_name"],
-    #         "width": data["width"],
-    #         "height": data["height"],
-    #         "objects": {
-    #             "id": data["objects"]["id"],
-    #             "area": data["objects"]["area"],
-    #             "bbox": data["objects"]["bbox"],
-    #             "category": data["objects"]["category"],
-    #         },
-    #     }
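Assembled from the surviving hunks, the active `_generate_examples` reads roughly as below. Only the lines visible in the diff are certain; the `with open`/`for`/`try` framing is assumed to mirror the commented-out template that this commit deletes.

def _generate_examples(self, filepath, split):
    with open(filepath, encoding="utf-8") as f:
        for key, row in enumerate(f):
            try:
                data = json.loads(row)
                # Proceed to use 'data' for generating examples
                yield key, {
                    "image_id": data["image_id"],
                    "image_path": data["image_path"],
                    "width": data["width"],
                    "height": data["height"],
                    "objects": data["objects"],
                }
            except json.JSONDecodeError:
                print(f"Skipping invalid JSON at line {key + 1}: {row}")
                continue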
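Because the schema stores `image_path` as a string rather than an embedded `datasets.Image()`, consumers open the file themselves. A minimal sketch, assuming the relative paths resolve against the directory where the images were downloaded:

from PIL import Image

sample = ds["train"][0]  # 'ds' from the load_dataset sketch above
img = Image.open(sample["image_path"])  # path resolution is an assumption
print(img.size, sample["objects"]["category"])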