zhuchi76 committed on
Commit
26b559b
·
verified ·
1 Parent(s): 4c82f72

Update script to hub

Browse files
Files changed (1) hide show
  1. Boat_dataset.py +9 -23
Boat_dataset.py CHANGED
@@ -23,13 +23,10 @@ _HOMEPAGE = "https://huggingface.co/datasets/zhuchi76/Boat_dataset/resolve/main"
23
  _LICENSE = ""
24
 
25
  _URLS = {
26
- "images": f"{_HOMEPAGE}/data/images.tar.gz",
27
  "classes": f"{_HOMEPAGE}/data/classes.txt",
28
- "anno": {
29
- "train": f"{_HOMEPAGE}/data/instances_train2023.jsonl",
30
- "val": f"{_HOMEPAGE}/data/instances_val2023.jsonl",
31
- "test": f"{_HOMEPAGE}/data/instances_val2023r.jsonl"
32
- },
33
  }
34
 
35
  class BoatDataset(datasets.GeneratorBasedBuilder):
@@ -66,21 +63,15 @@ class BoatDataset(datasets.GeneratorBasedBuilder):
66
  # Download all files and extract them
67
  downloaded_files = dl_manager.download_and_extract(_URLS)
68
 
69
- # Extract the image archive
70
- image_dir = dl_manager.extract(downloaded_files["images"])
71
- classes_file_path = downloaded_files["classes"]
72
- annotations_paths = downloaded_files["anno"]
73
-
74
  # Load class labels from the classes file
75
- with open(classes_file_path, 'r') as file:
76
  classes = [line.strip() for line in file.readlines()]
77
 
78
  return [
79
  datasets.SplitGenerator(
80
  name=datasets.Split.TRAIN,
81
  gen_kwargs={
82
- "image_dir": image_dir,
83
- "annotations_file": annotations_paths["train"],
84
  "classes": classes,
85
  "split": "train",
86
  }
@@ -88,8 +79,7 @@ class BoatDataset(datasets.GeneratorBasedBuilder):
88
  datasets.SplitGenerator(
89
  name=datasets.Split.VALIDATION,
90
  gen_kwargs={
91
- "image_dir": image_dir,
92
- "annotations_file": annotations_paths["val"],
93
  "classes": classes,
94
  "split": "val",
95
  }
@@ -97,26 +87,22 @@ class BoatDataset(datasets.GeneratorBasedBuilder):
97
  datasets.SplitGenerator(
98
  name=datasets.Split.TEST,
99
  gen_kwargs={
100
- "image_dir": image_dir,
101
- "annotations_file": annotations_paths["test"],
102
  "classes": classes,
103
  "split": "val_real",
104
  }
105
  ),
106
  ]
107
 
108
- def _generate_examples(self, image_dir, annotations_file, classes, split):
109
  # Process annotations
110
  with open(annotations_file, encoding="utf-8") as f:
111
  for key, row in enumerate(f):
112
  try:
113
  data = json.loads(row.strip())
114
- file_path = os.path.join(image_dir, data["file_name"])
115
- if not os.path.isfile(file_path):
116
- continue # Skip if file is not found in the directory
117
  yield key, {
118
  "image_id": data["image_id"],
119
- "image_path": file_path, # Provide the full path to the image
120
  "width": data["width"],
121
  "height": data["height"],
122
  "objects": {
 
23
  _LICENSE = ""
24
 
25
  _URLS = {
 
26
  "classes": f"{_HOMEPAGE}/data/classes.txt",
27
+ "train": f"{_HOMEPAGE}/data/instances_train2023.jsonl",
28
+ "val": f"{_HOMEPAGE}/data/instances_val2023.jsonl",
29
+ "test": f"{_HOMEPAGE}/data/instances_val2023r.jsonl"
 
 
30
  }
31
 
32
  class BoatDataset(datasets.GeneratorBasedBuilder):
 
63
  # Download all files and extract them
64
  downloaded_files = dl_manager.download_and_extract(_URLS)
65
 
 
 
 
 
 
66
  # Load class labels from the classes file
67
+ with open(downloaded_files['classes'], 'r') as file:
68
  classes = [line.strip() for line in file.readlines()]
69
 
70
  return [
71
  datasets.SplitGenerator(
72
  name=datasets.Split.TRAIN,
73
  gen_kwargs={
74
+ "annotations_file": downloaded_files["train"],
 
75
  "classes": classes,
76
  "split": "train",
77
  }
 
79
  datasets.SplitGenerator(
80
  name=datasets.Split.VALIDATION,
81
  gen_kwargs={
82
+ "annotations_file": downloaded_files["val"],
 
83
  "classes": classes,
84
  "split": "val",
85
  }
 
87
  datasets.SplitGenerator(
88
  name=datasets.Split.TEST,
89
  gen_kwargs={
90
+ "annotations_file": downloaded_files["test"],
 
91
  "classes": classes,
92
  "split": "val_real",
93
  }
94
  ),
95
  ]
96
 
97
+ def _generate_examples(self, annotations_file, classes, split):
98
  # Process annotations
99
  with open(annotations_file, encoding="utf-8") as f:
100
  for key, row in enumerate(f):
101
  try:
102
  data = json.loads(row.strip())
 
 
 
103
  yield key, {
104
  "image_id": data["image_id"],
105
+ "image_path": data["image_path"],
106
  "width": data["width"],
107
  "height": data["height"],
108
  "objects": {