ShixuanAn committed on
Commit 62d6f17
1 Parent(s): 3396a5c

Update hugging_face.py

Files changed (1)
  1. hugging_face.py +63 -58
hugging_face.py CHANGED
@@ -36,9 +36,6 @@ _LICENSE = ""
 # TODO: Add link to the official dataset URLs here
 # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
 # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
-_URLS = {
-    "dataset": "https://prod-dcd-datasets-cache-zipfiles.s3.eu-west-1.amazonaws.com/5ty2wb6gvg-1.zip"
-}


 # TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
@@ -61,7 +58,7 @@ class RDD2020_Dataset(datasets.GeneratorBasedBuilder):
                     "depth": datasets.Value("int32"),
                 }),
                 "image_path": datasets.Value("string"),
-                "pics_array": datasets.Array3D(shape=(None, None, 3), dtype="uint8"),
+                #"pics_array": datasets.Array3D(shape=(None, None, 3), dtype="uint8"),
                 "crack_type": datasets.Sequence(datasets.Value("string")),
                 "crack_coordinates": datasets.Sequence(datasets.Features({
                     "x_min": datasets.Value("int32"),
@@ -75,79 +72,87 @@ class RDD2020_Dataset(datasets.GeneratorBasedBuilder):
         )

     def _split_generators(self, dl_manager):
-        """This method downloads/extracts the data and defines the splits."""
-        data_dir = dl_manager.download_and_extract(_URLS["dataset"])
+        # The URL provided must be the direct link to the zip file
+        urls_to_download = {
+            "dataset": "https://huggingface.co/datasets/ShixuanAn/RDD2020/resolve/main/RDD2020.zip"
+        }
+
+        # Download and extract the dataset using the dl_manager
+        downloaded_files = dl_manager.download_and_extract(urls_to_download["dataset"])
+
+        # Assuming the ZIP file extracts to a folder named 'RDD2020'
+        extracted_path = os.path.join(downloaded_files, "RDD2020")

         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 gen_kwargs={
-                    "images_dir": os.path.join(data_dir, "train"),
-                    "annotations_dir": os.path.join(data_dir, "train", "annotations"),
+                    "images_dir": os.path.join(extracted_path, "train", "images"),
+                    "annotations_dir": os.path.join(extracted_path, "train", "annotations", "xmls"),
                     "split": "train",
                 },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
                 gen_kwargs={
-                    "images_dir": os.path.join(data_dir, "test1"),
-                    "annotations_dir": os.path.join(data_dir, "test1", "annotations"),
+                    "images_dir": os.path.join(extracted_path, "test1", "images"),
+                    "annotations_dir": None,  # No annotations for test1
                     "split": "test1",
                 },
             ),
             datasets.SplitGenerator(
-                name=datasets.Split.TEST,
+                name=datasets.Split.VALIDATION,
                 gen_kwargs={
-                    "images_dir": os.path.join(data_dir, "test2"),
-                    "annotations_dir": os.path.join(data_dir, "test2", "annotations"),
+                    "images_dir": os.path.join(extracted_path, "test2", "images"),
+                    "annotations_dir": None,  # No annotations for test2
                     "split": "test2",
                 },
             ),
         ]

-    def _generate_examples(self, images_dir, annotations_dir, split):
-        """Yields examples as (key, example) tuples."""
-        for image_file in os.listdir(images_dir):
-            if not image_file.endswith('.jpg'):
-                continue
-            image_id = image_file.split('.')[0]
-            annotation_file = image_id + '.xml'
-            annotation_path = os.path.join(annotations_dir, annotation_file)
-
-            if not os.path.exists(annotation_path):
-                continue
-
-            tree = ET.parse(annotation_path)
-            root = tree.getroot()
-
-            country = split.capitalize()
-            image_path = os.path.join(images_dir, image_file)
-            crack_type = []
-            crack_coordinates = []
-
-            for obj in root.findall('object'):
-                crack_type.append(obj.find('name').text)
-                bndbox = obj.find('bndbox')
-                coordinates = {
-                    "x_min": int(bndbox.find('xmin').text),
-                    "x_max": int(bndbox.find('xmax').text),
-                    "y_min": int(bndbox.find('ymin').text),
-                    "y_max": int(bndbox.find('ymax').text),
-                }
-                crack_coordinates.append(coordinates)
-
-            # Assuming images are of uniform size, you might want to adjust this or extract from image directly
-            image_resolution = {"width": 600, "height": 600, "depth": 3} if country != "India" else {"width": 720,
-                                                                                                     "height": 720,
-                                                                                                     "depth": 3}
-            yield image_id, {
-                "image_id": image_id,
-                "country": country,
-                "type": split,
-                "image_resolution": image_resolution,
-                "image_path": image_path,
-                "crack_type": crack_type,
-                "crack_coordinates": crack_coordinates,
-            }
+    def _generate_examples(self, base_path, split):
+        # Iterate over each country directory
+        for country_dir in ['Czech', 'India', 'Japan']:
+            images_dir = f"{extracted_path}/{country_dir}/images"
+            annotations_dir = f"{extracted_path}/{country_dir}/annotations/xmls" if split == "train" else None
+
+            # Iterate over each image in the country's image directory
+            for image_file in os.listdir(images_dir):
+                if not image_file.endswith('.jpg'):
+                    continue
+
+                image_id = f"{country_dir}_{image_file.split('.')[0]}"
+                image_path = os.path.join(images_dir, image_file)
+                if annotations_dir:
+                    annotation_file = image_id + '.xml'
+                    annotation_path = os.path.join(annotations_dir, annotation_file)
+                    if not os.path.exists(annotation_path):
+                        continue
+                    tree = ET.parse(annotation_path)
+                    root = tree.getroot()
+                    crack_type = []
+                    crack_coordinates = []
+                    for obj in root.findall('object'):
+                        crack_type.append(obj.find('name').text)
+                        bndbox = obj.find('bndbox')
+                        coordinates = {
+                            "x_min": int(bndbox.find('xmin').text),
+                            "x_max": int(bndbox.find('xmax').text),
+                            "y_min": int(bndbox.find('ymin').text),
+                            "y_max": int(bndbox.find('ymax').text),
+                        }
+                        crack_coordinates.append(coordinates)
+                else:
+                    crack_type = []
+                    crack_coordinates = []
+
+                image_resolution = {"width": 600, "height": 600, "depth": 3}
+                yield image_id, {
+                    "image_id": image_id,
+                    "country": country_dir,
+                    "type": split,
+                    "image_resolution": image_resolution,
+                    "image_path": image_path,
+                    "crack_type": crack_type,
+                    "crack_coordinates": crack_coordinates,
+                }
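
For context, a minimal sketch of how a loading script like this is consumed once it is pushed to the Hub. The repo id ShixuanAn/RDD2020 is taken from the zip URL in the diff, and trust_remote_code=True is what recent `datasets` releases require for script-based loaders. Note that, as committed, the gen_kwargs keys (images_dir/annotations_dir) do not match the _generate_examples(self, base_path, split) signature and the method still reads extracted_path outside its scope, so treat this as a usage sketch rather than a verified run of this exact revision.

from datasets import load_dataset

# Usage sketch, not a verified run of this revision:
# repo id, split names, and field names follow the script shown in the diff above.
dataset = load_dataset(
    "ShixuanAn/RDD2020",       # Hub repo hosting hugging_face.py and RDD2020.zip
    trust_remote_code=True,    # needed for script-based datasets on recent `datasets` versions
)

print(dataset)  # splits declared in _split_generators: train, test (test1), validation (test2)

example = dataset["train"][0]
print(example["image_id"], example["country"], example["type"])
print(example["crack_type"])         # damage labels parsed from the XML annotations
print(example["crack_coordinates"])  # bounding boxes (x_min / x_max / y_min / y_max)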