fix: image loading
- README.md +3 -3
- facial_keypoint_detection.py +47 -28
README.md
CHANGED
@@ -19,10 +19,10 @@ dataset_info:
     dtype: string
   splits:
   - name: train
-    num_bytes:
+    num_bytes: 134736982
     num_examples: 15
-  download_size:
-  dataset_size:
+  download_size: 129724970
+  dataset_size: 134736982
 ---
 # Facial Keypoints
 The dataset is designed for computer vision and machine learning tasks involving the identification and analysis of key points on a human face. It consists of images of human faces, each accompanied by key point annotations in XML format.
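With the split sizes filled in and the loader fix below, the dataset can be pulled through the standard datasets API. A minimal sketch, assuming a hypothetical Hub repo id (the actual namespace/name is not shown on this page):

from datasets import load_dataset

# Hypothetical repo id used only for illustration; substitute the real
# <namespace>/<dataset-name> on the Hub. Newer versions of the datasets
# library may also require trust_remote_code=True for script-based datasets.
ds = load_dataset("your-namespace/facial-keypoints", split="train")

sample = ds[0]
sample["image"]       # decoded image, if the column uses the Image() feature
sample["key_points"]  # keypoint annotation for that image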
facial_keypoint_detection.py
CHANGED
@@ -103,11 +103,15 @@ class FacialKeypointDetection(datasets.GeneratorBasedBuilder):
             license=_LICENSE)

     def _split_generators(self, dl_manager):
-        images = dl_manager.download_and_extract(f"{_DATA}images.zip")
-        masks = dl_manager.download_and_extract(f"{_DATA}masks.zip")
+        # images = dl_manager.download_and_extract(f"{_DATA}images.zip")
+        # masks = dl_manager.download_and_extract(f"{_DATA}masks.zip")
+        images = dl_manager.download(f"{_DATA}images.tar.gz")
+        masks = dl_manager.download(f"{_DATA}masks.tar.gz")
         annotations = dl_manager.download(f"{_DATA}{_NAME}.csv")
-        images = dl_manager.iter_files(images)
-        masks = dl_manager.iter_files(masks)
+        # images = dl_manager.iter_files(images)
+        # masks = dl_manager.iter_files(masks)
+        images = dl_manager.iter_archive(images)
+        masks = dl_manager.iter_archive(masks)

         return [
             datasets.SplitGenerator(name=datasets.Split.TRAIN,
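This hunk swaps download_and_extract + iter_files (which yield local file paths) for download + iter_archive, which iterates the .tar.gz archives in place and yields (path, file object) pairs. A rough stand-in using only the standard library, to illustrate that iteration pattern; the archive name is a placeholder, not a claim about the datasets internals:

import tarfile

def iter_archive_like(tar_path):
    # Rough local analogue of dl_manager.iter_archive: walk the tarball and
    # yield (member path, file object) pairs without extracting anything to disk.
    with tarfile.open(tar_path, "r:*") as tar:
        for member in tar:
            if member.isfile():
                fileobj = tar.extractfile(member)
                if fileobj is not None:
                    yield member.name, fileobj

# "images.tar.gz" is a placeholder archive path used only for illustration.
for path, handle in iter_archive_like("images.tar.gz"):
    print(path, len(handle.read()))  # raw bytes, as consumed in _generate_examples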
@@ -120,29 +124,44 @@ class FacialKeypointDetection(datasets.GeneratorBasedBuilder):

     def _generate_examples(self, images, masks, annotations):
         annotations_df = pd.read_csv(annotations, sep=',')
-        images_data = pd.DataFrame(
-            columns=['image_name', 'image_path', 'mask_path'])
-        for idx, (image_path, mask_path) in enumerate(zip(images, masks)):
-            images_data.loc[idx] = {
-                'image_name': image_path.split('/')[-1],
-                'image_path': image_path,
-                'mask_path': mask_path
-            }
-
-        annotations_df = pd.merge(annotations_df,
-                                  images_data,
-                                  how='left',
-                                  on=['image_name'])
-
-        annotations_df[['image_path', 'mask_path'
-                        ]] = annotations_df[['image_path',
-                                             'mask_path']].astype('string')
-
-        for row in annotations_df.sort_values(['image_name'
-                                              ]).itertuples(index=False):
+        for idx, ((image_path, image),
+                  (mask_path, mask)) in enumerate(zip(images, masks)):
             yield idx, {
-                'image_id': row[0],
-                'image': row[3],
-                'mask': row[4],
-                'key_points': row[2]
+                'image_id': annotations_df['image_id'].iloc[idx],
+                "image": {
+                    "path": image_path,
+                    "bytes": image.read()
+                },
+                "mask": {
+                    "path": mask_path,
+                    "bytes": mask.read()
+                },
+                'key_points': annotations_df['key_points'].iloc[idx]
             }
+        # images_data = pd.DataFrame(
+        #     columns=['image_name', 'image_path', 'mask_path'])
+        # for idx, ((image_path, image),
+        #           (mask_path, mask)) in enumerate(zip(images, masks)):
+        #     images_data.loc[idx] = {
+        #         'image_name': image_path.split('/')[-1],
+        #         'image_path': image_path,
+        #         'mask_path': mask_path
+        #     }
+
+        # annotations_df = pd.merge(annotations_df,
+        #                           images_data,
+        #                           how='left',
+        #                           on=['image_name'])
+
+        # annotations_df[['image_path', 'mask_path'
+        #                 ]] = annotations_df[['image_path',
+        #                                      'mask_path']].astype('string')
+
+        # for row in annotations_df.sort_values(['image_name'
+        #                                       ]).itertuples(index=False):
+        #     yield idx, {
+        #         'image_id': row[0],
+        #         'image': row[3],
+        #         'mask': row[4],
+        #         'key_points': row[2]
+        #     }
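The rewritten _generate_examples now yields {"path": ..., "bytes": ...} dicts for the image and mask columns, which is the encoded form that a datasets.Image() feature accepts, so the bytes can be streamed straight out of the tar archives. A sketch of how such an example plugs into a feature schema; the real feature definition lives in _info(), which this commit does not touch, so the dtypes below are assumptions:

import datasets

# Assumed schema for illustration only; the actual _info() is not shown here.
features = datasets.Features({
    "image_id": datasets.Value("int32"),
    "image": datasets.Image(),
    "mask": datasets.Image(),
    "key_points": datasets.Value("string"),
})

# A {"path": ..., "bytes": ...} dict is a valid encoded value for datasets.Image().
example = {
    "image_id": 0,
    "image": {"path": "images/face_0.jpg", "bytes": b"<image bytes>"},  # placeholder bytes
    "mask": {"path": "masks/face_0.jpg", "bytes": b"<mask bytes>"},     # placeholder bytes
    "key_points": "[]",
}
encoded = features.encode_example(example)  # validates and encodes against the schema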