# HuggingFace dataset loading script for TrainingDataPro/selfies_and_id.
import io
import datasets
import pandas as pd
# BibTeX citation surfaced in the dataset card / DatasetInfo.
_CITATION = """\
@InProceedings{huggingface:dataset,
title = {selfies_and_id},
author = {TrainingDataPro},
year = {2023}
}
"""
# Human-readable summary shown on the Hub dataset page.
_DESCRIPTION = """\
4083 sets, which includes 2 photos of a person from his documents and
13 selfies. 571 sets of Hispanics and 3512 sets of Caucasians.
Photo documents contains only a photo of a person.
All personal information from the document is hidden.
"""
# Dataset slug on the Hub; also used to build the URLs below.
_NAME = 'selfies_and_id'
_HOMEPAGE = f"https://huggingface.co/datasets/TrainingDataPro/{_NAME}"
# No license string published for this dataset.
_LICENSE = ""
# Base URL of the data/ folder holding images.tar.gz and the annotation CSV.
_DATA = f"https://huggingface.co/datasets/TrainingDataPro/{_NAME}/resolve/main/data/"
class SelfiesAndId(datasets.GeneratorBasedBuilder):
    """Builder for the TrainingDataPro selfies_and_id dataset.

    Each example is one user's set: 2 ID-document photos, 13 selfies,
    and per-user metadata read from the annotation CSV.
    """

    def _info(self):
        """Return the dataset metadata (features, homepage, citation)."""
        # Image features: id_1..id_2 and selfie_1..selfie_13. The names
        # match the lowercased FName values in the annotation CSV.
        features = {f'id_{i}': datasets.Image() for i in range(1, 3)}
        features.update({f'selfie_{i}': datasets.Image() for i in range(1, 14)})
        features.update({
            'user_id': datasets.Value('string'),
            'set_id': datasets.Value('string'),
            'user_race': datasets.Value('string'),
            'name': datasets.Value('string'),
            'age': datasets.Value('int8'),
            'country': datasets.Value('string'),
            'gender': datasets.Value('string'),
        })
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(features),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the image archive and annotation CSV; define one TRAIN split."""
        images = dl_manager.download(f"{_DATA}images.tar.gz")
        annotations = dl_manager.download(f"{_DATA}{_NAME}.csv")
        # iter_archive yields (path-inside-archive, file-object) pairs lazily.
        images = dl_manager.iter_archive(images)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    'images': images,
                    'annotations': annotations,
                },
            ),
        ]

    def _generate_examples(self, images, annotations):
        """Yield (key, example) pairs, one example per user.

        Args:
            images: iterable of ``(archive_path, file_object)`` pairs.
            annotations: local path to the ';'-separated annotation CSV.
        """
        annotations_df = pd.read_csv(annotations, sep=';')

        # Build the image table in one shot: appending rows with
        # `df.loc[idx] = ...` inside a loop is quadratic in pandas.
        images_data = pd.DataFrame(
            [{'URL': path, 'Bytes': fobj.read()} for path, fobj in images],
            columns=['URL', 'Bytes'],
        )
        # Attach the raw bytes of each image to its annotation row.
        annotations_df = pd.merge(annotations_df,
                                  images_data,
                                  how='left',
                                  on=['URL'])

        for idx, user_id in enumerate(pd.unique(annotations_df['UserId'])):
            user_rows = annotations_df.loc[annotations_df['UserId'] == user_id]
            user_rows = user_rows.sort_values(['FName'])

            # One entry per image; lowercased FName values ('ID_1',
            # 'Selfie_1', ...) match the feature names declared in _info().
            # Named attribute access replaces fragile positional indexing
            # (row[5]/row[6]/row[10]) into the merged frame.
            data = {
                row.FName.lower(): {
                    'path': row.URL,
                    'bytes': row.Bytes,
                } for row in user_rows.itertuples()
            }

            # Per-user metadata is taken from the 'ID_1' row, computed once
            # instead of re-masking the frame for every column.
            id1_row = user_rows.loc[user_rows['FName'] == 'ID_1'].iloc[0]
            data['user_id'] = user_id
            data['age'] = id1_row['Age']
            data['country'] = id1_row['Country']
            data['gender'] = id1_row['Gender']
            data['set_id'] = id1_row['SetId']
            data['user_race'] = id1_row['UserRace']
            data['name'] = id1_row['Name']

            yield idx, data