"""TODO: Add a description here.""" |
|
|
|
|
|
import csv |
|
import json |
|
import os |
|
|
|
import datasets |
|
import gzip |
|
|
|
|
|
|
|
_CITATION = """\
@InProceedings{huggingface:dataset,
title = {A great new dataset},
author = {huggingface, Inc.},
year = {2020}
}
"""

_DESCRIPTION = """\
A collection of subtitle transcripts from Subscene, available in Arabic and English.
Each example carries the subtitle name, the source file name, and the transcript text.
"""

_HOMEPAGE = ""

_LICENSE = ""

_URLS = "https://huggingface.co/datasets/khalidalt/subscene/resolve/main/{Lang}/{Lang}_subscene_{split}{index}.json.gz"

_N_FILES_PER_SPLIT = {
    'arabic': {'train': 33},
    'english': {'train': 82},
}

_LangID = ['arabic', 'english']
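
# For illustration, _URLS.format(Lang='arabic', split='', index=0) resolves to:
# https://huggingface.co/datasets/khalidalt/subscene/resolve/main/arabic/arabic_subscene_0.json.gz

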
class SubsceneConfig(datasets.BuilderConfig):
    """Builder config for the Subscene dataset."""

    def __init__(self, subset, **kwargs):
        super(SubsceneConfig, self).__init__(**kwargs)
        # "all" selects every available language; otherwise restrict the
        # config to the single requested subset.
        if subset != "all":
            self.subset = [subset]
        else:
            self.subset = _LangID
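
# For example, SubsceneConfig(name='arabic', subset='arabic') restricts a build
# to the Arabic files, while subset='all' selects every language in _LangID.

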
class Subscene(datasets.GeneratorBasedBuilder):
    """Subtitle transcripts from Subscene in Arabic and English."""

    VERSION = datasets.Version("1.1.0")
    BUILDER_CONFIG_CLASS = SubsceneConfig
    BUILDER_CONFIGS = [
        SubsceneConfig(
            name=subset,
            subset=subset,
            version=datasets.Version("1.1.0", ""),
            description='',
        )
        for subset in _LangID
    ]

    def _info(self):
        features = datasets.Features(
            {
                "subtitle_name": datasets.Value("string"),
                "file_name": datasets.Value("string"),
                "transcript": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
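
    # Each remote file is expected to be JSON Lines matching the features
    # above; an illustrative (not real) row:
    #   {"subtitle_name": "...", "file_name": "...", "transcript": "..."}
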
    def _split_generators(self, dl_manager):
        data_urls = {}
        for split in ['train']:
            data_urls[split] = [
                _URLS.format(
                    Lang=subset,
                    # Only a train split is published; train file names carry
                    # no split tag, so the {split} placeholder stays empty.
                    split='',
                    index=i,
                )
                for subset in self.config.subset
                for i in range(_N_FILES_PER_SPLIT[subset][split])
            ]

        # download() returns local cached paths in the same order as the URLs.
        train_downloaded_files = dl_manager.download(data_urls["train"])

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepaths": train_downloaded_files},
            ),
        ]

    def _generate_examples(self, filepaths):
        # Files are gzipped JSON Lines: one JSON object per non-empty line.
        id_ = 0
        for filepath in filepaths:
            with gzip.open(filepath, "rt", encoding="utf-8") as f:
                for row in f:
                    if row.strip():
                        data = json.loads(row)
                        yield id_, data
                        id_ += 1
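
# A minimal usage sketch (assuming this script is saved as subscene.py and the
# installed datasets version supports loading local scripts; the config name
# selects the language subset, 'arabic' or 'english'):
#
#   from datasets import load_dataset
#   ds = load_dataset("subscene.py", "arabic", split="train")
#   print(ds[0]["subtitle_name"], ds[0]["transcript"][:80])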
|