# coding=utf-8
# Copyright 2023 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cocoon datasets that contrain interlinear gloss"""
import os
import json
import datasets
_ALL_LANGS = [
"tvk",
"kke",
"ers",
"sxg",
"svm",
"kkt",
"nee",
"bej",
"say",
"ady",
"kab",
"ixc",
"rmn",
"swb",
"nxq",
"nru",
]
_ALL_CONFIGS = [*_ALL_LANGS, "all"]
_DESCRIPTION = ""
_CITATION = ""
_HOMEPAGE_URL = ""
_BASE_PATH = "data/{langs}/"
_DATA_URL = _BASE_PATH + "audio/{split}.tar.gz"
_META_URL = _BASE_PATH + "{split}.json"
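# For example, config "bej" with split "train" resolves to
# data/bej/audio/train.tar.gz and data/bej/train.json.
# Each {split}.json appears to map an audio filename to its annotation record
# (a sketch inferred from _generate_examples below; values are hypothetical):
# {
#   "utt_0001.wav": {
#     "id": "utt_0001", "transcription": "...", "speaker": "...",
#     "surface": "...", "underlying": "...", "gloss": "...",
#     "translation": "...", "url": "..."
#   }
# }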


class CocoonConfig(datasets.BuilderConfig):
    """BuilderConfig for one Cocoon language subset (or "all")."""

    def __init__(self, name, **kwargs):
        super().__init__(name=name, version=datasets.Version("0.0.0", ""), **kwargs)


class Cocoon(datasets.GeneratorBasedBuilder):
    """Loader for the Cocoon interlinear-gloss corpora."""

    BUILDER_CONFIGS = [CocoonConfig(name) for name in _ALL_CONFIGS]

    def _info(self):
        features = datasets.Features(
            {
                "id": datasets.Value("string"),
                "audio": datasets.features.Audio(sampling_rate=16_000),
                "transcription": datasets.Value("string"),
                "language": datasets.Value("string"),
                "speaker": datasets.Value("string"),
                "surface": datasets.Value("string"),
                "underlying": datasets.Value("string"),
                "gloss": datasets.Value("string"),
                "translation": datasets.Value("string"),
                "url": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=("audio", "transcription"),
            homepage=_HOMEPAGE_URL,
            citation=_CITATION,
            task_templates=None,
        )

    def _split_generators(self, dl_manager):
        splits = ["train", "dev", "test"]
        langs = _ALL_LANGS if self.config.name == "all" else [self.config.name]
        data_urls = {
            split: [_DATA_URL.format(langs=lang, split=split) for lang in langs]
            for split in splits
        }
        meta_urls = {
            split: [_META_URL.format(langs=lang, split=split) for lang in langs]
            for split in splits
        }
        archive_paths = dl_manager.download(data_urls)
        # In streaming mode the archives are read on the fly via iter_archive,
        # so there is nothing to extract.
        local_extracted_archives = (
            dl_manager.extract(archive_paths) if not dl_manager.is_streaming else {}
        )
        archive_iters = {
            split: [dl_manager.iter_archive(path) for path in paths]
            for split, paths in archive_paths.items()
        }
        meta_paths = dl_manager.download(meta_urls)
        split_names = {
            "train": datasets.Split.TRAIN,
            "dev": datasets.Split.VALIDATION,
            "test": datasets.Split.TEST,
        }
        return [
            datasets.SplitGenerator(
                name=split_names[split],
                gen_kwargs={
                    "local_extracted_archives": local_extracted_archives.get(
                        split, [None] * len(meta_paths[split])
                    ),
                    "archive_iters": archive_iters[split],
                    "text_paths": meta_paths[split],
                },
            )
            for split in splits
        ]

    def _generate_examples(self, local_extracted_archives, archive_iters, text_paths):
        assert len(local_extracted_archives) == len(archive_iters) == len(text_paths)
        key = 0
        langs = _ALL_LANGS if self.config.name == "all" else [self.config.name]
        for archive, text_path, local_extracted_path, lang in zip(
            archive_iters, text_paths, local_extracted_archives, langs
        ):
            with open(text_path, encoding="utf-8") as fin:
                data = json.load(fin)
            for audio_path, audio_file in archive:
                # Metadata is keyed by bare filename; skip archive members
                # that have no annotation record.
                audio_filename = audio_path.split("/")[-1]
                if audio_filename not in data:
                    continue
                result = data[audio_filename]
                # Prefer the locally extracted file path when available
                # (non-streaming); fall back to the in-archive path.
                extracted_audio_path = (
                    os.path.join(local_extracted_path, audio_filename)
                    if local_extracted_path is not None
                    else audio_path
                )
                result["language"] = lang
                result["audio"] = {
                    "path": extracted_audio_path,
                    "bytes": audio_file.read(),
                }
                yield key, result
                key += 1
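

if __name__ == "__main__":
    # Minimal smoke test, not part of the loader itself. It assumes the script
    # is run from the dataset repo root with the data/ directory present, and
    # that "bej" is one of the configured languages; adjust both as needed.
    from datasets import load_dataset

    ds = load_dataset(__file__, "bej", streaming=True, trust_remote_code=True)
    print(next(iter(ds["train"])))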