sesge / sesge.py
import csv
import os

import datasets
from tqdm import tqdm

# from .release_stats import STATS
_CITATION = """\
@inproceedings{demint2024,
    author    = {Pérez-Ortiz, Juan Antonio and
                 Esplà-Gomis, Miquel and
                 Sánchez-Cartagena, Víctor M. and
                 Sánchez-Martínez, Felipe and
                 Chernysh, Roman and
                 Mora-Rodríguez, Gabriel and
                 Berezhnoy, Lev},
    title     = {{DeMINT}: Automated Language Debriefing for English Learners via {AI}
                 Chatbot Analysis of Meeting Transcripts},
    booktitle = {Proceedings of the 13th Workshop on NLP for Computer Assisted Language Learning},
    month     = oct,
    year      = {2024},
    url       = {https://aclanthology.org/volumes/2024.nlp4call-1/},
}
"""

class SesgeConfig(datasets.BuilderConfig):
    def __init__(self, name, version, **kwargs):
        self.language = kwargs.pop("language", None)
        self.release_date = kwargs.pop("release_date", None)
        description = (
            "A dataset containing English speech with grammatical errors, along with the corresponding "
            "transcriptions. Utterances are synthesized using a text-to-speech model, whereas the "
            "grammatically incorrect texts come from the C4_200M synthetic dataset."
        )
        super(SesgeConfig, self).__init__(
            name=name,
            version=version,
            description=description,
            **kwargs,
        )

class Sesge(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIGS = [
        SesgeConfig(
            name="sesge",
            version=datasets.Version("1.0.0"),
            language="eng",
            release_date="2024-10-8",
        )
    ]

    def _info(self):
        total_languages = 1
        total_valid_hours = 1
        description = (
            "A dataset containing English speech with grammatical errors, along with the corresponding "
            "transcriptions. Utterances are synthesized using a text-to-speech model, whereas the "
            "grammatically incorrect texts come from the C4_200M synthetic dataset."
        )
        features = datasets.Features(
            {
                "audio": datasets.features.Audio(sampling_rate=48_000),
                "sentence": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=description,
            features=features,
            citation=_CITATION,
        )
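
    def _split_generators(self, dl_manager):
        # Minimal sketch of the split setup this generator expects. The archive
        # and metadata locations below (data/<split>/audio.tar.gz, metadata.csv)
        # are assumptions, not confirmed paths; adjust them to the actual layout
        # of the repository.
        split_generators = []
        for split_name, split in [("train", datasets.Split.TRAIN), ("test", datasets.Split.TEST)]:
            extracted = dl_manager.download_and_extract(f"data/{split_name}/audio.tar.gz")
            meta_path = dl_manager.download("metadata.csv")
            split_generators.append(
                datasets.SplitGenerator(
                    name=split,
                    gen_kwargs={
                        "local_extracted_archive_paths": extracted,
                        "archives": extracted,
                        "meta_path": meta_path,
                        "split": split_name,
                    },
                )
            )
        return split_generators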

    def _generate_examples(self, local_extracted_archive_paths, archives, meta_path, split):
        # Build a lookup from the file name stored in the metadata CSV
        # (e.g. data/<split>/<name>.wav) to its metadata row.
        metadata = {}
        with open(meta_path, encoding="utf-8") as f:
            reader = csv.DictReader(f, delimiter=";", quoting=csv.QUOTE_NONE)
            for row in tqdm(reader):
                metadata[row["file_name"]] = row
        for filename in os.listdir(archives):
            file = os.path.join("data", split, filename)
            if file not in metadata:
                print(f"No metadata found for {file}")
                continue
            result = dict(metadata[file])
            path = os.path.join(local_extracted_archive_paths, filename)
            # The Audio feature expects the raw encoded bytes of the wav file,
            # not a decoded sample array.
            with open(path, "rb") as wavfile:
                audio_bytes = wavfile.read()
            result["audio"] = {"path": path, "bytes": audio_bytes}
            result["path"] = path
            yield path, result
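
# Usage sketch for this loading script. The "Gabi00/sesge" repo id is an
# assumption taken from the page header and may need adjusting:
#
#   from datasets import load_dataset
#   ds = load_dataset("Gabi00/sesge", split="train", trust_remote_code=True)
#   sample = ds[0]
#   print(sample["sentence"], sample["audio"]["sampling_rate"])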