#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Hugging Face `datasets` loading script for MELD_Audio, the audio portion of the Multimodal EmotionLines Dataset (MELD)."""
import os

import datasets
import pandas as pd
from datasets import ClassLabel

_HOMEPAGE = "https://affective-meld.github.io/"
_CITATION = """\
@article{poria2018meld,
title={Meld: A multimodal multi-party dataset for emotion recognition in conversations},
author={Poria, Soujanya and Hazarika, Devamanyu and Majumder, Navonil and Naik, Gautam and Cambria, Erik and Mihalcea, Rada},
journal={arXiv preprint arXiv:1810.02508},
year={2018}
}
@article{chen2018emotionlines,
title={Emotionlines: An emotion corpus of multi-party conversations},
author={Chen, Sheng-Yeh and Hsu, Chao-Chun and Kuo, Chuan-Chun and Ku, Lun-Wei and others},
journal={arXiv preprint arXiv:1802.08379},
year={2018}
}
"""
_DESCRIPTION = """\
The Multimodal EmotionLines Dataset (MELD) was created by enhancing and extending the EmotionLines dataset.
MELD contains the same dialogue instances as EmotionLines, but additionally provides audio and
visual modalities along with text. MELD has more than 1,400 dialogues and 13,000 utterances from the Friends TV series.
Multiple speakers participate in the dialogues. Each utterance in a dialogue is labeled with one of
seven emotions -- Anger, Disgust, Sadness, Joy, Neutral, Surprise, or Fear. MELD also has sentiment (positive,
negative, and neutral) annotations for each utterance.
This dataset is adapted from https://huggingface.co/datasets/zrr1999/MELD_Text_Audio.
The audio is extracted from the MELD mp4 files and converted to a single channel (mono) at a 16 kHz sample rate.
"""
_LICENSE = "gpl-3.0"


class MELD_Audio(datasets.GeneratorBasedBuilder):
    """Dataset builder for MELD audio: utterance text, audio, and emotion/sentiment labels."""
VERSION = datasets.Version("0.0.1")
BUILDER_CONFIGS = [ # noqa: RUF012
datasets.BuilderConfig(name="MELD_Audio", version=VERSION, description="MELD audio"),
]

    def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"text": datasets.Value("string"),
"path": datasets.Value("string"),
"audio": datasets.Audio(sampling_rate=16000),
"emotion": ClassLabel(names=["neutral", "joy", "sadness", "anger", "fear", "disgust", "surprise"]),
"sentiment": ClassLabel(names=["neutral", "positive", "negative"]),
}
),
homepage=_HOMEPAGE,
license=_LICENSE,
citation=_CITATION,
)

    def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
metadata_dir: dict[str, str] = dl_manager.download_and_extract(
{"train": "train.csv", "dev": "dev.csv", "test": "test.csv"}
) # type: ignore # noqa: PGH003
data_path: dict[str, str] = dl_manager.download(
{
"train": "archive/train.tar.gz",
"dev": "archive/dev.tar.gz",
"test": "archive/test.tar.gz",
}
) # type: ignore # noqa: PGH003
path_to_clips = "MELD_Audio"
local_extracted_archive: dict[str, str] = (
dl_manager.extract(data_path)
if not dl_manager.is_streaming
else {
"train": None,
"dev": None,
"test": None,
}
) # type: ignore # noqa: PGH003
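        # When streaming, nothing is extracted, so `local_extracted_archive` stays None
        # and _generate_examples keeps the archive-relative paths as-is.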
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN, # type: ignore # noqa: PGH003
gen_kwargs={
"filepath": metadata_dir["train"],
"split": "train",
"local_extracted_archive": local_extracted_archive["train"],
"audio_files": dl_manager.iter_archive(data_path["train"]),
"path_to_clips": path_to_clips,
},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION, # type: ignore # noqa: PGH003
gen_kwargs={
"filepath": metadata_dir["dev"],
"split": "dev",
"local_extracted_archive": local_extracted_archive["dev"],
"audio_files": dl_manager.iter_archive(data_path["dev"]),
"path_to_clips": path_to_clips,
},
),
datasets.SplitGenerator(
name=datasets.Split.TEST, # type: ignore # noqa: PGH003
gen_kwargs={
"filepath": metadata_dir["test"],
"split": "test",
"local_extracted_archive": local_extracted_archive["test"],
"audio_files": dl_manager.iter_archive(data_path["test"]),
"path_to_clips": path_to_clips,
},
),
]

    def _generate_examples(self, filepath, split, local_extracted_archive, audio_files, path_to_clips):
"""Yields examples."""
metadata_df = pd.read_csv(filepath, sep=",", index_col=0, header=0)
metadata = {}
for _, row in metadata_df.iterrows():
id_ = f"dia{row['Dialogue_ID']}_utt{row['Utterance_ID']}"
audio_path = f"{split}/{id_}.flac"
metadata[audio_path] = row
id_ = 0
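        # Walk the tar archive; only members listed in the metadata CSV are yielded,
        # and f.read() provides the raw FLAC bytes for the Audio feature to decode.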
for path, f in audio_files:
if path in metadata:
row = metadata[path]
path = os.path.join(local_extracted_archive, path) if local_extracted_archive else path
                audio = {"path": path, "bytes": f.read()}
yield (
id_,
{
"text": row["Utterance"],
"path": path,
"audio": audio,
"emotion": row["Emotion"],
"sentiment": row["Sentiment"],
},
)
id_ += 1
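

# Minimal usage sketch (an assumption, not part of the original script): this file is
# expected to sit in a dataset repository next to train.csv/dev.csv/test.csv and the
# archive/*.tar.gz files referenced above. Recent versions of `datasets` may also
# require trust_remote_code=True for script-based datasets.
if __name__ == "__main__":
    from datasets import load_dataset

    # Load the train split through this loading script and inspect one example.
    ds = load_dataset(__file__, "MELD_Audio", split="train", trust_remote_code=True)
    sample = ds[0]
    print(sample["text"], sample["emotion"], sample["sentiment"])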