---
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: test
path: data/test-*
dataset_info:
features:
- name: utterance_ID
dtype: int64
- name: text
dtype: string
- name: speaker
dtype: string
- name: emotion
dtype: string
- name: video_name
dtype: string
splits:
- name: train
num_bytes: 1198989.1453851238
num_examples: 12529
- name: test
num_bytes: 104309.85461487627
num_examples: 1090
download_size: 614184
dataset_size: 1303299
# Dataset Card for "SemEval_traindata_emotions"
## How this dataset was built
from datasets import load_dataset
import datasets
from torchvision.io import read_video
import json
import torch
import os
from torch.utils.data import Dataset, DataLoader
import tqdm
# Path to the SemEval-2024 Task 3 (Subtask 2) training annotations.
dataset_path = "./SemEval-2024_Task3/training_data/Subtask_2_train.json"

# Load the annotation file; the context manager closes the handle
# promptly (the original `open(...).read()` leaked it).
with open(dataset_path, encoding="utf-8") as f:
    dataset = json.load(f)
print(len(dataset))

# Flatten the dataset: each top-level item carries a "conversation"
# list of utterance dicts; collect every utterance into one flat list.
all_conversations = []
for item in dataset:
    all_conversations.extend(item["conversation"])
print(len(all_conversations))
# Build a datasets.Dataset from the flat utterance list and carve out
# a deterministic 8% test split (seed fixed for reproducibility).
all_data = datasets.Dataset.from_list(all_conversations)
all_data = all_data.train_test_split(
    test_size=0.08,
    seed=42,
)

# Read the HF access token, closing the file and stripping the trailing
# newline most editors append — the original passed the raw file
# contents, which can break authentication.
with open("./hf_token", encoding="utf-8") as token_file:
    hf_token = token_file.read().strip()

all_data.push_to_hub(
    "dim/SemEval_training_data_emotions",
    token=hf_token,
)