|
--- |
|
dataset_info: |
|
- config_name: default |
|
features: |
|
- name: utterance |
|
dtype: string |
|
- name: label |
|
sequence: int64 |
|
splits: |
|
- name: train |
|
num_bytes: 1074443.008463079 |
|
num_examples: 12384 |
|
- name: test |
|
num_bytes: 268523.991536921 |
|
num_examples: 3095 |
|
download_size: 300800 |
|
dataset_size: 1342967.0 |
|
- config_name: intents |
|
features: |
|
- name: id |
|
dtype: int64 |
|
- name: name |
|
dtype: string |
|
- name: tags |
|
sequence: 'null' |
|
- name: regex_full_match |
|
sequence: 'null' |
|
- name: regex_partial_match |
|
sequence: 'null' |
|
- name: description |
|
dtype: 'null' |
|
splits: |
|
- name: intents |
|
num_bytes: 207 |
|
num_examples: 7 |
|
download_size: 2996 |
|
dataset_size: 207 |
|
configs: |
|
- config_name: default |
|
data_files: |
|
- split: train |
|
path: data/train-* |
|
- split: test |
|
path: data/test-* |
|
- config_name: intents |
|
data_files: |
|
- split: intents |
|
path: intents/intents-* |
|
task_categories: |
|
- text-classification |
|
language: |
|
- en |
|
--- |
|
|
|
# dstc3 |
|
|
|
This is a text classification dataset. It is intended for machine learning research and experimentation. |
|
|
|
This dataset was obtained by reformatting another publicly available dataset to be compatible with our [AutoIntent Library](https://deeppavlov.github.io/AutoIntent/index.html).
|
|
|
## Usage |
|
|
|
It is intended to be used with our [AutoIntent Library](https://deeppavlov.github.io/AutoIntent/index.html): |
|
|
|
```python |
|
from autointent import Dataset |
|
|
|
dstc3 = Dataset.from_hub("AutoIntent/dstc3") |
|
``` |
|
|
|
## Source |
|
|
|
This dataset is taken from `marcel-gohsen/dstc3` and formatted with our [AutoIntent Library](https://deeppavlov.github.io/AutoIntent/index.html): |
|
|
|
```python |
|
import datasets |
|
from autointent import Dataset |
|
from autointent.context.data_handler import split_dataset |
|
|
|
|
|
def extract_intent_info(ds: datasets.Dataset) -> list[str]:
    """Return the sorted vocabulary of intent names found in *ds*.

    Rows with an empty transcript are ignored while scanning, and the
    "reqmore" intent is excluded from the returned list.

    NOTE(review): the original version also called
    ``ds.filter(lambda example: "reqmore" in example["intent"])`` and
    discarded the result — ``Dataset.filter`` is not in-place and the
    local ``ds`` was never used afterwards, so the call was a pure
    no-op. It has been removed.
    """
    # Drop empty-transcript rows before collecting intent names.
    ds = ds.filter(lambda example: example["transcript"] != "")
    intent_names = sorted({name for intents in ds["intent"] for name in intents})
    intent_names.remove("reqmore")
    return intent_names
|
|
|
def parse(ds: datasets.Dataset, intent_names: list[str]):
    """Convert raw DSTC3 rows into ``{"utterance", "label"}`` records.

    Every original column is dropped; each output row keeps only the
    transcript text and a multi-hot 0/1 vector aligned with
    *intent_names*.
    """

    def to_record(row: dict):
        # 1 at position i iff the row carries intent_names[i].
        one_hot = [int(name in row["intent"]) for name in intent_names]
        return {"utterance": row["transcript"], "label": one_hot}

    original_columns = ds.features.keys()
    return ds.map(to_record, remove_columns=original_columns)
|
|
|
def calc_fractions(ds: datasets.Dataset, intent_names: list[str]) -> list[float]:
    """Return, per intent, the fraction of samples labeled with it.

    Args:
        ds: sized iterable of rows whose ``"label"`` field is a multi-hot
            0/1 list aligned with *intent_names*.
        intent_names: class names; only its length is used here.

    Returns:
        One fraction in ``[0, 1]`` per class. An empty dataset yields all
        zeros instead of raising ``ZeroDivisionError``.
    """
    counts = [0] * len(intent_names)
    for sample in ds:
        for i, indicator in enumerate(sample["label"]):
            counts[i] += indicator
    total = len(ds)
    if total == 0:
        # Robustness: an empty split has no samples of any class.
        return [0.0] * len(intent_names)
    return [count / total for count in counts]
|
|
|
def remove_low_resource_classes(ds: datasets.Dataset, intent_names: list[str], fraction_thresh: float = 0.01) -> tuple[list[dict], list[str]]:
    """Drop intents rarer than *fraction_thresh* and re-encode labels.

    A sample is discarded entirely when its single intent belongs to a
    removed class; otherwise the removed label positions are stripped
    from its multi-hot vector.

    Returns:
        The kept samples and the surviving intent names.
    """
    is_low_resource = [
        fraction < fraction_thresh
        for fraction in calc_fractions(ds, intent_names)
    ]
    kept_names = [
        name
        for name, drop in zip(intent_names, is_low_resource, strict=True)
        if not drop
    ]
    kept_samples = []
    for sample in ds:
        labels = sample["label"]
        # Skip samples whose only intent is being removed.
        if sum(labels) == 1 and is_low_resource[labels.index(1)]:
            continue
        sample["label"] = [
            flag
            for flag, drop in zip(labels, is_low_resource, strict=True)
            if not drop
        ]
        kept_samples.append(sample)
    return kept_samples, kept_names
|
|
|
def remove_oos(ds: datasets.Dataset):
    """Drop out-of-scope samples, i.e. rows whose label vector is all zeros."""

    def has_intent(sample: dict) -> bool:
        return sum(sample["label"]) != 0

    return ds.filter(has_intent)
|
|
|
|
|
if __name__ == "__main__":
    # Only the "test" split of the source dataset is used downstream.
    source = datasets.load_dataset("marcel-gohsen/dstc3")
    raw_split = source["test"]

    intent_names = extract_intent_info(raw_split)
    parsed = parse(raw_split, intent_names)
    in_scope = remove_oos(parsed)
    filtered, intent_names = remove_low_resource_classes(in_scope, intent_names)

    # Intent metadata records expected by AutoIntent.
    intents = [{"id": idx, "name": name} for idx, name in enumerate(intent_names)]
    dstc_final = Dataset.from_dict({"intents": intents, "train": filtered})

    # Carve a held-out test split (20%) out of the training data.
    dstc_final["train"], dstc_final["test"] = split_dataset(
        dstc_final, split="train", test_size=0.2, random_seed=42
    )
|
``` |
|
|