File size: 4,099 Bytes
faa7f03
 
6e4e1db
faa7f03
 
 
 
 
 
d80bb5f
 
 
54a2f31
 
 
 
 
6e4e1db
 
 
 
 
 
 
 
76bca50
6e4e1db
76bca50
6e4e1db
 
 
 
 
8d467db
 
 
 
faa7f03
 
 
 
 
b5d41b4
 
6e4e1db
 
 
 
4bb5a2a
 
 
 
3f6cc20
 
 
 
 
 
 
 
 
 
 
 
 
 
 
c56dd13
3f6cc20
 
 
 
 
 
 
3eca468
3f6cc20
49e4d8c
3f6cc20
3eca468
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3f6cc20
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
---
dataset_info:
- config_name: default
  features:
  - name: utterance
    dtype: string
  - name: label
    sequence: int64
  splits:
  - name: train
    num_bytes: 1074443.008463079
    num_examples: 12384
  - name: test
    num_bytes: 268523.991536921
    num_examples: 3095
  download_size: 300800
  dataset_size: 1342967.0
- config_name: intents
  features:
  - name: id
    dtype: int64
  - name: name
    dtype: string
  - name: tags
    sequence: 'null'
  - name: regex_full_match
    sequence: 'null'
  - name: regex_partial_match
    sequence: 'null'
  - name: description
    dtype: 'null'
  splits:
  - name: intents
    num_bytes: 207
    num_examples: 7
  download_size: 2996
  dataset_size: 207
configs:
- config_name: default
  data_files:
  - split: train
    path: data/train-*
  - split: test
    path: data/test-*
- config_name: intents
  data_files:
  - split: intents
    path: intents/intents-*
task_categories:
- text-classification
language:
- en
---

# dstc3

This is a text classification dataset. It is intended for machine learning research and experimentation.

This dataset was obtained by reformatting another publicly available dataset to be compatible with our [AutoIntent Library](https://deeppavlov.github.io/AutoIntent/index.html).

## Usage

It is intended to be used with our [AutoIntent Library](https://deeppavlov.github.io/AutoIntent/index.html):

```python
from autointent import Dataset

dstc3 = Dataset.from_hub("AutoIntent/dstc3")
```

## Source

This dataset is taken from `marcel-gohsen/dstc3` and formatted with our [AutoIntent Library](https://deeppavlov.github.io/AutoIntent/index.html):

```python
import datasets
from autointent import Dataset
from autointent.context.data_handler import split_dataset


def extract_intent_info(ds: "datasets.Dataset") -> list[str]:
    """Return the sorted list of intent names occurring in *ds*.

    Rows with an empty transcript are ignored. The generic "reqmore"
    intent is excluded from the returned names (``list.remove`` raises
    ``ValueError`` if it is absent, matching the original behaviour).
    """
    # Only rows with a non-empty transcript contribute intent names.
    ds = ds.filter(lambda example: example["transcript"] != "")
    intent_names = sorted({name for intents in ds["intent"] for name in intents})
    # "reqmore" is deliberately not treated as a class of its own.
    intent_names.remove("reqmore")
    # NOTE(review): a previous `ds.filter(...)` call here discarded its
    # result, so it had no effect and was removed; sample-level filtering
    # of "reqmore" rows must be done by the caller if desired.
    return intent_names

def parse(ds: "datasets.Dataset", intent_names: list[str]):
    """Convert raw dstc3 rows into ``(utterance, multi-hot label)`` records."""
    original_columns = ds.features.keys()

    def to_record(example: dict):
        # One 0/1 indicator per known intent name, in the given order.
        label = [int(name in example["intent"]) for name in intent_names]
        return {"utterance": example["transcript"], "label": label}

    return ds.map(to_record, remove_columns=original_columns)

def calc_fractions(ds: "datasets.Dataset", intent_names: list[str]) -> list[float]:
    """Return, per intent, the fraction of samples in *ds* carrying that label.

    Raises ZeroDivisionError on an empty dataset (as before).
    """
    total = len(ds)
    counts = [0] * len(intent_names)
    for sample in ds:
        for idx, flag in enumerate(sample["label"]):
            counts[idx] += flag
    return [count / total for count in counts]

def remove_low_resource_classes(ds: "datasets.Dataset", intent_names: list[str], fraction_thresh: float = 0.01) -> tuple[list[dict], list[str]]:
    """Drop intent classes rarer than ``fraction_thresh`` and re-encode labels.

    A sample is discarded only when its single active class is one of the
    dropped ones; every surviving sample has its label vector compacted to
    the remaining classes. Returns the kept samples and kept intent names.
    """
    drop_mask = [frac < fraction_thresh for frac in calc_fractions(ds, intent_names)]
    kept_names = [name for name, dropped in zip(intent_names, drop_mask) if not dropped]
    kept_samples = []
    for sample in ds:
        label = sample["label"]
        # Single-label sample whose only class is being dropped -> discard it.
        if sum(label) == 1 and drop_mask[label.index(1)]:
            continue
        sample["label"] = [
            flag for flag, dropped in zip(label, drop_mask, strict=True)
            if not dropped
        ]
        kept_samples.append(sample)
    return kept_samples, kept_names

def remove_oos(ds: "datasets.Dataset"):
    """Keep only samples whose multi-hot label has at least one active class."""
    def has_intent(sample: dict) -> bool:
        return sum(sample["label"]) != 0
    return ds.filter(has_intent)


if __name__ == "__main__":
    # Download the source dataset; only its "test" split is used below.
    dstc3 = datasets.load_dataset("marcel-gohsen/dstc3")
    
    intent_names = extract_intent_info(dstc3["test"])
    parsed = parse(dstc3["test"], intent_names)
    # Drop all-zero (out-of-scope) samples first, then prune rare classes
    # and compact the label vectors to the surviving intents.
    filtered, intent_names = remove_low_resource_classes(remove_oos(parsed), intent_names)
    intents = [{"id": i, "name": name} for i, name in enumerate(intent_names)]
    dstc_final = Dataset.from_dict({"intents": intents, "train": filtered})
    # Deterministic 80/20 train/test split of the assembled dataset.
    dstc_final["train"], dstc_final["test"] = split_dataset(
        dstc_final, split="train", test_size=0.2, random_seed=42
    )
```