from utils import parse, read_json_file, write_jsonl_file
import os

def get_prefix_lengths(lengths):
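    """Return cumulative sums of `lengths`, starting with 0; as used below,
    element i is the number of non-space characters in the first i tokens."""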
    prefix_lengths = [0]
    for length in lengths:
        prefix_lengths.append(prefix_lengths[-1] + length)

    return prefix_lengths


def custom_join(tokens, start, end):
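    """Detokenize tokens[start:end]: join with spaces, but attach hyphens,
    commas, and apostrophe-initial tokens (e.g. "'s") directly to the
    preceding token so the result matches the raw transcript."""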
    joined_str = ""
    for i in range(start, end):
        joined_str += tokens[i]
        if (
            i == end - 1
            or tokens[i + 1] in ["-", ","]
            or tokens[i + 1].startswith("'")
            or tokens[i] == "-"
        ):
            continue
        joined_str += " "

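    # If the joined span contains a quoted phrase, strip the spaces the
    # joiner inserted just inside the quotes.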
    if joined_str.count('"') > 1:
        start = joined_str.index('"')
        end = joined_str[start + 1 :].index('"')
        joined_str = (
            joined_str[: start + 1]
            + joined_str[start + 1 :][:end].strip()
            + joined_str[start + 1 :][end:]
        )

    return joined_str


def parse_characters(utterance, tokens, character_entities):
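    """Map token-level character-entity spans onto character offsets in the
    raw utterance, returning {"value", "start", "end"} dicts. Offsets are
    aligned by counting non-space characters, which tolerates irregular
    spacing in the transcript."""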
    utterance_char_idx = 0
    characters = []
    for sent_idx, sent in enumerate(character_entities):
        prefix_lengths = get_prefix_lengths(map(len, tokens[sent_idx]))
        for character in sent:
            span = ", ".join(character[2:])
            scan_length = prefix_lengths[character[0]]
            start = utterance_char_idx

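            # Skip the preceding tokens' characters (spaces do not count)
            # so that `start` lands on the span's first character.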
            while scan_length >= 0 and start < len(utterance):
                if scan_length == 0 and utterance[start] != " ":
                    break

                if utterance[start] != " ":
                    scan_length -= 1

                start += 1

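            # Advance `end` past the span's own non-space characters.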
            scan_length = prefix_lengths[character[1]] - prefix_lengths[character[0]]
            end = start

            while scan_length > 0 and end < len(utterance):
                if utterance[end] != " ":
                    scan_length -= 1

                end += 1

            characters.append({"value": span, "start": start, "end": end})

            # Known misalignment in the source data; skip the sanity check.
            if utterance[start:end] == "Emily- noooo":
                continue

            assert utterance[start:end] == custom_join(
                tokens[sent_idx], character[0], character[1]
            )

        # Advance utterance_char_idx past all non-space characters of this sentence.
        scan_length = prefix_lengths[-1]
        while scan_length >= 0 and utterance_char_idx < len(utterance):
            if scan_length == 0 and utterance[utterance_char_idx] != " ":
                break

            if utterance[utterance_char_idx] != " ":
                scan_length -= 1

            utterance_char_idx += 1

    return characters


def preprocess(args, split):
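    """Convert one split of the character-identification corpus into the
    unified JSONL dialog format, emitting one record per scene."""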
    input_file = os.path.join(args.input_dir, f"character-identification-{split}.json")

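    # The corpus names its splits trn/dev/tst; emit conventional names.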
    if split == "trn":
        split = "train"
    elif split == "tst":
        split = "test"

    output_file = os.path.join(args.output_dir, f"{split}.jsonl")

    episodes = read_json_file(input_file)["episodes"]

    processed_data = []
    for episode in episodes:
        scenes = episode["scenes"]
        for scene in scenes:
            utterances = scene["utterances"]
            dialog = {
                "turn": "multi",
                "locale": "en",
                "dialog": [],
            }

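            # Catch-all entity labels used by the corpus, tracked alongside
            # the individual speaker names.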
            roles = ["#GENERAL#", "#OTHER#", "#ALL#"]
            for example in utterances:
                utterance = example["transcript"]
                tokens = example["tokens"]
                character_entities = example["character_entities"]

                characters = parse_characters(utterance, tokens, character_entities)
                dialog["dialog"].append(
                    {
                        "roles": example["speakers"],
                        "utterance": utterance,
                        "characters": characters,
                    }
                )

                roles += example["speakers"]

            dialog["knowledge"] = {"type": "lsit", "value": sorted(roles)}
            processed_data.append(dialog)

    write_jsonl_file(processed_data, output_file)


if __name__ == "__main__":
    args = parse()
    preprocess(args, "trn")
    preprocess(args, "dev")
    preprocess(args, "tst")