import time
from unittest import mock
from unittest.mock import MagicMock

import pandas as pd
import pytest

from llm_studio.app_utils.default_datasets import (
    prepare_default_dataset_causal_language_modeling,
)
from llm_studio.src.datasets.conversation_chain_handler import ConversationChainHandler


@pytest.fixture
def df():
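    """
    Two independent linear conversation chains of four turns each:
    id1 -> id2 -> id3 -> id4 and x1 -> x2 -> x3 -> x4.
    """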
    return pd.DataFrame(
        {
            "id": ["id1", "id2", "id3", "id4", "x1", "x2", "x3", "x4"],
            "parent_id": ["None", "id1", "id2", "id3", "None", "x1", "x2", "x3"],
            "answer": [
                "answer1",
                "answer2",
                "answer3",
                "answer4",
                "a1",
                "a2",
                "a3",
                "a4",
            ],
            "system": [
                "system1",
                "system2",
                "system3",
                "system4",
                "s1",
                "s2",
                "s3",
                "s4",
            ],
            "prompt": [
                "prompt1",
                "prompt2",
                "prompt3",
                "prompt4",
                "p1",
                "p2",
                "p3",
                "p4",
            ],
        }
    )


@pytest.fixture
def df_short():
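    """
    A single linear conversation chain of four turns: id1 -> id2 -> id3 -> id4.
    """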
    return pd.DataFrame(
        {
            "id": ["id1", "id2", "id3", "id4"],
            "parent_id": ["None", "id1", "id2", "id3"],
            "answer": ["answer1", "answer2", "answer3", "answer4"],
            "system": ["system1", "system2", "system3", "system4"],
            "prompt": ["prompt1", "prompt2", "prompt3", "prompt4"],
        }
    )


@pytest.fixture
def cfg():
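    """
    Mocked config pointing the handler at the fixture columns,
    with limit_chained_samples enabled by default.
    """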
    cfg = MagicMock()
    cfg.dataset.parent_id_column = "parent_id"
    cfg.dataset.id_column = "id"
    cfg.dataset.system_column = "system"
    cfg.dataset.prompt_column = "prompt"
    cfg.dataset.answer_column = "answer"
    cfg.dataset.limit_chained_samples = True
    return cfg


def test_conversation_chain_handler(cfg, df):
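    """
    With limit_chained_samples enabled, each full chain is returned as a single
    sample, so the two four-turn chains in `df` yield exactly two conversations.
    """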
    handler = ConversationChainHandler(df, cfg)

    assert len(handler) == 2, len(handler)

    data = handler[0]
    assert data == {
        "prompts": ["prompt1", "prompt2", "prompt3", "prompt4"],
        "answers": ["answer1", "answer2", "answer3", "answer4"],
        "systems": ["system1", "system2", "system3", "system4"],
    }

    data = handler[1]
    assert data == {
        "prompts": ["p1", "p2", "p3", "p4"],
        "answers": ["a1", "a2", "a3", "a4"],
        "systems": ["s1", "s2", "s3", "s4"],
    }


def test_chained_samples_disabled(df_short, cfg):
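    """
    Setting parent_id_column to "None" disables chaining entirely, so every row
    becomes its own single-turn conversation.
    """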
    cfg.dataset.limit_chained_samples = False
    cfg.dataset.parent_id_column = "None"

    handler = ConversationChainHandler(df_short, cfg)
    assert len(handler) == 4
    for i in range(4):
        assert handler[i] == {
            "prompts": [f"prompt{i + 1}"],
            "answers": [f"answer{i + 1}"],
            "systems": [f"system{i + 1}"],
        }


def test_incomplete_chained_samples(cfg, df_short):
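    """
    With limit_chained_samples disabled but a valid parent_id column, every row
    yields one conversation ending at that row, so the chains grow from one to
    four turns.
    """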
    cfg.dataset.limit_chained_samples = False

    handler = ConversationChainHandler(df_short, cfg)
    assert handler.conversation_chain_ids == [[0], [0, 1], [0, 1, 2], [0, 1, 2, 3]]
    assert len(handler) == 4
    for i in range(4):
        assert handler[i] == {
            "prompts": [f"prompt{j + 1}" for j in range(i + 1)],
            "answers": [f"answer{j + 1}" for j in range(i + 1)],
            "systems": [f"system{j + 1}" for j in range(i + 1)],
        }


def test_get_conversation_ids():
    # test the get_conversation_ids method - normal case
    conv_ids = ConversationChainHandler.get_conversation_ids(
        {"id2": "id1", "id3": "id2", "id4": "id3"}, "id4"
    )
    assert conv_ids == ["id1", "id2", "id3", "id4"]

    # test the get_conversation_ids method - circular case, should raise ValueError
    with pytest.raises(ValueError):
        ConversationChainHandler.get_conversation_ids(
            {"id1": "id4", "id2": "id1", "id3": "id2", "id4": "id3"}, "id4"
        )


@pytest.fixture
def df_with_nan():
    # child -> parent mapping:
    # a1 -> " " -> -inf -> 1234567890 -> "1234567890" -> "x1" -> 1 -> 2 -> 3 -> 4
    # a2, a3, a4 have no valid parent and each start a new conversation
    return pd.DataFrame(
        {
            "id": [
                "a1",
                " ",
                "-inf",
                1234567890,
                "1234567890",
                "x1",
                1,
                2,
                3.0,
                4.0,
                "a2",
                "a3",
                "a4",
            ],
            "parent_id": [
                " ",  # valid
                "-inf",  # valid
                1234567890,  # valid
                "1234567890",  # valid, different type
                "x1",  # valid
                1.0,  # valid, needs to map to the int value
                2.0,  # valid, needs to map to the int value
                3,  # valid, needs to map to the float value
                4,  # valid, needs to map to the float value
                float("nan"),  # should be ignored
                "None",  # should be ignored
                None,  # should be ignored
                float("inf"),  # should be ignored
            ],
            "answer": [f"answer{i + 1}" for i in range(13)],
            "system": [f"system{i + 1}" for i in range(13)],
            "prompt": [f"prompt{i + 1}" for i in range(13)],
        }
    )


def test_conversation_chain_handles_nan_parent_ids(df_with_nan, cfg):
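    """
    Parent ids that are NaN, None, "None" or inf are treated as missing, so those
    rows start new conversations; all other values, including mixed int/float/str
    ids, resolve into a single ten-turn chain.
    """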
    handler = ConversationChainHandler(df_with_nan, cfg)
    assert handler.conversation_chain_ids == [
        [9, 8, 7, 6, 5, 4, 3, 2, 1, 0],
        [10],
        [11],
        [12],
    ]
    assert len(handler) == 4
    assert handler[0] == {
        "prompts": [
            "prompt10",
            "prompt9",
            "prompt8",
            "prompt7",
            "prompt6",
            "prompt5",
            "prompt4",
            "prompt3",
            "prompt2",
            "prompt1",
        ],
        "answers": [
            "answer10",
            "answer9",
            "answer8",
            "answer7",
            "answer6",
            "answer5",
            "answer4",
            "answer3",
            "answer2",
            "answer1",
        ],
        "systems": [
            "system10",
            "system9",
            "system8",
            "system7",
            "system6",
            "system5",
            "system4",
            "system3",
            "system2",
            "system1",
        ],
    }
    assert handler[1] == {
        "prompts": ["prompt11"],
        "answers": ["answer11"],
        "systems": ["system11"],
    }
    assert handler[2] == {
        "prompts": ["prompt12"],
        "answers": ["answer12"],
        "systems": ["system12"],
    }
    assert handler[3] == {
        "prompts": ["prompt13"],
        "answers": ["answer13"],
        "systems": ["system13"],
    }


def test_conversation_chain_handler_filters_parent_ids(df_with_nan, cfg):
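    """
    A parent id that does not occur in the id column must behave exactly like an
    explicit "None" parent: the row starts a new conversation.
    """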
    for i in range(len(df_with_nan)):
        df_with_nan_1 = df_with_nan.copy()
        df_with_nan_1.loc[i, "parent_id"] = "MISSING"

        handler_1 = ConversationChainHandler(df_with_nan_1, cfg)
        df_with_nan_2 = df_with_nan.copy()
        df_with_nan_2.loc[i, "parent_id"] = "None"

        handler_2 = ConversationChainHandler(df_with_nan_2, cfg)
        assert handler_1.conversation_chain_ids == handler_2.conversation_chain_ids


def test_oasst_conversation_chain_handler(tmp_path):
    """
    Test conversation chain handler on default OASST dataset.
    """

    df = prepare_default_dataset_causal_language_modeling(tmp_path)
    cfg = mock.MagicMock()
    cfg.dataset.prompt_column = "instruction"
    cfg.dataset.answer_column = "output"
    cfg.dataset.parent_id_column = "parent_id"
    cfg.dataset.id_column = "id"
    cfg.dataset.system_column = "None"

    for limit_chained_samples in [False, True]:
        cfg.dataset.limit_chained_samples = limit_chained_samples
        conversation_chain_handler = ConversationChainHandler(df, cfg=cfg)
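        # Walk each reconstructed chain and verify that consecutive rows are linked:
        # every row's parent_id must equal the id of the previous row in the chain.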
        for conversation_chain_ids in conversation_chain_handler.conversation_chain_ids:
            parent_idx = conversation_chain_ids[0]
            for idx in conversation_chain_ids[1:]:
                assert df.loc[idx, "parent_id"] == df.loc[parent_idx, "id"]
                parent_idx = idx

    cfg.dataset.limit_chained_samples = False
    conversation_chain_handler = ConversationChainHandler(df, cfg=cfg)

    prompts = [
        "I am building a mechanical keyboard from scratch. I already have the working hardware and am in the process of configuring the firmware. However i find that the qwertz layout gives me wrist pain. I will use the keyboard for writing in english, german and french, and for coding mainly.\nWhat keyboard layout would be best suited for me?",  # noqa: E501
        "what is the advantage of BvoFrak and Bépo, over Azerty that is more common in France.",  # noqa: E501
        "Is there any information on how long it takes a typical person to adopt the BvoFrak or Bépo layouts? I worry that the time investment may be a concern, or that I may  make more mistakes when programming during the transition period.",  # noqa: E501
    ]
    answers = [
        "Generally, it seems that Neo layout may be what you are looking for.\nHere are some keyboard layouts for various use cases: \nFor French: BvoFrak, Bépo\nFor German: Neo, AdNW\nFor English: DHIATENSOR, Dvorak,\nFor programing: Programmer Dvorak, Evolved, Capewell, QGMLWY, Arensito\nNote that while layout may contribute to wrist pain, other factors like the angle of the keyboard, key spacing (vertical, horizontal, and depth), more may also be contributing factors. Learning a new layout takes time and effort, and may make it more difficult to type on other keyboards.",  # noqa: E501
        "BvoFrak and Bépo are alternative French keyboard layouts that aim to improve on the traditional Azerty layout. Some of the claimed advantages include:\n\n- BvoFrak aims to minimize finger movement and optimize typing efficiency. It groups letters by finger positions rather than alphabetic order.\n- Bépo aims to allow for faster typing of common French letters and letter combinations. It also groups letters by finger positions and makes some other rearrangements to commonly used keys.\n- Both BvoFrak and Bépo proponents argue that Azerty is inefficient and optimized for typewriters, not modern keyboards and typing. However, Azerty is deeply entrenched and the alternative layouts have not achieved widespread adoption.\n\nSo the potential advantages are increased typing efficiency and comfort, but the alternative layouts have to overcome the momentum of the established Azerty layout. Whether the claimed advantages hold up in practice for most typists is unclear without much broader adoption of the alternative layouts.",  # noqa: E501
        "The time it takes to learn a new keyboard layout can vary depending on several factors, including the individual's previous typing experience and the amount of time they can devote to practice. However, on average, it can take between 2-4 weeks to become proficient in a new keyboard layout.\n\nDuring the transition period, it's natural to make more mistakes when typing, especially when programming or using special characters. It's important to be patient with yourself and allow yourself time to adjust to the new layout. With practice, the new layout will become more natural and comfortable to use, and the speed and accuracy of your typing will improve.\n\nUltimately, the investment of time and effort required to learn a new keyboard layout is a personal decision, and it's important to consider the potential benefits and drawbacks before making a decision. While there may be some initial challenges, many people find that the improved ergonomics and efficiency of a new keyboard layout make the investment well worth it in the long run.",  # noqa: E501
    ]
    systems = ["", "", ""]
    sample = conversation_chain_handler[
        10719
    ]  # 10719 == sample with 3 round conversation
    assert sample["prompts"] == prompts
    assert sample["answers"] == answers
    assert sample["systems"] == systems


def test_oasst_conversation_chain_handler_is_fast(tmp_path):
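    """
    Replicate the OASST dataset 50 times with unique ids (>400k rows) and check
    that building and iterating all conversation chains stays within the time
    budget.
    """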
    df_oasst = prepare_default_dataset_causal_language_modeling(tmp_path)
    cfg = mock.MagicMock()
    cfg.dataset.prompt_column = "instruction"
    cfg.dataset.answer_column = "output"
    cfg.dataset.parent_id_column = "parent_id"
    cfg.dataset.id_column = "id"
    cfg.dataset.system_column = "None"
    cfg.dataset.limit_chained_samples = True
    dfs = []
    for i in range(50):
        df = df_oasst.copy()
        df["parent_id"] = df["parent_id"].apply(
            lambda x: x + str(i) if x is not None else x
        )
        df["id"] = df["id"].apply(lambda x: x + str(i))
        dfs.append(df)

    df = pd.concat(dfs).reset_index(drop=True)

    assert len(df) > 400_000

    t_0 = time.time()
    conversation_chain_handler = ConversationChainHandler(df, cfg)
    _ = [conversation for conversation in conversation_chain_handler]
    t_1 = time.time()
    assert t_1 - t_0 < 10  # typically ~5 seconds; assert with generous margin