Upload dan-chat-advanced.py
dan-chat-advanced.py
ADDED
@@ -0,0 +1,151 @@
"""Module containing the DanChatMLPromptTokenizingStrategy and DanChatMLPrompter classes"""

import copy
import logging
from typing import Generator, Tuple

from axolotl.prompt_tokenizers import PromptTokenizingStrategy

LOG = logging.getLogger("axolotl")

IGNORE_TOKEN_ID = -100

turn_separator = "\n"

system_prefix = "<|im_start|>system\n"
user_prefix = "<|im_start|>user\n"
assistant_prefix = "<|im_start|>assistant\n"


class DanChatMLPromptTokenizingStrategy(PromptTokenizingStrategy):
    def __init__(self, prompter, tokenizer, train_on_inputs, sequence_len, *args, **kwargs):
        super().__init__(prompter, tokenizer, *args, **kwargs)

        # Pre-tokenize the assistant role header and the turn separator once;
        # _get_labels uses their lengths to mask the header on assistant turns.
        res = self._tokenize(assistant_prefix, add_eos_token=False, strip_bos_token=True)
        self.bot_prefix_token_ids = res["input_ids"]

        res = self._tokenize(turn_separator, add_eos_token=False, strip_bos_token=True)
        self.turn_separator_token_ids = res["input_ids"]

        self.train_on_inputs = train_on_inputs
        self.sequence_len = sequence_len

    def tokenize_prompt(self, prompt):
        prompt_parts = list(self.prompter.build_prompt(prompt["conversations"]))
        tokenized_parts = []
        total_length = 0
        not_first_turn = False

        for role, message, loss, prefix in prompt_parts:
            prefix = prefix or ""

            if role in ["system", "user", "human"]:
                role_prefix = system_prefix if role == "system" else user_prefix
                res = self._tokenize_with_turn(role_prefix, prefix + message, not_first_turn)
                labels = [IGNORE_TOKEN_ID] * len(res["input_ids"])

            elif role in ["model", "gpt"]:
                if not prefix:
                    res = self._tokenize_with_turn(assistant_prefix, message, not_first_turn)
                    labels = self._get_labels(res, loss, not_first_turn)
                else:
                    # Tokenize the assistant prefix and the completion separately:
                    # the prefix is always masked, so only the completion (and its
                    # EOS token) can contribute to the loss.
                    res_prefix = self._tokenize_with_turn(assistant_prefix, prefix, not_first_turn, add_eos_token=False)
                    labels_prefix = [IGNORE_TOKEN_ID] * len(res_prefix["input_ids"])

                    res_message = self._tokenize(message.rstrip(), add_eos_token=True, strip_bos_token=True)
                    labels_message = copy.deepcopy(res_message["input_ids"]) if loss else [IGNORE_TOKEN_ID] * len(res_message["input_ids"])

                    res = {
                        "input_ids": res_prefix["input_ids"] + res_message["input_ids"],
                        "attention_mask": res_prefix["attention_mask"] + res_message["attention_mask"],
                    }
                    labels = labels_prefix + labels_message
            else:
                LOG.warning(f"unknown role in conversation: {role}")
                continue

            # Stop once the next turn would overflow the context window.
            part_length = len(res["input_ids"])
            if total_length + part_length > self.sequence_len:
                break

            tokenized_parts.append({
                "input_ids": res["input_ids"],
                "attention_mask": res["attention_mask"],
                "labels": labels,
                "role": role,
                "loss": loss,
            })
            total_length += part_length
            not_first_turn = True

        result = {
            "input_ids": [],
            "attention_mask": [],
            "labels": [],
        }

        # Drop trailing turns that cannot contribute to the loss
        # (human/user/system turns, or turns with loss = False).
        while tokenized_parts and (tokenized_parts[-1]["role"] in ["human", "user", "system"] or not tokenized_parts[-1]["loss"]):
            tokenized_parts.pop()

        # Require at least one user/human/system turn; otherwise return empty.
        if not any(part["role"] in ["human", "user", "system"] for part in tokenized_parts):
            return result

        # Require at least one gpt/model turn; otherwise return empty.
        if not any(part["role"] in ["model", "gpt"] for part in tokenized_parts):
            return result

        # Concatenate the surviving turns into the final example.
        for part in tokenized_parts:
            result["input_ids"] += part["input_ids"]
            result["attention_mask"] += part["attention_mask"]
            result["labels"] += part["labels"]

        return result

    def _tokenize_with_turn(self, role_prefix, message, not_first_turn, add_eos_token=True):
        # Turns after the first are preceded by the turn separator and must
        # not repeat the BOS token.
        full_message = (turn_separator if not_first_turn else "") + role_prefix + message.strip()
        return self._tokenize(full_message, add_eos_token=add_eos_token, strip_bos_token=not_first_turn)

    def _get_labels(self, res, loss, not_first_turn):
        if not loss:
            return [IGNORE_TOKEN_ID] * len(res["input_ids"])

        # Mask the assistant role header (and the leading turn separator, when
        # present) so only the assistant's actual message is trained on.
        prefix_len = len(self.bot_prefix_token_ids + (self.turn_separator_token_ids if not_first_turn else []))
        return [IGNORE_TOKEN_ID] * prefix_len + copy.deepcopy(res["input_ids"])[prefix_len:]


class DanChatMLPrompter:
    """
    Prompter for DanChatML.
    """

    def __init__(self, *args, **kwargs):
        pass

    def build_prompt(self, source, *args, **kwargs) -> Generator[Tuple[str, str, bool, str], None, None]:
        for msg in source:
            from_value = msg["from"]
            message_value = msg["value"]

            # Default the loss flag from the message source: train on
            # gpt/model turns unless the sample overrides it explicitly.
            loss = msg.get("loss")
            if loss is None:
                loss = from_value in ["gpt", "model"]

            # Set prefix, defaulting to an empty string if not present.
            prefix = msg.get("prefix", "")

            yield from_value, message_value, loss, prefix


def load(tokenizer, cfg):
    return DanChatMLPromptTokenizingStrategy(DanChatMLPrompter(), tokenizer, cfg.train_on_inputs, cfg.sequence_len)
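
For context, here is a minimal usage sketch (not part of the uploaded file). It assumes axolotl and transformers are installed and that the tokenizer already carries the ChatML special tokens; the model id and the cfg fields below are illustrative stand-ins for axolotl's config object.

# Hypothetical smoke test for the strategy above; the model id is a placeholder.
from types import SimpleNamespace

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("your/chatml-model")  # placeholder id
cfg = SimpleNamespace(train_on_inputs=False, sequence_len=4096)  # stand-in for axolotl's cfg

strategy = load(tokenizer, cfg)
sample = {
    "conversations": [
        {"from": "system", "value": "You are a helpful assistant."},
        {"from": "human", "value": "Say hello."},
        {"from": "gpt", "value": "Hello!"},
    ]
}
out = strategy.tokenize_prompt(sample)

# Every token of the system and user turns (and the assistant role header)
# is labeled IGNORE_TOKEN_ID (-100); only the assistant's completion tokens
# and the trailing EOS carry real labels.
trained = [t for t, l in zip(out["input_ids"], out["labels"]) if l != IGNORE_TOKEN_ID]
print(tokenizer.decode(trained))  # expected to decode to roughly "Hello!" plus EOS

Note the design choice in tokenize_prompt: because trailing non-loss turns are popped, a conversation that ends in a user message contributes nothing past its last assistant turn.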