Delta-Vector committed
Commit 65e42b4 · verified · 1 Parent(s): 333daa2

Upload dan-chat-advanced-llama3.py

Files changed (1)
  1. dan-chat-advanced-llama3.py +159 -0
dan-chat-advanced-llama3.py ADDED
@@ -0,0 +1,159 @@
"""Module containing the DanChatMLPromptTokenizingStrategy and DanChatMLPrompter classes"""

import copy
import logging
from typing import Generator, Tuple

from axolotl.prompt_tokenizers import PromptTokenizingStrategy

LOG = logging.getLogger("axolotl")

IGNORE_TOKEN_ID = -100

turn_separator = ""

# Llama 3 role headers for each turn type
system_prefix = "<|start_header_id|>system<|end_header_id|>\n\n"
user_prefix = "<|start_header_id|>user<|end_header_id|>\n\n"
assistant_prefix = "<|start_header_id|>assistant<|end_header_id|>\n\n"
tool_prefix = "<|start_header_id|>tool<|end_header_id|>\n\n"


class DanChatMLPromptTokenizingStrategy(PromptTokenizingStrategy):
    def __init__(self, prompter, tokenizer, train_on_inputs, sequence_len, *args, **kwargs):
        super().__init__(prompter, tokenizer, *args, **kwargs)

        # Pre-tokenize the assistant header and the turn separator so their
        # token counts can be reused when masking labels in _get_labels.
        res = self._tokenize(assistant_prefix, add_eos_token=False, strip_bos_token=True)
        self.bot_prefix_token_ids = res["input_ids"]

        res = self._tokenize(turn_separator, add_eos_token=False, strip_bos_token=True)
        self.turn_separator_token_ids = res["input_ids"]

        self.train_on_inputs = train_on_inputs
        self.sequence_len = sequence_len

    def tokenize_prompt(self, prompt):
        prompt_parts = list(self.prompter.build_prompt(prompt["conversations"]))
        tokenized_parts = []
        total_length = 0
        not_first_turn = False

        for role, message, loss, prefix in prompt_parts:
            # If prefix is not defined, set it to an empty string
            if prefix is None:
                prefix = ""

            if role in ["system", "user", "human", "tool"]:
                # Select the role header for this turn
                if role == "system":
                    role_prefix = system_prefix
                elif role in ["user", "human"]:
                    role_prefix = user_prefix
                else:
                    role_prefix = tool_prefix

                res = self._tokenize_with_turn(role_prefix, prefix + message, not_first_turn)
                labels = [IGNORE_TOKEN_ID] * len(res["input_ids"])

            elif role in ["model", "gpt"]:
                if not prefix:
                    res = self._tokenize_with_turn(assistant_prefix, message, not_first_turn)
                    labels = self._get_labels(res, loss, not_first_turn)
                else:
                    # Tokenize the assistant header plus the prefill separately from
                    # the message so the prefill is always excluded from the loss.
                    res_prefix = self._tokenize_with_turn(assistant_prefix, prefix, not_first_turn, add_eos_token=False)
                    labels_prefix = [IGNORE_TOKEN_ID] * len(res_prefix["input_ids"])

                    res_message = self._tokenize(message.rstrip(), add_eos_token=True, strip_bos_token=True)
                    labels_message = (
                        copy.deepcopy(res_message["input_ids"])
                        if loss
                        else [IGNORE_TOKEN_ID] * len(res_message["input_ids"])
                    )

                    res = {
                        "input_ids": res_prefix["input_ids"] + res_message["input_ids"],
                        "attention_mask": res_prefix["attention_mask"] + res_message["attention_mask"],
                    }
                    labels = labels_prefix + labels_message
            else:
                LOG.warning(f"unknown role in conversation: {role}")
                continue

            # Stop once adding this turn would exceed the sequence length
            part_length = len(res["input_ids"])
            if total_length + part_length > self.sequence_len:
                break

            tokenized_parts.append({
                "input_ids": res["input_ids"],
                "attention_mask": res["attention_mask"],
                "labels": labels,
                "role": role,
                "loss": loss,
            })
            total_length += part_length
            not_first_turn = True

        result = {
            "input_ids": [],
            "attention_mask": [],
            "labels": [],
        }

        # Drop trailing turns that are human/user/system or have loss = False
        while tokenized_parts and (
            tokenized_parts[-1]["role"] in ["human", "user", "system"]
            or not tokenized_parts[-1]["loss"]
        ):
            tokenized_parts.pop()

        # Ensure we have at least one user/human/system turn; if not, return empty
        if not any(part["role"] in ["human", "user", "system"] for part in tokenized_parts):
            return result

        # Ensure we have at least one gpt/model turn; if not, return empty
        if not any(part["role"] in ["model", "gpt"] for part in tokenized_parts):
            return result

        # Concatenate the surviving turns into the final example
        for part in tokenized_parts:
            result["input_ids"] += part["input_ids"]
            result["attention_mask"] += part["attention_mask"]
            result["labels"] += part["labels"]

        return result

    def _tokenize_with_turn(self, role_prefix, message, not_first_turn, add_eos_token=True):
        full_message = (turn_separator if not_first_turn else "") + role_prefix + message.strip()
        return self._tokenize(full_message, add_eos_token=add_eos_token, strip_bos_token=not_first_turn)

    def _get_labels(self, res, loss, not_first_turn):
        if not loss:
            return [IGNORE_TOKEN_ID] * len(res["input_ids"])

        # Mask the turn separator and assistant header; train only on the reply tokens
        prefix_len = len(self.bot_prefix_token_ids + (self.turn_separator_token_ids if not_first_turn else []))
        return [IGNORE_TOKEN_ID] * prefix_len + copy.deepcopy(res["input_ids"])[prefix_len:]


class DanChatMLPrompter:
    """
    Prompter for DanChatML.
    """

    def __init__(self, *args, **kwargs):
        pass

    def build_prompt(self, source, *args, **kwargs) -> Generator[Tuple[str, str, bool, str], None, None]:
        for msg in source:
            from_value = msg["from"]
            message_value = msg["value"]

            # Train on assistant turns by default unless the message overrides it
            loss = msg.get("loss")
            if loss is None:
                loss = from_value in ["gpt", "model"]

            # Set prefix, defaulting to an empty string if not present
            prefix = msg.get("prefix", "")

            yield from_value, message_value, loss, prefix


def load(tokenizer, cfg):
    return DanChatMLPromptTokenizingStrategy(DanChatMLPrompter(), tokenizer, cfg.train_on_inputs, cfg.sequence_len)
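
A minimal sketch of the record shape this strategy consumes and how it might be exercised. The `sample` keys ("from", "value", and the optional "loss" and "prefix") come straight from build_prompt above; `tokenizer` and `cfg` stand in for the objects axolotl passes to load, so the commented calls are illustrative rather than part of this file.

    # Hypothetical dataset record in the format build_prompt expects
    sample = {
        "conversations": [
            {"from": "system", "value": "You are a helpful assistant."},
            {"from": "human", "value": "Hello!"},
            # Optional keys: "loss" overrides the default masking and "prefix"
            # is prepended to the reply but excluded from the loss.
            {"from": "gpt", "value": "Hi there!"},
        ]
    }

    # strategy = load(tokenizer, cfg)
    # tokenized = strategy.tokenize_prompt(sample)
    # tokenized["labels"] carries IGNORE_TOKEN_ID (-100) for every system/user/tool
    # token and for the assistant header, so loss is computed only on the reply.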