Abhaykoul committed
Commit e54a5fb · verified · 1 Parent(s): 08286af

Delete instruct_pipeline.py

Files changed (1)
  1. instruct_pipeline.py +0 -212
instruct_pipeline.py DELETED
@@ -1,212 +0,0 @@
import logging
import re
from typing import List

import numpy as np
from transformers import Pipeline, PreTrainedTokenizer

from transformers.utils import is_tf_available

if is_tf_available():
    import tensorflow as tf

logger = logging.getLogger(__name__)

INSTRUCTION_KEY = "### Instruction:"
RESPONSE_KEY = "### Response:"
END_KEY = "### End"
INTRO_BLURB = (
    "Below is an instruction that describes a task. Write a response that appropriately completes the request."
)

# This is the prompt that is used for generating responses using an already trained model. It ends with the response
# key, where the job of the model is to provide the completion that follows it (i.e. the response itself).
PROMPT_FOR_GENERATION_FORMAT = """{intro}

{instruction_key}
{instruction}

{response_key}
""".format(
    intro=INTRO_BLURB,
    instruction_key=INSTRUCTION_KEY,
    instruction="{instruction}",
    response_key=RESPONSE_KEY,
)
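
# Illustration only (not part of the original file): rendering the prompt for a sample instruction.
_example_prompt = PROMPT_FOR_GENERATION_FORMAT.format(instruction="What is a tokenizer?")
# _example_prompt now reads:
#
#   Below is an instruction that describes a task. Write a response that appropriately completes the request.
#
#   ### Instruction:
#   What is a tokenizer?
#
#   ### Response: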

def get_special_token_id(tokenizer: PreTrainedTokenizer, key: str) -> int:
    """Gets the token ID for a given string that has been added to the tokenizer as a special token.

    When training, we configure the tokenizer so that sequences like "### Instruction:" and "### End" are
    treated specially and converted to a single, new token. This retrieves the token ID each of these keys maps to.

    Args:
        tokenizer (PreTrainedTokenizer): the tokenizer
        key (str): the key to convert to a single token

    Raises:
        ValueError: if more than one token ID was generated for the key

    Returns:
        int: the token ID for the given key
    """
    token_ids = tokenizer.encode(key)
    if len(token_ids) > 1:
        raise ValueError(f"Expected only a single token for '{key}' but found {token_ids}")
    return token_ids[0]
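
# Illustration only (not part of the original file): once the keys are registered as special
# tokens, each maps to exactly one ID. The base tokenizer here is purely illustrative.
from transformers import AutoTokenizer

_tok = AutoTokenizer.from_pretrained("gpt2")
_tok.add_special_tokens({"additional_special_tokens": [RESPONSE_KEY, END_KEY]})
_response_id = get_special_token_id(_tok, RESPONSE_KEY)  # a single int, e.g. 50257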

class InstructionTextGenerationPipeline(Pipeline):
    def __init__(
        self, *args, do_sample: bool = True, max_new_tokens: int = 256, top_p: float = 0.92, top_k: int = 0, **kwargs
    ):
        """Initialize the pipeline.

        Args:
            do_sample (bool, optional): Whether or not to use sampling. Defaults to True.
            max_new_tokens (int, optional): Max new tokens after the prompt to generate. Defaults to 256.
            top_p (float, optional): If set to a float < 1, only the smallest set of most probable tokens with
                probabilities that add up to top_p or higher are kept for generation. Defaults to 0.92.
            top_k (int, optional): The number of highest probability vocabulary tokens to keep for top-k filtering.
                Defaults to 0.
        """
        super().__init__(
            *args, do_sample=do_sample, max_new_tokens=max_new_tokens, top_p=top_p, top_k=top_k, **kwargs
        )

    def _sanitize_parameters(self, return_full_text: bool = None, **generate_kwargs):
        preprocess_params = {}

        # Newer versions of the tokenizer configure the response key as a special token; newer versions still may
        # append a newline to yield a single token. Find whatever token is configured for the response key.
        tokenizer_response_key = next(
            (token for token in self.tokenizer.additional_special_tokens if token.startswith(RESPONSE_KEY)), None
        )

        response_key_token_id = None
        end_key_token_id = None
        if tokenizer_response_key:
            try:
                response_key_token_id = get_special_token_id(self.tokenizer, tokenizer_response_key)
                end_key_token_id = get_special_token_id(self.tokenizer, END_KEY)

                # Ensure generation stops once it generates "### End".
                generate_kwargs["eos_token_id"] = end_key_token_id
            except ValueError:
                pass

        forward_params = generate_kwargs
        postprocess_params = {
            "response_key_token_id": response_key_token_id,
            "end_key_token_id": end_key_token_id,
        }

        if return_full_text is not None:
            postprocess_params["return_full_text"] = return_full_text

        return preprocess_params, forward_params, postprocess_params
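
    # Illustration only (not part of the original file): for a pipeline whose tokenizer has the special
    # tokens registered, the split above routes extra call kwargs roughly like this:
    #
    #   pre, fwd, post = pipe._sanitize_parameters(return_full_text=True, temperature=0.7)
    #   pre  -> {}                                  (preprocess takes nothing extra)
    #   fwd  -> {"temperature": 0.7, "eos_token_id": <id of "### End">}
    #   post -> {"response_key_token_id": <id>, "end_key_token_id": <id>, "return_full_text": True}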

    def preprocess(self, instruction_text, **generate_kwargs):
        prompt_text = PROMPT_FOR_GENERATION_FORMAT.format(instruction=instruction_text)
        inputs = self.tokenizer(
            prompt_text,
            return_tensors="pt",
        )
        inputs["prompt_text"] = prompt_text
        inputs["instruction_text"] = instruction_text
        return inputs

    def _forward(self, model_inputs, **generate_kwargs):
        input_ids = model_inputs["input_ids"]
        attention_mask = model_inputs.get("attention_mask", None)

        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]

        generated_sequence = self.model.generate(
            input_ids=input_ids.to(self.model.device) if input_ids is not None else None,
            attention_mask=attention_mask.to(self.model.device) if attention_mask is not None else None,
            pad_token_id=self.tokenizer.pad_token_id,
            **generate_kwargs,
        )

        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:])
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]))

        instruction_text = model_inputs.pop("instruction_text")
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "instruction_text": instruction_text}
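
    # Illustration only (not part of the original file): the reshape above groups multiple return
    # sequences per input. For example, with a batch of 2 prompts and num_return_sequences=3,
    # generate() returns a (6, seq_len) tensor, which becomes (2, 3, seq_len) here.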

    def postprocess(self, model_outputs, response_key_token_id, end_key_token_id, return_full_text: bool = False):
        generated_sequence = model_outputs["generated_sequence"][0]
        instruction_text = model_outputs["instruction_text"]

        generated_sequence: List[List[int]] = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:

            # The response will be set to this variable if we can identify it.
            decoded = None

            # If we have token IDs for the response and end, then we can find the tokens and only decode between them.
            if response_key_token_id and end_key_token_id:
                # Find where "### Response:" first appears in the generated tokens. Since it is part of the
                # prompt, we should definitely find it. We will return the tokens found after this token.
                try:
                    response_pos = sequence.index(response_key_token_id)
                except ValueError:
                    logger.warning(f"Could not find response key {response_key_token_id} in: {sequence}")
                    response_pos = None

                if response_pos is not None:
                    # Next find where "### End" is located. The model has been trained to end its responses with
                    # this sequence (or rather, the token ID it maps to, since it is a special token). We may not
                    # find this token, as the response could be truncated. If we don't find it, just return
                    # everything to the end. Note that even though we set eos_token_id, we still see this token
                    # at the end.
                    try:
                        end_pos = sequence.index(end_key_token_id)
                    except ValueError:
                        end_pos = None

                    decoded = self.tokenizer.decode(sequence[response_pos + 1 : end_pos]).strip()

            if not decoded:
                # Otherwise we'll decode everything and use a regex to find the response and end.
                fully_decoded = self.tokenizer.decode(sequence)

                # The response appears after "### Response:". The model has been trained to append "### End" at
                # the end.
                m = re.search(r"#+\s*Response:\s*(.+?)#+\s*End", fully_decoded, flags=re.DOTALL)

                if m:
                    decoded = m.group(1).strip()
                else:
                    # The model might not generate the "### End" sequence before reaching the max tokens. In this
                    # case, return everything after "### Response:".
                    m = re.search(r"#+\s*Response:\s*(.+)", fully_decoded, flags=re.DOTALL)
                    if m:
                        decoded = m.group(1).strip()
                    else:
                        logger.warning(f"Failed to find response in:\n{fully_decoded}")

            # If the full text is requested, then append the decoded text to the original instruction.
            # This technically isn't the full text, as we format the instruction in the prompt the model has been
            # trained on, but to the client it will appear to be the full text.
            if return_full_text:
                decoded = f"{instruction_text}\n{decoded}"

            rec = {"generated_text": decoded}
            records.append(rec)

        return records
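
For context, here is a minimal usage sketch of the pipeline this commit removes. This is an illustration, not part of the commit: the checkpoint name is only an example of a model trained on this prompt format, and any compatible causal LM and tokenizer pair should work.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Checkpoint name is illustrative; substitute any model trained on this instruction format.
tokenizer = AutoTokenizer.from_pretrained("databricks/dolly-v2-3b")
model = AutoModelForCausalLM.from_pretrained("databricks/dolly-v2-3b", torch_dtype=torch.bfloat16)

generate_text = InstructionTextGenerationPipeline(model=model, tokenizer=tokenizer)
res = generate_text("Explain the difference between a list and a tuple in Python.")
print(res[0]["generated_text"])

When the response/end keys are not registered as special tokens, postprocess() falls back to the regexes; on a decoded sample they behave like this:

import re

sample = "### Instruction:\nSay hi.\n\n### Response:\nHi there!### End"
m = re.search(r"#+\s*Response:\s*(.+?)#+\s*End", sample, flags=re.DOTALL)
print(m.group(1).strip())  # -> Hi there!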