"""Preprocessor for codegen-350M-mono-gptj."""
import csv
import json
from pathlib import Path
from typing import Any, Dict, List, Tuple, Union

import numpy as np
import torch
import triton_python_backend_utils as pb_utils
from torch.nn.utils.rnn import pad_sequence
from transformers import AutoTokenizer
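# END_ID is <|endoftext|> in the GPT-2 BPE vocabulary used by the CodeGen
# tokenizer; it doubles as the padding value in `_create_request` below.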
END_ID = 50256
class TritonPythonModel:
"""Preprocessor for codegen-350M-mono-gptj."""
    def initialize(self, args: Dict[str, Any]) -> None:
        """Initialize any state associated with this model.

        `initialize` is called only once when the model is being loaded.
        Implementing `initialize` is optional.

        Args:
            args: A dictionary whose keys and values are both strings:
                * model_config: A JSON string containing the model configuration
                * model_instance_kind: A string containing the model instance kind
                * model_instance_device_id: A string containing the model instance device ID
                * model_repository: Model repository path
                * model_version: Model version
                * model_name: Model name
        """
        # Parse the model config.
        self.model_config = model_config = json.loads(args["model_config"])

        # Parse the model output configs and convert Triton types to numpy types.
        input_names = [
            "INPUT_ID",
            "REQUEST_INPUT_LEN",
            "BAD_WORDS_IDS",
            "STOP_WORDS_IDS",
        ]
        for input_name in input_names:
            setattr(
                self,
                input_name.lower() + "_dtype",
                pb_utils.triton_string_to_numpy(
                    pb_utils.get_output_config_by_name(model_config, input_name)[
                        "data_type"
                    ]
                ),
            )
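        # After this loop, self.input_id_dtype, self.request_input_len_dtype,
        # self.bad_words_ids_dtype and self.stop_words_ids_dtype hold the
        # numpy dtypes declared for the corresponding outputs in the model config.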
        cur_folder = Path(__file__).parent
        cache_dir = cur_folder / ".cache"
        self.tokenizer = AutoTokenizer.from_pretrained(
            "Salesforce/codegen-350M-mono", cache_dir=cache_dir
        )
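        # The tokenizer files are cached in `.cache` next to this model.py so
        # later loads do not need to download them again.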
    def execute(
        self, requests: List["pb_utils.InferenceRequest"]
    ) -> List["pb_utils.InferenceResponse"]:
        """Preprocess the requests.

        `execute` must be implemented in every Python model. It receives a
        list of pb_utils.InferenceRequest as its only argument and is called
        when an inference is requested for this model. Depending on the
        batching configuration used (e.g. dynamic batching), `requests` may
        contain multiple requests. Every Python model must create one
        pb_utils.InferenceResponse for every pb_utils.InferenceRequest in
        `requests`. If there is an error, you can set the error argument when
        creating a pb_utils.InferenceResponse.

        Args:
            requests: A list of pb_utils.InferenceRequest

        Returns:
            A list of pb_utils.InferenceResponse with the same length as
            `requests`.
        """
        responses = []
        # Every Python backend must iterate over every one of the requests
        # and create a pb_utils.InferenceResponse for each of them.
        for request in requests:
            # Get input tensors.
            query = pb_utils.get_input_tensor_by_name(request, "QUERY").as_numpy()
            request_output_len = pb_utils.get_input_tensor_by_name(
                request, "REQUEST_OUTPUT_LEN"
            ).as_numpy()
            bad_words_dict = pb_utils.get_input_tensor_by_name(
                request, "BAD_WORDS_DICT"
            ).as_numpy()
            stop_words_dict = pb_utils.get_input_tensor_by_name(
                request, "STOP_WORDS_DICT"
            ).as_numpy()

            # Preprocess the input data.
            input_id, request_input_len = self._create_request(query)
            bad_words = self._create_word_list(bad_words_dict)
            stop_words = self._create_word_list(stop_words_dict)
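            # input_id: (batch, max_input_len) token ids padded with END_ID;
            # request_input_len: (batch, 1) true prompt lengths;
            # bad_words / stop_words: (batch, 2, width) int32 arrays in the
            # [flat token ids, cumulative offsets] layout built by
            # `_create_word_list`.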
            # Create output tensors. You need pb_utils.Tensor
            # objects to create pb_utils.InferenceResponse.
            input_id_tensor = pb_utils.Tensor(
                "INPUT_ID", np.array(input_id).astype(self.input_id_dtype)
            )
            request_input_len_tensor = pb_utils.Tensor(
                "REQUEST_INPUT_LEN",
                np.array(request_input_len).astype(self.request_input_len_dtype),
            )
            request_output_len_tensor = pb_utils.Tensor(
                "REQUEST_OUTPUT_LEN", request_output_len
            )
            bad_words_ids_tensor = pb_utils.Tensor("BAD_WORDS_IDS", bad_words)
            stop_words_ids_tensor = pb_utils.Tensor("STOP_WORDS_IDS", stop_words)
            # Create the InferenceResponse. You can set an error here in case
            # there was a problem with handling this inference request, for
            # example:
            #
            # pb_utils.InferenceResponse(
            #     output_tensors=..., error=pb_utils.TritonError("An error occurred"))
            inference_response = pb_utils.InferenceResponse(
                output_tensors=[
                    input_id_tensor,
                    bad_words_ids_tensor,
                    stop_words_ids_tensor,
                    request_input_len_tensor,
                    request_output_len_tensor,
                ]
            )
            responses.append(inference_response)

        # Return a list of pb_utils.InferenceResponse whose length matches
        # the length of `requests`.
        return responses
    def finalize(self) -> None:
        """Unload the model.

        `finalize` is called only once when the model is being unloaded.
        Implementing `finalize` is optional. It allows the model to perform
        any necessary clean-ups before exit.
        """
        print("Cleaning up...")
    def _create_request(self, query: np.ndarray) -> Tuple[torch.Tensor, torch.Tensor]:
        """Encode a batch of query strings into model inputs.

        Args:
            query: batch of UTF-8 encoded prompts (2D numpy byte array).
        """
        start_ids = [
            torch.IntTensor(self.tokenizer.encode(s[0].decode())) for s in query
        ]
        start_lengths = torch.IntTensor([[len(ids)] for ids in start_ids])
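        # Right-pad every prompt to the longest prompt in the batch with
        # END_ID; the true lengths are returned separately so downstream
        # models can ignore the padding.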
        start_ids = pad_sequence(start_ids, batch_first=True, padding_value=END_ID)
        return start_ids, start_lengths
    def _create_word_list(self, word_dict: np.ndarray) -> np.ndarray:
        """Encode a batch of comma-separated word lists into ids and offsets."""
        flat_ids = []
        offsets = []
        for word_dict_item in word_dict:
            item_flat_ids = []
            item_offsets = []
            words = list(csv.reader([word_dict_item[0].decode()]))[0]
            for word in words:
                ids = self._encode(word)
                if len(ids) == 0:
                    continue
                item_flat_ids += ids
                item_offsets.append(len(ids))
            flat_ids.append(np.array(item_flat_ids))
            offsets.append(np.cumsum(np.array(item_offsets)))
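        # Pad every item to the same width so the batch can be stacked into a
        # single (batch, 2, pad_to) int32 array: row 0 holds the flattened
        # token ids, row 1 the cumulative word offsets (unused slots are -1).
        # Hypothetical example: if "foo" encoded to [101, 102] and "bar" to
        # [103], the entry "foo,bar" would give flat_ids [101, 102, 103] and
        # offsets [2, 3] before padding.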
        pad_to = max(1, max(len(ids) for ids in flat_ids))
        for i, (ids, offs) in enumerate(zip(flat_ids, offsets)):
            flat_ids[i] = np.pad(ids, (0, pad_to - len(ids)), constant_values=0)
            offsets[i] = np.pad(offs, (0, pad_to - len(offs)), constant_values=-1)
        return np.array([flat_ids, offsets], dtype="int32").transpose((1, 0, 2))
    def _encode(self, sentence: Union[str, bytes]) -> List[int]:
        """Encode a sentence into token ids."""
        sentence = sentence.decode() if isinstance(sentence, bytes) else sentence
        return self.tokenizer.encode(sentence)
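# Expected Triton I/O (from the calls above): inputs QUERY, REQUEST_OUTPUT_LEN,
# BAD_WORDS_DICT and STOP_WORDS_DICT; outputs INPUT_ID, REQUEST_INPUT_LEN,
# BAD_WORDS_IDS, STOP_WORDS_IDS plus the passed-through REQUEST_OUTPUT_LEN.
# The data types of the first four outputs must be declared in this model's
# config, since `initialize` reads them via get_output_config_by_name.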