"""Postprocessor for codegen-350M-mono-gptj."""

import json
from pathlib import Path
from typing import Any, Dict, List

import numpy as np
import triton_python_backend_utils as pb_utils
from transformers import AutoTokenizer


class TritonPythonModel:
    """Postprocessor for codegen-350M-mono-gptj."""
|
    def initialize(self, args: Dict[str, Any]) -> None:
        """`initialize` is called only once when the model is being loaded.

        Implementing the `initialize` function is optional. This function
        allows the model to initialize any state associated with this model.

        Args:
            args: Both keys and values are strings. The dictionary keys and
                values are:
                * model_config: A JSON string containing the model configuration
                * model_instance_kind: A string containing model instance kind
                * model_instance_device_id: A string containing model instance device ID
                * model_repository: Model repository path
                * model_version: Model version
                * model_name: Model name
        """
        self.model_config = model_config = json.loads(args["model_config"])

        # Look up the configured datatype of the "OUTPUT" tensor so the
        # decoded strings can be cast to it in `execute`.
        output_config = pb_utils.get_output_config_by_name(model_config, "OUTPUT")
        self.output_dtype = pb_utils.triton_string_to_numpy(output_config["data_type"])

        # Cache the tokenizer files next to this model file so reloads do not
        # have to re-download them.
        cur_folder = Path(__file__).parent
        cache_dir = cur_folder / ".cache"
        self.tokenizer = AutoTokenizer.from_pretrained(
            "Salesforce/codegen-350M-mono", cache_dir=cache_dir
        )
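        # For reference, a minimal sketch of the config.pbtxt entries this
        # model relies on. The dtypes and dims shown here are assumptions; the
        # code above only requires that an output named "OUTPUT" exists, and
        # `execute` below reads an input named "TOKENS_BATCH":
        #
        #     input [{ name: "TOKENS_BATCH", data_type: TYPE_INT32, dims: [ -1, -1 ] }]
        #     output [{ name: "OUTPUT", data_type: TYPE_STRING, dims: [ -1, -1 ] }]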
|
    def execute(
        self, requests: List["pb_utils.InferenceRequest"]
    ) -> List["pb_utils.InferenceResponse"]:
        """Postprocess the requests.

        `execute` must be implemented in every Python model. The `execute`
        function receives a list of pb_utils.InferenceRequest as its only
        argument. This function is called when an inference is requested
        for this model. Depending on the batching configuration (e.g. dynamic
        batching) used, `requests` may contain multiple requests. Every
        Python model must create one pb_utils.InferenceResponse for every
        pb_utils.InferenceRequest in `requests`. If there is an error, you can
        set the error argument when creating a pb_utils.InferenceResponse.

        Args:
            requests: A list of pb_utils.InferenceRequest.

        Returns:
            A list of pb_utils.InferenceResponse. The length of this list must
            be the same as `requests`.
        """
        responses = []

        for request in requests:
            tokens_batch = pb_utils.get_input_tensor_by_name(
                request, "TOKENS_BATCH"
            ).as_numpy()
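            # `tokens_batch` is expected to have shape
            # [batch_size, beam_width, seq_len]: the nested loop in
            # `_postprocessing` assumes exactly two leading dimensions.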
            outputs = self._postprocessing(tokens_batch)

            output_tensor = pb_utils.Tensor(
                "OUTPUT", np.array(outputs).astype(self.output_dtype)
            )

            inference_response = pb_utils.InferenceResponse(
                output_tensors=[output_tensor]
            )
            responses.append(inference_response)

        return responses
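    # On failure, the Python backend also allows returning an error in place
    # of output tensors, as the docstring above notes; a minimal sketch (the
    # message text is illustrative):
    #
    #     error = pb_utils.TritonError("postprocessing failed")
    #     responses.append(
    #         pb_utils.InferenceResponse(output_tensors=[], error=error)
    #     )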
|
    def finalize(self) -> None:
        """Clean up the model.

        `finalize` is called only once when the model is being unloaded.
        Implementing the `finalize` function is optional. This function allows
        the model to perform any necessary cleanups before exit.
        """
        print("Cleaning up...")
|
    def _postprocessing(self, tokens_batch: np.ndarray) -> List[str]:
        """Decode each beam of each batch entry back into text.

        `tokenizer.decode` returns `str`, so the result is a flat list of
        strings, one per (batch entry, beam) pair.
        """
        outputs = []
        for beam_tokens in tokens_batch:
            for tokens in beam_tokens:
                outputs.append(self.tokenizer.decode(tokens))
        return outputs
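    # NOTE: `decode` is called with its defaults above; passing
    # `skip_special_tokens=True` would strip end-of-text tokens from the
    # completions (a possible tweak, not part of the original behavior).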
|