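# Excerpt from a vLLM-backed language-model wrapper. The fragment matches the
# layout of lm-evaluation-harness's `lm_eval.models.vllm_causallms.VLLM`; the
# imports and the class line below are reconstructed from names used in the
# fragment and are an assumption, not part of the original extract.
from typing import Dict, List, Optional, Union

import ray
from more_itertools import distribute
from tqdm import tqdm
from vllm import LLM, SamplingParams

from lm_eval.api.instance import Instance
from lm_eval.api.model import TemplateLM
from lm_eval.models.utils import undistribute
from lm_eval.utils import get_rolling_token_windows, make_disjoint_window


class VLLM(TemplateLM):
    # (tail of __init__; the signature and earlier body are not in this extract)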
        self.lora_request = None
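
    # End-of-text token: the underlying tokenizer exposes it as `eos_token_id`.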
    @property
    def eot_token_id(self):
        return self.tokenizer.eos_token_id
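
    # Prefix token prepended to rolling windows: prefer an explicit override,
    # then BOS, then fall back to EOS.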
    @property
    def prefix_token_id(self):
        if self.custom_prefix_token_id is not None:
            return self.custom_prefix_token_id
        if self.tokenizer.bos_token_id is not None:
            return self.tokenizer.bos_token_id
        return self.tokenizer.eos_token_id
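
    # Maximum context length. With a single engine we can ask vLLM directly;
    # under data parallelism the engines live in worker processes, so fall
    # back to the HF model config, then the tokenizer, then a default.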
    @property
    def max_length(self):
        if self._max_length:
            return self._max_length
        if self.data_parallel_size <= 1:
            return self.model.llm_engine.model_config.max_model_len
        else:
            seqlen_config_attrs = ('n_positions', 'max_position_embeddings', 'n_ctx')
            for attr in seqlen_config_attrs:
                if hasattr(self._config, attr):
                    return getattr(self._config, attr)
            if hasattr(self.tokenizer, 'model_max_length'):
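                # HF tokenizers report this sentinel (int(1e30), the library's
                # VERY_LARGE_INTEGER) when no real limit is configured.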
                if self.tokenizer.model_max_length == 1000000000000000019884624838656:
                    return self._DEFAULT_MAX_LENGTH
                return self.tokenizer.model_max_length
            return self._DEFAULT_MAX_LENGTH

    @property
    def max_gen_toks(self):
        return self._max_gen_toks
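
    # Render a chat history into a single prompt string via the tokenizer's
    # chat template, leaving the assistant turn open for generation.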
    def apply_chat_template(self, chat_history: List[Dict[str, str]]) -> str:
        return self.tokenizer.apply_chat_template(
            chat_history, tokenize=False, add_generation_prompt=True
        )
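
    # Note: `default_chat_template` was deprecated and later removed in newer
    # `transformers` releases; this fallback assumes an older version.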
    @property
    def chat_template(self) -> str:
        if self.tokenizer.chat_template is not None:
            return self.tokenizer.chat_template
        return self.tokenizer.default_chat_template
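
    # Filesystem-safe tokenizer identifier (slashes would otherwise create
    # nested directories in cache paths).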
    @property
    def tokenizer_name(self) -> str:
        return self.tokenizer.name_or_path.replace('/', '__')
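
    # Encode one string or a batch of strings. `left_truncate_len` keeps only
    # the rightmost tokens so the end of the context is preserved.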
    def tok_encode(
        self,
        string: Union[str, List[str]],
        left_truncate_len: Optional[int] = None,
        add_special_tokens: bool = False,
        truncation: bool = False,
    ) -> Union[List[int], List[List[int]]]:
        if not add_special_tokens:
            add_special_tokens = self.add_bos_token
        encoding: Union[List[List[int]], List[int]] = self.tokenizer(
            string, add_special_tokens=add_special_tokens,
            truncation=truncation, return_attention_mask=False,
        ).input_ids
        if left_truncate_len:
            if not isinstance(string, str):
                encoding = [enc[-left_truncate_len:] for enc in encoding]
            else:
                encoding = encoding[-left_truncate_len:]
        return encoding
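
    # Run the vLLM engine on pre-tokenized prompts. `generate=True` samples
    # continuations; otherwise only prompt logprobs are needed for scoring.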
    def _model_generate(self, requests: Optional[List[List[int]]] = None,
                        generate: bool = False, max_tokens: Optional[int] = None,
                        stop: Optional[List[str]] = None, **kwargs):
        if generate:
            kwargs = self.modify_gen_kwargs(kwargs)
            sampling_params = SamplingParams(max_tokens=max_tokens, stop=stop, **kwargs)
        else:
            sampling_params = SamplingParams(
                temperature=0, prompt_logprobs=1, max_tokens=1, detokenize=False
            )
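        # Data-parallel path: spin up one vLLM engine per Ray worker, shard
        # the requests round-robin, then restore the original request order.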
        if self.data_parallel_size > 1:

            @ray.remote
            def run_inference_one_model(model_args: dict, sampling_params, requests: List[List[int]]):
                llm = LLM(**model_args)
                return llm.generate(prompt_token_ids=requests, sampling_params=sampling_params)

            requests = [list(x) for x in distribute(self.data_parallel_size, requests)]
            inputs = ((self.model_args, sampling_params, req) for req in requests)
            object_refs = [run_inference_one_model.remote(*x) for x in inputs]
            results = ray.get(object_refs)
            ray.shutdown()
            return undistribute(results)
        outputs = self.model.generate(
            prompt_token_ids=requests,
            sampling_params=sampling_params,
            use_tqdm=self.batch_size == 'auto',
            lora_request=self.lora_request,
        )
        return outputs
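
    # Perplexity-style scoring over long strings: split each token sequence
    # into overlapping windows no longer than the model context, score each
    # window, and sum the per-window log-likelihoods.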
    def loglikelihood_rolling(self, requests: List[Instance], disable_tqdm: bool = False) -> List[float]:
        loglikelihoods = []
        for (string,) in tqdm([req.args for req in requests], disable=disable_tqdm):
            rolling_token_windows = list(
                map(
                    make_disjoint_window,
                    get_rolling_token_windows(
                        token_list=self.tok_encode(string),
                        prefix_token=self.eot_token_id,
                        max_seq_len=self.max_length - 1,
                        context_len=1,
                    ),
                )
            )
            rolling_token_windows = [(None,) + x for x in rolling_token_windows]
            string_nll = self._loglikelihood_tokens(rolling_token_windows)
            string_nll = [x[0] for x in string_nll]
            loglikelihoods.append(sum(string_nll))
        return loglikelihoods
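
    # Batched generation: encode all contexts up front and pair each with its
    # per-request generation kwargs.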
    def generate_until(self, requests: List[Instance], disable_tqdm: bool = False) -> List[str]:
        res = []
        context, all_gen_kwargs = zip(*(req.args for req in requests))
        context_encoding: List[List[int]] = self.tok_encode(context, add_special_tokens=self.add_bos_token)
        requests = [((a, b), c) for a, b, c in zip(context, context_encoding, all_gen_kwargs)]
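        # ... (method continues; the remainder is not included in this extract)


# Minimal usage sketch (hypothetical wiring; the constructor argument and the
# pre-built request lists are assumptions, not part of the extract):
#
#   lm = VLLM(pretrained='meta-llama/Llama-2-7b-hf')
#   ppl_scores = lm.loglikelihood_rolling(rolling_requests)
#   generations = lm.generate_until(generation_requests)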