def oa_completion(client, chat: bool = False, **kwargs):
    """Query an OpenAI-compatible API for a (chat) completion, retrying
    indefinitely on `openai.OpenAIError`."""

    @retry_on_specific_exceptions(
        on_exceptions=[openai.OpenAIError],
        max_retries=None,  # retry forever
        on_exception_callback=_exception_callback,
    )
    def completion():
        if chat:
            return client.chat.completions.create(**kwargs)
        else:
            return client.completions.create(**kwargs)

    return completion()
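# A minimal usage sketch, not part of the harness: the model name and prompt
# below are illustrative, and `client` is assumed to be an `openai.OpenAI`
# instance with a valid API key.
#
#   response = oa_completion(
#       client=client,
#       model='davinci-002',
#       prompt=['The capital of France is'],
#       max_tokens=8,
#       temperature=0.0,
#   )
#   print(response.choices[0].text)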
@register_model('openai-completions', 'local-completions')
class OpenaiCompletionsLM(TemplateLM):
    _DEFAULT_MAX_LENGTH = 2048

    def __init__(
        self,
        model: str,
        base_url: str = None,
        tokenizer: Optional[str] = None,
        tokenizer_backend: Literal['tiktoken', 'huggingface'] = 'tiktoken',
        truncate: bool = False,
        max_gen_toks: int = 256,
        batch_size: int = 1,
        seed: int = 1234,
        max_length: Optional[int] = None,
    ) -> None:
        super().__init__()
        self.seed = seed
        try:
            import openai
            import tiktoken
        except ModuleNotFoundError:
            raise Exception(
                "attempted to use 'openai' LM type, but the packages `openai` and "
                "`tiktoken` are not installed. Please install them via "
                '`pip install lm-eval[openai]` or `pip install -e ."[openai]"`'
            )
        self.model = model
        self.base_url = base_url
        self.tokenizer_backend = tokenizer_backend
        self.truncate = truncate
        self._batch_size = int(batch_size)
        self._max_gen_toks = max_gen_toks
        self._max_length = max_length
        if self.tokenizer_backend == 'huggingface':
            import transformers

            self.tokenizer = transformers.AutoTokenizer.from_pretrained(
                tokenizer if tokenizer else self.model
            )
            self.vocab_size = self.tokenizer.vocab_size
            self.end_of_text_token_id = self.tokenizer.eos_token_id
        elif self.tokenizer_backend == 'tiktoken':
            if self.base_url:
                eval_logger.warning(
                    f'Passed `base_url={self.base_url}` but using Tiktoken tokenizer backend. '
                    'Pass `tokenizer_backend=huggingface` and provide the HF tokenizer name '
                    'if your model does not use Tiktoken.'
                )
            self.tokenizer = tiktoken.encoding_for_model(self.model)
            self.vocab_size = self.tokenizer.n_vocab
            self.end_of_text_token_id = self.tokenizer.eot_token
        else:
            raise ValueError(
                "Expected tokenizer_backend to be one of ['tiktoken', 'huggingface'] "
                f'but got {self.tokenizer_backend}'
            )
        # read the API key from the environment; raises KeyError if unset
        openai.api_key = os.environ['OPENAI_API_KEY']
        if self.base_url:
            self.client = openai.OpenAI(base_url=self.base_url)
        else:
            self.client = openai.OpenAI()
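    # A minimal construction sketch: the model name and endpoint below are
    # illustrative, and `OPENAI_API_KEY` must be set in the environment first.
    #
    #   lm = OpenaiCompletionsLM(model='davinci-002', batch_size=4)
    #
    #   # or, against a local OpenAI-compatible server with an HF tokenizer:
    #   lm = OpenaiCompletionsLM(
    #       model='my-local-model',               # hypothetical model name
    #       base_url='http://localhost:8000/v1',  # hypothetical endpoint
    #       tokenizer_backend='huggingface',
    #   )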
    @property
    def eot_token_id(self):
        return self.end_of_text_token_id

    @property
    def max_length(self) -> int:
        if self._max_length:
            return self._max_length
        else:
            return self._DEFAULT_MAX_LENGTH

    @property
    def max_gen_toks(self) -> int:
        return self._max_gen_toks

    @property
    def batch_size(self) -> int:
        return self._batch_size

    @property
    def device(self):
        # a remote API has no local device
        raise NotImplementedError()

    def tok_encode(self, string: str, **kwargs) -> List[int]:
        return self.tokenizer.encode(string)

    def tok_decode(self, tokens: List[int]) -> str:
        return self.tokenizer.decode(tokens)
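    # Round-trip sketch (illustrative): `tok_encode` returns token ids from
    # whichever tokenizer backend is active, and `tok_decode` inverts it.
    #
    #   ids = lm.tok_encode('Hello world')   # -> list of token ids
    #   assert lm.tok_decode(ids) == 'Hello world'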
    def _loglikelihood_tokens(
        self, requests, disable_tqdm: bool = False
    ) -> List[Tuple[float, bool]]:
        res = []

        def _collate(x):
            # sort by descending total token length so similarly sized
            # requests are batched together
            toks = x[1] + x[2]
            return (-len(toks), tuple(toks))

        re_ord = utils.Reorderer(requests, _collate)

        for chunk in tqdm(
            list(lm_eval.models.utils.chunks(re_ord.get_reordered(), self.batch_size)),
            disable=disable_tqdm,
        ):
            inps = []
            ctxlens = []
            for cache_key, context_enc, continuation_enc in chunk:
                # keep at most max_length + 1 tokens: each token's logprob is
                # conditioned on at most max_length preceding tokens
                inp = (context_enc + continuation_enc)[-(self.max_length + 1):]
                # how many context tokens survive truncation; logprobs before
                # this index belong to the context, not the continuation
                ctxlen = len(context_enc) - max(
                    0, len(context_enc) + len(continuation_enc) - (self.max_length + 1)
                )
                inps.append(inp)
                ctxlens.append(ctxlen)
            response = oa_completion(
                client=self.client,
                model=self.model,
                prompt=inps,
                echo=True,  # with max_tokens=0, logprobs come from the echoed prompt
                max_tokens=0,
                temperature=0.0,
                logprobs=10,
                seed=self.seed,
            )
            for resp, ctxlen, (cache_key, context_enc, continuation_enc) in zip(
                response.choices, ctxlens, chunk
            ):
                # ctxlen marks where the continuation's logprobs begin
                answer = get_result(resp, ctxlen)
                res.append(answer)

                # partial caching
                if cache_key is not None:
                    self.cache_hook.add_partial('loglikelihood', cache_key, answer)

        return re_ord.get_original(res)
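    # Result sketch: for a request whose tokenized context is `ctx_ids` and
    # continuation is `cont_ids`, the returned pair is (sum of continuation
    # token logprobs, whether the continuation was the greedy decode).
    #
    #   results = lm._loglikelihood_tokens(
    #       [((ctx_str, cont_str), ctx_ids, cont_ids)]  # names are illustrative
    #   )
    #   logprob, is_greedy = results[0]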
    def generate_until(self, requests, disable_tqdm: bool = False) -> List[str]: