# NOTE(review): the following was non-Python dataset-export residue at the top of
# this chunk ("text", "stringlengths", "0", "15.3k") — commented out so the
# module parses; it carried no program content.
try:
import anthropic
except ModuleNotFoundError:
raise Exception("attempted to use 'anthropic' LM type, but package `anthropic` is not installed. please install anthropic via `pip install 'lm-eval[anthropic]'` or `pip install -e '.[anthropic]'`")
def _exception_callback(e: Exception, sleep_time: float) -> None:
eval_logger.warning(f'RateLimitError occurred: {e.__cause__}\n Retrying in {sleep_time} seconds')
@retry_on_specific_exceptions(on_exceptions=[anthropic.RateLimitError, anthropic.APIConnectionError, anthropic.APIStatusError], max_retries=None, on_exception_callback=_exception_callback)
def messages():
response = client.messages.create(model=model, max_tokens=max_tokens, temperature=temperature, messages=[{'role': 'user', 'content': f'{prompt}'}], **kwargs)
return response.content[0].text
return messages()
@register_model('anthropic')
class AnthropicLM(LM):
    """lm-eval adapter for Anthropic's completions API.

    Only text generation (`generate_until`) is supported: the Anthropic API
    does not expose logits, so every loglikelihood/logit path raises
    NotImplementedError.
    """

    REQ_CHUNK_SIZE = 20  # kept for interface parity; requests are issued one at a time

    def __init__(self, batch_size: int=1, model: str='claude-2.0', max_tokens_to_sample: int=256, temperature: float=0, **kwargs) -> None:
        """Anthropic API wrapper.

        :param batch_size: accepted for harness-interface compatibility; unused here
        :param model: Anthropic model identifier, e.g. 'claude-2.0'
        :param max_tokens_to_sample: default per-request generation budget
        :param temperature: default sampling temperature
        :param kwargs: extra keyword args forwarded verbatim to the completion call
        """
        super().__init__()
        try:
            import anthropic
        except ModuleNotFoundError as err:
            # chain the cause so the original import failure stays visible
            raise Exception("attempted to use 'anthropic' LM type, but package `anthropic` is not installed. please install anthropic via `pip install 'lm-eval[anthropic]'` or `pip install -e '.[anthropic]'`") from err
        self.model = model
        # anthropic.Anthropic() reads ANTHROPIC_API_KEY from the environment
        self.client = anthropic.Anthropic()
        self.temperature = temperature
        self.max_tokens_to_sample = max_tokens_to_sample
        self.tokenizer = self.client.get_tokenizer()
        self.kwargs = kwargs

    @property
    def eot_token_id(self):
        # Anthropic does not document its tokenizer's special tokens.
        raise NotImplementedError('No idea about anthropic tokenization.')

    @property
    def max_length(self) -> int:
        # Conservative context-window size assumed for all models.
        return 2048

    @property
    def max_gen_toks(self) -> int:
        # Default generation budget, as configured at construction time.
        return self.max_tokens_to_sample

    @property
    def batch_size(self):
        # Logit-based batching is meaningless for a completions-only API.
        raise NotImplementedError('No support for logits.')

    @property
    def device(self):
        raise NotImplementedError('No support for logits.')

    def tok_encode(self, string: str) -> List[int]:
        """Encode `string` to token ids with the client-provided tokenizer."""
        return self.tokenizer.encode(string).ids

    def tok_decode(self, tokens: List[int]) -> str:
        """Decode token ids back into text."""
        return self.tokenizer.decode(tokens)

    def _loglikelihood_tokens(self, requests, disable_tqdm: bool=False):
        raise NotImplementedError('No support for logits.')

    def generate_until(self, requests, disable_tqdm: bool=False) -> List[str]:
        """Generate one completion per request, sequentially.

        Each request's args are `(prompt, request_args)`; `request_args` may
        override 'until', 'max_gen_toks', and 'temperature'. On a connection
        or API-status error the loop stops early and the results collected so
        far are returned.
        """
        try:
            import anthropic
        except ModuleNotFoundError as err:
            raise Exception("attempted to use 'anthropic' LM type, but package `anthropic` is not installed. please install anthropic via `pip install 'lm-eval[anthropic]'` or `pip install -e '.[anthropic]'`") from err
        if not requests:
            return []
        _requests: List[Tuple[str, dict]] = [req.args for req in requests]
        res = []
        for request in tqdm(_requests, disable=disable_tqdm):
            try:
                inp = request[0]
                request_args = request[1]
                until = request_args.get('until')
                # BUGFIX: default the generation budget to the configured
                # max_gen_toks (i.e. max_tokens_to_sample), not the context
                # length — max_length is the context window, not a sampling cap.
                max_gen_toks = request_args.get('max_gen_toks', self.max_gen_toks)
                temperature = request_args.get('temperature', self.temperature)
                response = anthropic_completion(client=self.client, model=self.model, prompt=inp, max_tokens_to_sample=max_gen_toks, temperature=temperature, stop=until, **self.kwargs)
                res.append(response)
                self.cache_hook.add_partial('generate_until', request, response)
            except anthropic.APIConnectionError as e:
                eval_logger.critical(f'Server unreachable: {e.__cause__}')
                break
            except anthropic.APIStatusError as e:
                eval_logger.critical(f'API error {e.status_code}: {e.message}')
                break
        return res

    def _model_call(self, inps):
        # Part of the HFLM-style interface; never applicable to an API model.
        raise NotImplementedError()

    def _model_generate(self, context, max_length, eos_token_id):
        raise NotImplementedError()

    def loglikelihood(self, requests, disable_tqdm: bool=False):
        raise NotImplementedError('No support for logits.')

    def loglikelihood_rolling(self, requests, disable_tqdm: bool=False):
        raise NotImplementedError('No support for logits.')
@register_model('anthropic-chat', 'anthropic-chat-completions')