text
stringlengths
0
15.3k
class AnthropicChatLM(AnthropicLM):
    # Number of requests grouped per chunk (kept for parity with sibling LM classes).
    REQ_CHUNK_SIZE = 20

    def __init__(self, model: str, batch_size: int=1, max_tokens: int=256, temperature: float=0, **kwargs) -> None:
        """Wrapper around the Anthropic chat (Messages) API.

        :param model: Anthropic model identifier passed through to the client.
        :param batch_size: accepted for interface compatibility; requests are
            sent one at a time in ``generate_until``.
        :param max_tokens: default maximum number of tokens to generate per request.
        :param temperature: default sampling temperature (0 = greedy-like).
        :param kwargs: extra keyword arguments forwarded verbatim to every API call.
        """
        super().__init__()
        try:
            import anthropic
        except ModuleNotFoundError:
            raise Exception("attempted to use 'anthropic' LM type, but package `anthropic` is not installed. please install anthropic via `pip install 'lm-eval[anthropic]'` or `pip install -e '.[anthropic]'`")
        self.model = model
        self.client = anthropic.Anthropic()
        self.temperature = temperature
        self.max_tokens = max_tokens
        self.tokenizer = self.client.get_tokenizer()
        self.kwargs = kwargs

    @property
    def max_gen_toks(self) -> int:
        # Default generation budget for requests that do not override it.
        return self.max_tokens

    def generate_until(self, requests) -> List[str]:
        """Generate a completion for each request via the Anthropic chat API.

        :param requests: sequence of request objects whose ``.args`` is a
            ``(prompt, request_args)`` tuple; ``request_args`` may carry
            ``until``, ``max_gen_toks`` and ``temperature`` overrides.
        :return: list of generated strings, one per request. On an API
            connection/status error the loop stops early and the partial
            results gathered so far are returned.
        """
        try:
            import anthropic
        except ModuleNotFoundError:
            raise Exception("attempted to use 'anthropic' LM type, but package `anthropic` is not installed. please install anthropic via `pip install 'lm-eval[anthropic]'` or `pip install -e '.[anthropic]'`")
        if not requests:
            return []
        _requests: List[Tuple[str, dict]] = [req.args for req in requests]
        res = []
        for request in tqdm(_requests):
            try:
                inp = request[0]
                request_args = request[1]
                until = request_args.get('until')
                # Fall back to the class's own default generation budget
                # (self.max_gen_toks == self.max_tokens), not self.max_length:
                # max_length is the context-window size and would request far
                # more generated tokens than intended.
                max_tokens = request_args.get('max_gen_toks', self.max_gen_toks)
                temperature = request_args.get('temperature', self.temperature)
                response = anthropic_chat(client=self.client, model=self.model, prompt=inp, max_tokens=max_tokens, temperature=temperature, stop=until, **self.kwargs)
                res.append(response)
                self.cache_hook.add_partial('generate_until', request, response)
            except anthropic.APIConnectionError as e:
                eval_logger.critical(f'Server unreachable: {e.__cause__}')
                break
            except anthropic.APIStatusError as e:
                eval_logger.critical(f'API error {e.status_code}: {e.message}')
                break
        return res
# File: lm-evaluation-harness-main/lm_eval/models/dummy.py
import random
from tqdm import tqdm
from lm_eval.api.model import LM
from lm_eval.api.registry import register_model
@register_model('dummy')
class DummyLM(LM):
    """Trivial stand-in LM producing canned outputs, for smoke-testing harness plumbing."""

    def __init__(self) -> None:
        super().__init__()

    @classmethod
    def create_from_arg_string(cls, arg_string, additional_config=None):
        # No configurable options: any argument string yields the same dummy model.
        return cls()

    def loglikelihood(self, requests, disable_tqdm: bool=False):
        # One (negative random logprob, is_greedy=False) pair per request.
        return [
            (-random.random(), False)
            for _ in tqdm(requests, disable=disable_tqdm)
        ]

    def generate_until(self, requests, disable_tqdm: bool=False):
        # Emit the fixed string 'lol' for every request; the context must not be blank.
        outputs = []
        for context, _gen_args in tqdm(requests, disable=disable_tqdm):
            outputs.append('lol')
            assert context.strip() != ''
        return outputs

    def loglikelihood_rolling(self, requests, disable_tqdm: bool=False):
        # A single negative random logprob per request.
        return [-random.random() for _ in tqdm(requests, disable=disable_tqdm)]
# File: lm-evaluation-harness-main/lm_eval/models/gguf.py
import logging
import time
import requests
from requests.exceptions import RequestException
from tqdm import tqdm
from lm_eval.api.model import LM
from lm_eval.api.registry import register_model
# Module-level logger named after this module, per standard logging convention.
logger = logging.getLogger(__name__)
def get_result(logprobs, context_length):
is_greedy = True
offsets = logprobs['text_offset']
tokens = logprobs['tokens']
tokens_logprobs = logprobs['token_logprobs']
idx = 0
while offsets[idx] < context_length:
idx += 1