            if group_by == 'contexts':
                res[tuple(fn(ob))].append(ob)
            else:
                try:
                    hashable_dict = tuple(
                        (key, tuple(value) if isinstance(value, collections.abc.Iterable) else value)
                        for key, value in sorted(fn(ob).items())
                    )
                    res[hashable_dict].append(ob)
                except (TypeError, AttributeError):
                    res[tuple(fn(ob))].append(ob)
        return res
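    # Illustrative sketch (not part of the original module): the grouping helper above
    # (presumably Collator.group in lm_eval.models.utils) buckets requests by the value
    # of `fn`. When `fn` returns a gen_kwargs dict, the dict is flattened into a sorted,
    # hashable tuple so that requests with identical generation settings share a key, e.g.
    #
    #     fn(ob) == {'until': ['\n'], 'temperature': 0.0}
    #     # becomes the key (('temperature', 0.0), ('until', ('\n',)))
    #
    # Non-dict or otherwise unhashable outputs fall back to the tuple(fn(ob)) key in the
    # except branch.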
    @staticmethod
    def get_chunks(_iter, n: int = 0, fn=None):
        arr = []
        _iter = tuple(_iter)
        for i, x in enumerate(_iter):
            arr.append(x)
            if len(arr) == (fn(i, _iter) if fn else n):
                yield arr
                arr = []
        if arr:
            yield arr
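# Illustrative sketch (not part of the original module): get_chunks yields fixed-size
# batches of n items (the last batch may be shorter), or variable-size batches when a
# callable fn(index, iterable) is supplied to compute the target size.
#
#     >>> list(Collator.get_chunks(range(7), n=3))   # assuming this is Collator from lm_eval.models.utils
#     [[0, 1, 2], [3, 4, 5], [6]]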
def configure_pad_token(
    tokenizer: 'PreTrainedTokenizerBase',
    model_config: Optional['PretrainedConfig'] = None,
) -> 'PreTrainedTokenizerBase':
    if tokenizer.pad_token:
        # A pad token is already configured; leave it untouched.
        pass
    elif tokenizer.unk_token:
        tokenizer.pad_token_id = tokenizer.unk_token_id
    elif tokenizer.eos_token:
        tokenizer.pad_token_id = tokenizer.eos_token_id
    elif model_config and getattr(model_config, 'model_type', None) == 'qwen':
        # Qwen's remote-code tokenizer does not accept added special tokens,
        # so reuse its end-of-text token for padding.
        tokenizer.pad_token = '<|endoftext|>'
    elif tokenizer.__class__.__name__ in ('RWKVWorldTokenizer', 'Rwkv5Tokenizer'):
        # The RWKV world tokenizers are expected to use id 0 for padding.
        assert tokenizer.pad_token_id == 0
    else:
        tokenizer.add_special_tokens({'pad_token': '<|pad|>'})
    return tokenizer
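# Illustrative usage sketch (an assumption, mirroring the call made in VLLM.__init__ below):
# ensure a Hugging Face tokenizer has a usable pad token before batched evaluation,
# falling back to unk/eos, model-specific tokens, or a newly added '<|pad|>' token.
#
#     >>> from transformers import AutoTokenizer
#     >>> tok = AutoTokenizer.from_pretrained('gpt2')   # 'gpt2' ships without a pad token
#     >>> tok = configure_pad_token(tok)
#     >>> tok.pad_token is not None
#     True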
# File: lm-evaluation-harness-main/lm_eval/models/vllm_causallms.py
import copy
from importlib.metadata import version
from importlib.util import find_spec
from typing import TYPE_CHECKING, Dict, List, Literal, Optional, Tuple, Union
from more_itertools import distribute
from packaging.version import parse as parse_version
from tqdm import tqdm
from lm_eval.api.instance import Instance
from lm_eval.api.model import TemplateLM
from lm_eval.api.registry import register_model
from lm_eval.models.utils import Collator, configure_pad_token, undistribute
from lm_eval.utils import eval_logger, get_rolling_token_windows, make_disjoint_window
try:
    import ray
    from vllm import LLM, SamplingParams
    from vllm.lora.request import LoRARequest
    from vllm.transformers_utils.tokenizer import get_tokenizer
except ModuleNotFoundError:
    pass

if TYPE_CHECKING:
    pass
eval_logger = eval_logger
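# Illustrative usage sketch (not part of the original module): registering the class below
# under the name 'vllm' makes it selectable from the lm-eval CLI; the model name and
# parallelism settings here are placeholders.
#
#     lm_eval --model vllm \
#         --model_args pretrained=meta-llama/Llama-2-7b-hf,tensor_parallel_size=1,dtype=auto,gpu_memory_utilization=0.9 \
#         --tasks lambada_openai \
#         --batch_size auto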
@register_model('vllm')
class VLLM(TemplateLM):
    _DEFAULT_MAX_LENGTH = 2048

    def __init__(
        self,
        pretrained: str,
        dtype: Literal['float16', 'bfloat16', 'float32', 'auto'] = 'auto',
        revision: Optional[str] = None,
        trust_remote_code: Optional[bool] = False,
        tokenizer: Optional[str] = None,
        tokenizer_mode: Literal['auto', 'slow'] = 'auto',
        tokenizer_revision: Optional[str] = None,
        add_bos_token: Optional[bool] = False,
        prefix_token_id: Optional[int] = None,
        tensor_parallel_size: int = 1,
        quantization: Optional[str] = None,
        max_gen_toks: int = 256,
        swap_space: int = 4,
        batch_size: Union[str, int] = 1,
        max_batch_size=None,
        max_length: int = None,
        max_model_len: int = None,
        seed: int = 1234,
        gpu_memory_utilization: float = 0.9,
        device: str = 'cuda',
        data_parallel_size: int = 1,
        lora_local_path: str = None,
        **kwargs,
    ):
        super().__init__()
        if not find_spec('vllm'):
            raise Exception("attempted to use 'vllm' LM type, but package `vllm` is not installed. Please install vllm via `pip install lm-eval[vllm]` or `pip install -e .[vllm]`")
        assert 'cuda' in device or device is None, 'vLLM only supports CUDA'
        assert max_length is None or max_model_len is None, 'Either max_length or max_model_len may be provided, but not both'
        self._max_length = max_model_len if max_model_len is not None else max_length
        self.tensor_parallel_size = int(tensor_parallel_size)
        self.data_parallel_size = int(data_parallel_size)
        self.model_args = {
            'model': pretrained,
            'gpu_memory_utilization': float(gpu_memory_utilization),
            'revision': revision,
            'dtype': dtype,
            'tokenizer': tokenizer,
            'tokenizer_mode': tokenizer_mode,
            'tokenizer_revision': tokenizer_revision,
            'trust_remote_code': trust_remote_code,
            'tensor_parallel_size': int(tensor_parallel_size),
            'max_model_len': int(self._max_length) if self._max_length else None,
            'swap_space': int(swap_space),
            'quantization': quantization,
            'seed': int(seed),
        }
        self.model_args.update(kwargs)
        self.batch_size = 'auto' if isinstance(batch_size, str) and 'auto' in batch_size else batch_size
        if self.data_parallel_size <= 1:
            self.model = LLM(**self.model_args)
        else:
            eval_logger.warning('You might experience occasional issues with model weight downloading when data_parallel is in use. To ensure stable performance, run with data_parallel_size=1 until the weights are downloaded and cached.')
            self.model_args['worker_use_ray'] = True
            self.batch_size = 'auto'
            eval_logger.info('Manual batching is not compatible with data parallelism.')
        from transformers import AutoConfig

        self._config = AutoConfig.from_pretrained(pretrained, trust_remote_code=trust_remote_code, revision=revision)
        self.tokenizer = get_tokenizer(
            tokenizer if tokenizer else pretrained,
            tokenizer_mode=tokenizer_mode,
            trust_remote_code=trust_remote_code,
            tokenizer_revision=tokenizer_revision,
        )
        self.tokenizer = configure_pad_token(self.tokenizer)
        self.add_bos_token = add_bos_token
        if 'gemma' in pretrained.lower():
            self.add_bos_token = True
            eval_logger.info("Found 'gemma' in model name, a BOS token will be used as Gemma series models underperform without it.")
        self.custom_prefix_token_id = prefix_token_id
        if prefix_token_id is not None:
            eval_logger.info(f'Loglikelihood prefix token id used in evaluation: {self.prefix_token_id}')
        self._max_gen_toks = max_gen_toks
        if lora_local_path is not None:
            assert parse_version(version('vllm')) > parse_version('0.3.0'), 'lora adapters only compatible with vllm > v0.3.0.'
            self.lora_request = LoRARequest('finetuned', 1, lora_local_path)
        else: