            for chunk in chunks:
                contexts, all_gen_kwargs = zip(*chunk)
                inps = [{"role": "user", "content": context} for context in contexts]

                gen_kwargs = all_gen_kwargs[0]
                until = None
                if isinstance((kwargs := copy.deepcopy(gen_kwargs)), dict):
                    if "do_sample" in kwargs.keys():
                        kwargs.pop("do_sample")
                    if "until" in kwargs.keys():
                        until = kwargs.pop("until")
                        if isinstance(until, str):
                            until = [until]
                        elif not isinstance(until, list):
                            raise ValueError(
                                f"Expected `kwargs['until']` to be of type Union[str, list] but got {until}"
                            )
                        # Pass stop sequences through to the API.
                        kwargs["stop"] = until
                    kwargs["max_tokens"] = kwargs.pop("max_gen_toks", self.max_gen_toks)
                else:
                    raise ValueError(
                        f"Expected `kwargs` to be of type `dict` but got {kwargs}"
                    )

                response = oa_completion(
                    client=self.client,
                    chat=True,
                    messages=inps,
                    model=self.model,
                    **kwargs,
                )

                for resp, (context, args_) in zip(response.choices, chunk):
                    s = resp.message.content

                    # Truncate client-side as well, in case the API returned text
                    # past a stop sequence.
                    if until is not None:
                        for term in until:
                            if len(term) > 0:
                                s = s.split(term)[0]

                    res[key].append(s)

                    self.cache_hook.add_partial(
                        "generate_until", (context, {"until": until}), s
                    )
                    pbar.update(1)

            res[key] = re_ord.get_original(res[key])

        pbar.close()

        return grouper.get_original(res)
    def loglikelihood(self, requests, disable_tqdm: bool = False):
        raise NotImplementedError("No support for logits.")

    def loglikelihood_rolling(self, requests, disable_tqdm: bool = False):
        raise NotImplementedError("No support for logits.")
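
# Illustrative sketch (not from the harness source): the kwargs handling in
# `generate_until` above reduces to a pure function. `_prepare_chat_kwargs` is
# a hypothetical helper name; it mirrors how `until` becomes the API's `stop`
# parameter and `max_gen_toks` is renamed to `max_tokens` before the request.
import copy


def _prepare_chat_kwargs(gen_kwargs: dict, default_max_toks: int = 256) -> dict:
    kwargs = copy.deepcopy(gen_kwargs)
    kwargs.pop("do_sample", None)  # the chat endpoint controls sampling itself
    until = kwargs.pop("until", None)
    if isinstance(until, str):
        until = [until]
    if until is not None:
        kwargs["stop"] = until  # server-side stop sequences
    kwargs["max_tokens"] = kwargs.pop("max_gen_toks", default_max_toks)
    return kwargs


# _prepare_chat_kwargs({"until": "\n\n", "max_gen_toks": 128, "temperature": 0})
# -> {"temperature": 0, "stop": ["\n\n"], "max_tokens": 128}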
# File: lm-evaluation-harness-main/lm_eval/models/optimum_lm.py
import json
from importlib.util import find_spec
from pathlib import Path
from lm_eval import utils
from lm_eval.api.registry import register_model
from lm_eval.models.huggingface import HFLM
eval_logger = utils.eval_logger
@register_model("openvino")
class OptimumLM(HFLM):
    def __init__(self, device="cpu", **kwargs) -> None:
        if "backend" in kwargs:
            # Only causal models are supported through Optimum here.
            assert kwargs["backend"] == "causal", "Currently, only OVModelForCausalLM is supported."
        self.openvino_device = device
        super().__init__(
            device=self.openvino_device,
            backend=kwargs.pop("backend", "causal"),
            **kwargs,
        )
    def _create_model(
        self, pretrained: str, revision="main", dtype="auto", trust_remote_code=False, **kwargs
    ) -> None:
        if not find_spec("optimum"):
            raise Exception(
                "package `optimum` is not installed. Please install it via `pip install optimum[openvino]`"
            )
        else:
            from optimum.intel.openvino import OVModelForCausalLM

        model_kwargs = kwargs if kwargs else {}
        if "ov_config" in model_kwargs:
            # A user-supplied ov_config is a path to a JSON file on disk.
            if not Path(model_kwargs["ov_config"]).exists():
                raise ValueError(
                    "ov_config should point to a .json file containing an OpenVINO config"
                )
            with open(model_kwargs["ov_config"]) as f:
                model_kwargs["ov_config"] = json.load(f)
                eval_logger.info(f"Using custom OpenVINO config: {model_kwargs['ov_config']}")
        else:
            model_kwargs["ov_config"] = {}
        model_kwargs["ov_config"].setdefault("CACHE_DIR", "")

        # Load an existing OpenVINO IR directly; otherwise export the
        # Transformers checkpoint to IR on the fly.
        model_file = Path(pretrained) / "openvino_model.xml"
        export = not model_file.exists()

        self._model = OVModelForCausalLM.from_pretrained(
            pretrained,
            revision=revision,
            trust_remote_code=trust_remote_code,
            export=export,
            device=self.openvino_device.upper(),
            **model_kwargs,
        )
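
# Usage sketch (illustrative; paths and task names are placeholders): because
# the class is registered as "openvino", it is selected through the usual
# harness entry points. If `pretrained` already contains openvino_model.xml
# the IR is loaded as-is; otherwise it is exported on first load.
#
#   CLI:
#     lm_eval --model openvino \
#         --model_args pretrained=./my-ov-model \
#         --tasks lambada_openai
#
#   Python:
#     import lm_eval
#     results = lm_eval.simple_evaluate(
#         model="openvino",
#         model_args="pretrained=./my-ov-model",
#         tasks=["lambada_openai"],
#     )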
# File: lm-evaluation-harness-main/lm_eval/models/textsynth.py
""""""
import logging
import os

import requests as _requests
from tqdm import tqdm

from lm_eval.api.model import LM
from lm_eval.api.registry import register_model
from lm_eval.models.utils import retry_on_specific_exceptions

logger = logging.getLogger(__name__)


def textsynth_completion(**kwargs):
    """Query the TextSynth API for a completion, retrying on network errors."""

    def _exception_callback(e: Exception, sleep_time: float) -> None:
        import traceback

        traceback.print_exc()

    @retry_on_specific_exceptions(
        on_exceptions=[_requests.exceptions.RequestException],
        max_retries=None,  # retry indefinitely
        on_exception_callback=_exception_callback,
    )
    def completion():
        return _requests.post(**kwargs)

    return completion()
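
# Usage sketch (illustrative): `textsynth_completion` forwards its kwargs
# straight to requests.post, retrying on RequestException. The endpoint shape
# follows the public TextSynth HTTP API; the engine name and environment
# variable below are assumptions, not part of this helper.
#
#   api_key = os.environ["TEXTSYNTH_API_SECRET_KEY"]
#   response = textsynth_completion(
#       url="https://api.textsynth.com/v1/engines/gptj_6B/completions",
#       headers={"Authorization": f"Bearer {api_key}"},
#       json={"prompt": "Once upon a time", "max_tokens": 32},
#   )
#   print(response.json())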