            for context, _ in chunk:
                inps.append(context)

            until = request_args.pop('until', ['<|endoftext|>'])
            # Force greedy decoding: drop sampling flags, default temperature to 0.
            request_args.pop('do_sample', None)
            request_args['temperature'] = request_args.get('temperature', 0)

            out = self.model(
                sequences=inps,
                max_new_tokens=self.max_gen_toks - 1,
                stop=until,
                **request_args,
            )

            for resp, (context, args_) in zip(out.generations, chunk):
                text = resp.text
                until_ = until
                # Truncate at the first occurrence of any stop sequence.
                for term in until_:
                    if len(term) > 0:
                        text = text.split(term)[0]
                res.append(text)
                self.cache_hook.add_partial(
                    'generate_until', (context, {'until': until_}), text
                )
                pbar.update(1)

        pbar.close()
        return re_ord.get_original(res)
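    # Illustrative note (not part of the original file): `text.split(term)[0]`
    # keeps only what precedes the first stop string, so with until=['\n\n'] a
    # completion like 'Paris.\n\nQ: ...' is trimmed to 'Paris.'.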
    def _encode_pair(
        self, context: str, continuation: str
    ) -> Tuple[List[int], List[int]]:
        # Move trailing whitespace from the context onto the continuation so
        # the merged string tokenizes the same way it will be scored.
        n_spaces = len(context) - len(context.rstrip())
        if n_spaces > 0:
            continuation = context[-n_spaces:] + continuation
            context = context[:-n_spaces]
        whole_enc = self.tok_encode(context + continuation)
        context_enc = self.tok_encode(context)
        context_enc_len = len(context_enc)
        continuation_enc = whole_enc[context_enc_len:]
        return context_enc, continuation_enc
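    # Illustrative note (not part of the original file): with a BPE tokenizer,
    # tok_encode('Answer: ') + tok_encode('yes') need not equal
    # tok_encode('Answer: yes'). Shifting the trailing space onto the
    # continuation and splitting the whole encoding at len(context_enc) keeps
    # the (context, continuation) token split consistent for scoring.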
# File: lm-evaluation-harness-main/lm_eval/models/neuron_optimum.py
import copy
import json
import logging
import subprocess
from collections import defaultdict
from typing import List, Optional, Union
import torch
import torch.nn.functional as F
import transformers
from packaging import version
from tqdm import tqdm
from transformers import GenerationConfig
from transformers.generation import StoppingCriteriaList
import lm_eval.models.utils
from lm_eval import utils
from lm_eval.api.model import TemplateLM
from lm_eval.api.registry import register_model
from lm_eval.models.utils import stop_sequences_criteria
try:
    NEURON_AVAILABLE = True
    from optimum.neuron import NeuronModelForCausalLM
    from optimum.neuron.generation import TokenSelector
    from optimum.neuron.version import __version__ as optimum_neuron_version
except ImportError:
    # Fall back to a plain object base class so the subclass below still imports.
    NeuronModelForCausalLM = object
    NEURON_AVAILABLE = False
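# Note (illustrative, not from the original file): downstream code can guard on
# this flag before building the model, e.g.
#
#     if not NEURON_AVAILABLE:
#         raise ImportError('optimum-neuron is required, e.g. pip install optimum[neuronx]')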
logger = logging.getLogger(__name__)
def get_nc_count() -> Union[int, None]:
    """Return the number of NeuronCores on this instance, or None if unknown."""
    try:
        cmd = 'neuron-ls --json-output'
        result = subprocess.run(cmd, shell=True, capture_output=True)
        print(f'inferring nc_count from `neuron-ls` {result.stdout}')
        json_output = json.loads(result.stdout)
        count = sum([x['nc_count'] for x in json_output])
        print(f'nc_count={count}')
        return count
    except Exception:
        return None
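# Illustrative note (assumption, not from the original file): `neuron-ls
# --json-output` prints a JSON list with one entry per Neuron device, each with
# an 'nc_count' field, e.g. [{'neuron_device': 0, 'nc_count': 2, ...}] on an
# inf2.xlarge, for which get_nc_count() returns 2 (or None if the CLI fails).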
def wrap_constant_batch_size(func):
    def _decorator(self, input_ids):
        batch_size = input_ids.shape[0]
        if batch_size < self.batch_size:
            # Neuron models are compiled for a constant batch size: pad the
            # batch with zero rows up to the static size.
            padding = torch.zeros(
                [self.batch_size - batch_size, *input_ids.size()[1:]],
                dtype=input_ids.dtype,
                device=input_ids.device,
            )
            input_ids = torch.concat((input_ids, padding), dim=0)
        elif batch_size > self.batch_size:
            raise ValueError(
                f'The specified batch_size ({batch_size}) exceeds the model static batch size ({self.batch_size})'
            )
        # Run the wrapped forward pass, then drop the padded rows.
        return func(self, input_ids)[:batch_size]
    return _decorator
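# Minimal usage sketch (hypothetical, not from the original file): the decorator
# suits forward-style methods taking `input_ids` whose output's dim 0 matches
# the padded batch:
#
#     class Wrapper:
#         batch_size = 4  # static batch size the model was compiled for
#
#         @wrap_constant_batch_size
#         def forward_logits(self, input_ids):
#             return self.model(input_ids)  # hypothetical compiled forward
#
# A call with a batch of 2 is padded to 4, run once, and sliced back to 2 rows.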
class CustomNeuronModelForCausalLM(NeuronModelForCausalLM):
    def generate(
        self,
        input_ids: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        stopping_criteria: Optional['StoppingCriteriaList'] = None,
        generation_config: Optional['GenerationConfig'] = None,
        **kwargs,
    ) -> torch.LongTensor:
        # Merge per-call kwargs into a copy of the generation config.
        generation_config = copy.deepcopy(
            self.generation_config if generation_config is None else generation_config
        )
        model_kwargs = generation_config.update(**kwargs)
        self._validate_model_kwargs(model_kwargs)
        selector = TokenSelector.create(input_ids, generation_config, self, self.max_length)
        selector.stopping_criteria.append(stopping_criteria)
        # Check the inputs against the model's static sequence and batch dimensions.
        batch_size, sequence_length = input_ids.shape
        if sequence_length > self.max_length:
            raise ValueError(
                f'The input sequence length ({sequence_length}) exceeds the model static sequence length ({self.max_length})'
            )
        padded_input_ids = input_ids
        padded_attention_mask = attention_mask
        if batch_size > self.batch_size:
            raise ValueError(
                f'The specified batch_size ({batch_size}) exceeds the model static batch size ({self.batch_size})'
            )
        elif batch_size < self.batch_size:
            # Pad an incomplete batch with eos rows up to the static batch size.
            logger.warning('Inputs will be padded to match the model static batch size. This will increase latency.')
            padding_shape = [self.batch_size - batch_size, sequence_length]
            padding = torch.full(padding_shape, fill_value=self.config.eos_token_id, dtype=torch.int64)