req_args = all_gen_kwargs[0]
# Unpack the generation kwargs shared by this batch of requests.
until = get_until(req_args)
max_gen_toks = req_args.get('max_gen_toks', self.max_gen_toks)

# Reserve room for the generated tokens and left-truncate each context so that
# prompt plus generation fits within the model's maximum sequence length.
remaining_length = self.max_length - max_gen_toks
contexts = []
for context, _ in chunk:
    encoded_context = self.tok_encode(context)
    encoded_context = encoded_context[-remaining_length:]
    contexts.append(self.tok_decode(encoded_context))

output = self.generate(self.model, inputs=contexts, tokens_to_generate=max_gen_toks, end_strings=until, greedy=True)
answers = output['sentences']

# The model returns prompt + continuation: strip the prompt prefix, then cut each
# continuation at the first occurrence of any stop sequence.
continuations = []
for context, answer in zip(contexts, answers):
    continuations.append(answer[len(context):])

for term in until:
    continuations = [answer.split(term)[0] for answer in continuations]

for request, answer in zip(chunk, continuations):
    self.cache_hook.add_partial('greedy_until', request, answer)
    res.append(answer)

return re_ords.get_original(res)
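
# Worked illustration of the stop-sequence trimming above (the example values are
# assumptions for this note, not taken from the original file):
#
#     until = ['\n\n']
#     continuation = ' Paris\n\nQuestion: What is 2+2?'
#     continuation.split('\n\n')[0]  # -> ' Paris'
#
# Everything after the first stop string is discarded before the answer is cached.
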
# File: lm-evaluation-harness-main/lm_eval/models/neuralmagic.py
import copy
from typing import List, Optional, Tuple, Union

import numpy
import transformers
from tqdm import tqdm

import lm_eval.models.utils
from lm_eval import utils
from lm_eval.api.instance import Instance
from lm_eval.api.model import LM
from lm_eval.api.registry import register_model
from lm_eval.models.huggingface import HFLM

eval_logger = utils.eval_logger

@register_model('sparseml')
class SparseMLLM(HFLM):
    def _create_model(self, pretrained: str, revision: Optional[str] = 'main', dtype: Optional[str] = 'auto', trust_remote_code: Optional[bool] = False, **kwargs) -> None:
        try:
            from sparseml.transformers import SparseAutoModelForCausalLM
        except ModuleNotFoundError:
            raise Exception('Package `sparseml` is not installed. Please install it via `pip install sparseml[transformers]`')

        model_kwargs = kwargs if kwargs else {}

        # Default the device map to this process's device when the caller did not set one.
        if 'device_map' not in model_kwargs:
            if hasattr(self, 'accelerator'):
                model_kwargs.update({'device_map': {'': f'cuda:{self.accelerator.local_process_index}'}})
            else:
                model_kwargs.update({'device_map': {'': str(self.device)}})

        # Only a small subset of kwargs is forwarded to SparseML; warn about the rest.
        relevant_kwarg_names = ['offload_folder', 'device_map']
        relevant_kwargs = {k: v for k, v in model_kwargs.items() if k in relevant_kwarg_names}
        ignored_kwargs = {}
        for k, v in model_kwargs.items():
            if k not in relevant_kwargs.keys():
                ignored_kwargs[k] = v
        eval_logger.warning(f'The sparseml integration is ignoring the following kwargs that are specified: {ignored_kwargs}')

        model = SparseAutoModelForCausalLM.from_pretrained(pretrained, revision=revision, torch_dtype=lm_eval.models.utils.get_dtype(dtype), trust_remote_code=trust_remote_code, **relevant_kwargs)
        self._model = model

    def _get_config(self, pretrained: str, **kwargs) -> None:
        try:
            from sparseml.transformers import SparseAutoConfig
        except ModuleNotFoundError:
            raise Exception('Package `sparseml` is not installed. Please install it via `pip install sparseml[transformers]`')
        self._config = SparseAutoConfig.from_pretrained(pretrained_model_name_or_path=pretrained, **kwargs)

    def _create_tokenizer(self, pretrained: Union[str, transformers.PreTrainedModel], tokenizer: Optional[Union[str, transformers.PreTrainedTokenizer, transformers.PreTrainedTokenizerFast]], **kwargs) -> None:
        try:
            from sparseml.transformers import SparseAutoTokenizer
        except ModuleNotFoundError:
            raise Exception('Package `sparseml` is not installed. Please install it via `pip install sparseml[transformers]`')
        if tokenizer:
            if isinstance(tokenizer, str):
                self.tokenizer = SparseAutoTokenizer.from_pretrained(tokenizer, **kwargs)
            else:
                assert isinstance(tokenizer, (transformers.PreTrainedTokenizer, transformers.PreTrainedTokenizerFast))
                self.tokenizer = tokenizer
        else:
            # Fall back to the tokenizer that ships with the pretrained model.
            model_name = pretrained if isinstance(pretrained, str) else self.model.name_or_path
            self.tokenizer = SparseAutoTokenizer.from_pretrained(model_name, **kwargs)
        return None
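

# Usage sketch (illustrative only, not part of the original file): once registered
# under the name 'sparseml', this backend can be selected through lm_eval's Python
# entry point. The model identifier below is a placeholder; the exact model_args
# string depends on the checkpoint being evaluated.
#
#     import lm_eval
#     results = lm_eval.simple_evaluate(
#         model='sparseml',
#         model_args='pretrained=<path-or-hub-id-of-sparse-model>',
#         tasks=['lambada_openai'],
#     )
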
@register_model('deepsparse')
class DeepSparseLM(LM):
    _DEFAULT_MAX_LENGTH = 2048

    def __init__(self, pretrained: str, tokenizer: Optional[Union[str, transformers.PreTrainedTokenizer, transformers.PreTrainedTokenizerFast]] = None, batch_size: Optional[Union[int, str]] = 1, max_gen_toks: Optional[int] = 256, max_length: Optional[int] = None):
        super().__init__()
        try:
            import deepsparse
        except ModuleNotFoundError:
            raise Exception('Package `deepsparse` is not installed. Please install it via `pip install deepsparse[transformers]`')
        if isinstance(batch_size, str) and not batch_size.isdigit():
            eval_logger.warning(f'batch_size={batch_size} is not valid for deepsparse because it is not an integer. Ignoring and using the default of 1.')
            batch_size = 1
        self.batch_size = int(batch_size)
        self._max_length = max_length if max_length else self._DEFAULT_MAX_LENGTH
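
# Usage sketch (illustrative only, not part of the original file): the 'deepsparse'
# backend registered above can be selected from the harness CLI; the model
# identifier below is a placeholder for any DeepSparse-compatible checkpoint.
#
#     lm_eval --model deepsparse \
#         --model_args pretrained=<path-or-hub-id-of-deepsparse-model> \
#         --tasks lambada_openai \
#         --batch_size 1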