        padded_input_ids = torch.cat([input_ids, padding])
        if attention_mask is not None:
            padding = torch.zeros(padding_shape, dtype=torch.int64)
            padded_attention_mask = torch.cat([attention_mask, padding])
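        # Drop the current generation context and clear the key/value cache before
        # generating on the (possibly padded) batch.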
        self.reset_generation()
        output_ids = self.generate_tokens(
            padded_input_ids, selector, batch_size, attention_mask=padded_attention_mask, **model_kwargs
        )
        return output_ids[:batch_size, :]
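

# lm-eval model adapter for causal LMs compiled to AWS Inferentia2 via
# optimum-neuron; registered with the harness under the model name 'neuronx'.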
@register_model('neuronx')
class NEURON_HF(TemplateLM):
    _DEFAULT_MAX_LENGTH = 2048

    def __init__(
        self,
        pretrained: Optional[str] = 'TinyLlama/TinyLlama-1.1B-Chat-v1.0',
        revision: Optional[str] = 'main',
        tp_degree: Optional[int] = None,
        subfolder: Optional[str] = None,
        tokenizer: Optional[str] = None,
        truncation: Optional[bool] = False,
        max_length: Optional[int] = None,
        dtype: Optional[Union[str, torch.dtype]] = 'auto',
        batch_size: Optional[int] = 1,
        low_cpu_mem_usage: Optional[bool] = True,
        trust_remote_code: Optional[bool] = False,
        use_fast_tokenizer: Optional[bool] = True,
        add_bos_token: Optional[bool] = False,
    ) -> None:
        if not NEURON_AVAILABLE:
            raise Exception(
                'Tried to load neuron model, but neuron is not installed. '
                'Please install neuron via pip install transformers-neuron. '
                'Also make sure you are running on an AWS inf2 instance.'
            )
        if version.parse(optimum_neuron_version) != version.parse('0.0.17'):
            logger.warning(
                '`optimum-neuron` model requires `pip install "optimum[neuronx]>=0.0.17"`, '
                'preferably using the Hugging Face Neuron Deep Learning AMI (Ubuntu 22.04) '
                'https://aws.amazon.com/marketplace/pp/prodview-gr3e6yiscria2 '
                f'You are using optimum-neuron={optimum_neuron_version}'
            )
        super().__init__()

        assert isinstance(pretrained, str)
        assert isinstance(batch_size, (int, str))

        self.batch_size_per_gpu = int(batch_size)
        batch_size = int(batch_size)
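        # If no tensor-parallel degree is given, default to the number of
        # NeuronCores detected on this instance.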
        if tp_degree is None:
            tp_degree = get_nc_count()
        assert isinstance(tp_degree, int), (
            'model_args must include tp_degree. tp_degree must be set to an integer, '
            f'but is tp_degree=`{tp_degree}` with type=`{type(tp_degree)}`. '
            'Set it to the number of neuron cores on your instance. '
            'For inf2.xlarge and inf2.8xlarge, set it to `2`. '
            'For inf2.24xlarge, set it to `12`. '
            'For inf2.48xlarge, set it to `24`.'
        )
        revision = revision + ('/' + subfolder if subfolder is not None else '')

        self._config = transformers.AutoConfig.from_pretrained(
            pretrained, revision=revision, trust_remote_code=trust_remote_code
        )
        torch_dtype = lm_eval.models.utils.get_dtype(dtype)
        assert torch_dtype in [torch.float16, torch.bfloat16], 'Only float16 and bfloat16 are supported'

        self.tokenizer = transformers.AutoTokenizer.from_pretrained(
            pretrained if tokenizer is None else tokenizer,
            revision=revision,
            trust_remote_code=trust_remote_code,
            use_fast=use_fast_tokenizer,
        )
        if torch_dtype == torch.float16:
            self.amp_dtype = 'f16'
        elif torch_dtype == torch.bfloat16:
            self.amp_dtype = 'bf16'
        elif torch_dtype == torch.float32:
            self.amp_dtype = 'f32'
        else:
            raise NotImplementedError('Only float16 and bfloat16 are implemented.')
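        # Neuron models are compiled ahead of time with static shapes: the batch
        # size and sequence length passed here are fixed at export/compile time.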
        compiler_args = {'num_cores': tp_degree, 'auto_cast_type': self.amp_dtype}
        input_shapes = {'batch_size': batch_size, 'sequence_length': self._DEFAULT_MAX_LENGTH}

        print(f"{'=' * 20} \n loading model to neuron with {compiler_args}, {input_shapes}...")
        self.model = CustomNeuronModelForCausalLM.from_pretrained(
            pretrained,
            revision=revision,
            trust_remote_code=trust_remote_code,
            low_cpu_mem_usage=low_cpu_mem_usage,
            export=True,
            **compiler_args,
            **input_shapes,
        )
        print(f"SUCCESS: neuron model compiled. \n {'=' * 20}")
        self.truncation = truncation

        self.vocab_size = self.tokenizer.vocab_size
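        # Many causal-LM tokenizers ship without a pad token; reuse EOS so that
        # batches can be padded.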
        self.tokenizer.pad_token_id = self.tokenizer.eos_token_id
        self.add_bos_token = add_bos_token

        self._max_length = max_length
        self.batch_schedule = 1
        self.batch_sizes = {}
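
    # Example invocation (sketch; CLI flags assumed from the lm-eval harness, run on an inf2 host):
    #   lm_eval --model neuronx \
    #     --model_args pretrained=TinyLlama/TinyLlama-1.1B-Chat-v1.0,tp_degree=2,dtype=bfloat16 \
    #     --tasks lambada_openai --batch_size 1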
    @property
    def config(self):
        return self._config

    @property
    def eot_token_id(self):
        return self.tokenizer.eos_token_id

    @property
    def prefix_token_id(self):
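        # Use BOS as the prefix token when the tokenizer defines one, otherwise fall back to EOS.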
        return self.tokenizer.bos_token_id or self.tokenizer.eos_token_id
    @property
    def max_length(self):
        if self._max_length:
            return self._max_length
        seqlen_config_attrs = ('n_positions', 'max_position_embeddings', 'n_ctx')
        for attr in seqlen_config_attrs:
            if hasattr(self.model.config, attr):
                return getattr(self.model.config, attr)
        if hasattr(self.tokenizer, 'model_max_length'):
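            # transformers uses this sentinel (VERY_LARGE_INTEGER) when a tokenizer
            # defines no real model_max_length; fall back to the default in that case.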
            if self.tokenizer.model_max_length == 1000000000000000019884624838656:
                return self._DEFAULT_MAX_LENGTH
            return self.tokenizer.model_max_length
        return self._DEFAULT_MAX_LENGTH
    @property
    def max_gen_toks(self) -> int:
        return 256

    @property
    def batch_size(self):
        return self.batch_size_per_gpu

    @property
    def device(self):
        return 'cpu'

    @property
    def rank(self):
        return 0

    @property
    def world_size(self):
        return 1
    def tok_encode(self, string: str, left_truncate_len=None, add_special_tokens=None):
        """Encode a string into token ids, optionally left-truncating the result."""
        if add_special_tokens is None: