from typing import Optional, Union

import torch

import lm_eval.models.utils
from lm_eval.api.registry import register_model
from lm_eval.models.huggingface import HFLM


@register_model('mamba_ssm')
class MambaLMWrapper(HFLM):
    def __init__(self, pretrained='state-spaces/mamba-130m', **kwargs) -> None:
        # Mamba checkpoints are causal-only; reuse the HFLM machinery with the
        # GPT-NeoX tokenizer that the state-spaces checkpoints ship with.
        if 'backend' in kwargs:
            assert kwargs['backend'] == 'causal'
        super().__init__(
            pretrained=pretrained,
            backend=kwargs.pop('backend', 'causal'),
            tokenizer=kwargs.pop('tokenizer', 'EleutherAI/gpt-neox-20b'),
            max_length=kwargs.pop('max_length', 2048),
            **kwargs,
        )
    def _get_config(self, pretrained: str, **kwargs) -> None:
        try:
            from mamba_ssm.utils.hf import load_config_hf
        except ModuleNotFoundError:
            raise Exception(
                "attempted to use 'mamba_ssm' LM type, but package `mamba_ssm` is not installed. "
                "please install mamba via `pip install lm-eval[mamba]` or `pip install -e .[mamba]`"
            )
        self._config = load_config_hf(pretrained)
    def _create_model(self, pretrained: str, dtype: Optional[Union[str, torch.dtype]] = 'float16', **kwargs) -> None:
        try:
            from mamba_ssm.models.mixer_seq_simple import MambaLMHeadModel
        except ModuleNotFoundError:
            raise Exception(
                "attempted to use 'mamba_ssm' LM type, but package `mamba_ssm` is not installed. "
                "please install mamba via `pip install lm-eval[mamba]` or `pip install -e .[mamba]`"
            )
        self._model = MambaLMHeadModel.from_pretrained(
            pretrained,
            device=self._device,
            dtype=torch.float16 if dtype == 'auto' else lm_eval.models.utils.get_dtype(dtype),
        )
    def _model_generate(self, context, max_length, stop, **generation_kwargs):
        # Drop HF-style kwargs that mamba_ssm's generate() does not accept.
        for key in ('do_sample', 'attention_mask'):
            if key in generation_kwargs:
                generation_kwargs.pop(key)
        return self.model.generate(input_ids=context, max_length=max_length, **generation_kwargs)
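
# --- Usage sketch (illustrative, not part of the harness source) ---
# A minimal example of evaluating the registered 'mamba_ssm' model through
# lm_eval's Python API; the task list and batch size below are assumptions,
# and running it requires the `mamba_ssm` package plus a CUDA device.
if __name__ == '__main__':
    import lm_eval

    results = lm_eval.simple_evaluate(
        model='mamba_ssm',
        model_args='pretrained=state-spaces/mamba-130m',
        tasks=['lambada_openai'],
        batch_size=8,
    )
    print(results['results'])
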
# File: lm-evaluation-harness-main/lm_eval/models/nemo_lm.py
import importlib
import pathlib
from copy import deepcopy
from typing import List, Literal

import filelock
import numpy as np
import torch
from tqdm import tqdm

from lm_eval.api.instance import Instance
from lm_eval.api.model import LM
from lm_eval.api.registry import register_model
from lm_eval.models.utils import Collator
from lm_eval.utils import (
    eval_logger,
    get_rolling_token_windows,
    make_disjoint_window,
    simple_parse_args_string,
)
def _patch_pretrained_cfg(pretrained_cfg, trainer, tensor_model_parallel_size, pipeline_model_parallel_size):
    try:
        import omegaconf
    except ModuleNotFoundError:
        raise Exception(
            "Attempted to use 'nemo_lm' model type, but package `nemo` is not installed. "
            "Please install nemo following the instructions in the README: either with a "
            "NVIDIA PyTorch or NeMo container, or installing nemo following "
            "https://github.com/NVIDIA/NeMo."
        )
    omegaconf.OmegaConf.set_struct(pretrained_cfg, True)
    with omegaconf.open_dict(pretrained_cfg):
        # Override training-time settings that are irrelevant (or harmful) at evaluation time.
        attributes_to_update = {
            'sequence_parallel': False,
            'activations_checkpoint_granularity': None,
            'activations_checkpoint_method': None,
            'precision': trainer.precision,
            'global_batch_size': None,
            'tensor_model_parallel_size': tensor_model_parallel_size,
            'pipeline_model_parallel_size': pipeline_model_parallel_size,
            'apply_rope_fusion': False,
        }
        for name, value in attributes_to_update.items():
            if hasattr(pretrained_cfg, name):
                pretrained_cfg[name] = value
    return pretrained_cfg
def _get_target_from_class(target_class) -> str:
    return f'{target_class.__module__}.{target_class.__name__}'
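# For example, _get_target_from_class(MegatronGPTModel) returns the dotted path
# 'nemo.collections.nlp.models.language_modeling.megatron_gpt_model.MegatronGPTModel',
# which load_model later resolves back into a class via importlib.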
def load_model(model_path: str, trainer, tensor_model_parallel_size: int, pipeline_model_parallel_size: int) -> torch.nn.Module:
    try:
        from nemo.collections.nlp.models.language_modeling.megatron_gpt_model import MegatronGPTModel
        from nemo.collections.nlp.parts.nlp_overrides import NLPSaveRestoreConnector
    except ModuleNotFoundError:
        raise Exception(
            "Attempted to use 'nemo_lm' model type, but package `nemo` is not installed. "
            "Please install nemo following the instructions in the README: either with a "
            "NVIDIA PyTorch or NeMo container, or installing nemo following "
            "https://github.com/NVIDIA/NeMo."
        )
    model_path = pathlib.Path(model_path)
    save_restore_connector = NLPSaveRestoreConnector()
    if model_path.is_dir():
        # Checkpoint already extracted into a directory rather than a packed .nemo file.
        save_restore_connector.model_extracted_dir = model_path.as_posix()
    pretrained_cfg = save_restore_connector.restore_from(None, model_path.as_posix(), return_config=True, trainer=trainer)
    if not hasattr(pretrained_cfg, 'target'):
        pretrained_cfg['target'] = _get_target_from_class(MegatronGPTModel)
    pretrained_cfg = _patch_pretrained_cfg(pretrained_cfg, trainer, tensor_model_parallel_size=tensor_model_parallel_size, pipeline_model_parallel_size=pipeline_model_parallel_size)
    model_to_load_path = model_path
    override_config = pretrained_cfg
    module_name, class_name = override_config.target.rsplit('.', 1)
    model_class = getattr(importlib.import_module(module_name), class_name)

    # Serialize tokenizer construction across processes to avoid races on shared files.
    tokenizer_lock = filelock.FileLock(f'/tmp/{model_path.name}.tokenizer.lock')

    def _synced_build_tokenizer(self):
        with tokenizer_lock:
            self._original_build_tokenizer()

    model_class._original_build_tokenizer = model_class._build_tokenizer
    model_class._build_tokenizer = _synced_build_tokenizer

    model = model_class.restore_from(
        restore_path=model_to_load_path.as_posix(),
        trainer=trainer,
        override_config_path=override_config,
        save_restore_connector=save_restore_connector,
        map_location=f'cuda:{trainer.local_rank}',
    )
    model.freeze()
    model.training = False
    try:
        # Disable activation checkpointing for inference if this attribute path exists.
        model.model.language_model.encoder.activations_checkpoint_method = None
    except AttributeError:
        pass
    return model
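
# --- Usage sketch (illustrative, not part of the harness source) ---
# One way load_model might be driven, assuming NeMo and PyTorch Lightning are
# installed; the checkpoint path, device count, and precision below are assumptions.
def _example_load_nemo_model():
    from pytorch_lightning import Trainer
    from nemo.collections.nlp.parts.nlp_overrides import NLPDDPStrategy

    trainer = Trainer(strategy=NLPDDPStrategy(), devices=1, accelerator='gpu', precision=16)
    return load_model(
        '/path/to/model.nemo',  # hypothetical checkpoint path
        trainer,
        tensor_model_parallel_size=1,
        pipeline_model_parallel_size=1,
    )
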
def setup_distributed_environment(trainer):
    try:
        from nemo.utils.app_state import AppState