# services/model_service.py
from transformers import AutoTokenizer, AutoModelForCausalLM, LlamaConfig
from sentence_transformers import SentenceTransformer
import torch
import logging

from config import settings  # NOTE: assumed location of the app's settings module

logger = logging.getLogger(__name__)
class ModelService:
    """Singleton that loads the tokenizer, causal LM, and sentence embedder once."""

    _instance = None

    def __new__(cls):
        if cls._instance is None:
            cls._instance = super().__new__(cls)
            cls._instance._initialized = False
        return cls._instance

    def __init__(self):
        if not self._initialized:
            self._initialized = True
            self._load_models()
    def _load_models(self):
        try:
            # Load tokenizer
            self.tokenizer = AutoTokenizer.from_pretrained(settings.MODEL_NAME)

            # Load model configuration
            config = LlamaConfig.from_pretrained(settings.MODEL_NAME)

            # Normalize rope_scaling if necessary. Llama 3.x checkpoints ship an
            # extended rope_scaling dict (rope_type, low_freq_factor, ...) that
            # older transformers versions reject; reduce it to the legacy
            # {"type", "factor"} form they expect.
            if getattr(config, "rope_scaling", None) is not None:
                logger.info("Updating rope_scaling in configuration...")
                config.rope_scaling = {
                    "type": "linear",  # legacy scaling type expected here
                    "factor": config.rope_scaling.get("factor", 1.0),  # keep existing factor or default to 1.0
                }

            # Check the quantization method and adjust if unsupported.
            # NOTE: PretrainedConfig has no .get() method, so read the attribute
            # instead; the serialized dict keys its method under "quant_method".
            quant_config = getattr(config, "quantization_config", None)
            if isinstance(quant_config, dict) and quant_config.get("quant_method") == "compressed-tensors":
                logger.warning(
                    "Quantization method 'compressed-tensors' is not supported. "
                    "Switching to 8-bit bitsandbytes."
                )
                quant_config["quant_method"] = "bitsandbytes"
                quant_config["load_in_8bit"] = True

            # Load model with the updated configuration
            self.model = AutoModelForCausalLM.from_pretrained(
                settings.MODEL_NAME,
                torch_dtype=torch.float16 if settings.DEVICE == "cuda" else torch.float32,
                device_map="auto" if settings.DEVICE == "cuda" else None,
                config=config,
            )

            # Load sentence embedder
            self.embedder = SentenceTransformer(settings.EMBEDDER_MODEL)
        except Exception as e:
            logger.error(f"Error loading models: {e}")
            raise
    def get_models(self):
        return self.tokenizer, self.model, self.embedder
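

# Minimal usage sketch (assumes the `settings` module imported above provides
# MODEL_NAME, DEVICE, and EMBEDDER_MODEL). ModelService is a singleton:
# instantiating it twice returns the same object, so the models load only once.
if __name__ == "__main__":
    service_a = ModelService()
    service_b = ModelService()
    assert service_a is service_b  # both names point at the one shared instance

    tokenizer, model, embedder = service_a.get_models()
    inputs = tokenizer("Hello, world!", return_tensors="pt").to(model.device)
    output_ids = model.generate(**inputs, max_new_tokens=20)
    print(tokenizer.decode(output_ids[0], skip_special_tokens=True))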