# llm/services/model_service.py
from transformers import AutoTokenizer, AutoModelForCausalLM, LlamaConfig
from config.config import settings
from sentence_transformers import SentenceTransformer
import torch
import logging

logger = logging.getLogger(__name__)

class ModelService:
    # Singleton: all callers share one instance, so the models are loaded only once
    _instance = None

    def __new__(cls):
        if cls._instance is None:
            cls._instance = super().__new__(cls)
            cls._instance._initialized = False
        return cls._instance

    def __init__(self):
        if not self._initialized:
            self._initialized = True
            self._load_models()
    def _load_models(self):
        try:
            # Earlier approach, kept for reference: load an explicit LlamaConfig and
            # rewrite an unsupported 'compressed-tensors' quantization type before
            # loading the model.
            #self.tokenizer = AutoTokenizer.from_pretrained(settings.MODEL_NAME)
            #config = LlamaConfig.from_pretrained(settings.MODEL_NAME)
            #if config.get('quantization_config', {}).get('type', '') == 'compressed-tensors':
            #    logger.warning("Quantization type 'compressed-tensors' is not supported. Switching to 'bitsandbytes_8bit'.")
            #    config.quantization_config['type'] = 'bitsandbytes_8bit'
            #self.model = AutoModelForCausalLM.from_pretrained(
            #    settings.MODEL_NAME,
            #    config=config,
            #    torch_dtype=torch.float16 if settings.DEVICE == "cuda" else torch.float32,
            #    device_map="auto" if settings.DEVICE == "cuda" else None
            #)

            # Current approach: load the Llama 3.2 tokenizer and model on CPU in
            # full precision.
            model_name = settings.MODEL_NAME  # e.g. "meta-llama/Llama-3.2-3B-Instruct"
            self.tokenizer = AutoTokenizer.from_pretrained(model_name)
            # GPU alternative: device_map="auto", torch_dtype=torch.float16
            self.model = AutoModelForCausalLM.from_pretrained(
                model_name, device_map=None, torch_dtype=torch.float32
            )

            # Load sentence embedder
            self.embedder = SentenceTransformer(settings.EMBEDDER_MODEL)
        except Exception as e:
            logger.error(f"Error loading models: {e}")
            raise

    def get_models(self):
        return self.tokenizer, self.model, self.embedder
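

# Minimal usage sketch, not part of the original file: it assumes the settings
# object provides MODEL_NAME and EMBEDDER_MODEL as referenced above. Because
# ModelService is a singleton, constructing it twice returns the same instance
# and the models are loaded only once.
if __name__ == "__main__":
    service_a = ModelService()
    service_b = ModelService()
    assert service_a is service_b  # same singleton instance

    tokenizer, model, embedder = service_a.get_models()

    # Generate a short completion with the causal LM
    inputs = tokenizer("Hello, how are you?", return_tensors="pt")
    with torch.no_grad():
        output_ids = model.generate(**inputs, max_new_tokens=20)
    print(tokenizer.decode(output_ids[0], skip_special_tokens=True))

    # Encode a sentence with the embedder (returns a numpy array)
    vectors = embedder.encode(["Hello, how are you?"])
    print(vectors.shape)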