# src/utils/llm_utils.py
from fastapi import HTTPException
from typing import Tuple

from src.llms.openai_llm import OpenAILanguageModel
from src.llms.ollama_llm import OllamaLanguageModel
from src.llms.bert_llm import BERTLanguageModel
from src.llms.falcon_llm import FalconLanguageModel
from src.llms.llama_llm import LlamaLanguageModel
from src.embeddings.huggingface_embedding import HuggingFaceEmbedding
from src.vectorstores.chroma_vectorstore import ChromaVectorStore
from src.utils.logger import logger
from config.config import settings


def get_llm_instance(provider: str):
    """
    Get an LLM instance based on the provider name.

    Args:
        provider (str): Name of the LLM provider

    Returns:
        BaseLLM: Instance of the LLM

    Raises:
        ValueError: If the provider is not supported
    """
    llm_map = {
        'openai': lambda: OpenAILanguageModel(api_key=settings.OPENAI_API_KEY),
        'ollama': lambda: OllamaLanguageModel(base_url=settings.OLLAMA_BASE_URL),
        'bert': lambda: BERTLanguageModel(),
        'falcon': lambda: FalconLanguageModel(),
        'llama': lambda: LlamaLanguageModel(),
    }
    if provider not in llm_map:
        raise ValueError(f"Unsupported LLM provider: {provider}")
    return llm_map[provider]()


async def get_vector_store() -> Tuple[ChromaVectorStore, HuggingFaceEmbedding]:
    """
    Initialize and return the vector store together with its embedding model.

    Returns:
        Tuple[ChromaVectorStore, HuggingFaceEmbedding]: Initialized vector
            store and embedding model

    Raises:
        HTTPException: If vector store initialization fails
    """
    try:
        embedding = HuggingFaceEmbedding(model_name=settings.EMBEDDING_MODEL)
        vector_store = ChromaVectorStore(
            embedding_function=embedding.embed_documents,
            persist_directory=settings.CHROMA_PATH,
        )
        return vector_store, embedding
    except Exception as e:
        logger.error(f"Error initializing vector store: {str(e)}")
        raise HTTPException(status_code=500, detail="Failed to initialize vector store")
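

# ---------------------------------------------------------------------------
# Usage sketch (illustrative, safe to delete): surfacing unsupported-provider
# errors as client errors. get_llm_instance raises ValueError for unknown
# providers; in a FastAPI handler that is usually better reported as a 400
# than as an unhandled 500. This helper uses only names already imported in
# this module.
# ---------------------------------------------------------------------------
def _resolve_llm_or_400(provider: str):
    """Illustrative helper: resolve a provider or raise a 400 response."""
    try:
        return get_llm_instance(provider)
    except ValueError as e:
        # An unknown provider is a caller mistake, not a server fault.
        raise HTTPException(status_code=400, detail=str(e))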
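

# ---------------------------------------------------------------------------
# Usage sketch (illustrative, safe to delete): combining both helpers in a
# minimal retrieval-augmented flow. The `similarity_search`, `page_content`,
# and `generate` calls below are assumptions about the ChromaVectorStore and
# LLM wrapper interfaces, not guarantees made by this module.
# ---------------------------------------------------------------------------
async def _example_rag_answer(provider: str, question: str) -> str:
    llm = get_llm_instance(provider)                  # may raise ValueError
    vector_store, _embedding = await get_vector_store()
    # Assumed retrieval API: return the k most similar stored documents.
    docs = vector_store.similarity_search(question, k=3)
    context = "\n".join(doc.page_content for doc in docs)  # `page_content` assumed
    # Assumed generation API on the LLM wrapper classes.
    return llm.generate(f"Context:\n{context}\n\nQuestion: {question}")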