# src/utils/llm_utils.py
from fastapi import HTTPException
from typing import Tuple
from src.llms.openai_llm import OpenAILanguageModel
from src.llms.ollama_llm import OllamaLanguageModel
from src.llms.bert_llm import BERTLanguageModel
from src.llms.falcon_llm import FalconLanguageModel
from src.llms.llama_llm import LlamaLanguageModel
from src.embeddings.huggingface_embedding import HuggingFaceEmbedding
from src.vectorstores.chroma_vectorstore import ChromaVectorStore
from src.vectorstores.optimized_vectorstore import get_optimized_vector_store
from src.utils.logger import logger
from config.config import settings
def get_llm_instance(provider: str):
    """
    Get LLM instance based on provider.

    Args:
        provider (str): Name of the LLM provider
            (one of: 'openai', 'ollama', 'bert', 'falcon', 'llama').

    Returns:
        BaseLLM: Instance of the requested LLM.

    Raises:
        ValueError: If the provider is not supported; the message lists
            the supported providers to make the error actionable.
    """
    # Lambdas defer construction (and any settings access) until the
    # selected provider is actually requested, so building the map is cheap
    # and does not touch credentials for unused providers.
    llm_map = {
        'openai': lambda: OpenAILanguageModel(api_key=settings.OPENAI_API_KEY),
        'ollama': lambda: OllamaLanguageModel(base_url=settings.OLLAMA_BASE_URL),
        'bert': lambda: BERTLanguageModel(),
        'falcon': lambda: FalconLanguageModel(),
        'llama': lambda: LlamaLanguageModel(),
    }
    if provider not in llm_map:
        supported = ", ".join(sorted(llm_map))
        raise ValueError(
            f"Unsupported LLM provider: {provider}. "
            f"Supported providers: {supported}"
        )
    return llm_map[provider]()
async def get_vector_store() -> Tuple[ChromaVectorStore, HuggingFaceEmbedding]:
    """
    Get vector store and embedding model instances.

    Uses the optimized implementation while maintaining backward
    compatibility: if the optimized path fails for any reason, logs the
    error and falls back to constructing a standard ChromaVectorStore.

    Returns:
        Tuple[ChromaVectorStore, HuggingFaceEmbedding]:
            Vector store and embedding model instances.
    """
    try:
        return await get_optimized_vector_store()
    except Exception as e:
        # Broad catch is deliberate: any failure in the optimized path
        # should degrade gracefully to the standard implementation rather
        # than propagate to callers.
        logger.error(f"Error getting optimized vector store: {str(e)}")
        logger.warning("Falling back to standard vector store implementation")
        embedding = HuggingFaceEmbedding(model_name=settings.EMBEDDING_MODEL)
        vector_store = ChromaVectorStore(
            embedding_function=embedding.embed_documents,
            persist_directory=settings.CHROMA_PATH
        )
        return vector_store, embedding