DocChatAI / services / vector_store.py
from langchain_community.vectorstores import FAISS
from langchain_huggingface import HuggingFaceEmbeddings

# Alternative embedding backends, kept for reference:
# from langchain_community.embeddings import SentenceTransformerEmbeddings
# from langchain_community.embeddings.ollama import OllamaEmbeddings


def create_vector_store(splits):
    """Embed the document splits and index them in an in-memory FAISS vector store."""
    embeddings = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")
    # embeddings = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")
    # embeddings = OllamaEmbeddings(model="nomic-embed-text")
    return FAISS.from_documents(splits, embeddings)
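
For context, the sketch below shows one way a caller might use create_vector_store; it is an illustration, not part of this module. The sample Document objects and the query string are hypothetical, assuming the splits passed in are standard LangChain Document objects produced elsewhere in DocChatAI.

# Minimal usage sketch (assumed caller, not part of vector_store.py).
from langchain_core.documents import Document

splits = [
    Document(page_content="FAISS is a library for efficient similarity search."),
    Document(page_content="LangChain wraps FAISS as an in-memory vector store."),
]

# Build the FAISS index from the pre-split documents.
vector_store = create_vector_store(splits)

# Retrieve the chunks most similar to a query (k=2 nearest neighbours).
results = vector_store.similarity_search("What is FAISS used for?", k=2)
for doc in results:
    print(doc.page_content)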