from langchain_experimental.text_splitter import SemanticChunker


def split_docs(docs, embedder):
    # Split the documents into semantic chunks using SemanticChunker
    # driven by the provided embedder.
    print("Splitting documents into chunks...")
    text_splitter = SemanticChunker(embeddings=embedder)
    documents = text_splitter.split_documents(docs)

    return documents
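

# Example usage (a minimal sketch, not part of the original file). It assumes
# langchain_community's TextLoader and HuggingFaceEmbeddings are installed and
# that an "example.txt" file exists; swap in whichever loader and embedder
# your project actually uses.
if __name__ == "__main__":
    from langchain_community.document_loaders import TextLoader
    from langchain_community.embeddings import HuggingFaceEmbeddings

    docs = TextLoader("example.txt").load()
    embedder = HuggingFaceEmbeddings(
        model_name="sentence-transformers/all-MiniLM-L6-v2"
    )
    chunks = split_docs(docs, embedder)
    print(f"Produced {len(chunks)} chunks")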