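"""Chunk text files with LangChain text splitters.

Reads every file under --input-dir, splits it into chunks using either a
fixed-size character splitter or a content-aware (NLTK sentence-based)
splitter, and writes each chunk to a separate .txt file in --output-dir.
"""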
import argparse
import os
from pathlib import Path

from langchain.text_splitter import CharacterTextSplitter, NLTKTextSplitter
from tqdm import tqdm


def fixed_size_chunking(text, chunk_size=256) -> list[str]:
    """Split text into fixed-size chunks on whitespace, with a small overlap."""
    splitter = CharacterTextSplitter(
        separator=" ",
        chunk_size=chunk_size,
        chunk_overlap=20,
    )
    return splitter.split_text(text)


def content_aware_chunking(text, chunk_size=256) -> list[str]:
    """Split text into sentence-aligned chunks using NLTK sentence tokenization."""
    # NLTKTextSplitter uses NLTK's sentence tokenizer, which needs the 'punkt' data package.
    splitter = NLTKTextSplitter(
        separator=".",
        chunk_size=chunk_size,
        chunk_overlap=20,
    )
    return splitter.split_text(text)


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--input-dir", help="input directory with text files", type=str,
                        default="docs")
    parser.add_argument("--output-dir", help="output directory to store chunked texts", type=str,
                        default="chunked_docs")
    parser.add_argument("--chunk-size", help="chunk size", type=int, default=256)
    parser.add_argument("--chunking-type", help="fixed_size or content_aware", type=str,
                        default="fixed_size")
    args = parser.parse_args()

    input_dir = Path(args.input_dir)
    output_dir = Path(args.output_dir)

    assert os.path.isdir(input_dir), "Input directory doesn't exist"
    os.makedirs(output_dir, exist_ok=True)

    # Walk the input directory recursively and chunk every regular file.
    for file in tqdm(input_dir.rglob("*")):
        if file.is_file():
            with open(file, 'r', encoding='utf8') as f:
                text = f.read()

            if args.chunking_type == "fixed_size":
                chunked_text = fixed_size_chunking(text, args.chunk_size)
            elif args.chunking_type == "content_aware":
                chunked_text = content_aware_chunking(text, args.chunk_size)
            else:
                raise ValueError("Invalid chunking type. Choose from 'fixed_size' or 'content_aware'")

            # Write each chunk to its own file, named after the source file and chunk index.
            for i, chunk in enumerate(chunked_text):
                with open(output_dir / f"{file.stem}_chunk_{i}.txt", "w", encoding='utf8') as f:
                    f.write(chunk)


if __name__ == "__main__":
    main()