diff --git "a/notebooks/adaptive_rag.ipynb" "b/notebooks/adaptive_rag.ipynb" new file mode 100644--- /dev/null +++ "b/notebooks/adaptive_rag.ipynb" @@ -0,0 +1,1974 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": 19,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...\n",
+      "To disable this warning, you can either:\n",
+      "\t- Avoid using `tokenizers` before the fork if possible\n",
+      "\t- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)\n"
+     ]
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Requirement already satisfied: langchain-community in /system/conda/miniconda3/envs/cloudspace/lib/python3.10/site-packages (0.3.13)\n",
+      "[... dependency-resolution log trimmed: every other pinned requirement was already satisfied ...]\n",
+      "Collecting streamlit\n",
+      "  Downloading streamlit-1.41.1-py2.py3-none-any.whl.metadata (8.5 kB)\n",
+      "Downloading streamlit-1.41.1-py2.py3-none-any.whl (9.1 MB)\n",
+      "Installing collected packages: streamlit\n",
+      "  Attempting uninstall: streamlit\n",
+      "    Found existing installation: streamlit 1.27.2\n",
+      "    Uninstalling streamlit-1.27.2:\n",
+      "      Successfully uninstalled streamlit-1.27.2\n",
+      "Successfully installed streamlit-1.41.1\n"
+     ]
+    }
+   ],
+   "source": [
+    "!pip install -U langchain-community tiktoken langchain-openai langchainhub langchain langgraph duckduckgo-search langchain-groq langchain-huggingface sentence_transformers tavily-python crawl4ai docling easyocr FlagEmbedding \"chonkie[semantic]\" pinecone streamlit"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "\u001b[36m[INIT].... → Running post-installation setup...\u001b[0m\n",
+      "\u001b[36m[INIT].... → Installing Playwright browsers...\u001b[0m\n",
+      "You are using a frozen webkit browser which does not receive updates anymore on ubuntu20.04-x64. Please update to the latest version of your operating system to test up-to-date browsers.\n",
+      "Playwright Host validation warning: \n",
+      "╔══════════════════════════════════════════════════════╗\n",
+      "║ Host system is missing dependencies to run browsers. ║\n",
+      "║ Please install them with the following command:      ║\n",
+      "║                                                      ║\n",
+      "║     sudo playwright install-deps                     ║\n",
+      "║                                                      ║\n",
+      "║ Alternatively, use apt:                              ║\n",
+      "║     sudo apt-get install libxslt1.1\\                 ║\n",
+      "║         libwoff1\\                                    ║\n",
+      "║         libwebpdemux2\\                               ║\n",
+      "║         libenchant-2-2\\                              ║\n",
+      "║         libhyphen0\\                                  ║\n",
+      "║         libgles2                                     ║\n",
+      "║                                                      ║\n",
+      "║ <3 Playwright Team                                   ║\n",
+      "╚══════════════════════════════════════════════════════╝\n",
+      "    at validateDependenciesLinux (/system/conda/miniconda3/envs/cloudspace/lib/python3.10/site-packages/playwright/driver/package/lib/server/registry/dependencies.js:216:9)\n",
+      "\u001b[90m    at process.processTicksAndRejections (node:internal/process/task_queues:105:5)\u001b[39m\n",
+      "    at async Registry._validateHostRequirements (/system/conda/miniconda3/envs/cloudspace/lib/python3.10/site-packages/playwright/driver/package/lib/server/registry/index.js:753:43)\n",
+      "    at async Registry._validateHostRequirementsForExecutableIfNeeded (/system/conda/miniconda3/envs/cloudspace/lib/python3.10/site-packages/playwright/driver/package/lib/server/registry/index.js:851:7)\n",
+      "    at async Registry.validateHostRequirementsForExecutablesIfNeeded (/system/conda/miniconda3/envs/cloudspace/lib/python3.10/site-packages/playwright/driver/package/lib/server/registry/index.js:840:43)\n",
+      "    at async t.<anonymous> 
(/system/conda/miniconda3/envs/cloudspace/lib/python3.10/site-packages/playwright/driver/package/lib/cli/program.js:137:7)\n",
+      "\u001b[32m[COMPLETE] ● Playwright installation completed successfully.\u001b[0m\n",
+      "\u001b[36m[INIT].... → Starting database initialization...\u001b[0m\n",
+      "\u001b[32m[COMPLETE] ● Database initialization completed successfully.\u001b[0m\n",
+      "\u001b[32m[COMPLETE] ● Post-installation setup completed!\u001b[0m\n",
+      "\u001b[0m"
+     ]
+    }
+   ],
+   "source": [
+    "!crawl4ai-setup\n",
+    "# `!export` runs in a throwaway subshell and never reaches the kernel; use the IPython magic instead\n",
+    "%env PYTHONPATH=."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from pathlib import Path\n",
+    "from typing import List, Union\n",
+    "import logging\n",
+    "from dataclasses import dataclass, field\n",
+    "\n",
+    "from langchain_core.documents import Document as LCDocument\n",
+    "from langchain_core.document_loaders import BaseLoader\n",
+    "from docling.document_converter import DocumentConverter, PdfFormatOption\n",
+    "from docling.datamodel.base_models import InputFormat, ConversionStatus\n",
+    "from docling.datamodel.pipeline_options import (\n",
+    "    PdfPipelineOptions,\n",
+    "    EasyOcrOptions\n",
+    ")\n",
+    "\n",
+    "logging.basicConfig(level=logging.INFO)\n",
+    "_log = logging.getLogger(__name__)\n",
+    "\n",
+    "@dataclass\n",
+    "class ProcessingResult:\n",
+    "    \"\"\"Store results of document processing\"\"\"\n",
+    "    success_count: int = 0\n",
+    "    failure_count: int = 0\n",
+    "    partial_success_count: int = 0\n",
+    "    failed_files: List[str] = field(default_factory=list)\n",
+    "\n",
+    "class MultiFormatDocumentLoader(BaseLoader):\n",
+    "    \"\"\"Loader for multiple document formats that converts to LangChain documents\"\"\"\n",
+    "\n",
+    "    def __init__(\n",
+    "        self,\n",
+    "        file_paths: Union[str, List[str]],\n",
+    "        enable_ocr: bool = True,\n",
+    "        enable_tables: bool = True\n",
+    "    ):\n",
+    "        self._file_paths = [file_paths] if isinstance(file_paths, str) else file_paths\n",
+    "        self._enable_ocr = enable_ocr\n",
+    "        self._enable_tables = enable_tables\n",
+    "        self._converter = self._setup_converter()\n",
+    "\n",
+    "    def _setup_converter(self):\n",
+    "        \"\"\"Set up the document converter with appropriate options\"\"\"\n",
+    "        # Configure pipeline options\n",
+    "        pipeline_options = PdfPipelineOptions(\n",
+    "            do_ocr=False,\n",
+    "            do_table_structure=False,\n",
+    "            ocr_options=EasyOcrOptions(force_full_page_ocr=True)\n",
+    "        )\n",
+    "        if self._enable_ocr:\n",
+    "            pipeline_options.do_ocr = True\n",
+    "        if self._enable_tables:\n",
+    "            pipeline_options.do_table_structure = True\n",
+    "            pipeline_options.table_structure_options.do_cell_matching = True\n",
+    "\n",
+    "        # Create converter with supported formats\n",
+    "        return DocumentConverter(\n",
+    "            allowed_formats=[\n",
+    "                InputFormat.PDF,\n",
+    "                InputFormat.IMAGE,\n",
+    "                InputFormat.DOCX,\n",
+    "                InputFormat.HTML,\n",
+    "                InputFormat.PPTX,\n",
+    "                InputFormat.ASCIIDOC,\n",
+    "                InputFormat.MD,\n",
+    "            ],\n",
+    "            format_options={\n",
+    "                InputFormat.PDF: PdfFormatOption(\n",
+    "                    pipeline_options=pipeline_options,\n",
+    "                )\n",
+    "            }\n",
+    "        )\n",
+    "\n",
+    "    def lazy_load(self):\n",
+    "        \"\"\"Convert documents and yield LangChain documents\"\"\"\n",
+    "        results = ProcessingResult()\n",
+    "\n",
+    "        for file_path in self._file_paths:\n",
+    "            try:\n",
+    "                path = Path(file_path)\n",
+    "                if not path.exists():\n",
+    "                    _log.warning(f\"File not found: {file_path}\")\n",
+    "                    results.failure_count += 1\n",
+    "                    results.failed_files.append(file_path)\n",
+    "                    continue\n",
+    "\n",
+    "                conversion_result = self._converter.convert(path)\n",
+    "\n",
+    "                if conversion_result.status == ConversionStatus.SUCCESS:\n",
+    "                    results.success_count += 1\n",
+    "                    text = conversion_result.document.export_to_markdown()\n",
+    "                    metadata = {\n",
+    "                        'source': str(path),\n",
+    "                        'file_type': path.suffix,\n",
+    "                    }\n",
+    "                    yield LCDocument(\n",
+    "                        page_content=text,\n",
+    "                        metadata=metadata\n",
+    "                    )\n",
+    "                elif conversion_result.status == ConversionStatus.PARTIAL_SUCCESS:\n",
+    "                    results.partial_success_count += 1\n",
+    "                    _log.warning(f\"Partial conversion for {file_path}\")\n",
+    "                    text = conversion_result.document.export_to_markdown()\n",
+    "                    metadata = {\n",
+    "                        'source': str(path),\n",
+    "                        'file_type': path.suffix,\n",
+    "                        'conversion_status': 'partial'\n",
+    "                    }\n",
+    "                    yield LCDocument(\n",
+    "                        page_content=text,\n",
+    "                        metadata=metadata\n",
+    "                    )\n",
+    "                else:\n",
+    "                    results.failure_count += 1\n",
+    "                    results.failed_files.append(file_path)\n",
+    "                    _log.error(f\"Failed to convert {file_path}\")\n",
+    "\n",
+    "            except Exception as e:\n",
+    "                _log.error(f\"Error processing {file_path}: {str(e)}\")\n",
+    "                results.failure_count += 1\n",
+    "                results.failed_files.append(file_path)\n",
+    "\n",
+    "        # Log final results\n",
+    "        total = results.success_count + results.partial_success_count + results.failure_count\n",
+    "        _log.info(\n",
+    "            f\"Processed {total} documents:\\n\"\n",
+    "            f\"- Successfully converted: {results.success_count}\\n\"\n",
+    "            f\"- Partially converted: {results.partial_success_count}\\n\"\n",
+    "            f\"- Failed: {results.failure_count}\"\n",
+    "        )\n",
+    "        if results.failed_files:\n",
+    "            _log.info(\"Failed files:\")\n",
+    "            for file in results.failed_files:\n",
+    "                _log.info(f\"- {file}\")\n",
+    "\n",
+    "\n",
+    "# if __name__ == '__main__':\n",
+    "#     # Load documents from a list of file paths\n",
+    "#     loader = MultiFormatDocumentLoader(\n",
+    "#         file_paths=[\n",
+    "#             # './data/2404.19756v1.pdf',\n",
+    "#             # './data/OD429347375590223100.pdf',\n",
+    "#             './data/Project Report Format.docx',\n",
+    "#             # './data/UNIT 2 GENDER BASED VIOLENCE.pptx'\n",
+    "#         ],\n",
+    "#         enable_ocr=False,\n",
+    "#         enable_tables=True\n",
+    "#     )\n",
+    "#     for doc in loader.lazy_load():\n",
+    "#         print(doc.page_content)\n",
+    "#         print(doc.metadata)\n",
+    "#         # save each document to a .md file (note: 'w' overwrites on every iteration)\n",
+    "#         with open('output.md', 'w') as f:\n",
+    "#             f.write(doc.page_content)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "INFO:datasets:PyTorch version 2.5.1 available.\n"
+     ]
+    }
+   ],
+   "source": [
+    "from typing import List\n",
+    "import numpy as np\n",
+    "from chonkie.embeddings import BaseEmbeddings\n",
+    "from FlagEmbedding import BGEM3FlagModel\n",
+    "from chonkie import SDPMChunker\n",
+    "\n",
+    "class BGEM3Embeddings(BaseEmbeddings):\n",
+    "    def __init__(self, model_name):\n",
+    "        self.model = BGEM3FlagModel(model_name, use_fp16=True)\n",
+    "        self.task = \"separation\"\n",
+    "\n",
+    "    @property\n",
+    "    def dimension(self):\n",
+    "        return 1024\n",
+    "\n",
+    "    def embed(self, text: str):\n",
+    "        # Dense vector for a single text\n",
+    "        return self.model.encode([text], return_dense=True, return_sparse=False, return_colbert_vecs=False)['dense_vecs']\n",
+    "\n",
+    "    def embed_batch(self, texts: List[str]):\n",
+    "        embeddings = self.model.encode(texts, return_dense=True, return_sparse=False, return_colbert_vecs=False)\n",
+    "        return embeddings['dense_vecs']\n",
+    "\n",
+    "    def count_tokens(self, text: str):\n",
+    "        return len(self.model.tokenizer.encode(text))\n",
+    "\n",
+    "    def count_tokens_batch(self, texts: List[str]):\n",
+    "        encodings = self.model.tokenizer(texts)\n",
+    "        return [len(enc) for enc in encodings[\"input_ids\"]]\n",
+    "\n",
+    "    def get_tokenizer_or_token_counter(self):\n",
+    "        return self.model.tokenizer\n",
+    "\n",
+    "    def similarity(self, u: \"np.ndarray\", v: \"np.ndarray\"):\n",
+    "        \"\"\"Compute cosine similarity between two embeddings.\"\"\"\n",
+    "        # BGE-M3 dense vectors are L2-normalised, so a dot product is cosine similarity\n",
+    "        return u @ v.T\n",
+    "\n",
+    "    @classmethod\n",
+    "    def is_available(cls):\n",
+    "        return True\n",
+    "\n",
+    "    def __repr__(self):\n",
+    "        return \"bgem3\"\n",
+    "\n",
+    "\n",
+    "# def main():\n",
+    "#     # Initialize the BGE M3 embeddings model\n",
+    "#     embedding_model = BGEM3Embeddings(\n",
+    "#         model_name=\"BAAI/bge-m3\"\n",
+    "#     )\n",
+    "\n",
+    "#     # Initialize the SDPM chunker\n",
+    "#     chunker = SDPMChunker(\n",
+    "#         embedding_model=embedding_model,\n",
+    "#         chunk_size=256,\n",
+    "#         threshold=0.7,\n",
+    "#         skip_window=2\n",
+    "#     )\n",
+    "\n",
+    "#     with open('./output.md', 'r') as file:\n",
+    "#         text = file.read()\n",
+    "\n",
+    "#     # Generate chunks\n",
+    "#     chunks = chunker.chunk(text)\n",
+    "\n",
+    "#     # Print the chunks\n",
+    "#     for i, chunk in enumerate(chunks, 1):\n",
+    "#         print(f\"\\nChunk {i}:\")\n",
+    "#         print(f\"Text: {chunk.text}\")\n",
+    "#         print(f\"Token count: {chunk.token_count}\")\n",
+    "#         print(f\"Start index: {chunk.start_index}\")\n",
+    "#         print(f\"End index: {chunk.end_index}\")\n",
+    "#         print(f\"no of sentences: {len(chunk.sentences)}\")\n",
+    "#         print(\"-\" * 80)\n",
+    "\n",
+    "# if __name__ == \"__main__\":\n",
+    "#     main()"
+   ]
+  },
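+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The cells that follow load the embedding model and retriever and define an `AdvancedWebCrawler`; its `search_and_crawl` method appears below. The `_create_web_search_tool` helper it calls is outside this excerpt; a minimal sketch consistent with how the results are consumed (`result['link']`) might look like the following, assuming the `langchain-community` DuckDuckGo wrapper, whose `output_format=\"list\"` mode returns dicts with `title`, `snippet` and `link` keys:\n",
+    "\n",
+    "```python\n",
+    "# Hypothetical sketch only; the real helper is defined with the class itself.\n",
+    "from langchain_community.tools import DuckDuckGoSearchResults\n",
+    "\n",
+    "def _create_web_search_tool(self):\n",
+    "    # .invoke({\"query\": ...}) returns a list of result dicts whose\n",
+    "    # 'link' field search_and_crawl reads below.\n",
+    "    return DuckDuckGoSearchResults(\n",
+    "        num_results=self.max_search_results,  # attribute assumed set in __init__\n",
+    "        output_format=\"list\",\n",
+    "    )\n",
+    "```"
+   ]
+  },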
+  {
+   "cell_type": "code",
+   "execution_count": 7,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "application/vnd.jupyter.widget-view+json": {
+       "model_id": "68e181477fef4e4a88ee6e25a1ece83d",
+       "version_major": 2,
+       "version_minor": 0
+      },
+      "text/plain": [
+       "Fetching 30 files:   0%|          | 0/30 [00:00<?, ?it/s]"
[…]
+    "    async def search_and_crawl(self, query: str) -> List[Dict]:\n",
+    "        \"\"\"\n",
+    "        Perform a web search and crawl the results\n",
+    "        \n",
+    "        Args:\n",
+    "            query (str): Search query\n",
+    "        \n",
+    "        Returns:\n",
+    "            List of crawled content results\n",
+    "        \"\"\"\n",
+    "        # Perform web search\n",
+    "        search_tool = self._create_web_search_tool()\n",
+    "        try:\n",
+    "            search_results = search_tool.invoke({\"query\": query})\n",
+    "            \n",
+    "            # Extract URLs from search results\n",
+    "            urls = [result['link'] for result in search_results]\n",
+    "            print(f\"Found {len(urls)} URLs for query: {query}\")\n",
+    "            \n",
+    "            # Crawl URLs\n",
+    "            crawl_results = await self.crawl_urls(urls, user_query=query)\n",
+    "            \n",
+    "            return crawl_results\n",
+    "        \n",
+    "        except Exception as e:\n",
+    "            print(f\"Web search and crawl error: {e}\")\n",
+    "            return []\n",
+    "\n",
+    "# def main():\n",
+    "#     # Example usage\n",
+    "#     crawler = AdvancedWebCrawler(\n",
+    "#         max_search_results=5,\n",
+    "#         word_count_threshold=50,\n",
+    "#         content_filter_type='f',\n",
+    "#         filter_threshold=0.48\n",
+    "#     )\n",
+    "\n",
+    "#     test_queries = [\n",
+    "#         \"Latest developments in AI agents\",\n",
+    "#         \"Today's weather forecast in Kolkata\",\n",
+    "#     ]\n",
+    "\n",
+    "#     for query in test_queries:\n",
+    "#         # Run search and crawl asynchronously\n",
+    "#         results = asyncio.run(crawler.search_and_crawl(query))\n",
+    "\n",
+    "#         print(f\"\\nResults for query: {query}\")\n",
+    "#         for result in results:\n",
+    "#             print(f\"URL: {result['url']}\")\n",
+    "#             print(f\"Success: {result['success']}\")\n",
+    "#             print(f\"Title: {result['title']}\")\n",
+    "#             print(f\"Word Count: {result['word_count']}\")\n",
+    "#             print(f\"Content Preview: {result['content'][:500]}...\\n\")\n",
+    "\n",
+    "# if __name__ == \"__main__\":\n",
+    "#     main()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 15,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from typing import List, TypedDict\n",
+    "\n",
+    "class GraphState(TypedDict):\n",
+    "    \"\"\"\n",
+    "    Represents the state of our adaptive RAG graph.\n",
+    "\n",
+    "    Attributes:\n",
+    "        question (str): Original user question\n",
+    "        generation (str, optional): LLM generated answer\n",
+    "        documents: Retrieved or web-searched documents; the custom retriever\n",
+    "            and the web search node both return plain dicts with a\n",
+    "            'page_content' key, so dicts (not Document objects) flow through\n",
+    "    \"\"\"\n",
+    "    question: str\n",
+    "    generation: str | None\n",
+    "    documents: List[dict]\n"
+   ]
+  },
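+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "In `route_to_datasource` below, routing is controlled by a simple `enable_websearch` flag; the commented-out `route_query` call hints at an LLM-based router. That helper is defined outside this excerpt; a minimal sketch (signature assumed) might look like:\n",
+    "\n",
+    "```python\n",
+    "# Hypothetical sketch of an LLM router between the vectorstore and web search.\n",
+    "from langchain_core.prompts import PromptTemplate\n",
+    "from langchain_core.output_parsers import StrOutputParser\n",
+    "\n",
+    "def route_query(question: str, llm) -> str:\n",
+    "    \"\"\"Return 'web_search' or 'vectorstore' for a given question.\"\"\"\n",
+    "    prompt = PromptTemplate.from_template(\n",
+    "        \"You are an expert at routing a user question to a vectorstore or web search. \"\n",
+    "        \"Use the vectorstore for questions about the ingested documents and web search \"\n",
+    "        \"for current events or anything outside them. Respond with exactly one word: \"\n",
+    "        \"'vectorstore' or 'web_search'.\\nQuestion: {question}\"\n",
+    "    )\n",
+    "    answer = (prompt | llm | StrOutputParser()).invoke({\"question\": question})\n",
+    "    return \"web_search\" if \"web_search\" in answer.lower() else \"vectorstore\"\n",
+    "```"
+   ]
+  },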
+  {
+   "cell_type": "code",
+   "execution_count": 16,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from langgraph.graph import END, StateGraph, START\n",
+    "from langchain_core.prompts import PromptTemplate\n",
+    "import asyncio\n",
+    "from langchain_core.output_parsers import StrOutputParser\n",
+    "\n",
+    "def perform_web_search(question: str):\n",
+    "    \"\"\"\n",
+    "    Perform web search using the AdvancedWebCrawler.\n",
+    "    \n",
+    "    Args:\n",
+    "        question (str): User's input question\n",
+    "    \n",
+    "    Returns:\n",
+    "        List: Web search results\n",
+    "    \"\"\"\n",
+    "    # Initialize web crawler\n",
+    "    crawler = AdvancedWebCrawler(\n",
+    "        max_search_results=5,\n",
+    "        word_count_threshold=50,\n",
+    "        content_filter_type='f',\n",
+    "        filter_threshold=0.48\n",
+    "    )\n",
+    "    # NOTE: asyncio.run() raises if an event loop is already running (as in\n",
+    "    # Jupyter); there, apply nest_asyncio or await the coroutine directly.\n",
+    "    results = asyncio.run(crawler.search_and_crawl(question))\n",
+    "    \n",
+    "    return results\n",
+    "\n",
+    "\n",
+    "def create_adaptive_rag_workflow(retriever, llm, top_k=5, enable_websearch=False):\n",
+    "    \"\"\"\n",
+    "    Create the adaptive RAG workflow graph.\n",
+    "    \n",
+    "    Args:\n",
+    "        retriever: Vector store retriever\n",
+    "        llm: Chat model used for grading, rewriting and generation\n",
+    "        top_k (int): Number of documents to retrieve per query\n",
+    "        enable_websearch (bool): Route questions to web search instead of the vectorstore\n",
+    "    \n",
+    "    Returns:\n",
+    "        Compiled LangGraph workflow\n",
+    "    \"\"\"\n",
+    "    def retrieve(state: GraphState):\n",
+    "        \"\"\"Retrieve documents from vectorstore.\"\"\"\n",
+    "        print(\"---RETRIEVE---\")\n",
+    "        question = state['question']\n",
+    "        documents = retriever.invoke(question, top_k)\n",
+    "        print(f\"Retrieved {len(documents)} documents.\")\n",
+    "        print(documents)\n",
+    "        return {\"documents\": documents, \"question\": question}\n",
+    "\n",
+    "    def route_to_datasource(state: GraphState):\n",
+    "        \"\"\"Route question to web search or vectorstore.\"\"\"\n",
+    "        print(\"---ROUTE QUESTION---\")\n",
+    "        # A per-question LLM router could decide here instead of the flag:\n",
+    "        # source = route_query(state['question'], llm)\n",
+    "        if enable_websearch:\n",
+    "            print(\"---ROUTE TO WEB SEARCH---\")\n",
+    "            return \"web_search\"\n",
+    "        else:\n",
+    "            print(\"---ROUTE TO RAG---\")\n",
+    "            return \"vectorstore\"\n",
+    "\n",
+    "    def generate_answer(state: GraphState):\n",
+    "        \"\"\"Generate answer using retrieved documents.\"\"\"\n",
+    "        print(\"---GENERATE---\")\n",
+    "        question = state['question']\n",
+    "        documents = state['documents']\n",
+    "        \n",
+    "        # Prepare context\n",
+    "        context = \"\\n\\n\".join([doc[\"page_content\"] for doc in documents])\n",
+    "        prompt_template = PromptTemplate.from_template(\"\"\"You are an assistant for question-answering tasks. Use the following pieces of retrieved context to answer the question. If you don't know the answer, just say that you don't know. Use three sentences maximum and keep the answer concise.\n",
+    "    Question: {question}\n",
+    "    Context: {context}\n",
+    "    Answer:\"\"\")\n",
+    "        # Generate answer\n",
+    "        rag_chain = prompt_template | llm | StrOutputParser()\n",
+    "\n",
+    "        generation = rag_chain.invoke({\"context\": context, \"question\": question})\n",
+    "        \n",
+    "        return {\"generation\": generation, \"documents\": documents, \"question\": question}\n",
+    "\n",
+    "    def grade_documents(state: GraphState):\n",
+    "        \"\"\"Filter relevant documents.\"\"\"\n",
+    "        print(\"---GRADE DOCUMENTS---\")\n",
+    "        question = state['question']\n",
+    "        documents = state['documents']\n",
+    "        \n",
+    "        # Keep only documents the grader marks relevant\n",
+    "        filtered_docs = []\n",
+    "        for doc in documents:\n",
+    "            score = grade_document_relevance(question, doc[\"page_content\"], llm)\n",
+    "            if score == \"yes\":\n",
+    "                filtered_docs.append(doc)\n",
+    "        \n",
+    "        return {\"documents\": filtered_docs, \"question\": question}\n",
+    "\n",
+    "    def web_search(state: GraphState):\n",
+    "        \"\"\"Perform web search.\"\"\"\n",
+    "        print(\"---WEB SEARCH---\")\n",
+    "        question = state['question']\n",
+    "        \n",
+    "        # Perform web search\n",
+    "        results = perform_web_search(question)\n",
+    "        web_documents = [\n",
+    "            {\n",
+    "                \"page_content\": result['content'],\n",
+    "                \"metadata\": {\"source\": result['url']}\n",
+    "            } for result in results\n",
+    "        ]\n",
+    "        \n",
+    "        return {\"documents\": web_documents, \"question\": question}\n",
+    "\n",
+    "    def check_generation_quality(state: GraphState):\n",
+    "        \"\"\"Check the quality of the generated answer.\"\"\"\n",
+    "        print(\"---ASSESS GENERATION---\")\n",
+    "        question = state['question']\n",
+    "        generation = state['generation']\n",
+    "        \n",
+    "        # NOTE: a hallucination check of the generation against\n",
+    "        # state['documents'] could be added here before grading quality.\n",
+    "        quality_score = grade_answer_quality(question, generation, llm)\n",
+    "        if quality_score == \"yes\":\n",
+    "            print(\"---Answer quality is good.---\")\n",
+    "        else:\n",
+    "            print(\"---Answer quality is poor.---\")\n",
+    "        return \"end\" if quality_score == \"yes\" else \"rewrite\"\n",
+    "\n",
+    "    # Create workflow\n",
+    "    workflow = StateGraph(GraphState)\n",
+    "\n",
+    "    # Add nodes\n",
+    "    workflow.add_node(\"vectorstore\", retrieve)\n",
+    "    workflow.add_node(\"web_search\", web_search)\n",
+    "    workflow.add_node(\"grade_documents\", grade_documents)\n",
+    "    workflow.add_node(\"generate\", generate_answer)\n",
+    "    workflow.add_node(\"rewrite_query\", lambda state: {\n",
+    "        \"question\": rewrite_query(state['question'], llm),\n",
+    "        \"documents\": [],\n",
+    "        \"generation\": None\n",
+    "    })\n",
+    "\n",
+    "    # Define edges\n",
+    "    workflow.add_conditional_edges(\n",
+    "        START,\n",
+    "        route_to_datasource,\n",
+    "        {\n",
+    "            \"web_search\": \"web_search\",\n",
+    "            \"vectorstore\": \"vectorstore\"\n",
+    "        }\n",
+    "    )\n",
+    "    \n",
+    "    workflow.add_edge(\"web_search\", \"generate\")\n",
+    "    workflow.add_edge(\"vectorstore\", \"grade_documents\")\n",
+    "    \n",
+    "    workflow.add_conditional_edges(\n",
+    "        \"grade_documents\",\n",
+    "        lambda state: \"generate\" if state['documents'] else \"rewrite_query\"\n",
+    "    )\n",
+    "    \n",
+    "    workflow.add_edge(\"rewrite_query\", \"vectorstore\")\n",
+    "    \n",
+    "    # check_generation_quality only ever returns 'end' or 'rewrite'\n",
+    "    workflow.add_conditional_edges(\n",
+    "        \"generate\",\n",
+    "        check_generation_quality,\n",
+    "        {\n",
+    "            \"end\": END,\n",
+    "            \"rewrite\": \"rewrite_query\"\n",
+    "        }\n",
+    "    )\n",
+    "\n",
+    "    # Compile the workflow\n",
+    "    app = workflow.compile()\n",
+    "    return app\n",
+    "\n",
+    "def run_adaptive_rag(retriever, question: str, llm, top_k=5, enable_websearch=False):\n",
+    "    \"\"\"\n",
+    "    Run the adaptive RAG workflow for a given question.\n",
+    "    \n",
+    "    Args:\n",
+    "        retriever: Vector store retriever\n",
+    "        question (str): User's input question\n",
+    "        llm: Chat model used by the workflow\n",
+    "        top_k (int): Number of documents to retrieve\n",
+    "        enable_websearch (bool): Route to web search instead of the vectorstore\n",
+    "    \n",
+    "    Returns:\n",
+    "        str: Generated answer\n",
+    "    \"\"\"\n",
+    "    # Create workflow\n",
+    "    workflow = create_adaptive_rag_workflow(retriever, llm, top_k, enable_websearch=enable_websearch)\n",
+    "    \n",
+    "    # Run workflow\n",
+    "    final_state = None\n",
+    "    for output in workflow.stream({\"question\": question}, config={\"recursion_limit\": 5}):\n",
+    "        for key, value in output.items():\n",
+    "            print(f\"Node '{key}':\")\n",
+    "            # Optionally print state details\n",
+    "            # print(value)\n",
+    "            final_state = value\n",
+    "    \n",
+    "    return final_state.get('generation', 'No answer could be generated.')\n",
+    "\n",
+    "# if __name__ == \"__main__\":\n",
+    "#     # Example usage\n",
+    "#     from vectorstore.pinecone_db import PINECONE_API_KEY, ingest_data, get_retriever, load_documents, process_chunks, save_to_parquet\n",
+    "#     from pinecone import Pinecone\n",
+    "\n",
+    "#     # Load and prepare documents\n",
+    "#     pc = Pinecone(api_key=PINECONE_API_KEY)\n",
+    "\n",
+    "#     # Define input files\n",
+    "#     file_paths = [\n",
+    "#         # './data/2404.19756v1.pdf',\n",
+    "#         # './data/OD429347375590223100.pdf',\n",
+    "#         # './data/Project Report Format.docx',\n",
+    "#         './data/UNIT 2 GENDER BASED VIOLENCE.pptx'\n",
+    "#     ]\n",
+    "\n",
+    "#     # Process pipeline\n",
+    "#     try:\n",
+    "#         # Step 1: Load and combine documents\n",
+    "#         print(\"Loading documents...\")\n",
+    "#         markdown_path = load_documents(file_paths)\n",
+    "\n",
+    "#         # Step 2: Process into chunks with embeddings\n",
+    "#         print(\"Processing chunks...\")\n",
+    "#         chunks = process_chunks(markdown_path)\n",
+    "\n",
+    "#         # Step 3: Save to Parquet\n",
+    "#         print(\"Saving to Parquet...\")\n",
+    "#         parquet_path = save_to_parquet(chunks)\n",
+    "\n",
+    "#         # Step 4: Ingest into Pinecone\n",
+    "#         print(\"Ingesting into Pinecone...\")\n",
+    "#         ingest_data(pc,\n",
+    "#                     parquet_path=parquet_path,\n",
+    "#                     text_column=\"text\",\n",
+    "#                     pinecone_client=pc,\n",
+    "#         )\n",
+    "\n",
+    "#         # Step 5: Test retrieval\n",
+    "#         print(\"\\nTesting retrieval...\")\n",
+    "#         retriever = get_retriever(\n",
+    "#             pinecone_client=pc,\n",
+    "#             index_name=\"vector-index\",\n",
+    "#             namespace=\"rag\"\n",
+    "#         )\n",
+    "\n",
+    "#     except Exception as e:\n",
+    "#         print(f\"Error in pipeline: {str(e)}\")\n",
+    "\n",
+    "#     llm = ChatOllama(model=\"llama3.2\", temperature=0.1, num_predict=256, top_p=0.5)\n",
+    "\n",
+    "#     # Test questions\n",
+    "#     test_questions = [\n",
+    "#         # \"What are the key components of AI agent memory?\",\n",
+    "#         # \"Explain prompt engineering techniques\",\n",
+    "#         # \"What are recent advancements in adversarial attacks on LLMs?\"\n",
+    "#         \"what are the trending papers that are published in NeurIPS 2024?\"\n",
+    "#     ]\n",
+    "\n",
+    "#     # Run workflow for each test question\n",
+    "#     for question in test_questions:\n",
+    "#         print(f\"\\n--- Processing Question: {question} ---\")\n",
+    "#         answer = run_adaptive_rag(retriever, question, llm)\n",
+    "#         print(\"\\nFinal Answer:\", answer)"
+   ]
+  },
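+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The workflow above calls three LLM-graded helpers, `grade_document_relevance`, `rewrite_query` and `grade_answer_quality`, whose definitions sit in an earlier part of the notebook that is not shown in this excerpt. Minimal sketches consistent with how they are invoked (each takes the llm and returns a short string) might look like:\n",
+    "\n",
+    "```python\n",
+    "# Hypothetical sketches; the notebook's real graders are defined in earlier cells.\n",
+    "from langchain_core.prompts import PromptTemplate\n",
+    "from langchain_core.output_parsers import StrOutputParser\n",
+    "\n",
+    "def _yes_no(chain, inputs) -> str:\n",
+    "    # Normalise the LLM reply to a strict 'yes'/'no'\n",
+    "    return \"yes\" if \"yes\" in chain.invoke(inputs).lower() else \"no\"\n",
+    "\n",
+    "def grade_document_relevance(question: str, document: str, llm) -> str:\n",
+    "    prompt = PromptTemplate.from_template(\n",
+    "        \"You are a grader assessing whether a retrieved document is relevant to a user question.\\n\"\n",
+    "        \"Document: {document}\\nQuestion: {question}\\nAnswer strictly 'yes' or 'no'.\"\n",
+    "    )\n",
+    "    return _yes_no(prompt | llm | StrOutputParser(), {\"question\": question, \"document\": document})\n",
+    "\n",
+    "def rewrite_query(question: str, llm) -> str:\n",
+    "    prompt = PromptTemplate.from_template(\n",
+    "        \"Rewrite this question to be clearer and better suited for semantic retrieval. \"\n",
+    "        \"Return only the rewritten question.\\nQuestion: {question}\"\n",
+    "    )\n",
+    "    return (prompt | llm | StrOutputParser()).invoke({\"question\": question})\n",
+    "\n",
+    "def grade_answer_quality(question: str, generation: str, llm) -> str:\n",
+    "    prompt = PromptTemplate.from_template(\n",
+    "        \"You are a grader assessing whether an answer resolves a question.\\n\"\n",
+    "        \"Question: {question}\\nAnswer: {generation}\\nAnswer strictly 'yes' or 'no'.\"\n",
+    "    )\n",
+    "    return _yes_no(prompt | llm | StrOutputParser(), {\"question\": question, \"generation\": generation})\n",
+    "```"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 18,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",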
"text": [ + "INFO:pinecone_plugin_interface.logging:Discovering subpackages in _NamespacePath(['/home/zeus/miniconda3/envs/cloudspace/lib/python3.10/site-packages/pinecone_plugins'])\n", + "INFO:pinecone_plugin_interface.logging:Looking for plugins in pinecone_plugins.inference\n", + "INFO:pinecone_plugin_interface.logging:Installing plugin inference into Pinecone\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Enter the paths to your documents (one per line).\n", + "Press Enter twice when done:\n", + "Processing documents...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:docling.document_converter:Going to convert document batch...\n", + "INFO:docling.pipeline.base_pipeline:Processing document Project Report Format.docx\n", + "INFO:docling.document_converter:Finished converting document Project Report Format.docx in 0.44 sec.\n", + "INFO:__main__:Processed 1 documents:\n", + "- Successfully converted: 1\n", + "- Partially converted: 0\n", + "- Failed: 0\n", + "pre tokenize: 100%|██████████| 2/2 [00:00<00:00, 133.11it/s]\n", + "Inference Embeddings: 100%|██████████| 2/2 [01:36<00:00, 48.32s/it]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Saving to Parquet: /tmp/tmpwx9hgq_7/documents.parquet\n", + "Saved to Parquet: /tmp/tmpwx9hgq_7/documents.parquet\n", + "Reading Parquet file: /tmp/tmpwx9hgq_7/documents.parquet\n", + "Total records: 26\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:pinecone_plugin_interface.logging:Discovering subpackages in _NamespacePath(['/home/zeus/miniconda3/envs/cloudspace/lib/python3.10/site-packages/pinecone_plugins'])\n", + "INFO:pinecone_plugin_interface.logging:Looking for plugins in pinecone_plugins.inference\n", + " 0%| | 0/1 [00:00\\n\\nB. Bhagabati et al.\\n\\nFig. 21. Variation of inference with epoch.\\n\\n\\n\\nFig. 22. Variation of NMS with epoch.\\n\\n\\n\\nFig. 23. Variation of accuracy with epoch.\\n\\n\\n\\ndetermine how effective the suggested approach is. The system that is being offered is set up to avoid conflicts between humans and animals, as well as to identify a wide variety of animals even when the lighting,\\n\\ndistance, and background are all different, while also providing for the automatic generation of alarms. 
The system has been built and tested in a form that is resident in the cloud, with cameras set at four different\\n\\nB.', 'score': 0.367663801}]\n", + "Node 'vectorstore':\n", + "---GRADE DOCUMENTS---\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", + "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", + "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", + "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", + "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Node 'grade_documents':\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Node 'rewrite_query':\n", + "---RETRIEVE---\n", + "Retrieved 5 documents.\n", + "[{'page_content': ' In the process of detecting wild animals, further improvement in accuracy is needed. Further, there is an opportunity to develop approaches that are proficient\\n\\nin working well in a generalized approach under both day and night conditions with background variations for detecting human-animal conflict. The YOLOv5 model, with certain modifications and additions, is found to be suitable for developing a generalized framework for the detection of human-animal conflict under both day and night conditions with background variations. Especially, the addition of attention layers as part of the primary detection network helps not only to focus on key areas of the scene under study but also provides optimization in the training and enhanced accuracy. In view of the above, in this work, a SENet attention layer (Hu et al., 2019) is added to YOLOv5 for detecting human-animal conflict under both day and night conditions with background variations. The proposed network is extensively trained with samples of public databases and video streams capturing the scenes under study. The combination produces appreciably better outcomes.', 'score': 0.383713484}, {'page_content': '| | | | |\\n\\na A is admonishment coefficient of total population (Times New Roman 10)\\n\\nb B is Bombardment coefficient of the mean population (Times New Roman 10)\\n\\n- Motivation of the study\\n\\nAlarming rate of climate change, sea level rise and other natural disasters are to be managed efficiently. Assessment and management of green house gases thus become very much essential..\\n\\n1 Adapted from Monika and Ram, 2008 (Times New Roman 10)\\n\\nSample sheet 11\\n\\n##### The satellite image as given in Figure 1.1 shows the area from where samples are collected.\\n\\n\\n\\nFigure 1.1 Title of the figure (Times New Roman 11)\\n\\n### REFERENCES\\n\\n- Attanas, D.B. and Monica, H.G. (2012). Effects of green house gases, In Proc. IOOC-ECOC, pp. 557-998.\\n\\n- Gurudeep, P.R. and Mahin, P. (2009).', 'score': 0.378645808}, {'page_content': ' A method for automated approach for humananimal conflict minimisation using YOLO and SENet Attention Framework.\\n\\nInput: Number of Classes, Class names, images, videos\\n\\nOutput: Alarm generated due to detection of animal\\n\\n- 1. Load image dataset\\n- 2. 
Define model architecture as follows\\n- 2a. Backbone network (YOLOv5sBackbone with SENet)\\n- 2b. Neck Network (YOLOv5sNeck)\\n\\n2c.\\n\\nDetection head (YOLOv5sHead)\\n\\n3. Train the model:\\n\\n3a.\\n\\nCompute loss on a batch of images\\n\\n3b.\\n\\nCompute gradients and update weights using the optimiser\\n\\n4. Prediction:\\n\\n4a.\\n\\nRemove overlapping prediction\\n\\n4b.\\n\\nOutput final detection results (as bounding boxes, class probabilities, confidence score)\\n\\n5. Detection:\\n\\n5a.\\n\\nUse the model weight and detect objects from input images or video captured by cameras installed at different\\n\\nlocations.\\n\\n5b.\\n\\n', 'score': 0.375454068}, {'page_content': \"| Proposed model | 96.00% | 67.00% | YOLOv5s with SENet attention layer | Animal2-v1, comprises images of tiger, beer, leopard, monkey, elephant and wildboar. | About 9952 images |\\n\\nsites, each of which represents a different zone of the KNP with high rates of human-animal conflict. Along the section of the NH-37 that travels through the KNP, as well as in the boundaries towards the humaninhabitant areas of the Park ' s four different ranges, namely the Kohora range, the Agoratuli range, the Bagori range, and the Burapahar range, positions for the cameras that take pictures of wild animals are being considered. The model receives video and image data taken by the cameras, which it then uses to detect instances of wild animals crossing roadways or entering human habitation or agricultural regions.\", 'score': 0.365198106}, {'page_content': ' The model has a high degree of accuracy when it comes to identifying wild animals such as elephants, deer, tigers, and other similar species. Due to the fact that this is an automated system, the model has the capability of eliminating and replacing the manual monitoring system. As a result, the system will become very helpful for conservation efforts as well as for the community. When the system identifies the presence of any wild animal, the information may be shared with forest officials as well as the general public so that appropriate safety measures can be taken. Wild elephants are responsible for the destruction of a significant quantity of crops and rice fields each year in the region surrounding the KNP as well as throughout the state of Assam. The implementation of an automated system that is based on AI, as proposed in this work, might prevent something like this from happening. The application of this paradigm can remove or significantly reduce the risk that humans pose to biodiversity, which is caused by the conflict that arises between humans and wild animals.', 'score': 0.364592403}]\n", + "Node 'vectorstore':\n", + "---GRADE DOCUMENTS---\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", + "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", + "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", + "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", + "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Node 'grade_documents':\n", + "\n", + "Assistant: I cannot find a sufficient answer to your question in the provided documents. 
Please try rephrasing your question or ask something else about the content of the documents.\n", + "---ROUTE QUESTION---\n", + "---ROUTE TO RAG---\n", + "---RETRIEVE---\n", + "Retrieved 5 documents.\n", + "[{'page_content': '| | | | |\\n\\na A is admonishment coefficient of total population (Times New Roman 10)\\n\\nb B is Bombardment coefficient of the mean population (Times New Roman 10)\\n\\n- Motivation of the study\\n\\nAlarming rate of climate change, sea level rise and other natural disasters are to be managed efficiently. Assessment and management of green house gases thus become very much essential..\\n\\n1 Adapted from Monika and Ram, 2008 (Times New Roman 10)\\n\\nSample sheet 11\\n\\n##### The satellite image as given in Figure 1.1 shows the area from where samples are collected.\\n\\n\\n\\nFigure 1.1 Title of the figure (Times New Roman 11)\\n\\n### REFERENCES\\n\\n- Attanas, D.B. and Monica, H.G. (2012). Effects of green house gases, In Proc. IOOC-ECOC, pp. 557-998.\\n\\n- Gurudeep, P.R. and Mahin, P. (2009).', 'score': 0.423661351}, {'page_content': 'The impact analysis of the proposed technique looks at how accurate and reliable the system is at finding wild animals compared to similar works that have already been reported. It also looks at the manual ways that people in the area already use to deal with the problem of people and animals getting into fights. This analysis is carried out in order to\\n\\nFig. 20. Variation of preprocessing with epoch.\\n\\n\\n\\nB. Bhagabati et al.\\n\\nFig. 21. Variation of inference with epoch.\\n\\n\\n\\nFig. 22. Variation of NMS with epoch.\\n\\n\\n\\nFig. 23. Variation of accuracy with epoch.\\n\\n\\n\\ndetermine how effective the suggested approach is. The system that is being offered is set up to avoid conflicts between humans and animals, as well as to identify a wide variety of animals even when the lighting,\\n\\ndistance, and background are all different, while also providing for the automatic generation of alarms. The system has been built and tested in a form that is resident in the cloud, with cameras set at four different\\n\\nB.', 'score': 0.418868601}, {'page_content': ' In the process of detecting wild animals, further improvement in accuracy is needed. Further, there is an opportunity to develop approaches that are proficient\\n\\nin working well in a generalized approach under both day and night conditions with background variations for detecting human-animal conflict. The YOLOv5 model, with certain modifications and additions, is found to be suitable for developing a generalized framework for the detection of human-animal conflict under both day and night conditions with background variations. Especially, the addition of attention layers as part of the primary detection network helps not only to focus on key areas of the scene under study but also provides optimization in the training and enhanced accuracy. In view of the above, in this work, a SENet attention layer (Hu et al., 2019) is added to YOLOv5 for detecting human-animal conflict under both day and night conditions with background variations. The proposed network is extensively trained with samples of public databases and video streams capturing the scenes under study. 
The combination produces appreciably better outcomes.', 'score': 0.413667738}, {'page_content': ' A method for automated approach for humananimal conflict minimisation using YOLO and SENet Attention Framework.\\n\\nInput: Number of Classes, Class names, images, videos\\n\\nOutput: Alarm generated due to detection of animal\\n\\n- 1. Load image dataset\\n- 2. Define model architecture as follows\\n- 2a. Backbone network (YOLOv5sBackbone with SENet)\\n- 2b. Neck Network (YOLOv5sNeck)\\n\\n2c.\\n\\nDetection head (YOLOv5sHead)\\n\\n3. Train the model:\\n\\n3a.\\n\\nCompute loss on a batch of images\\n\\n3b.\\n\\nCompute gradients and update weights using the optimiser\\n\\n4. Prediction:\\n\\n4a.\\n\\nRemove overlapping prediction\\n\\n4b.\\n\\nOutput final detection results (as bounding boxes, class probabilities, confidence score)\\n\\n5. Detection:\\n\\n5a.\\n\\nUse the model weight and detect objects from input images or video captured by cameras installed at different\\n\\nlocations.\\n\\n5b.\\n\\n', 'score': 0.411838204}, {'page_content': '3390/s22020464.\\n- Premarathna, K.S.P., Rathnayaka, R.M.K.T., 2020. CNN based image detection system for elephant directions to reduce human-elephant conflict. In: 13th Intl.', 'score': 0.406028211}]\n", + "Node 'vectorstore':\n", + "---GRADE DOCUMENTS---\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", + "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", + "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", + "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", + "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Node 'grade_documents':\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Node 'rewrite_query':\n", + "---RETRIEVE---\n", + "Retrieved 5 documents.\n", + "[{'page_content': ' There are three main methods of feature extraction, namely local, holistic, and hybrid. For example, in the local approach entire face is divided into some small regions and then features are extracted from each small region and then during detection, those extracted features are applied. That is why after changing the images slightly from the original one, either by rotating the image or by changing its contrast, the trained network can work for detecting images.\\n\\n## 5.4. Impact analysis\\n\\nThe key novelty of the system is the use of an AI-based automated approach, which provides higher accuracy in detecting human-wild animal conflicts and alarms forest officials and the public continuously throughout the day and night. Forest officials are not required to stand along the boundary of the KNP and monitor the movements of wild animals constantly. Instead, they can attend as notified by the system. 
It can go a long way in assisting the coexistence of the natural world with humans and minimizing distressing situations.\\n\\n', 'score': 0.422885329}, {'page_content': 'The impact analysis of the proposed technique looks at how accurate and reliable the system is at finding wild animals compared to similar works that have already been reported. It also looks at the manual ways that people in the area already use to deal with the problem of people and animals getting into fights. This analysis is carried out in order to\\n\\nFig. 20. Variation of preprocessing with epoch.\\n\\n\\n\\nB. Bhagabati et al.\\n\\nFig. 21. Variation of inference with epoch.\\n\\n\\n\\nFig. 22. Variation of NMS with epoch.\\n\\n\\n\\nFig. 23. Variation of accuracy with epoch.\\n\\n\\n\\ndetermine how effective the suggested approach is. The system that is being offered is set up to avoid conflicts between humans and animals, as well as to identify a wide variety of animals even when the lighting,\\n\\ndistance, and background are all different, while also providing for the automatic generation of alarms. The system has been built and tested in a form that is resident in the cloud, with cameras set at four different\\n\\nB.', 'score': 0.395100534}, {'page_content': '| | | | |\\n\\na A is admonishment coefficient of total population (Times New Roman 10)\\n\\nb B is Bombardment coefficient of the mean population (Times New Roman 10)\\n\\n- Motivation of the study\\n\\nAlarming rate of climate change, sea level rise and other natural disasters are to be managed efficiently. Assessment and management of green house gases thus become very much essential..\\n\\n1 Adapted from Monika and Ram, 2008 (Times New Roman 10)\\n\\nSample sheet 11\\n\\n##### The satellite image as given in Figure 1.1 shows the area from where samples are collected.\\n\\n\\n\\nFigure 1.1 Title of the figure (Times New Roman 11)\\n\\n### REFERENCES\\n\\n- Attanas, D.B. and Monica, H.G. (2012). Effects of green house gases, In Proc. IOOC-ECOC, pp. 557-998.\\n\\n- Gurudeep, P.R. and Mahin, P. (2009).', 'score': 0.394052714}, {'page_content': ' In the process of detecting wild animals, further improvement in accuracy is needed. Further, there is an opportunity to develop approaches that are proficient\\n\\nin working well in a generalized approach under both day and night conditions with background variations for detecting human-animal conflict. The YOLOv5 model, with certain modifications and additions, is found to be suitable for developing a generalized framework for the detection of human-animal conflict under both day and night conditions with background variations. Especially, the addition of attention layers as part of the primary detection network helps not only to focus on key areas of the scene under study but also provides optimization in the training and enhanced accuracy. In view of the above, in this work, a SENet attention layer (Hu et al., 2019) is added to YOLOv5 for detecting human-animal conflict under both day and night conditions with background variations. The proposed network is extensively trained with samples of public databases and video streams capturing the scenes under study. 
The combination produces appreciably better outcomes.', 'score': 0.391879261}, {'page_content': '461 | |\\n\\nIn order to determine more accurate training results and also to explore the effect of epoch upon training result, apart from 150 epochs, the model with attention layer is trained with epoch values 100, 200, and 250 under a uniform training environment and with the same dataset. The training summary for each of these epochs is shown in Tables 5, 6 and 7 for epochs 100, 200, and 250, respectively. The trends of mAP values with increasing epochs are shown in Figs. 14 and Fig. 15.\\n\\nFor the detection of wild animals and conflict situations, the model is tested with real images captured by four different cameras around the NH-37 passing through the KNP. The wild animals are detected successfully, and evolving conflict situations are reported. Deer while crossing roads when vehicles are plying are shown in Fig. 16. Whereas elephants crossing the NH-37 through the animal corridor at the KNP are\\n\\nThe size of the dataset used for custom training is sufficiently large.', 'score': 0.383396268}]\n", + "Node 'vectorstore':\n", + "---GRADE DOCUMENTS---\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", + "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", + "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", + "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", + "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Node 'grade_documents':\n", + "\n", + "Assistant: I cannot find a sufficient answer to your question in the provided documents. Please try rephrasing your question or ask something else about the content of the documents.\n", + "---ROUTE QUESTION---\n", + "---ROUTE TO RAG---\n", + "---RETRIEVE---\n", + "Retrieved 5 documents.\n", + "[{'page_content': '---\\nsource: /teamspace/studios/this_studio/adaptive_rag/data/Project Report Format.docx\\nfile_type: .docx\\n---\\n\\nGUIDELINES FOR B.TECH PROJECT REPORT PREPARATION\\n\\nIntroduction\\n\\nThis document is intended to provide a set of specific and uniform guidelines to the B. Tech students in the preparation of the project report. The content of the report, which is submitted to the University in partial fulfillment for the award of the degree of Bachelor of Technology, is very much important. It is also imperative that the report, to be acceptable by the University, should essentially meet a uniform format emphasizing readability, concordance with ethical standards and University-wide homogeneity.\\n\\n### CHAPTER 1 REPORT LAYOUT\\n\\nThe thesis has to be organized in the following order.\\n\\n- Cover Page\\n- Inside Title Page\\n- Certificate signed by the Supervisor(s) (in the stipulated format)\\n- Declaration signed by the Candidate (in the stipulated format)\\n- Acknowledgements\\n- Abstract\\n', 'score': 0.569816232}, {'page_content': '### ACKNOWLEDGMENTS\\n\\n##### All acknowledgements to be included here. Please restrict to two pages.\\n\\nThe name of the candidate shall appear at the end, without signature.\\n\\nI take this opportunity to thank Prof. 
Partha Mukherjee, Dean - SST, Dr.\\n\\nShahnawaz Ansari, HoD –Cyber Security, and other faculty members who helped in preparing the report.\\n\\nI extend my sincere thanks to one and all of TNU family for the completion of this document on the project report.\\n\\n\\n\\n### ABSTRACT\\n\\n##### Abstract of the report to be given here. Please restrict to a maximum of 300 words. NOTE: The abstract should not have any citations, or abbreviations, nor should it be divided into sections. It can be divided into adequate number of paragraphs as the author wishes. It is advisable to avoid any equations in the Abstract. Figures and tables are to be avoided.\\n\\n', 'score': 0.564478457}, {'page_content': ' They have to be accommodated in a closed pocket in the back cover page of the thesis. The inclusion of non-paper materials must be indicated in the Table of Contents. All non-paper materials must have a label each clearly indicating the name of the candidate, student code number and the date of submission.\\n\\n- Binding\\n\\nThesis copies to be submitted for evaluation are to be soft bounded. The cover page should be printed on glossy white card of 300 g/m2 or above.\\n\\n- Electronic Copy\\n\\nAn electronic version of the report should be submitted to the Head of the Department and the concerned faculty incharge of Internship-Project Planning and Coordination Committee (IPCC). The file name should contain student code number, name of the candidate and date of submission.\\n\\n## TITLE OF THE PROJECT REPORT TO BE SUBMITTED BY THE CANDIDATE\\n\\nA Report submitted\\n\\nin partial fulfillment for the Degree of\\n\\nB. Tech in\\n\\nComputer Science and Engineering with Specialization in Cyber Security\\n\\nby\\n\\nNAME OF THE CANDIDATE(S)\\n\\npursued in\\n\\n', 'score': 0.501448154}, {'page_content': '### DECLARATION\\n\\nI declare that this project report titled submitted in partial fulfillment of the degree of B. Tech in (Computer Science and Engineering with Specialization in Cyber Security) is a record of original work carried out by me under the supervision of <Name(s) of the Supervisor(s)>, and has not formed the basis for the award of any other degree or diploma, in this or any other Institution or University. In keeping with the ethical practice in reporting scientific information, due acknowledgements have been made wherever the findings of others have been cited.\\n\\n', 'score': 0.478058666}, {'page_content': 'Note that all paragraphs in the Abstract start with an indent of 15 mm, and there is no extra spacing between two successive paragraphs. 
The text should be Times New Roman font size 12, single spaced.\\n\\n### TABLE OF CONTENTS\\n\\n###### DESCRIPTION\\tPAGE NUMBER\\n\\n| CERTIFICATE | iii |\\n|----------------------------------------|-------|\\n| DECLARATION | v |\\n| ACKNOWLEDGEMENTS | vii |\\n| ABSTRACT | ix |\\n| LIST OF FIGURES | xiii |\\n| LIST OF TABLES | xv |\\n| ABBREVIATIONS/ NOTATIONS/ NOMENCLATURE | xvii |\\n| 1.\\tTITLE OF CHAPTER 1 | 1 |\\n| 1.1\\tSection heading name | 1 |\\n| 1.2\\tSection heading name | 1 |\\n| 1.2.1\\tSecond level section heading | 3 |\\n| 1.', 'score': 0.461370528}]\n", + "Node 'vectorstore':\n", + "---GRADE DOCUMENTS---\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", + "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", + "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", + "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n", + "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Node 'grade_documents':\n", + "---GENERATE---\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "---ASSESS GENERATION---\n", + "---Generation is not hallucinated.---\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "---Answer quality is good.---\n", + "Node 'generate':\n", + "\n", + "Assistant: The project introduction should provide specific guidelines for B.Tech students in preparing their project report. It should emphasize the importance of the report for fulfilling the requirements of the Bachelor of Technology degree and highlight the need for uniform format, readability, and ethical standards. 
It should set the tone for the rest of the report and provide an overview of the project.\n",
+      "Goodbye!\n"
+     ]
+    }
+   ],
+   "source": [
+    "from pinecone import Pinecone\n",
+    "from langchain_openai import ChatOpenAI\n",
+    "from langgraph.pregel import GraphRecursionError\n",
+    "import tempfile\n",
+    "import os\n",
+    "from pathlib import Path\n",
+    "\n",
+    "def initialize_pinecone(api_key):\n",
+    "    \"\"\"Initialize Pinecone client with API key.\"\"\"\n",
+    "    try:\n",
+    "        return Pinecone(api_key=api_key)\n",
+    "    except Exception as e:\n",
+    "        print(f\"Error initializing Pinecone: {str(e)}\")\n",
+    "        return None\n",
+    "\n",
+    "def initialize_llm(api_key):\n",
+    "    \"\"\"Initialize OpenAI LLM.\"\"\"\n",
+    "    try:\n",
+    "        return ChatOpenAI(api_key=api_key, model=\"gpt-3.5-turbo\")\n",
+    "    except Exception as e:\n",
+    "        print(f\"Error initializing OpenAI: {str(e)}\")\n",
+    "        return None\n",
+    "\n",
+    "def process_documents(file_paths, pc):\n",
+    "    \"\"\"Process documents and store in Pinecone.\"\"\"\n",
+    "    if not file_paths:\n",
+    "        print(\"No documents provided.\")\n",
+    "        return None\n",
+    "\n",
+    "    print(\"Processing documents...\")\n",
+    "    temp_dir = tempfile.mkdtemp()\n",
+    "    markdown_path = Path(temp_dir) / \"combined.md\"\n",
+    "    parquet_path = Path(temp_dir) / \"documents.parquet\"\n",
+    "\n",
+    "    try:\n",
+    "        markdown_path = load_documents(file_paths, output_path=markdown_path)\n",
+    "        chunks = process_chunks(markdown_path, chunk_size=256, threshold=0.6)\n",
+    "        parquet_path = save_to_parquet(chunks, parquet_path)\n",
+    "        \n",
+    "        ingest_data(\n",
+    "            pc=pc,\n",
+    "            parquet_path=parquet_path,\n",
+    "            text_column=\"text\",\n",
+    "            pinecone_client=pc\n",
+    "        )\n",
+    "        \n",
+    "        retriever = get_retriever(pc)\n",
+    "        print(\"Documents processed successfully!\")\n",
+    "        return retriever\n",
+    "        \n",
+    "    except Exception as e:\n",
+    "        print(f\"Error processing documents: {str(e)}\")\n",
+    "        return None\n",
+    "    finally:\n",
+    "        # Best-effort cleanup; the temp files may not exist if processing failed early\n",
+    "        try:\n",
+    "            os.remove(markdown_path)\n",
+    "            os.remove(parquet_path)\n",
+    "            os.rmdir(temp_dir)\n",
+    "        except OSError:\n",
+    "            pass\n",
+    "\n",
+    "def main():\n",
+    "    # Get API keys (getpass.getpass would avoid echoing secrets to the terminal)\n",
+    "    pinecone_api_key = input(\"Enter your Pinecone API key: \")\n",
+    "    openai_api_key = input(\"Enter your OpenAI API key: \")\n",
+    "    \n",
+    "    # Initialize clients\n",
+    "    pc = initialize_pinecone(pinecone_api_key)\n",
+    "    if not pc:\n",
+    "        return\n",
+    "    \n",
+    "    llm = initialize_llm(openai_api_key)\n",
+    "    if not llm:\n",
+    "        return\n",
+    "\n",
+    "    # Get document paths\n",
+    "    print(\"\\nEnter the paths to your documents (one per line).\")\n",
+    "    print(\"Press Enter twice when done:\")\n",
+    "    \n",
+    "    file_paths = []\n",
+    "    while True:\n",
+    "        path = input()\n",
+    "        if not path:\n",
+    "            break\n",
+    "        if os.path.exists(path):\n",
+    "            file_paths.append(path)\n",
+    "        else:\n",
+    "            print(f\"Warning: File {path} does not exist\")\n",
+    "\n",
+    "    # Process documents\n",
+    "    retriever = process_documents(file_paths, pc)\n",
+    "    if not retriever:\n",
+    "        return\n",
+    "\n",
+    "    # Chat loop\n",
+    "    print(\"\\nChat with your documents! Type 'exit' to quit.\")\n",
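+    "\n",
+    "    # run_adaptive_rag (defined in the previous cell) streams the graph with\n",
+    "    # config={\"recursion_limit\": 5}, so a question that keeps cycling through\n",
+    "    # rewrite_query exhausts the limit and raises GraphRecursionError, which\n",
+    "    # the loop below reports as a fallback \"no sufficient answer\" reply.\n",
+    "    # Minimal non-interactive sketch (commented out; the question is illustrative):\n",
+    "    #   answer = run_adaptive_rag(retriever, \"What should the introduction cover?\", llm)\n",
+    "    #   print(\"\\nAssistant:\", answer)\n",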
+    "    while True:\n",
+    "        question = input(\"\\nYou: \")\n",
+    "        \n",
+    "        # strip() tolerates stray whitespace around the exit command\n",
+    "        if question.strip().lower() == 'exit':\n",
+    "            print(\"Goodbye!\")\n",
+    "            break\n",
+    "        \n",
+    "        try:\n",
+    "            response = run_adaptive_rag(\n",
+    "                retriever=retriever,\n",
+    "                question=question,\n",
+    "                llm=llm,\n",
+    "                top_k=5,\n",
+    "                enable_websearch=False\n",
+    "            )\n",
+    "            print(\"\\nAssistant:\", response)\n",
+    "        \n",
+    "        except GraphRecursionError:\n",
+    "            print(\"\\nAssistant: I cannot find a sufficient answer to your question in the provided documents. Please try rephrasing your question or ask something else about the content of the documents.\")\n",
+    "        \n",
+    "        except Exception as e:\n",
+    "            print(f\"\\nError: {str(e)}\")\n",
+    "\n",
+    "if __name__ == \"__main__\":\n",
+    "    main()"
+   ]
+  }
+ ],
+ "metadata": {
+  "language_info": {
+   "name": "python"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}