Commit 5ddcfe5
Parent(s): none (initial commit)

Initial commit

Browse files:
- .github/workflows/deploy_hf.yaml +24 -0
- .gitignore +177 -0
- README.md +74 -0
- data/scraping_scripts/create_vector_stores.py +218 -0
- data/scraping_scripts/csv_to_jsonl.py +61 -0
- data/scraping_scripts/github_to_markdown_ai_docs.py +214 -0
- data/scraping_scripts/process_md_files.py +579 -0
- data/scraping_scripts/upload_dbs_to_hf.py +38 -0
- requirements.txt +20 -0
- scripts/contextual_retrieval.py +244 -0
- scripts/custom_retriever.py +257 -0
- scripts/evaluate_rag_system.py +773 -0
- scripts/generate_qa_dataset.ipynb +287 -0
- scripts/main.py +291 -0
- scripts/prompts.py +29 -0
- scripts/setup.py +212 -0
- scripts/utils.py +16 -0
.github/workflows/deploy_hf.yaml
ADDED
```yaml
name: Sync to Hugging Face hub
on:
  push:
    branches: [main]
    paths:
      - 'scripts/**'        # deploy when the app code changes
      - 'requirements.txt'  # ...or when the dependencies change

  # to run this workflow manually from the Actions tab
  workflow_dispatch:

jobs:
  sync-to-hub:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 0
          lfs: true
      - name: Push to hub
        env:
          HF_TOKEN: ${{ secrets.HF_TOKEN }}
          HF_USERNAME: ${{ secrets.HF_USERNAME }}
        run: git push --force https://$HF_USERNAME:[email protected]/spaces/towardsai-buster/ai-tutor-chatbot main:main
```
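The workflow reads `HF_TOKEN` and `HF_USERNAME` from the repository's GitHub Actions secrets. A minimal sketch for checking locally that a token is valid before wiring it into the Action, using `huggingface_hub` (already in `requirements.txt`); it assumes `HF_TOKEN` is exported in the local environment, mirroring the Actions secret:

```python
import os

from huggingface_hub import HfApi

# Assumes HF_TOKEN is exported locally, mirroring the Actions secret.
api = HfApi(token=os.environ["HF_TOKEN"])
user = api.whoami()  # raises if the token is invalid or expired
print(f"Token is valid for user: {user['name']}")
```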
.gitignore
ADDED
```
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
.pybuilder/
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock

# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml

# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
ai-tutor/
venv_ai_tutor/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/

# Cython debug symbols
cython_debug/

# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/

.vscode/
data/chroma-db**/
.huggingface

.DS_Store

*.csv
*.json
*.jsonl
*.html
*.mdx
*.pkl
*.png
*.mov
```
README.md
ADDED
````markdown
---
title: AI Tutor Chatbot
emoji: 🧑🏻‍🏫
colorFrom: gray
colorTo: pink
sdk: gradio
sdk_version: 5.13.2
app_file: scripts/main.py
pinned: false
---

### Gradio UI Chatbot

A Gradio UI for the chatbot is available in [scripts/main.py](./scripts/main.py).

The Gradio demo is deployed on Hugging Face Spaces at: [AI Tutor Chatbot on Hugging Face](https://huggingface.co/spaces/towardsai-buster/ai-tutor-chatbot).

**Note:** A GitHub Action automatically deploys the Gradio demo when changes are pushed to the `scripts` folder or to `requirements.txt`.

### Installation (for Gradio UI)

1. **Create a new Python environment:**

   ```bash
   python -m venv .venv
   ```

2. **Activate the environment:**

   For macOS and Linux:

   ```bash
   source .venv/bin/activate
   ```

   For Windows:

   ```bash
   .venv\Scripts\activate
   ```

3. **Install the dependencies:**

   ```bash
   pip install -r requirements.txt
   ```

### Usage (for Gradio UI)

1. **Set environment variables:**

   Before running the application, set up the required API keys:

   For macOS and Linux:

   ```bash
   export OPENAI_API_KEY=your_openai_api_key_here
   export COHERE_API_KEY=your_cohere_api_key_here
   ```

   For Windows:

   ```bash
   set OPENAI_API_KEY=your_openai_api_key_here
   set COHERE_API_KEY=your_cohere_api_key_here
   ```

2. **Run the application:**

   ```bash
   python scripts/main.py
   ```

   This command starts the Gradio interface for the AI Tutor chatbot.
````
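Since the scraping and retrieval scripts in this repo call `load_dotenv()`, an alternative to exporting variables in the shell is a `.env` file at the repository root (which the `.gitignore` above already excludes). A minimal sketch of loading and checking the keys; the placeholder values are illustrative:

```python
# Example .env contents (placeholder values):
#   OPENAI_API_KEY=sk-...
#   COHERE_API_KEY=...
import os

from dotenv import load_dotenv

load_dotenv()  # reads .env from the current working directory

for key in ("OPENAI_API_KEY", "COHERE_API_KEY"):
    assert os.getenv(key), f"{key} is not set"
```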
data/scraping_scripts/create_vector_stores.py
ADDED
```python
"""
Vector Store Creation Script

Purpose:
This script processes various data sources (e.g., transformers, peft, trl, llama_index, openai_cookbooks, langchain)
to create vector stores using Chroma and LlamaIndex. It reads data from JSONL files, creates document embeddings,
and stores them in persistent Chroma databases for efficient retrieval.

Usage:
    python data/scraping_scripts/create_vector_stores.py <source1> <source2> ...

Example:
    python data/scraping_scripts/create_vector_stores.py transformers peft llama_index

The script accepts one or more source names as command-line arguments. Valid source names are:
transformers, peft, trl, llama_index, openai_cookbooks, langchain, tai_blog, all_sources

For each specified source, the script will:
1. Read data from the corresponding JSONL file
2. Create document embeddings
3. Store the embeddings in a Chroma vector database
4. Save a dictionary of documents for future reference

Note: Ensure that the input JSONL files are present in the 'data' directory.
"""

import argparse
import json
import os
import pickle
import shutil

import chromadb
from dotenv import load_dotenv
from llama_index.core import Document, StorageContext, VectorStoreIndex
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.schema import TextNode
from llama_index.embeddings.cohere import CohereEmbedding
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.llms.openai import OpenAI
from llama_index.vector_stores.chroma import ChromaVectorStore

load_dotenv()

# Configuration for different sources
SOURCE_CONFIGS = {
    "transformers": {
        "input_file": "data/transformers_data.jsonl",
        "db_name": "chroma-db-transformers",
    },
    "peft": {"input_file": "data/peft_data.jsonl", "db_name": "chroma-db-peft"},
    "trl": {"input_file": "data/trl_data.jsonl", "db_name": "chroma-db-trl"},
    "llama_index": {
        "input_file": "data/llama_index_data.jsonl",
        "db_name": "chroma-db-llama_index",
    },
    "openai_cookbooks": {
        "input_file": "data/openai_cookbooks_data.jsonl",
        "db_name": "chroma-db-openai_cookbooks",
    },
    "langchain": {
        "input_file": "data/langchain_data.jsonl",
        "db_name": "chroma-db-langchain",
    },
    "tai_blog": {
        "input_file": "data/tai_blog_data.jsonl",
        "db_name": "chroma-db-tai_blog",
    },
    "all_sources": {
        "input_file": "data/all_sources_data.jsonl",
        "db_name": "chroma-db-all_sources",
    },
}


def create_docs(input_file: str) -> list[Document]:
    with open(input_file, "r") as f:
        documents = []
        for line in f:
            data = json.loads(line)
            documents.append(
                Document(
                    doc_id=data["doc_id"],
                    text=data["content"],
                    metadata={  # type: ignore
                        "url": data["url"],
                        "title": data["name"],
                        "tokens": data["tokens"],
                        "retrieve_doc": data["retrieve_doc"],
                        "source": data["source"],
                    },
                    excluded_llm_metadata_keys=[  # url is included in the LLM context
                        "title",
                        "tokens",
                        "retrieve_doc",
                        "source",
                    ],
                    excluded_embed_metadata_keys=[  # title is embedded along with the content
                        "url",
                        "tokens",
                        "retrieve_doc",
                        "source",
                    ],
                )
            )
    return documents


def process_source(source: str):
    config = SOURCE_CONFIGS[source]

    input_file = config["input_file"]
    db_name = config["db_name"]
    db_path = f"data/{db_name}"

    print(f"Processing source: {source}")

    documents: list[Document] = create_docs(input_file)
    print(f"Created {len(documents)} documents")

    # Check if the folder exists and delete it
    if os.path.exists(db_path):
        print(f"Existing database found at {db_path}. Deleting...")
        shutil.rmtree(db_path)
        print(f"Deleted existing database at {db_path}")

    # Create Chroma client and collection
    chroma_client = chromadb.PersistentClient(path=f"data/{db_name}")
    chroma_collection = chroma_client.create_collection(db_name)

    # Create vector store and storage context
    vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
    storage_context = StorageContext.from_defaults(vector_store=vector_store)

    # Save document dictionary
    document_dict: dict[str, Document] = {doc.doc_id: doc for doc in documents}
    document_dict_file = f"data/{db_name}/document_dict_{source}.pkl"
    with open(document_dict_file, "wb") as f:
        pickle.dump(document_dict, f)
    print(f"Saved document dictionary to {document_dict_file}")

    # Load nodes with context (precomputed contextual summaries; note this same
    # "all_sources" pickle is loaded regardless of which source is being processed)
    with open("data/all_sources_contextual_nodes.pkl", "rb") as f:
        nodes_with_context: list[TextNode] = pickle.load(f)

    print(f"Loaded {len(nodes_with_context)} nodes with context")

    # Create vector store index
    index = VectorStoreIndex(
        nodes=nodes_with_context,
        # embed_model=OpenAIEmbedding(model="text-embedding-3-large", mode="similarity"),
        embed_model=CohereEmbedding(
            api_key=os.environ["COHERE_API_KEY"],
            model_name="embed-english-v3.0",
            input_type="search_document",
        ),
        show_progress=True,
        use_async=True,
        storage_context=storage_context,
    )
    llm = OpenAI(
        temperature=1,
        model="gpt-4o-mini",
        # model="gpt-4o",
        max_tokens=5000,
        max_retries=3,
    )
    query_engine = index.as_query_engine(llm=llm)
    response = query_engine.query("How to fine-tune an llm?")
    print(response)
    for src in response.source_nodes:
        print("Node ID\t", src.node_id)
        print("Title\t", src.metadata["title"])
        print("Text\t", src.text)
        print("Score\t", src.score)
        print("-_" * 20)

    # # Alternative: build the index directly from the documents with sentence splitting
    # index = VectorStoreIndex.from_documents(
    #     documents,
    #     # embed_model=OpenAIEmbedding(model="text-embedding-3-large", mode="similarity"),
    #     embed_model=CohereEmbedding(
    #         api_key=os.environ["COHERE_API_KEY"],
    #         model_name="embed-english-v3.0",
    #         input_type="search_document",
    #     ),
    #     transformations=[SentenceSplitter(chunk_size=800, chunk_overlap=0)],
    #     show_progress=True,
    #     use_async=True,
    #     storage_context=storage_context,
    # )
    print(f"Created vector store index for {source}")


def main(sources: list[str]):
    for source in sources:
        if source in SOURCE_CONFIGS:
            process_source(source)
        else:
            print(f"Unknown source: {source}. Skipping.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Process sources and create vector stores."
    )
    parser.add_argument(
        "sources",
        nargs="+",
        choices=SOURCE_CONFIGS.keys(),
        help="Specify one or more sources to process",
    )
    args = parser.parse_args()

    main(args.sources)
```
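A minimal sketch of reloading one of the persisted stores for querying (assuming the `all_sources` store was built as above); note the Cohere input type switches to `search_query` on the query side, versus `search_document` at index time:

```python
import os

import chromadb
from llama_index.core import VectorStoreIndex
from llama_index.embeddings.cohere import CohereEmbedding
from llama_index.vector_stores.chroma import ChromaVectorStore

db_name = "chroma-db-all_sources"
client = chromadb.PersistentClient(path=f"data/{db_name}")
collection = client.get_collection(db_name)  # collection created by process_source()

index = VectorStoreIndex.from_vector_store(
    ChromaVectorStore(chroma_collection=collection),
    embed_model=CohereEmbedding(
        api_key=os.environ["COHERE_API_KEY"],
        model_name="embed-english-v3.0",
        input_type="search_query",  # query-side embeddings
    ),
)
retriever = index.as_retriever(similarity_top_k=5)
for node in retriever.retrieve("How to fine-tune an LLM?"):
    print(node.score, node.metadata["title"])
```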
data/scraping_scripts/csv_to_jsonl.py
ADDED
```python
import json
import uuid

import pandas as pd
import tiktoken


# Function to count tokens using tiktoken
def num_tokens_from_string(string: str, encoding_name: str) -> int:
    encoding = tiktoken.get_encoding(encoding_name)
    num_tokens = len(
        encoding.encode(
            string, disallowed_special=(encoding.special_tokens_set - {"<|endoftext|>"})
        )
    )
    return num_tokens


# Function to clean or remove specific content, e.g., copyright headers
def remove_copyright_header(content: str) -> str:
    # Implement any cleaning logic you need here
    return content


# Function to convert DataFrame to JSONL format with token counting
def convert_to_jsonl_with_conditions(df, encoding_name="cl100k_base"):
    jsonl_data = []
    for _, row in df.iterrows():
        token_count = num_tokens_from_string(row["text"], encoding_name)

        # Skip entries based on token count conditions
        if token_count < 100 or token_count > 200_000:
            print(f"Skipping {row['title']} due to token count {token_count}")
            continue

        cleaned_content = remove_copyright_header(row["text"])

        entry = {
            "tokens": token_count,  # Token count using tiktoken
            "doc_id": str(uuid.uuid4()),  # Generate a unique UUID
            "name": row["title"],
            "url": row["tai_url"],
            "retrieve_doc": (token_count <= 8000),  # retrieve_doc condition
            "source": "tai_blog",
            "content": cleaned_content,
        }
        jsonl_data.append(entry)
    return jsonl_data


# Load the CSV file
data = pd.read_csv("data/tai.csv")

# Convert the dataframe to JSONL format with token counting and conditions
jsonl_data_with_conditions = convert_to_jsonl_with_conditions(data)

# Save the output to a new JSONL file using json.dumps to ensure proper escaping
output_path = "data/tai_blog_data_conditions.jsonl"
with open(output_path, "w") as f:
    for entry in jsonl_data_with_conditions:
        f.write(json.dumps(entry) + "\n")
```
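For reference, each output line is one flat JSON record matching the schema the other scripts consume; a sketch of a single entry with illustrative (not real) values:

```python
# One output line, pretty-printed (illustrative values):
{
    "tokens": 1234,
    "doc_id": "a1b2c3d4-...",                   # uuid4, generated per row
    "name": "Example blog post title",          # from the CSV "title" column
    "url": "https://towardsai.net/p/example",   # from the "tai_url" column
    "retrieve_doc": True,                       # True when tokens <= 8000
    "source": "tai_blog",
    "content": "Full cleaned article text...",
}
```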
data/scraping_scripts/github_to_markdown_ai_docs.py
ADDED
```python
"""
Fetch Markdown files from specified GitHub repositories.

This script fetches Markdown (.md), MDX (.mdx), and Jupyter Notebook (.ipynb) files
from specified GitHub repositories, particularly focusing on documentation sources
for various AI and machine learning libraries.

Key features:
1. Configurable for multiple documentation sources (e.g., Hugging Face Transformers, PEFT, TRL)
2. Command-line interface for specifying one or more sources to process
3. Automatic conversion of Jupyter Notebooks to Markdown
4. Rate-limit handling to comply with GitHub API restrictions
5. Retry mechanism for resilience against network issues

Usage:
    python github_to_markdown_ai_docs.py <source1> [<source2> ...]

Where <sourceN> is one of the predefined sources in SOURCE_CONFIGS (e.g., 'transformers', 'peft', 'trl').

Example:
    python github_to_markdown_ai_docs.py trl peft

This will download and process the documentation files for both TRL and PEFT libraries.

Note:
- Ensure you have set the GITHUB_TOKEN environment variable with your GitHub Personal Access Token.
- The script creates a 'data' directory in the current working directory to store the downloaded files.
- Each source's files are stored in a subdirectory named '<repo>_md_files'.
"""

import argparse
import json
import os
import random
import time
from typing import Dict, List

import nbformat
import requests
from dotenv import load_dotenv
from nbconvert import MarkdownExporter

load_dotenv()

# Configuration for different sources
SOURCE_CONFIGS = {
    "transformers": {
        "owner": "huggingface",
        "repo": "transformers",
        "path": "docs/source/en",
    },
    "peft": {
        "owner": "huggingface",
        "repo": "peft",
        "path": "docs/source",
    },
    "trl": {
        "owner": "huggingface",
        "repo": "trl",
        "path": "docs/source",
    },
    "llama_index": {
        "owner": "run-llama",
        "repo": "llama_index",
        "path": "docs/docs",
    },
    "openai_cookbooks": {
        "owner": "openai",
        "repo": "openai-cookbook",
        "path": "examples",
    },
    "langchain": {
        "owner": "langchain-ai",
        "repo": "langchain",
        "path": "docs/docs",
    },
}

# GitHub Personal Access Token, read from the environment
GITHUB_TOKEN = os.getenv("GITHUB_TOKEN")

# Headers for authenticated requests
HEADERS = {
    "Authorization": f"token {GITHUB_TOKEN}",
    "Accept": "application/vnd.github.v3+json",
}

# Maximum number of retries
MAX_RETRIES = 5


def check_rate_limit():
    rate_limit_url = "https://api.github.com/rate_limit"
    response = requests.get(rate_limit_url, headers=HEADERS)
    data = response.json()
    remaining = data["resources"]["core"]["remaining"]
    reset_time = data["resources"]["core"]["reset"]

    if remaining < 10:  # Adjust this threshold as needed
        wait_time = reset_time - time.time()
        print(f"Rate limit nearly exceeded. Waiting for {wait_time:.2f} seconds.")
        time.sleep(wait_time + 1)  # Add 1 second buffer


def get_files_in_directory(api_url: str, retries: int = 0) -> List[Dict]:
    try:
        check_rate_limit()
        response = requests.get(api_url, headers=HEADERS)
        response.raise_for_status()
        return response.json()
    except requests.exceptions.RequestException as e:
        if retries < MAX_RETRIES:
            wait_time = (2**retries) + random.random()
            print(
                f"Error fetching directory contents: {e}. Retrying in {wait_time:.2f} seconds..."
            )
            time.sleep(wait_time)
            return get_files_in_directory(api_url, retries + 1)
        else:
            print(
                f"Failed to fetch directory contents after {MAX_RETRIES} retries: {e}"
            )
            return []


def download_file(file_url: str, file_path: str, retries: int = 0):
    try:
        check_rate_limit()
        response = requests.get(file_url, headers=HEADERS)
        response.raise_for_status()
        with open(file_path, "wb") as file:
            file.write(response.content)
    except requests.exceptions.RequestException as e:
        if retries < MAX_RETRIES:
            wait_time = (2**retries) + random.random()
            print(
                f"Error downloading file: {e}. Retrying in {wait_time:.2f} seconds..."
            )
            time.sleep(wait_time)
            download_file(file_url, file_path, retries + 1)
        else:
            print(f"Failed to download file after {MAX_RETRIES} retries: {e}")


def convert_ipynb_to_md(ipynb_path: str, md_path: str):
    with open(ipynb_path, "r", encoding="utf-8") as f:
        notebook = nbformat.read(f, as_version=4)

    exporter = MarkdownExporter()
    markdown, _ = exporter.from_notebook_node(notebook)

    with open(md_path, "w", encoding="utf-8") as f:
        f.write(markdown)


def fetch_files(api_url: str, local_dir: str):
    files = get_files_in_directory(api_url)
    for file in files:
        if file["type"] == "file" and file["name"].endswith((".md", ".mdx", ".ipynb")):
            file_url = file["download_url"]
            file_name = file["name"]
            file_path = os.path.join(local_dir, file_name)
            print(f"Downloading {file_name}...")
            download_file(file_url, file_path)

            if file_name.endswith(".ipynb"):
                md_file_name = file_name.replace(".ipynb", ".md")
                md_file_path = os.path.join(local_dir, md_file_name)
                print(f"Converting {file_name} to markdown...")
                convert_ipynb_to_md(file_path, md_file_path)
                os.remove(file_path)  # Remove the .ipynb file after conversion
        elif file["type"] == "dir":
            subdir = os.path.join(local_dir, file["name"])
            os.makedirs(subdir, exist_ok=True)
            fetch_files(file["url"], subdir)


def process_source(source: str):
    if source not in SOURCE_CONFIGS:
        print(
            f"Error: Unknown source '{source}'. Available sources: {', '.join(SOURCE_CONFIGS.keys())}"
        )
        return

    config = SOURCE_CONFIGS[source]
    api_url = f"https://api.github.com/repos/{config['owner']}/{config['repo']}/contents/{config['path']}"
    local_dir = f"data/{config['repo']}_md_files"
    os.makedirs(local_dir, exist_ok=True)

    print(f"Processing source: {source}")
    fetch_files(api_url, local_dir)
    print(f"Finished processing {source}")


def main(sources: List[str]):
    for source in sources:
        process_source(source)
    print("All specified sources have been processed.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Fetch Markdown files from specified GitHub repositories."
    )
    parser.add_argument(
        "sources",
        nargs="+",
        choices=SOURCE_CONFIGS.keys(),
        help="Specify one or more sources to process",
    )
    args = parser.parse_args()

    main(args.sources)
```
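Both retry helpers use the same exponential backoff with jitter. A quick sketch of the wait schedule this formula produces across the five allowed retries:

```python
import random

MAX_RETRIES = 5
for retries in range(MAX_RETRIES):
    wait_time = (2**retries) + random.random()
    # roughly 1s, 2s, 4s, 8s, 16s, each plus up to 1s of jitter
    print(f"retry {retries}: wait ~{wait_time:.2f}s")
```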
data/scraping_scripts/process_md_files.py
ADDED
```python
"""
Markdown Document Processor for Documentation Sources

This script processes Markdown (.md) and MDX (.mdx) files from various documentation sources
(such as Hugging Face Transformers, PEFT, TRL, LlamaIndex, and OpenAI Cookbook) and converts
them into a standardized JSONL format for further processing or indexing.

Key features:
1. Configurable for multiple documentation sources
2. Extracts titles, generates URLs, and counts tokens for each document
3. Supports inclusion/exclusion of specific directories and root files
4. Removes copyright headers from content
5. Generates a unique ID for each document
6. Determines if a whole document should be retrieved based on token count
7. Handles special cases like the openai-cookbook repo by adding .ipynb extensions
8. Processes multiple sources in a single run

Usage:
    python process_md_files.py <source1> <source2> ...

Where <source1>, <source2>, etc. are one or more of the predefined sources in SOURCE_CONFIGS
(e.g., 'transformers', 'llama_index', 'openai_cookbooks').

The script processes all Markdown files in the specified input directories (and their subdirectories),
applies the configured filters, and saves the results in JSONL files. Each line in the output
files represents a single document with metadata and content.

To add or modify sources, update the SOURCE_CONFIGS dictionary at the top of the script.
"""

import argparse
import json
import logging
import os
import re
import uuid
from typing import Dict, List

import tiktoken

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Configuration for different sources
SOURCE_CONFIGS = {
    "transformers": {
        "base_url": "https://huggingface.co/docs/transformers/",
        "input_directory": "data/transformers_md_files",
        "output_file": "data/transformers_data.jsonl",
        "source_name": "transformers",
        "use_include_list": False,
        "included_dirs": [],
        "excluded_dirs": ["internal", "main_classes"],
        "excluded_root_files": [],
        "included_root_files": [],
        "url_extension": "",
    },
    "peft": {
        "base_url": "https://huggingface.co/docs/peft/",
        "input_directory": "data/peft_md_files",
        "output_file": "data/peft_data.jsonl",
        "source_name": "peft",
        "use_include_list": False,
        "included_dirs": [],
        "excluded_dirs": [],
        "excluded_root_files": [],
        "included_root_files": [],
        "url_extension": "",
    },
    "trl": {
        "base_url": "https://huggingface.co/docs/trl/",
        "input_directory": "data/trl_md_files",
        "output_file": "data/trl_data.jsonl",
        "source_name": "trl",
        "use_include_list": False,
        "included_dirs": [],
        "excluded_dirs": [],
        "excluded_root_files": [],
        "included_root_files": [],
        "url_extension": "",
    },
    "llama_index": {
        "base_url": "https://docs.llamaindex.ai/en/stable/",
        "input_directory": "data/llama_index_md_files",
        "output_file": "data/llama_index_data.jsonl",
        "source_name": "llama_index",
        "use_include_list": True,
        "included_dirs": [
            "getting_started",
            "understanding",
            "use_cases",
            "examples",
            "module_guides",
            "optimizing",
        ],
        "excluded_dirs": [],
        "excluded_root_files": [],
        "included_root_files": ["index.md"],
        "url_extension": "",
    },
    "openai_cookbooks": {
        "base_url": "https://github.com/openai/openai-cookbook/blob/main/examples/",
        "input_directory": "data/openai-cookbook_md_files",
        "output_file": "data/openai_cookbooks_data.jsonl",
        "source_name": "openai_cookbooks",
        "use_include_list": False,
        "included_dirs": [],
        "excluded_dirs": [],
        "excluded_root_files": [],
        "included_root_files": [],
        "url_extension": ".ipynb",
    },
    "langchain": {
        "base_url": "https://python.langchain.com/v0.2/docs/",
        "input_directory": "data/langchain_md_files",
        "output_file": "data/langchain_data.jsonl",
        "source_name": "langchain",
        "use_include_list": True,
        "included_dirs": ["how_to", "versions", "tutorials", "integrations"],
        "excluded_dirs": [],
        "excluded_root_files": [],
        "included_root_files": ["security.md", "concepts.mdx", "introduction.mdx"],
        "url_extension": "",
    },
    "tai_blog": {
        "base_url": "",
        "input_directory": "",
        "output_file": "data/tai_blog_data.jsonl",
        "source_name": "tai_blog",
        "use_include_list": False,
        "included_dirs": [],
        "excluded_dirs": [],
        "excluded_root_files": [],
        "included_root_files": [],
        "url_extension": "",
    },
    "8-hour_primer": {
        "base_url": "",
        "input_directory": "data/8-hour_primer",
        "output_file": "data/8-hour_primer_data.jsonl",  # 8-hour Generative AI Primer
        "source_name": "8-hour_primer",
        "use_include_list": False,
        "included_dirs": [],
        "excluded_dirs": [],
        "excluded_root_files": [],
        "included_root_files": [],
        "url_extension": "",
    },
}


def extract_title(content: str):
    title_match = re.search(r"^#\s+(.+)$", content, re.MULTILINE)
    if title_match:
        return title_match.group(1).strip()

    lines = content.split("\n")
    for line in lines:
        if line.strip():
            return line.strip()

    return None


def generate_url(file_path: str, config: Dict) -> str:
    """
    Return an empty string if base_url is empty;
    otherwise return the constructed URL as before.
    """
    if not config["base_url"]:
        return ""

    path_without_extension = os.path.splitext(file_path)[0]
    path_with_forward_slashes = path_without_extension.replace("\\", "/")
    return config["base_url"] + path_with_forward_slashes + config["url_extension"]


def should_include_file(file_path: str, config: Dict) -> bool:
    if os.path.dirname(file_path) == "":
        if config["use_include_list"]:
            return os.path.basename(file_path) in config["included_root_files"]
        else:
            return os.path.basename(file_path) not in config["excluded_root_files"]

    if config["use_include_list"]:
        return any(file_path.startswith(dir) for dir in config["included_dirs"])
    else:
        return not any(file_path.startswith(dir) for dir in config["excluded_dirs"])


def num_tokens_from_string(string: str, encoding_name: str) -> int:
    encoding = tiktoken.get_encoding(encoding_name)
    num_tokens = len(
        encoding.encode(
            string, disallowed_special=(encoding.special_tokens_set - {"<|endoftext|>"})
        )
    )
    return num_tokens


def remove_copyright_header(content: str) -> str:
    header_pattern = re.compile(r"<!--Copyright.*?-->\s*", re.DOTALL)
    cleaned_content = header_pattern.sub("", content, count=1)
    return cleaned_content.strip()


def process_md_files(directory: str, config: Dict) -> List[Dict]:
    jsonl_data = []

    for root, _, files in os.walk(directory):
        for file in files:
            if file.endswith(".md") or file.endswith(".mdx"):
                file_path = os.path.join(root, file)
                relative_path = os.path.relpath(file_path, directory)

                if should_include_file(relative_path, config):
                    with open(file_path, "r", encoding="utf-8") as f:
                        content = f.read()

                    title = extract_title(content)
                    token_count = num_tokens_from_string(content, "cl100k_base")

                    # Skip very small or extremely large files
                    if token_count < 100 or token_count > 200_000:
                        logger.info(
                            f"Skipping {relative_path} due to token count {token_count}"
                        )
                        continue

                    cleaned_content = remove_copyright_header(content)

                    json_object = {
                        "tokens": token_count,
                        "doc_id": str(uuid.uuid4()),
                        "name": (title if title else file),
                        "url": generate_url(relative_path, config),
                        "retrieve_doc": (token_count <= 8000),
                        "source": config["source_name"],
                        "content": cleaned_content,
                    }

                    jsonl_data.append(json_object)

    return jsonl_data


def save_jsonl(data: List[Dict], output_file: str) -> None:
    with open(output_file, "w", encoding="utf-8") as f:
        for item in data:
            json.dump(item, f, ensure_ascii=False)
            f.write("\n")


def combine_all_sources(sources: List[str]) -> None:
    all_data = []
    output_file = "data/all_sources_data.jsonl"

    for source in sources:
        if source not in SOURCE_CONFIGS:
            logger.error(f"Unknown source '{source}'. Skipping.")
            continue

        input_file = SOURCE_CONFIGS[source]["output_file"]
        logger.info(f"Processing source: {source}")

        with open(input_file, "r", encoding="utf-8") as f:
            for line in f:
                all_data.append(json.loads(line))

    logger.info(f"Total documents combined: {len(all_data)}")
    save_jsonl(all_data, output_file)
    logger.info(f"Combined data saved to {output_file}")


def process_source(source: str) -> None:
    if source not in SOURCE_CONFIGS:
        logger.error(f"Unknown source '{source}'. Skipping.")
        return

    config = SOURCE_CONFIGS[source]
    logger.info(f"\n\nProcessing source: {source}")
    jsonl_data = process_md_files(config["input_directory"], config)
    save_jsonl(jsonl_data, config["output_file"])
    logger.info(
        f"Processed {len(jsonl_data)} files and saved to {config['output_file']}"
    )


def main(sources: List[str]) -> None:
    for source in sources:
        process_source(source)

    if len(sources) > 1:
        combine_all_sources(sources)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Process Markdown files from specified sources."
    )
    parser.add_argument(
        "sources",
        nargs="+",
        choices=SOURCE_CONFIGS.keys(),
        help="Specify one or more sources to process",
    )
    args = parser.parse_args()

    main(args.sources)
```
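As a sanity check, a small sketch of how the include/exclude rules resolve for the `langchain` config when run in the same module; the file paths below are hypothetical and relative to the input directory:

```python
cfg = SOURCE_CONFIGS["langchain"]

print(should_include_file("how_to/streaming.mdx", cfg))    # True: under an included dir
print(should_include_file("contributing/index.mdx", cfg))  # False: not in included_dirs
print(should_include_file("security.md", cfg))             # True: whitelisted root file
print(should_include_file("README.md", cfg))               # False: root file not whitelisted
```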
data/scraping_scripts/upload_dbs_to_hf.py
ADDED
```python
"""
Hugging Face Data Upload Script

Purpose:
This script uploads a local folder to a Hugging Face dataset repository. It's designed to
update or create a dataset on the Hugging Face Hub by uploading the contents of a specified
local folder.

Usage:
- Run the script: python data/scraping_scripts/upload_dbs_to_hf.py

The script will:
- Upload the contents of the 'data' folder to the specified Hugging Face dataset repository:
  https://huggingface.co/datasets/towardsai-tutors/ai-tutor-vector-db

Configuration:
- The script is set to upload to the "towardsai-tutors/ai-tutor-vector-db" dataset repository.
- It deletes all existing files in the repository before uploading (due to delete_patterns=["*"]).
"""

import os

from dotenv import load_dotenv
from huggingface_hub import HfApi

load_dotenv()

api = HfApi(token=os.getenv("HF_TOKEN"))

api.upload_folder(
    folder_path="data",
    repo_id="towardsai-tutors/ai-tutor-vector-db",
    repo_type="dataset",
    # multi_commits=True,
    # multi_commits_verbose=True,
    delete_patterns=["*"],
    ignore_patterns=["*.jsonl", "*.py", "*.txt", "*.ipynb", "*.md", "*.pyc"],
)
```
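Conversely, a minimal sketch for pulling the uploaded vector stores back down (for example at app startup), using `snapshot_download` from the same `huggingface_hub` package:

```python
import os

from dotenv import load_dotenv
from huggingface_hub import snapshot_download

load_dotenv()

local_path = snapshot_download(
    repo_id="towardsai-tutors/ai-tutor-vector-db",
    repo_type="dataset",
    local_dir="data",
    token=os.getenv("HF_TOKEN"),  # needed only if the dataset repo is private
)
print(f"Vector stores downloaded to {local_path}")
```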
requirements.txt
ADDED
```
modal
openai
anthropic
instructor
pydantic
logfire
chromadb
cohere
tiktoken
llama-index
llama-index-postprocessor-cohere-rerank
llama-index-embeddings-cohere
llama-index-vector-stores-chroma
python-dotenv
ipykernel
google-generativeai
llama-index-llms-gemini
gradio
pymongo
huggingface_hub
```
scripts/contextual_retrieval.py
ADDED
@@ -0,0 +1,244 @@
import asyncio
import json
import pdb
import pickle
from typing import Dict, List

import instructor
import logfire
import tiktoken
from anthropic import AsyncAnthropic
from dotenv import load_dotenv

# from instructor import AsyncInstructor, Mode, patch
from jinja2 import Template
from llama_index.core import Document
from llama_index.core.ingestion import IngestionPipeline
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.schema import TextNode
from openai import AsyncOpenAI
from pydantic import BaseModel, Field
from tenacity import retry, stop_after_attempt, wait_exponential
from tqdm.asyncio import tqdm

load_dotenv(".env")

# logfire.configure()


def create_docs(input_file: str) -> List[Document]:
    with open(input_file, "r") as f:
        documents: list[Document] = []
        for line in f:
            data = json.loads(line)
            documents.append(
                Document(
                    doc_id=data["doc_id"],
                    text=data["content"],
                    metadata={  # type: ignore
                        "url": data["url"],
                        "title": data["name"],
                        "tokens": data["tokens"],
                        "retrieve_doc": data["retrieve_doc"],
                        "source": data["source"],
                    },
                    excluded_llm_metadata_keys=[
                        "title",
                        "tokens",
                        "retrieve_doc",
                        "source",
                    ],
                    excluded_embed_metadata_keys=[
                        "url",
                        "tokens",
                        "retrieve_doc",
                        "source",
                    ],
                )
            )
    return documents


class SituatedContext(BaseModel):
    title: str = Field(..., description="The title of the document.")
    context: str = Field(
        ..., description="The context to situate the chunk within the document."
    )


# client = AsyncInstructor(
#     client=AsyncAnthropic(),
#     create=patch(
#         create=AsyncAnthropic().beta.prompt_caching.messages.create,
#         mode=Mode.ANTHROPIC_TOOLS,
#     ),
#     mode=Mode.ANTHROPIC_TOOLS,
# )
aclient = AsyncOpenAI()
# logfire.instrument_openai(aclient)
client: instructor.AsyncInstructor = instructor.from_openai(aclient)


@retry(stop=stop_after_attempt(5), wait=wait_exponential(multiplier=1, min=4, max=10))
async def situate_context(doc: str, chunk: str) -> str:
    template = Template(
        """
<document>
{{ doc }}
</document>

Here is the chunk we want to situate within the whole document above:

<chunk>
{{ chunk }}
</chunk>

Please give a short succinct context to situate this chunk within the overall document for the purposes of improving search retrieval of the chunk.
Answer only with the succinct context and nothing else.
"""
    )

    content = template.render(doc=doc, chunk=chunk)

    response = await client.chat.completions.create(
        model="gpt-4o-mini",
        max_tokens=1000,
        temperature=0,
        messages=[
            {
                "role": "user",
                "content": content,
            }
        ],
        response_model=SituatedContext,
    )
    return response.context


# async def process_chunk(node: TextNode, document_dict: dict) -> TextNode:
#     doc_id: str = node.source_node.node_id  # type: ignore
#     doc: Document = document_dict[doc_id]
#
#     if doc.metadata["tokens"] > 120_000:
#         # Tokenize the document text
#         encoding = tiktoken.encoding_for_model("gpt-4o-mini")
#         tokens = encoding.encode(doc.text)
#
#         # Trim to 120,000 tokens
#         trimmed_tokens = tokens[:120_000]
#
#         # Decode back to text
#         trimmed_text = encoding.decode(trimmed_tokens)
#
#         # Update the document text
#         doc.text = trimmed_text
#         doc.metadata["tokens"] = 120_000
#
#     context: str = await situate_context(doc.text, node.text)
#     node.text = f"{node.text}\n\n{context}"
#     return node


async def process_chunk(node: TextNode, document_dict: dict) -> TextNode:
    doc_id: str = node.source_node.node_id  # type: ignore
    doc: Document = document_dict[doc_id]

    if doc.metadata["tokens"] > 120_000:
        # Tokenize the document text
        encoding = tiktoken.encoding_for_model("gpt-4o-mini")
        tokens = encoding.encode(doc.get_content())

        # Trim to 120,000 tokens
        trimmed_tokens = tokens[:120_000]

        # Decode back to text
        trimmed_text = encoding.decode(trimmed_tokens)

        # Update the document with trimmed text
        doc = Document(text=trimmed_text, metadata=doc.metadata)
        doc.metadata["tokens"] = 120_000

    context: str = await situate_context(doc.get_content(), node.text)
    node.text = f"{node.text}\n\n{context}"
    return node


async def process(
    documents: List[Document], semaphore_limit: int = 50
) -> List[TextNode]:

    # From the documents, we create chunks
    pipeline = IngestionPipeline(
        transformations=[SentenceSplitter(chunk_size=800, chunk_overlap=0)]
    )
    all_nodes: list[TextNode] = pipeline.run(documents=documents, show_progress=True)
    print(f"Number of nodes: {len(all_nodes)}")

    document_dict: dict[str, Document] = {doc.doc_id: doc for doc in documents}

    semaphore = asyncio.Semaphore(semaphore_limit)

    async def process_with_semaphore(node):
        async with semaphore:
            result = await process_chunk(node, document_dict)
            await asyncio.sleep(0.1)
            return result

    tasks = [process_with_semaphore(node) for node in all_nodes]

    results: List[TextNode] = await tqdm.gather(*tasks, desc="Processing chunks")

    # results: List[TextNode] = []
    # # Add tqdm progress bar with semaphore limit
    # for task in tqdm(
    #     asyncio.as_completed(tasks), total=len(tasks), desc="Processing chunks"
    # ):
    #     result = await task
    #     results.append(result)
    # pdb.set_trace()

    return results


async def main():
    documents: List[Document] = create_docs("data/all_sources_data.jsonl")
    enhanced_nodes: List[TextNode] = await process(documents)

    with open("data/all_sources_contextual_nodes.pkl", "wb") as f:
        pickle.dump(enhanced_nodes, f)

    # pipeline = IngestionPipeline(
    #     transformations=[SentenceSplitter(chunk_size=800, chunk_overlap=0)]
    # )
    # all_nodes: list[TextNode] = pipeline.run(documents=documents, show_progress=True)
    # print(all_nodes[7933])
    # pdb.set_trace()

    with open("data/all_sources_contextual_nodes.pkl", "rb") as f:
        enhanced_nodes: list[TextNode] = pickle.load(f)

    for i, node in enumerate(enhanced_nodes):
        print(f"Chunk {i + 1}:")
        print(f"Node: {node}")
        print(f"Text: {node.text}")
        # pdb.set_trace()
        break


if __name__ == "__main__":
    asyncio.run(main())


# OK, so I need to create a new chroma-db-all_sources that embeds (context + chunk).
# I need to create an index, and instead of from_documents it will be built from nodes.


# First I need to create contexts for each chunk. Create a list of tasks (doc + chunk).


# documents = create_docs("data/all_sources_data.jsonl")
# pipeline = IngestionPipeline(
#     transformations=[SentenceSplitter(chunk_size=800, chunk_overlap=0)]
# )
# all_nodes = pipeline.run(documents=documents, show_progress=True)
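The trailing notes above describe the intended next step: embed the (context + chunk) nodes into a new Chroma collection by building the index from nodes rather than from documents. A minimal sketch of that step; the collection name and path are assumptions, not part of this commit, and the embedding model falls back to the global llama_index Settings unless one is passed explicitly:

# Hypothetical follow-up: build a vector index directly from the contextualized nodes.
import pickle

import chromadb
from llama_index.core import StorageContext, VectorStoreIndex
from llama_index.vector_stores.chroma import ChromaVectorStore

with open("data/all_sources_contextual_nodes.pkl", "rb") as f:
    nodes = pickle.load(f)  # the TextNodes enriched with situated context above

# Collection name/path are illustrative placeholders.
db = chromadb.PersistentClient(path="data/chroma-db-all_sources-contextual")
collection = db.get_or_create_collection("chroma-db-all_sources-contextual")
storage_context = StorageContext.from_defaults(
    vector_store=ChromaVectorStore(chroma_collection=collection)
)

# From nodes instead of from_documents, as the note suggests.
index = VectorStoreIndex(
    nodes=nodes, storage_context=storage_context, show_progress=True
)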
scripts/custom_retriever.py
ADDED
@@ -0,0 +1,257 @@
import asyncio
import os
import time
import traceback
from typing import List, Optional

import logfire
import tiktoken
from cohere import AsyncClient
from dotenv import load_dotenv
from llama_index.core import Document, QueryBundle
from llama_index.core.async_utils import run_async_tasks
from llama_index.core.callbacks import CBEventType, EventPayload
from llama_index.core.retrievers import (
    BaseRetriever,
    KeywordTableSimpleRetriever,
    VectorIndexRetriever,
)
from llama_index.core.schema import MetadataMode, NodeWithScore, QueryBundle, TextNode
from llama_index.core.vector_stores import (
    FilterCondition,
    FilterOperator,
    MetadataFilter,
    MetadataFilters,
)
from llama_index.postprocessor.cohere_rerank import CohereRerank

load_dotenv()


class AsyncCohereRerank(CohereRerank):
    def __init__(
        self,
        top_n: int = 5,
        model: str = "rerank-english-v3.0",
        api_key: Optional[str] = None,
    ) -> None:
        super().__init__(top_n=top_n, model=model, api_key=api_key)
        self._api_key = api_key
        self._model = model
        self._top_n = top_n

    async def apostprocess_nodes(
        self,
        nodes: List[NodeWithScore],
        query_bundle: Optional[QueryBundle] = None,
    ) -> List[NodeWithScore]:
        if query_bundle is None:
            raise ValueError("Query bundle must be provided.")

        if len(nodes) == 0:
            return []

        async_client = AsyncClient(api_key=self._api_key)

        with self.callback_manager.event(
            CBEventType.RERANKING,
            payload={
                EventPayload.NODES: nodes,
                EventPayload.MODEL_NAME: self._model,
                EventPayload.QUERY_STR: query_bundle.query_str,
                EventPayload.TOP_K: self._top_n,
            },
        ) as event:
            texts = [
                node.node.get_content(metadata_mode=MetadataMode.EMBED)
                for node in nodes
            ]

            results = await async_client.rerank(
                model=self._model,
                top_n=self._top_n,
                query=query_bundle.query_str,
                documents=texts,
            )

            new_nodes = []
            for result in results.results:
                new_node_with_score = NodeWithScore(
                    node=nodes[result.index].node, score=result.relevance_score
                )
                new_nodes.append(new_node_with_score)
            event.on_end(payload={EventPayload.NODES: new_nodes})

        return new_nodes


class CustomRetriever(BaseRetriever):
    """Custom retriever that performs both semantic search and hybrid search."""

    def __init__(
        self,
        vector_retriever: VectorIndexRetriever,
        document_dict: dict,
        keyword_retriever=None,
        mode: str = "AND",
    ) -> None:
        """Init params."""
        self._vector_retriever = vector_retriever
        self._document_dict = document_dict
        self._keyword_retriever = keyword_retriever
        if mode not in ("AND", "OR"):
            raise ValueError("Invalid mode.")
        self._mode = mode
        super().__init__()

    async def _process_retrieval(
        self, query_bundle: QueryBundle, is_async: bool = True
    ) -> List[NodeWithScore]:
        """Common processing logic for both sync and async retrieval."""
        # Clean query string
        query_bundle.query_str = query_bundle.query_str.replace(
            "\ninput is ", ""
        ).rstrip()
        logfire.info(f"Retrieving nodes with string: '{query_bundle}'")

        start = time.time()

        # Get nodes from both retrievers
        if is_async:
            nodes = await self._vector_retriever.aretrieve(query_bundle)
        else:
            nodes = self._vector_retriever.retrieve(query_bundle)

        keyword_nodes = []
        if self._keyword_retriever:
            if is_async:
                keyword_nodes = await self._keyword_retriever.aretrieve(query_bundle)
            else:
                keyword_nodes = self._keyword_retriever.retrieve(query_bundle)

        logfire.info(f"Number of vector nodes: {len(nodes)}")
        logfire.info(f"Number of keyword nodes: {len(keyword_nodes)}")

        # # Filter keyword nodes based on metadata filters from vector retriever
        # if (
        #     hasattr(self._vector_retriever, "_filters")
        #     and self._vector_retriever._filters
        # ):
        #     filtered_keyword_nodes = []
        #     for node in keyword_nodes:
        #         node_source = node.node.metadata.get("source")
        #         # Check if node's source matches any of the filter conditions
        #         for filter in self._vector_retriever._filters.filters:
        #             if (
        #                 isinstance(filter, MetadataFilter)
        #                 and filter.key == "source"
        #                 and filter.operator == FilterOperator.EQ
        #                 and filter.value == node_source
        #             ):
        #                 filtered_keyword_nodes.append(node)
        #                 break
        #     keyword_nodes = filtered_keyword_nodes
        #     logfire.info(
        #         f"Number of keyword nodes after filtering: {len(keyword_nodes)}"
        #     )

        # Combine results based on mode
        vector_ids = {n.node.node_id for n in nodes}
        keyword_ids = {n.node.node_id for n in keyword_nodes}
        combined_dict = {n.node.node_id: n for n in nodes}
        combined_dict.update({n.node.node_id: n for n in keyword_nodes})

        # If no keyword retriever or no keyword nodes, just use vector nodes
        if not self._keyword_retriever or not keyword_nodes:
            retrieve_ids = vector_ids
        else:
            retrieve_ids = (
                vector_ids.intersection(keyword_ids)
                if self._mode == "AND"
                else vector_ids.union(keyword_ids)
            )

        nodes = [combined_dict[rid] for rid in retrieve_ids]
        logfire.info(f"Number of combined nodes: {len(nodes)}")

        # Filter unique doc IDs
        nodes = self._filter_nodes_by_unique_doc_id(nodes)
        logfire.info(f"Number of nodes without duplicate doc IDs: {len(nodes)}")

        # Process node contents
        for node in nodes:
            doc_id = node.node.source_node.node_id
            if node.metadata["retrieve_doc"]:
                doc = self._document_dict[doc_id]
                node.node.text = doc.text
                node.node.node_id = doc_id

        # Rerank results
        try:
            reranker = (
                AsyncCohereRerank(top_n=5, model="rerank-english-v3.0")
                if is_async
                else CohereRerank(top_n=5, model="rerank-english-v3.0")
            )
            nodes = (
                await reranker.apostprocess_nodes(nodes, query_bundle)
                if is_async
                else reranker.postprocess_nodes(nodes, query_bundle)
            )
        except Exception as e:
            error_msg = f"Error during reranking: {type(e).__name__}: {str(e)}\n"
            error_msg += "Traceback:\n"
            error_msg += traceback.format_exc()
            logfire.error(error_msg)

        # Filter by score and token count
        nodes_filtered = self._filter_by_score_and_tokens(nodes)

        duration = time.time() - start
        logfire.info(f"Retrieving nodes took {duration:.2f}s")
        logfire.info(f"Nodes sent to LLM: {nodes_filtered[:5]}")

        return nodes_filtered[:5]

    def _filter_nodes_by_unique_doc_id(
        self, nodes: List[NodeWithScore]
    ) -> List[NodeWithScore]:
        """Filter nodes to keep only unique doc IDs."""
        unique_nodes = {}
        for node in nodes:
            doc_id = node.node.source_node.node_id
            if doc_id is not None and doc_id not in unique_nodes:
                unique_nodes[doc_id] = node
        return list(unique_nodes.values())

    def _filter_by_score_and_tokens(
        self, nodes: List[NodeWithScore]
    ) -> List[NodeWithScore]:
        """Filter nodes by score and token count."""
        nodes_filtered = []
        total_tokens = 0
        enc = tiktoken.encoding_for_model("gpt-4o-mini")

        for node in nodes:
            if node.score < 0.10:
                logfire.info(f"Skipping node with score {node.score}")
                continue

            node_tokens = len(enc.encode(node.node.text))
            if total_tokens + node_tokens > 100_000:
                logfire.info("Skipping node due to token count exceeding 100k")
                break

            total_tokens += node_tokens
            nodes_filtered.append(node)

        return nodes_filtered

    async def _aretrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
        """Async retrieve nodes given query."""
        return await self._process_retrieval(query_bundle, is_async=True)

    def _retrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
        """Sync retrieve nodes given query."""
        return asyncio.run(self._process_retrieval(query_bundle, is_async=False))
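A sketch of how this retriever gets wired up, mirroring setup_basic_database in scripts/evaluate_rag_system.py below; the paths, collection name, and query here are assumptions, and the Cohere reranker expects COHERE_API_KEY in the environment:

# Hypothetical wiring of CustomRetriever over an existing Chroma collection.
import pickle

import chromadb
from llama_index.core import VectorStoreIndex
from llama_index.core.retrievers import VectorIndexRetriever
from llama_index.vector_stores.chroma import ChromaVectorStore

from custom_retriever import CustomRetriever

db = chromadb.PersistentClient(path="data/chroma-db-all_sources")
collection = db.get_or_create_collection("chroma-db-all_sources")
index = VectorStoreIndex.from_vector_store(
    vector_store=ChromaVectorStore(chroma_collection=collection)
)
vector_retriever = VectorIndexRetriever(index=index, similarity_top_k=15)

# doc_id -> full Document, used when a node's metadata asks for whole-doc retrieval.
with open("data/chroma-db-all_sources/document_dict_all_sources.pkl", "rb") as f:
    document_dict = pickle.load(f)

# "OR" unions vector and keyword hits; the keyword retriever is optional.
retriever = CustomRetriever(
    vector_retriever, document_dict, keyword_retriever=None, mode="OR"
)
nodes = retriever.retrieve("What is LoRA?")  # reranks, then caps context near 100k tokens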
scripts/evaluate_rag_system.py
ADDED
@@ -0,0 +1,773 @@
import asyncio
import html
import json
import logging
import os
import pdb
import pickle
import random
import time
from typing import Dict, List, Optional, Tuple

import aiofiles
import chromadb
import logfire
import pandas as pd
from custom_retriever import CustomRetriever
from llama_index.agent.openai import OpenAIAgent
from llama_index.core import Document, SimpleKeywordTableIndex, VectorStoreIndex
from llama_index.core.bridge.pydantic import Field, SerializeAsAny
from llama_index.core.chat_engine.types import (
    AGENT_CHAT_RESPONSE_TYPE,
    AgentChatResponse,
    ChatResponseMode,
)
from llama_index.core.evaluation import (
    AnswerRelevancyEvaluator,
    BatchEvalRunner,
    EmbeddingQAFinetuneDataset,
    FaithfulnessEvaluator,
    RelevancyEvaluator,
)
from llama_index.core.evaluation.base import EvaluationResult
from llama_index.core.evaluation.retrieval.base import (
    BaseRetrievalEvaluator,
    RetrievalEvalMode,
    RetrievalEvalResult,
)
from llama_index.core.ingestion import IngestionPipeline
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.postprocessor.types import BaseNodePostprocessor
from llama_index.core.retrievers import (
    BaseRetriever,
    KeywordTableSimpleRetriever,
    VectorIndexRetriever,
)
from llama_index.core.schema import ImageNode, NodeWithScore, QueryBundle, TextNode
from llama_index.core.tools import RetrieverTool, ToolMetadata
from llama_index.core.vector_stores import (
    FilterOperator,
    MetadataFilter,
    MetadataFilters,
)
from llama_index.embeddings.cohere import CohereEmbedding
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.llms.gemini import Gemini
from llama_index.llms.openai import OpenAI
from llama_index.vector_stores.chroma import ChromaVectorStore
from prompts import system_message_openai_agent
from pydantic import BaseModel, Field
from tqdm.asyncio import tqdm_asyncio

# from setup import (
#     AVAILABLE_SOURCES,
#     AVAILABLE_SOURCES_UI,
#     custom_retriever_all_sources,
#     custom_retriever_langchain,
#     custom_retriever_llama_index,
#     custom_retriever_openai_cookbooks,
#     custom_retriever_peft,
#     custom_retriever_transformers,
#     custom_retriever_trl,
# )


class RotatingJSONLWriter:
    def __init__(
        self, base_filename: str, max_size: int = 10**6, backup_count: int = 5
    ):
        """
        Initialize the rotating JSONL writer.

        Args:
            base_filename (str): The base filename for the JSONL files.
            max_size (int): Maximum size in bytes before rotating.
            backup_count (int): Number of backup files to keep.
        """
        self.base_filename = base_filename
        self.max_size = max_size
        self.backup_count = backup_count
        self.current_file = base_filename

    async def write(self, data: dict):
        # Rotate if file exceeds max size
        if (
            os.path.exists(self.current_file)
            and os.path.getsize(self.current_file) > self.max_size
        ):
            await self.rotate_files()

        async with aiofiles.open(self.current_file, "a", encoding="utf-8") as f:
            await f.write(json.dumps(data, ensure_ascii=False) + "\n")

    async def rotate_files(self):
        # Remove the oldest backup if it exists
        oldest_backup = f"{self.base_filename}.{self.backup_count}"
        if os.path.exists(oldest_backup):
            os.remove(oldest_backup)

        # Rotate existing backups
        for i in range(self.backup_count - 1, 0, -1):
            src = f"{self.base_filename}.{i}"
            dst = f"{self.base_filename}.{i + 1}"
            if os.path.exists(src):
                os.rename(src, dst)

        # Rename current file to backup
        os.rename(self.current_file, f"{self.base_filename}.1")


class AsyncKeywordTableSimpleRetriever(KeywordTableSimpleRetriever):
    async def _aretrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
        loop = asyncio.get_event_loop()
        return await loop.run_in_executor(None, self._retrieve, query_bundle)


class SampleableEmbeddingQADataset:
    def __init__(self, dataset: EmbeddingQAFinetuneDataset):
        self.dataset = dataset

    def sample(self, n: int) -> EmbeddingQAFinetuneDataset:
        """
        Sample n queries from the dataset.

        Args:
            n (int): Number of queries to sample.

        Returns:
            EmbeddingQAFinetuneDataset: A new dataset with the sampled queries.
        """
        if n > len(self.dataset.queries):
            raise ValueError(
                f"n ({n}) is greater than the number of queries ({len(self.dataset.queries)})"
            )

        sampled_query_ids = random.sample(list(self.dataset.queries.keys()), n)

        sampled_queries = {qid: self.dataset.queries[qid] for qid in sampled_query_ids}
        sampled_relevant_docs = {
            qid: self.dataset.relevant_docs[qid] for qid in sampled_query_ids
        }

        # Collect all unique document IDs from the sampled relevant docs
        sampled_doc_ids = set()
        for doc_ids in sampled_relevant_docs.values():
            sampled_doc_ids.update(doc_ids)

        sampled_corpus = {
            doc_id: self.dataset.corpus[doc_id] for doc_id in sampled_doc_ids
        }

        return EmbeddingQAFinetuneDataset(
            queries=sampled_queries,
            corpus=sampled_corpus,
            relevant_docs=sampled_relevant_docs,
            mode=self.dataset.mode,
        )

    def __getattr__(self, name):
        return getattr(self.dataset, name)


class RetrieverEvaluator(BaseRetrievalEvaluator):
    """Retriever evaluator.

    This module will evaluate a retriever using a set of metrics.

    Args:
        metrics (List[BaseRetrievalMetric]): Sequence of metrics to evaluate
        retriever: Retriever to evaluate.
        node_postprocessors (Optional[List[BaseNodePostprocessor]]): Post-processors to apply after retrieval.
    """

    retriever: BaseRetriever = Field(..., description="Retriever to evaluate")
    node_postprocessors: Optional[List[SerializeAsAny[BaseNodePostprocessor]]] = Field(
        default=None, description="Optional post-processor"
    )

    async def _aget_retrieved_ids_and_texts(
        self,
        query: str,
        mode: RetrievalEvalMode = RetrievalEvalMode.TEXT,
        source: str = "",
    ) -> Tuple[List[str], List[str]]:
        """Get retrieved ids and texts, potentially applying a post-processor."""
        try:
            retrieved_nodes: list[NodeWithScore] = await self.retriever.aretrieve(query)
            logfire.info(f"Retrieved {len(retrieved_nodes)} nodes for: '{query}'")
        except Exception as e:
            return ["00000000-0000-0000-0000-000000000000"], [str(e)]

        if retrieved_nodes is None or len(retrieved_nodes) == 0:
            print(f"No nodes retrieved for {query}")
            return ["00000000-0000-0000-0000-000000000000"], ["No nodes retrieved"]

        if self.node_postprocessors:
            for node_postprocessor in self.node_postprocessors:
                retrieved_nodes = node_postprocessor.postprocess_nodes(
                    retrieved_nodes, query_str=query
                )

        return (
            [node.node.node_id for node in retrieved_nodes],
            [node.node.text for node in retrieved_nodes],  # type: ignore
        )


class OpenAIAgentRetrieverEvaluator(BaseRetrievalEvaluator):
    agent: OpenAIAgent = Field(description="The OpenAI agent used for retrieval")

    async def _aget_retrieved_ids_and_texts(
        self,
        query: str,
        mode: RetrievalEvalMode = RetrievalEvalMode.TEXT,
        source: str = "",
    ) -> Tuple[List[str], List[str]]:

        self.agent.memory.reset()

        try:
            logfire.info(f"Executing agent with query: {query}")
            response: AgentChatResponse = await self.agent.achat(query)
        except Exception as e:
            # await self._save_response_data_async(
            #     source, query, ["Error retrieving nodes"], "Error retrieving nodes"
            # )
            return ["00000000-0000-0000-0000-000000000000"], [str(e)]

        retrieved_nodes: list[NodeWithScore] = get_nodes_with_score(response)
        logfire.info(f"Retrieved {len(retrieved_nodes)} nodes to answer: '{query}'")
        retrieved_nodes = retrieved_nodes[:6]  # Limit to the first 6 retrieved nodes

        if retrieved_nodes is None or len(retrieved_nodes) == 0:
            # await self._save_response_data_async(
            #     source, query, ["No retrieved nodes"], "No retrieved nodes"
            # )
            return ["00000000-0000-0000-0000-000000000000"], ["No nodes retrieved"]

        retrieved_ids = [node.node.node_id for node in retrieved_nodes]
        retrieved_texts = [node.node.text for node in retrieved_nodes]  # type: ignore

        # Will not save the context: it's too long (token-wise), costly, and takes too much time.
        await self._save_response_data_async(
            source=source, query=query, context="", response=response.response
        )

        return retrieved_ids, retrieved_texts

    async def _save_response_data_async(self, source, query, context, response):
        data = {
            "source": source,
            "question": query,
            # "context": context,
            "answer": response,
        }
        await rotating_writer.write(data)


def get_nodes_with_score(completion) -> list[NodeWithScore]:
    retrieved_nodes = []
    for source in completion.sources:  # completion.sources = list[ToolOutput]
        if source.is_error:
            continue
        for node in source.raw_output:  # source.raw_output = list[NodeWithScore]
            retrieved_nodes.append(node)
    return retrieved_nodes


def setup_basic_database(db_collection, dict_file_name, keyword_retriever):
    db = chromadb.PersistentClient(path=f"data/{db_collection}")
    chroma_collection = db.get_or_create_collection(db_collection)
    vector_store = ChromaVectorStore(chroma_collection=chroma_collection)

    # embed_model = OpenAIEmbedding(model="text-embedding-3-large", mode="similarity")
    embed_model = CohereEmbedding(
        api_key=os.environ["COHERE_API_KEY"],
        model_name="embed-english-v3.0",
        input_type="search_query",
    )
    # client = embed_model._get_client()
    # aclient = embed_model._get_aclient()
    # logfire.instrument_openai(client)
    # logfire.instrument_openai(aclient)

    index = VectorStoreIndex.from_vector_store(
        vector_store=vector_store,
        show_progress=True,
    )
    vector_retriever = VectorIndexRetriever(
        index=index,
        similarity_top_k=15,
        embed_model=embed_model,
    )
    with open(f"data/{db_collection}/{dict_file_name}", "rb") as f:
        document_dict = pickle.load(f)

    return CustomRetriever(vector_retriever, document_dict, keyword_retriever, "OR")


def update_query_engine_tools(selected_sources, custom_retriever_all_sources):
    tools = []
    source_mapping = {
        # "Transformers Docs": (
        #     custom_retriever_transformers,
        #     "Transformers_information",
        #     """Useful for general questions asking about the artificial intelligence (AI) field. Employ this tool to fetch information on topics such as language models (LLMs) models such as Llama3 and theory (transformer architectures), tips on prompting, quantization, etc.""",
        # ),
        # "PEFT Docs": (
        #     custom_retriever_peft,
        #     "PEFT_information",
        #     """Useful for questions asking about efficient LLM fine-tuning. Employ this tool to fetch information on topics such as LoRA, QLoRA, etc.""",
        # ),
        # "TRL Docs": (
        #     custom_retriever_trl,
        #     "TRL_information",
        #     """Useful for questions asking about fine-tuning LLMs with reinforcement learning (RLHF). Includes information about the Supervised Fine-tuning step (SFT), Reward Modeling step (RM), and the Proximal Policy Optimization (PPO) step.""",
        # ),
        # "LlamaIndex Docs": (
        #     custom_retriever_llama_index,
        #     "LlamaIndex_information",
        #     """Useful for questions asking about retrieval augmented generation (RAG) with LLMs and embedding models. It is the documentation of a framework, includes info about fine-tuning embedding models, building chatbots, and agents with llms, using vector databases, embeddings, information retrieval with cosine similarity or bm25, etc.""",
        # ),
        # "OpenAI Cookbooks": (
        #     custom_retriever_openai_cookbooks,
        #     "openai_cookbooks_info",
        #     """Useful for questions asking about accomplishing common tasks with the OpenAI API. Returns example code and guides stored in Jupyter notebooks, including info about ChatGPT GPT actions, OpenAI Assistants API, and How to fine-tune OpenAI's GPT-4o and GPT-4o-mini models with the OpenAI API.""",
        # ),
        # "LangChain Docs": (
        #     custom_retriever_langchain,
        #     "langchain_info",
        #     """Useful for questions asking about the LangChain framework. It is the documentation of the LangChain framework, includes info about building chains, agents, and tools, using memory, prompts, callbacks, etc.""",
        # ),
        "All Sources": (
            custom_retriever_all_sources,
            "all_sources_info",
            """Useful for all questions, contains information about the field of AI.""",
        ),
    }

    for source in selected_sources:
        if source in source_mapping:
            retriever, name, description = source_mapping[source]
            tools.append(
                RetrieverTool(
                    retriever=retriever,
                    metadata=ToolMetadata(
                        name=name,
                        description=description,
                    ),
                )
            )

    return tools


def setup_agent(custom_retriever_all_sources) -> OpenAIAgent:

    llm = OpenAI(
        temperature=1,
        # model="gpt-4o",
        model="gpt-4o-mini",
        max_tokens=5000,
        max_retries=3,
    )
    client = llm._get_client()
    logfire.instrument_openai(client)
    aclient = llm._get_aclient()
    logfire.instrument_openai(aclient)

    tools_available = [
        # "Transformers Docs",
        # "PEFT Docs",
        # "TRL Docs",
        # "LlamaIndex Docs",
        # "LangChain Docs",
        # "OpenAI Cookbooks",
        "All Sources",
    ]
    query_engine_tools = update_query_engine_tools(
        tools_available, custom_retriever_all_sources
    )

    agent = OpenAIAgent.from_tools(
        llm=llm,
        tools=query_engine_tools,
        system_prompt=system_message_openai_agent,
    )

    return agent


async def evaluate_answers():
    start_time = time.time()

    # Gemini is not async here; maybe it could work with multithreading?
    # llm = Gemini(model="models/gemini-1.5-flash-002", temperature=1, max_tokens=1000)
    llm = OpenAI(model="gpt-4o-mini", temperature=1, max_tokens=1000)
    relevancy_evaluator = AnswerRelevancyEvaluator(llm=llm)

    # Load queries and response strings from the JSONL file
    query_response_pairs = []
    with open("response_data.jsonl", "r") as f:
        for line in f:
            data = json.loads(line)
            query_response_pairs.append(
                (data["source"], data["question"], data["answer"])
            )

    logfire.info(f"Number of queries and answers: {len(query_response_pairs)}")

    semaphore = asyncio.Semaphore(90)  # Adjust this value as needed

    async def evaluate_query_response(source, query, response):
        async with semaphore:
            try:
                result: EvaluationResult = await relevancy_evaluator.aevaluate(
                    query=query, response=response
                )
                return source, result
            except Exception as e:
                logfire.error(f"Error evaluating query for {source}: {str(e)}")
                return source, None

    # Use asyncio.gather to run all evaluations concurrently
    results = await tqdm_asyncio.gather(
        *[
            evaluate_query_response(source, query, response)
            for source, query, response in query_response_pairs
        ],
        desc="Evaluating answers",
        total=len(query_response_pairs),
    )

    # Process results
    eval_results = {}
    for item in results:
        if isinstance(item, tuple) and len(item) == 2:
            source, result = item
            if result is not None:
                if source not in eval_results:
                    eval_results[source] = []
                eval_results[source].append(result)
        else:
            logfire.error(f"Unexpected result: {item}")

    # Save results for each source
    for source, results in eval_results.items():
        with open(f"eval_answers_results_{source}.pkl", "wb") as f:
            pickle.dump(results, f)

    end_time = time.time()
    logfire.info(f"Total evaluation time: {round(end_time - start_time, 3)} seconds")

    return eval_results


def create_docs(input_file: str) -> List[Document]:
    with open(input_file, "r") as f:
        documents = []
        for line in f:
            data = json.loads(line)
            documents.append(
                Document(
                    doc_id=data["doc_id"],
                    text=data["content"],
                    metadata={  # type: ignore
                        "url": data["url"],
                        "title": data["name"],
                        "tokens": data["tokens"],
                        "retrieve_doc": data["retrieve_doc"],
                        "source": data["source"],
                    },
                    excluded_llm_metadata_keys=[
                        "title",
                        "tokens",
                        "retrieve_doc",
                        "source",
                    ],
                    excluded_embed_metadata_keys=[
                        "url",
                        "tokens",
                        "retrieve_doc",
                        "source",
                    ],
                )
            )
    return documents


def get_sample_size(source: str, total_queries: int) -> int:
    """Determine the number of queries to sample based on the source."""
    # small_datasets = {"peft": 0, "trl": 0, "openai_cookbooks": 0}
    # large_datasets = {
    #     "transformers": 0,
    #     "llama_index": 0,
    #     "langchain": 1,
    #     "tai_blog": 0,
    # }
    small_datasets = {"peft": 49, "trl": 34, "openai_cookbooks": 170}
    large_datasets = {
        "transformers": 200,
        "llama_index": 200,
        "langchain": 200,
        "tai_blog": 200,
    }
    # small_datasets = {"peft": 49, "trl": 34, "openai_cookbooks": 100}
    # large_datasets = {
    #     "transformers": 100,
    #     "llama_index": 100,
    #     "langchain": 100,
    #     "tai_blog": 100,
    # }
    # small_datasets = {"peft": 18, "trl": 12, "openai_cookbooks": 14}
    # large_datasets = {
    #     "transformers": 24,
    #     "llama_index": 8,
    #     "langchain": 6,
    #     "tai_blog": 18,
    # }
    # small_datasets = {"peft": 4, "trl": 4, "openai_cookbooks": 4}
    # large_datasets = {
    #     "transformers": 4,
    #     "llama_index": 4,
    #     "langchain": 5,
    #     "tai_blog": 5,
    # }

    if source in small_datasets:
        return small_datasets[source]
    elif source in large_datasets:
        return large_datasets[source]
    else:
        return min(100, total_queries)  # Default to 100, or all queries if fewer than 100


async def evaluate_retriever():
    start_time = time.time()
    with open("data/keyword_retriever_async.pkl", "rb") as f:
        keyword_retriever = pickle.load(f)

    custom_retriever_all_sources: CustomRetriever = setup_basic_database(
        "chroma-db-all_sources", "document_dict_all_sources.pkl", keyword_retriever
    )
    # agent = setup_agent(custom_retriever_all_sources)

    # filters = MetadataFilters(
    #     filters=[
    #         MetadataFilter(key="source", operator=FilterOperator.EQ, value="langchain"),
    #     ]
    # )
    # custom_retriever_all_sources._vector_retriever._filters = filters

    end_time = time.time()
    logfire.info(
        f"Time taken to set up the custom retriever: {round(end_time - start_time, 2)} seconds"
    )

    sources_to_evaluate = [
        "transformers",
        "peft",
        "trl",
        "llama_index",
        "langchain",
        "openai_cookbooks",
        "tai_blog",
    ]

    # for k in [5, 7, 9, 11, 13, 15]:
    #     custom_retriever_all_sources._vector_retriever._similarity_top_k = k

    retriever_evaluator = RetrieverEvaluator.from_metric_names(
        ["mrr", "hit_rate"], retriever=custom_retriever_all_sources
    )
    # retriever_evaluator = OpenAIAgentRetrieverEvaluator.from_metric_names(
    #     metric_names=["mrr", "hit_rate"], agent=agent
    # )

    all_query_pairs = []
    for source in sources_to_evaluate:
        rag_eval_dataset = EmbeddingQAFinetuneDataset.from_json(
            f"scripts/rag_eval_{source}.json"
        )
        sampleable_dataset = SampleableEmbeddingQADataset(rag_eval_dataset)
        sample_size = get_sample_size(source, len(sampleable_dataset.queries))
        sampled_dataset = sampleable_dataset.sample(n=sample_size)
        query_expected_ids_pairs = sampled_dataset.query_docid_pairs
        all_query_pairs.extend(
            [(source, pair[0], pair[1]) for pair in query_expected_ids_pairs]
        )

    semaphore = asyncio.Semaphore(220)  # 250 caused a couple of errors
    # semaphore = asyncio.Semaphore(90)  # 100 caused a couple of errors with the agent

    async def evaluate_query(source, query, expected_ids):
        async with semaphore:
            try:
                result: RetrievalEvalResult = await retriever_evaluator.aevaluate(
                    query=query,
                    expected_ids=expected_ids,
                    mode=RetrievalEvalMode.TEXT,
                    source=source,
                )
                return source, result
            except Exception as e:
                logfire.error(f"Error evaluating query for {source}: {str(e)}")
                return source, None

    # Use asyncio.gather to run all evaluations concurrently
    results = await tqdm_asyncio.gather(
        *[
            evaluate_query(source, query, expected_ids)
            for source, query, expected_ids in all_query_pairs
        ],
        desc="Evaluating queries",
        total=len(all_query_pairs),
    )

    # Process results
    eval_results = {source: [] for source in sources_to_evaluate}
    for item in results:
        if isinstance(item, tuple) and len(item) == 2:
            source, result = item
            if result is not None:
                eval_results[source].append(result)
        else:
            logfire.error(f"Unexpected result: {item}")

    # Save results for each source
    for source, results in eval_results.items():
        with open(f"eval_results_{source}.pkl", "wb") as f:
            pickle.dump(results, f)
        # print(display_results_retriever(source, results))

    end_time = time.time()
    logfire.info(f"Total evaluation time: {round(end_time - start_time, 3)} seconds")


def display_results_retriever(name, eval_results):
    """Display results from evaluate."""

    metric_dicts = []
    for eval_result in eval_results:
        metric_dict = eval_result.metric_vals_dict
        metric_dicts.append(metric_dict)

    full_df = pd.DataFrame(metric_dicts)

    hit_rate = full_df["hit_rate"].mean()
    mrr = full_df["mrr"].mean()

    metric_df = pd.DataFrame(
        {"Retriever Name": [name], "Hit Rate": [hit_rate], "MRR": [mrr]}
    )

    return metric_df


def display_results():

    sources = [
        "transformers",
        "peft",
        "trl",
        "llama_index",
        "langchain",
        "openai_cookbooks",
        "tai_blog",
    ]
    # retrievers_to_evaluate = [
    #     # "chroma-db-all_sources_400_0",
    #     # "chroma-db-all_sources_400_200",
    #     # "chroma-db-all_sources_500_0",
    #     # "chroma-db-all_sources_500_250",
    #     # "chroma-db-all_sources",
    #     # "chroma-db-all_sources_800_400",
    #     # "chroma-db-all_sources_1000_0",
    #     # "chroma-db-all_sources_1000_500",
    # ]

    # topk = [5, 7, 9, 11, 13, 15]
    # for k in topk:
    #     for db_name in retrievers_to_evaluate:
    if True:
        # print("-" * 20)
        # print(f"Retriever {db_name}")
        for source in sources:
            with open(f"eval_results_{source}.pkl", "rb") as f:
                eval_results = pickle.load(f)
            print(display_results_retriever(f"{source}", eval_results))


def display_results_answers():

    sources = [
        "transformers",
        "peft",
        "trl",
        "llama_index",
        "langchain",
        "openai_cookbooks",
        "tai_blog",
    ]

    for source in sources:
        with open(f"eval_answers_results_{source}.pkl", "rb") as f:
            eval_results = pickle.load(f)
        print(
            f"Score for {source}:",
            sum(result.score for result in eval_results) / len(eval_results),
        )


async def main():
    await evaluate_retriever()
    display_results()
    # await evaluate_answers()
    # display_results_answers()
    return


if __name__ == "__main__":

    logfire.configure()
    rotating_writer = RotatingJSONLWriter(
        "response_data.jsonl", max_size=10**7, backup_count=5
    )

    start_time = time.time()
    asyncio.run(main())
    end_time = time.time()
    logfire.info(
        f"Time taken to run script: {round((end_time - start_time), 3)} seconds"
    )

    # # Creating the keyword index and retriever
    # logfire.info("Creating nodes from documents")
    # documents = create_docs("data/all_sources_data.jsonl")
    # pipeline = IngestionPipeline(
    #     transformations=[SentenceSplitter(chunk_size=800, chunk_overlap=0)]
    # )
    # all_nodes = pipeline.run(documents=documents, show_progress=True)
    # # with open("data/all_nodes.pkl", "wb") as f:
    # #     pickle.dump(all_nodes, f)

    # # all_nodes = pickle.load(open("data/all_nodes.pkl", "rb"))
    # logfire.info(f"Number of nodes: {len(all_nodes)}")

    # with open("processed_chunks.pkl", "rb") as f:
    #     all_nodes: list[TextNode] = pickle.load(f)

    # keyword_index = SimpleKeywordTableIndex(
    #     nodes=all_nodes, max_keywords_per_chunk=10, show_progress=True, use_async=False
    # )
    # # with open("data/keyword_index.pkl", "wb") as f:
    # #     pickle.dump(keyword_index, f)
    # # keyword_index = pickle.load(open("data/keyword_index.pkl", "rb"))

    # logfire.info("Creating keyword retriever")
    # keyword_retriever = AsyncKeywordTableSimpleRetriever(index=keyword_index)

    # with open("data/keyword_retriever_async.pkl", "wb") as f:
    #     pickle.dump(keyword_retriever, f)
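display_results() above prints per-source tables; pooling the saved pickles into one overall score works the same way. A small sketch under the assumption that the eval_results_{source}.pkl files written by evaluate_retriever() already exist:

# Pool the per-source RetrievalEvalResult pickles into one overall hit rate / MRR.
import pickle

import pandas as pd

sources = ["transformers", "peft", "trl", "llama_index", "langchain", "openai_cookbooks", "tai_blog"]
rows = []
for source in sources:
    with open(f"eval_results_{source}.pkl", "rb") as f:
        rows.extend(r.metric_vals_dict for r in pickle.load(f))

df = pd.DataFrame(rows)
print(f"Overall hit rate: {df['hit_rate'].mean():.3f} | overall MRR: {df['mrr'].mean():.3f}")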
scripts/generate_qa_dataset.ipynb
ADDED
@@ -0,0 +1,287 @@
{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import nest_asyncio\n",
    "\n",
    "nest_asyncio.apply()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Generate synthetic dataset of questions + doc ids\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# DEFAULT_QA_GENERATE_PROMPT_TMPL = \"\"\"\\\n",
    "# Context information is below.\n",
    "\n",
    "# ---------------------\n",
    "# {context_str}\n",
    "# ---------------------\n",
    "\n",
    "# Given the context information and not prior knowledge.\n",
    "# generate only questions based on the below query.\n",
    "\n",
    "# You are a Teacher/ Professor with expertise in the field of AI. Your task is to setup \\\n",
    "# {num_questions_per_chunk} questions for an upcoming \\\n",
    "# quiz/examination. The questions should be diverse in nature \\\n",
    "# across the document. Restrict the questions to the \\\n",
    "# context information provided.\"\n",
    "# \"\"\"\n",
    "\n",
    "STUDENT_AI_QUESTIONS_GENERATE_PROMPT_TMPL = \"\"\"\\\n",
    "Context information is below.\n",
    "\n",
    "---------------------\n",
    "{context_str}\n",
    "---------------------\n",
    "\n",
    "Given the context information above, generate {num_questions_per_chunk} questions that a student might ask about AI, specifically related to the information provided in the context, \n",
    "but without the questions mentioning the context information.\n",
    "\n",
    "You are simulating curious students who are learning about AI. Your task is to create questions that:\n",
    "1. Reflect genuine curiosity about the topics covered in the context.\n",
    "2. Vary in complexity, from basic clarifications to more advanced inquiries.\n",
    "3. Demonstrate a student's desire to understand AI concepts better.\n",
    "4. Can be answered using the information provided in the context.\n",
    "\n",
    "The questions should be diverse and cover different aspects of the content. Do not use any prior knowledge beyond what's given in the context. Ensure that each question you generate can be answered using the information provided in the context.\n",
    "\"\"\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from llama_index.llms.openai import OpenAI\n",
    "from llama_index.llms.gemini import Gemini\n",
    "\n",
    "# llm = Gemini(model=\"models/gemini-1.5-flash-latest\", temperature=1, max_tokens=1000)\n",
    "llm = OpenAI(\n",
    "    api_key=\"\",\n",
    "    model=\"gpt-4o-mini\",\n",
    "    temperature=1,\n",
    "    max_tokens=1000,\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import json\n",
    "import re\n",
    "import uuid\n",
    "import warnings\n",
    "import asyncio\n",
    "from typing import Dict, List, Tuple\n",
    "\n",
    "from llama_index.core.llms.llm import LLM\n",
    "from llama_index.core.schema import MetadataMode, TextNode\n",
    "from tqdm.asyncio import tqdm as async_tqdm\n",
    "from llama_index.core.llama_dataset.legacy.embedding import EmbeddingQAFinetuneDataset\n",
    "from llama_index.core import Document\n",
    "\n",
    "\n",
    "async def generate_qa_embedding_pairs(\n",
    "    nodes: List[TextNode],\n",
    "    llm: LLM,\n",
    "    qa_generate_prompt_tmpl: str = STUDENT_AI_QUESTIONS_GENERATE_PROMPT_TMPL,\n",
    "    num_questions_per_chunk: int = 1,\n",
    "    max_concurrent: int = 10,\n",
    "    delay: float = 0.5,\n",
    ") -> EmbeddingQAFinetuneDataset:\n",
    "    \"\"\"Generate examples given a set of nodes.\"\"\"\n",
    "    node_dict = {\n",
    "        node.node_id: node.get_content(metadata_mode=MetadataMode.NONE)\n",
    "        for node in nodes\n",
    "    }\n",
    "\n",
    "    queries = {}\n",
    "    relevant_docs = {}\n",
    "    semaphore = asyncio.Semaphore(max_concurrent)\n",
    "\n",
    "    async def process_node(node_id: str, text: str):\n",
    "        async with semaphore:\n",
    "            query = qa_generate_prompt_tmpl.format(\n",
    "                context_str=text, num_questions_per_chunk=num_questions_per_chunk\n",
    "            )\n",
    "            response = await llm.acomplete(\n",
    "                query\n",
    "            )  # Assuming the LLM has an async method\n",
    "\n",
    "            result = str(response).strip().split(\"\\n\")\n",
    "            questions = [\n",
    "                re.sub(r\"^\\d+[\\).\\s]\", \"\", question).strip() for question in result\n",
    "            ]\n",
    "            questions = [question for question in questions if len(question) > 0][\n",
    "                :num_questions_per_chunk\n",
    "            ]\n",
    "\n",
    "            num_questions_generated = len(questions)\n",
    "            if num_questions_generated < num_questions_per_chunk:\n",
    "                warnings.warn(\n",
    "                    f\"Fewer questions generated ({num_questions_generated}) \"\n",
    "                    f\"than requested ({num_questions_per_chunk}).\"\n",
    "                )\n",
    "\n",
    "            for question in questions:\n",
    "                question_id = str(uuid.uuid4())\n",
    "                queries[question_id] = question\n",
    "                relevant_docs[question_id] = [node_id]\n",
    "\n",
    "            await asyncio.sleep(delay)\n",
    "\n",
    "    # Use asyncio.gather to process nodes concurrently\n",
    "    await async_tqdm.gather(\n",
    "        *[process_node(node_id, text) for node_id, text in node_dict.items()]\n",
    "    )\n",
    "\n",
    "    # construct dataset\n",
    "    return EmbeddingQAFinetuneDataset(\n",
    "        queries=queries, corpus=node_dict, relevant_docs=relevant_docs\n",
    "    )"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import pickle\n",
    "\n",
    "\n",
    "async def generate_questions(path, source_name):\n",
    "    nodes = []\n",
    "    with open(path, \"rb\") as f:\n",
    "        document_dict = pickle.load(f)\n",
    "        for doc_id in document_dict.keys():\n",
    "            doc: Document = document_dict[doc_id]\n",
    "            if doc.metadata[\"tokens\"] >= 100_000:\n",
    "                print(\"skipping\", doc.metadata[\"tokens\"])\n",
    "                continue\n",
    "            node = TextNode(text=doc.text, metadata=doc.metadata, id_=doc_id)\n",
    "            nodes.append(node)\n",
    "\n",
    "    rag_eval_dataset: EmbeddingQAFinetuneDataset = await generate_qa_embedding_pairs(\n",
    "        nodes,\n",
    "        llm=llm,\n",
    "        num_questions_per_chunk=1,\n",
    "        qa_generate_prompt_tmpl=STUDENT_AI_QUESTIONS_GENERATE_PROMPT_TMPL,\n",
    "        max_concurrent=20,  # Adjust this to control concurrency\n",
    "        delay=0.5,  # Adjust this to add delay between API calls\n",
    "    )\n",
    "    rag_eval_dataset.save_json(f\"./rag_eval_{source_name}.json\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# await generate_questions(\n",
    "#     \"../data/chroma-db-langchain/document_dict_langchain.pkl\", \"langchain\"\n",
    "# )\n",
    "# await generate_questions(\n",
    "#     \"../data/chroma-db-llama_index/document_dict_llama_index.pkl\", \"llama_index\"\n",
    "# )\n",
    "# await generate_questions(\n",
    "#     \"../data/chroma-db-openai_cookbooks/document_dict_openai_cookbooks.pkl\",\n",
    "#     \"openai_cookbooks\",\n",
    "# )\n",
    "# await generate_questions(\"../data/chroma-db-peft/document_dict_peft.pkl\", \"peft\")\n",
    "# await generate_questions(\"../data/chroma-db-trl/document_dict_trl.pkl\", \"trl\")\n",
    "\n",
    "await generate_questions(\n",
    "    \"../data/chroma-db-tai_blog/document_dict_tai_blog.pkl\", \"tai_blog\"\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# # We can also load the dataset from a previously saved json file.\n",
    "# from llama_index.core.evaluation import EmbeddingQAFinetuneDataset\n",
    "\n",
    "# rag_eval_dataset = EmbeddingQAFinetuneDataset.from_json(\"./rag_eval_transformers.json\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "env",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
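The dataset produced above maps each synthetic question to the id of the document it was generated from, so it can be scored directly against any retriever. Below is a minimal hit-rate sketch, assuming a retriever with the standard LlamaIndex retrieve(query) interface returning NodeWithScore objects; the hit_rate helper and the my_retriever name are hypothetical, not part of this repository.

from llama_index.core.llama_dataset.legacy.embedding import EmbeddingQAFinetuneDataset


def hit_rate(dataset: EmbeddingQAFinetuneDataset, retriever) -> float:
    # Fraction of questions whose source document appears among the retrieved nodes.
    hits = 0
    for query_id, query in dataset.queries.items():
        expected_ids = set(dataset.relevant_docs[query_id])
        retrieved_ids = {node.node_id for node in retriever.retrieve(query)}
        hits += bool(expected_ids & retrieved_ids)
    return hits / len(dataset.queries)


# dataset = EmbeddingQAFinetuneDataset.from_json("./rag_eval_tai_blog.json")
# print(hit_rate(dataset, my_retriever))  # my_retriever is hypothetical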
scripts/main.py
ADDED
@@ -0,0 +1,291 @@
import pdb

import gradio as gr
import logfire
from custom_retriever import CustomRetriever
from llama_index.agent.openai import OpenAIAgent
from llama_index.core.llms import MessageRole
from llama_index.core.memory import ChatSummaryMemoryBuffer
from llama_index.core.tools import RetrieverTool, ToolMetadata
from llama_index.core.vector_stores import (
    FilterCondition,
    FilterOperator,
    MetadataFilter,
    MetadataFilters,
)
from llama_index.llms.openai import OpenAI
from prompts import system_message_openai_agent
from setup import (  # custom_retriever_langchain,; custom_retriever_llama_index,; custom_retriever_openai_cookbooks,; custom_retriever_peft,; custom_retriever_transformers,; custom_retriever_trl,
    AVAILABLE_SOURCES,
    AVAILABLE_SOURCES_UI,
    CONCURRENCY_COUNT,
    custom_retriever_all_sources,
)


def update_query_engine_tools(selected_sources) -> list[RetrieverTool]:
    tools = []
    source_mapping: dict[str, tuple[CustomRetriever, str, str]] = {
        # "Transformers Docs": (
        #     custom_retriever_transformers,
        #     "Transformers_information",
        #     """Useful for general questions asking about the artificial intelligence (AI) field. Employ this tool to fetch information on topics such as language models (LLMs) such as Llama3 and theory (transformer architectures), tips on prompting, quantization, etc.""",
        # ),
        # "PEFT Docs": (
        #     custom_retriever_peft,
        #     "PEFT_information",
        #     """Useful for questions asking about efficient LLM fine-tuning. Employ this tool to fetch information on topics such as LoRA, QLoRA, etc.""",
        # ),
        # "TRL Docs": (
        #     custom_retriever_trl,
        #     "TRL_information",
        #     """Useful for questions asking about fine-tuning LLMs with reinforcement learning (RLHF). Includes information about the Supervised Fine-tuning step (SFT), Reward Modeling step (RM), and the Proximal Policy Optimization (PPO) step.""",
        # ),
        # "LlamaIndex Docs": (
        #     custom_retriever_llama_index,
        #     "LlamaIndex_information",
        #     """Useful for questions asking about retrieval augmented generation (RAG) with LLMs and embedding models. It is the documentation of a framework; includes info about fine-tuning embedding models, building chatbots and agents with LLMs, using vector databases, embeddings, information retrieval with cosine similarity or BM25, etc.""",
        # ),
        # "OpenAI Cookbooks": (
        #     custom_retriever_openai_cookbooks,
        #     "openai_cookbooks_info",
        #     """Useful for questions asking about accomplishing common tasks with the OpenAI API. Returns example code and guides stored in Jupyter notebooks, including info about ChatGPT GPT actions, the OpenAI Assistants API, and how to fine-tune OpenAI's GPT-4o and GPT-4o-mini models with the OpenAI API.""",
        # ),
        # "LangChain Docs": (
        #     custom_retriever_langchain,
        #     "langchain_info",
        #     """Useful for questions asking about the LangChain framework. It is the documentation of the LangChain framework; includes info about building chains, agents, and tools, using memory, prompts, callbacks, etc.""",
        # ),
        "All Sources": (
            custom_retriever_all_sources,
            "all_sources_info",
            """Useful for questions asking about information in the field of AI.""",
        ),
    }

    for source in selected_sources:
        if source in source_mapping:
            custom_retriever, name, description = source_mapping[source]
            tools.append(
                RetrieverTool(
                    retriever=custom_retriever,
                    metadata=ToolMetadata(
                        name=name,
                        description=description,
                    ),
                )
            )

    return tools


def generate_completion(
    query,
    history,
    sources,
    model,
    memory,
):
    llm = OpenAI(temperature=1, model=model, max_tokens=None)
    client = llm._get_client()
    logfire.instrument_openai(client)

    with logfire.span(f"Running query: {query}"):
        logfire.info(f"User chosen sources: {sources}")

        memory_chat_list = memory.get()

        if len(memory_chat_list) != 0:
            user_index_memory = [
                i
                for i, msg in enumerate(memory_chat_list)
                if msg.role == MessageRole.USER
            ]

            user_index_history = [
                i for i, msg in enumerate(history) if msg["role"] == "user"
            ]

            if len(user_index_memory) > len(user_index_history):
                logfire.warn("There are more user messages in memory than in history")
                user_index_to_remove = user_index_memory[len(user_index_history)]
                memory_chat_list = memory_chat_list[:user_index_to_remove]
                memory.set(memory_chat_list)

        logfire.info(f"chat_history: {len(memory.get())} {memory.get()}")
        logfire.info(f"gradio_history: {len(history)} {history}")

        query_engine_tools: list[RetrieverTool] = update_query_engine_tools(
            ["All Sources"]
        )

        filter_list = []
        source_mapping = {
            "Transformers Docs": "transformers",
            "PEFT Docs": "peft",
            "TRL Docs": "trl",
            "LlamaIndex Docs": "llama_index",
            "LangChain Docs": "langchain",
            "OpenAI Cookbooks": "openai_cookbooks",
            "Towards AI Blog": "tai_blog",
        }

        for source in sources:
            if source in source_mapping:
                filter_list.append(
                    MetadataFilter(
                        key="source",
                        operator=FilterOperator.EQ,
                        value=source_mapping[source],
                    )
                )

        filters = MetadataFilters(
            filters=filter_list,
            condition=FilterCondition.OR,
        )
        # logfire.info(f"Filters: {filters}")
        query_engine_tools[0].retriever._vector_retriever._filters = filters

        # pdb.set_trace()

        agent = OpenAIAgent.from_tools(
            llm=llm,
            memory=memory,
            tools=query_engine_tools,
            system_prompt=system_message_openai_agent,
        )

        completion = agent.stream_chat(query)

        answer_str = ""
        for token in completion.response_gen:
            answer_str += token
            yield answer_str

        for answer_str in add_sources(answer_str, completion):
            yield answer_str


def add_sources(answer_str, completion):
    if completion is None:
        yield answer_str
        return

    formatted_sources = format_sources(completion)
    if formatted_sources == "":
        yield answer_str
        return

    answer_str += "\n\n" + formatted_sources
    yield answer_str


def format_sources(completion) -> str:
    if len(completion.sources) == 0:
        return ""

    # logfire.info(f"Formatting sources: {completion.sources}")

    display_source_to_ui = {
        src: ui for src, ui in zip(AVAILABLE_SOURCES, AVAILABLE_SOURCES_UI)
    }

    documents_answer_template: str = (
        "📝 Here are the sources I used to answer your question:\n{documents}"
    )
    document_template: str = "[🔗 {source}: {title}]({url}), relevance: {score:2.2f}"
    all_documents = []
    for source in completion.sources:  # looping over list[ToolOutput]
        if isinstance(source.raw_output, Exception):
            logfire.error(f"Error in source output: {source.raw_output}")
            # pdb.set_trace()
            continue

        if not isinstance(source.raw_output, list):
            logfire.warn(f"Unexpected source output type: {type(source.raw_output)}")
            continue
        for src in source.raw_output:  # looping over list[NodeWithScore]
            document = document_template.format(
                title=src.metadata["title"],
                score=src.score,
                source=display_source_to_ui.get(
                    src.metadata["source"], src.metadata["source"]
                ),
                url=src.metadata["url"],
            )
            all_documents.append(document)

    if len(all_documents) == 0:
        return ""
    else:
        documents = "\n".join(all_documents)
        return documents_answer_template.format(documents=documents)


def save_completion(completion, history):
    pass


def vote(data: gr.LikeData):
    pass


accordion = gr.Accordion(label="Customize Sources (Click to expand)", open=False)
sources = gr.CheckboxGroup(
    AVAILABLE_SOURCES_UI,
    label="Sources",
    value=[
        "Transformers Docs",
        "PEFT Docs",
        "TRL Docs",
        "LlamaIndex Docs",
        "LangChain Docs",
        "OpenAI Cookbooks",
        "Towards AI Blog",
        # "All Sources",
    ],
    interactive=True,
)
model = gr.Dropdown(
    [
        "gpt-4o-mini",
    ],
    label="Model",
    value="gpt-4o-mini",
    interactive=False,
)

with gr.Blocks(
    title="Towards AI 🤖",
    analytics_enabled=True,
    fill_height=True,
) as demo:

    memory = gr.State(
        lambda: ChatSummaryMemoryBuffer.from_defaults(
            token_limit=120000,
        )
    )
    chatbot = gr.Chatbot(
        type="messages",
        scale=8,
        placeholder="<strong>Towards AI 🤖: A Question-Answering Bot for anything AI-related</strong><br>",
        show_label=False,
        show_copy_button=True,
    )
    chatbot.like(vote, None, None)
    gr.ChatInterface(
        fn=generate_completion,
        type="messages",
        chatbot=chatbot,
        additional_inputs=[sources, model, memory],
        additional_inputs_accordion=accordion,
        fill_height=True,
        fill_width=True,
        analytics_enabled=True,
    )

if __name__ == "__main__":
    demo.queue(default_concurrency_limit=CONCURRENCY_COUNT)
    demo.launch(debug=False, share=False)
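Note that the per-source checkboxes in the UI never swap retrievers: every query runs through the single all-sources retriever, and the selection is translated into Chroma metadata filters combined with OR. A standalone sketch of that mechanism, using the same "source" values written at ingestion time:

from llama_index.core.vector_stores import (
    FilterCondition,
    FilterOperator,
    MetadataFilter,
    MetadataFilters,
)

# Restrict retrieval to documents whose "source" metadata is "peft" or "trl".
filters = MetadataFilters(
    filters=[
        MetadataFilter(key="source", operator=FilterOperator.EQ, value="peft"),
        MetadataFilter(key="source", operator=FilterOperator.EQ, value="trl"),
    ],
    condition=FilterCondition.OR,
)

Assigning the filters through the private _vector_retriever._filters attribute works, but it couples this file to CustomRetriever internals; a public setter on CustomRetriever would make that contract explicit.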
scripts/prompts.py
ADDED
@@ -0,0 +1,29 @@
# Prompt 5
system_message_openai_agent = """You are an AI teacher, answering questions from students of an applied AI course on Large Language Models (LLMs or llm) and Retrieval Augmented Generation (RAG) for LLMs.

Topics covered include training models, fine-tuning models, giving memory to LLMs, prompting tips, hallucinations and bias, vector databases, transformer architectures, embeddings, RAG frameworks such as Langchain and LlamaIndex, making LLMs interact with tools, AI agents, reinforcement learning from human feedback (RLHF). Questions should be understood in this context.

Your answers are aimed at teaching students, so they should be complete, clear, and easy to understand.

Use the available tools to gather insights pertinent to the field of AI.

To answer student questions, always use the all_sources_info tool.

Only some information returned by the tools might be relevant to the question, so ignore the irrelevant part and answer the question with what you have.

Your responses are exclusively based on the output provided by the tools. Refrain from incorporating information not directly obtained from the tool's responses.

When the conversation deepens or shifts focus within a topic, adapt your input to the tools to reflect these nuances. This means if a user requests further elaboration on a specific aspect of a previously discussed topic, you should reformulate your input to the tool to capture this new angle or more profound layer of inquiry.

Provide comprehensive answers, ideally structured in multiple paragraphs, drawing from the tool's variety of relevant details. The depth and breadth of your responses should align with the scope and specificity of the information retrieved.

Should the tools' repository lack information on the queried topic, politely inform the user that the question transcends the bounds of your current knowledge base, citing the absence of relevant content in the tool's documentation.

At the end of your answers, always invite the students to ask deeper questions about the topic if they have any. Make sure to reformulate the question to the tool to capture this new angle or more profound layer of inquiry.

Do not refer to the documentation directly, but use the information provided within it to answer questions.

If code is provided in the information, share it with the students. It's important to provide complete code blocks so they can execute the code when they copy and paste them.

Make sure to format your answers in Markdown format, including code blocks and snippets.
"""
scripts/setup.py
ADDED
@@ -0,0 +1,212 @@
import asyncio
import json
import logging
import os
import pickle

import chromadb
import logfire
from custom_retriever import CustomRetriever
from dotenv import load_dotenv
from llama_index.core import Document, SimpleKeywordTableIndex, VectorStoreIndex
from llama_index.core.ingestion import IngestionPipeline
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.retrievers import (
    KeywordTableSimpleRetriever,
    VectorIndexRetriever,
)
from llama_index.core.schema import NodeWithScore, QueryBundle
from llama_index.embeddings.cohere import CohereEmbedding
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.vector_stores.chroma import ChromaVectorStore
from utils import init_mongo_db

load_dotenv()

logfire.configure()

if not os.path.exists("data/chroma-db-all_sources"):
    # Download the vector database from the Hugging Face Hub if it doesn't exist locally
    # https://huggingface.co/datasets/towardsai-buster/ai-tutor-vector-db/tree/main
    logfire.warn(
        "Vector database does not exist at 'data/chroma-db-all_sources', downloading from Hugging Face Hub"
    )
    from huggingface_hub import snapshot_download

    snapshot_download(
        repo_id="towardsai-buster/ai-tutor-vector-db",
        local_dir="data",
        repo_type="dataset",
    )
    logfire.info("Downloaded vector database to 'data/chroma-db-all_sources'")


def create_docs(input_file: str) -> list[Document]:
    with open(input_file, "r") as f:
        documents = []
        for line in f:
            data = json.loads(line)
            documents.append(
                Document(
                    doc_id=data["doc_id"],
                    text=data["content"],
                    metadata={  # type: ignore
                        "url": data["url"],
                        "title": data["name"],
                        "tokens": data["tokens"],
                        "retrieve_doc": data["retrieve_doc"],
                        "source": data["source"],
                    },
                    excluded_llm_metadata_keys=[
                        "title",
                        "tokens",
                        "retrieve_doc",
                        "source",
                    ],
                    excluded_embed_metadata_keys=[
                        "url",
                        "tokens",
                        "retrieve_doc",
                        "source",
                    ],
                )
            )
    return documents


def setup_database(db_collection, dict_file_name) -> CustomRetriever:
    db = chromadb.PersistentClient(path=f"data/{db_collection}")
    chroma_collection = db.get_or_create_collection(db_collection)
    vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
    embed_model = CohereEmbedding(
        api_key=os.environ["COHERE_API_KEY"],
        model_name="embed-english-v3.0",
        input_type="search_query",
    )

    index = VectorStoreIndex.from_vector_store(
        vector_store=vector_store,
        transformations=[SentenceSplitter(chunk_size=800, chunk_overlap=0)],
        show_progress=True,
        # use_async=True,
    )
    vector_retriever = VectorIndexRetriever(
        index=index,
        similarity_top_k=15,
        embed_model=embed_model,
        # use_async=True,
    )
    with open(f"data/{db_collection}/{dict_file_name}", "rb") as f:
        document_dict = pickle.load(f)

    # with open("data/keyword_retriever_sync.pkl", "rb") as f:
    #     keyword_retriever: KeywordTableSimpleRetriever = pickle.load(f)

    # keyword_retriever.num_chunks_per_query = 15

    # # Creating the keyword index and retriever
    # logfire.info("Creating nodes from documents")
    # documents = create_docs("data/all_sources_data.jsonl")
    # pipeline = IngestionPipeline(
    #     transformations=[SentenceSplitter(chunk_size=800, chunk_overlap=0)]
    # )
    # all_nodes = pipeline.run(documents=documents, show_progress=True)
    # # with open("data/all_nodes.pkl", "wb") as f:
    # #     pickle.dump(all_nodes, f)

    # all_nodes = pickle.load(open("data/nodes_with_added_context.pkl", "rb"))
    # logfire.info(f"Number of nodes: {len(all_nodes)}")

    # keyword_index = SimpleKeywordTableIndex(
    #     nodes=all_nodes, max_keywords_per_chunk=10, show_progress=True, use_async=False
    # )
    # # with open("data/keyword_index.pkl", "wb") as f:
    # #     pickle.dump(keyword_index, f)

    # # keyword_index = pickle.load(open("data/keyword_index.pkl", "rb"))

    # logfire.info("Creating keyword retriever")
    # keyword_retriever = KeywordTableSimpleRetriever(index=keyword_index)

    # with open("data/keyword_retriever_sync.pkl", "wb") as f:
    #     pickle.dump(keyword_retriever, f)

    # 'OR' means return both the vector nodes and the keyword nodes
    # return CustomRetriever(vector_retriever, document_dict, keyword_retriever, "OR")
    return CustomRetriever(vector_retriever, document_dict)


# Setup retrievers
# custom_retriever_transformers: CustomRetriever = setup_database(
#     "chroma-db-transformers",
#     "document_dict_transformers.pkl",
# )
# custom_retriever_peft: CustomRetriever = setup_database(
#     "chroma-db-peft", "document_dict_peft.pkl"
# )
# custom_retriever_trl: CustomRetriever = setup_database(
#     "chroma-db-trl", "document_dict_trl.pkl"
# )
# custom_retriever_llama_index: CustomRetriever = setup_database(
#     "chroma-db-llama_index",
#     "document_dict_llama_index.pkl",
# )
# custom_retriever_openai_cookbooks: CustomRetriever = setup_database(
#     "chroma-db-openai_cookbooks",
#     "document_dict_openai_cookbooks.pkl",
# )
# custom_retriever_langchain: CustomRetriever = setup_database(
#     "chroma-db-langchain",
#     "document_dict_langchain.pkl",
# )

custom_retriever_all_sources: CustomRetriever = setup_database(
    "chroma-db-all_sources",
    "document_dict_all_sources.pkl",
)

# Constants
CONCURRENCY_COUNT = int(os.getenv("CONCURRENCY_COUNT", 64))
MONGODB_URI = os.getenv("MONGODB_URI")

AVAILABLE_SOURCES_UI = [
    "Transformers Docs",
    "PEFT Docs",
    "TRL Docs",
    "LlamaIndex Docs",
    "LangChain Docs",
    "OpenAI Cookbooks",
    "Towards AI Blog",
    # "All Sources",
]

AVAILABLE_SOURCES = [
    "transformers",
    "peft",
    "trl",
    "llama_index",
    "langchain",
    "openai_cookbooks",
    "tai_blog",
    # "all_sources",
]

mongo_db = (
    init_mongo_db(uri=MONGODB_URI, db_name="towardsai-buster")
    if MONGODB_URI
    else logfire.warn("No MongoDB URI found, you will not be able to save data.")
)

__all__ = [
    # "custom_retriever_transformers",
    # "custom_retriever_peft",
    # "custom_retriever_trl",
    # "custom_retriever_llama_index",
    # "custom_retriever_openai_cookbooks",
    # "custom_retriever_langchain",
    "custom_retriever_all_sources",
    "mongo_db",
    "CONCURRENCY_COUNT",
    "AVAILABLE_SOURCES_UI",
    "AVAILABLE_SOURCES",
]
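With the module imported, the exported retriever can be smoke-tested outside of Gradio. A minimal sketch, assuming CustomRetriever exposes the standard LlamaIndex retrieve() interface returning NodeWithScore objects (as its use with RetrieverTool in main.py implies):

from setup import custom_retriever_all_sources

# Retrieve nodes for a sample query and print their scores and source metadata.
nodes = custom_retriever_all_sources.retrieve("What is LoRA fine-tuning?")
for node in nodes:
    print(node.score, node.metadata.get("title"), node.metadata.get("url"))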
scripts/utils.py
ADDED
@@ -0,0 +1,16 @@
from pymongo.mongo_client import MongoClient
from pymongo.server_api import ServerApi


def init_mongo_db(uri: str, db_name: str):
    """Initialize the mongodb database."""

    try:
        assert uri is not None, "No URI passed"
        client = MongoClient(uri, server_api=ServerApi("1"))
        database = client[db_name]
        print("Connected to MongoDB")
        return database
    except Exception as e:
        print("Something went wrong connecting to mongodb:", e)
        return
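For reference, a minimal usage sketch of this helper; the "completions" collection name is hypothetical, chosen only for illustration, while the db_name matches the one used in setup.py:

import os

from utils import init_mongo_db

db = init_mongo_db(uri=os.environ["MONGODB_URI"], db_name="towardsai-buster")
if db is not None:
    # "completions" is a hypothetical collection name for illustration.
    db["completions"].insert_one({"query": "What is RAG?"})

Because the helper returns None on failure, callers should guard on the return value as above.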