Commit · b5ac9e40
Parent(s):

Update app.py

Files changed:
- .gitattributes (+35, -0)
- .gitignore (+1, -0)
- README.md (+12, -0)
- __pycache__/cache.cpython-310.pyc (+0, -0)
- __pycache__/global_compression.cpython-310.pyc (+0, -0)
- __pycache__/preprocess_document.cpython-310.pyc (+0, -0)
- __pycache__/rag.cpython-310.pyc (+0, -0)
- __pycache__/utils.cpython-310.pyc (+0, -0)
- app.py (+717, -0)
- cache.py (+75, -0)
- global_compression.py (+211, -0)
- preprocess_document.py (+34, -0)
- rag.py (+53, -0)
- requirements.txt (+14, -0)
- utils.py (+77, -0)
.gitattributes
ADDED
@@ -0,0 +1,35 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
.gitignore
ADDED
@@ -0,0 +1 @@
.env
README.md
ADDED
@@ -0,0 +1,12 @@
---
title: Beyondrag
emoji: 💬
colorFrom: yellow
colorTo: purple
sdk: gradio
sdk_version: 5.20.1
app_file: app.py
pinned: false
---

An example chatbot using [Gradio](https://gradio.app), [`huggingface_hub`](https://huggingface.co/docs/huggingface_hub/v0.22.2/en/index), and the [Hugging Face Inference API](https://huggingface.co/docs/api-inference/index).
__pycache__/cache.cpython-310.pyc
ADDED
Binary file (3.86 kB)

__pycache__/global_compression.cpython-310.pyc
ADDED
Binary file (4.76 kB)

__pycache__/preprocess_document.cpython-310.pyc
ADDED
Binary file (1.08 kB)

__pycache__/rag.cpython-310.pyc
ADDED
Binary file (2.29 kB)

__pycache__/utils.cpython-310.pyc
ADDED
Binary file (3.04 kB)
app.py
ADDED
@@ -0,0 +1,717 @@
import copy
import math
import os
import time
from threading import Thread

import gradio as gr
import spaces
import torch
from docling.backend.pypdfium2_backend import PyPdfiumDocumentBackend
from docling.datamodel.pipeline_options import PdfPipelineOptions
from docling.document_converter import DocumentConverter, InputFormat, PdfFormatOption
from langchain.schema.document import Document
from langchain_chroma import Chroma
from langchain_community.embeddings import HuggingFaceBgeEmbeddings
from langchain_docling import DoclingLoader
from langchain_docling.loader import ExportType
from langchain_text_splitters import RecursiveCharacterTextSplitter
from transformers import AutoModelForCausalLM, AutoTokenizer, DynamicCache, TextIteratorStreamer
from transformers.models.llama.modeling_llama import rotate_half

from utils import (
    calculate_tokens_suggest_compression_ratio,
    repeat_kv,
    update_retrieval_context,
)



# Initialize the model and tokenizer.
api_token = os.getenv("HF_TOKEN")
model_name = "meta-llama/Llama-3.1-8B-Instruct"
tokenizer = AutoTokenizer.from_pretrained(model_name, token=api_token)
model = AutoModelForCausalLM.from_pretrained(model_name, token=api_token, torch_dtype=torch.float16)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = model.eval()
model.to(device)
embedding_model = HuggingFaceBgeEmbeddings(
    model_name="BAAI/bge-large-en-v1.5",
    model_kwargs={"device": str(device)},
    encode_kwargs={"normalize_embeddings": True},
    query_instruction=""
)


# Create a chat template and split into prefix and suffix.
content_system = ""
content_user = "######"
user_template = [
    {"role": "system", "content": content_system},
    {"role": "user", "content": content_user}
]
user = tokenizer.apply_chat_template(user_template, add_generation_prompt=True, tokenize=False)
prefix, suffix = user.split(content_user)
sink_tokens = max(4, len(tokenizer.encode(prefix)))

# Default prompt content.
default_task_description = (
    "Answer the question based on the given passages. "
    "Only give me the answer and do not output any other words."
)
default_few_shot = """Examples
question: Which case was brought to court first Miller v. California or Gates v. Collier ?
answer: Miller v. California
question: The actor that plays Phileas Fogg in "Around the World in 80 Days", co-starred with Gary Cooper in a 1939 Goldwyn Productions film based on a novel by what author?
answer: Charles L. Clifford
question: Prior to playing for Michigan State, Keith Nichol played football for a school located in what city?
answer: Norman
"""

class FinchCache(DynamicCache):
    def __init__(self) -> None:
        super().__init__()
        self.key_cache = []
        self.value_cache = []

    @staticmethod
    def _rotate_half(x):
        x1 = x[..., : x.shape[-1] // 2]
        x2 = x[..., x.shape[-1] // 2 :]
        return torch.cat((-x2, x1), dim=-1)

    def _apply_key_rotary_pos_emb(self, key_states: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor) -> torch.Tensor:
        return (key_states * cos) + (self._rotate_half(key_states) * sin)

    @staticmethod
    def _rerotate_cos_sin(x, inv_freq, important_pos_batch):
        B, H, L = important_pos_batch.shape
        device = important_pos_batch.device
        device_type = x.device.type
        dtype = x.dtype
        idx = torch.arange(0, L, device=device)
        idx = idx.unsqueeze(0)
        inv_freq = inv_freq[None, None, :, None].float().expand(B, H, -1, 1)  # (B, H, M, 1)
        idx = idx[:, None, :].float().expand(B, H, L)  # (B, H, L)
        delta_pos = idx - important_pos_batch
        delta_pos = delta_pos.unsqueeze(2)  # (B, H, 1, L)

        device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu"

        with torch.autocast(device_type=device_type, enabled=False):
            freqs = delta_pos.float() * inv_freq.float()
            freqs = freqs.transpose(2, 3)
            emb = torch.cat((freqs, freqs), dim=-1)
            cos = emb.cos().contiguous()
            sin = emb.sin().contiguous()
        return cos.to(dtype=dtype), sin.to(dtype=dtype)

    @staticmethod
    def gather_important_tokens(states, indices):
        return torch.gather(states, 2, indices.unsqueeze(-1).expand(-1, -1, -1, states.size(3))).contiguous()

    def compress_cache(self, layer_index, important_pos, inv_freq):
        new_length = important_pos.size(2)
        new_cos, new_sin = self._rerotate_cos_sin(self.key_cache[layer_index], inv_freq, important_pos)
        gathered_keys = self.gather_important_tokens(self.key_cache[layer_index], important_pos).clone()
        self.key_cache[layer_index] = self._apply_key_rotary_pos_emb(gathered_keys, new_cos, new_sin)
        gathered_values = self.gather_important_tokens(self.value_cache[layer_index], important_pos).clone()
        self.value_cache[layer_index] = gathered_values
        self._seen_tokens = new_length

    def save(self, path: str):
        """Save the cache to disk, moving tensors to CPU."""
        try:
            os.makedirs(os.path.dirname(path), exist_ok=True)
            torch.save(
                {"key_cache": [k.cpu() for k in self.key_cache], "value_cache": [v.cpu() for v in self.value_cache]},
                path,
            )
        except Exception as e:
            print(f"Error occurred while saving: {e}")

    @classmethod
    def load(cls, path: str, device: str = "cpu") -> "FinchCache":
        """Load the cache from disk and move tensors to the specified device."""
        data = torch.load(path, map_location=device)
        cache = cls()
        cache.key_cache = [k.to(device) for k in data["key_cache"]]
        cache.value_cache = [v.to(device) for v in data["value_cache"]]
        cache._seen_tokens = cache.value_cache[0].size(2) if cache.value_cache else 0
        return cache



def convert_to_markdown(file_objs, url, do_ocr, do_table_structure):
    file_path = file_objs if file_objs is not None else url
    pipeline_options = PdfPipelineOptions()
    pipeline_options.do_ocr = do_ocr
    pipeline_options.do_table_structure = do_table_structure
    pdf_format_options = PdfFormatOption(
        pipeline_options=pipeline_options,
        backend=PyPdfiumDocumentBackend,
    )
    doc_converter = DocumentConverter(
        allowed_formats=[InputFormat.PDF],
        format_options={
            InputFormat.PDF: pdf_format_options
        }
    )

    # Pass the custom converter to the DoclingLoader.
    loader = DoclingLoader(
        file_path=file_path,
        export_type=ExportType.MARKDOWN,
        converter=doc_converter
    )
    docs = loader.load()
    return docs[0].page_content

def create_rag_index(text_no_prefix):
    """Loads the PDF, splits its text, and builds a vectorstore for naive RAG."""
    text_splitter = RecursiveCharacterTextSplitter.from_huggingface_tokenizer(
        tokenizer,
        chunk_size=256,
        chunk_overlap=0,
        add_start_index=True,
        strip_whitespace=True,
        separators=["\n\n", "\n", ".", " ", ""],
    )
    # Concatenate pages and create Document objects.
    docs = [Document(page_content=x) for x in text_splitter.split_text(text_no_prefix)]
    vectorstore = Chroma.from_documents(documents=docs, embedding=embedding_model)
    return vectorstore


@spaces.GPU
def auto_convert(file_objs, url, do_ocr, do_table_structure):
    if file_objs is None and (url is None or url.strip() == ""):
        return (
            gr.update(value=""),
            "Number of tokens before compression: ",
            gr.update(),
            "Number of tokens after compression: ",
            0,
            gr.update(interactive=False),  # Disable compress button when no input.
            False,
            {}  # return an empty state dictionary
        )
    # Convert the document to markdown.
    markdown = convert_to_markdown(file_objs, url, do_ocr, do_table_structure)
    combined_text = prefix + markdown
    token_count, suggestions, _ = calculate_tokens_suggest_compression_ratio(combined_text, tokenizer, model)
    min_ratio = min(suggestions)
    max_ratio = max(suggestions)
    default_ratio = suggestions[len(suggestions) // 2]
    retrieval_tokens = int(token_count / default_ratio)
    token_count_str = f"Number of tokens before compression: {token_count}"
    retrieval_str = f"Number of tokens after compression: {retrieval_tokens}"
    slider_update = gr.update(value=default_ratio, minimum=min_ratio, maximum=max_ratio, step=1)

    # Create the RAG index immediately.
    if combined_text.startswith(prefix):
        rag_text = combined_text[len(prefix):]
    else:
        rag_text = combined_text
    rag_index = create_rag_index(rag_text)
    state = {"rag_index": rag_index}

    return (
        combined_text,
        token_count_str,
        slider_update,
        retrieval_str,
        token_count,
        gr.update(interactive=True),
        False,
        state
    )


def get_compressed_kv_cache(sink_tokens, step_size, target_token_size, context_ids, context_attention_mask, question_ids, question_attention_mask):
    device = model.device
    dtype = model.dtype
    sink_tokens = sink_tokens
    num_chunks = step_size
    context_ids = context_ids.to(device)
    context_attention_mask = context_attention_mask.to(device)
    question_ids = question_ids.to(device)
    question_attention_mask = question_attention_mask.to(device)
    question_len = question_ids.size(1)
    total_len = context_ids.size(1)
    max_context_tokens_allowed = model.config.max_position_embeddings - question_len
    if total_len > max_context_tokens_allowed:
        num_chunks = max(step_size, math.ceil(total_len / max_context_tokens_allowed))

    if total_len <= sink_tokens or num_chunks == 1:
        # If the context is too short or only one chunk is desired, use the entire context.
        context_ids_list = [context_ids]
        context_attention_mask_list = [context_attention_mask]
    else:
        # Calculate how many tokens remain after the sink tokens.
        remainder_len = total_len - sink_tokens

        # Compute the base tokens per chunk and any leftover.
        base = remainder_len // num_chunks
        leftover = remainder_len % num_chunks

        # Build a list of chunk sizes.
        # First chunk gets the sink tokens plus base tokens.
        chunk_sizes = [sink_tokens + base]

        # Chunks 2 to num_chunks-1 get base tokens each.
        for _ in range(num_chunks - 2):
            chunk_sizes.append(base)

        # The last chunk gets the remaining tokens (base + leftover).
        if num_chunks > 1:
            chunk_sizes.append(base + leftover)

        # Now slice the context using the calculated sizes.
        context_ids_list = []
        context_attention_mask_list = []
        offset = 0
        for size in chunk_sizes:
            end = offset + size
            context_ids_list.append(context_ids[:, offset:end])
            context_attention_mask_list.append(context_attention_mask[:, offset:end])
            offset = end

    # (Optional) Continue with the rest of your processing…
    len_rest = max(total_len - sink_tokens, 1)
    compression_factor = len_rest // target_token_size
    if compression_factor < 1:
        compression_factor = 1

    tokenized_doc_chunks = []
    for ids_chunk, mask_chunk in zip(context_ids_list, context_attention_mask_list):
        tokenized_doc_chunks.append({"input_ids": ids_chunk, "attention_mask": mask_chunk})

    print("Number of chunks: ", len(tokenized_doc_chunks))

    rotary_emb = model.model.rotary_emb.to(device)
    inv_freq = rotary_emb.inv_freq
    batch_size = question_ids.size(0)
    ones_mask = torch.ones(batch_size, 1, dtype=question_attention_mask.dtype, device=device)

    cache = FinchCache()
    past_cache_len = 0
    past_attention_mask = torch.zeros(batch_size, 0, dtype=question_attention_mask.dtype, device=device)
    num_chunks = len(tokenized_doc_chunks)

    # Prepare a shared dictionary for hook outputs.
    query_context_matrices = {}

    # Define a hook function that reads the per-chunk offset from the enclosing scope.
    def query_hook_fn(module, input, output):
        layer_idx = getattr(module, "layer_idx", None)
        if layer_idx is not None:
            query_states = output.detach()
            bsz, seq_len, hidden_dim = query_states.size()
            num_query_heads = module.num_query_heads
            head_dim = hidden_dim // num_query_heads
            query_states = (
                query_states.view(bsz, seq_len, num_query_heads, head_dim)
                .transpose(1, 2)
                .contiguous()
            )
            # Use _current_chunk_offset to keep only the query states of the appended question tokens.
            query_context_matrices[layer_idx] = query_states[:, :, _current_chunk_offset:, :].clone()

    # Pre-register hooks for all layers only once.
    hooks = []
    for i, layer in enumerate(model.model.layers):
        layer.self_attn.q_proj.layer_idx = i  # For tracking.
        layer.self_attn.q_proj.num_query_heads = layer.self_attn.config.num_attention_heads
        hook = layer.self_attn.q_proj.register_forward_hook(query_hook_fn)
        hooks.append(hook)

    # Process each document chunk sequentially.
    for j, tokenized_doc_chunk in enumerate(tokenized_doc_chunks):
        current_seq_length = tokenized_doc_chunk["input_ids"].size(1)
        # Save the offset where the hook closure can read it.
        _current_chunk_offset = current_seq_length
        # Clear the dictionary from any previous chunk.
        query_context_matrices.clear()

        # These chunks are already on the device.
        chunk_input_ids = tokenized_doc_chunk["input_ids"].contiguous()
        chunk_attention_mask = tokenized_doc_chunk["attention_mask"].contiguous()
        segment_attention_mask = torch.cat(
            [past_attention_mask, chunk_attention_mask, ones_mask], dim=-1
        ).contiguous()
        current_input_ids = torch.cat([chunk_input_ids, question_ids], dim=-1).contiguous()
        current_attention_mask = torch.cat([segment_attention_mask, question_attention_mask], dim=-1).contiguous()

        past_seen_tokens = cache.get_seq_length() if cache is not None else 0
        cache_position = torch.arange(
            past_seen_tokens + chunk_input_ids.shape[1],
            past_seen_tokens + current_input_ids.shape[1],
            device=device
        )
        causal_mask = model.model._prepare_4d_causal_attention_mask_with_cache_position(
            current_attention_mask,
            sequence_length=question_ids.size(1),
            target_length=current_attention_mask.size(-1),
            dtype=dtype,
            device=device,
            cache_position=cache_position,
            batch_size=current_input_ids.size(0),
        ).contiguous()

        with torch.no_grad():
            outputs = model.model(
                input_ids=current_input_ids,
                use_cache=True,
                past_key_values=cache,
            )
        cache = outputs.past_key_values

        len_question = question_ids.size(1)
        # Now, for each transformer layer, update the cache using the query/key attention.
        for layer_idx in range(len(model.model.layers)):
            key_matrix = cache.key_cache[layer_idx]
            query_matrix = query_context_matrices[layer_idx]
            layer_cache_pos = torch.arange(
                past_cache_len + current_seq_length,
                past_cache_len + current_seq_length + len_question,
                device=device
            )
            position_ids = layer_cache_pos.unsqueeze(0)
            cos, sin = rotary_emb(query_matrix, position_ids)
            cos = cos.unsqueeze(1)
            sin = sin.unsqueeze(1)
            query_matrix = (query_matrix * cos) + (rotate_half(query_matrix) * sin)
            num_repeats = model.config.num_attention_heads // model.config.num_key_value_heads
            key_matrix = repeat_kv(key_matrix, num_repeats)

            scaling = math.sqrt(model.config.head_dim)
            attention_matrix = torch.matmul(query_matrix, key_matrix.transpose(2, 3)) / scaling
            causal_mask_sliced = causal_mask[:, :, :, : key_matrix.shape[-2]]
            attention_matrix = attention_matrix + causal_mask_sliced
            attention_matrix = torch.nn.functional.softmax(attention_matrix, dim=-1, dtype=torch.float32).to(query_matrix.dtype)
            # Normalization
            tol = 1e-8
            binary_mask = (torch.abs(causal_mask_sliced.to(torch.float32)) < tol).to(torch.float32)
            non_zero_counts = binary_mask.sum(dim=3, keepdim=True)
            non_zero_counts = torch.clamp_min(non_zero_counts, 1.0).to(attention_matrix.dtype)
            attention_matrix = attention_matrix / non_zero_counts
            if j != num_chunks - 1:
                attention_matrix = attention_matrix[:, :, :, : past_cache_len + current_seq_length].clone().contiguous()
            else:
                attention_matrix = attention_matrix[:, :, :, : past_cache_len + current_seq_length + len_question].clone().contiguous()
            attention_matrix = torch.sum(attention_matrix, dim=-2)
            attention_matrix = attention_matrix.view(
                attention_matrix.size(0), model.config.num_key_value_heads, num_repeats, -1
            ).sum(dim=2)
            full_context_size = attention_matrix.size(-1)
            attention_matrix[..., :sink_tokens] = float("inf")
            if j == num_chunks - 1:
                attention_matrix[..., -len_question:] = float("inf")
            if j == 0:
                k = int(sink_tokens + (max(0, current_seq_length - sink_tokens) // compression_factor))
                k = min(k + past_cache_len, full_context_size)
            elif j < num_chunks - 1:
                to_keep_new = int(current_seq_length // compression_factor)
                k = min(past_cache_len + to_keep_new, full_context_size)
            else:
                desired_final = sink_tokens + target_token_size + len_question  # TODO: remember to include the question tokens
                k = desired_final if full_context_size >= desired_final else full_context_size
            k = max(k, sink_tokens)
            selected_indices = torch.topk(attention_matrix, k, dim=-1).indices
            selected_indices, _ = torch.sort(selected_indices, dim=-1)
            cache.compress_cache(layer_idx, selected_indices, inv_freq)

        past_cache_len = cache._seen_tokens
        past_attention_mask = torch.ones(1, past_cache_len, device=device)

    # Remove the hooks once after all chunks are processed.
    for hook in hooks:
        hook.remove()

    return cache


def run_naive_rag_query(vectorstore, query, rag_token_size, prefix, task, few_shot_examples):
    """
    For naive RAG, retrieves top-k chunks (k based on target token size)
    and generates an answer using those chunks.
    """
    k = max(1, rag_token_size // 256)
    retriever = vectorstore.as_retriever(search_type="similarity", search_kwargs={"k": k})
    retrieved_docs = retriever.invoke(query)
    for doc in retrieved_docs:
        print("=================")
        print(doc.page_content)
        print("=================")
    formatted_context = "\n\n".join([doc.page_content for doc in retrieved_docs])

    rag_context = prefix + "Retrieved context: \n" + formatted_context + task + few_shot_examples

    return rag_context


@spaces.GPU
def prepare_compression_and_rag(combined_text, retrieval_slider_value, global_local_value, task_description, few_shot, state):
    """
    Prepares the compressed KV cache. Uses the precomputed rag_index from state.
    """
    percentage = int(global_local_value.replace('%', ''))
    question_text = task_description + "\n" + few_shot
    context_encoding = tokenizer(combined_text, return_tensors="pt").to(device)
    question_encoding = tokenizer(question_text, return_tensors="pt").to(device)
    context_ids = context_encoding["input_ids"]
    context_attention_mask = context_encoding["attention_mask"]
    question_ids = question_encoding["input_ids"]
    question_attention_mask = question_encoding["attention_mask"]
    retrieval_context_length = int(context_ids.size(1) / retrieval_slider_value)

    if percentage > 0:
        target_token_size = int(retrieval_context_length * (percentage / 100))
        print("Target token size for compression: ", target_token_size)
        step_size = 2
        start_time_prefill = time.perf_counter()
        past_key_values = copy.deepcopy(get_compressed_kv_cache(sink_tokens, step_size, target_token_size,
                                                                context_ids, context_attention_mask,
                                                                question_ids, question_attention_mask))
        compressed_length = past_key_values.get_seq_length()
        print("Context size after compression: ", compressed_length)
        print("Compression rate: ", context_ids.size(1) / compressed_length)
    else:
        start_time_prefill = 0
        target_token_size = 0
        past_key_values = FinchCache()
        compressed_length = past_key_values.get_seq_length()


    # Use the precomputed rag_index from state.
    rag_index = state.get("rag_index", None)
    if rag_index is None:
        if combined_text.startswith(prefix):
            rag_text = combined_text[len(prefix):]
        else:
            rag_text = combined_text
        rag_index = create_rag_index(rag_text)

    state.update({
        "compressed_cache": past_key_values,
        "compressed_length": compressed_length,
        "rag_index": rag_index,
        "target_token_size": target_token_size,
        "global_local": percentage,
        "combined_text": combined_text,
        "task_description": task_description,
        "few_shot": few_shot,
        "retrieval_slider": retrieval_context_length,
        "prefill_time": time.perf_counter() - start_time_prefill
    })
    return state, True


@spaces.GPU
def chat_response_stream(message: str, history: list, state: dict):
    """
    Generates a chat response with streaming output.
    Returns a simple string (not a list of message dicts) for ChatInterface.
    """
    user_message = message
    past_key_values = state["compressed_cache"]
    compressed_length = past_key_values.get_seq_length()
    rag_index = state["rag_index"]
    retrieval_slider_value = state["retrieval_slider"]
    percentage = state["global_local"]

    rag_retrieval_size = int(retrieval_slider_value * (1.0 - (percentage / 100)))
    print("RAG retrieval size: ", rag_retrieval_size)

    if percentage == 0:
        rag_prefix = prefix
        rag_task = state["task_description"]
        rag_few_shot = state["few_shot"]
    else:
        rag_prefix = ""
        rag_task = ""
        rag_few_shot = ""
    print("user message: ", user_message)
    if rag_retrieval_size != 0:
        rag_context = run_naive_rag_query(rag_index, user_message, rag_retrieval_size, rag_prefix, rag_task, rag_few_shot)
        new_input = rag_context + "\nquestion: " + user_message + suffix + "answer:"
    else:
        new_input = "\nquestion: " + user_message + suffix + "answer:"
    tokenized_new_input = tokenizer(new_input, return_tensors="pt").to(device)
    eos_block = torch.full((1, compressed_length), tokenizer.eos_token_id, device=device, dtype=torch.long)
    new_input_ids = torch.cat([eos_block, tokenized_new_input["input_ids"]], dim=-1)
    new_attention_mask = torch.cat([torch.ones((1, compressed_length), device=device), tokenized_new_input["attention_mask"]], dim=-1)

    print("New input is: ", new_input)
    streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True)
    generate_kwargs = dict(
        input_ids=new_input_ids,
        attention_mask=new_attention_mask,
        past_key_values=past_key_values,
        streamer=streamer,
        use_cache=True,
        max_new_tokens=1024,
        num_beams=1,
        do_sample=False,
        temperature=1.0,
        top_p=1.0,
        top_k=None,
    )
    t = Thread(target=model.generate, kwargs=generate_kwargs)
    t.start()

    full_output = ""
    for text in streamer:
        full_output += text
        time.sleep(0.05)
        yield full_output

    state["compressed_cache"] = past_key_values
    return full_output

##########################################################################
# Gradio Interface: note that we now use ChatInterface instead of a Chatbot.
##########################################################################
CSS = """
body {
    font-family: "Times New Roman", Times, serif;
}
.upload-section {
    padding: 10px;
    border: 2px dashed #ccc;
    border-radius: 10px;
}
.upload-button {
    background: #34c759 !important;
    color: white !important;
    border-radius: 25px !important;
}
.chatbot-container {
    margin-top: 20px;
}
.status-output {
    margin-top: 10px;
    font-size: 14px;
}
.processing-info {
    margin-top: 5px;
    font-size: 12px;
    color: #666;
}
.info-container {
    margin-top: 10px;
    padding: 10px;
    border-radius: 5px;
}
.file-list {
    margin-top: 0;
    max-height: 200px;
    overflow-y: auto;
    padding: 5px;
    border: 1px solid #eee;
    border-radius: 5px;
}
.stats-box {
    margin-top: 10px;
    padding: 10px;
    border-radius: 5px;
    font-size: 12px;
}
.submit-btn {
    background: #1a73e8 !important;
    color: white !important;
    border-radius: 25px !important;
    margin-left: 10px;
    padding: 5px 10px;
    font-size: 16px;
}
.input-row {
    display: flex;
    align-items: center;
}
@media (min-width: 768px) {
    .main-container {
        display: flex;
        justify-content: space-between;
        gap: 20px;
    }
    .upload-section {
        flex: 3;
    }
    .chatbot-container {
        flex: 1;
        margin-top: 0;
    }
}
"""

with gr.Blocks(css=CSS, theme=gr.themes.Soft()) as demo:
    gr.HTML("<h1><center>Beyond RAG with Llama 3.1-8B-Instruct Model</center></h1>")
    gr.HTML("<center><p>Compress your document and chat with it.</p></center>")

    hidden_token_count = gr.State(value=0)
    compression_done = gr.State(value=False)
    compressed_doc_state = gr.State(value={})

    with gr.Row(elem_classes="main-container"):
        with gr.Column(elem_classes="upload-section"):
            gr.Markdown("## Document Preprocessing")
            with gr.Row():
                file_input = gr.File(label="Drop file here or upload", file_count="multiple", elem_id="file-upload-area")
                url_input = gr.Textbox(label="or enter a URL", placeholder="https://example.com/document.pdf")
            with gr.Row():
                do_ocr = gr.Checkbox(label="Do OCR", value=False)
                do_table = gr.Checkbox(label="Include Table Structure", value=False)
            with gr.Accordion("Prompt Designer", open=False):
                task_description_input = gr.Textbox(label="Task Description", value=default_task_description, lines=3, elem_id="task-description")
                few_shot_input = gr.Textbox(label="Few-Shot Examples", value=default_few_shot, lines=10, elem_id="few-shot")
            with gr.Accordion("Show Markdown Output", open=False):
                markdown_output = gr.Textbox(label="Markdown Output", lines=20)
            token_count_text = gr.Markdown("Number of tokens before compression: ")
            retrieval_slider = gr.Slider(label="Select Compression Rate", minimum=1, maximum=32, step=1, value=2)
            retrieval_info_text = gr.Markdown("Number of tokens after compression: ")
            global_local_slider = gr.Radio(label="Global vs Local (0 is all RAG, 100 is all global)",
                                           choices=["0%", "25%", "50%", "75%", "100%"], value="75%")
            compress_button = gr.Button("Compress Document", interactive=False, elem_classes="upload-button")

            file_input.change(
                fn=auto_convert,
                inputs=[file_input, url_input, do_ocr, do_table],
                outputs=[markdown_output, token_count_text, retrieval_slider, retrieval_info_text, hidden_token_count, compress_button, compression_done, compressed_doc_state]
            )
            url_input.change(
                fn=auto_convert,
                inputs=[file_input, url_input, do_ocr, do_table],
                outputs=[markdown_output, token_count_text, retrieval_slider, retrieval_info_text, hidden_token_count, compress_button, compression_done, compressed_doc_state]
            )
            do_ocr.change(
                fn=auto_convert,
                inputs=[file_input, url_input, do_ocr, do_table],
                outputs=[markdown_output, token_count_text, retrieval_slider, retrieval_info_text, hidden_token_count, compress_button, compression_done, compressed_doc_state]
            )
            do_table.change(
                fn=auto_convert,
                inputs=[file_input, url_input, do_ocr, do_table],
                outputs=[markdown_output, token_count_text, retrieval_slider, retrieval_info_text, hidden_token_count, compress_button, compression_done, compressed_doc_state]
            )
            retrieval_slider.change(
                fn=update_retrieval_context,
                inputs=[hidden_token_count, retrieval_slider],
                outputs=retrieval_info_text
            )
            compress_button.click(
                fn=prepare_compression_and_rag,
                inputs=[markdown_output, retrieval_slider, global_local_slider, task_description_input, few_shot_input, compressed_doc_state],
                outputs=[compressed_doc_state, compression_done]
            )

        with gr.Column(elem_classes="chatbot-container"):
            gr.Markdown("## Chat")
            chat_interface = gr.ChatInterface(
                fn=chat_response_stream,
                additional_inputs=[compressed_doc_state],
                type="messages"
            )

demo.queue(max_size=16).launch()
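For orientation, the token budget that app.py splits between the compressed KV cache ("global") and naive RAG ("local") follows directly from the slider and radio values; a rough sketch of that arithmetic with made-up numbers (not part of the commit):

# Illustrative numbers only: a 40,000-token document at compression rate 4, global/local = "75%".
token_count = 40_000
compression_rate = 4
global_local = 75
budget = token_count // compression_rate              # 10,000 tokens kept overall
kv_budget = int(budget * global_local / 100)          # 7,500 tokens -> target size of the compressed FinchCache
rag_budget = int(budget * (1 - global_local / 100))   # 2,500 tokens -> naive RAG, k = 2500 // 256 = 9 chunks per query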
cache.py
ADDED
@@ -0,0 +1,75 @@
from transformers import DynamicCache
import torch
import os

class FinchCache(DynamicCache):
    def __init__(self) -> None:
        super().__init__()
        self.key_cache = []
        self.value_cache = []

    @staticmethod
    def _rotate_half(x):
        x1 = x[..., : x.shape[-1] // 2]
        x2 = x[..., x.shape[-1] // 2 :]
        return torch.cat((-x2, x1), dim=-1)

    def _apply_key_rotary_pos_emb(self, key_states: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor) -> torch.Tensor:
        return (key_states * cos) + (self._rotate_half(key_states) * sin)

    @staticmethod
    def _rerotate_cos_sin(x, inv_freq, important_pos_batch):
        B, H, L = important_pos_batch.shape
        device = important_pos_batch.device
        device_type = x.device.type
        dtype = x.dtype
        idx = torch.arange(0, L, device=device)
        idx = idx.unsqueeze(0)
        inv_freq = inv_freq[None, None, :, None].float().expand(B, H, -1, 1)  # (B, H, M, 1)
        idx = idx[:, None, :].float().expand(B, H, L)  # (B, H, L)
        delta_pos = idx - important_pos_batch
        delta_pos = delta_pos.unsqueeze(2)  # (B, H, 1, L)

        device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu"

        with torch.autocast(device_type=device_type, enabled=False):
            freqs = delta_pos.float() * inv_freq.float()
            freqs = freqs.transpose(2, 3)
            emb = torch.cat((freqs, freqs), dim=-1)
            cos = emb.cos().contiguous()
            sin = emb.sin().contiguous()
        return cos.to(dtype=dtype), sin.to(dtype=dtype)

    @staticmethod
    def gather_important_tokens(states, indices):
        return torch.gather(states, 2, indices.unsqueeze(-1).expand(-1, -1, -1, states.size(3))).contiguous()

    def compress_cache(self, layer_index, important_pos, inv_freq):
        new_length = important_pos.size(2)
        new_cos, new_sin = self._rerotate_cos_sin(self.key_cache[layer_index], inv_freq, important_pos)
        gathered_keys = self.gather_important_tokens(self.key_cache[layer_index], important_pos).clone()
        self.key_cache[layer_index] = self._apply_key_rotary_pos_emb(gathered_keys, new_cos, new_sin)
        gathered_values = self.gather_important_tokens(self.value_cache[layer_index], important_pos).clone()
        self.value_cache[layer_index] = gathered_values
        self._seen_tokens = new_length

    def save(self, path: str):
        """Save the cache to disk, moving tensors to CPU."""
        try:
            os.makedirs(os.path.dirname(path), exist_ok=True)
            torch.save(
                {"key_cache": [k.cpu() for k in self.key_cache], "value_cache": [v.cpu() for v in self.value_cache]},
                path,
            )
        except Exception as e:
            print(f"Error occurred while saving: {e}")

    @classmethod
    def load(cls, path: str, device: str = "cpu") -> "FinchCache":
        """Load the cache from disk and move tensors to the specified device."""
        data = torch.load(path, map_location=device)
        cache = cls()
        cache.key_cache = [k.to(device) for k in data["key_cache"]]
        cache.value_cache = [v.to(device) for v in data["value_cache"]]
        cache._seen_tokens = cache.value_cache[0].size(2) if cache.value_cache else 0
        return cache
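FinchCache extends transformers' DynamicCache with save/load helpers; a minimal round-trip sketch (the file path and surrounding usage are illustrative, not part of the commit):

from cache import FinchCache

cache = FinchCache()
# ... populate cache.key_cache / cache.value_cache by running the model with use_cache=True ...
cache.save("/tmp/finch_cache.pt")                        # tensors are moved to CPU before torch.save
restored = FinchCache.load("/tmp/finch_cache.pt", device="cpu")
print(restored.get_seq_length())                         # number of cached tokens per layer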
global_compression.py
ADDED
@@ -0,0 +1,211 @@
import math
import torch
from cache import FinchCache
from utils import repeat_kv
from transformers.models.llama.modeling_llama import rotate_half
import spaces

@spaces.GPU
def get_compressed_kv_cache(model, sink_tokens, step_size, target_token_size, context_ids, context_attention_mask, question_ids, question_attention_mask):
    device = model.device
    dtype = model.dtype
    sink_tokens = sink_tokens
    num_chunks = step_size
    context_ids = context_ids.to(device)
    context_attention_mask = context_attention_mask.to(device)
    question_ids = question_ids.to(device)
    question_attention_mask = question_attention_mask.to(device)
    question_len = question_ids.size(1)
    total_len = context_ids.size(1)
    max_context_tokens_allowed = model.config.max_position_embeddings - question_len
    if total_len > max_context_tokens_allowed:
        num_chunks = max(step_size, math.ceil(total_len / max_context_tokens_allowed))

    if total_len <= sink_tokens or num_chunks == 1:
        # If the context is too short or only one chunk is desired, use the entire context.
        context_ids_list = [context_ids]
        context_attention_mask_list = [context_attention_mask]
    else:
        # Calculate how many tokens remain after the sink tokens.
        remainder_len = total_len - sink_tokens

        # Compute the base tokens per chunk and any leftover.
        base = remainder_len // num_chunks
        leftover = remainder_len % num_chunks

        # Build a list of chunk sizes.
        # First chunk gets the sink tokens plus base tokens.
        chunk_sizes = [sink_tokens + base]

        # Chunks 2 to num_chunks-1 get base tokens each.
        for _ in range(num_chunks - 2):
            chunk_sizes.append(base)

        # The last chunk gets the remaining tokens (base + leftover).
        if num_chunks > 1:
            chunk_sizes.append(base + leftover)

        # Now slice the context using the calculated sizes.
        context_ids_list = []
        context_attention_mask_list = []
        offset = 0
        for size in chunk_sizes:
            end = offset + size
            context_ids_list.append(context_ids[:, offset:end])
            context_attention_mask_list.append(context_attention_mask[:, offset:end])
            offset = end

    # (Optional) Continue with the rest of your processing…
    len_rest = max(total_len - sink_tokens, 1)
    compression_factor = len_rest // target_token_size
    if compression_factor < 1:
        compression_factor = 1

    tokenized_doc_chunks = []
    for ids_chunk, mask_chunk in zip(context_ids_list, context_attention_mask_list):
        tokenized_doc_chunks.append({"input_ids": ids_chunk, "attention_mask": mask_chunk})

    print("Number of chunks: ", len(tokenized_doc_chunks))

    rotary_emb = model.model.rotary_emb.to(device)
    inv_freq = rotary_emb.inv_freq
    batch_size = question_ids.size(0)
    ones_mask = torch.ones(batch_size, 1, dtype=question_attention_mask.dtype, device=device)

    cache = FinchCache()
    past_cache_len = 0
    past_attention_mask = torch.zeros(batch_size, 0, dtype=question_attention_mask.dtype, device=device)
    num_chunks = len(tokenized_doc_chunks)

    # Prepare a shared dictionary for hook outputs.
    query_context_matrices = {}

    # Define a hook function that reads the per-chunk offset from the enclosing scope.
    def query_hook_fn(module, input, output):
        layer_idx = getattr(module, "layer_idx", None)
        if layer_idx is not None:
            query_states = output.detach()
            bsz, seq_len, hidden_dim = query_states.size()
            num_query_heads = module.num_query_heads
            head_dim = hidden_dim // num_query_heads
            query_states = (
                query_states.view(bsz, seq_len, num_query_heads, head_dim)
                .transpose(1, 2)
                .contiguous()
            )
            # Use _current_chunk_offset to keep only the query states of the appended question tokens.
            query_context_matrices[layer_idx] = query_states[:, :, _current_chunk_offset:, :].clone()

    # Pre-register hooks for all layers only once.
    hooks = []
    for i, layer in enumerate(model.model.layers):
        layer.self_attn.q_proj.layer_idx = i  # For tracking.
        layer.self_attn.q_proj.num_query_heads = layer.self_attn.config.num_attention_heads
        hook = layer.self_attn.q_proj.register_forward_hook(query_hook_fn)
        hooks.append(hook)

    # Process each document chunk sequentially.
    for j, tokenized_doc_chunk in enumerate(tokenized_doc_chunks):
        current_seq_length = tokenized_doc_chunk["input_ids"].size(1)
        # Save the offset where the hook closure can read it.
        _current_chunk_offset = current_seq_length
        # Clear the dictionary from any previous chunk.
        query_context_matrices.clear()

        # These chunks are already on the device.
        chunk_input_ids = tokenized_doc_chunk["input_ids"].contiguous()
        chunk_attention_mask = tokenized_doc_chunk["attention_mask"].contiguous()
        segment_attention_mask = torch.cat(
            [past_attention_mask, chunk_attention_mask, ones_mask], dim=-1
        ).contiguous()
        current_input_ids = torch.cat([chunk_input_ids, question_ids], dim=-1).contiguous()
        current_attention_mask = torch.cat([segment_attention_mask, question_attention_mask], dim=-1).contiguous()

        past_seen_tokens = cache.get_seq_length() if cache is not None else 0
        cache_position = torch.arange(
            past_seen_tokens + chunk_input_ids.shape[1],
            past_seen_tokens + current_input_ids.shape[1],
            device=device
        )
        causal_mask = model.model._prepare_4d_causal_attention_mask_with_cache_position(
            current_attention_mask,
            sequence_length=question_ids.size(1),
            target_length=current_attention_mask.size(-1),
            dtype=dtype,
            device=device,
            cache_position=cache_position,
            batch_size=current_input_ids.size(0),
        ).contiguous()

        with torch.no_grad():
            outputs = model.model(
                input_ids=current_input_ids,
                use_cache=True,
                past_key_values=cache,
            )
        cache = outputs.past_key_values

        len_question = question_ids.size(1)
        # Now, for each transformer layer, update the cache using the query/key attention.
        for layer_idx in range(len(model.model.layers)):
            key_matrix = cache.key_cache[layer_idx]
            query_matrix = query_context_matrices[layer_idx]
            layer_cache_pos = torch.arange(
                past_cache_len + current_seq_length,
                past_cache_len + current_seq_length + len_question,
                device=device
            )
            position_ids = layer_cache_pos.unsqueeze(0)
            cos, sin = rotary_emb(query_matrix, position_ids)
            cos = cos.unsqueeze(1)
            sin = sin.unsqueeze(1)
            query_matrix = (query_matrix * cos) + (rotate_half(query_matrix) * sin)
            num_repeats = model.config.num_attention_heads // model.config.num_key_value_heads
            key_matrix = repeat_kv(key_matrix, num_repeats)

            scaling = math.sqrt(model.config.head_dim)
            attention_matrix = torch.matmul(query_matrix, key_matrix.transpose(2, 3)) / scaling
            causal_mask_sliced = causal_mask[:, :, :, : key_matrix.shape[-2]]
            attention_matrix = attention_matrix + causal_mask_sliced
            attention_matrix = torch.nn.functional.softmax(attention_matrix, dim=-1, dtype=torch.float32).to(query_matrix.dtype)
            # Normalization
            tol = 1e-8
            binary_mask = (torch.abs(causal_mask_sliced.to(torch.float32)) < tol).to(torch.float32)
            non_zero_counts = binary_mask.sum(dim=3, keepdim=True)
            non_zero_counts = torch.clamp_min(non_zero_counts, 1.0).to(attention_matrix.dtype)
            attention_matrix = attention_matrix / non_zero_counts
            if j != num_chunks - 1:
                attention_matrix = attention_matrix[:, :, :, : past_cache_len + current_seq_length].clone().contiguous()
            else:
                attention_matrix = attention_matrix[:, :, :, : past_cache_len + current_seq_length + len_question].clone().contiguous()
            attention_matrix = torch.sum(attention_matrix, dim=-2)
            attention_matrix = attention_matrix.view(
                attention_matrix.size(0), model.config.num_key_value_heads, num_repeats, -1
            ).sum(dim=2)
            full_context_size = attention_matrix.size(-1)
            attention_matrix[..., :sink_tokens] = float("inf")
            if j == num_chunks - 1:
                attention_matrix[..., -len_question:] = float("inf")
            if j == 0:
                k = int(sink_tokens + (max(0, current_seq_length - sink_tokens) // compression_factor))
                k = min(k + past_cache_len, full_context_size)
            elif j < num_chunks - 1:
                to_keep_new = int(current_seq_length // compression_factor)
                k = min(past_cache_len + to_keep_new, full_context_size)
            else:
                desired_final = sink_tokens + target_token_size + len_question  # TODO: remember to include the question tokens
                k = desired_final if full_context_size >= desired_final else full_context_size
            k = max(k, sink_tokens)
            selected_indices = torch.topk(attention_matrix, k, dim=-1).indices
            selected_indices, _ = torch.sort(selected_indices, dim=-1)
            cache.compress_cache(layer_idx, selected_indices, inv_freq)

        past_cache_len = cache._seen_tokens
        past_attention_mask = torch.ones(1, past_cache_len, device=device)

    # Remove the hooks once after all chunks are processed.
    for hook in hooks:
        hook.remove()

    return cache
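Unlike the copy embedded in app.py, this module-level version takes the model as an explicit argument; a hypothetical call, assuming `model`, `tokenizer`, `document_text`, and `question_text` are defined as in app.py (values are illustrative):

ctx = tokenizer(document_text, return_tensors="pt")
q = tokenizer(question_text, return_tensors="pt")
compressed = get_compressed_kv_cache(
    model,
    sink_tokens=4,
    step_size=2,
    target_token_size=1024,
    context_ids=ctx["input_ids"],
    context_attention_mask=ctx["attention_mask"],
    question_ids=q["input_ids"],
    question_attention_mask=q["attention_mask"],
)
# On the last chunk the code keeps roughly sink_tokens + target_token_size + question-length entries per layer.
print(compressed.get_seq_length())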
preprocess_document.py
ADDED
@@ -0,0 +1,34 @@
from langchain_docling import DoclingLoader
from langchain_docling.loader import ExportType

# Import required classes for building a custom converter
from docling.document_converter import DocumentConverter, PdfFormatOption, InputFormat
from docling.datamodel.pipeline_options import PdfPipelineOptions
from docling.backend.pypdfium2_backend import PyPdfiumDocumentBackend
import spaces

@spaces.GPU
def convert_to_markdown(file_objs, url, do_ocr, do_table_structure):
    file_path = file_objs if file_objs is not None else url
    pipeline_options = PdfPipelineOptions()
    pipeline_options.do_ocr = do_ocr
    pipeline_options.do_table_structure = do_table_structure
    pdf_format_options = PdfFormatOption(
        pipeline_options=pipeline_options,
        backend=PyPdfiumDocumentBackend,
    )
    doc_converter = DocumentConverter(
        allowed_formats=[InputFormat.PDF],
        format_options={
            InputFormat.PDF: pdf_format_options
        }
    )

    # Pass the custom converter to the DoclingLoader.
    loader = DoclingLoader(
        file_path=file_path,
        export_type=ExportType.MARKDOWN,
        converter=doc_converter
    )
    docs = loader.load()
    return docs[0].page_content
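A hypothetical call mirroring how app.py drives this helper (the URL is the same placeholder used in the UI, not a real document):

markdown = convert_to_markdown(
    file_objs=None,
    url="https://example.com/document.pdf",
    do_ocr=False,
    do_table_structure=False,
)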
rag.py
ADDED
@@ -0,0 +1,53 @@
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain.schema.document import Document
from langchain_community.embeddings import HuggingFaceBgeEmbeddings
from langchain_chroma import Chroma
import spaces
from langchain_text_splitters import MarkdownHeaderTextSplitter
import os
from transformers import AutoTokenizer
api_token = os.getenv("HF_TOKEN")
model_name = "meta-llama/Llama-3.1-8B-Instruct"
tokenizer = AutoTokenizer.from_pretrained(model_name, token=api_token)

embedding_model = HuggingFaceBgeEmbeddings(
    model_name="BAAI/bge-large-en-v1.5",
    model_kwargs={"device": "cuda"},
    encode_kwargs={"normalize_embeddings": True},
    query_instruction=""
)


def create_rag_index(text_no_prefix):
    """Loads the PDF, splits its text, and builds a vectorstore for naive RAG."""
    text_splitter = RecursiveCharacterTextSplitter.from_huggingface_tokenizer(
        tokenizer,
        chunk_size=256,
        chunk_overlap=0,
        add_start_index=True,
        strip_whitespace=True,
        separators=["\n\n", "\n", ".", " ", ""],
    )
    # Concatenate pages and create Document objects.
    docs = [Document(page_content=x) for x in text_splitter.split_text(text_no_prefix)]

    vectorstore = Chroma.from_documents(documents=docs, embedding=embedding_model)
    return vectorstore

def run_naive_rag_query(vectorstore, query, rag_token_size, prefix, task, few_shot_examples):
    """
    For naive RAG, retrieves top-k chunks (k based on target token size)
    and generates an answer using those chunks.
    """
    k = max(1, rag_token_size // 256)
    retriever = vectorstore.as_retriever(search_type="similarity", search_kwargs={"k": k})
    retrieved_docs = retriever.invoke(query)
    for doc in retrieved_docs:
        print("=================")
        print(doc.page_content)
        print("=================")
    formatted_context = "\n\n".join([doc.page_content for doc in retrieved_docs])

    rag_context = prefix + "Retrieved context: \n" + formatted_context + task + few_shot_examples

    return rag_context
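Note that run_naive_rag_query returns the retrieval-augmented prompt string rather than a model answer; a short usage sketch with illustrative inputs (markdown_text and the query are assumptions, not part of the commit):

index = create_rag_index(markdown_text)          # markdown_text: converted document without the chat prefix
prompt = run_naive_rag_query(
    index,
    query="Prior to playing for Michigan State, where did Keith Nichol play?",
    rag_token_size=2048,                         # retrieves k = 2048 // 256 = 8 chunks
    prefix="",
    task="",
    few_shot_examples="",
)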
requirements.txt
ADDED
@@ -0,0 +1,14 @@
torch
transformers==4.49.0
tokenizers
huggingface-hub
sentence-transformers
datasets
bitsandbytes
langchain
langchain-community
langchainhub
langchain-openai
langchain_chroma
docling
langchain_docling
utils.py
ADDED
@@ -0,0 +1,77 @@
import torch
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    Repeats key-value hidden states along the key-value head dimension.
    Args:
        hidden_states (torch.Tensor): Input tensor with shape either
            (batch, num_key_value_heads, seqlen, head_dim) or
            (num_layers, batch, num_key_value_heads, seqlen, head_dim).
        n_rep (int): Number of repetitions for key-value heads.
    Returns:
        torch.Tensor: The repeated tensor with shape either
            (batch, num_attention_heads, seqlen, head_dim) or
            (num_layers, batch, num_attention_heads, seqlen, head_dim).
    """
    if hidden_states.dim() == 4:  # (batch, num_key_value_heads, seqlen, head_dim)
        batch, num_key_value_heads, slen, head_dim = hidden_states.shape
        if n_rep == 1:
            return hidden_states
        hidden_states = hidden_states.unsqueeze(2).expand(batch, num_key_value_heads, n_rep, slen, head_dim)
        return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)

    elif hidden_states.dim() == 5:  # (num_layers, batch, num_key_value_heads, seqlen, head_dim)
        num_layers, batch, num_key_value_heads, slen, head_dim = hidden_states.shape
        if n_rep == 1:
            return hidden_states
        hidden_states = hidden_states.unsqueeze(3).expand(num_layers, batch, num_key_value_heads, n_rep, slen, head_dim)
        return hidden_states.reshape(num_layers, batch, num_key_value_heads * n_rep, slen, head_dim)

    else:
        raise ValueError("Input tensor must have 4 or 5 dimensions.")

import math

def calculate_tokens_suggest_compression_ratio(text, tokenizer, model):
    """
    Tokenizes the text and returns:
      - token_count: the number of tokens in the input text.
      - suggestions: a list of 6 candidate compression ratios.
      - tokenized: a dictionary containing 'input_ids' and 'attention_mask'.

    The suggestions are chosen so that compressing the token count by these ratios
    would (in the worst case) bring the count within the maximum allowed tokens (128k).

    If the text already fits within the context (<= 128k tokens),
    the default suggestions [1, 2, 4, 8, 16, 32] are returned.
    If the text is too long, we generate six values in logarithmic space
    between max(required_ratio, 1) and 32 (or a higher upper bound if needed).
    """
    tokenized = tokenizer(text, return_tensors="pt", truncation=False)
    token_ids = tokenized["input_ids"][0]
    token_count = token_ids.size(0)
    max_context = model.config.max_position_embeddings
    if token_count <= max_context:
        required_ratio = 1.0
    else:
        required_ratio = token_count / max_context
    if required_ratio <= 1.0:
        suggestions = [1, 2, 4, 8, 16, 32]
    else:
        lower_bound = max(required_ratio, 1)
        if required_ratio < 32:
            upper_bound = 32
        else:
            upper_bound = required_ratio * (32 / 1)
        suggestions = [
            round(math.exp(math.log(lower_bound) + i * (math.log(upper_bound) - math.log(lower_bound)) / (6 - 1)), 2)
            for i in range(6)
        ]

    return token_count, suggestions, tokenized


def update_retrieval_context(token_count, compression_ratio):
    retrieval_tokens = int(token_count / compression_ratio)
    return f"Retrieval context tokens (after compression): {retrieval_tokens}"
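A small worked example of the two helpers above, assuming `document_text`, `tokenizer`, and `model` are the objects loaded in app.py (numbers are illustrative):

token_count, suggestions, _ = calculate_tokens_suggest_compression_ratio(document_text, tokenizer, model)
# For a document that fits in the 128k window: e.g. token_count = 40_000 and suggestions = [1, 2, 4, 8, 16, 32].
print(update_retrieval_context(40_000, compression_ratio=8))
# -> "Retrieval context tokens (after compression): 5000"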