from __future__ import annotations

import gc
import logging
import shutil
from pathlib import Path
from typing import Dict, List, Tuple, Union

import faiss
import numpy as np
import torch
from datasets import Dataset
from faiss.contrib.ondisk import merge_ondisk
from huggingface_hub import snapshot_download
from sentence_transformers import SentenceTransformer
from tqdm import auto as tqdm

logger = logging.getLogger(__name__)


class OnDiskIVFHelper:
    def __init__(self, path: Union[str, Path]):
        self.path = path

    @property
    def path(self):
        return self._path

    @path.setter
    def path(self, value: Union[str, Path]):
        value = Path(value)
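        # Create the directory eagerly so later index writes cannot fail.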
        value.mkdir(parents=True, exist_ok=True)
        self._path = value

    def train(self, xt: np.ndarray):
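        # "IVF4096,Flat" = a 4096-centroid inverted file storing raw vectors;
        # training learns the coarse quantizer (k-means centroids).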
        index = faiss.index_factory(xt.shape[1], "IVF4096,Flat")
        logger.info("Training index...")
        index.train(xt)
        train_index_path = str(self.path / "faiss.index")
        logger.info(f"Write {train_index_path}")
        faiss.write_index(index, train_index_path)

    def add_with_ids(self, xb: np.ndarray, ix: np.ndarray | int):
        if isinstance(ix, int):
            ix = np.arange(ix, ix + xb.shape[0])
        # FAISS requires 64-bit ids; np.arange may default to int32 on some
        # platforms, so cast explicitly.
        ix = np.asarray(ix, dtype=np.int64)
        # Name the shard after its first id so each block gets a unique file.
        block_num = ix[0]
        train_index_path = str(self.path / "faiss.index")
        index = faiss.read_index(train_index_path)
        logger.info("Adding vectors to index...")
        index.add_with_ids(xb, ix)
        block_index_path = str(self.path / f"block_{block_num}.index")
        logger.info(f"Write {block_index_path}")
        faiss.write_index(index, block_index_path)

    def merge(self):
        logger.info("Loading trained index")
        train_index_path = str(self.path / "faiss.index")
        index = faiss.read_index(train_index_path)
        block_fnames = sorted(str(p) for p in self.path.glob("block_*.index"))
        if len(block_fnames) == 0:
            return
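        # merge_ondisk rewrites the inverted lists of all shards into a single
        # memory-mapped .ivfdata file and points `index` at it.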
        merged_index_path = str(self.path / "merged_index.ivfdata")
        merge_ondisk(index, block_fnames, merged_index_path)
        populated_index_path = str(self.path / "populated.index")
        logger.info(f"Write {populated_index_path}")
        faiss.write_index(index, populated_index_path)

    def search(self, xq: np.ndarray, top_k: int = 5, nprobe: int = 16):
        populated_index_path = str(self.path / "populated.index")
        logger.info("Read " + populated_index_path)
        index = faiss.read_index(populated_index_path)
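        # nprobe = how many inverted lists to scan per query; higher values
        # trade speed for recall.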
        index.nprobe = nprobe
        D, I = index.search(xq, top_k)  # noqa: E741
        return D, I

    def delete(self):
        # Remove all on-disk artifacts but recreate the (now empty) directory
        # so that subsequent train/add/merge calls can still write into it.
        shutil.rmtree(self.path, ignore_errors=True)
        self.path.mkdir(parents=True, exist_ok=True)

    @property
    def ntotal(self) -> int:
        populated_index_path = str(self.path / "populated.index")
        logger.info("Read " + populated_index_path)
        index = faiss.read_index(populated_index_path)
        return index.ntotal

    def __len__(self) -> int:
        return self.ntotal

    @property
    def is_trained(self) -> bool:
        index_file_path = self.path / "populated.index"
        if not index_file_path.exists():
            return False
        index_file_path = str(index_file_path)
        index = faiss.read_index(index_file_path)
        return index.is_trained
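
# A minimal usage sketch for OnDiskIVFHelper (illustrative values; the
# IVF4096 factory string needs at least 4096 training vectors, and FAISS
# expects float32 input):
#
#     helper = OnDiskIVFHelper("ivf_demo")
#     xb = np.random.rand(8192, 768).astype(np.float32)
#     helper.train(xb)                       # writes faiss.index
#     helper.add_with_ids(xb, 0)             # writes block_0.index
#     helper.merge()                         # writes populated.index
#     D, I = helper.search(xb[:2], top_k=5)  # distances and ids per query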


class DatasetIndex:
    def __init__(self, path: Union[str, Path]):
        self._context_embedding_model = None
        self._query_embedding_model = None
        self.context_model = "facebook-dpr-ctx_encoder-multiset-base"
        self.query_model = "facebook-dpr-question_encoder-multiset-base"
        # Pick the fastest available device: MPS, then CUDA, then CPU
        if (
            hasattr(torch.backends, "mps")
            and torch.backends.mps.is_available()
        ):
            self.device = torch.device("mps")
        elif torch.cuda.is_available():
            self.device = torch.device("cuda")
        else:
            self.device = torch.device("cpu")
        self.index = OnDiskIVFHelper(path)

    @property
    def context_embedding_model(self) -> SentenceTransformer:
        if self._context_embedding_model is None:
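            # Drop the query encoder so only one DPR model stays in memory.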
            self._query_embedding_model = None
            self._context_embedding_model = SentenceTransformer(
                self.context_model, device=self.device
            )
        return self._context_embedding_model

    @property
    def query_embedding_model(self) -> SentenceTransformer:
        if self._query_embedding_model is None:
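            # Drop the context encoder so only one DPR model stays in memory.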
            self._context_embedding_model = None
            self._query_embedding_model = SentenceTransformer(
                self.query_model, device=self.device
            )
        return self._query_embedding_model

    @property
    def ntotal(self) -> int:
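        # populated.index may not exist yet; try a merge before giving up.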
        try:
            ntotal = len(self.index)
        except Exception:
            try:
                self.index.merge()
                ntotal = len(self.index)
            except Exception:
                ntotal = 0
        return ntotal

    def create_index(
        self,
        dataset: Dataset | List[Dict] | List[str],
        /,
        *,
        column: str | None = None,
        batch_size: int = 32,
        block_size: int = 159744,
        force: bool = False,
    ):
        """Build FAISS index for the given documents"""
        n_documents = len(dataset)
        n_index = self.ntotal
        if not force and n_index == n_documents:
            logger.info("Found existing index. Skipping...")
            return
        self.index.delete()
        logger.info("Training index / adding vectors to index...")
        # Encode, train (first block only), and add vectors block by block
        for block_start in tqdm.trange(
            0, n_documents, block_size, desc="Blocks"
        ):
            # Encode this block of documents with the DPR context encoder
            embeddings = self.context_embedding_model.encode(
                (
                    dataset[block_start : block_start + block_size]
                    if column is None
                    else dataset[block_start : block_start + block_size][
                        column
                    ]
                ),
                batch_size=batch_size,
                convert_to_numpy=True,
                show_progress_bar=True,
                device=self.device,
            )
            batch_end = min(block_start + block_size, n_documents)
            assert len(embeddings) == batch_end - block_start
            if block_start == 0:
                # Train the IVF coarse quantizer on the first block only
                self.index.train(embeddings)
            self.index.add_with_ids(embeddings, block_start)
        self.index.merge()
        logger.info(f"Number of indexed documents: {len(self.index)}")
        # Clear memory
        del embeddings
        gc.collect()
        if self.device.type == "cuda":
            torch.cuda.empty_cache()
        logging.info("Indexing complete!")

    def search(
        self, query: Union[str, List[str]], k: int = 5
    ) -> List[List[Tuple[int, float]]]:
        """Search the index; returns (document_id, score) pairs per query."""
        if self.ntotal == 0:
            raise ValueError("Index not built yet!")
        # Encode the query with the DPR question encoder
        if isinstance(query, str):
            query = [query]
        query_vector = self.query_embedding_model.encode(
            query, convert_to_numpy=True
        )
        # Search
        D, I = self.index.search(query_vector, k)  # noqa: E741
        # Format results; FAISS pads missing hits with id -1, so drop those
        results = []
        for i in range(len(query)):
            results.append([
                (idx, score) for idx, score in zip(I[i], D[i]) if idx >= 0
            ])
        return results

    @classmethod
    def from_pretrained(cls, model_id: str) -> DatasetIndex:
        """Download and load a pre-trained index"""
        snapshot_path = snapshot_download(
            repo_id=model_id,
            repo_type="dataset",
            allow_patterns="index/*",
        )
        index = cls(Path(snapshot_path) / "index")
        # Accessing ntotal forces a merge if only block shards were uploaded
        _ = index.ntotal
        return index
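

if __name__ == "__main__":
    # Hedged end-to-end sketch, not part of the library proper. The corpus
    # below is synthetic filler: the IVF4096 factory string used in
    # OnDiskIVFHelper.train needs at least 4096 training vectors, so tiny
    # toy corpora will fail at train time.
    logging.basicConfig(level=logging.INFO)
    corpus = [f"This is placeholder document number {i}." for i in range(5000)]
    index = DatasetIndex("demo_index")
    index.create_index(corpus, force=True)
    # One result list per query; each entry is a (document_id, score) pair
    print(index.search("placeholder document", k=3)[0])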