|
from __future__ import annotations |
|
|
|
import gc |
|
import logging |
|
import shutil |
|
from pathlib import Path |
|
from typing import Dict, List, Tuple, Union
|
|
|
import faiss |
|
import numpy as np |
|
import torch |
|
from datasets import Dataset |
|
from faiss.contrib.ondisk import merge_ondisk |
|
from huggingface_hub import snapshot_download |
|
from sentence_transformers import SentenceTransformer |
|
from tqdm import auto as tqdm |
|
|
|
logger = logging.getLogger(__name__) |
|
|
|
|
|
class OnDiskIVFHelper: |
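    """Build and query a FAISS ``IVF4096,Flat`` index whose data lives on disk.

    The workflow follows faiss' on-disk pattern: ``train`` writes the empty
    trained index to ``faiss.index``, ``add_with_ids`` writes each block of
    vectors to its own ``block_*.index`` file, and ``merge`` combines the
    blocks into ``merged_index.ivfdata`` behind a small ``populated.index``
    header that ``search`` then reads.

    A minimal usage sketch (array names and sizes are illustrative only)::

        helper = OnDiskIVFHelper("/tmp/ivf")
        helper.train(xt)                  # xt: (n_train >= 4096, d) float32
        helper.add_with_ids(xb, 0)        # ids 0 .. len(xb) - 1
        helper.merge()
        D, I = helper.search(xq, top_k=5)
    """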
|
def __init__(self, path: Union[str, Path]): |
|
self.path = path |
|
|
|
@property |
|
def path(self): |
|
return self._path |
|
|
|
@path.setter |
|
def path(self, value: Union[str, Path]): |
|
value = Path(value) |
|
value.mkdir(parents=True, exist_ok=True) |
|
self._path = value |
|
|
|
def train(self, xt: np.ndarray): |
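        """Train an ``IVF4096,Flat`` index on ``xt`` and write it to ``faiss.index``."""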
|
index = faiss.index_factory(xt.shape[1], "IVF4096,Flat") |
|
logger.info("Training index...") |
|
index.train(xt) |
|
train_index_path = str(self.path / "faiss.index") |
|
logger.info(f"Write {train_index_path}") |
|
faiss.write_index(index, train_index_path) |
|
|
|
def add_with_ids(self, xb: np.ndarray, ix: np.ndarray | int): |
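        """Add ``xb`` under ids ``ix`` and write the block to ``block_{first_id}.index``.

        ``ix`` may be an array of ids or an int giving the first id of the block.
        """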
|
        if isinstance(ix, int):
            # Treat an int as the starting id and assign consecutive int64 ids,
            # which is the id type faiss expects for add_with_ids.
            ix = np.arange(ix, ix + xb.shape[0], dtype=np.int64)
        block_num = int(ix[0])
|
train_index_path = str(self.path / "faiss.index") |
|
index = faiss.read_index(train_index_path) |
|
logger.info("Adding vectors to index...") |
|
index.add_with_ids(xb, ix) |
|
block_index_path = str(self.path / f"block_{block_num}.index") |
|
logger.info(f"Write {block_index_path}") |
|
faiss.write_index(index, block_index_path) |
|
|
|
def merge(self): |
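        """Merge every ``block_*.index`` file into ``merged_index.ivfdata``.

        The searchable header is written to ``populated.index``; this is a
        no-op when no block files exist yet.
        """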
|
logger.info("Loading trained index") |
|
train_index_path = str(self.path / "faiss.index") |
|
index = faiss.read_index(train_index_path) |
|
        block_fnames = [str(p) for p in sorted(self.path.glob("block_*.index"))]
|
if len(block_fnames) == 0: |
|
return |
|
merged_index_path = str(self.path / "merged_index.ivfdata") |
|
merge_ondisk(index, block_fnames, merged_index_path) |
|
populated_index_path = str(self.path / "populated.index") |
|
logger.info(f"Write {populated_index_path}") |
|
faiss.write_index(index, populated_index_path) |
|
|
|
def search(self, xq: np.ndarray, top_k: int = 5, nprobe: int = 16): |
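        """Return faiss ``(distances, ids)`` for the ``top_k`` neighbours of ``xq``."""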
|
populated_index_path = str(self.path / "populated.index") |
|
logger.info("Read " + populated_index_path) |
|
index = faiss.read_index(populated_index_path) |
|
index.nprobe = nprobe |
|
D, I = index.search(xq, top_k) |
|
return D, I |
|
|
|
    def delete(self):
        # Remove all index files, then recreate the empty directory so that
        # later calls (e.g. ``train``) can still write into ``self.path``.
        shutil.rmtree(self.path)
        self.path.mkdir(parents=True, exist_ok=True)
|
|
|
@property |
|
def ntotal(self) -> int: |
|
populated_index_path = str(self.path / "populated.index") |
|
logger.info("Read " + populated_index_path) |
|
index = faiss.read_index(populated_index_path) |
|
return index.ntotal |
|
|
|
def __len__(self) -> int: |
|
return self.ntotal |
|
|
|
@property |
|
def is_trained(self) -> bool: |
|
index_file_path = self.path / "populated.index" |
|
if not index_file_path.exists(): |
|
return False |
|
index_file_path = str(index_file_path) |
|
index = faiss.read_index(index_file_path) |
|
return index.is_trained |
|
|
|
|
|
class DatasetIndex: |
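    """Embed documents with DPR encoders and index them via ``OnDiskIVFHelper``.

    Contexts are encoded with ``facebook-dpr-ctx_encoder-multiset-base`` and
    queries with ``facebook-dpr-question_encoder-multiset-base``; only one of
    the two encoders is kept in memory at a time.

    A minimal usage sketch (``docs`` stands in for a large list of strings;
    the IVF index needs at least 4096 training vectors)::

        index = DatasetIndex("/tmp/my_index")
        index.create_index(docs)
        results = index.search("a query", k=5)
    """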
|
def __init__(self, path: Union[str, Path]): |
|
self._context_embedding_model = None |
|
self._query_embedding_model = None |
|
self.context_model = "facebook-dpr-ctx_encoder-multiset-base" |
|
self.query_model = "facebook-dpr-question_encoder-multiset-base" |
|
|
|
if ( |
|
hasattr(torch.backends, "mps") |
|
and torch.backends.mps.is_available() |
|
): |
|
self.device = torch.device("mps") |
|
elif torch.cuda.is_available(): |
|
self.device = torch.device("cuda") |
|
else: |
|
self.device = torch.device("cpu") |
|
self.index = OnDiskIVFHelper(path) |
|
|
|
@property |
|
def context_embedding_model(self) -> SentenceTransformer: |
|
if self._context_embedding_model is None: |
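            # Keep at most one encoder in memory: drop the query model before
            # loading the context model.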
|
self._query_embedding_model = None |
|
self._context_embedding_model = SentenceTransformer( |
|
self.context_model, device=self.device |
|
) |
|
return self._context_embedding_model |
|
|
|
@property |
|
def query_embedding_model(self) -> SentenceTransformer: |
|
if self._query_embedding_model is None: |
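            # Keep at most one encoder in memory: drop the context model before
            # loading the query model.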
|
self._context_embedding_model = None |
|
self._query_embedding_model = SentenceTransformer( |
|
self.query_model, device=self.device |
|
) |
|
return self._query_embedding_model |
|
|
|
@property |
|
def ntotal(self) -> int: |
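        """Number of indexed vectors, merging pending blocks if needed; 0 when empty."""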
|
try: |
|
ntotal = len(self.index) |
|
except Exception: |
|
try: |
|
self.index.merge() |
|
ntotal = len(self.index) |
|
except Exception: |
|
ntotal = 0 |
|
return ntotal |
|
|
|
def create_index( |
|
self, |
|
dataset: Dataset | List[Dict] | List[str], |
|
/, |
|
*, |
|
column: str | None = None, |
|
batch_size: int = 32, |
|
block_size: int = 159744, |
|
force: bool = False, |
|
): |
|
"""Build FAISS index for the given documents""" |
|
n_documents = len(dataset) |
|
n_index = self.ntotal |
|
if not force and n_index == n_documents: |
|
logging.info("Found existing index. Skipping...") |
|
return |
|
self.index.delete() |
|
logging.info("Training index/Adding vectors to index...") |
|
|
|
for block_start in tqdm.trange( |
|
0, n_documents, block_size, desc="Blocks" |
|
): |
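            # Encode one block of documents. Slicing a HF ``Dataset`` yields a
            # dict of columns, so ``column`` selects the text field; plain lists
            # of strings are passed through as-is.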
|
|
|
|
|
embeddings = self.context_embedding_model.encode( |
|
( |
|
dataset[block_start : block_start + block_size] |
|
if column is None |
|
else dataset[block_start : block_start + block_size][ |
|
column |
|
] |
|
), |
|
batch_size=batch_size, |
|
convert_to_numpy=True, |
|
show_progress_bar=True, |
|
device=self.device, |
|
) |
|
batch_end = min(block_start + block_size, n_documents) |
|
assert len(embeddings) == batch_end - block_start |
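            # The first block doubles as training data for the IVF index; every
            # block is then added under ids that start at ``block_start``, and
            # the on-disk blocks are re-merged so progress stays searchable.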
|
if block_start == 0: |
|
self.index.train(embeddings) |
|
self.index.add_with_ids(embeddings, block_start) |
|
self.index.merge() |
|
print("Number of indexed documents:", len(self.index)) |
|
|
|
del embeddings |
|
gc.collect() |
|
if self.device.type == "cuda": |
|
torch.cuda.empty_cache() |
|
logging.info("Indexing complete!") |
|
|
|
    def search(
        self, query: Union[str, List[str]], k: int = 5
    ) -> List[List[Tuple[int, float]]]:
        """Search the index and return ``(doc_id, distance)`` pairs per query."""
        if not self.index.is_trained:
            raise ValueError("Index not built yet!")
|
|
|
if isinstance(query, str): |
|
query = [query] |
|
query_vector = self.query_embedding_model.encode( |
|
query, convert_to_numpy=True |
|
) |
|
|
|
D, I = self.index.search(query_vector, k) |
|
|
|
results = [] |
|
for i in range(len(query)): |
|
results.append([ |
|
(idx, score) for idx, score in zip(I[i], D[i]) if idx >= 0 |
|
]) |
|
return results |
|
|
|
@classmethod |
|
def from_pretrained(cls, model_id: str) -> DatasetIndex: |
|
"""Download and load a pre-trained index""" |
|
snapshot_path = snapshot_download( |
|
repo_id=model_id, |
|
repo_type="dataset", |
|
allow_patterns="index/*", |
|
) |
|
        snapshot_path = Path(snapshot_path)
        instance = cls(snapshot_path / "index")

        # Accessing ``ntotal`` merges the downloaded blocks (if needed) so the
        # index is ready to search.
        _ = instance.ntotal
        return instance
|
|