#!/usr/bin/env python3
import torch
from PIL import Image
import numpy as np
from typing import Generator, List, Tuple, Union, cast
from pathlib import Path
import base64
from io import BytesIO
import matplotlib
import re
import io
import time
import backend.testquery as testquery
from colpali_engine.models import ColPali, ColPaliProcessor
from colpali_engine.utils.torch_utils import get_torch_device
from einops import rearrange
from vidore_benchmark.interpretability.torch_utils import (
normalize_similarity_map_per_query_token,
)
from vidore_benchmark.interpretability.vit_configs import VIT_CONFIG
matplotlib.use("Agg")
# Prepare the colormap once to avoid recomputation
# (matplotlib.cm.get_cmap is deprecated and removed in matplotlib >= 3.9)
colormap = matplotlib.colormaps["viridis"]
COLPALI_GEMMA_MODEL_NAME = "vidore/colpaligemma-3b-pt-448-base"
def load_model() -> Tuple[ColPali, ColPaliProcessor, str]:
model_name = "vidore/colpali-v1.2"
device = get_torch_device("auto")
print(f"Using device: {device}")
# Load the model
model = cast(
ColPali,
ColPali.from_pretrained(
model_name,
torch_dtype=torch.bfloat16 if torch.cuda.is_available() else torch.float32,
device_map=device,
),
).eval()
# Load the processor
processor = cast(ColPaliProcessor, ColPaliProcessor.from_pretrained(model_name))
return model, processor, device
def load_vit_config(model):
    # Look up the ViT config for the ColPali backbone
    vit_config = VIT_CONFIG[COLPALI_GEMMA_MODEL_NAME]
    print(f"ViT config: {vit_config}")
    return vit_config
def gen_similarity_maps(
model: ColPali,
processor: ColPaliProcessor,
device,
query: str,
query_embs: torch.Tensor,
token_idx_map: dict,
images: List[Union[Path, str]],
    vespa_sim_maps: List[dict],
) -> Generator[Tuple[int, str, int, str], None, None]:
"""
Generate similarity maps for the given images and query, and return base64-encoded blended images.
Args:
model (ColPali): The model used for generating embeddings.
processor (ColPaliProcessor): Processor for images and text.
device: Device to run the computations on.
vit_config: Configuration for the Vision Transformer.
query (str): The query string.
query_embs (torch.Tensor): Query embeddings.
token_idx_map (dict): Mapping from tokens to their indices.
images (List[Union[Path, str]]): List of image paths or base64-encoded strings.
vespa_sim_maps (List[str]): List of Vespa similarity maps.
Yields:
Tuple[int, str, str]: A tuple containing the image index, the selected token, and the base64-encoded image.
"""
vit_config = load_vit_config(model)
# Process images and store original images and sizes
processed_images = []
original_images = []
original_sizes = []
for img in images:
if isinstance(img, Path):
try:
img_pil = Image.open(img).convert("RGB")
except Exception as e:
raise ValueError(f"Failed to open image from path: {e}")
elif isinstance(img, str):
try:
img_pil = Image.open(BytesIO(base64.b64decode(img))).convert("RGB")
except Exception as e:
raise ValueError(f"Failed to open image from base64 string: {e}")
else:
raise ValueError(f"Unsupported image type: {type(img)}")
original_images.append(img_pil.copy())
original_sizes.append(img_pil.size) # (width, height)
processed_images.append(img_pil)
    # If Vespa similarity maps are provided, use them instead of computing them
    if vespa_sim_maps:
        print("Using provided similarity maps")
        # A Vespa sim map looks like this:
        # "quantized": {
        #     "cells": [
        #         {
        #             "address": {
        #                 "patch": "0",
        #                 "querytoken": "0"
        #             },
        #             "value": 12,  # score in range [-128, 127]
        #         },
        #         ... and so on.
        #     ]
        # }
        # Turn these cells into a tensor shaped like a locally computed
        # similarity map: (n_images, n_query_tokens, n_patches, n_patches)
vespa_sim_map_tensor = torch.zeros(
(
len(vespa_sim_maps),
query_embs.size(dim=1),
vit_config.n_patch_per_dim,
vit_config.n_patch_per_dim,
)
)
        # The dummy test processor lacks image_seq_length; fall back to 1024
        image_seq_length = getattr(processor, "image_seq_length", 1024)
        for idx, vespa_sim_map in enumerate(vespa_sim_maps):
            for cell in vespa_sim_map["quantized"]["cells"]:
                patch = int(cell["address"]["patch"])
                if patch >= image_seq_length:
                    continue
                query_token = int(cell["address"]["querytoken"])
                value = cell["value"]
                vespa_sim_map_tensor[
                    idx,
                    query_token,
                    patch // vit_config.n_patch_per_dim,
                    patch % vit_config.n_patch_per_dim,
                ] = value
# Normalize the similarity map per query token
similarity_map_normalized = normalize_similarity_map_per_query_token(
vespa_sim_map_tensor
)
else:
# Preprocess inputs
print("Computing similarity maps")
start2 = time.perf_counter()
input_image_processed = processor.process_images(processed_images).to(device)
# Forward passes
with torch.no_grad():
output_image = model.forward(**input_image_processed)
# Remove the special tokens from the output
output_image = output_image[:, : processor.image_seq_length, :]
# Rearrange the output image tensor to represent the 2D grid of patches
output_image = rearrange(
output_image,
"b (h w) c -> b h w c",
h=vit_config.n_patch_per_dim,
w=vit_config.n_patch_per_dim,
)
# Ensure query_embs has batch dimension
if query_embs.dim() == 2:
query_embs = query_embs.unsqueeze(0).to(device)
else:
query_embs = query_embs.to(device)
# Compute the similarity map
similarity_map = torch.einsum(
"bnk,bhwk->bnhw", query_embs, output_image
) # Shape: (batch_size, query_tokens, h, w)
end2 = time.perf_counter()
print(f"Similarity map computation took: {end2 - start2} s")
# Normalize the similarity map per query token
similarity_map_normalized = normalize_similarity_map_per_query_token(
similarity_map
)
# Collect the blended images
start3 = time.perf_counter()
    SCALING_FACTOR = 8  # render heatmaps at a downscaled resolution (min 32 px per side)
    for idx in range(len(original_images)):
        sim_map_resolution = (
            max(32, int(original_sizes[idx][0] / SCALING_FACTOR)),
            max(32, int(original_sizes[idx][1] / SCALING_FACTOR)),
        )
for token, token_idx in token_idx_map.items():
if is_special_token(token):
continue
# Get the similarity map for this image and the selected token
sim_map = similarity_map_normalized[idx, token_idx, :, :] # Shape: (h, w)
            # Move to CPU and cast to float32 (NumPy does not support BFloat16)
sim_map_np = sim_map.cpu().float().numpy()
            # Upsample the similarity map to the downscaled target resolution
sim_map_img = Image.fromarray(sim_map_np)
sim_map_resized = sim_map_img.resize(
sim_map_resolution, resample=Image.BICUBIC
)
# Convert the resized similarity map to a NumPy array
sim_map_resized_np = np.array(sim_map_resized, dtype=np.float32)
# Normalize the similarity map to range [0, 1]
sim_map_min = sim_map_resized_np.min()
sim_map_max = sim_map_resized_np.max()
if sim_map_max - sim_map_min > 1e-6:
sim_map_normalized = (sim_map_resized_np - sim_map_min) / (
sim_map_max - sim_map_min
)
else:
sim_map_normalized = np.zeros_like(sim_map_resized_np)
# Apply a colormap to the normalized similarity map
heatmap = colormap(sim_map_normalized) # Returns an RGBA array
# Convert the heatmap to a PIL Image
heatmap_uint8 = (heatmap * 255).astype(np.uint8)
heatmap_img = Image.fromarray(heatmap_uint8)
heatmap_img_rgba = heatmap_img.convert("RGBA")
# Save the image to a BytesIO buffer
buffer = io.BytesIO()
heatmap_img_rgba.save(buffer, format="PNG")
buffer.seek(0)
            # Encode the PNG to base64; the yielded image is the raw heatmap,
            # and blending it over the original page is left to the caller
            heatmap_b64 = base64.b64encode(buffer.read()).decode("utf-8")
            yield idx, token, token_idx, heatmap_b64
end3 = time.perf_counter()
print(f"Blending images took: {end3 - start3} s")
def get_query_embeddings_and_token_map(
processor, model, query
) -> Tuple[torch.Tensor, dict]:
if model is None: # use static test query data (saves time when testing)
return testquery.q_embs, testquery.token_to_idx
start_time = time.perf_counter()
inputs = processor.process_queries([query]).to(model.device)
with torch.no_grad():
embeddings_query = model(**inputs)
q_emb = embeddings_query.to("cpu")[0] # Extract the single embedding
    # Tokenize the decoded query so each token string can be mapped to its index
    query_tokens = processor.tokenizer.tokenize(processor.decode(inputs.input_ids[0]))
    print(query_tokens)
    # Map each token string to its position in the query embedding
    token_to_idx = {val: idx for idx, val in enumerate(query_tokens)}
end_time = time.perf_counter()
print(f"Query inference took: {end_time - start_time} s")
return q_emb, token_to_idx
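# An illustrative sketch of how a caller typically consumes the mapping above
# (the query string is a made-up example, not from this app's test data):
#
#   q_emb, token_to_idx = get_query_embeddings_and_token_map(processor, model, "pandas dataframe merge")
#   visual_tokens = {tok: i for tok, i in token_to_idx.items() if not is_special_token(tok)}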
def is_special_token(token: str) -> bool:
    # Matches tokens that start with '<', consist only of digits or whitespace,
    # are a single word character, or equal the string 'Question'. These (plus
    # any token shorter than 3 characters) are excluded from similarity map
    # generation.
    pattern = re.compile(r"^<.*$|^\d+$|^\s+$|^\w$|^Question$")
if (len(token) < 3) or pattern.match(token):
return True
return False
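# Hedged end-to-end sketch for local testing. The image path below is a
# placeholder, and passing an empty vespa_sim_maps list exercises the branch
# that computes the similarity maps locally:
if __name__ == "__main__":
    model, processor, device = load_model()
    query = "example query"
    q_emb, token_to_idx = get_query_embeddings_and_token_map(processor, model, query)
    for idx, token, token_idx, heatmap_b64 in gen_similarity_maps(
        model,
        processor,
        device,
        query,
        q_emb,
        token_to_idx,
        images=[Path("example_page.png")],  # placeholder path
        vespa_sim_maps=[],  # empty -> compute similarity maps locally
    ):
        print(f"image {idx}, token '{token}' (idx {token_idx}): {len(heatmap_b64)} base64 chars")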