File size: 13,596 Bytes
8ce4d25
e0b54fb
b7897bb
ece4c70
9e9d8e8
94df778
e0b54fb
9e9d8e8
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
8ce4d25
9e9d8e8
 
 
8ce4d25
9e9d8e8
be59b6e
a0b3781
3b2eca4
b7897bb
3b2eca4
 
 
 
 
 
fa270d9
3b2eca4
8ce4d25
 
 
 
 
 
 
 
 
 
b7897bb
 
 
 
 
 
 
 
b08a991
 
 
 
 
 
 
 
b7897bb
1f02318
8ce4d25
b7897bb
8ce4d25
 
 
 
 
b7897bb
 
b08a991
 
b7897bb
b08a991
8ce4d25
 
a0b3781
be59b6e
b7897bb
 
 
 
e0b54fb
 
 
b7897bb
295263a
b7897bb
 
 
 
9e9d8e8
 
 
 
 
8ce4d25
 
1f02318
 
 
 
 
 
e0b54fb
 
 
 
 
 
9e9d8e8
 
 
8ce4d25
 
 
 
b62467a
8ce4d25
 
 
06120a1
 
 
b7897bb
8ce4d25
 
fa270d9
ece4c70
fa270d9
ece4c70
 
8ce4d25
9e9d8e8
be59b6e
8ce4d25
be59b6e
 
8ce4d25
 
 
 
 
b7897bb
8ce4d25
b7897bb
 
 
 
 
 
 
8ce4d25
b7897bb
 
8ce4d25
 
300f274
9e9d8e8
8ce4d25
b7897bb
 
 
e0b54fb
 
b7897bb
 
8ce4d25
 
9e9d8e8
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
8ce4d25
9e9d8e8
8ce4d25
 
 
9e9d8e8
 
51cbf12
300f274
1f02318
 
fa270d9
8ce4d25
be59b6e
8ce4d25
a0b3781
be59b6e
 
9e9d8e8
fa270d9
be59b6e
 
4775a9f
 
 
fa270d9
9e9d8e8
 
 
 
 
fa270d9
8ce4d25
4775a9f
 
 
 
8ce4d25
 
 
 
 
4775a9f
be59b6e
 
e0b54fb
 
 
 
 
 
 
9e9d8e8
fa270d9
9e9d8e8
 
be59b6e
 
9e9d8e8
fa270d9
be59b6e
9e9d8e8
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
fa270d9
9e9d8e8
 
 
 
 
 
 
 
 
 
be59b6e
8ce4d25
3b2eca4
9e9d8e8
3b2eca4
 
 
 
 
9e9d8e8
 
 
 
 
 
4775a9f
3b2eca4
fa270d9
 
 
 
 
3b2eca4
8ce4d25
 
b7897bb
0b432c7
b7897bb
 
 
9e9d8e8
 
 
 
 
 
 
 
 
 
b7897bb
9e9d8e8
b7897bb
 
 
 
 
b08a991
 
 
 
 
 
 
 
 
 
 
 
b7897bb
0b432c7
9e9d8e8
e0b54fb
 
9e9d8e8
 
 
 
 
 
 
 
295263a
9e9d8e8
 
 
 
 
 
 
e0b54fb
 
 
 
b7897bb
 
 
 
 
 
 
 
 
 
 
 
 
e0b54fb
b7897bb
 
 
 
 
 
 
 
 
 
 
8ce4d25
 
b7897bb
8ce4d25
 
 
 
94df778
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
import asyncio
import hashlib
import os
import time
import uuid
from concurrent.futures import ThreadPoolExecutor
from pathlib import Path
import google.generativeai as genai
from fasthtml.common import (
    Div,
    Img,
    Main,
    P,
    Script,
    Link,
    fast_app,
    HighlightJS,
    FileResponse,
    RedirectResponse,
    Aside,
    StreamingResponse,
    JSONResponse,
    serve,
)
from shad4fast import ShadHead
from vespa.application import Vespa
import base64
from fastcore.parallel import threaded
from PIL import Image

from backend.colpali import get_query_embeddings_and_token_map, gen_similarity_maps
from backend.modelmanager import ModelManager
from backend.vespa_app import VespaQueryClient
from frontend.app import (
    ChatResult,
    Home,
    Search,
    SearchBox,
    SearchResult,
    SimMapButtonPoll,
    SimMapButtonReady,
    AboutThisDemo,
)
from frontend.layout import Layout

# --- Client-side assets ------------------------------------------------------
# Syntax highlighting for code snippets rendered in search results.
highlight_js_theme_link = Link(id="highlight-theme", rel="stylesheet", href="")
highlight_js_theme = Script(src="/static/js/highlightjs-theme.js")
highlight_js = HighlightJS(
    langs=["python", "javascript", "java", "json", "xml"],
    dark="github-dark",
    light="github",
)

# Custom scrollbars (used on the results pane via data-overlayscrollbars-initialize).
overlayscrollbars_link = Link(
    rel="stylesheet",
    href="https://cdnjs.cloudflare.com/ajax/libs/overlayscrollbars/2.10.0/styles/overlayscrollbars.min.css",
    type="text/css",
)
overlayscrollbars_js = Script(
    src="https://cdnjs.cloudflare.com/ajax/libs/overlayscrollbars/2.10.0/browser/overlayscrollbars.browser.es5.min.js"
)
# Autocomplete widget backing the /suggestions endpoint.
awesomplete_link = Link(
    rel="stylesheet",
    href="https://cdnjs.cloudflare.com/ajax/libs/awesomplete/1.1.7/awesomplete.min.css",
    type="text/css",
)
awesomplete_js = Script(
    src="https://cdnjs.cloudflare.com/ajax/libs/awesomplete/1.1.7/awesomplete.min.js"
)
# Server-sent-events helper consumed by the /get-message streaming endpoint.
sselink = Script(src="https://unpkg.com/[email protected]/sse.js")

# FastHTML app; every asset above is injected into each page's <head>.
app, rt = fast_app(
    htmlkw={"cls": "grid h-full"},
    pico=False,
    hdrs=(
        highlight_js,
        highlight_js_theme_link,
        highlight_js_theme,
        overlayscrollbars_link,
        overlayscrollbars_js,
        awesomplete_link,
        awesomplete_js,
        sselink,
        ShadHead(tw_cdn=False, theme_handle=True),
    ),
)
vespa_app: Vespa = VespaQueryClient()
thread_pool = ThreadPoolExecutor()
# Gemini config

genai.configure(api_key=os.getenv("GEMINI_API_KEY"))
GEMINI_SYSTEM_PROMPT = """If the user query is a question, try your best to answer it based on the provided images. 
If the user query can not be interpreted as a question, or if the answer to the query can not be inferred from the images,
answer with the exact phrase "I am sorry, I do not have enough information in the image to answer your question.".
Your response should be HTML formatted, but only simple tags, such as <b>. <p>, <i>, <br> <ul> and <li> are allowed. No HTML tables.
This means that newlines will be replaced with <br> tags, bold text will be enclosed in <b> tags, and so on.
But, you should NOT include backticks (`) or HTML tags in your response.
"""
gemini_model = genai.GenerativeModel(
    "gemini-1.5-flash-8b", system_instruction=GEMINI_SYSTEM_PROMPT
)
# On-disk cache locations: full result jpgs and blended similarity-map pngs.
STATIC_DIR = Path("static")
IMG_DIR = STATIC_DIR / "full_images"
SIM_MAP_DIR = STATIC_DIR / "sim_maps"
os.makedirs(IMG_DIR, exist_ok=True)
os.makedirs(SIM_MAP_DIR, exist_ok=True)


@app.on_event("startup")
def load_model_on_startup():
    """Attach the shared ModelManager singleton to the app at startup."""
    app.manager = ModelManager.get_instance()


@app.on_event("startup")
async def keepalive():
    """Launch the background task that periodically pings Vespa."""
    asyncio.create_task(poll_vespa_keepalive())


def generate_query_id(query, ranking_value):
    """Return a deterministic integer id for a (query, ranking) pair.

    The id is used as a filename prefix for sim maps and full images that
    are cached on disk and polled by other endpoints. The builtin hash()
    is randomized per process (PYTHONHASHSEED), so ids computed after a
    restart would never match files written by a previous run; derive the
    id from a stable cryptographic digest instead.
    """
    hash_input = (query + ranking_value).encode("utf-8")
    # 16 hex chars = 64 bits: compact yet collision-resistant for cache keys.
    return int(hashlib.sha256(hash_input).hexdigest()[:16], 16)


@rt("/static/{filepath:path}")
def serve_static(filepath: str):
    """Serve a file from the static directory.

    The catch-all route parameter would otherwise permit path traversal
    (e.g. /static/../main.py), so resolve the requested path and verify
    it is still inside STATIC_DIR before serving it.
    """
    base = STATIC_DIR.resolve()
    target = (base / filepath).resolve()
    if not target.is_relative_to(base):
        # Outside the static root: redirect home rather than leak files.
        return RedirectResponse("/")
    return FileResponse(target)


@rt("/")
def get(session):
    """Render the home page, assigning a session id on first visit."""
    session.setdefault("session_id", str(uuid.uuid4()))
    return Layout(Main(Home()))


@rt("/about-this-demo")
def get():
    """Render the static 'about this demo' page."""
    return Layout(Main(AboutThisDemo()))


@rt("/search")
def get(request):
    """Render the search page.

    Reads ``query`` and ``ranking`` from the query string. With a query,
    shows the results area plus the chat sidebar keyed by a stable query
    id; without one, shows the search box and a hint message.
    """
    params = request.query_params
    query_value = params.get("query", "").strip()
    ranking_value = params.get("ranking", "nn+colpali")
    print("/search: Fetching results for ranking_value:", ranking_value)

    if query_value:
        # Stable id ties this page's polling endpoints (sim maps, chat)
        # to the files produced for this particular query/ranking pair.
        query_id = generate_query_id(query_value, ranking_value)
        return Layout(
            Main(
                Search(request),
                data_overlayscrollbars_initialize=True,
                cls="border-t",
            ),
            Aside(
                ChatResult(query_id=query_id, query=query_value),
                cls="border-t border-l hidden md:block",
            ),
        )

    empty_hint = Div(
        P(
            "No query provided. Please enter a query.",
            cls="text-center text-muted-foreground",
        ),
        cls="p-10",
    )
    return Layout(
        Main(
            Div(
                SearchBox(query_value=query_value, ranking_value=ranking_value),
                empty_hint,
                cls="grid",
            )
        )
    )


@rt("/fetch_results2")
def get(query: str, ranking: str):
    """Placeholder for a planned two-phase results flow (not implemented).

    Planned design:
      1. Get results from Vespa synchronously, without sim maps or full
         images — via a new rank_profile that skips sim-map calculation,
         or by letting the Vespa endpoints take select_fields as a
         parameter.
      2. Kick off async tasks that fetch sim maps (one per image per
         token, saved as query_id_result_idx_token_idx.png) and full
         images (keyed by doc_id), each task saving to disk.
      3. Polling endpoints return a file once it exists; a cleanup task
         deletes old files.
    """
    pass


@rt("/fetch_results")
async def get(session, request, query: str, ranking: str):
    """HTMX endpoint: embed the query, search Vespa, return the result list.

    Non-HTMX requests are redirected to /search so the full page renders.
    Sim-map generation is fired off on a worker thread and polled
    separately by the per-result buttons.
    """
    if "hx-request" not in request.headers:
        return RedirectResponse("/search")

    # Stable id shared with the sim-map and chat polling endpoints.
    query_id = generate_query_id(query, ranking)
    print(f"Query id in /fetch_results: {query_id}")

    # Embed the query with the preloaded model before querying Vespa.
    manager = app.manager
    q_embs, idx_to_token = get_query_embeddings_and_token_map(
        manager.processor, manager.model, query
    )

    start = time.perf_counter()
    result = await vespa_app.get_result_from_query(
        query=query,
        q_embs=q_embs,
        ranking=ranking,
        idx_to_token=idx_to_token,
    )
    end = time.perf_counter()
    print(
        f"Search results fetched in {end - start:.2f} seconds, Vespa says searchtime was {result['timing']['searchtime']} seconds"
    )

    search_results = vespa_app.results_to_search_results(result, idx_to_token)
    # @threaded helper: returns immediately, writes sim-map pngs to disk.
    get_and_store_sim_maps(
        query_id=query_id,
        query=query,
        q_embs=q_embs,
        ranking=ranking,
        idx_to_token=idx_to_token,
    )
    return SearchResult(search_results, query_id)


def get_results_children(result):
    """Return the list of hit children from a raw Vespa response.

    Falls back to an empty list when the response has no root/children.
    """
    try:
        return result["root"]["children"]
    except KeyError:
        return []


async def poll_vespa_keepalive():
    """Background task: ping Vespa every 5 seconds, forever, to keep the
    connection warm. Started once from the startup hook."""
    while True:
        await asyncio.sleep(5)
        await vespa_app.keepalive()
        print(f"Vespa keepalive: {time.time()}")


@threaded
def get_and_store_sim_maps(query_id, query: str, q_embs, ranking, idx_to_token):
    """Generate similarity-map overlays for each result and save them as PNGs.

    Runs on a worker thread (@threaded) so /fetch_results can return
    immediately. Waits up to 5 s for the result jpgs to appear on disk
    (written by the /full_image endpoint), since the overlays are blended
    onto those images. Returns False if the images never showed up,
    True once all overlays are written.
    """
    # "<ranking>_sim" is presumably the rank profile variant that also
    # returns per-token similarity data — TODO confirm against the schema.
    ranking_sim = ranking + "_sim"
    vespa_sim_maps = vespa_app.get_sim_maps_from_query(
        query=query,
        q_embs=q_embs,
        ranking=ranking_sim,
        idx_to_token=idx_to_token,
    )
    img_paths = [
        IMG_DIR / f"{query_id}_{idx}.jpg" for idx in range(len(vespa_sim_maps))
    ]
    # All images should be downloaded, but best to wait 5 secs
    max_wait = 5
    start_time = time.time()
    while (
        not all([os.path.exists(img_path) for img_path in img_paths])
        and time.time() - start_time < max_wait
    ):
        time.sleep(0.2)
    if not all([os.path.exists(img_path) for img_path in img_paths]):
        print(f"Images not ready in 5 seconds for query_id: {query_id}")
        return False
    # Generator yields one blended overlay per (result image, query token).
    sim_map_generator = gen_similarity_maps(
        model=app.manager.model,
        processor=app.manager.processor,
        device=app.manager.device,
        query=query,
        query_embs=q_embs,
        token_idx_map=idx_to_token,
        images=img_paths,
        vespa_sim_maps=vespa_sim_maps,
    )
    for idx, token, token_idx, blended_img_base64 in sim_map_generator:
        # Filename format must match what /get_sim_map polls for.
        with open(SIM_MAP_DIR / f"{query_id}_{idx}_{token_idx}.png", "wb") as f:
            f.write(base64.b64decode(blended_img_base64))
        print(
            f"Sim map saved to disk for query_id: {query_id}, idx: {idx}, token: {token}"
        )
    return True


@app.get("/get_sim_map")
async def get_sim_map(query_id: str, idx: int, token: str, token_idx: int):
    """Polling endpoint for a single sim-map button.

    Returns the ready button (with the image) once the PNG exists on
    disk; otherwise returns a poll button so the client retries.
    """
    sim_map_path = SIM_MAP_DIR / f"{query_id}_{idx}_{token_idx}.png"
    if os.path.exists(sim_map_path):
        return SimMapButtonReady(
            query_id=query_id,
            idx=idx,
            token=token,
            token_idx=token_idx,
            img_src=sim_map_path,
        )
    print(f"Sim map not ready for query_id: {query_id}, idx: {idx}, token: {token}")
    return SimMapButtonPoll(
        query_id=query_id, idx=idx, token=token, token_idx=token_idx
    )


@app.get("/full_image")
async def full_image(docid: str, query_id: str, idx: int):
    """Return the full-quality result image as an inline <img> element.

    Disk-cached by (query_id, idx); on a miss the base64 payload is
    fetched from Vespa and persisted as a jpg for later requests.
    """
    img_path = IMG_DIR / f"{query_id}_{idx}.jpg"
    if os.path.exists(img_path):
        # Cache hit: re-encode the stored jpg bytes for the data URI.
        with open(img_path, "rb") as f:
            image_data = base64.b64encode(f.read()).decode("utf-8")
    else:
        image_data = await vespa_app.get_full_image_from_vespa(docid)
        # Vespa returns a base64 string; decode once so the cache holds raw jpg.
        with open(img_path, "wb") as f:
            f.write(base64.b64decode(image_data))
        print(f"Full image saved to disk for query_id: {query_id}, idx: {idx}")
    return Img(
        src=f"data:image/jpeg;base64,{image_data}",
        alt="something",
        cls="result-image w-full h-full object-contain",
    )


@rt("/suggestions")
async def get_suggestions(request):
    """Autocomplete endpoint: return suggestions for the typed prefix."""
    query = request.query_params.get("query", "").lower().strip()
    if not query:
        return JSONResponse({"suggestions": []})
    suggestions = await vespa_app.get_suggestions(query)
    if len(suggestions) > 0:
        return JSONResponse({"suggestions": suggestions})
    return JSONResponse({"suggestions": []})


async def message_generator(query_id: str, query: str):
    """SSE generator for the chat pane.

    Waits up to 10 s for the first `num_images` result images to appear
    on disk, then streams a Gemini answer about them as server-sent
    events, HTML-escaping newlines so the SSE frames stay intact.

    Bug fixed: the original rescan loop re-opened and re-appended images
    that were already collected on a previous 0.2 s pass, so the model
    could receive duplicate images (and the count could be satisfied by
    duplicates alone). Track loaded indices to append each image once.
    """
    images = []
    loaded = set()  # indices already opened, so rescans don't duplicate
    num_images = 3  # Number of images before firing chat request
    max_wait = 10  # seconds
    start_time = time.time()
    # Check if full images are ready on disk
    while len(images) < num_images and time.time() - start_time < max_wait:
        for idx in range(num_images):
            if idx in loaded:
                continue
            if not os.path.exists(IMG_DIR / f"{query_id}_{idx}.jpg"):
                print(
                    f"Message generator: Full image not ready for query_id: {query_id}, idx: {idx}"
                )
                continue
            else:
                print(
                    f"Message generator: image ready for query_id: {query_id}, idx: {idx}"
                )
                images.append(Image.open(IMG_DIR / f"{query_id}_{idx}.jpg"))
                loaded.add(idx)
        await asyncio.sleep(0.2)
    # yield message with number of images ready
    yield f"event: message\ndata: Generating response based on {len(images)} images.\n\n"
    if not images:
        yield "event: message\ndata: I am sorry, I do not have enough information in the image to answer your question.\n\n"
        yield "event: close\ndata: \n\n"
        return

    # If newlines are present in the response, the connection will be closed.
    def replace_newline_with_br(text):
        return text.replace("\n", "<br>")

    response_text = ""
    async for chunk in await gemini_model.generate_content_async(
        images + ["\n\n Query: ", query], stream=True
    ):
        if chunk.text:
            response_text += chunk.text
            response_text = replace_newline_with_br(response_text)
            yield f"event: message\ndata: {response_text}\n\n"
            await asyncio.sleep(0.1)
    yield "event: close\ndata: \n\n"


@app.get("/get-message")
async def get_message(query_id: str, query: str):
    """SSE endpoint backing the chat pane; streams message_generator."""
    generator = message_generator(query_id=query_id, query=query)
    return StreamingResponse(generator, media_type="text/event-stream")


@rt("/app")
def get():
    """Debug page showing which Vespa instance the app talks to."""
    status = P(f"Connected to Vespa at {vespa_app.url}")
    return Layout(Main(Div(status, cls="p-4")))


if __name__ == "__main__":
    # ModelManager.get_instance()  # Initialize once at startup
    # Auto-reload is opt-in via the HOT_RELOAD env var ("true"/"false").
    HOT_RELOAD = os.getenv("HOT_RELOAD", "False").lower() == "true"
    print(f"Starting app with hot reload: {HOT_RELOAD}")
    serve(port=7860, reload=HOT_RELOAD)