import datetime
import json
import os
import uuid
import hashlib
import pickle

import gradio as gr
import numpy as np
import pandas as pd
import spaces
import torch
from swanson_style_prompt import generate_swanson_style_prompt, get_json_schema
from huggingface_hub import InferenceClient
from sentence_transformers import SentenceTransformer

from arxiv_stuff import ARXIV_CATEGORIES_FLAT
from dataset_utils import DatasetManager

# Get HF_TOKEN from environment variables
HF_TOKEN = os.getenv("HF_TOKEN")

# Check if using persistent storage
persistent_storage = os.path.exists("/data")
if persistent_storage:
    # Use persistent storage
    print("Using persistent storage")
    data_path = "/data"
else:
    # Use local storage
    print("Using local storage")
    data_path = "./data"
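
# Everything the app writes (the query cache, the query/results log, and the
# flagged feedback CSVs) lives under data_path, so it persists across restarts
# when the Space has persistent storage mounted at /data.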

# Embedding model details
embedding_model_name = "nomadicsynth/research-compass-arxiv-abstracts-embedding-model"
embedding_model_revision = "2025-01-28_23-06-17-1epochs-12batch-32eval-512embed-final"

# Analysis model details

# Settings for Llama-3.3-70B-Instruct
# reasoning_model_id = "meta-llama/Llama-3.3-70B-Instruct"
reasoning_model_id = "mistralai/Mistral-7B-Instruct-v0.3"
max_length = 1024 * 4
temperature = None
top_p = None
presence_penalty = None

# Settings for QwQ-32B
# reasoning_model_id = "Qwen/QwQ-32B"
# reasoning_start_tag = "<think>"
# reasoning_end_tag = "</think>"
# max_length = 1024 * 4
# temperature = 0.6
# top_p = 0.95
# presence_penalty = 0.1
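
# Whichever block is active, these sampling settings are passed straight to the
# chat completion call in generate(); values left as None fall back to the
# inference provider's defaults.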

# Global variables
dataset = None
embedding_model = None
reasoning_model = None

# Define a cache file path
cache_file = os.path.join(data_path, "query_cache.pkl")

# Load cache from file if it exists
if os.path.exists(cache_file):
    with open(cache_file, "rb") as f:
        query_cache = pickle.load(f)
else:
    query_cache = {}
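# The cache maps sha256(normalized abstract) -> list of result dicts (see
# find_synergistic_papers); it is rewritten to disk via save_cache() whenever a
# new query is answered.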

def hash_query(query: str) -> str:
    """Generate a unique hash for the query."""
    return hashlib.sha256(query.encode("utf-8")).hexdigest()

def save_cache():
    """Save the cache to a file."""
    with open(cache_file, "wb") as f:
        pickle.dump(query_cache, f)

def init_embedding_model(
    model_name_or_path: str, model_revision: str = None, hf_token: str = None
) -> SentenceTransformer:
    """
    Initialize the embedding model with the specified model name or path and revision.
    Args:
        model_name_or_path (str): The name or path of the model.
        model_revision (str): The revision of the model.
        hf_token (str): The Hugging Face token for authentication.
    Returns:
        SentenceTransformer: The initialized embedding model.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    embedding_model = SentenceTransformer(
        model_name_or_path,
        revision=model_revision,
        token=hf_token,
        device=device,
    )

    return embedding_model


@spaces.GPU
def embed_text(text: str | list[str]) -> np.ndarray:
    """
    Generate embeddings for the given text using the embedding model.
    Args:
        text (str | list[str]): The text or list of texts to embed.
    Returns:
        np.ndarray: The generated embeddings (unit-normalized).
    """
    global embedding_model

    # Strip any leading/trailing whitespace
    text = text.strip() if isinstance(text, str) else [t.strip() for t in text]
    embeddings = embedding_model.encode(text, normalize_embeddings=True)  # Normalized so FAISS inner-product scores behave like cosine similarity
    return embeddings


def init_reasoning_model(model_name: str) -> InferenceClient:
    global reasoning_model
    reasoning_model = InferenceClient(
        model=model_name,
        provider="hf-inference",
        api_key=HF_TOKEN,
    )
    return reasoning_model


def generate(messages: list[dict[str, str]]) -> str:
    """
    Generate a response to a list of messages.

    Args:
        messages: A list of message dictionaries with a "role" and "content" key.

    Returns:
        The generated response as a string.
    """
    global reasoning_model

    system_message = {
        "role": "system",
        "content": "You are an expert in evaluating connections between research papers.",
    }

    messages.insert(0, system_message)

    response_schema = get_json_schema()

    response_format = {
        "type": "json",
        "value": response_schema,
    }

    result = reasoning_model.chat.completions.create(
        messages=messages,
        max_tokens=max_length,
        temperature=temperature,
        presence_penalty=presence_penalty,
        response_format=response_format,
        top_p=top_p,
    )

    output = result.choices[0].message.content.strip()
    return output
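
# With the JSON grammar above, the returned string is expected to parse into
# something shaped roughly like the following (field names as consumed by
# analyse_abstracts below; the authoritative schema comes from get_json_schema()):
#   {"bridge_exists": true, "bridge_concept": "...",
#    "bridge_explanation": "...", "hypothesis": "..."}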


def analyse_abstracts(query_abstract: str, compare_abstract: dict) -> str:
    """Analyze the relationship between two abstracts and return formatted analysis"""
    global reasoning_model
    # Check if the compare_abstract is valid
    if not isinstance(compare_abstract, dict) or "abstract" not in compare_abstract:
        return "Invalid compare_abstract format. Expected a dictionary with 'abstract' key."
    if not query_abstract or not compare_abstract["abstract"]:
        return "Invalid input. Please provide both query_abstract and compare_abstract."
    # Check if the query_abstract is a string
    if not isinstance(query_abstract, str):
        return "Invalid query_abstract format. Expected a string."
    # Check if the compare_abstract is a string
    if not isinstance(compare_abstract["abstract"], str):
        return "Invalid compare_abstract format. Expected a string."
    # Check if the query_abstract is empty
    if not query_abstract.strip():
        return "Invalid query_abstract format. Expected a non-empty string."
    # Check if the compare_abstract is empty
    if not compare_abstract["abstract"].strip():
        return "Invalid compare_abstract format. Expected a non-empty string."

    messages = generate_swanson_style_prompt(query_abstract, compare_abstract["abstract"])

    # Generate analysis
    try:
        output = generate(messages)
    except Exception as e:
        return f"Error: {e}"

    # Parse the JSON output
    try:
        output = json.loads(output)
    except Exception as e:
        return f"Error: {e}"

    # Format the output as markdown
    formatted_output = "# Connection Analysis\n"
    if "bridge_exists" in output and output["bridge_exists"] is False:
        formatted_output += "There is no bridge between the two papers."
        formatted_output += "\n## Explanation\n" + output.get("bridge_explanation", "No explanation provided.")
    elif "bridge_exists" in output and output["bridge_exists"] is True:
        formatted_output += "## Bridge Concept\n" + output.get("bridge_concept", "Unknown")
        formatted_output += "\n## Explanation\n" + output.get("bridge_explanation", "No explanation provided.")
        formatted_output += "\n## Hypothesis\n" + output.get("hypothesis", "No hypothesis provided.")
    else:
        # `output` is a parsed dict at this point, so serialize it for display
        formatted_output = "Invalid output format. Please check the model's response: " + json.dumps(output)

    return formatted_output


# arXiv Embedding Dataset Details
# DatasetDict({
#     train: Dataset({
#         features: ['id', 'submitter', 'authors', 'title', 'comments', 'journal-ref', 'doi', 'report-no', 'categories', 'license', 'abstract', 'update_date', 'embedding', 'timestamp', 'embedding_model'],
#         num_rows: 2689088
#     })
# })
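
# find_synergistic_papers below assumes DatasetManager has already attached a
# FAISS index to the "embedding" column of the train split (see the setup in
# __main__); the query embedding is unit-normalized, so the FAISS scores are
# reported as cosine-style "synergy_score" values.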


def log_query_and_results(query_id: str, query: str, results: list[dict], cache_hit: bool = False):
    """Log the query and results to a file, including whether it was a cache hit."""
    log_entry = {
        "timestamp": datetime.datetime.now().isoformat(),
        "query_id": query_id,
        "query": query,
        "results": results,
        "cache_hit": cache_hit,
    }
    log_file = os.path.join(data_path, "query_results_log.jsonl")
    with open(log_file, "a") as f:
        f.write(json.dumps(log_entry) + "\n")

    # Print a short summary of the log entry with timestamp
    cache_status = "Cache Hit" if cache_hit else "Cache Miss"
    print(f"[{log_entry['timestamp']}] Query ID: {query_id}, Results Count: {len(results)}, Status: {cache_status}")


def find_synergistic_papers(abstract: str, limit=25) -> list[dict]:
    """Find papers synergistic with the given abstract using FAISS with cosine similarity"""
    global dataset

    # Generate a unique ID for the query
    query_id = str(uuid.uuid4())

    # Normalize the abstract for cosine similarity
    abstract = abstract.replace("\n", " ")
    abstract = " ".join(abstract.split())
    abstract = abstract.strip()
    if not abstract:
        raise ValueError("Abstract is empty. Please provide a valid abstract.")

    # Hash the query to use as a cache key
    query_hash = hash_query(abstract)

    # Check if the query result is in the cache
    if query_hash in query_cache:
        print("Cache hit for query")
        log_query_and_results(query_id, abstract, query_cache[query_hash], cache_hit=True)  # Log cache hit details
        return query_cache[query_hash]

    # Generate embedding for the query abstract
    abstract_embedding = embed_text(abstract)

    # Access the dataset's train split from the DatasetManager instance
    train_dataset = dataset.dataset["train"]

    # Search for similar papers using FAISS
    scores, examples = train_dataset.get_nearest_examples("embedding", abstract_embedding, k=limit)

    papers = []
    for i in range(len(scores)):
        paper_dict = {
            "id": examples["id"][i],
            "title": examples["title"][i],
            "authors": examples["authors"][i],
            "categories": examples["categories"][i],
            "abstract": examples["abstract"][i],
            "update_date": examples["update_date"][i],
            "synergy_score": float(scores[i]),
        }
        papers.append(paper_dict)

    # Log the query and results
    log_query_and_results(query_id, abstract, papers)

    # Store the result in the cache
    query_cache[query_hash] = papers
    save_cache()

    return papers


def format_search_results_json(abstract: str) -> str:
    """Format search results as JSON for display"""
    try:
        papers = find_synergistic_papers(abstract, limit=10)
        json_output = json.dumps(papers, indent=2)
    except ValueError as e:
        json_output = json.dumps({"error": str(e)}, indent=2)

    return json_output
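
# The JSON variant above backs the hidden "find_synergistic_papers" API endpoint
# wired up in create_interface(); the DataFrame variant below feeds the UI table.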


def format_search_results(abstract: str) -> tuple[pd.DataFrame, list[dict]]:
    """Format search results as a DataFrame for display"""
    # Find papers synergistic with the given abstract
    # papers = embedding_model.find_synergistic_papers(abstract)
    try:
        papers = find_synergistic_papers(abstract)
    except ValueError as e:
        error_message = str(e)
        df = pd.DataFrame(
            [{"Error": error_message}]
        )
        return df, []

    # Convert to DataFrame for display
    df = pd.DataFrame(
        [
            {
                "Title": p["title"],
                "Authors": p["authors"][:50] + "..." if len(p["authors"]) > 50 else p["authors"],
                "Categories": p["categories"],
                "Date": p["update_date"],
                "Match Score": f"{int(p['synergy_score'] * 100)}%",
                "ID": p["id"],  # Hidden column for reference
            }
            for p in papers
        ]
    )

    return df, papers  # Return both DataFrame and original data


def format_paper_as_markdown(paper: dict) -> str:
    # Convert category codes to full names, handling unknown categories
    subjects = []
    for subject in paper["categories"].split():
        if subject in ARXIV_CATEGORIES_FLAT:
            subjects.append(ARXIV_CATEGORIES_FLAT[subject])
        else:
            subjects.append(f"Unknown Category ({subject})")

    paper["title"] = paper["title"].replace("\n", " ").strip()
    paper["authors"] = paper["authors"].replace("\n", " ").strip()

    return f"""# {paper["title"]}
### {paper["authors"]}
#### {', '.join(subjects)} | {paper["update_date"]} | **Score**: {int(paper['synergy_score'] * 100)}%
**[arxiv:{paper["id"]}](https://arxiv.org/abs/{paper["id"]})** - [PDF](https://arxiv.org/pdf/{paper["id"]})<br>

{paper["abstract"]}
"""


latex_delimiters = [
    {"left": "$$", "right": "$$", "display": True},
    {"left": "$", "right": "$", "display": False},
    # {"left": "\\(", "right": "\\)", "display": False},
    # {"left": "\\begin{equation}", "right": "\\end{equation}", "display": True},
    # {"left": "\\begin{align}", "right": "\\end{align}", "display": True},
    # {"left": "\\begin{alignat}", "right": "\\end{alignat}", "display": True},
    # {"left": "\\begin{gather}", "right": "\\end{gather}", "display": True},
    # {"left": "\\begin{CD}", "right": "\\end{CD}", "display": True},
    # {"left": "\\[", "right": "\\]", "display": True},
    # {"left": "\\underline{", "right": "}", "display": False},
    # {"left": "\\textit{", "right": "}", "display": False},
    # {"left": "\\textit{", "right": "}", "display": False},
    # {"left": "{", "right": "}", "display": False},
]
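# Only $...$ and $$...$$ are enabled; the list is passed to the Markdown and
# Dataframe components below so LaTeX in titles and abstracts renders correctly.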


def create_interface():
    # Create CSV loggers
    analysis_logger = gr.CSVLogger()
    paper_match_logger = gr.CSVLogger()

    with gr.Blocks(
        css="""
    .cell-menu-button {
        display: none;
    }"""
    ) as demo:
        with gr.Tabs():
            with gr.Tab("Home"):
                gr.HTML(
                    """
                    <div style="text-align: center; margin-bottom: 1rem">
                        <h1>Inkling</h1>
                        <p>Discover papers with deep conceptual connections to your research</p>
                        <p>An experiment in AI-assisted research discovery and insight generation</p>
                    </div>
                """
                )

                with gr.Accordion(label="Instructions and Privacy Policy", open=False):
                    gr.Markdown(
                        """
                        This tool helps you uncover research papers with **deep, meaningful connections** to your ideas.
                        It uses AI to go beyond keyword or semantic similarity, analyzing how papers relate **conceptually** and **contextually**,
                        even when the surface topics differ.

                        The focus is on surfacing *novel insights*: connections that may not be obvious at a glance,
                        but could **spark new perspectives**, **deepen understanding**, or **highlight relationships that might otherwise be overlooked**.

                        It's designed to act more like a research collaborator than a search engine, helping you explore conceptual bridges and
                        unexpected pathways in the literature.

                        Please ask any questions or provide feedback on the tool to help us improve it by starting a discussion on
                        the [Community Tab](https://huggingface.co/spaces/nomadicsynth/inkling/discussions).

                        **Privacy Policy**: Each query and the results returned will be logged for research and development purposes.
                        Additionally, the abstract or research description you provide will be included in any feedback
                        you submit and may be used to improve the model, and published in a public dataset.
                        Please ensure that you have the right to share this information.
                        By submitting a query and/or feedback, you agree to the use of this information for research purposes.
                        Do not include personally identifiable, proprietary, or sensitive information.
                        """
                    )
                    gr.Markdown(
                        """
                        1. **Enter Abstract**: Paste an abstract or describe your research question or idea in the text box.
                        2. **Find Related Papers**: Click the button to explore conceptually related research.
                        3. **Select a Paper**: Click on a row in the results table to view more details.
                        4. **Analyze Connection**: Click the analysis button to explore the potential connection between the papers.
                        5. **Insight Analysis**: Review the model's reasoning about how and why these papers may relate meaningfully.
                        """
                    )

                abstract_input = gr.Textbox(
                    label="Paper Abstract or Description",
                    placeholder="Paste an abstract or describe research details...",
                    lines=8,
                    key="abstract",
                )
                search_btn = gr.Button("Find Related Papers", variant="primary")

                # Store full paper data
                paper_data_state = gr.State([])

                # Store query abstract
                query_abstract_state = gr.State("")

                # Store selected paper
                selected_paper_state = gr.State(None)

                # Use Dataframe for results
                results_df = gr.Dataframe(
                    headers=["Title", "Authors", "Categories", "Date", "Match Score", "ID"],
                    datatype=["markdown", "markdown", "str", "date", "str", "str"],
                    latex_delimiters=latex_delimiters,
                    label="Related Papers",
                    interactive=False,
                    wrap=False,
                    line_breaks=False,
                    column_widths=["40%", "20%", "20%", "10%", "10%", "0%"],  # Hide ID column
                    key="results",
                )

                with gr.Row():
                    with gr.Column(scale=1):
                        paper_details_output = gr.Markdown(
                            value="# Paper Details",
                            label="Paper Details",
                            latex_delimiters=latex_delimiters,
                            show_copy_button=True,
                            key="paper_details",
                        )
                        analyze_btn = gr.Button("Analyze Connection", variant="primary", visible=False)
                        with gr.Accordion(label="Feedback and Flagging", open=True, visible=False) as paper_feedback_accordion:
                            gr.Markdown(
                                """
                                Please provide feedback on the relevance of this paper to your input.
                                This helps us improve how well the system identifies meaningful research connections.
                                """
                            )
                            paper_feedback = gr.Radio(
                                ["👍 Good Match", "👎 Poor Match"],
                                label="Is this paper meaningfully related to your query?",
                            )
                            paper_expert = gr.Checkbox(label="I am an expert in this field", value=False)
                            paper_comment = gr.Textbox(label="Additional feedback on this match (optional)")
                            flag_paper_btn = gr.Button("Submit Paper Feedback")

                    with gr.Column(scale=1):
                        analysis_output = gr.Markdown(
                            value="# Connection Analysis",
                            label="Connection Analysis",
                            latex_delimiters=latex_delimiters,
                            show_copy_button=True,
                            key="analysis_output",
                        )
                        with gr.Accordion(
                            label="Feedback and Flagging", open=True, visible=False
                        ) as analysis_feedback_accordion:
                            gr.Markdown(
                                f"""
                                This connection analysis was generated by an LLM, `{reasoning_model_id}`.
                                Please provide feedback on the quality of the analysis.
                                This helps us improve how well the system identifies meaningful research connections.
                                - **Helpful**: The explanation clarifies the connection between the papers. The connection is plausible and relevant.
                                - **Not Helpful**: The explanation is unclear or incorrect. The connection is spurious or irrelevant.
                                """
                            )
                            analysis_feedback = gr.Radio(
                                ["👍 Helpful", "👎 Not Helpful"],
                                label="Was this explanation useful in understanding the connection?",
                            )
                            analysis_expert = gr.Checkbox(label="I am an expert in this field", value=False)
                            analysis_comment = gr.Textbox(label="Additional feedback on the analysis (optional)")
                            flag_analysis_btn = gr.Button("Submit Analysis Feedback")

                # Hidden UI elements for API endpoint
                abstract_input_hidden = gr.Textbox(visible=False, label="Abstract Input", key="abstract_hidden")
                synergistic_papers_output = gr.Textbox(
                    visible=False, label="Synergistic Papers", key="synergistic_papers_output"
                )
                search_btn_hidden = gr.Button(visible=False, key="search_hidden")

                # API endpoint for find_synergistic_papers
                search_btn_hidden.click(
                    format_search_results_json,
                    inputs=[abstract_input_hidden],
                    outputs=[synergistic_papers_output],
                    api_name="find_synergistic_papers",
                )

                # Set up logging directories
                flagged_paper_matches_path = os.path.join(data_path, "flagged_paper_matches")
                flagged_analyses_path = os.path.join(data_path, "flagged_analyses")
                os.makedirs(flagged_paper_matches_path, exist_ok=True)
                os.makedirs(flagged_analyses_path, exist_ok=True)

                # Set up loggers
                paper_match_logger.setup(
                    [abstract_input, paper_details_output, paper_feedback, paper_expert, paper_comment],
                    flagged_paper_matches_path,
                )
                analysis_logger.setup(
                    [
                        abstract_input,
                        paper_details_output,
                        analysis_output,
                        analysis_feedback,
                        analysis_expert,
                        analysis_comment,
                    ],
                    flagged_analyses_path,
                )

                # Display paper details when row is selected
                def on_select(evt: gr.SelectData, papers, query):
                    selected_index = evt.index[0]  # Get the row index
                    selected = papers[selected_index]

                    # Format paper details
                    details_md = format_paper_as_markdown(selected)

                    return details_md, selected

                # Connect search button to the search function
                search_btn.click(
                    format_search_results,
                    inputs=[abstract_input],
                    outputs=[results_df, paper_data_state],
                    api_name="search",
                ).then(
                    lambda x: x,  # Identity function to pass through the abstract
                    inputs=[abstract_input],
                    outputs=[query_abstract_state],
                    api_name=False,
                ).then(
                    lambda: None,  # Reset selected paper
                    outputs=[selected_paper_state],
                    api_name=False,
                ).then(
                    lambda: (
                        gr.update(visible=False),
                        gr.update(visible=False),
                        gr.update(visible=False),
                    ),  # Hide analyze button and feedback accordions
                    outputs=[analyze_btn, paper_feedback_accordion, analysis_feedback_accordion],
                    api_name=False,
                ).then(
                    lambda: ("# Paper Details", "# Connection Analysis"),  # Clear previous outputs
                    outputs=[paper_details_output, analysis_output],
                    api_name=False,
                )

                # Use built-in select event from Dataframe
                results_df.select(
                    on_select,
                    inputs=[paper_data_state, query_abstract_state],
                    outputs=[paper_details_output, selected_paper_state],
                    api_name=False,
                ).then(
                    lambda: (gr.update(visible=True), gr.update(visible=True)),  # Show analyze button and feedback accordion
                    outputs=[analyze_btn, paper_feedback_accordion],
                    api_name=False,
                )

                # Connect analyze button to run analysis
                analyze_btn.click(
                    analyse_abstracts,
                    inputs=[query_abstract_state, selected_paper_state],
                    outputs=[analysis_output],
                    show_progress_on=[paper_details_output, analysis_output],
                    api_name=False,
                ).then(
                    lambda: gr.update(visible=True),  # Show feedback accordion
                    outputs=[analysis_feedback_accordion],
                    api_name=False,
                )

                # Add flagging handlers
                flag_paper_btn.click(
                    lambda *args: paper_match_logger.flag(list(args)),
                    inputs=[abstract_input, paper_details_output, paper_feedback, paper_expert, paper_comment],
                    preprocess=False,
                    api_name=False,
                )

                flag_analysis_btn.click(
                    lambda *args: analysis_logger.flag(list(args)),
                    inputs=[
                        abstract_input,
                        paper_details_output,
                        analysis_output,
                        analysis_feedback,
                        analysis_expert,
                        analysis_comment,
                    ],
                    preprocess=False,
                    api_name=False,
                )

            with gr.Tab("About"):
                with open("README.md", "r") as f:
                    about_text = f.read()
                # Remove the YAML front-matter header
                if about_text.startswith("---"):
                    about_text = about_text.split("---", 2)[2].strip()
                gr.Markdown(value=about_text, label="About")

    return demo


if __name__ == "__main__":
    # Initialize the embedding model
    embedding_model = init_embedding_model(embedding_model_name, embedding_model_revision)

    # Initialize the reasoning model
    reasoning_model = init_reasoning_model(reasoning_model_id)

    # Load dataset with FAISS index
    dataset = DatasetManager(
        embedding_model=embedding_model,
    )

    demo = create_interface()
    demo.queue(api_open=False).launch(ssr_mode=False, show_api=True)