import gradio as gr
import spaces
import torch
import pandas as pd
import plotly.graph_objects as go
from datasets import load_dataset
from sentence_transformers import SentenceTransformer
from sentence_transformers.evaluation import InformationRetrievalEvaluator, SequentialEvaluator
from sentence_transformers.util import cos_sim

# Check for GPU support and configure the device accordingly
device = "cuda" if torch.cuda.is_available() else "cpu"
zero = torch.zeros(1, device=device)  # Small tensor to confirm device placement
print(f"Device being used: {zero.device}")


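# On Hugging Face ZeroGPU Spaces, @spaces.GPU requests a GPU for each call to the
# decorated function and releases it afterwards; outside Spaces it is a no-op.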
@spaces.GPU
def evaluate_model(model_id, num_questions):
    model = SentenceTransformer(model_id, device=device)
    matryoshka_dimensions = [768, 512, 256, 128, 64]
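    # Matryoshka-style evaluation: each embedding is truncated to its first k
    # dimensions (via truncate_dim below) and retrieval quality is re-measured,
    # showing how gracefully performance degrades from 768 down to 64 dims.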

    # Prepare datasets (Load entire split, then select num_questions)
    datasets_info = [
        {
            "name": "Financial",
            "dataset_id": "Omartificial-Intelligence-Space/Arabic-finanical-rag-embedding-dataset",
            "split": "train",  # Only train split
            "columns": ("question", "context"),
            "sample_size": num_questions
        },
        {
            "name": "MLQA",
            "dataset_id": "google/xtreme",
            "subset": "MLQA.ar.ar",
            "split": "validation",  # Only validation split
            "columns": ("question", "context"),
            "sample_size": num_questions
        },
        {
            "name": "ARCD",
            "dataset_id": "hsseinmz/arcd",
            "split": "train",  # Only train split
            "columns": ("question", "context"),
            "sample_size": num_questions,
            "last_rows": True  # Take the last num_questions rows
        }
    ]
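    # The three benchmarks above cover domain-specific financial text, long
    # contexts (MLQA Arabic), and short contexts (ARCD).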

    evaluation_results = []
    scores_by_dataset = {}

    for dataset_info in datasets_info:
        # Load the full dataset split and limit it afterward
        if "subset" in dataset_info:
            dataset = load_dataset(dataset_info["dataset_id"], dataset_info["subset"], split=dataset_info["split"])
        else:
            dataset = load_dataset(dataset_info["dataset_id"], split=dataset_info["split"])

        # Select the required number of rows, guarding against sample sizes
        # larger than the split
        n = min(dataset_info["sample_size"], len(dataset))
        if dataset_info.get("last_rows"):
            dataset = dataset.select(range(len(dataset) - n, len(dataset)))  # Take the last n rows
        else:
            dataset = dataset.select(range(n))  # Take the first n rows

        # Rename columns to 'anchor' and 'positive'
        dataset = dataset.rename_column(dataset_info["columns"][0], "anchor")
        dataset = dataset.rename_column(dataset_info["columns"][1], "positive")

        # Add an "id" column if the dataset does not already have one
        if "id" not in dataset.column_names:
            dataset = dataset.add_column("id", list(range(len(dataset))))

        # Prepare queries and corpus
        corpus = dict(zip(dataset["id"], dataset["positive"]))
        queries = dict(zip(dataset["id"], dataset["anchor"]))

        # Each query has exactly one relevant document: the context it was paired with
        relevant_docs = {q_id: [q_id] for q_id in queries}
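        # e.g., ids 0..2 map to {0: [0], 1: [1], 2: [2]}, so a query is scored as
        # correct only when it retrieves its own paired context.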

        matryoshka_evaluators = []
        for dim in matryoshka_dimensions:
            ir_evaluator = InformationRetrievalEvaluator(
                queries=queries,
                corpus=corpus,
                relevant_docs=relevant_docs,
                name=f"dim_{dim}",
                truncate_dim=dim,
                score_functions={"cosine": cos_sim}
            )
            matryoshka_evaluators.append(ir_evaluator)

        evaluator = SequentialEvaluator(matryoshka_evaluators)
        results = evaluator(model)
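        # The evaluators are named f"dim_{dim}", so the flattened results dict
        # exposes metrics under keys such as "dim_768_cosine_ndcg@10".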

        scores_ndcg = []
        scores_mrr = []
        for dim in matryoshka_dimensions:
            ndcg_key = f"dim_{dim}_cosine_ndcg@10"
            mrr_key = f"dim_{dim}_cosine_mrr@10"
            ndcg_score = results.get(ndcg_key)
            mrr_score = results.get(mrr_key)
            evaluation_results.append({
                "Dataset": dataset_info["name"],
                "Dimension": dim,
                "NDCG@10": ndcg_score,
                "MRR@10": mrr_score
            })
            scores_ndcg.append(ndcg_score)
            scores_mrr.append(mrr_score)

        # Store scores by dataset for plot creation
        scores_by_dataset[dataset_info["name"]] = {
            "NDCG@10": scores_ndcg,
            "MRR@10": scores_mrr
        }

    # Convert results to DataFrame for display
    result_df = pd.DataFrame(evaluation_results)

    # Generate bar charts for each dataset using Plotly
    charts = []
    color_scale_ndcg = '#a05195'
    color_scale_mrr = '#2f4b7c'

    for dataset_name, scores in scores_by_dataset.items():
        fig = go.Figure()
        # NDCG@10 bars
        fig.add_trace(go.Bar(
            x=[str(dim) for dim in matryoshka_dimensions],
            y=scores["NDCG@10"],
            name="NDCG@10",
            marker_color=color_scale_ndcg,
            text=[f"{score:.3f}" if score else "N/A" for score in scores["NDCG@10"]],
            textposition='auto'
        ))

        # MRR@10 bars
        fig.add_trace(go.Bar(
            x=[str(dim) for dim in matryoshka_dimensions],
            y=scores["MRR@10"],
            name="MRR@10",
            marker_color=color_scale_mrr,
            text=[f"{score:.3f}" if score else "N/A" for score in scores["MRR@10"]],
            textposition='auto'
        ))

        fig.update_layout(
            title=f"{dataset_name} Evaluation",
            xaxis_title="Embedding Dimension",
            yaxis_title="Score",
            barmode='group',  # Group bars
            template="plotly_white"
        )
        charts.append(fig)

    return result_df, charts[0], charts[1], charts[2]


# Thin wrapper that maps the Gradio inputs onto the evaluation outputs
def display_results(model_name, num_questions):
    result_df, chart1, chart2, chart3 = evaluate_model(model_name, num_questions)
    return result_df, chart1, chart2, chart3


# Gradio interface with a slider to choose the number of questions (1 to 500)
demo = gr.Interface(
    fn=display_results,
    inputs=[
        gr.Textbox(label="Enter a Hugging Face Model ID",
                   placeholder="e.g., Omartificial-Intelligence-Space/GATE-AraBert-v1"),
        gr.Slider(label="Number of Questions", minimum=1, maximum=500, step=1, value=500)
    ],
    outputs=[
        gr.Dataframe(label="Evaluation Results"),
        gr.Plot(label="Financial Dataset"),
        gr.Plot(label="MLQA Dataset"),
        gr.Plot(label="ARCD Dataset")
    ],
    title="MERAA : Matryoshka Embedding Retrieval Assessment for Arabic",
    description=(
        "Evaluate your Embedding model or any Arabic Sentence Transformer model's performance on **context and question retrieval** for Arabic datasets for Enhancing RAG (Retrieval-Augmented Generation).\n"
        "- **ARCD** evaluates short context retrieval performance.\n"
        "- **MLQA Arabic** evaluates long context retrieval performance.\n"
        "- **Arabic Financial Dataset** focuses on financial context retrieval.\n\n"
        "**Evaluation Metrics:**\n"
        "The evaluation uses **NDCG@10** and **MRR@10**, which measure how well the retrieved documents (contexts) match the query relevance.\n"
        "Higher scores indicate better performance. Embedding dimensions are reduced from 768 to 64, evaluating how well the model performs with fewer dimensions."
    ),
    theme="default",
    live=False,
    css="footer {visibility: hidden;}"
)
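# Minimal smoke test (a sketch, bypassing the UI; the model ID and sample size
# below are illustrative):
#   df, fin_fig, mlqa_fig, arcd_fig = evaluate_model(
#       "Omartificial-Intelligence-Space/GATE-AraBert-v1", num_questions=10)
#   print(df)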

demo.launch(debug=True)

# Console credit (printed once launch() returns; debug=True blocks until the server stops)
print("\nCreated by Omar Najar | Omartificial Intelligence Space")