import gradio as gr
from datasets import load_dataset
import numpy as np
from model2vec import StaticModel
from reach import Reach
from difflib import ndiff
import asyncio

# Load the model at startup
model = StaticModel.from_pretrained("minishlab/M2V_base_output")

# Default datasets, text column, and similarity threshold
default_dataset1_name = "sst2"
default_dataset1_split = "train"
default_dataset2_name = "sst2"
default_dataset2_split = "validation"
default_text_column = "sentence"
default_threshold = 0.9

# Load the default datasets at startup
ds_default1 = load_dataset(default_dataset1_name, split=default_dataset1_split)
ds_default2 = load_dataset(default_dataset2_name, split=default_dataset2_split)

def batch_iterable(iterable, batch_size):
    """Helper function to create batches from an iterable."""
    for i in range(0, len(iterable), batch_size):
        yield iterable[i:i + batch_size]

def display_word_differences(x: str, y: str) -> str:
    """Return the word-level diff between two texts, e.g. '- old + new'."""
    diff = ndiff(x.split(), y.split())
    return " ".join(word for word in diff if word.startswith(("+", "-")))

async def compute_embeddings_async(texts, batch_size, progress, desc):
    """Encode texts in batches on a worker thread, reporting progress.

    asyncio.to_thread keeps the event loop free so Gradio can stream
    status updates while the model encodes each batch.
    """
    embeddings = []
    total_batches = (len(texts) + batch_size - 1) // batch_size
    for i, batch_texts in enumerate(batch_iterable(texts, batch_size)):
        batch_embeddings = await asyncio.to_thread(model.encode, batch_texts, show_progressbar=False)
        embeddings.append(batch_embeddings)
        progress((i + 1) / total_batches, desc=desc)
        await asyncio.sleep(0)
    embedding_matrix = np.concatenate(embeddings, axis=0)
    return embedding_matrix

async def deduplicate_async(
    embedding_matrix: np.ndarray,
    threshold: float,
    batch_size: int = 1024,
    progress=None,
) -> tuple[np.ndarray, dict[int, int]]:
    """
    Deduplicate embeddings asynchronously.

    Greedily keeps the first occurrence in each near-duplicate cluster.
    Returns the indices of the retained embeddings and a mapping from each
    removed duplicate index to the index of its retained original. `progress`
    must be a Gradio-style progress callback.
    """
    progress(0, desc="Building search index...")
    reach = Reach(vectors=embedding_matrix, items=[str(i) for i in range(len(embedding_matrix))])

    # Start with every index kept, then greedily drop the near-neighbors of
    # each retained item.
    deduplicated_indices = set(range(len(embedding_matrix)))
    duplicate_to_original_mapping = {}

    progress(0, desc="Finding nearest neighbors...")
    results = await asyncio.to_thread(reach.nearest_neighbor_threshold,
                                      embedding_matrix,
                                      threshold=threshold,
                                      batch_size=batch_size,
                                      show_progressbar=False)

    total_items = len(embedding_matrix)
    for i, similar_items in enumerate(results):
        if i not in deduplicated_indices:
            continue

        similar_indices = [int(item[0]) for item in similar_items if int(item[0]) != i]

        for sim_idx in similar_indices:
            if sim_idx in deduplicated_indices:
                deduplicated_indices.remove(sim_idx)
                duplicate_to_original_mapping[sim_idx] = i

        if i % 100 == 0:
            progress(i / total_items, desc="Processing duplicates")
            await asyncio.sleep(0)

    progress(1, desc="Processing duplicates")
    return np.array(list(deduplicated_indices)), duplicate_to_original_mapping
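
# A minimal, hypothetical standalone usage of the two helpers above (outside
# Gradio, with a no-op callable standing in for gr.Progress):
#
#   texts = ["a cat sat", "a cat sat", "dogs bark"]
#   noop = lambda *args, **kwargs: None
#   emb = asyncio.run(compute_embeddings_async(texts, batch_size=64, progress=noop, desc=""))
#   kept, dup_map = asyncio.run(deduplicate_async(emb, threshold=0.9, progress=noop))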

async def perform_deduplication(
    deduplication_type,
    dataset1_name,
    dataset1_split,
    dataset1_text_column,
    dataset2_name="",
    dataset2_split="",
    dataset2_text_column="",
    threshold=default_threshold,
    progress=gr.Progress(track_tqdm=True)
):
    try:
        # Convert threshold to float
        threshold = float(threshold)

        # Initialize status message
        status = ""

        if deduplication_type == "Single dataset":
            # Load Dataset 1
            status = "Loading Dataset 1..."
            yield status, ""
            if dataset1_name == default_dataset1_name and dataset1_split == default_dataset1_split:
                ds = ds_default1
            else:
                ds = load_dataset(dataset1_name, split=dataset1_split)

            # Extract texts
            status = "Extracting texts from Dataset 1..."
            yield status, ""
            texts = [example[dataset1_text_column] for example in ds]

            # Compute embeddings
            status = "Computing embeddings for Dataset 1..."
            yield status, ""
            embedding_matrix = await compute_embeddings_async(texts, batch_size=64, progress=progress, desc="Computing embeddings for Dataset 1")

            # Deduplicate
            status = "Deduplicating embeddings..."
            yield status, ""
            deduplicated_indices, duplicate_to_original_mapping = await deduplicate_async(
                embedding_matrix, threshold, progress=progress
            )

            # Prepare the results
            num_duplicates = len(duplicate_to_original_mapping)
            num_total = len(texts)
            num_deduplicated = len(deduplicated_indices)

            result_text = f"**Total documents:** {num_total}\n"
            result_text += f"**Number of duplicates found:** {num_duplicates}\n"
            result_text += f"**Number of unique documents after deduplication:** {num_deduplicated}\n\n"

            # Show deduplicated examples
            if num_duplicates > 0:
                result_text += "**Examples of duplicates found:**\n\n"
                num_examples = min(5, num_duplicates)
                for duplicate_idx, original_idx in list(duplicate_to_original_mapping.items())[:num_examples]:
                    original_text = texts[original_idx]
                    duplicate_text = texts[duplicate_idx]
                    differences = display_word_differences(original_text, duplicate_text)
                    result_text += f"**Original text:**\n{original_text}\n\n"
                    result_text += f"**Duplicate text:**\n{duplicate_text}\n\n"
                    result_text += f"**Differences:**\n{differences}\n"
                    result_text += "-" * 50 + "\n\n"
            else:
                result_text += "No duplicates found."

            # Final status
            status = "Deduplication completed."
            yield status, result_text

        elif deduplication_type == "Cross-dataset":
            # Cross-dataset deduplication: flag items in Dataset 2 that
            # duplicate items in Dataset 1.
            # Load Dataset 1
            status = "Loading Dataset 1..."
            yield status, ""
            if dataset1_name == default_dataset1_name and dataset1_split == default_dataset1_split:
                ds1 = ds_default1
            else:
                ds1 = load_dataset(dataset1_name, split=dataset1_split)

            # Load Dataset 2
            status = "Loading Dataset 2..."
            yield status, ""
            if dataset2_name == default_dataset2_name and dataset2_split == default_dataset2_split:
                ds2 = ds_default2
            else:
                ds2 = load_dataset(dataset2_name, split=dataset2_split)

            # Extract texts from Dataset 1
            status = "Extracting texts from Dataset 1..."
            yield status, ""
            texts1 = [example[dataset1_text_column] for example in ds1]

            # Extract texts from Dataset 2
            status = "Extracting texts from Dataset 2..."
            yield status, ""
            texts2 = [example[dataset2_text_column] for example in ds2]

            # Compute embeddings for Dataset 1
            status = "Computing embeddings for Dataset 1..."
            yield status, ""
            embedding_matrix1 = await compute_embeddings_async(texts1, batch_size=64, progress=progress, desc="Computing embeddings for Dataset 1")

            # Compute embeddings for Dataset 2
            status = "Computing embeddings for Dataset 2..."
            yield status, ""
            embedding_matrix2 = await compute_embeddings_async(texts2, batch_size=64, progress=progress, desc="Computing embeddings for Dataset 2")

            # Deduplicate across datasets
            status = "Deduplicating embeddings across datasets..."
            yield status, ""
            duplicate_indices_in_ds2, duplicate_to_original_mapping = await deduplicate_across_datasets_async(
                embedding_matrix1, embedding_matrix2, threshold, progress=progress
            )

            num_duplicates = len(duplicate_indices_in_ds2)
            num_total_ds2 = len(texts2)
            num_unique_ds2 = num_total_ds2 - num_duplicates

            result_text = f"**Total documents in {dataset2_name}/{dataset2_split}:** {num_total_ds2}\n"
            result_text += f"**Number of duplicates found in {dataset2_name}/{dataset2_split}:** {num_duplicates}\n"
            result_text += f"**Number of unique documents in {dataset2_name}/{dataset2_split} after deduplication:** {num_unique_ds2}\n\n"

            # Show deduplicated examples
            if num_duplicates > 0:
                result_text += "**Examples of duplicates found in Dataset 2:**\n\n"
                num_examples = min(5, num_duplicates)
                for duplicate_idx in duplicate_indices_in_ds2[:num_examples]:
                    original_idx = duplicate_to_original_mapping[duplicate_idx]
                    original_text = texts1[original_idx]
                    duplicate_text = texts2[duplicate_idx]
                    differences = display_word_differences(original_text, duplicate_text)
                    result_text += f"**Original text (Dataset 1):**\n{original_text}\n\n"
                    result_text += f"**Duplicate text (Dataset 2):**\n{duplicate_text}\n\n"
                    result_text += f"**Differences:**\n{differences}\n"
                    result_text += "-" * 50 + "\n\n"
            else:
                result_text += "No duplicates found."

            # Final status
            status = "Deduplication completed."
            yield status, result_text

    except Exception as e:
        yield f"An error occurred: {e}", ""
        raise

async def deduplicate_across_datasets_async(
    embedding_matrix_1: np.ndarray,
    embedding_matrix_2: np.ndarray,
    threshold: float,
    batch_size: int = 1024,
    progress=None,
) -> tuple[list[int], dict[int, int]]:
    """
    Deduplicate embeddings across two datasets asynchronously.

    Returns the indices of Dataset 2 items flagged as near-duplicates of
    Dataset 1 items, and a mapping from each duplicate index to the index of
    its Dataset 1 original. `progress` must be a Gradio-style callback.
    """
    progress(0, desc="Building search index from Dataset 1...")
    reach = Reach(vectors=embedding_matrix_1, items=[str(i) for i in range(len(embedding_matrix_1))])

    duplicate_indices_in_ds2 = []       # indices in Dataset 2 flagged as duplicates
    duplicate_to_original_mapping = {}  # Dataset 2 index -> Dataset 1 original index

    progress(0, desc="Finding nearest neighbors between datasets...")
    results = await asyncio.to_thread(reach.nearest_neighbor_threshold,
                                      embedding_matrix_2,
                                      threshold=threshold,
                                      batch_size=batch_size,
                                      show_progressbar=False)

    total_items = len(embedding_matrix_2)
    for i, similar_items in enumerate(results):
        similar_indices = [int(item[0]) for item in similar_items if item[1] >= threshold]

        if similar_indices:
            duplicate_indices_in_ds2.append(i)
            duplicate_to_original_mapping[i] = similar_indices[0]

        if i % 100 == 0:
            progress(i / total_items, desc="Processing duplicates across datasets")
            await asyncio.sleep(0)

    progress(1, desc="Processing duplicates across datasets")
    return duplicate_indices_in_ds2, duplicate_to_original_mapping
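
# A minimal, hypothetical UI wiring for perform_deduplication. Component
# names, layout, and launch settings below are assumptions, not the original
# Space's interface.
with gr.Blocks() as demo:
    gr.Markdown("# Semantic deduplication with Model2Vec")
    deduplication_type = gr.Radio(
        choices=["Single dataset", "Cross-dataset"],
        value="Single dataset",
        label="Deduplication type",
    )
    dataset1_name = gr.Textbox(value=default_dataset1_name, label="Dataset 1 name")
    dataset1_split = gr.Textbox(value=default_dataset1_split, label="Dataset 1 split")
    dataset1_text_column = gr.Textbox(value=default_text_column, label="Dataset 1 text column")
    dataset2_name = gr.Textbox(value=default_dataset2_name, label="Dataset 2 name")
    dataset2_split = gr.Textbox(value=default_dataset2_split, label="Dataset 2 split")
    dataset2_text_column = gr.Textbox(value=default_text_column, label="Dataset 2 text column")
    threshold = gr.Slider(minimum=0.0, maximum=1.0, value=default_threshold, label="Similarity threshold")
    compute_button = gr.Button("Deduplicate")
    status_output = gr.Markdown()
    result_output = gr.Markdown()

    # The async generator yields (status, result) pairs, which Gradio streams
    # into the two Markdown outputs.
    compute_button.click(
        fn=perform_deduplication,
        inputs=[
            deduplication_type,
            dataset1_name, dataset1_split, dataset1_text_column,
            dataset2_name, dataset2_split, dataset2_text_column,
            threshold,
        ],
        outputs=[status_output, result_output],
    )

demo.launch()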