Lihuchen committed on
Commit 5382d3b · verified · 1 Parent(s): ad31c7e

Upload eval.py

Files changed (1)
  1. eval.py +466 -0
eval.py ADDED
@@ -0,0 +1,466 @@
import argparse
import random
import math
import numpy as np
import torch
import faiss
import torch.nn.functional as F
from torch import nn, optim, reshape, sum as tsum
from torch.utils.data import DataLoader, Dataset
from transformers import AutoTokenizer, AutoModel
from autofj.datasets import load_data
from datasets import load_dataset
from scipy.stats import pearsonr
from pytorch_lightning.callbacks import EarlyStopping
from pytorch_lightning import LightningModule, Trainer
from sklearn.cluster import KMeans
from sklearn.metrics.cluster import normalized_mutual_info_score

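# Interface note: every eval function below assumes the encoder is a callable
# of the form model(list_of_strings, device) -> torch.Tensor, returning one
# embedding per input string (see PearlSmallModel at the bottom of this file).
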

def eval_bird(model, dataset, device, batch_size=4):
    """BIRD: Pearson correlation between embedding cosine similarity and human relatedness scores."""
    text_list = []
    scores = []

    for row in dataset:
        p1, p2, score = row["term1"], row["term2"], row["relatedness score"]
        text_list.append((p1, p2))
        scores.append(score)

    cos_sim = nn.CosineSimilarity(dim=1)
    cos_sim_list = []

    for i in range(0, len(text_list), batch_size):
        batch_text_list = text_list[i:i + batch_size]
        temp1, temp2 = zip(*batch_text_list)
        input1 = model(list(temp1), device)
        input2 = model(list(temp2), device)

        sim = cos_sim(input1, input2)
        sim = (sim + 1) / 2.0  # rescale cosine from [-1, 1] to [0, 1]
        cos_sim_list.extend(sim.tolist())
    cor, _ = pearsonr(cos_sim_list, scores)
    return cor


def eval_turney(model, dataset, device, batch_size=4):
    """Turney: multiple-choice phrase relatedness; the gold label must outscore four distractors."""
    data_list = []
    for row in dataset:
        data_list.append([row["query"], row["label"], row["candidate_1"],
                          row["candidate_2"], row["candidate_3"], row["candidate_4"]])

    num_correct = 0
    for components in data_list:
        emb = encode_in_batch(model, batch_size=batch_size, text_list=components, device=device)
        emb = torch.stack(emb).cpu().detach().numpy()
        query = emb[0, :]
        matrix = emb[1:, :]  # row 0 is the gold label, rows 1-4 are distractors
        scores = np.dot(matrix, query)
        chosen = np.argmax(scores)

        if chosen == 0:
            num_correct += 1
    accuracy = num_correct / len(data_list)

    return accuracy

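
# A minimal sketch (illustrative values, not benchmark data) of the ranking
# rule used in eval_turney: the prediction counts as correct when the gold
# candidate, stored in row 0 of the candidate matrix, has the highest dot
# product with the query embedding.
def _turney_ranking_demo():
    query = np.array([1.0, 0.0])
    candidates = np.array([[0.9, 0.1],   # gold candidate
                           [0.2, 0.8]])  # distractor
    return np.argmax(candidates @ query) == 0
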

def eval_ppdb(model, dataset, device, batch_size=4):
    """PPDB: train a small probing classifier on phrase-pair embeddings and report test accuracy."""
    phrase1_list = [item["phrase_1"] for item in dataset]
    phrase2_list = [item["phrase_2"] for item in dataset]
    label = [item["label"] for item in dataset]

    phrase1_emb_tensor_list = encode_in_batch(model, batch_size, phrase1_list, device)
    phrase2_emb_tensor_list = encode_in_batch(model, batch_size, phrase2_list, device)
    label_list = [1 if e == 'pos' else 0 for e in label]

    combined = list(zip(phrase1_emb_tensor_list, phrase2_emb_tensor_list, label_list))
    random.shuffle(combined)
    phrase1_emb_tensor_list_shuffled, phrase2_emb_tensor_list_shuffled, label_list_shuffled = zip(*combined)
    label_tensor = torch.FloatTensor(label_list_shuffled)

    phrase1_tensor = torch.stack(phrase1_emb_tensor_list_shuffled).to(device)
    phrase2_tensor = torch.stack(phrase2_emb_tensor_list_shuffled).to(device)
    label_tensor = label_tensor.to(device)

    # 70/15/15 train/validation/test split
    split1 = math.ceil(phrase1_tensor.size()[0] * 0.7)
    split2 = math.ceil(phrase1_tensor.size()[0] * 0.85)

    train_dataset = ParaphraseDataset(phrase1_tensor[:split1, :],
                                      phrase2_tensor[:split1, :],
                                      label_tensor[:split1])
    valid_dataset = ParaphraseDataset(phrase1_tensor[split1:split2, :],
                                      phrase2_tensor[split1:split2, :],
                                      label_tensor[split1:split2])
    test_dataset = ParaphraseDataset(phrase1_tensor[split2:, :],
                                     phrase2_tensor[split2:, :],
                                     label_tensor[split2:])

    early_stop_callback = EarlyStopping(monitor='epoch_val_accuracy', min_delta=0.00, patience=5,
                                        verbose=False, mode='max')
    probe = ProbingModel(input_dim=phrase1_tensor.shape[1] * 2,
                         train_dataset=train_dataset,
                         valid_dataset=valid_dataset,
                         test_dataset=test_dataset)
    trainer = Trainer(max_epochs=100, min_epochs=3, callbacks=[early_stop_callback],
                      gpus=[torch.cuda.current_device()])
    trainer.fit(probe)
    result = trainer.test(test_dataloaders=probe.test_dataloader())

    return result[0]['epoch_test_accuracy']


def eval_clustering(model, dataset, device, batch_size=4, name="conll"):
    """Entity clustering: KMeans over phrase embeddings, scored by normalized mutual information."""
    label_dict = dict()
    if 'conll' in name:
        label_dict = {'PER': 0, 'LOC': 1, 'ORG': 2}
    elif 'bc5cdr' in name:
        label_dict = {'Chemical': 0, 'Disease': 1}
    num_class = len(label_dict)

    phrases, labels = [], []
    for row in dataset:
        entity = row['entity']
        if entity is None:
            entity = "NA"
        phrases.append(entity)
        labels.append(row['label'])

    phrase_emb_tensor = np.array([t.cpu().detach().numpy()
                                  for t in encode_in_batch(model, batch_size, phrases, device)])

    kmeans = KMeans(n_clusters=num_class, random_state=0).fit(phrase_emb_tensor)
    nmi_score = normalized_mutual_info_score(labels, kmeans.labels_)

    return nmi_score


def eval_retrieval(model, kb_dataset, test_dataset, batch_size=16, device='cuda:0'):
    """Entity retrieval: index KB entity-name embeddings with FAISS, report top-1 accuracy on queries."""
    e_names = [row["entity_name"] for row in kb_dataset]
    sen_embeddings = np.array([t.cpu().detach().numpy()
                               for t in encode_in_batch(model, batch_size, e_names, device)],
                              dtype=np.float32)
    shape = np.shape(sen_embeddings)

    m = 24        # number of sub-quantizers in the compressed vectors
    bits = 8      # bits per sub-quantizer code
    nlist = 100   # number of coarse (IVF) cells
    quantizer = faiss.IndexFlatL2(shape[-1])  # flat L2 index used as the coarse quantizer
    emb_index = faiss.IndexIVFPQ(quantizer, shape[-1], nlist, m, bits)
    emb_index.train(sen_embeddings)
    emb_index.add(sen_embeddings)

    cnt, wrong_cnt = 0, 0
    mentions = [row["query"] for row in test_dataset]
    labels = [row["label"] for row in test_dataset]

    batch_emb = np.array([t.cpu().detach().numpy()
                          for t in encode_in_batch(model, batch_size, mentions, device)],
                         dtype=np.float32)

    D, I = emb_index.search(batch_emb, 1)
    predicts = [e_names[i[0]] for i in I]
    for mention, label, predict in zip(mentions, labels, predicts):
        cnt += 1
        if predict != label:
            wrong_cnt += 1
    acc = (cnt - wrong_cnt) * 1.0 / cnt
    return acc

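
# Index note: IndexIVFPQ above is approximate; it trades exactness for speed
# and memory (nlist=100 coarse cells, m=24 sub-quantizers, 8 bits each). A
# minimal exact alternative over the same float32 matrix would be a flat
# brute-force index, sketched here for comparison:
def _exact_retrieval_index(sen_embeddings):
    index = faiss.IndexFlatL2(sen_embeddings.shape[-1])  # no training step needed
    index.add(sen_embeddings)
    return index
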

def eval_single_autofj(dataset, model, device, batch_size):
    """Fuzzy join on one AutoFJ table: match each right-table title to its nearest left-table title."""
    cos_sim = nn.CosineSimilarity(dim=1)
    left_table, right_table, gt_table = load_data(dataset)
    left_table = list(left_table.title)
    right_table = list(right_table.title)
    left_label, right_label = list(gt_table.title_l), list(gt_table.title_r)
    gt_label = dict(zip(right_label, left_label))

    all_embs = [t.detach() for t in encode_in_batch(model, batch_size, left_table + right_table, device)]
    all_embs = torch.stack(all_embs)
    left_embs, right_embs = all_embs[:len(left_table)], all_embs[len(left_table):]
    acc_cnt, total = 0, 0

    for index, r_t_emb in enumerate(right_embs):
        r_t = right_table[index]
        if r_t not in gt_label:
            continue
        g_t = gt_label[r_t]
        r_t_emb = torch.unsqueeze(r_t_emb, dim=0)
        score = cos_sim(r_t_emb, left_embs)
        pred_i = torch.argmax(score).item()
        predicted = left_table[pred_i]
        if predicted == g_t:
            acc_cnt += 1
        total += 1
    acc = acc_cnt * 1.0 / total

    return acc


def eval_autofj(model, dataset, device, batch_size=4):
    """AutoFJ: average matching accuracy over the 50 benchmark tables."""
    table_names = [row["Dataset"] for row in dataset]
    acc_list = []
    for t_name in table_names:
        acc = eval_single_autofj(dataset=t_name, model=model, device=device, batch_size=batch_size)
        print(t_name, acc)
        acc_list.append(acc)
    avg_acc = sum(acc_list) / len(acc_list)
    return avg_acc


class ParaphraseDataset(Dataset):
    """Pairs of phrase embeddings concatenated into one feature vector, with a binary label."""
    def __init__(self, phrase1_tensor, phrase2_tensor, label_tensor):
        self.concat_input = torch.cat((phrase1_tensor, phrase2_tensor), 1)
        self.label = label_tensor

    def __getitem__(self, index):
        return (self.concat_input[index], self.label[index])

    def __len__(self):
        return self.concat_input.size()[0]


def encode_in_batch(model, batch_size, text_list, device):
    """Encode a list of strings in batches; returns a list of detached per-phrase embedding tensors."""
    all_emb_tensor_list = []
    for i in range(0, len(text_list), batch_size):
        batch_text_list = text_list[i:i + batch_size]
        batch_emb_list = model(batch_text_list, device)
        if batch_emb_list.dim() < 2:  # a single-item batch may come back squeezed to 1-D
            batch_emb_list = torch.unsqueeze(batch_emb_list, dim=0)
        all_emb_tensor_list.extend(batch_emb_list)
    return [t.detach() for t in all_emb_tensor_list]

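
# Usage sketch for encode_in_batch with a stand-in encoder (hypothetical,
# illustration only): any callable mapping (list of strings, device) to a
# 2-D tensor of embeddings works.
def _encode_in_batch_demo():
    dummy_model = lambda texts, device: torch.randn(len(texts), 8)
    embs = encode_in_batch(dummy_model, batch_size=2,
                           text_list=["machine learning", "deep learning", "faiss"],
                           device="cpu")
    return len(embs)  # one detached 1-D tensor per input phrase
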

def load_entity(entity_path):
    """Load one entity name per line from a text file."""
    e_names = []
    for line in open(entity_path, encoding='utf8'):
        e_names.append(line.strip())
    return {'mention': e_names, 'entity': e_names}


class ProbingModel(LightningModule):
    """Two-layer MLP probe over concatenated phrase-pair embeddings (binary classification)."""
    def __init__(self, input_dim=1536, train_dataset=None, valid_dataset=None, test_dataset=None):
        super(ProbingModel, self).__init__()
        # Network layers
        self.input_dim = input_dim
        self.linear = nn.Linear(self.input_dim, 256)
        self.linear2 = nn.Linear(256, 1)
        self.output = nn.Sigmoid()

        # Hyper-parameters
        self.lr = 0.0001
        self.batch_size = 200

        # Datasets
        self.train_dataset = train_dataset
        self.valid_dataset = valid_dataset
        self.test_dataset = test_dataset

    def forward(self, x):
        x1 = self.linear(x)
        x1a = F.relu(x1)
        x2 = self.linear2(x1a)
        output = self.output(x2)
        return reshape(output, (-1,))

    def configure_optimizers(self):
        return optim.Adam(self.parameters(), lr=self.lr)

    def train_dataloader(self):
        return DataLoader(self.train_dataset, batch_size=self.batch_size, shuffle=True)

    def val_dataloader(self):
        return DataLoader(self.valid_dataset, batch_size=self.batch_size, shuffle=False)

    def test_dataloader(self):
        return DataLoader(self.test_dataset, batch_size=self.batch_size, shuffle=False)

    def compute_accuracy(self, y_hat, y):
        with torch.no_grad():
            y_pred_f = (y_hat >= 0.5).float()
            num_correct = tsum(y_pred_f == y)
            denom = float(y.size()[0])
            accuracy = torch.div(num_correct, denom)
        return accuracy

    def training_step(self, batch, batch_nb):
        mode = 'train'
        x, y = batch
        y_hat = self(x)
        loss = F.binary_cross_entropy(y_hat, y)
        accuracy = self.compute_accuracy(y_hat, y)
        return {'loss': loss, f'{mode}_accuracy': accuracy, 'log': {f'{mode}_loss': loss}}

    def training_epoch_end(self, outputs):
        mode = 'train'
        loss_mean = sum([o['loss'] for o in outputs]) / len(outputs)
        accuracy_mean = sum([o[f'{mode}_accuracy'] for o in outputs]) / len(outputs)
        self.log(f'epoch_{mode}_loss', loss_mean, on_epoch=True, on_step=False)
        self.log(f'epoch_{mode}_accuracy', accuracy_mean, on_epoch=True, on_step=False)

    def validation_step(self, batch, batch_nb):
        mode = 'val'
        x, y = batch
        y_hat = self(x)
        loss = F.binary_cross_entropy(y_hat, y)
        accuracy = self.compute_accuracy(y_hat, y)
        self.log(f'{mode}_loss', loss, on_epoch=True, on_step=True)
        self.log(f'{mode}_accuracy', accuracy, on_epoch=True, on_step=True)
        return {f'{mode}_loss': loss, f'{mode}_accuracy': accuracy, 'log': {f'{mode}_loss': loss}}

    def validation_epoch_end(self, outputs):
        mode = 'val'
        loss_mean = sum([o[f'{mode}_loss'] for o in outputs]) / len(outputs)
        accuracy_mean = sum([o[f'{mode}_accuracy'] for o in outputs]) / len(outputs)
        self.log(f'epoch_{mode}_loss', loss_mean, on_epoch=True, on_step=False)
        self.log(f'epoch_{mode}_accuracy', accuracy_mean, on_epoch=True, on_step=False)

    def test_step(self, batch, batch_nb):
        mode = 'test'
        x, y = batch
        y_hat = self(x)
        loss = F.binary_cross_entropy(y_hat, y)
        accuracy = self.compute_accuracy(y_hat, y)
        self.log(f'{mode}_loss', loss, on_epoch=True, on_step=True)
        self.log(f'{mode}_accuracy', accuracy, on_epoch=True, on_step=True)
        return {f'{mode}_loss': loss, f'{mode}_accuracy': accuracy, 'log': {f'{mode}_loss': loss}}

    def test_epoch_end(self, outputs):
        mode = 'test'
        loss_mean = sum([o[f'{mode}_loss'] for o in outputs]) / len(outputs)
        accuracy_mean = sum([o[f'{mode}_accuracy'] for o in outputs]) / len(outputs)
        self.log(f'epoch_{mode}_loss', loss_mean, on_epoch=True, on_step=False)
        self.log(f'epoch_{mode}_accuracy', accuracy_mean, on_epoch=True, on_step=False)

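
# Compatibility note: ProbingModel targets the pytorch-lightning 1.x API
# (training_epoch_end/validation_epoch_end/test_epoch_end hooks,
# Trainer(gpus=...), trainer.test(test_dataloaders=...)); Lightning 2.x
# renamed or removed these, so pin the dependency accordingly.
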

class PearlSmallModel(nn.Module):
    """Wraps the Lihuchen/pearl_small encoder; embeds phrases via masked mean pooling."""
    def __init__(self):
        super().__init__()
        model_name = "Lihuchen/pearl_small"
        self.tokenizer = AutoTokenizer.from_pretrained(model_name)
        self.model = AutoModel.from_pretrained(model_name)

    def average_pool(self, last_hidden_states, attention_mask):
        # Zero out padding positions, then average over the real tokens only.
        last_hidden = last_hidden_states.masked_fill(~attention_mask[..., None].bool(), 0.0)
        return last_hidden.sum(dim=1) / attention_mask.sum(dim=1)[..., None]

    def forward(self, x, device):
        # Tokenize the input texts
        batch_dict = self.tokenizer(x, max_length=128, padding=True, truncation=True, return_tensors='pt')
        batch_dict = batch_dict.to(device)

        outputs = self.model(**batch_dict)
        phrase_vec = self.average_pool(outputs.last_hidden_state, batch_dict['attention_mask'])

        return phrase_vec.detach()

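
# Standalone usage sketch (illustrative phrases): embed two strings with
# PearlSmallModel and score them the same way eval_bird does.
def _pearl_small_demo():
    model = PearlSmallModel()
    emb = model(["metal fatigue", "material fatigue"], "cpu")
    return F.cosine_similarity(emb[0:1], emb[1:2]).item()
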

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='evaluation of phrase embeddings on the PEARL benchmark')
    parser.add_argument('-batch_size', help='the number of samples in one batch', type=int, default=32)
    args = parser.parse_args()

    model = PearlSmallModel()
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model.to(device)
    batch_size = args.batch_size

    ppdb_dataset = load_dataset("Lihuchen/pearl_benchmark", "ppdb", split="test")
    ppdb_score = eval_ppdb(model, ppdb_dataset, device=device, batch_size=batch_size)
    print("ppdb: ", ppdb_score)

    ppdb_filtered_dataset = load_dataset("Lihuchen/pearl_benchmark", "ppdb_filtered", split="test")
    ppdb_filtered_score = eval_ppdb(model, ppdb_filtered_dataset, device=device, batch_size=batch_size)
    print("ppdb_filtered: ", ppdb_filtered_score)

    turney_dataset = load_dataset("Lihuchen/pearl_benchmark", "turney", split="test")
    turney_score = eval_turney(model, turney_dataset, device=device, batch_size=batch_size)
    print("turney: ", turney_score)

    bird_dataset = load_dataset("Lihuchen/pearl_benchmark", "bird", split="test")
    bird_score = eval_bird(model, bird_dataset, device=device, batch_size=batch_size)
    print("bird: ", bird_score)

    yago_kb_dataset = load_dataset("Lihuchen/pearl_benchmark", "kb", split="yago")
    yago_test_dataset = load_dataset("Lihuchen/pearl_benchmark", "yago", split="test")
    yago_score = eval_retrieval(model, yago_kb_dataset, yago_test_dataset, device=device, batch_size=batch_size)
    print("yago: ", yago_score)

    umls_kb_dataset = load_dataset("Lihuchen/pearl_benchmark", "kb", split="umls")
    umls_test_dataset = load_dataset("Lihuchen/pearl_benchmark", "umls", split="test")
    umls_score = eval_retrieval(model, umls_kb_dataset, umls_test_dataset, device=device, batch_size=batch_size)
    print("umls: ", umls_score)

    conll_dataset = load_dataset("Lihuchen/pearl_benchmark", "conll", split="test")
    conll_score = eval_clustering(model, conll_dataset, device=device, batch_size=batch_size, name="conll")
    print("conll: ", conll_score)

    bc5cdr_dataset = load_dataset("Lihuchen/pearl_benchmark", "bc5cdr", split="test")
    bc5cdr_score = eval_clustering(model, bc5cdr_dataset, device=device, batch_size=batch_size, name="bc5cdr")
    print("bc5cdr: ", bc5cdr_score)

    autofj_dataset = load_dataset("Lihuchen/pearl_benchmark", "autofj", split="test")
    autofj_score = eval_autofj(model, autofj_dataset, device=device, batch_size=batch_size)
    print("autofj: ", autofj_score)
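
# Example invocation (assuming a CUDA-capable machine; falls back to CPU):
#   python eval.py -batch_size 32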