File size: 1,202 Bytes
df528fb
 
 
 
 
 
 
 
 
 
 
 
237a43a
 
 
df528fb
 
 
 
 
 
 
 
 
 
 
 
237a43a
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
from pathlib import Path

import mteb
from datasets import load_dataset

# Log of samples removed by the empty-sentence filter; start fresh each run.
log_file_path = Path("remove_empty.log")
log_file_path.unlink(missing_ok=True)  # Python 3.8+: replaces exists()+unlink()

# Resolve the STS22 task so its metadata tells us exactly which dataset
# (path / name / revision) to load from the hub.
tasks = mteb.get_tasks(tasks=["STS22"])

dataset = load_dataset(**tasks[0].metadata.dataset)


def filter_sample(x):
    """Return True when both sentences of *x* are non-empty.

    Dropped samples are appended to the module-level ``log_file_path``
    and echoed to stdout so removals can be audited later.
    """
    s1, s2 = x["sentence1"], x["sentence2"]
    if s1 and s2:
        return True
    message = f"Filtered: {s1} -- {s2}"
    with open(log_file_path, "a") as log_f:
        log_f.write(message + "\n")
    print(message)
    return False


for split in dataset:
    # Drop rows with an empty sentence on either side, then report how
    # many were removed — both to the log file and to stdout.
    before = len(dataset[split])
    cleaned = dataset[split].filter(filter_sample)
    after = len(cleaned)

    log = f"Filtered {before - after} samples from {before} in {split}"
    with open(log_file_path, "a") as f:
        f.write(log + "\n")
    print(log)

    dataset[split] = cleaned


# Persist the cleaned splits into the repository's data/ directory as
# gzipped JSON-lines, one file per split.
save_path = Path(__file__).parent.parent / "data"
save_path.mkdir(parents=True, exist_ok=True)  # avoid FileNotFoundError on first run
for split in dataset:
    dataset[split].to_json(save_path / f"{split}.jsonl.gz", compression="gzip")




ds = load_dataset(tasks[0].metadata.dataset["path"])