Create generate_dataset.py
Browse files- generate_dataset.py +38 -0
generate_dataset.py
ADDED
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Split the sentence-transformers/specter triplet dataset and push it to the Hub.

Per anchor, the first 4 (positive, negative) pairs form the train split; any
remaining pairs are split 60/40 into validation and test. The three splits are
pushed to the ``NothingMuch/Specter-Triplet-Split`` dataset repository.

Requires a ``HUGGINGFACE_TOKEN`` environment variable with write access.
"""

import os

import datasets
import polars as pl

# Load the full triplet split and move it into Polars for the group-by logic.
specter_dataset: datasets.Dataset = datasets.load_dataset(path = "sentence-transformers/specter", name = "triplet", split = "train") # type: ignore
specter_dataframe: pl.DataFrame = specter_dataset.to_polars() # type: ignore

# Train split: at most the first 4 positive/negative pairs per anchor.
train_dataframe: pl.DataFrame = specter_dataframe.group_by("anchor").agg([
    pl.col("positive").alias(name = "positive"),
    pl.col("negative").alias(name = "negative")
]).with_columns([
    pl.col("positive").list.head(n = 4),
    pl.col("negative").list.head(n = 4)
]).explode(columns = ["positive", "negative"])

# Validation/test pool: everything left over after the train split.
# ``tail(n = -4)`` keeps all list elements after index 3.
val_test_dataframe: pl.DataFrame = specter_dataframe.group_by("anchor").agg([
    pl.col("positive").alias(name="positive"),
    pl.col("negative").alias(name="negative")
]).with_columns([
    pl.col("positive").list.tail(n = -4), # Take all elements after index 3
    pl.col("negative").list.tail(n = -4) # Take all elements after index 3
])

# Filter out empty lists in validation set (in case some anchors had exactly 4 pairs)
val_test_dataframe = val_test_dataframe.filter(
    pl.col("positive").list.len() > 0
).explode(columns = ["positive", "negative"])

# 60/40 split of the remaining rows into validation and test.
total_len: int = val_test_dataframe.height
val_size: int = int(total_len * 0.6)
val_dataframe: pl.DataFrame = val_test_dataframe.head(n = val_size)
test_dataframe: pl.DataFrame = val_test_dataframe.tail(n = total_len - val_size)

# Convert back to Hugging Face datasets for upload.
train_dataset: datasets.Dataset = datasets.Dataset.from_polars(df = train_dataframe)
val_dataset: datasets.Dataset = datasets.Dataset.from_polars(df = val_dataframe)
test_dataset: datasets.Dataset = datasets.Dataset.from_polars(df = test_dataframe)

# Push each split to the same Hub repository. ``os`` was previously missing,
# which raised NameError here after all the preprocessing had already run.
train_dataset.push_to_hub(repo_id = "NothingMuch/Specter-Triplet-Split", split = "train", token = os.environ["HUGGINGFACE_TOKEN"])
val_dataset.push_to_hub(repo_id = "NothingMuch/Specter-Triplet-Split", split = "validation", token = os.environ["HUGGINGFACE_TOKEN"])
test_dataset.push_to_hub(repo_id = "NothingMuch/Specter-Triplet-Split", split = "test", token = os.environ["HUGGINGFACE_TOKEN"])
|