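"""Build JSONL splits for the tweet_topic task of cardiffnlp/super_tweeteval.

Writes the full train/validation/test splits, slices the shuffled test set
into four quarters, and, for each held-out quarter, generates three seeded
train/validation resamples that mix the remaining quarters into the original
splits.
"""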
import json
import os
from random import shuffle, seed

from datasets import load_dataset
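
# Load each split with a fixed shuffle seed and convert it to a list of row dicts.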
test = load_dataset("cardiffnlp/super_tweeteval", "tweet_topic", split="test").shuffle(seed=42)
test = list(test.to_pandas().T.to_dict().values())
train = load_dataset("cardiffnlp/super_tweeteval", "tweet_topic", split="train").shuffle(seed=42)
train = list(train.to_pandas().T.to_dict().values())
validation = load_dataset("cardiffnlp/super_tweeteval", "tweet_topic", split="validation").shuffle(seed=42)
validation = list(validation.to_pandas().T.to_dict().values())
n_train = len(train)
n_validation = len(validation)
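
# numpy arrays are not JSON serializable, so cast each label vector to a plain list.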
for data in [train, validation, test]:
    for i in data:
        i["gold_label_list"] = i["gold_label_list"].tolist()
n_test = int(len(test)/4)
test_1 = test[:n_test]
test_2 = test[n_test:n_test*2]
test_3 = test[n_test*2:n_test*3]
test_4 = test[n_test*3:]
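
# Write the full splits and the four test quarters as JSONL, one record per line.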
os.makedirs("data/tweet_topic", exist_ok=True)
with open("data/tweet_topic/test.jsonl", "w") as f:
f.write("\n".join([json.dumps(i) for i in test]))
with open("data/tweet_topic/test_1.jsonl", "w") as f:
f.write("\n".join([json.dumps(i) for i in test_1]))
with open("data/tweet_topic/test_2.jsonl", "w") as f:
f.write("\n".join([json.dumps(i) for i in test_2]))
with open("data/tweet_topic/test_3.jsonl", "w") as f:
f.write("\n".join([json.dumps(i) for i in test_3]))
with open("data/tweet_topic/test_4.jsonl", "w") as f:
f.write("\n".join([json.dumps(i) for i in test_4]))
with open("data/tweet_topic/train.jsonl", "w") as f:
f.write("\n".join([json.dumps(i) for i in train]))
with open("data/tweet_topic/validation.jsonl", "w") as f:
f.write("\n".join([json.dumps(i) for i in validation]))
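

# Replace half of each of train/validation with examples drawn from the pooled
# test quarters, keeping the original split sizes; note this reshuffles the
# global train/validation lists in place.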
def sampler(dataset_test, r_seed):
    seed(r_seed)
    shuffle(dataset_test)
    shuffle(train)
    shuffle(validation)
    test_tr = dataset_test[:int(n_train / 2)]
    test_vl = dataset_test[int(n_train / 2): int(n_train / 2) + int(n_validation / 2)]
    new_train = test_tr + train[:n_train - len(test_tr)]
    new_validation = test_vl + validation[:n_validation - len(test_vl)]
    return new_train, new_validation
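

# For each held-out quarter n, pool the other three quarters and draw three
# seeded train/validation resamples; the held-out quarter itself is the test set.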
id2test = {n: t for n, t in enumerate([test_1, test_2, test_3, test_4])}
for n, _test in enumerate([
        test_4 + test_2 + test_3,
        test_1 + test_4 + test_3,
        test_1 + test_2 + test_4,
        test_1 + test_2 + test_3]):
    for s in range(3):
        os.makedirs(f"data/tweet_topic_test{n}_seed{s}", exist_ok=True)
        _train, _valid = sampler(_test, s)
        with open(f"data/tweet_topic_test{n}_seed{s}/train.jsonl", "w") as f:
            f.write("\n".join([json.dumps(i) for i in _train]))
        with open(f"data/tweet_topic_test{n}_seed{s}/validation.jsonl", "w") as f:
            f.write("\n".join([json.dumps(i) for i in _valid]))
        with open(f"data/tweet_topic_test{n}_seed{s}/test.jsonl", "w") as f:
            f.write("\n".join([json.dumps(i) for i in id2test[n]]))
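
# Resulting layout:
#   data/tweet_topic/{train,validation,test,test_1..test_4}.jsonl
#   data/tweet_topic_test{0..3}_seed{0..2}/{train,validation,test}.jsonl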