update

- README.md +1 -1
- data/sms_spam.jsonl +3 -0
- examples/preprocess/process_sms_spam.py +70 -0
- main.py +18 -0
- requirements.txt +1 -0
- spam_detect.py +94 -0
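
In short: this commit completes the spam_assassin row in the README dataset table, adds the preprocessed sms_spam corpus as a Git LFS object together with the script that produced it, fleshes out the previously stubbed spam_detect.py loading script, turns main.py into a small smoke test that loads the new dataset through it, and adds a pin to requirements.txt.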
README.md
CHANGED
@@ -12,7 +12,7 @@ license: apache-2.0
 | Dataset | Language | Task type | Original data / project | Samples | Original data description | Alternate download |
 | :--- | :---: | :---: | :---: | :---: | :---: | :---: |
 | sms_spam | English | SMS spam classification | [SMS Spam Collection](https://archive.ics.uci.edu/dataset/228/sms+spam+collection); [SMS Spam Collection Dataset](https://www.kaggle.com/datasets/uciml/sms-spam-collection-dataset) | 5,574 | The SMS Spam Collection is a public set of labeled SMS messages collected for mobile phone spam research. | [sms_spam](https://huggingface.co/datasets/sms_spam) |
-| spam_assassin | English | Email spam classification | [Apache SpamAssassin’s public datasets](https://spamassassin.apache.org/old/publiccorpus/); [Spam or Not Spam Dataset](https://www.kaggle.com/datasets/ozlerhakan/spam-or-not-spam-dataset) |
+| spam_assassin | English | Email spam classification | [Apache SpamAssassin’s public datasets](https://spamassassin.apache.org/old/publiccorpus/); [Spam or Not Spam Dataset](https://www.kaggle.com/datasets/ozlerhakan/spam-or-not-spam-dataset) | 10.7K | A collection of mail messages, suitable for testing spam filtering systems. | [SpamAssassin](https://huggingface.co/datasets/talby/spamassassin) |
 | enron_spam | English | Email spam classification | [enron_spam_data](https://github.com/MWiechmann/enron_spam_data); [Enron-Spam](https://www2.aueb.gr/users/ion/data/enron-spam/); [spam-mails-dataset](https://www.kaggle.com/datasets/venky73/spam-mails-dataset) | 17,171 spam; 16,545 ham | The Enron-Spam dataset is an excellent resource collected by V. Metsis, I. Androutsopoulos, and G. Paliouras. | [enron_spam](https://huggingface.co/datasets/SetFit/enron_spam) |

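Every sub-dataset in this table is normalized into one shared JSONL row format by the scripts under `examples/preprocess` (the sms_spam one is added below): raw text, a string label, the source name, and the split. An illustrative row, with a made-up message as the text:

```python
# Shape of one row in data/*.jsonl; the message text here is a
# hypothetical placeholder, not a real corpus sample.
row = {
    "text": "You have won a free prize, reply WIN to claim.",
    "label": "spam",            # "spam" or "ham"
    "data_source": "sms_spam",  # which sub-dataset the row came from
    "split": "train",           # used for split filtering in spam_detect.py
}
```
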
data/sms_spam.jsonl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:65b1bb054a8b310bbf949cb2ce527a93f9e51401275e06f0e4b43d422edb46d5
+size 868244

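The committed file is only a Git LFS pointer; the actual 868 kB JSONL payload arrives after `git lfs pull`. Once present, it can be consumed directly, one JSON object per line, as in this minimal sketch:

```python
import json

# Stream the preprocessed corpus; assumes `git lfs pull` has already
# replaced the pointer file with the real JSONL payload.
with open("data/sms_spam.jsonl", "r", encoding="utf-8") as f:
    for line in f:
        sample = json.loads(line)
        print(sample["label"], sample["text"][:60])
```
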
examples/preprocess/process_sms_spam.py
ADDED
@@ -0,0 +1,70 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+import argparse
+from collections import defaultdict
+import json
+import os
+from pathlib import Path
+import random
+import re
+import sys
+
+pwd = os.path.abspath(os.path.dirname(__file__))
+sys.path.append(os.path.join(pwd, '../../'))
+
+from datasets import load_dataset
+from tqdm import tqdm
+
+from project_settings import project_path
+
+
+def get_args():
+    parser = argparse.ArgumentParser()
+
+    parser.add_argument("--dataset_path", default="sms_spam", type=str)
+    parser.add_argument(
+        "--dataset_cache_dir",
+        default=(project_path / "hub_datasets").as_posix(),
+        type=str
+    )
+    parser.add_argument(
+        "--output_file",
+        default=(project_path / "data/sms_spam.jsonl"),
+        type=str
+    )
+    args = parser.parse_args()
+    return args
+
+
+def main():
+    args = get_args()
+
+    dataset_dict = load_dataset(
+        path=args.dataset_path,
+        cache_dir=args.dataset_cache_dir,
+    )
+    print(dataset_dict)
+
+    with open(args.output_file, "w", encoding="utf-8") as f:
+        for sample in tqdm(dataset_dict["train"]):
+            # print(sample)
+            text = sample["sms"]
+            label = sample["label"]
+
+            text = text.strip()
+            label = "spam" if label == 1 else "ham"
+
+            row = {
+                "text": text,
+                "label": label,
+                "data_source": "sms_spam",
+                "split": "train"
+            }
+            row = json.dumps(row, ensure_ascii=False)
+            f.write("{}\n".format(row))
+
+    return
+
+
+if __name__ == '__main__':
+    main()

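For context, each record of the upstream Hugging Face `sms_spam` dataset carries an `sms` string and an integer `label` (0 for ham, 1 for spam), which is what the mapping above assumes; the script flattens that into the shared schema shown earlier. Running `python examples/preprocess/process_sms_spam.py` from the repository root regenerates `data/sms_spam.jsonl` with the default arguments.
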
main.py
CHANGED
@@ -1,5 +1,23 @@
 #!/usr/bin/python3
 # -*- coding: utf-8 -*-
+from datasets import load_dataset, DownloadMode
+
+
+dataset = load_dataset(
+    "spam_detect.py",
+    name="sms_spam",
+    split="train",
+    cache_dir=None,
+    download_mode=DownloadMode.FORCE_REDOWNLOAD
+)
+
+for sample in dataset:
+    text = sample["text"]
+    label = sample["label"]
+
+    print(text)
+    print(label)
+    print("-" * 150)
 
 
 if __name__ == '__main__':

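Passing the local path `spam_detect.py` to `load_dataset` makes `datasets` execute the builder script below rather than fetch a hub dataset, and `DownloadMode.FORCE_REDOWNLOAD` guards against replaying a stale cache while the script is under development. A quick follow-up check one might run on the loaded split (a sketch, not part of the commit):

```python
from collections import Counter

from datasets import load_dataset

# Class balance of the train split; assumes spam_detect.py and
# data/sms_spam.jsonl are available relative to the working directory.
dataset = load_dataset("spam_detect.py", name="sms_spam", split="train")
print(Counter(sample["label"] for sample in dataset))
```
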
requirements.txt
CHANGED
@@ -1,2 +1,3 @@
 datasets==2.10.1
+fsspec==2023.9.2
 tqdm==4.66.1

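The new `fsspec` pin is presumably a compatibility guard: `fsspec` releases later than 2023.9.2 changed interfaces that the older `datasets==2.10.1` still depends on, so an unpinned install can break `load_dataset`.
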
spam_detect.py
CHANGED
@@ -1,5 +1,99 @@
 #!/usr/bin/python3
 # -*- coding: utf-8 -*-
+from collections import defaultdict
+import json
+from pathlib import Path
+import random
+import re
+from typing import Any, Dict, List, Tuple
+
+import datasets
+
+
+_urls = {
+    "sms_spam": "data/sms_spam.jsonl"
+}
+
+
+_CITATION = """\
+@dataset{spam_detect,
+  author    = {Xing Tian},
+  title     = {spam_detect},
+  month     = sep,
+  year      = 2023,
+  publisher = {Xing Tian},
+  version   = {1.0},
+}
+"""
+
+
+class SpamDetect(datasets.GeneratorBasedBuilder):
+    VERSION = datasets.Version("1.0.0")
+
+    intent_configs = list()
+    for name in _urls.keys():
+        config = datasets.BuilderConfig(name=name, version=VERSION, description=name)
+        intent_configs.append(config)
+
+    BUILDER_CONFIGS = [
+        *intent_configs,
+    ]
+
+    def _info(self):
+        features = datasets.Features({
+            "text": datasets.Value("string"),
+            "label": datasets.Value("string"),
+            "data_source": datasets.Value("string"),
+        })
+
+        return datasets.DatasetInfo(
+            features=features,
+            supervised_keys=None,
+            homepage="",
+            license="",
+            citation=_CITATION,
+        )
+
+    def _split_generators(self, dl_manager):
+        """Returns SplitGenerators."""
+        url = _urls[self.config.name]
+        dl_path = dl_manager.download(url)
+        archive_path = dl_path
+
+        return [
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN,
+                gen_kwargs={"archive_path": archive_path, "split": "train"},
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.VALIDATION,
+                gen_kwargs={"archive_path": archive_path, "split": "validation"},
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.TEST,
+                gen_kwargs={"archive_path": archive_path, "split": "test"},
+            ),
+        ]
+
+    def _generate_examples(self, archive_path, split):
+        """Yields examples."""
+        archive_path = Path(archive_path)
+
+        idx = 0
+
+        with open(archive_path, "r", encoding="utf-8") as f:
+            for row in f:
+                sample = json.loads(row)
+
+                if sample["split"] != split:
+                    continue
+
+                yield idx, {
+                    "text": sample["text"],
+                    "label": sample["label"],
+                    "data_source": sample["data_source"],
+                }
+                idx += 1
 
 
 if __name__ == '__main__':

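The builder derives one `BuilderConfig` per entry in `_urls` and routes all three declared splits through the same JSONL, filtering on each row's embedded `split` tag; since the committed file only contains `split == "train"` rows, the validation and test splits currently come out empty. Under that design, wiring in another sub-dataset is just a new preprocessed JSONL plus one more mapping entry, roughly (a hypothetical sketch, not part of the commit):

```python
# Hypothetical: registering the enron_spam corpus from the README table
# would need a preprocessed data/enron_spam.jsonl plus this extra entry.
_urls = {
    "sms_spam": "data/sms_spam.jsonl",
    "enron_spam": "data/enron_spam.jsonl",  # hypothetical addition
}
```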