#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""Sentence tokenization demo: split a text into sentences by language.

Uses NLTK's punkt sentence tokenizer for the supported languages and a
rule-based regex splitter for Chinese.
"""
import argparse
import os
import re
from typing import List

from project_settings import project_path

# Point NLTK at the vendored data directory *before* importing nltk, so the
# punkt models are found locally without triggering a download.
os.environ['NLTK_DATA'] = (project_path / "thirdparty_data/nltk_data").as_posix()

import nltk


# Languages with a pre-trained punkt model shipped in nltk_data.
# FIX: removed "flemish" — punkt ships no model for it, so passing it to
# nltk.sent_tokenize raises LookupError. Added the punkt languages that
# were missing: greek, malayalam, slovene.
nltk_sent_tokenize_languages = [
    "czech", "danish", "dutch", "english", "estonian",
    "finnish", "french", "german", "greek", "italian",
    "malayalam", "norwegian", "polish", "portuguese",
    "russian", "slovene", "spanish", "swedish", "turkish",
]


def get_args():
    """Parse command-line arguments.

    Returns:
        argparse.Namespace with ``text`` (the input to split into
        sentences) and ``language`` (tokenizer language name).
    """
    default_text = (
        "M2M100 is a multilingual encoder-decoder (seq-to-seq) model trained for "
        "Many-to-Many multilingual translation. It was introduced in this paper "
        "and first released in this repository."
    )
    # Example Chinese input: "我是一个句子。我是另一个句子。" with --language chinese
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--text",
        default=default_text,
        type=str,
    )
    parser.add_argument(
        "--language",
        default="english",
        # FIX: sent_tokenize() handles "chinese", but it was missing from
        # `choices`, so `--language chinese` was rejected by argparse before
        # the Chinese splitter could ever run.
        choices=["chinese"] + nltk_sent_tokenize_languages,
        type=str,
    )
    args = parser.parse_args()
    return args


def chinese_sent_tokenize(text: str):
    """Rule-based sentence splitting for Chinese text.

    Inserts a newline after sentence-ending punctuation (keeping a closing
    quote attached to its sentence), then splits on those newlines.
    """
    # Ordered (pattern, replacement) rules; order matters because the final
    # rule relies on the earlier ones leaving closing quotes untouched.
    rules = (
        # single-character terminators (。 ! ? ?) not followed by a closing quote
        (r"([。!?\?])([^”’])", r"\1\n\2"),
        # English ellipsis (six dots)
        (r"(\.{6})([^”’])", r"\1\n\2"),
        # Chinese ellipsis (two … characters)
        (r"(\…{2})([^”’])", r"\1\n\2"),
        # when a terminator precedes a closing quote, the quote ends the
        # sentence, so the break goes after the quote
        (r"([。!?\?][”’])([^,。!?\?])", r"\1\n\2"),
    )
    for pattern, replacement in rules:
        text = re.sub(pattern, replacement, text)
    # Strip trailing whitespace/newlines so no empty tail sentence is emitted.
    # NOTE: semicolons, dashes and ASCII double quotes are deliberately ignored;
    # extend the rule table if they are needed.
    return text.rstrip().split("\n")


def sent_tokenize(text: str, language: str) -> List[str]:
    """Split *text* into sentences using a language-appropriate tokenizer.

    Chinese goes through the local rule-based splitter; languages with an
    NLTK punkt model go through ``nltk.sent_tokenize``; any other language
    is returned unsplit as a single-element list.
    """
    if language == "chinese":
        return chinese_sent_tokenize(text)
    if language in nltk_sent_tokenize_languages:
        return nltk.sent_tokenize(text, language)
    return [text]


def main():
    """CLI entry point: tokenize the input text and print one sentence per line."""
    args = get_args()
    sentences = sent_tokenize(args.text, language=args.language)
    for sentence in sentences:
        print(sentence)


if __name__ == '__main__':
    main()