pythainlp-dev/pythainlp/parse/spacy_thai_engine.py
# -*- coding: utf-8 -*-
"""
spacy_thai: Tokenizer, POS-tagger, and dependency-parser for Thai language, working on Universal Dependencies.
GitHub: https://github.com/KoichiYasuoka/spacy-thai
"""
from typing import List, Union
import spacy_thai
class Parse:
def __init__(self, model: str = "th") -> None:
self.nlp = spacy_thai.load()
def __call__(
self, text: str, tag: str = "str"
) -> Union[List[List[str]], str]:
doc = self.nlp(text)
_text = []
if tag == "list":
_tag_data = []
for t in doc:
_tag_data.append(
[
str(t.i + 1),
t.orth_,
t.lemma_,
t.pos_,
t.tag_,
"_",
str(0 if t.head == t else t.head.i + 1),
t.dep_,
"_",
"_" if t.whitespace_ else "SpaceAfter=No",
]
)
return _tag_data
for t in doc:
_text.append(
"\t".join(
[
str(t.i + 1),
t.orth_,
t.lemma_,
t.pos_,
t.tag_,
"_",
str(0 if t.head == t else t.head.i + 1),
t.dep_,
"_",
"_" if t.whitespace_ else "SpaceAfter=No",
]
)
)
return "\n".join(_text)
pythainlp-dev/pythainlp/parse/transformers_ud.py
# -*- coding: utf-8 -*-
"""
TransformersUD
Author: Prof. Koichi Yasuoka
This tagger is provided under the terms of the apache-2.0 License.
The source: https://huggingface.co/KoichiYasuoka/deberta-base-thai-ud-head
GitHub: https://github.com/KoichiYasuoka
"""
import os
from typing import List, Union
import numpy
import torch
import ufal.chu_liu_edmonds
from transformers import (
AutoTokenizer,
AutoModelForQuestionAnswering,
AutoModelForTokenClassification,
AutoConfig,
TokenClassificationPipeline,
)
from transformers.utils import cached_file
class Parse:
def __init__(
self, model: str = "KoichiYasuoka/deberta-base-thai-ud-head"
) -> None:
        if model is None:
model = "KoichiYasuoka/deberta-base-thai-ud-head"
self.tokenizer = AutoTokenizer.from_pretrained(model)
self.model = AutoModelForQuestionAnswering.from_pretrained(model)
x = AutoModelForTokenClassification.from_pretrained
if os.path.isdir(model):
d, t = x(os.path.join(model, "deprel")), x(
os.path.join(model, "tagger")
)
else:
c = AutoConfig.from_pretrained(
cached_file(model, "deprel/config.json")
)
d = x(cached_file(model, "deprel/pytorch_model.bin"), config=c)
s = AutoConfig.from_pretrained(
cached_file(model, "tagger/config.json")
)
t = x(cached_file(model, "tagger/pytorch_model.bin"), config=s)
self.deprel = TokenClassificationPipeline(
model=d, tokenizer=self.tokenizer, aggregation_strategy="simple"
)
self.tagger = TokenClassificationPipeline(
model=t, tokenizer=self.tokenizer
)
def __call__(
self, text: str, tag: str = "str"
) -> Union[List[List[str]], str]:
w = [
(t["start"], t["end"], t["entity_group"])
for t in self.deprel(text)
]
z, n = {
t["start"]: t["entity"].split("|") for t in self.tagger(text)
}, len(w)
r, m = [text[s:e] for s, e, p in w], numpy.full(
(n + 1, n + 1), numpy.nan
)
v, c = self.tokenizer(r, add_special_tokens=False)["input_ids"], []
for i, t in enumerate(v):
q = (
[self.tokenizer.cls_token_id]
+ t
+ [self.tokenizer.sep_token_id]
)
c.append(
[q]
+ v[0:i]
+ [[self.tokenizer.mask_token_id]]
+ v[i + 1 :]
+ [[q[-1]]]
)
b = [[len(sum(x[0 : j + 1], [])) for j in range(len(x))] for x in c]
with torch.no_grad():
d = self.model(
input_ids=torch.tensor([sum(x, []) for x in c]),
token_type_ids=torch.tensor(
[[0] * x[0] + [1] * (x[-1] - x[0]) for x in b]
),
)
s, e = d.start_logits.tolist(), d.end_logits.tolist()
for i in range(n):
for j in range(n):
m[i + 1, 0 if i == j else j + 1] = (
s[i][b[i][j]] + e[i][b[i][j + 1] - 1]
)
h = ufal.chu_liu_edmonds.chu_liu_edmonds(m)[0]
if [0 for i in h if i == 0] != [0]:
i = ([p for s, e, p in w] + ["root"]).index("root")
j = i + 1 if i < n else numpy.nanargmax(m[:, 0])
m[0:j, 0] = m[j + 1 :, 0] = numpy.nan
h = ufal.chu_liu_edmonds.chu_liu_edmonds(m)[0]
u = ""
if tag == "list":
_tag_data = []
for i, (s, e, p) in enumerate(w, 1):
p = "root" if h[i] == 0 else "dep" if p == "root" else p
_tag_data.append(
[
str(i),
r[i - 1],
"_",
z[s][0][2:],
"_",
"|".join(z[s][1:]),
str(h[i]),
p,
"_",
"_" if i < n and e < w[i][0] else "SpaceAfter=No",
]
)
return _tag_data
for i, (s, e, p) in enumerate(w, 1):
p = "root" if h[i] == 0 else "dep" if p == "root" else p
u += (
"\t".join(
[
str(i),
r[i - 1],
"_",
z[s][0][2:],
"_",
"|".join(z[s][1:]),
str(h[i]),
p,
"_",
"_" if i < n and e < w[i][0] else "SpaceAfter=No",
]
)
+ "\n"
)
return u + "\n"
pythainlp-dev/pythainlp/parse/ud_goeswith.py
# -*- coding: utf-8 -*-
"""
UDgoeswith
Author: Prof. Koichi Yasuoka
This tagger is provided under the terms of the apache-2.0 License.
The source: https://huggingface.co/KoichiYasuoka/deberta-base-thai-ud-goeswith
GitHub: https://github.com/KoichiYasuoka
"""
from typing import List, Union
from transformers import AutoTokenizer, AutoModelForTokenClassification
import numpy as np
import torch
import ufal.chu_liu_edmonds
class Parse:
def __init__(
self, model: str = "KoichiYasuoka/deberta-base-thai-ud-goeswith"
) -> None:
if model is None:
model = "KoichiYasuoka/deberta-base-thai-ud-goeswith"
self.tokenizer = AutoTokenizer.from_pretrained(model)
self.model = AutoModelForTokenClassification.from_pretrained(model)
def __call__(
self,
text: str, tag: str = "str"
) -> Union[List[List[str]], str]:
w = self.tokenizer(text, return_offsets_mapping=True)
v = w["input_ids"]
x = [
v[0:i] + [self.tokenizer.mask_token_id] + v[i + 1:] + [j]
for i, j in enumerate(v[1:-1], 1)
]
with torch.no_grad():
e = self.model(input_ids=torch.tensor(x)
).logits.numpy()[:, 1:-2, :]
r = [
1 if i == 0 else -1
if j.endswith("|root") else 0
for i, j in sorted(self.model.config.id2label.items())
]
e += np.where(np.add.outer(np.identity(e.shape[0]), r) == 0, 0, np.nan)
g = self.model.config.label2id["X|_|goeswith"]
r = np.tri(e.shape[0])
for i in range(e.shape[0]):
for j in range(i + 2, e.shape[1]):
r[i, j] = r[i, j - 1] if np.nanargmax(e[i, j - 1]) == g else 1
e[:, :, g] += np.where(r == 0, 0, np.nan)
m = np.full((e.shape[0] + 1, e.shape[1] + 1), np.nan)
m[1:, 1:] = np.nanmax(e, axis=2).transpose()
p = np.zeros(m.shape)
p[1:, 1:] = np.nanargmax(e, axis=2).transpose()
for i in range(1, m.shape[0]):
m[i, 0], m[i, i], p[i, 0] = m[i, i], np.nan, p[i, i]
h = ufal.chu_liu_edmonds.chu_liu_edmonds(m)[0]
if [0 for i in h if i == 0] != [0]:
m[:, 0] += np.where(
m[:, 0] == np.nanmax(
m[[i for i, j in enumerate(h) if j == 0], 0]), 0, np.nan
)
m[[i for i, j in enumerate(h) if j == 0]] += [
0 if i == 0 or j == 0 else np.nan for i, j in enumerate(h)
]
h = ufal.chu_liu_edmonds.chu_liu_edmonds(m)[0]
u = ""
v = [(s, e) for s, e in w["offset_mapping"] if s < e]
if tag == "list":
_tag_data = []
for i, (s, e) in enumerate(v, 1):
q = self.model.config.id2label[p[i, h[i]]].split("|")
_tag_data.append(
[
str(i),
text[s:e],
"_",
q[0],
"_",
"|".join(q[1:-1]),
str(h[i]),
q[-1],
"_",
"_" if i < len(v) and e < v[i][0] else "SpaceAfter=No"
]
)
return _tag_data
else:
for i, (s, e) in enumerate(v, 1):
q = self.model.config.id2label[p[i, h[i]]].split("|")
u += "\t".join([str(i),
text[s:e],
"_",
q[0],
"_",
"|".join(q[1:-1]),
str(h[i]),
q[-1],
"_",
"_" if i < len(v) and e < v[i][0] else "SpaceAfter=No"]) + "\n"
return u + "\n"
pythainlp-dev/pythainlp/soundex/__init__.py
# -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Thai soundex
Has four systems to choose from: Udom83 (default), LK82, MetaSound, and Prayut and Somchaip
"""
__all__ = [
"soundex",
"lk82",
"metasound",
"udom83",
"prayut_and_somchaip",
]
from pythainlp.soundex.lk82 import lk82
from pythainlp.soundex.metasound import metasound
from pythainlp.soundex.udom83 import udom83
from pythainlp.soundex.prayut_and_somchaip import prayut_and_somchaip
DEFAULT_SOUNDEX_ENGINE = "udom83"
from pythainlp.soundex.core import soundex
pythainlp-dev/pythainlp/soundex/core.py
# -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Thai soundex
Has four systems to choose from: Udom83 (default), LK82, MetaSound, and Prayut and Somchaip
"""
from pythainlp.soundex.lk82 import lk82
from pythainlp.soundex.metasound import metasound
from pythainlp.soundex.udom83 import udom83
from pythainlp.soundex.prayut_and_somchaip import prayut_and_somchaip
from pythainlp.soundex import DEFAULT_SOUNDEX_ENGINE
# Other Thai soundex systems (not implemented yet): Arun91, KSS97
# [KSS97] https://linux.thai.net/~thep/soundex/soundex.html
def soundex(
text: str, engine: str = DEFAULT_SOUNDEX_ENGINE, length: int = 4
) -> str:
"""
This function converts Thai text into phonetic code.
:param str text: word
:param str engine: soundex engine
:param int length: preferred length of the Soundex code (default is 4)\
for metasound and prayut_and_somchaip only
:return: Soundex code
:rtype: str
:Options for engine:
* *udom83* (default) - Thai soundex algorithm proposed
by Vichit Lorchirachoonkul [#udom83]_
* *lk82* - Thai soundex algorithm proposed by
Wannee Udompanich [#lk82]_
* *metasound* - Thai soundex algorithm based on a combination
of Metaphone and Soundex proposed by Snae & Brückner [#metasound]_
* *prayut_and_somchaip* - Thai-English Cross-Language Transliterated
Word Retrieval using Soundex Technique [#prayut_and_somchaip]_
:Example:
::
from pythainlp.soundex import soundex
soundex("ลัก"), soundex("ลัก", engine='lk82'), \\
soundex("ลัก", engine='metasound')
# output: ('ร100000', 'ร1000', 'ล100')
soundex("รัก"), soundex("รัก", engine='lk82'), \\
soundex("รัก", engine='metasound')
# output: ('ร100000', 'ร1000', 'ร100')
soundex("รักษ์"), soundex("รักษ์", engine='lk82'), \\
soundex("รักษ์", engine='metasound')
# output: ('ร100000', 'ร1000', 'ร100')
soundex("บูรณการ"), soundex("บูรณการ", engine='lk82'), \\
soundex("บูรณการ", engine='metasound')
# output: ('บ931900', 'บE419', 'บ551')
soundex("ปัจจุบัน"), soundex("ปัจจุบัน", engine='lk82'), \\
soundex("ปัจจุบัน", engine='metasound')
# output: ('ป775300', 'ป3E54', 'ป223')
soundex("vp", engine="prayut_and_somchaip")
# output: '11'
soundex("วีพี", engine="prayut_and_somchaip")
# output: '11'
"""
if engine == "lk82":
_soundex = lk82(text)
elif engine == "prayut_and_somchaip":
_soundex = prayut_and_somchaip(text, length=length)
elif engine == "metasound":
_soundex = metasound(text, length=length)
else: # default, use "udom83"
_soundex = udom83(text)
return _soundex
pythainlp-dev/pythainlp/soundex/lk82.py
# -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Thai soundex - LK82 system
Python implementation: Korakot Chaovavanich
https://gist.github.com/korakot/0b772e09340cac2f493868da035597e8
"""
import re
from pythainlp.util import remove_tonemark
_TRANS1 = str.maketrans(
"กขฃคฅฆงจฉชฌซศษสญยฎดฏตณนฐฑฒถทธบปผพภฝฟมรลฬฤฦวหฮอ",
"กกกกกกงจชชชซซซซยยดดตตนนททททททบปพพพฟฟมรรรรรวหหอ",
)
_TRANS2 = str.maketrans(
"กขฃคฅฆงจฉชซฌฎฏฐฑฒดตถทธศษสญณนรลฬฤฦบปพฟภผฝมำยวไใหฮาๅึืเแโุูอ",
"1111112333333333333333333444444445555555667777889AAABCDEEF",
)
# silenced
_RE_KARANT = re.compile(r"จน์|มณ์|ณฑ์|ทร์|ตร์|[ก-ฮ]์|[ก-ฮ][ะ-ู]์")
# signs, symbols, vowel that has no explicit sound
# Paiyannoi, Phinthu, Maiyamok, Maitaikhu, Nikhahit
_RE_SIGN = re.compile(r"[\u0e2f\u0e3a\u0e46\u0e47\u0e4d]")
def lk82(text: str) -> str:
"""
    This function converts Thai text into phonetic code with a
Thai soundex algorithm named **LK82** [#lk82]_.
:param str text: Thai word
:return: LK82 soundex of the given Thai word
:rtype: str
:Example:
::
from pythainlp.soundex import lk82
lk82("ลัก")
# output: 'ร1000'
lk82("รัก")
# output: 'ร1000'
lk82("รักษ์")
# output: 'ร1000'
lk82("บูรณการ")
# output: 'บE419'
lk82("ปัจจุบัน")
# output: 'ป3E54'
"""
if not text or not isinstance(text, str):
return ""
text = remove_tonemark(text) # 4. remove tone marks
    text = _RE_KARANT.sub("", text)  # 4. remove karan (silenced) characters
    text = _RE_SIGN.sub("", text)  # 5. remove signs and symbols with no explicit sound
if not text:
return ""
# 6. encode the first character
res = []
if "ก" <= text[0] <= "ฮ":
res.append(text[0].translate(_TRANS1))
text = text[1:]
else:
if len(text) > 1:
res.append(text[1].translate(_TRANS1))
res.append(text[0].translate(_TRANS2))
text = text[2:]
# encode the rest
    i_v = None  # position of the last separator (vowel)
len_text = len(text)
for i, c in enumerate(text):
if (
c in "\u0e30\u0e31\u0e34\u0e35"
        ):  # 7. plain separator, not encoded: Sara A, Mai Han-Akat, Sara I, Sara II
i_v = i
res.append("")
elif (
c in "\u0e32\u0e36\u0e37\u0e39\u0e45"
        ):  # 8. separator that is also encoded: Sara Aa, Sara Ue, Sara Uee, Sara Uu, Lakkhangyao
i_v = i
res.append(c.translate(_TRANS2))
elif c == "\u0e38": # 9. สระอุ / Sara U
i_v = i
if i == 0 or (text[i - 1] not in "ตธ"):
res.append(c.translate(_TRANS2))
else:
res.append("")
elif c in "\u0e2b\u0e2d": # หอ
if i + 1 < len_text and (
text[i + 1] in "\u0e36\u0e37\u0e38\u0e39"
): # Sara Ue, Sara Uee, Sara U, Sara Uu
res.append(c.translate(_TRANS2))
elif c in "\u0e22\u0e23\u0e24\u0e26\u0e27":
if i_v == i - 1 or (
i + 1 < len_text
and (text[i + 1] in "\u0e36\u0e37\u0e38\u0e39")
): # Sara Ue, Sara Uee, Sara U, Sara Uu
res.append(c.translate(_TRANS2))
else:
res.append(c.translate(_TRANS2)) # 12.
# 13. remove repetitives
res2 = [res[0]]
for i in range(1, len(res)):
if res[i] != res[i - 1]:
res2.append(res[i])
# 14. fill zeros
return ("".join(res2) + "0000")[:5]
pythainlp-dev/pythainlp/soundex/metasound.py
# -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Thai soundex - MetaSound system
References:
Snae & Brückner. (2009). Novel Phonetic Name Matching Algorithm with
a Statistical Ontology for Analysing Names Given in Accordance
with Thai Astrology.
https://pdfs.semanticscholar.org/3983/963e87ddc6dfdbb291099aa3927a0e3e4ea6.pdf
"""
_CONS_THANTHAKHAT = "กขฃคฅฆงจฉชซฌญฎฏฐฑฒณดตถทธนบปผฝพฟภมยรลวศษสหฬอฮ์"
_THANTHAKHAT = "์" # \u0e4c
_C1 = "กขฃคฆฅ" # sound K -> coded letter 1
_C2 = "จฉชฌซฐทฒดฎตสศษ" # D -> 2
_C3 = "ฟฝพผภบป" # B -> 3
_C4 = "ง" # NG -> 4
_C5 = "ลฬรนณฦญ" # N -> 5
_C6 = "ม" # M -> 6
_C7 = "ย" # Y -> 7
_C8 = "ว" # W -> 8
def metasound(text: str, length: int = 4) -> str:
"""
This function converts Thai text into phonetic code with the
    matching technique called **MetaSound**
[#metasound]_ (combination between Soundex and Metaphone algorithms).
MetaSound algorithm was developed specifically for Thai language.
:param str text: Thai text
:param int length: preferred length of the MetaSound code (default is 4)
:return: MetaSound for the given text
:rtype: str
:Example:
::
from pythainlp.soundex.metasound import metasound
metasound("ลัก")
# output: 'ล100'
metasound("รัก")
# output: 'ร100'
metasound("รักษ์")
# output: 'ร100'
metasound("บูรณการ", 5)
# output: 'บ5515'
metasound("บูรณการ", 6))
# output: 'บ55150'
metasound("บูรณการ", 4)
# output: 'บ551'
"""
if not text or not isinstance(text, str):
return ""
# keep only consonants and thanthakhat
chars = []
for ch in text:
if ch in _CONS_THANTHAKHAT:
chars.append(ch)
# remove karan (thanthakhat and a consonant before it)
i = 0
while i < len(chars):
if chars[i] == _THANTHAKHAT:
if i > 0:
chars[i - 1] = " "
chars[i] = " "
i += 1
# retain first consonant, encode the rest
chars = chars[:length]
i = 1
while i < len(chars):
if chars[i] in _C1:
chars[i] = "1"
elif chars[i] in _C2:
chars[i] = "2"
elif chars[i] in _C3:
chars[i] = "3"
elif chars[i] in _C4:
chars[i] = "4"
elif chars[i] in _C5:
chars[i] = "5"
elif chars[i] in _C6:
chars[i] = "6"
elif chars[i] in _C7:
chars[i] = "7"
elif chars[i] in _C8:
chars[i] = "8"
else:
chars[i] = "0"
i += 1
while len(chars) < length:
chars.append("0")
return "".join(chars)
pythainlp-dev/pythainlp/soundex/prayut_and_somchaip.py
# -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Thai-English Cross-Language Transliterated Word Retrieval
using Soundex Technique
References:
Prayut Suwanvisat, Somchai Prasitjutrakul.Thai-English Cross-Language Transliterated Word Retrieval using Soundex Technique. In 1998 [cited 2022 Sep 8]. Available from: https://www.cp.eng.chula.ac.th/~somchai/spj/papers/ThaiText/ncsec98-clir.pdf
"""
from pythainlp import thai_characters
_C0 = "AEIOUHWYอ"
_C1 = "BFPVบฝฟปผพภว"
_C2 = "CGJKQSXZขฃคฅฆฉขฌกจซศษส"
_C3 = "DTฎดฏตฐฑฒถทธ"
_C4 = "Lลฬ"
_C5 = "MNมณน"
_C6 = "Rร"
_C7 = "AEIOUอ"
_C8 = "Hหฮ"
_C1_1 = "Wว"
_C9 = "Yยญ"
_C52 = "ง"
def prayut_and_somchaip(text: str, length: int = 4) -> str:
"""
This function converts English-Thai Cross-Language Transliterated Word into
    phonetic code with the matching technique called **Soundex** [#prayut_and_somchaip]_.
:param str text: English-Thai Cross-Language Transliterated Word
:param int length: preferred length of the Soundex code (default is 4)
:return: Soundex for the given text
:rtype: str
:Example:
::
from pythainlp.soundex.prayut_and_somchaip import prayut_and_somchaip
prayut_and_somchaip("king", 2)
# output: '52'
prayut_and_somchaip("คิง", 2)
# output: '52'
"""
if not text or not isinstance(text, str):
return ""
text = text.upper()
# keep only consonants (English-Thai)
chars = []
for ch in text:
if ch in thai_characters + "ABCDEFGHIJKLMNOPQRSTUVWXYZ":
chars.append(ch)
i = 0
while i < len(chars):
if i == 0 and chars[i] in _C0:
chars[i] = "0"
elif chars[i] in _C1:
chars[i] = "1"
elif chars[i] in _C2:
chars[i] = "2"
elif chars[i] in _C3:
chars[i] = "3"
elif chars[i] in _C4:
chars[i] = "4"
elif chars[i] in _C5:
chars[i] = "5"
elif chars[i] in _C6:
chars[i] = "6"
elif chars[i] in _C52:
chars[i] = "52"
elif chars[i] in _C7 and i != 0:
chars[i] = "7"
elif chars[i] in _C8 and i != 0:
chars[i] = "8"
elif chars[i] in _C1_1 and i != 0:
chars[i] = "1"
elif chars[i] in _C9 and i != 0:
chars[i] = "9"
else:
chars[i] = None
i += 1
chars = list("".join([i for i in chars if i is not None]))
return "".join(chars[-length:])
pythainlp-dev/pythainlp/soundex/sound.py
# -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
from pythainlp.transliterate import pronunciate, transliterate
from pythainlp.tokenize import word_tokenize
import panphon
import panphon.distance
_ft = panphon.FeatureTable()
_dst = panphon.distance.Distance()
def _clean_ipa(ipa: str) -> str:
"""
    Clean IPA by removing tones and spaces between phones
    :param str ipa: IPA text
    :return: IPA with tones and extra spaces removed
:rtype: str
"""
return ipa.replace("˩˩˦","").replace("˥˩","").replace("˨˩","").replace("˦˥","").replace("˧","").replace("˧","").replace(" .",".").replace(". ",".").strip()
def word2audio(word: str) -> str:
"""
Convert word to IPA
:param str word: Thai word
    :return: IPA of the word, with tones removed
:rtype: str
:Example:
::
from pythainlp.soundex.sound import word2audio
word2audio("น้ำ")
# output : 'n aː m .'
"""
_word = word_tokenize(word)
_phone = [pronunciate(w, engine="w2p") for w in _word]
_ipa = [_clean_ipa(transliterate(phone, engine="thaig2p")) for phone in _phone]
return '.'.join(_ipa)
def audio_vector(word:str) -> List[List[int]]:
"""
Convert audio to vector list
:param str word: Thai word
:return: List feature from panphon
:rtype: List[List[int]]
:Example:
::
from pythainlp.soundex.sound import audio_vector
audio_vector("น้ำ")
# output : [[-1, 1, 1, -1, -1, -1, ...]]
"""
return _ft.word_to_vector_list(word2audio(word), numeric=True)
def word_approximation(word:str, list_word:List[str]):
"""
Thai Word Approximation
:param str word: Thai word
    :param List[str] list_word: list of Thai words to compare against
    :return: list of distances between the word and each word in list_word (the smaller the value, the closer)
    :rtype: List[float]
:Example:
::
from pythainlp.soundex.sound import word_approximation
word_approximation("รถ", ["รด", "รส", "รม", "น้ำ"])
# output : [0.0, 0.0, 3.875, 8.375]
"""
_word = word2audio(word)
_list_word = [word2audio(w) for w in list_word]
_distance = [_dst.weighted_feature_edit_distance(_word, w) for w in _list_word]
return _distance
pythainlp-dev/pythainlp/soundex/udom83.py
# -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Thai soundex - Udom83 system
Python implementation: Korakot Chaovavanich
https://gist.github.com/korakot/0b772e09340cac2f493868da035597e8
"""
import re
from pythainlp import thai_consonants
_THANTHAKHAT = "\u0e4c"
_RE_1 = re.compile(r"รร([\u0e40-\u0e44])") # เ-ไ
_RE_2 = re.compile(f"รร([{thai_consonants}][{thai_consonants}\u0e40-\u0e44])")
_RE_3 = re.compile(f"รร([{thai_consonants}][\u0e30-\u0e39\u0e48-\u0e4c])")
_RE_4 = re.compile(r"รร")
_RE_5 = re.compile(f"ไ([{thai_consonants}]ย)")
_RE_6 = re.compile(f"[ไใ]([{thai_consonants}])")
_RE_7 = re.compile(r"\u0e33(ม[\u0e30-\u0e39])")
_RE_8 = re.compile(r"\u0e33ม")
_RE_9 = re.compile(r"\u0e33") # ำ
_RE_10 = re.compile(
f"จน์|มณ์|ณฑ์|ทร์|ตร์|"
f"[{thai_consonants}]{_THANTHAKHAT}|[{thai_consonants}]"
f"[\u0e30-\u0e39]{_THANTHAKHAT}"
)
_RE_11 = re.compile(r"[\u0e30-\u0e4c]")
_TRANS1 = str.maketrans(
"กขฃคฅฆงจฉชฌซศษสฎดฏตฐฑฒถทธณนบปผพภฝฟมญยรลฬฤฦวอหฮ",
"กขขขขขงจชชชสสสสดดตตททททททนนบปพพพฟฟมยยรรรรรวอฮฮ",
)
_TRANS2 = str.maketrans(
"มวำกขฃคฅฆงยญณนฎฏดตศษสบปพภผฝฟหอฮจฉชซฌฐฑฒถทธรฤลฦ",
"0001111112233344444445555666666777778888889999",
)
def udom83(text: str) -> str:
"""
This function converts Thai text into phonetic code with the
Thai soundex algorithm named **Udom83** [#udom83]_.
:param str text: Thai word
:return: Udom83 soundex
:rtype: str
:Example:
::
from pythainlp.soundex import udom83
udom83("ลัก")
# output : 'ล100'
udom83("รัก")
# output: 'ร100'
udom83("รักษ์")
# output: 'ร100'
udom83("บูรณการ")
# output: 'บ5515'
udom83("ปัจจุบัน")
# output: 'ป775300'
"""
if not text or not isinstance(text, str):
return ""
text = _RE_1.sub("ัน\\1", text)
text = _RE_2.sub("ั\\1", text)
text = _RE_3.sub("ัน\\1", text)
text = _RE_4.sub("ัน", text)
text = _RE_5.sub("\\1", text)
text = _RE_6.sub("\\1ย", text)
text = _RE_7.sub("ม\\1", text)
text = _RE_8.sub("ม", text)
text = _RE_9.sub("ม", text)
text = _RE_10.sub("", text)
text = _RE_11.sub("", text)
if not text:
return ""
sd = "".join(
[text[0].translate(_TRANS1), text[1:].translate(_TRANS2), "000000"]
)
return sd[:7]
pythainlp-dev/pythainlp/spell/__init__.py
# -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Spell checking and spelling correction.
"""
__all__ = [
"DEFAULT_SPELL_CHECKER",
"correct",
"spell",
"NorvigSpellChecker",
"spell_sent",
"correct_sent",
]
from pythainlp.spell.pn import NorvigSpellChecker
DEFAULT_SPELL_CHECKER = NorvigSpellChecker()
from pythainlp.spell.core import correct, spell, correct_sent, spell_sent
pythainlp-dev/pythainlp/spell/core.py
# -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Spell checking functions
"""
import itertools
from typing import List
from pythainlp.spell import DEFAULT_SPELL_CHECKER
def spell(word: str, engine: str = "pn") -> List[str]:
"""
    Provides a list of possible correct spellings of the given word.
    The list of words is drawn from the words in the dictionary
    that incur an edit distance value of 1 or 2.
The result is a list of words sorted by their occurrences
in the spelling dictionary in descending order.
:param str word: Word to spell check
:param str engine:
* *pn* - Peter Norvig's algorithm [#norvig_spellchecker]_ (default)
        * *phunspell* - A spell checker utilizing spylls, a port of Hunspell.
* *symspellpy* - symspellpy is a Python port of SymSpell v6.5.
* *tltk* - wrapper for `TLTK <https://pypi.org/project/tltk/>`_.
:return: list of possible correct words within 1 or 2 edit distance and
sorted by frequency of word occurrences in the spelling dictionary
in descending order.
:rtype: list[str]
:Example:
::
from pythainlp.spell import spell
spell("เส้นตรบ", engine="pn")
# output: ['เส้นตรง']
spell("เส้นตรบ")
# output: ['เส้นตรง']
spell("เส้นตรบ", engine="tltk")
# output: ['เส้นตรง']
spell("ครัช")
# output: ['ครับ', 'ครัว', 'รัช', 'ครัม', 'ครัน', 'วรัช', 'ครัส',
# 'ปรัช', 'บรัช', 'ครัง', 'คัช', 'คลัช', 'ครัย', 'ครัด']
spell("กระปิ")
# output: ['กะปิ', 'กระบิ']
spell("สังเกตุ")
# output: ['สังเกต']
spell("เหตการณ")
# output: ['เหตุการณ์']
"""
if engine == "phunspell":
from pythainlp.spell.phunspell import spell as SPELL_CHECKER
text_correct = SPELL_CHECKER(word)
elif engine == "symspellpy":
from pythainlp.spell.symspellpy import spell as SPELL_CHECKER
text_correct = SPELL_CHECKER(word)
elif engine == "tltk":
from pythainlp.spell.tltk import spell as SPELL_CHECKER
text_correct = SPELL_CHECKER(word)
else:
text_correct = DEFAULT_SPELL_CHECKER.spell(word)
return text_correct
def correct(word: str, engine: str = "pn") -> str:
"""
Corrects the spelling of the given word by returning
the correctly spelled word.
:param str word: word to correct spelling
:param str engine:
* *pn* - Peter Norvig's algorithm [#norvig_spellchecker]_ (default)
        * *phunspell* - A spell checker utilizing spylls, a port of Hunspell.
* *symspellpy* - symspellpy is a Python port of SymSpell v6.5.
:return: the corrected word
:rtype: str
:Example:
::
from pythainlp.spell import correct
correct("เส้นตรบ")
# output: 'เส้นตรง'
correct("ครัช")
# output: 'ครับ'
correct("สังเกตุ")
# output: 'สังเกต'
correct("กระปิ")
# output: 'กะปิ'
correct("เหตการณ")
# output: 'เหตุการณ์'
"""
if engine == "phunspell":
from pythainlp.spell.phunspell import correct as SPELL_CHECKER
text_correct = SPELL_CHECKER(word)
elif engine == "symspellpy":
from pythainlp.spell.symspellpy import correct as SPELL_CHECKER
text_correct = SPELL_CHECKER(word)
else:
text_correct = DEFAULT_SPELL_CHECKER.correct(word)
return text_correct
def spell_sent(list_words: List[str], engine: str = "pn") -> List[List[str]]:
"""
    Provides a list of possible correct spellings of a sentence
    :param List[str] list_words: list of words in the sentence
:param str engine:
* *pn* - Peter Norvig's algorithm [#norvig_spellchecker]_ (default)
        * *phunspell* - A spell checker utilizing spylls, a port of Hunspell.
* *symspellpy* - symspellpy is a Python port of SymSpell v6.5.
:return: list of possible correct words
:rtype: List[List[str]]
:Example:
::
from pythainlp.spell import spell_sent
spell_sent(["เด็","อินอร์เน็ต","แรง"],engine='symspellpy')
# output: [['เด็ก', 'อินเทอร์เน็ต', 'แรง']]
"""
if engine == "symspellpy":
from pythainlp.spell.symspellpy import spell_sent as symspellpy_spell
list_new = symspellpy_spell(list_words)
else:
_temp = list(
itertools.product(*[spell(i, engine=engine) for i in list_words])
)
list_new = []
for i in _temp:
_temp2 = []
for j in i:
_temp2.append(j)
list_new.append(_temp2)
return list_new
def correct_sent(list_words: List[str], engine: str = "pn") -> List[str]:
"""
    Corrects the spelling of the given sentence by returning
    the most likely corrected sequence of words.
    :param List[str] list_words: list of words in the sentence
:param str engine:
* *pn* - Peter Norvig's algorithm [#norvig_spellchecker]_ (default)
        * *phunspell* - A spell checker utilizing spylls, a port of Hunspell.
* *symspellpy* - symspellpy is a Python port of SymSpell v6.5.
    :return: the corrected list of words in the sentence
:rtype: List[str]
:Example:
::
from pythainlp.spell import correct_sent
correct_sent(["เด็","อินอร์เน็ต","แรง"],engine='symspellpy')
# output: ['เด็ก', 'อินเทอร์เน็ต', 'แรง']
"""
return spell_sent(list_words, engine=engine)[0]
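For engines other than symspellpy, spell_sent builds the Cartesian product of each word's candidate list (itertools.product above), so the number of returned combinations is the product of the candidate counts and can grow quickly with sentence length. A small illustrative sketch; the exact candidates depend on the spelling dictionary in use.

from pythainlp.spell import spell_sent

combos = spell_sent(["เส้นตรบ", "แรง"])   # default engine: "pn"
# Each inner list is one combination of per-word candidates,
# e.g. [['เส้นตรง', 'แรง'], ...]; len(combos) equals the product of
# the number of candidates found for each input word.
print(combos[0])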
pythainlp-dev/pythainlp/spell/phunspell.py
# -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Phunspell
A pure Python spell checker utilizing spylls, a port of Hunspell.
:See Also:
* \
https://github.com/dvwright/phunspell
"""
from typing import List
import phunspell
pspell = phunspell.Phunspell("th_TH")
def spell(text: str) -> List[str]:
return list(pspell.suggest(text))
def correct(text: str) -> str:
return list(pspell.suggest(text))[0]
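A minimal usage sketch for this wrapper, added for illustration; it assumes the phunspell package and its bundled Thai (th_TH) dictionary are installed, since the Phunspell object is created at import time.

from pythainlp.spell.phunspell import spell, correct

print(spell("เส้นตรบ"))    # list of Hunspell suggestions for the misspelled word
print(correct("เส้นตรบ"))  # the first suggestion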
pythainlp-dev/pythainlp/spell/pn.py
# -*- coding: utf-8 -*-
"""
Spell checker, using Peter Norvig algorithm.
Spelling dictionary can be customized.
Default spelling dictionary is based on Thai National Corpus.
Based on Peter Norvig's Python code from http://norvig.com/spell-correct.html
"""
from collections import Counter
from string import digits
from typing import (
Callable,
Dict,
ItemsView,
Iterable,
List,
Optional,
Set,
Tuple,
Union,
)
from pythainlp import thai_digits, thai_letters
from pythainlp.corpus import tnc
from pythainlp.util import isthaichar
def _no_filter(word: str) -> bool:
return True
def _is_thai_and_not_num(word: str) -> bool:
for ch in word:
if ch != "." and not isthaichar(ch):
return False
if ch in thai_digits or ch in digits:
return False
return True
def _keep(
word_freq: Tuple[str, int],
min_freq: int,
min_len: int,
max_len: int,
dict_filter: Callable[[str], bool],
) -> bool:
"""
Checks whether a given word has the required minimum frequency of min_freq
and its character length is between min_len and max_len (inclusive).
"""
if not word_freq or word_freq[1] < min_freq:
return False
word = word_freq[0]
if not (word and min_len <= len(word) <= max_len and word[0] != "."):
return False
return dict_filter(word)
def _edits1(word: str) -> Set[str]:
"""
Returns a set of words with edit distance of 1 from the input word
"""
splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]
deletes = [L + R[1:] for L, R in splits if R]
transposes = [L + R[1] + R[0] + R[2:] for L, R in splits if len(R) > 1]
replaces = [L + c + R[1:] for L, R in splits if R for c in thai_letters]
inserts = [L + c + R for L, R in splits for c in thai_letters]
return set(deletes + transposes + replaces + inserts)
def _edits2(word: str) -> Set[str]:
"""
Returns a set of words with edit distance of 2 from the input word
"""
return set(e2 for e1 in _edits1(word) for e2 in _edits1(e1))
def _convert_custom_dict(
custom_dict: Union[
Dict[str, int], Iterable[str], Iterable[Tuple[str, int]]
],
min_freq: int,
min_len: int,
max_len: int,
dict_filter: Optional[Callable[[str], bool]],
) -> List[Tuple[str, int]]:
"""
Converts a custom dictionary to a list of (str, int) tuples
"""
if isinstance(custom_dict, dict):
custom_dict = [(word, freq) for word, freq in custom_dict.items()]
i = iter(custom_dict)
first_member = next(i)
if isinstance(first_member, str):
# create tuples of a word with frequency equal to 1,
# and filter word list
custom_dict = [
(word, 1)
for word in custom_dict
if _keep((word, 1), 1, min_len, max_len, dict_filter)
]
elif isinstance(first_member, tuple):
# filter word list
custom_dict = [
word_freq
for word_freq in custom_dict
if _keep(word_freq, min_freq, min_len, max_len, dict_filter)
]
else:
raise TypeError(
"custom_dict must be either Dict[str, int], "
"Iterable[Tuple[str, int]], or Iterable[str]"
)
return custom_dict
class NorvigSpellChecker:
def __init__(
self,
custom_dict: Union[
Dict[str, int], Iterable[str], Iterable[Tuple[str, int]]
] = None,
min_freq: int = 2,
min_len: int = 2,
max_len: int = 40,
dict_filter: Optional[Callable[[str], bool]] = _is_thai_and_not_num,
):
"""
Initializes Peter Norvig's spell checker object.
Spelling dictionary can be customized.
By default, spelling dictionary is from
`Thai National Corpus <http://www.arts.chula.ac.th/ling/tnc/>`_
Basically, Norvig's spell checker will choose the most likely
        spelling correction given a word by searching for candidate
corrected words based on edit distance.
Then, it selects the candidate with
the highest word occurrence probability.
:param str custom_dict: A custom spelling dictionary. This can be:
(1) a dictionary (`dict`), with words (`str`)
as keys and frequencies (`int`) as values;
(2) an iterable (list, tuple, or set) of word
(`str`) and frequency (`int`) tuples:
`(str, int)`; or
(3) an iterable of just words (`str`), without
frequencies -- in this case `1` will be
                                    assigned to every word.
Default is from Thai National Corpus (around
40,000 words).
:param int min_freq: Minimum frequency of a word to keep (default = 2)
:param int min_len: Minimum length (in characters) of a word to keep
(default = 2)
:param int max_len: Maximum length (in characters) of a word to keep
(default = 40)
:param func dict_filter: A function to filter the dictionary.
Default filter removes any word
with number or non-Thai characters.
If no filter is required, use None.
"""
if not custom_dict: # default, use Thai National Corpus
# TODO: #680 change the dict
custom_dict = [(i,j) for i,j in tnc.word_freqs()]
if not dict_filter:
dict_filter = _no_filter
custom_dict = _convert_custom_dict(
custom_dict, min_freq, min_len, max_len, dict_filter
)
self.__WORDS = Counter(dict(custom_dict))
self.__WORDS += Counter() # remove zero and negative counts
self.__WORDS_TOTAL = sum(self.__WORDS.values())
def dictionary(self) -> ItemsView[str, int]:
"""
Returns the spelling dictionary currently used by this spell checker
:return: spelling dictionary of this instance
:rtype: list[tuple[str, int]]
:Example:
::
from pythainlp.spell import NorvigSpellChecker
dictionary= [("หวาน", 30), ("มะนาว", 2), ("แอบ", 3223)]
checker = NorvigSpellChecker(custom_dict=dictionary)
checker.dictionary()
# output: dict_items([('หวาน', 30), ('มะนาว', 2), ('แอบ', 3223)])
"""
return self.__WORDS.items()
def known(self, words: Iterable[str]) -> List[str]:
"""
Returns a list of given words that found in the spelling dictionary
:param list[str] words: A list of words to check if they exist
in the spelling dictionary
:return: intersection of the given words list and words
in the spelling dictionary
:rtype: list[str]
:Example:
::
from pythainlp.spell import NorvigSpellChecker
checker = NorvigSpellChecker()
checker.known(["เพยน", "เพล", "เพลง"])
# output: ['เพล', 'เพลง']
checker.known(['ยกไ', 'ไฟล์ม'])
# output: []
checker.known([])
# output: []
"""
return list(w for w in words if w in self.__WORDS)
def prob(self, word: str) -> float:
"""
Returns the probability of an input word,
according to the spelling dictionary
:param str word: A word to check its probability of occurrence
:return: word occurrence probability
:rtype: float
:Example:
::
from pythainlp.spell import NorvigSpellChecker
checker = NorvigSpellChecker()
checker.prob("ครัช")
# output: 0.0
checker.prob("รัก")
# output: 0.0006959172792052158
checker.prob("น่ารัก")
# output: 9.482306849763902e-05
"""
return self.__WORDS[word] / self.__WORDS_TOTAL
def freq(self, word: str) -> int:
"""
Returns the frequency of an input word,
according to the spelling dictionary
:param str word: A word to check its frequency
:return: frequency of the given word in the spelling dictionary
:rtype: int
:Example:
::
from pythainlp.spell import NorvigSpellChecker
checker = NorvigSpellChecker()
checker.freq("ปัญญา")
# output: 3639
checker.freq("บิญชา")
# output: 0
"""
return self.__WORDS[word]
def spell(self, word: str) -> List[str]:
"""
Returns a list of all correctly-spelled words whose spelling
is similar to the given word by edit distance metrics.
The returned list of words will be sorted by the decreasing
order of word frequencies in the word spelling dictionary.
First, if the input word is spelled-correctly,
this method returns the list of exactly one word which is itself.
Next, this method looks for a list of all correctly-spelled words
        whose edit distance value is 1 from the input word.
        If there is no such word, the search expands to
        a list of words whose edit distance value is 2.
        And if that still fails, a list containing only the input word is returned.
:param str word: A word to check its spelling
:return: list of possible correct words within 1 or 2 edit distance
and sorted by frequency of word occurrence in the
spelling dictionary in descending order.
:rtype: list[str]
:Example:
::
from pythainlp.spell import NorvigSpellChecker
checker = NorvigSpellChecker()
checker.spell("เส้นตรบ")
# output: ['เส้นตรง']
checker.spell("ครัช")
# output: ['ครับ', 'ครัว', 'รัช', 'ครัม', 'ครัน',
# 'วรัช', 'ครัส', 'ปรัช', 'บรัช', 'ครัง',
#'คัช', 'คลัช', 'ครัย', 'ครัด']
"""
if not word:
return [""]
candidates = (
self.known([word])
or self.known(_edits1(word))
or self.known(_edits2(word))
or [word]
)
candidates.sort(key=self.freq, reverse=True)
return candidates
def correct(self, word: str) -> str:
"""
Returns the most possible word, using the probability from
the spelling dictionary
:param str word: A word to correct its spelling
:return: the correct spelling of the given word
:rtype: str
:Example:
::
from pythainlp.spell import NorvigSpellChecker
checker = NorvigSpellChecker()
checker.correct("ปัญชา")
# output: 'ปัญหา'
checker.correct("บิญชา")
# output: 'บัญชา'
checker.correct("มิตรภาบ")
# output: 'มิตรภาพ'
"""
if not word:
return ""
# Check for numeric type
try:
if "." in word:
float(word)
else:
int(word)
return word
except ValueError:
pass
return self.spell(word)[0]
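As _convert_custom_dict above shows, NorvigSpellChecker accepts a custom dictionary in three shapes: a dict of word to frequency, an iterable of (word, frequency) tuples, or a plain iterable of words (each assigned frequency 1). A short sketch of the three equivalent constructions, reusing the word list from the dictionary() docstring:

from pythainlp.spell import NorvigSpellChecker

as_dict = NorvigSpellChecker(custom_dict={"หวาน": 30, "มะนาว": 2, "แอบ": 3223})
as_tuples = NorvigSpellChecker(custom_dict=[("หวาน", 30), ("มะนาว", 2), ("แอบ", 3223)])
as_words = NorvigSpellChecker(custom_dict=["หวาน", "มะนาว", "แอบ"])  # every word gets frequency 1

print(dict(as_tuples.dictionary()))
# {'หวาน': 30, 'มะนาว': 2, 'แอบ': 3223}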
pythainlp-dev/pythainlp/spell/symspellpy.py
# -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
symspellpy
symspellpy is a Python port of SymSpell v6.5.
We used unigram & bigram from Thai National Corpus (TNC).
:See Also:
* \
https://github.com/mammothb/symspellpy
"""
from typing import List
from symspellpy import SymSpell, Verbosity
from pythainlp.corpus import get_corpus_path
from pythainlp.corpus import path_pythainlp_corpus
_UNIGRAM = "tnc_freq.txt"
_BIGRAM = "tnc_bigram_word_freqs"
sym_spell = SymSpell()
sym_spell.load_dictionary(
path_pythainlp_corpus(_UNIGRAM), 0, 1, separator="\t", encoding="utf-8-sig"
)
sym_spell.load_bigram_dictionary(
get_corpus_path(_BIGRAM), 0, 2, separator="\t", encoding="utf-8-sig"
)
def spell(text: str, max_edit_distance: int = 2) -> List[str]:
return [
str(i).split(",")[0]
for i in list(
sym_spell.lookup(
text, Verbosity.CLOSEST, max_edit_distance=max_edit_distance
)
)
]
def correct(text: str, max_edit_distance: int = 1) -> str:
return spell(text, max_edit_distance=max_edit_distance)[0]
def spell_sent(list_words: List[str], max_edit_distance: int = 2) -> List[str]:
_temp = [
str(i).split(",")[0].split(" ")
for i in list(
sym_spell.lookup_compound(
" ".join(list_words),
split_by_space=True,
max_edit_distance=max_edit_distance,
)
)
]
list_new = []
for i in _temp:
list_new.append(i)
return list_new
def correct_sent(list_words: List[str], max_edit_distance=1) -> List[str]:
return [
i[0]
for i in spell_sent(list_words, max_edit_distance=max_edit_distance)
]
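A sketch of calling this module directly rather than through pythainlp.spell.core, added for illustration; it assumes the symspellpy package is installed and the TNC unigram/bigram corpora can be loaded when the module is imported. max_edit_distance is the only knob these wrappers expose.

from pythainlp.spell.symspellpy import spell, spell_sent

print(spell("เส้นตรบ", max_edit_distance=2))     # ranked candidates from SymSpell lookup
print(spell_sent(["เด็", "อินอร์เน็ต", "แรง"]))   # compound correction over the whole word list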
pythainlp-dev/pythainlp/spell/tltk.py
# -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
TLTK
Thai Language Toolkit
:See Also:
* \
https://pypi.org/project/tltk/
"""
try:
from tltk.nlp import spell_candidates
except ImportError:
raise ImportError("Not found tltk! Please install tltk by pip install tltk")
from typing import List
def spell(text: str) -> List[str]:
return spell_candidates(text)
pythainlp-dev/pythainlp/summarize/__init__.py
# -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Text summarization
"""
__all__ = ["summarize", "extract_keywords"]
DEFAULT_SUMMARIZE_ENGINE = "frequency"
CPE_KMUTT_THAI_SENTENCE_SUM = "mt5-cpe-kmutt-thai-sentence-sum"
DEFAULT_KEYWORD_EXTRACTION_ENGINE = "keybert"
from pythainlp.summarize.core import summarize, extract_keywords
pythainlp-dev/pythainlp/summarize/core.py
# -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Text summarization and Keyword extraction
"""
from typing import List, Iterable, Optional, Tuple
from pythainlp.summarize import (
DEFAULT_SUMMARIZE_ENGINE,
CPE_KMUTT_THAI_SENTENCE_SUM,
DEFAULT_KEYWORD_EXTRACTION_ENGINE,
)
from pythainlp.summarize.freq import FrequencySummarizer
from pythainlp.tokenize import sent_tokenize
def summarize(
text: str,
n: int = 1,
engine: str = DEFAULT_SUMMARIZE_ENGINE,
tokenizer: str = "newmm",
) -> List[str]:
"""
This function summarizes text based on frequency of words.
    Under the hood, this function first tokenizes sentences from the given
    text with :func:`pythainlp.tokenize.sent_tokenize`.
    Then, it computes frequencies of tokenized words
(with :func:`pythainlp.tokenize.word_tokenize`) in all sentences
and normalized with maximum word frequency. The words with normalized
    frequency that is less than 0.1 or greater than 0.9 will be
filtered out from frequency dictionary. Finally, it picks *n* sentences
with highest sum of normalized frequency from all words
in the sentence and also appear in the frequency dictionary.
:param str text: text to be summarized
:param int n: number of sentences to be included in the summary
By default, n is *1* (effective for frequency engine only)
:param str engine: text summarization engine (By default: *frequency*).
:param str tokenizer: word tokenizer engine name (refer to
:func:`pythainlp.tokenize.word_tokenize`).
By default, tokenizer is *newmm*
(effective for frequency engine only)
:return: list of selected sentences
**Options for engine**
* *frequency* (default) - frequency of words
* *mt5* - mT5-small model
* *mt5-small* - mT5-small model
* *mt5-base* - mT5-base model
* *mt5-large* - mT5-large model
* *mt5-xl* - mT5-xl model
* *mt5-xxl* - mT5-xxl model
* *mt5-cpe-kmutt-thai-sentence-sum* - mT5 Thai sentence summarization by CPE KMUTT
:Example:
::
from pythainlp.summarize import summarize
text = '''
ทำเนียบท่าช้าง หรือ วังถนนพระอาทิตย์
ตั้งอยู่บนถนนพระอาทิตย์ เขตพระนคร กรุงเทพมหานคร
เดิมเป็นบ้านของเจ้าพระยามหาโยธา (ทอเรียะ คชเสนี)
บุตรเจ้าพระยามหาโยธานราธิบดีศรีพิชัยณรงค์ (พญาเจ่ง)
ต้นสกุลคชเสนี เชื้อสายมอญ เจ้าพระยามหาโยธา (ทอเรีย)
เป็นปู่ของเจ้าจอมมารดากลิ่นในพระบาทสมเด็จพระจอมเกล้าเจ้าอยู่หัว
และเป็นมรดกตกทอดมาถึง พระเจ้าบรมวงศ์เธอ กรมพระนเรศรวรฤทธิ์
(พระองค์เจ้ากฤดาภินิหาร)
ต่อมาในรัชสมัยพระบาทสมเด็จพระจุลจอมเกล้าเจ้าอยู่หัวโปรดเกล้าฯ
ให้สร้างตำหนัก 2 ชั้น
เป็นที่ประทับของพระเจ้าบรมวงศ์เธอ
กรมพระนเรศวรฤทิธิ์และเจ้าจอมมารดา
ต่อมาเรียกอาคารหลักนี้ว่า ตำหนักเดิม
'''
summarize(text, n=1)
# output: ['บุตรเจ้าพระยามหาโยธานราธิบดีศรีพิชัยณรงค์']
summarize(text, n=3)
# output: ['บุตรเจ้าพระยามหาโยธานราธิบดีศรีพิชัยณรงค์',
# 'เดิมเป็นบ้านของเจ้าพระยามหาโยธา',
# 'เจ้าพระยามหาโยธา']
summarize(text, engine="mt5-small")
# output: ['<extra_id_0> ท่าช้าง หรือ วังถนนพระอาทิตย์
# เขตพระนคร กรุงเทพมหานคร ฯลฯ ดังนี้:
# ที่อยู่ - ศิลปวัฒนธรรม']
text = "ถ้าพูดถึงขนมหวานในตำนานที่ชื่นใจที่สุดแล้วละก็ต้องไม่พ้น น้ำแข็งใส แน่ๆ เพราะว่าเป็นอะไรที่ชื่นใจสุดๆ"
summarize(text, engine="mt5-cpe-kmutt-thai-sentence-sum")
# output: ['น้ําแข็งใสเป็นอะไรที่ชื่นใจที่สุด']
"""
if not text or not isinstance(text, str):
return []
sents = []
if engine == DEFAULT_SUMMARIZE_ENGINE:
sents = FrequencySummarizer().summarize(text, n, tokenizer)
elif engine == CPE_KMUTT_THAI_SENTENCE_SUM:
from .mt5 import mT5Summarizer
sents = mT5Summarizer(
pretrained_mt5_model_name=CPE_KMUTT_THAI_SENTENCE_SUM, min_length=5
).summarize(text)
elif engine.startswith("mt5-") or engine == "mt5":
size = engine.replace("mt5-", "")
from .mt5 import mT5Summarizer
sents = mT5Summarizer(model_size=size).summarize(text)
else: # if engine not found, return first n sentences
sents = sent_tokenize(text, engine="whitespace+newline")[:n]
return sents
def extract_keywords(
text: str,
keyphrase_ngram_range: Tuple[int, int] = (1, 2),
max_keywords: int = 5,
min_df: int = 1,
engine: str = DEFAULT_KEYWORD_EXTRACTION_ENGINE,
tokenizer: str = "newmm",
stop_words: Optional[Iterable[str]] = None,
) -> List[str]:
"""
This function returns most-relevant keywords (and/or keyphrases) from the input document.
Each algorithm may produce completely different keywords from each other,
so please be careful when choosing the algorithm.
*Note*: Calling :func: `extract_keywords()` is expensive. For repetitive use of KeyBERT (the default engine),
creating KeyBERT object is highly recommended.
:param str text: text to be summarized
:param Tuple[int, int] keyphrase_ngram_range: Number of token units to be defined as keyword.
The token unit varies w.r.t. `tokenizer_engine`.
For instance, (1, 1) means each token (unigram) can be a keyword (e.g. "เสา", "ไฟฟ้า"),
(1, 2) means one and two consecutive tokens (unigram and bigram) can be keywords
(e.g. "เสา", "ไฟฟ้า", "เสาไฟฟ้า") (default: (1, 2))
:param int max_keywords: Number of maximum keywords to be returned. (default: 5)
:param int min_df: Minimum frequency required to be a keyword. (default: 1)
:param str engine: Name of algorithm to use for keyword extraction. (default: 'keybert')
:param str tokenizer: Name of tokenizer engine to use.
        Refer to options in :func:`pythainlp.tokenize.word_tokenize` (default: 'newmm')
:param Optional[Iterable[str]] stop_words: A list of stop words (a.k.a words to be ignored).
If not specified, :func:`pythainlp.corpus.thai_stopwords` is used. (default: None)
:return: list of keywords
**Options for engine**
* *keybert* (default) - KeyBERT keyword extraction algorithm
* *frequency* - frequency of words
:Example:
::
from pythainlp.summarize import extract_keywords
text = '''
อาหาร หมายถึง ของแข็งหรือของเหลว
ที่กินหรือดื่มเข้าสู่ร่างกายแล้ว
จะทำให้เกิดพลังงานและความร้อนแก่ร่างกาย
ทำให้ร่างกายเจริญเติบโต
ซ่อมแซมส่วนที่สึกหรอ ควบคุมการเปลี่ยนแปลงต่างๆ ในร่างกาย
ช่วยทำให้อวัยวะต่างๆ ทำงานได้อย่างปกติ
อาหารจะต้องไม่มีพิษและไม่เกิดโทษต่อร่างกาย
'''
keywords = extract_keywords(text)
# output: ['อวัยวะต่างๆ',
# 'ซ่อมแซมส่วน',
# 'เจริญเติบโต',
# 'ควบคุมการเปลี่ยนแปลง',
# 'มีพิษ']
keywords = extract_keywords(text, max_keywords=10)
# output: ['อวัยวะต่างๆ',
# 'ซ่อมแซมส่วน',
# 'เจริญเติบโต',
# 'ควบคุมการเปลี่ยนแปลง',
# 'มีพิษ',
# 'ทำให้ร่างกาย',
# 'ร่างกายเจริญเติบโต',
# 'จะทำให้เกิด',
# 'มีพิษและ',
# 'เกิดโทษ']
"""
def rank_by_frequency(
text: str,
max_keywords: int = 5,
min_df: int = 5,
tokenizer: str = "newmm",
stop_words: Optional[Iterable[str]] = None,
):
from pythainlp.util.keywords import rank
from pythainlp.tokenize import word_tokenize
tokens = word_tokenize(text, engine=tokenizer, keep_whitespace=False)
use_custom_stop_words = stop_words is not None
if use_custom_stop_words:
tokens = [token for token in tokens if token not in stop_words]
word_rank = rank(tokens, exclude_stopwords=not use_custom_stop_words)
keywords = [
kw
for kw, cnt in word_rank.most_common(max_keywords)
if cnt >= min_df
]
return keywords
engines = ["keybert", "frequency"]
if engine == "keybert":
from .keybert import KeyBERT
keywords = KeyBERT().extract_keywords(
text,
keyphrase_ngram_range=keyphrase_ngram_range,
max_keywords=max_keywords,
min_df=min_df,
tokenizer=tokenizer,
return_similarity=False,
stop_words=stop_words,
)
elif engine == "frequency":
return rank_by_frequency(
text,
max_keywords=max_keywords,
min_df=min_df,
tokenizer=tokenizer,
stop_words=stop_words,
)
else:
# currently not supported
raise ValueError(
f"Keyword extractor {repr(engine)} is currently not supported. "
f"Use one of {engines}."
)
return keywords
pythainlp-dev/pythainlp/summarize/freq.py
# -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Summarization by frequency of words
"""
from collections import defaultdict
from heapq import nlargest
from string import punctuation
from typing import List
from pythainlp.corpus import thai_stopwords
from pythainlp.tokenize import sent_tokenize, word_tokenize
_STOPWORDS = thai_stopwords()
class FrequencySummarizer:
def __init__(self, min_cut: float = 0.1, max_cut: float = 0.9):
self.__min_cut = min_cut
self.__max_cut = max_cut
self.__stopwords = set(punctuation).union(_STOPWORDS)
@staticmethod
def __rank(ranking, n: int):
return nlargest(n, ranking, key=ranking.get)
def __compute_frequencies(
self, word_tokenized_sents: List[List[str]]
) -> defaultdict:
word_freqs = defaultdict(int)
for sent in word_tokenized_sents:
for word in sent:
if word not in self.__stopwords:
word_freqs[word] += 1
max_freq = float(max(word_freqs.values()))
for w in list(word_freqs):
word_freqs[w] = word_freqs[w] / max_freq
if (
word_freqs[w] >= self.__max_cut
or word_freqs[w] <= self.__min_cut
):
del word_freqs[w]
return word_freqs
def summarize(
self, text: str, n: int, tokenizer: str = "newmm"
) -> List[str]:
sents = sent_tokenize(text, engine="whitespace+newline")
word_tokenized_sents = [
word_tokenize(sent, engine=tokenizer) for sent in sents
]
self.__freq = self.__compute_frequencies(word_tokenized_sents)
ranking = defaultdict(int)
for i, sent in enumerate(word_tokenized_sents):
for w in sent:
if w in self.__freq:
ranking[i] += self.__freq[w]
summaries_idx = self.__rank(ranking, n)
return [sents[j] for j in summaries_idx]
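# Example usage (illustrative sketch; `text` stands for any longer Thai document,
# and sentences are split on whitespace/newlines by sent_tokenize above):
#     summarizer = FrequencySummarizer()
#     summary_sents = summarizer.summarize(text, n=2)
#     # -> up to n sentences ranked by the normalized frequency of their words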
| 2,529 | 32.289474 | 74 | py |
pythainlp-dev/pythainlp/summarize/keybert.py | pythainlp-dev/pythainlp/summarize/keybert.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Minimal re-implementation of KeyBERT.
KeyBERT is a minimal and easy-to-use keyword extraction technique
that leverages BERT embeddings to create keywords and keyphrases
that are most similar to a document.
https://github.com/MaartenGr/KeyBERT
"""
from typing import List, Optional, Iterable, Tuple, Union
from collections import Counter
import numpy as np
from transformers import pipeline
from pythainlp.corpus import thai_stopwords
from pythainlp.tokenize import word_tokenize
class KeyBERT:
def __init__(
self, model_name: str = "airesearch/wangchanberta-base-att-spm-uncased"
):
self.ft_pipeline = pipeline(
"feature-extraction",
tokenizer=model_name,
model=model_name,
revision="main",
)
def extract_keywords(
self,
text: str,
keyphrase_ngram_range: Tuple[int, int] = (1, 2),
max_keywords: int = 5,
min_df: int = 1,
tokenizer: str = "newmm",
return_similarity=False,
stop_words: Optional[Iterable[str]] = None,
) -> Union[List[str], List[Tuple[str, float]]]:
"""
Extract Thai keywords and/or keyphrases with KeyBERT algorithm.
See https://github.com/MaartenGr/KeyBERT.
:param str text: text to be summarized
:param Tuple[int, int] keyphrase_ngram_range: Number of token units to be defined as keyword.
The token unit varies w.r.t. `tokenizer`.
For instance, (1, 1) means each token (unigram) can be a keyword (e.g. "เสา", "ไฟฟ้า"),
(1, 2) means one and two consecutive tokens (unigram and bigram) can be keywords
(e.g. "เสา", "ไฟฟ้า", "เสาไฟฟ้า") (default: (1, 2))
:param int max_keywords: Number of maximum keywords to be returned. (default: 5)
:param int min_df: Minimum frequency required to be a keyword. (default: 1)
:param str tokenizer: Name of tokenizer engine to use.
Refer to options in :func:`pythainlp.tokenize.word_tokenize` (default: 'newmm')
:param bool return_similarity: If `True`, return keyword scores. (default: False)
:param Optional[Iterable[str]] stop_words: A list of stop words (a.k.a words to be ignored).
If not specified, :func:`pythainlp.corpus.thai_stopwords` is used. (default: None)
:return: a list of keywords, or a list of (keyword, score) tuples if `return_similarity` is `True`
:Example:
::
from pythainlp.summarize.keybert import KeyBERT
text = '''
อาหาร หมายถึง ของแข็งหรือของเหลว
ที่กินหรือดื่มเข้าสู่ร่างกายแล้ว
จะทำให้เกิดพลังงานและความร้อนแก่ร่างกาย
ทำให้ร่างกายเจริญเติบโต
ซ่อมแซมส่วนที่สึกหรอ ควบคุมการเปลี่ยนแปลงต่างๆ ในร่างกาย
ช่วยทำให้อวัยวะต่างๆ ทำงานได้อย่างปกติ
อาหารจะต้องไม่มีพิษและไม่เกิดโทษต่อร่างกาย
'''
kb = KeyBERT()
keywords = kb.extract_keywords(text)
# output: ['อวัยวะต่างๆ',
# 'ซ่อมแซมส่วน',
# 'เจริญเติบโต',
# 'ควบคุมการเปลี่ยนแปลง',
# 'มีพิษ']
keywords = kb.extract_keywords(text, max_keywords=10, return_similarity=True)
# output: [('อวัยวะต่างๆ', 0.3228477063109462),
# ('ซ่อมแซมส่วน', 0.31320597838000375),
# ('เจริญเติบโต', 0.29115434699705506),
# ('ควบคุมการเปลี่ยนแปลง', 0.2678430841321016),
# ('มีพิษ', 0.24996827960821494),
# ('ทำให้ร่างกาย', 0.23876962942443258),
# ('ร่างกายเจริญเติบโต', 0.23191285218852364),
# ('จะทำให้เกิด', 0.22425422716846247),
# ('มีพิษและ', 0.22162962875299588),
# ('เกิดโทษ', 0.20773497763458507)]
"""
try:
text = text.strip()
except AttributeError:
raise AttributeError(
f"Unable to process data of type {type(text)}. "
f"Please provide input of string type."
)
if not text:
return []
# generate all list of keyword / keyphrases
stop_words_ = stop_words if stop_words else thai_stopwords()
kw_candidates = _generate_ngrams(
text, keyphrase_ngram_range, min_df, tokenizer, stop_words_
)
# create document and word vectors
doc_vector = self.embed(text)
kw_vectors = self.embed(kw_candidates)
# rank keywords
keywords = _rank_keywords(
doc_vector, kw_vectors, kw_candidates, max_keywords
)
if return_similarity:
return keywords
else:
return [kw for kw, _ in keywords]
def embed(self, docs: Union[str, List[str]]) -> np.ndarray:
"""
Create an embedding of each input in `docs` by averaging vectors from the last hidden layer.
"""
embs = self.ft_pipeline(docs)
if isinstance(docs, str) or len(docs) == 1:
# embed doc. return shape = [1, hidden_size]
emb_mean = np.array(embs).mean(axis=1)
else:
# mean of embedding of each word
# return shape = [len(docs), hidden_size]
emb_mean = np.stack(
[np.array(emb[0]).mean(axis=0) for emb in embs]
)
return emb_mean
def _generate_ngrams(
doc: str,
keyphrase_ngram_range: Tuple[int, int],
min_df: int,
tokenizer_engine: str,
stop_words: Iterable[str],
) -> List[str]:
assert keyphrase_ngram_range[0] >= 1, (
f"`keyphrase_ngram_range` must start from 1. "
f"current value={keyphrase_ngram_range}."
)
assert keyphrase_ngram_range[0] <= keyphrase_ngram_range[1], (
f"The value first argument of `keyphrase_ngram_range` must not exceed the second. "
f"current value={keyphrase_ngram_range}."
)
def _join_ngram(ngrams: List[Tuple[str, str]]) -> List[str]:
ngrams_joined = []
for ng in ngrams:
joined = "".join(ng)
if joined.strip() == joined:
# ngram must not start or end with whitespace as this may cause duplication.
ngrams_joined.append(joined)
return ngrams_joined
words = word_tokenize(doc, engine=tokenizer_engine)
all_grams = []
ngram_range = (keyphrase_ngram_range[0], keyphrase_ngram_range[1] + 1)
for n in range(*ngram_range):
if n == 1:
# filter out space
ngrams = [word for word in words if word.strip()]
else:
ngrams_tuple = zip(*[words[i:] for i in range(n)])
ngrams = _join_ngram(ngrams_tuple)
ngrams_cnt = Counter(ngrams)
ngrams = [
word
for word, freq in ngrams_cnt.items()
if (freq >= min_df) and (word not in stop_words)
]
all_grams.extend(ngrams)
return all_grams
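# Sketch of the candidate generation above (the token list is a hypothetical
# example): if word_tokenize(doc) returns ["แมว", "กิน", "ปลา"] and
# keyphrase_ngram_range is (1, 2), the unigram candidates are "แมว", "กิน",
# "ปลา" and the bigram candidates are "แมวกิน" and "กินปลา"; candidates whose
# frequency is below min_df or that appear in stop_words are then dropped.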
def _rank_keywords(
doc_vector: np.ndarray,
word_vectors: np.ndarray,
keywords: List[str],
max_keywords: int,
) -> List[Tuple[str, float]]:
def l2_norm(v: np.ndarray) -> np.ndarray:
vec_size = v.shape[1]
result = np.divide(
v,
np.linalg.norm(v, axis=1).reshape(-1, 1).repeat(vec_size, axis=1),
)
assert np.isclose(
np.linalg.norm(result, axis=1), 1
).all(), "Cannot normalize a vector to unit vector."
return result
def cosine_sim(a: np.ndarray, b: np.ndarray) -> np.ndarray:
return (np.matmul(a, b.T).T).sum(axis=1)
doc_vector = l2_norm(doc_vector)
word_vectors = l2_norm(word_vectors)
cosine_sims = cosine_sim(doc_vector, word_vectors)
ranking_desc = np.argsort(-cosine_sims)
final_ranks = [
(keywords[r], cosine_sims[r]) for r in ranking_desc[:max_keywords]
]
return final_ranks
| 8,572 | 34.720833 | 119 | py |
pythainlp-dev/pythainlp/summarize/mt5.py | pythainlp-dev/pythainlp/summarize/mt5.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Summarization by mT5 model
"""
from transformers import T5Tokenizer, MT5ForConditionalGeneration
from typing import List
from pythainlp.summarize import CPE_KMUTT_THAI_SENTENCE_SUM
class mT5Summarizer:
def __init__(
self,
model_size: str = "small",
num_beams: int = 4,
no_repeat_ngram_size: int = 2,
min_length: int = 30,
max_length: int = 100,
skip_special_tokens: bool = True,
pretrained_mt5_model_name: str = None,
):
model_name = ""
if pretrained_mt5_model_name is None:
if model_size not in ["small", "base", "large", "xl", "xxl"]:
raise ValueError(
f"""model_size \"{model_size}\" not found.
It might be a typo; valid sizes are small, base, large, xl and xxl."""
)
model_name = f"google/mt5-{model_size}"
else:
if pretrained_mt5_model_name == CPE_KMUTT_THAI_SENTENCE_SUM:
model_name = f"thanathorn/{CPE_KMUTT_THAI_SENTENCE_SUM}"
else:
model_name = pretrained_mt5_model_name
self.model_name = model_name
self.model = MT5ForConditionalGeneration.from_pretrained(model_name)
self.tokenizer = T5Tokenizer.from_pretrained(model_name)
self.num_beams = num_beams
self.no_repeat_ngram_size = no_repeat_ngram_size
self.min_length = min_length
self.max_length = max_length
self.skip_special_tokens = skip_special_tokens
def summarize(self, text: str) -> List[str]:
preprocess_text = text.strip().replace("\n", "")
if self.model_name == f"thanathorn/{CPE_KMUTT_THAI_SENTENCE_SUM}":
t5_prepared_Text = "simplify: " + preprocess_text
else:
t5_prepared_Text = "summarize: " + preprocess_text
tokenized_text = self.tokenizer.encode(
t5_prepared_Text, return_tensors="pt"
)
summary_ids = self.model.generate(
tokenized_text,
num_beams=self.num_beams,
no_repeat_ngram_size=self.no_repeat_ngram_size,
min_length=self.min_length,
max_length=self.max_length,
early_stopping=True,
)
output = self.tokenizer.decode(
summary_ids[0], skip_special_tokens=self.skip_special_tokens
)
return [output]
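# Example usage (illustrative sketch; model weights are downloaded on first use
# and the generated summary depends on the chosen checkpoint):
#     summarizer = mT5Summarizer(model_size="small")
#     summarizer.summarize("ข้อความภาษาไทยที่ต้องการสรุป")
#     # -> a single-element list containing the generated summary string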
| 3,006 | 37.551282 | 79 | py |
pythainlp-dev/pythainlp/tag/__init__.py | pythainlp-dev/pythainlp/tag/__init__.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Linguistic and other taggers.
Tagging each token in a sentence with supplementary information,
such as its part-of-speech (POS) tag, and named entity (NE) tag.
"""
__all__ = [
"PerceptronTagger",
"pos_tag",
"pos_tag_sents",
"tag_provinces",
"chunk_parse",
"NER",
"NNER",
]
from pythainlp.tag.locations import tag_provinces
from pythainlp.tag.pos_tag import pos_tag, pos_tag_sents
from pythainlp.tag._tag_perceptron import PerceptronTagger
from pythainlp.tag.chunk import chunk_parse
from pythainlp.tag.named_entity import NER, NNER
| 1,181 | 30.945946 | 74 | py |
pythainlp-dev/pythainlp/tag/_tag_perceptron.py | pythainlp-dev/pythainlp/tag/_tag_perceptron.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Perceptron Tagger.
This tagger is a port of the Textblob Averaged Perceptron Tagger
Author: Matthew Honnibal <[email protected]>,
Long Duong <[email protected]> (NLTK port)
Wannaphong Phatthiyaphaibun <[email protected]> (PyThaiNLP port)
URL: <https://github.com/sloria/textblob-aptagger>
<https://nltk.org/>
Copyright 2013 Matthew Honnibal
NLTK modifications Copyright 2015 The NLTK Project
PyThaiNLP modifications Copyright 2020 PyThaiNLP Project
This tagger is provided under the terms of the MIT License.
"""
import json
from collections import defaultdict
from typing import Dict, Iterable, List, Tuple, Union
class AveragedPerceptron(object):
"""
An averaged perceptron, as implemented by Matthew Honnibal.
See more implementation details here:
http://honnibal.wordpress.com/2013/09/11/a-good-part-of-speechpos-tagger-in-about-200-lines-of-python/
"""
def __init__(self) -> None:
# Each feature gets its own weight vector,
# so weights is a dict-of-dicts
self.weights = {}
self.classes = set()
# The accumulated values, for the averaging. These will be keyed by
# feature/class tuples
self._totals = defaultdict(int)
# The last time the feature was changed, for the averaging. Also
# keyed by feature/class tuples
# (tstamps is short for timestamps)
self._tstamps = defaultdict(int)
# Number of instances seen
self.i = 0
def predict(self, features: Dict):
"""
Dot-product the features and current weights and return the best
label.
"""
scores = defaultdict(float)
for feat, value in features.items():
if feat not in self.weights or value == 0:
continue
weights = self.weights[feat]
for label, weight in weights.items():
scores[label] += value * weight
# Do a secondary alphabetic sort, for stability
return max(self.classes, key=lambda label: (scores[label], label))
def update(self, truth, guess, features: Dict) -> None:
"""Update the feature weights."""
def upd_feat(c, f, w, v):
param = (f, c)
self._totals[param] += (self.i - self._tstamps[param]) * w
self._tstamps[param] = self.i
self.weights[f][c] = w + v
self.i += 1
if truth == guess:
return
for f in features:
weights = self.weights.setdefault(f, {})
upd_feat(truth, f, weights.get(truth, 0.0), 1.0)
upd_feat(guess, f, weights.get(guess, 0.0), -1.0)
def average_weights(self) -> None:
"""Average weights from all iterations."""
for feat, weights in self.weights.items():
new_feat_weights = {}
for clas, weight in weights.items():
param = (feat, clas)
total = self._totals[param]
total += (self.i - self._tstamps[param]) * weight
averaged = round(total / float(self.i), 3)
if averaged:
new_feat_weights[clas] = averaged
self.weights[feat] = new_feat_weights
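# Minimal sketch of how the perceptron above is driven (the feature dicts here
# are hypothetical; real features come from PerceptronTagger._get_features below):
#     ap = AveragedPerceptron()
#     ap.classes = {"N", "V"}
#     feats = {"bias": 1, "i word": "แมว"}
#     guess = ap.predict(feats)     # best label under the current weights
#     ap.update("N", guess, feats)  # reward "N", penalize the guess if it was wrong
#     ap.average_weights()          # average the weights once training is done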
class PerceptronTagger:
"""
Greedy Averaged Perceptron tagger, as implemented by Matthew Honnibal.
See more implementation details here:
http://honnibal.wordpress.com/2013/09/11/a-good-part-of-speechpos-tagger-in-about-200-lines-of-python/
>>> from pythainlp.tag import PerceptronTagger
>>> tagger = PerceptronTagger()
>>> data = [
[("คน", "N"), ("เดิน", "V")],
[("แมว", "N"), ("เดิน", "V")],
[("คน", "N"), ("วิ่ง", "V")],
[("ปลา", "N"), ("ว่าย", "V")],
[("นก", "N"), ("บิน", "V")],
]
>>> tagger.train(data)
>>> tagger.tag(["นก", "เดิน"])
[('นก', 'N'), ('เดิน', 'V')]
"""
START = ["-START-", "-START2-"]
END = ["-END-", "-END2-"]
AP_MODEL_LOC = ""
def __init__(self, path: str = "") -> None:
"""
:param str path: model path
"""
self.model = AveragedPerceptron()
self.tagdict = {}
self.classes = set()
if path != "":
self.AP_MODEL_LOC = path
self.load(self.AP_MODEL_LOC)
def tag(self, tokens: Iterable[str]) -> List[Tuple[str, str]]:
"""Tags a string `tokens`."""
prev, prev2 = self.START
output = []
context = self.START + [self._normalize(w) for w in tokens] + self.END
for i, word in enumerate(tokens):
tag = self.tagdict.get(word)
if not tag:
features = self._get_features(i, word, context, prev, prev2)
tag = self.model.predict(features)
output.append((word, tag))
prev2 = prev
prev = tag
return output
def train(
self,
sentences: Iterable[Iterable[Tuple[str, str]]],
save_loc: Union[str, None] = None,
nr_iter: int = 5,
) -> None:
"""
Train a model from sentences, and save it at ``save_loc``.
``nr_iter`` controls the number of Perceptron training iterations.
:param sentences: A list of (words, tags) tuples.
:param save_loc: If not ``None``, saves the trained model as JSON in this \
location.
:param nr_iter: Number of training iterations.
"""
import random
self._make_tagdict(sentences)
self.model.classes = self.classes
for _ in range(nr_iter):
c = 0
n = 0
for sentence in sentences:
words, tags = zip(*sentence)
prev, prev2 = self.START
context = (
self.START + [self._normalize(w) for w in words] + self.END
)
for i, word in enumerate(words):
guess = self.tagdict.get(word)
if not guess:
feats = self._get_features(
i, word, context, prev, prev2
)
guess = self.model.predict(feats)
self.model.update(tags[i], guess, feats)
prev2 = prev
prev = guess
c += guess == tags[i]
n += 1
random.shuffle(sentences)
self.model.average_weights()
# save the model
if save_loc is not None:
data = {}
data["weights"] = self.model.weights
data["tagdict"] = self.tagdict
data["classes"] = list(self.classes)
with open(save_loc, "w", encoding="utf-8") as f:
json.dump(data, f, ensure_ascii=False)
def load(self, loc: str) -> None:
"""
Load a model from a JSON file.
:param str loc: model path
"""
try:
with open(loc, "r", encoding="utf-8-sig") as f:
w_td_c = json.load(f)
except IOError:
msg = "Missing trontagger.json file."
raise IOError(msg)
self.model.weights = w_td_c["weights"]
self.tagdict = w_td_c["tagdict"]
self.classes = w_td_c["classes"]
self.model.classes = set(self.classes)
def _normalize(self, word: str) -> str:
"""
Normalization used in pre-processing.
- All words are lower cased
- Four-digit numbers are represented as !YEAR;
- Other digits are represented as !DIGITS
:rtype: str
"""
if "-" in word and word[0] != "-":
return "!HYPHEN"
elif word.isdigit() and len(word) == 4:
return "!YEAR"
elif word[0].isdigit():
return "!DIGITS"
else:
return word.lower()
def _get_features(
self, i: int, word: str, context: List[str], prev: str, prev2: str
) -> Dict:
"""
Map tokens into a feature representation, implemented as a
{hashable: float} dict. If the features change, a new model must be
trained.
"""
def add(name: str, *args):
features[" ".join((name,) + tuple(args))] += 1
i += len(self.START)
features = defaultdict(int)
# It's useful to have a constant feature,
# which acts sort of like a prior
add("bias")
add("i suffix", word[-3:])
add("i pref1", word[0])
add("i-1 tag", prev)
add("i-2 tag", prev2)
add("i tag+i-2 tag", prev, prev2)
add("i word", context[i])
add("i-1 tag+i word", prev, context[i])
add("i-1 word", context[i - 1])
add("i-1 suffix", context[i - 1][-3:])
add("i-2 word", context[i - 2])
add("i+1 word", context[i + 1])
add("i+1 suffix", context[i + 1][-3:])
add("i+2 word", context[i + 2])
return features
def _make_tagdict(
self, sentences: Iterable[Iterable[Tuple[str, str]]]
) -> None:
"""Make a tag dictionary for single-tag words."""
counts = defaultdict(lambda: defaultdict(int))
for sentence in sentences:
for word, tag in sentence:
counts[word][tag] += 1
self.classes.add(tag)
freq_thresh = 20
ambiguity_thresh = 0.97
for word, tag_freqs in counts.items():
tag, mode = max(tag_freqs.items(), key=lambda item: item[1])
n = sum(tag_freqs.values())
# Don't add rare words to the tag dictionary
# Only add quite unambiguous words
if n >= freq_thresh and (float(mode) / n) >= ambiguity_thresh:
self.tagdict[word] = tag
| 10,394 | 34.237288 | 110 | py |
pythainlp-dev/pythainlp/tag/blackboard.py | pythainlp-dev/pythainlp/tag/blackboard.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Tuple
# defined strings for special characters
CHAR_TO_ESCAPE = {" ": "_"}
ESCAPE_TO_CHAR = dict((v, k) for k, v in CHAR_TO_ESCAPE.items())
# map from Blackboard treebank POS tag to Universal POS tag
# from Wannaphong Phatthiyaphaibun & Korakot Chaovavanich
TO_UD = {
"": "",
"AJ": "ADJ",
"AV": "ADV",
"AX": "AUX",
"CC": "CCONJ",
"CL": "NOUN",
"FX": "NOUN",
"IJ": "INTJ",
"NG": "PART",
"NN": "NOUN",
"NU": "NUM",
"PA": "PART",
"PR": "PROPN",
"PS": "ADP",
"PU": "PUNCT",
"VV": "VERB",
"XX": "X",
}
def pre_process(words: List[str]) -> List[str]:
"""
Convert signs and symbols with their defined strings.
This function is to be used as a preprocessing step,
before the actual POS tagging.
"""
keys = CHAR_TO_ESCAPE.keys()
words = [CHAR_TO_ESCAPE[word] if word in keys else word for word in words]
return words
def post_process(
word_tags: List[Tuple[str, str]], to_ud: bool = False
) -> List[Tuple[str, str]]:
"""
Convert defined strings back to corresponding signs and symbols.
This function is to be used as a post-processing step,
after the POS tagging.
"""
keys = ESCAPE_TO_CHAR.keys()
if not to_ud:
word_tags = [
(ESCAPE_TO_CHAR[word], tag) if word in keys else (word, tag)
for word, tag in word_tags
]
else:
word_tags = [
(ESCAPE_TO_CHAR[word], TO_UD[tag])
if word in keys
else (word, TO_UD[tag])
for word, tag in word_tags
]
return word_tags
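# Round-trip sketch (tokens and tags here are hypothetical): pre_process(["ผม", " ", "รัก"])
# returns ["ผม", "_", "รัก"], and post_process([("_", "PU")], to_ud=True) maps the
# escaped token back to (" ", "PUNCT") using ESCAPE_TO_CHAR and TO_UD.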
| 2,238 | 27.341772 | 78 | py |
pythainlp-dev/pythainlp/tag/chunk.py | pythainlp-dev/pythainlp/tag/chunk.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Tuple
def chunk_parse(
sent: List[Tuple[str, str]], engine: str = "crf", corpus: str = "orchidpp"
) -> List[str]:
"""
This function parses a Thai sentence into phrase-structure chunks in IOB format.
:param list sent: a list of (word, part-of-speech) tuples
:param str engine: chunking engine (currently only "crf" is supported)
:param str corpus: chunking corpus (currently only "orchidpp" is supported)
:return: a list of chunk tags in IOB format, one tag per input token
:rtype: List[str]
:Example:
::
from pythainlp.tag import chunk_parse, pos_tag
tokens = ["ผม", "รัก", "คุณ"]
tokens_pos = pos_tag(tokens, engine="perceptron", corpus="orchid")
print(chunk_parse(tokens_pos))
# output: ['B-NP', 'B-VP', 'I-VP']
"""
from .crfchunk import CRFchunk
_engine = CRFchunk()
return _engine.parse(sent)
| 1,485 | 31.304348 | 78 | py |
pythainlp-dev/pythainlp/tag/crfchunk.py | pythainlp-dev/pythainlp/tag/crfchunk.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List, Tuple, Union
from pycrfsuite import Tagger as CRFTagger
from pythainlp.corpus import path_pythainlp_corpus, thai_stopwords
def _is_stopword(word: str) -> bool: # check thai stopword
return word in thai_stopwords()
def _doc2features(tokens: List[Tuple[str, str]], index: int) -> Dict:
"""
`tokens` = a POS-tagged sentence [(w1, t1), ...]
`index` = the index of the token we want to extract features for
"""
word, pos = tokens[index]
f = {
"word": word,
"word_is_stopword": _is_stopword(word),
"pos": pos,
}
if index > 1:
prevprevword, prevprevpos = tokens[index - 2]
f["prev-prev-word"] = prevprevword
f["prev-prevz-word_is_stopword"] = _is_stopword(prevprevword)
f["prev-prevz-pos"] = prevprevpos
if index > 0:
prevword, prevpos = tokens[index - 1]
f["prev-word"] = prevword
f["prev-word_is_stopword"] = _is_stopword(prevword)
f["prev-pos"] = prevpos
else:
f["BOS"] = True
if index < len(tokens) - 2:
nextnextword, nextnextpos = tokens[index + 2]
f["nextnext-word"] = nextnextword
f["nextnext-word_is_stopword"] = _is_stopword(nextnextword)
f["nextnext-pos"] = nextnextpos
if index < len(tokens) - 1:
nextword, nextpos = tokens[index + 1]
f["next-word"] = nextword
f["next-word_is_stopword"] = _is_stopword(nextword)
f["next-pos"] = nextpos
else:
f["EOS"] = True
return f
def extract_features(doc):
return [_doc2features(doc, i) for i in range(0, len(doc))]
class CRFchunk:
def __init__(self, corpus: str = "orchidpp"):
self.corpus = corpus
self.load_model(self.corpus)
def load_model(self, corpus: str):
self.tagger = CRFTagger()
if corpus == "orchidpp":
self.path = path_pythainlp_corpus("crfchunk_orchidpp.model")
self.tagger.open(self.path)
def parse(self, token_pos: List[Tuple[str, str]]) -> List[str]:
self.xseq = extract_features(token_pos)
return self.tagger.tag(self.xseq)
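# Example usage (illustrative; the POS tags below are hypothetical ORCHID-style
# tags and the returned chunk labels depend on the bundled CRF model):
#     chunker = CRFchunk()
#     chunker.parse([("ผม", "PPRS"), ("รัก", "VACT"), ("คุณ", "PPRS")])
#     # -> a list of IOB chunk tags such as ["B-NP", "B-VP", "I-VP"]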
| 2,769 | 33.197531 | 74 | py |
pythainlp-dev/pythainlp/tag/locations.py | pythainlp-dev/pythainlp/tag/locations.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Recognizes locations in text
"""
from typing import List, Tuple
from pythainlp.corpus import provinces
def tag_provinces(tokens: List[str]) -> List[Tuple[str, str]]:
"""
This function recognize Thailand provinces in text.
Note that it uses exact match and considers no context.
:param list[str] tokens: a list of words
:reutrn: a list of tuple indicating NER for `LOCATION` in IOB format
:rtype: list[tuple[str, str]]
:Example:
::
from pythainlp.tag import tag_provinces
text = ['หนองคาย', 'น่าอยู่']
tag_provinces(text)
# output: [('หนองคาย', 'B-LOCATION'), ('น่าอยู่', 'O')]
"""
province_list = provinces()
output = [
(token, "B-LOCATION") if token in province_list else (token, "O")
for token in tokens
]
return output
| 1,449 | 28.591837 | 74 | py |
pythainlp-dev/pythainlp/tag/named_entity.py | pythainlp-dev/pythainlp/tag/named_entity.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Named-entity recognizer
"""
import warnings
from typing import List, Tuple, Union
class NER:
"""
Named-entity recognizer class
:param str engine: Named-entity recognizer engine
:param str corpus: corpus
**Options for engine**
* *thainer-v2* - Thai NER engine v2.0 for Thai NER 2.0 (default)
* *thainer* - Thai NER engine
* *tltk* - wrapper for `TLTK <https://pypi.org/project/tltk/>`_.
**Options for corpus**
* *thainer* - Thai NER corpus (default)
**Note**: the tltk engine supports only the NER model provided by tltk.
"""
def __init__(self, engine: str = "thainer-v2", corpus: str = "thainer") -> None:
self.load_engine(engine=engine, corpus=corpus)
def load_engine(self, engine: str, corpus: str) -> None:
self.name_engine = engine
self.engine = None
if engine == "thainer" and corpus == "thainer":
from pythainlp.tag.thainer import ThaiNameTagger
self.engine = ThaiNameTagger()
elif engine == "thainer-v2" and corpus == "thainer":
from pythainlp.wangchanberta import NamedEntityRecognition
self.engine = NamedEntityRecognition(model="pythainlp/thainer-corpus-v2-base-model")
elif engine == "tltk":
from pythainlp.tag import tltk
self.engine = tltk
elif engine == "wangchanberta" and corpus == "thainer":
from pythainlp.wangchanberta import ThaiNameTagger
self.engine = ThaiNameTagger(dataset_name=corpus)
else:
raise ValueError(
"NER class not support {0} engine or {1} corpus.".format(
engine, corpus
)
)
def tag(
self, text, pos=False, tag=False
) -> Union[List[Tuple[str, str]], List[Tuple[str, str, str]], str]:
"""
This function tags named entities in text, in IOB format.
:param str text: text in Thai to be tagged
:param bool pos: output with part-of-speech tags.\
(not supported by the wangchanberta engine)
:param bool tag: return the output as an HTML-like tagged string.
:return: a list of tuple associated with tokenized word, NER tag,
POS tag (if the parameter `pos` is specified as `True`),
and output like html tag (if the parameter `tag` is
specified as `True`).
Otherwise, return a list of tuple associated with tokenized
word and NER tag
:rtype: Union[List[Tuple[str, str]], List[Tuple[str, str, str]], str]
:Example:
>>> from pythainlp.tag import NER
>>>
>>> ner = NER("thainer")
>>> ner.tag("ทดสอบนายวรรณพงษ์ ภัททิยไพบูลย์")
[('ทดสอบ', 'O'),
('นาย', 'B-PERSON'),
('วรรณ', 'I-PERSON'),
('พงษ์', 'I-PERSON'),
(' ', 'I-PERSON'),
('ภัททิย', 'I-PERSON'),
('ไพบูลย์', 'I-PERSON')]
>>> ner.tag("ทดสอบนายวรรณพงษ์ ภัททิยไพบูลย์", tag=True)
'ทดสอบ<PERSON>นายวรรณพงษ์ ภัททิยไพบูลย์</PERSON>'
"""
return self.engine.get_ner(text, tag=tag, pos=pos)
class NNER:
"""
Nested Named Entity Recognition
:param str engine: Nested Named entity recognizer engine
:param str corpus: corpus
**Options for engine**
* *thai_nner* - Thai NER engine
"""
def __init__(self, engine: str = "thai_nner") -> None:
self.load_engine(engine)
def load_engine(self, engine: str = "thai_nner") -> None:
from pythainlp.tag.thai_nner import Thai_NNER
self.engine = Thai_NNER()
def tag(self, text) -> Tuple[List[str], List[dict]]:
"""
This function tags nested named entities.
:param str text: text in Thai to be tagged
:return: a list of tuple associated with tokenized word, NNER tag.
:rtype: Tuple[List[str], List[dict]]
:Example:
>>> from pythainlp.tag.named_entity import NNER
>>> nner = NNER()
>>> nner.tag("แมวทำอะไรตอนห้าโมงเช้า")
([
'<s>',
'',
'แมว',
'ทํา',
'',
'อะไร',
'ตอน',
'',
'ห้า',
'',
'โมง',
'',
'เช้า',
'</s>'
],
[
{
'text': ['', 'ห้า'],
'span': [7, 9],
'entity_type': 'cardinal'
},
{
'text': ['', 'ห้า', '', 'โมง'],
'span': [7, 11],
'entity_type': 'time'
},
{
'text': ['', 'โมง'],
'span': [9, 11],
'entity_type': 'unit'
}
])
"""
return self.engine.tag(text)
| 5,628 | 31.726744 | 96 | py |
pythainlp-dev/pythainlp/tag/orchid.py | pythainlp-dev/pythainlp/tag/orchid.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Data preprocessing for ORCHID corpus
"""
from typing import List, Tuple
# defined strings for special characters,
# from Table 4 in ORCHID paper
CHAR_TO_ESCAPE = {
" ": "<space>",
"+": "<plus>",
"-": "<minus>",
"=": "<equal>",
",": "<comma>",
"$": "<dollar>",
".": "<full_stop>",
"(": "<left_parenthesis>",
")": "<right_parenthesis>",
'"': "<quotation>",
"@": "<at_mark>",
"&": "<ampersand>",
"{": "<left_curly_bracket>",
"^": "<circumflex_accent>",
"?": "<question_mark>",
"<": "<less_than>",
">": "<greater_than>",
"!": "<exclamation>",
"’": "<apostrophe>",
":": "<colon>",
"*": "<asterisk>",
";": "<semi_colon>",
"/": "<slash>",
}
ESCAPE_TO_CHAR = dict((v, k) for k, v in CHAR_TO_ESCAPE.items())
# map from ORCHID POS tag to Universal POS tag
# from Korakot Chaovavanich
TO_UD = {
"": "",
# NOUN
"NOUN": "NOUN",
"NCMN": "NOUN",
"NTTL": "NOUN",
"CNIT": "NOUN",
"CLTV": "NOUN",
"CMTR": "NOUN",
"CFQC": "NOUN",
"CVBL": "NOUN",
# VERB
"VACT": "VERB",
"VSTA": "VERB",
# PROPN
"PROPN": "PROPN",
"NPRP": "PROPN",
# ADJ
"ADJ": "ADJ",
"NONM": "ADJ",
"VATT": "ADJ",
"DONM": "ADJ",
# ADV
"ADV": "ADV",
"ADVN": "ADV",
"ADVI": "ADV",
"ADVP": "ADV",
"ADVS": "ADV",
# INT
"INT": "INTJ",
# PRON
"PRON": "PRON",
"PPRS": "PRON",
"PDMN": "PRON",
"PNTR": "PRON",
# DET
"DET": "DET",
"DDAN": "DET",
"DDAC": "DET",
"DDBQ": "DET",
"DDAQ": "DET",
"DIAC": "DET",
"DIBQ": "DET",
"DIAQ": "DET",
# NUM
"NUM": "NUM",
"NCNM": "NUM",
"NLBL": "NUM",
"DCNM": "NUM",
# AUX
"AUX": "AUX",
"XVBM": "AUX",
"XVAM": "AUX",
"XVMM": "AUX",
"XVBB": "AUX",
"XVAE": "AUX",
# ADP
"ADP": "ADP",
"RPRE": "ADP",
# CCONJ
"CCONJ": "CCONJ",
"JCRG": "CCONJ",
# SCONJ
"SCONJ": "SCONJ",
"PREL": "SCONJ",
"JSBR": "SCONJ",
"JCMP": "SCONJ",
# PART
"PART": "PART",
"FIXN": "PART",
"FIXV": "PART",
"EAFF": "PART",
"EITT": "PART",
"AITT": "PART",
"NEG": "PART",
# PUNCT
"PUNCT": "PUNCT",
"PUNC": "PUNCT",
}
def ud_exception(w: str, tag: str) -> str:
if w == "การ" or w == "ความ":
return "NOUN"
return tag
def pre_process(words: List[str]) -> List[str]:
"""
Convert signs and symbols with their defined strings.
This function is to be used as a preprocessing step,
before the actual POS tagging.
"""
keys = CHAR_TO_ESCAPE.keys()
words = [CHAR_TO_ESCAPE[word] if word in keys else word for word in words]
return words
def post_process(
word_tags: List[Tuple[str, str]], to_ud: bool = False
) -> List[Tuple[str, str]]:
"""
Convert defined strings back to corresponding signs and symbols.
This function is to be used as a post-processing step,
after the actual POS tagging.
"""
keys = ESCAPE_TO_CHAR.keys()
if not to_ud:
word_tags = [
(ESCAPE_TO_CHAR[word], tag) if word in keys else (word, tag)
for word, tag in word_tags
]
else:
word_tags = [
(ESCAPE_TO_CHAR[word], ud_exception(word, TO_UD[tag]))
if word in keys
else (word, ud_exception(word, TO_UD[tag]))
for word, tag in word_tags
]
return word_tags
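# Round-trip sketch (tokens and tags here are hypothetical): pre_process(["ผม", " ", "+"])
# returns ["ผม", "<space>", "<plus>"], and post_process([("<space>", "PUNC")], to_ud=True)
# restores (" ", "PUNCT"); ud_exception() additionally forces the nominalizers
# "การ" and "ความ" to NOUN when mapping to Universal POS tags.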
| 4,064 | 22.49711 | 78 | py |
pythainlp-dev/pythainlp/tag/perceptron.py | pythainlp-dev/pythainlp/tag/perceptron.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Perceptron part-of-speech tagger
"""
import os
from typing import List, Tuple
from pythainlp.corpus import corpus_path, get_corpus_path
from pythainlp.tag import PerceptronTagger, blackboard, orchid
_ORCHID_FILENAME = "pos_orchid_perceptron.json"
_ORCHID_PATH = os.path.join(corpus_path(), _ORCHID_FILENAME)
_PUD_FILENAME = "pos_ud_perceptron-v0.2.json"
_PUD_PATH = os.path.join(corpus_path(), _PUD_FILENAME)
_BLACKBOARD_NAME = "blackboard_pt_tagger"
_ORCHID_TAGGER = None
_PUD_TAGGER = None
_BLACKBOARD_TAGGER = None
def _orchid_tagger():
global _ORCHID_TAGGER
if not _ORCHID_TAGGER:
_ORCHID_TAGGER = PerceptronTagger(path=_ORCHID_PATH)
return _ORCHID_TAGGER
def _pud_tagger():
global _PUD_TAGGER
if not _PUD_TAGGER:
_PUD_TAGGER = PerceptronTagger(path=_PUD_PATH)
return _PUD_TAGGER
def _blackboard_tagger():
global _BLACKBOARD_TAGGER
if not _BLACKBOARD_TAGGER:
path = get_corpus_path(_BLACKBOARD_NAME)
_BLACKBOARD_TAGGER = PerceptronTagger(path=path)
return _BLACKBOARD_TAGGER
def tag(words: List[str], corpus: str = "pud") -> List[Tuple[str, str]]:
"""
:param list words: a list of tokenized words
:param str corpus: corpus name (orchid, orchid_ud, blackboard, blackboard_ud, or pud)
:return: a list of tuples (word, POS tag)
:rtype: list[tuple[str, str]]
"""
if not words:
return []
to_ud = False
if corpus[-3:] == "_ud":
to_ud = True
word_tags = []
if corpus == "orchid" or corpus == "orchid_ud":
words = orchid.pre_process(words)
word_tags = _orchid_tagger().tag(words)
word_tags = orchid.post_process(word_tags, to_ud)
elif corpus == "blackboard" or corpus == "blackboard_ud":
words = blackboard.pre_process(words)
word_tags = _blackboard_tagger().tag(words)
word_tags = blackboard.post_process(word_tags, to_ud)
else: # default, use "pud" as a corpus
tagger = _pud_tagger()
word_tags = tagger.tag(words)
return word_tags
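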
| 2,617 | 29.091954 | 74 | py |
pythainlp-dev/pythainlp/tag/pos_tag.py | pythainlp-dev/pythainlp/tag/pos_tag.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Tuple
def pos_tag(
words: List[str], engine: str = "perceptron", corpus: str = "orchid"
) -> List[Tuple[str, str]]:
"""
Marks words with part-of-speech (POS) tags, such as 'NOUN' and 'VERB'.
:param list words: a list of tokenized words
:param str engine:
* *perceptron* - perceptron tagger (default)
* *unigram* - unigram tagger
* *tltk* - TLTK: Thai Language Toolkit (supports the TNC corpus only;\
if another corpus is chosen, it falls back to TNC)
:param str corpus: the corpus used to create the language model for the tagger
* *orchid* - `ORCHID \
<https://www.academia.edu/9127599/Thai_Treebank>`_ corpus, \
text from Thai academic articles (default)
* *orchid_ud* - ORCHID text, with tags mapped to Universal POS tags
* *blackboard* - `blackboard treebank <https://bitbucket.org/kaamanita/blackboard-treebank/src/master/>`_
* *blackboard_ud* - blackboard text, with tags mapped to Universal POS tag \
from `Universal Dependencies <https://universaldependencies.org/>`
* *pud* - `Parallel Universal Dependencies (PUD)\
<https://github.com/UniversalDependencies/UD_Thai-PUD>`_ \
treebanks, natively use Universal POS tags
* *tnc* - Thai National Corpus (support tltk engine only)
:return: a list of tuples (word, POS tag)
:rtype: list[tuple[str, str]]
:Example:
Tag words with corpus `orchid` (default)::
from pythainlp.tag import pos_tag
words = ['ฉัน','มี','ชีวิต','รอด','ใน','อาคาร','หลบภัย','ของ', \\
'นายก', 'เชอร์ชิล']
pos_tag(words)
# output:
# [('ฉัน', 'PPRS'), ('มี', 'VSTA'), ('ชีวิต', 'NCMN'), ('รอด', 'NCMN'),
# ('ใน', 'RPRE'), ('อาคาร', 'NCMN'), ('หลบภัย', 'NCMN'),
# ('ของ', 'RPRE'), ('นายก', 'NCMN'), ('เชอร์ชิล', 'NCMN')]
Tag words with corpus `orchid_ud`::
from pythainlp.tag import pos_tag
words = ['ฉัน','มี','ชีวิต','รอด','ใน','อาคาร','หลบภัย','ของ', \\
'นายก', 'เชอร์ชิล']
pos_tag(words, corpus='orchid_ud')
# output:
# [('ฉัน', 'PROPN'), ('มี', 'VERB'), ('ชีวิต', 'NOUN'),
# ('รอด', 'NOUN'), ('ใน', 'ADP'), ('อาคาร', 'NOUN'),
# ('หลบภัย', 'NOUN'), ('ของ', 'ADP'), ('นายก', 'NOUN'),
# ('เชอร์ชิล', 'NOUN')]
Tag words with corpus `pud`::
from pythainlp.tag import pos_tag
words = ['ฉัน','มี','ชีวิต','รอด','ใน','อาคาร','หลบภัย','ของ', \\
'นายก', 'เชอร์ชิล']
pos_tag(words, corpus='pud')
# [('ฉัน', 'PRON'), ('มี', 'VERB'), ('ชีวิต', 'NOUN'), ('รอด', 'VERB'),
# ('ใน', 'ADP'), ('อาคาร', 'NOUN'), ('หลบภัย', 'NOUN'),
# ('ของ', 'ADP'), ('นายก', 'NOUN'), ('เชอร์ชิล', 'PROPN')]
Tag words with different engines including *perceptron* and *unigram*::
from pythainlp.tag import pos_tag
words = ['เก้าอี้','มี','จำนวน','ขา', ' ', '=', '3']
pos_tag(words, engine='perceptron', corpus='orchid')
# output:
# [('เก้าอี้', 'NCMN'), ('มี', 'VSTA'), ('จำนวน', 'NCMN'),
# ('ขา', 'NCMN'), (' ', 'PUNC'),
# ('=', 'PUNC'), ('3', 'NCNM')]
pos_tag(words, engine='unigram', corpus='pud')
# output:
# [('เก้าอี้', None), ('มี', 'VERB'), ('จำนวน', 'NOUN'), ('ขา', None),
# ('<space>', None), ('<equal>', None), ('3', 'NUM')]
"""
if not words:
return []
_support_corpus = [
"blackboard",
"blackboard_ud",
"orchid",
"orchid_ud",
"pud",
]
if engine == "perceptron" and corpus in _support_corpus:
from pythainlp.tag.perceptron import tag as tag_
elif engine == "tltk":
from pythainlp.tag.tltk import pos_tag as tag_
corpus = "tnc"
elif engine == "unigram" and corpus in _support_corpus: # default
from pythainlp.tag.unigram import tag as tag_
else:
raise ValueError(
"pos_tag not support {0} engine or {1} corpus.".format(
engine, corpus
)
)
word_tags = tag_(words, corpus=corpus)
return word_tags
def pos_tag_sents(
sentences: List[List[str]],
engine: str = "perceptron",
corpus: str = "orchid",
) -> List[List[Tuple[str, str]]]:
"""
Marks sentences with part-of-speech (POS) tags.
:param list sentences: a list of lists of tokenized words
:param str engine:
* *perceptron* - perceptron tagger (default)
* *unigram* - unigram tagger
* *tltk* - TLTK: Thai Language Toolkit (supports the TNC corpus only;\
if another corpus is chosen, it falls back to TNC)
:param str corpus: the corpus used to create the language model for the tagger
* *orchid* - `ORCHID \
<https://www.academia.edu/9127599/Thai_Treebank>`_ corpus, \
text from Thai academic articles (default)
* *orchid_ud* - ORCHID text, with tags mapped to Universal POS tags
* *blackboard* - `blackboard treebank <https://bitbucket.org/kaamanita/blackboard-treebank/src/master/>`_
* *blackboard_ud* - blackboard text, with tags mapped to Universal POS tag \
from `Universal Dependencies <https://universaldependencies.org/>`
* *pud* - `Parallel Universal Dependencies (PUD)\
<https://github.com/UniversalDependencies/UD_Thai-PUD>`_ \
treebanks, natively use Universal POS tags
* *tnc* - Thai National Corpus (support tltk engine only)
:return: a list of lists of tuples (word, POS tag)
:rtype: list[list[tuple[str, str]]]
:Example:
Labels POS for two sentences::
from pythainlp.tag import pos_tag_sents
sentences = [['เก้าอี้','มี','3','ขา'], \\
['นก', 'บิน', 'กลับ', 'รัง']]
pos_tag_sents(sentences, corpus='pud')
# output:
# [[('เก้าอี้', 'PROPN'), ('มี', 'VERB'), ('3', 'NUM'),
# ('ขา', 'NOUN')], [('นก', 'NOUN'), ('บิน', 'VERB'),
# ('กลับ', 'VERB'), ('รัง', 'NOUN')]]
"""
if not sentences:
return []
return [pos_tag(sent, engine=engine, corpus=corpus) for sent in sentences]
| 6,938 | 37.765363 | 113 | py |
pythainlp-dev/pythainlp/tag/thai_nner.py | pythainlp-dev/pythainlp/tag/thai_nner.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Tuple
from thai_nner import NNER
from pythainlp.corpus import get_corpus_path
class Thai_NNER:
def __init__(self, path_model=get_corpus_path("thai_nner", "1.0")) -> None:
self.model = NNER(path_model=path_model)
def tag(self, text) -> Tuple[List[str], List[dict]]:
return self.model.get_tag(text)
| 963 | 36.076923 | 79 | py |
pythainlp-dev/pythainlp/tag/thainer.py | pythainlp-dev/pythainlp/tag/thainer.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Named-entity recognizer
"""
__all__ = ["ThaiNameTagger"]
from typing import Dict, List, Tuple, Union
from pythainlp.corpus import get_corpus_path, thai_stopwords
from pythainlp.tag import pos_tag
from pythainlp.tokenize import word_tokenize
from pythainlp.util import isthai
_TOKENIZER_ENGINE = "newmm" # should be the same as one used in training data
def _is_stopword(word: str) -> bool:  # check if the word is a Thai stopword
return word in thai_stopwords()
def _doc2features(doc, i) -> Dict:
word = doc[i][0]
postag = doc[i][1]
# Features from current word
features = {
"word.word": word,
"word.stopword": _is_stopword(word),
"word.isthai": isthai(word),
"word.isspace": word.isspace(),
"postag": postag,
"word.isdigit": word.isdigit(),
}
if word.isdigit() and len(word) == 5:
features["word.islen5"] = True
# Features from previous word
if i > 0:
prevword = doc[i - 1][0]
prevpostag = doc[i - 1][1]
prev_features = {
"word.prevword": prevword,
"word.previsspace": prevword.isspace(),
"word.previsthai": isthai(prevword),
"word.prevstopword": _is_stopword(prevword),
"word.prevpostag": prevpostag,
"word.prevwordisdigit": prevword.isdigit(),
}
features.update(prev_features)
else:
features["BOS"] = True # Special "Beginning of Sequence" tag
# Features from next word
if i < len(doc) - 1:
nextword = doc[i + 1][0]
nextpostag = doc[i + 1][1]
next_features = {
"word.nextword": nextword,
"word.nextisspace": nextword.isspace(),
"word.nextpostag": nextpostag,
"word.nextisthai": isthai(nextword),
"word.nextstopword": _is_stopword(nextword),
"word.nextwordisdigit": nextword.isdigit(),
}
features.update(next_features)
else:
features["EOS"] = True # Special "End of Sequence" tag
return features
class ThaiNameTagger:
"""
Thai named-entity recognizer or Thai NER.
This class supports Thai NER 1.4 and 1.5 only.
:param str version: Thai NER version.
Supported versions are 1.4 and 1.5.
The default value is `1.4`.
:Example:
::
from pythainlp.tag.named_entity import ThaiNameTagger
thainer14 = ThaiNameTagger(version="1.4")
thainer14.get_ner("วันที่ 15 ก.ย. 61 ทดสอบระบบเวลา 14:49 น.")
"""
def __init__(self, version: str = "1.4") -> None:
"""
Thai named-entity recognizer.
:param str version: Thai NER version.
Supported versions are 1.4 and 1.5.
The default value is `1.4`.
"""
from pycrfsuite import Tagger as CRFTagger
self.crf = CRFTagger()
if version == "1.4":
self.crf.open(get_corpus_path("thainer-1.4", version="1.4"))
self.pos_tag_name = "orchid_ud"
elif version == "1.5":
self.crf.open(get_corpus_path("thainer", version="1.5"))
self.pos_tag_name = "blackboard"
def get_ner(
self, text: str, pos: bool = True, tag: bool = False
) -> Union[List[Tuple[str, str]], List[Tuple[str, str, str]]]:
"""
This function tags named entities in text, in IOB format.
:param str text: text in Thai to be tagged
:param bool pos: To include POS tags in the results (`True`) or
exclude (`False`). The default value is `True`.
:param bool tag: return the output as an HTML-like tagged string.
:return: a list of tuple associated with tokenized word, NER tag,
POS tag (if the parameter `pos` is specified as `True`),
and output like html tag (if the parameter `tag` is
specified as `True`).
Otherwise, return a list of tuple associated with tokenized
word and NER tag
:rtype: Union[list[tuple[str, str]], list[tuple[str, str, str]]], str
:Note:
* For the POS tags to be included in the results, this function
uses :func:`pythainlp.tag.pos_tag` with engine as `perceptron`
and corpus as `orchid_ud` (version 1.4) or `blackboard` (version 1.5).
:Example:
>>> from pythainlp.tag.named_entity import ThaiNameTagger
>>>
>>> ner = ThaiNameTagger()
>>> ner.get_ner("วันที่ 15 ก.ย. 61 ทดสอบระบบเวลา 14:49 น.")
[('วันที่', 'NOUN', 'O'), (' ', 'PUNCT', 'O'),
('15', 'NUM', 'B-DATE'), (' ', 'PUNCT', 'I-DATE'),
('ก.ย.', 'NOUN', 'I-DATE'), (' ', 'PUNCT', 'I-DATE'),
('61', 'NUM', 'I-DATE'), (' ', 'PUNCT', 'O'),
('ทดสอบ', 'VERB', 'O'), ('ระบบ', 'NOUN', 'O'),
('เวลา', 'NOUN', 'O'), (' ', 'PUNCT', 'O'),
('14', 'NOUN', 'B-TIME'), (':', 'PUNCT', 'I-TIME'),
('49', 'NUM', 'I-TIME'), (' ', 'PUNCT', 'I-TIME'),
('น.', 'NOUN', 'I-TIME')]
>>>
>>> ner.get_ner("วันที่ 15 ก.ย. 61 ทดสอบระบบเวลา 14:49 น.",
pos=False)
[('วันที่', 'O'), (' ', 'O'),
('15', 'B-DATE'), (' ', 'I-DATE'),
('ก.ย.', 'I-DATE'), (' ', 'I-DATE'),
('61', 'I-DATE'), (' ', 'O'),
('ทดสอบ', 'O'), ('ระบบ', 'O'),
('เวลา', 'O'), (' ', 'O'),
('14', 'B-TIME'), (':', 'I-TIME'),
('49', 'I-TIME'), (' ', 'I-TIME'),
('น.', 'I-TIME')]
>>> ner.get_ner("วันที่ 15 ก.ย. 61 ทดสอบระบบเวลา 14:49 น.",
tag=True)
'วันที่ <DATE>15 ก.ย. 61</DATE> ทดสอบระบบเวลา <TIME>
14:49 น.</TIME>'
"""
tokens = word_tokenize(text, engine=_TOKENIZER_ENGINE)
pos_tags = pos_tag(
tokens, engine="perceptron", corpus=self.pos_tag_name
)
x_test = ThaiNameTagger.__extract_features(pos_tags)
y = self.crf.tag(x_test)
sent_ner = [(pos_tags[i][0], data) for i, data in enumerate(y)]
if tag:
temp = ""
sent = ""
for idx, (word, ner) in enumerate(sent_ner):
if ner.startswith("B-") and temp != "":
sent += "</" + temp + ">"
temp = ner[2:]
sent += "<" + temp + ">"
elif ner.startswith("B-"):
temp = ner[2:]
sent += "<" + temp + ">"
elif ner == "O" and temp != "":
sent += "</" + temp + ">"
temp = ""
sent += word
if idx == len(sent_ner) - 1 and temp != "":
sent += "</" + temp + ">"
return sent
if pos:
return [
(pos_tags[i][0], pos_tags[i][1], data)
for i, data in enumerate(y)
]
return sent_ner
@staticmethod
def __extract_features(doc):
return [_doc2features(doc, i) for i in range(len(doc))]
| 7,716 | 34.399083 | 78 | py |
pythainlp-dev/pythainlp/tag/tltk.py | pythainlp-dev/pythainlp/tag/tltk.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Tuple, Union
try:
from tltk import nlp
except ImportError:
raise ImportError("Not found tltk! Please install tltk by pip install tltk")
from pythainlp.tokenize import word_tokenize
nlp.pos_load()
nlp.ner_load()
def pos_tag(words: List[str], corpus: str = "tnc") -> List[Tuple[str, str]]:
if corpus != "tnc":
raise ValueError("tltk not support {0} corpus.".format(0))
return nlp.pos_tag_wordlist(words)
def _post_process(text: str) -> str:
return text.replace("<s/>", " ")
def get_ner(
text: str, pos: bool = True, tag: bool = False
) -> Union[List[Tuple[str, str]], List[Tuple[str, str, str]], str]:
"""
Named-entity recognizer from **TLTK**
This function tags named entities in text, in IOB format.
:param str text: text in Thai to be tagged
:param bool pos: To include POS tags in the results (`True`) or
exclude (`False`). The default value is `True`.
:param bool tag: return the output as an HTML-like tagged string.
:return: a list of tuple associated with tokenized word, NER tag,
POS tag (if the parameter `pos` is specified as `True`),
and output like html tag (if the parameter `tag` is
specified as `True`).
Otherwise, return a list of tuple associated with tokenized
word and NER tag
:rtype: Union[list[tuple[str, str]], list[tuple[str, str, str]]], str
:Example:
>>> from pythainlp.tag.tltk import get_ner
>>> get_ner("เขาเรียนที่โรงเรียนนางรอง")
[('เขา', 'PRON', 'O'),
('เรียน', 'VERB', 'O'),
('ที่', 'SCONJ', 'O'),
('โรงเรียน', 'NOUN', 'B-L'),
('นางรอง', 'VERB', 'I-L')]
>>> get_ner("เขาเรียนที่โรงเรียนนางรอง", pos=False)
[('เขา', 'O'),
('เรียน', 'O'),
('ที่', 'O'),
('โรงเรียน', 'B-L'),
('นางรอง', 'I-L')]
>>> get_ner("เขาเรียนที่โรงเรียนนางรอง", tag=True)
'เขาเรียนที่<L>โรงเรียนนางรอง</L>'
"""
if not text:
return []
list_word = []
for i in word_tokenize(text, engine="tltk"):
if i == " ":
i = "<s/>"
list_word.append(i)
_pos = nlp.pos_tag_wordlist(list_word)
sent_ner = [
(_post_process(word), pos, ner) for word, pos, ner in nlp.ner(_pos)
]
if tag:
temp = ""
sent = ""
for idx, (word, pos, ner) in enumerate(sent_ner):
if ner.startswith("B-") and temp != "":
sent += "</" + temp + ">"
temp = ner[2:]
sent += "<" + temp + ">"
elif ner.startswith("B-"):
temp = ner[2:]
sent += "<" + temp + ">"
elif ner == "O" and temp != "":
sent += "</" + temp + ">"
temp = ""
sent += word
if idx == len(sent_ner) - 1 and temp != "":
sent += "</" + temp + ">"
return sent
if pos is False:
return [(word, ner) for word, pos, ner in sent_ner]
return sent_ner
| 3,632 | 32.638889 | 80 | py |
pythainlp-dev/pythainlp/tag/unigram.py | pythainlp-dev/pythainlp/tag/unigram.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unigram Part-Of-Speech tagger
"""
import json
import os
from typing import List, Tuple
from pythainlp.corpus import corpus_path, get_corpus_path
from pythainlp.tag import blackboard, orchid
_ORCHID_FILENAME = "pos_orchid_unigram.json"
_ORCHID_PATH = os.path.join(corpus_path(), _ORCHID_FILENAME)
_PUD_FILENAME = "pos_ud_unigram-v0.2.json"
_PUD_PATH = os.path.join(corpus_path(), _PUD_FILENAME)
_BLACKBOARD_NAME = "blackboard_unigram_tagger"
_ORCHID_TAGGER = None
_PUD_TAGGER = None
_BLACKBOARD_TAGGER = None
def _orchid_tagger():
global _ORCHID_TAGGER
if not _ORCHID_TAGGER:
with open(_ORCHID_PATH, encoding="utf-8-sig") as fh:
_ORCHID_TAGGER = json.load(fh)
return _ORCHID_TAGGER
def _pud_tagger():
global _PUD_TAGGER
if not _PUD_TAGGER:
with open(_PUD_PATH, encoding="utf-8-sig") as fh:
_PUD_TAGGER = json.load(fh)
return _PUD_TAGGER
def _blackboard_tagger():
global _BLACKBOARD_TAGGER
if not _BLACKBOARD_TAGGER:
path = get_corpus_path(_BLACKBOARD_NAME)
with open(path, encoding="utf-8-sig") as fh:
_BLACKBOARD_TAGGER = json.load(fh)
return _BLACKBOARD_TAGGER
def _find_tag(
words: List[str], dictdata: dict, default_tag: str = ""
) -> List[Tuple[str, str]]:
keys = list(dictdata.keys())
return [
(word, dictdata[word]) if word in keys else (word, default_tag)
for word in words
]
def tag(words: List[str], corpus: str = "pud") -> List[Tuple[str, str]]:
"""
:param list words: a list of tokenized words
:param str corpus: corpus name (orchid, orchid_ud, blackboard, blackboard_ud, or pud)
:return: a list of tuples (word, POS tag)
:rtype: list[tuple[str, str]]
"""
if not words:
return []
to_ud = False
if corpus[-3:] == "_ud":
to_ud = True
word_tags = []
if corpus == "orchid" or corpus == "orchid_ud":
words = orchid.pre_process(words)
word_tags = _find_tag(words, _orchid_tagger())
word_tags = orchid.post_process(word_tags, to_ud)
elif corpus == "blackboard" or corpus == "blackboard_ud":
words = blackboard.pre_process(words)
word_tags = _find_tag(words, _blackboard_tagger())
word_tags = blackboard.post_process(word_tags, to_ud)
else: # default, use "pud" as a corpus
word_tags = _find_tag(words, _pud_tagger())
return word_tags
| 3,000 | 29.01 | 74 | py |
pythainlp-dev/pythainlp/tag/wangchanberta_onnx.py | pythainlp-dev/pythainlp/tag/wangchanberta_onnx.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
import json
import numpy as np
from pythainlp.corpus import get_path_folder_corpus
class WngchanBerta_ONNX:
def __init__(
self,
model_name: str,
model_version: str,
file_onnx: str,
providers: List[str] = ["CPUExecutionProvider"],
) -> None:
import sentencepiece as spm
from onnxruntime import (
InferenceSession,
SessionOptions,
GraphOptimizationLevel,
)
self.model_name = model_name
self.model_version = model_version
self.options = SessionOptions()
self.options.graph_optimization_level = (
GraphOptimizationLevel.ORT_ENABLE_ALL
)
self.session = InferenceSession(
get_path_folder_corpus(
self.model_name, self.model_version, file_onnx
),
sess_options=self.options,
providers=providers,
)
self.session.disable_fallback()
self.outputs_name = self.session.get_outputs()[0].name
self.sp = spm.SentencePieceProcessor(
model_file=get_path_folder_corpus(
self.model_name, self.model_version, "sentencepiece.bpe.model"
)
)
with open(
get_path_folder_corpus(
self.model_name, self.model_version, "config.json"
),
encoding="utf-8-sig",
) as fh:
self._json = json.load(fh)
self.id2tag = self._json["id2label"]
def build_tokenizer(self, sent):
_t = [5] + [i + 4 for i in self.sp.encode(sent)] + [6]
model_inputs = {}
model_inputs["input_ids"] = np.array([_t], dtype=np.int64)
model_inputs["attention_mask"] = np.array(
[[1] * len(_t)], dtype=np.int64
)
return model_inputs
def postprocess(self, logits_data):
logits_t = logits_data[0]
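        # numerically stable softmax over the label dimension (subtract max before exp)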
maxes = np.max(logits_t, axis=-1, keepdims=True)
shifted_exp = np.exp(logits_t - maxes)
scores = shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
return scores
def clean_output(self, list_text):
return list_text
def totag(self, post, sent):
tag = []
_s = self.sp.EncodeAsPieces(sent)
for i in range(len(_s)):
tag.append(
(
_s[i],
self.id2tag[
str(list(post[i + 1]).index(max(list(post[i + 1]))))
],
)
)
return tag
def _config(self, list_ner):
return list_ner
def get_ner(self, text: str, tag: bool = False):
self._s = self.build_tokenizer(text)
logits = self.session.run(
output_names=[self.outputs_name], input_feed=self._s
)[0]
_tag = self.clean_output(self.totag(self.postprocess(logits), text))
if tag:
_tag = self._config(_tag)
temp = ""
sent = ""
for idx, (word, ner) in enumerate(_tag):
if ner.startswith("B-") and temp != "":
sent += "</" + temp + ">"
temp = ner[2:]
sent += "<" + temp + ">"
elif ner.startswith("B-"):
temp = ner[2:]
sent += "<" + temp + ">"
elif ner == "O" and temp != "":
sent += "</" + temp + ">"
temp = ""
sent += word
if idx == len(_tag) - 1 and temp != "":
sent += "</" + temp + ">"
return sent
else:
return _tag
| 4,312 | 31.923664 | 78 | py |
pythainlp-dev/pythainlp/tokenize/__init__.py | pythainlp-dev/pythainlp/tokenize/__init__.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tokenizers at different level of linguistic analysis.
"""
__all__ = [
"THAI2FIT_TOKENIZER",
"Tokenizer",
"Trie",
"clause_tokenize",
"sent_tokenize",
"subword_tokenize",
"word_tokenize",
"word_detokenize",
"paragraph_tokenize",
]
from pythainlp.corpus import thai_syllables, thai_words
from pythainlp.util.trie import Trie
DEFAULT_WORD_TOKENIZE_ENGINE = "newmm"
DEFAULT_SENT_TOKENIZE_ENGINE = "crfcut"
DEFAULT_SUBWORD_TOKENIZE_ENGINE = "tcc"
DEFAULT_SYLLABLE_TOKENIZE_ENGINE = "dict"
DEFAULT_WORD_DICT_TRIE = Trie(thai_words())
DEFAULT_SYLLABLE_DICT_TRIE = Trie(thai_syllables())
DEFAULT_DICT_TRIE = DEFAULT_WORD_DICT_TRIE
from pythainlp.tokenize.core import (
Tokenizer,
clause_tokenize,
sent_tokenize,
subword_tokenize,
word_tokenize,
word_detokenize,
paragraph_tokenize,
)
from pythainlp.corpus import get_corpus as _get_corpus
THAI2FIT_TOKENIZER = Tokenizer(
custom_dict=_get_corpus("words_th_thai2fit_201810.txt"), engine="newmm"
)
| 1,629 | 27.103448 | 75 | py |
pythainlp-dev/pythainlp/tokenize/_utils.py | pythainlp-dev/pythainlp/tokenize/_utils.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utility functions for tokenize module.
"""
import re
from typing import List, Callable
_DIGITS_WITH_SEPARATOR = re.compile(r"(\d+[\.\,:])+\d+")
def apply_postprocessors(
    segments: List[str], postprocessors: List[Callable[[List[str]], List[str]]]
) -> List[str]:
"""
    Apply a list of callables to a raw segmentation result, in order.
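    :Example:
    A minimal sketch, reusing a post-processor defined in this module::
        from pythainlp.tokenize._utils import apply_postprocessors, strip_whitespace
        apply_postprocessors([" วันนี้ ", " "], [strip_whitespace])
        # output: ['วันนี้']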
"""
for func in postprocessors:
segments = func(segments)
return segments
def rejoin_formatted_num(segments: List[str]) -> List[str]:
"""
Rejoin well-known formatted numeric that are over-tokenized.
The formatted numeric are numbers separated by ":", ",", or ".",
such as time, decimal number, comma-added number, and IP address.
:param List[str] segments: result from word tokenizer
:return: a list of fixed tokens
:rtype: List[str]
:Example:
tokens = ['ขณะ', 'นี้', 'เวลา', ' ', '12', ':', '00น', ' ', 'อัตรา',
'แลกเปลี่ยน', ' ', '1', ',', '234', '.', '5', ' ', 'baht/zeny']
rejoin_formatted_num(tokens)
# output:
# ['ขณะ', 'นี้', 'เวลา', ' ', '12:00น', ' ', 'อัตรา', 'แลกเปลี่ยน', ' ', '1,234.5', ' ', 'baht/zeny']
tokens = ['IP', ' ', 'address', ' ', 'ของ', 'คุณ', 'คือ', ' ', '127', '.', '0', '.', '0', '.', '1', ' ', 'ครับ']
rejoin_formatted_num(tokens)
# output:
# ['IP', ' ', 'address', ' ', 'ของ', 'คุณ', 'คือ', ' ', '127.0.0.1', ' ', 'ครับ']
"""
original = "".join(segments)
matching_results = _DIGITS_WITH_SEPARATOR.finditer(original)
tokens_joined = []
pos = 0
segment_idx = 0
match = next(matching_results, None)
while segment_idx < len(segments) and match:
is_span_beginning = pos >= match.start()
token = segments[segment_idx]
if is_span_beginning:
connected_token = ""
while pos < match.end() and segment_idx < len(segments):
connected_token += segments[segment_idx]
pos += len(segments[segment_idx])
segment_idx += 1
tokens_joined.append(connected_token)
match = next(matching_results, None)
else:
tokens_joined.append(token)
segment_idx += 1
pos += len(token)
tokens_joined += segments[segment_idx:]
return tokens_joined
def strip_whitespace(segments: List[str]) -> List[str]:
"""
Strip whitespace(s) off each token and remove whitespace tokens.
:param List[str] segments: result from word tokenizer
:return: a list of tokens
:rtype: List[str]
:Example:
tokens = [" ", "วันนี้ ", "เวลา ", "19.00น"]
strip_whitespace(tokens)
# ["วันนี้", "เวลา", "19.00น"]
"""
segments = [token.strip(" ") for token in segments if token.strip(" ")]
return segments
| 3,426 | 32.930693 | 120 | py |
pythainlp-dev/pythainlp/tokenize/attacut.py | pythainlp-dev/pythainlp/tokenize/attacut.py | # -*- coding: utf-8 -*
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Wrapper for AttaCut - Fast and Reasonably Accurate Word Tokenizer for Thai
:See Also:
* `GitHub repository <https://github.com/PyThaiNLP/attacut>`_
"""
from typing import List
from attacut import Tokenizer
class AttacutTokenizer:
def __init__(self, model="attacut-sc"):
self._MODEL_NAME = "attacut-sc"
if model == "attacut-c":
self._MODEL_NAME = "attacut-c"
self._tokenizer = Tokenizer(model=self._MODEL_NAME)
def tokenize(self, text: str) -> List[str]:
return self._tokenizer.tokenize(text)
def segment(text: str, model: str = "attacut-sc") -> List[str]:
"""
Wrapper for AttaCut - Fast and Reasonably Accurate Word Tokenizer for Thai
:param str text: text to be tokenized to words
    :param str model: word tokenizer model to be used
:return: list of words, tokenized from the text
:rtype: list[str]
**Options for model**
* *attacut-sc* (default) using both syllable and character features
* *attacut-c* using only character feature
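    :Example:
    A minimal usage sketch (the output shown is illustrative)::
        from pythainlp.tokenize.attacut import segment
        segment("โอเคบ่พวกเรารักภาษาบ้านเกิด")
        # output: ['โอเค', 'บ่', 'พวกเรา', 'รัก', 'ภาษา', 'บ้านเกิด']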
"""
if not text or not isinstance(text, str):
return []
_tokenizer = AttacutTokenizer(model)
return _tokenizer.tokenize(text)
| 1,824 | 31.589286 | 78 | py |
pythainlp-dev/pythainlp/tokenize/core.py | pythainlp-dev/pythainlp/tokenize/core.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tokenizer generic functions
"""
import re
from typing import Iterable, List, Union
from pythainlp.tokenize import (
DEFAULT_SENT_TOKENIZE_ENGINE,
DEFAULT_SUBWORD_TOKENIZE_ENGINE,
DEFAULT_SYLLABLE_DICT_TRIE,
DEFAULT_SYLLABLE_TOKENIZE_ENGINE,
DEFAULT_WORD_DICT_TRIE,
DEFAULT_WORD_TOKENIZE_ENGINE,
)
from pythainlp.tokenize._utils import (
apply_postprocessors,
rejoin_formatted_num,
strip_whitespace,
)
from pythainlp.util.trie import Trie, dict_trie
def clause_tokenize(doc: List[str]) -> List[List[str]]:
"""
    Clause tokenizer (or clause segmentation).
    Tokenizes a running word list into a list of clauses (list of strings),
    split by a CRF model trained on the Blackboard Treebank.
    :param List[str] doc: word list to be segmented into clauses
    :return: list of clauses
:rtype: list[list[str]]
:Example:
Clause tokenizer::
from pythainlp.tokenize import clause_tokenize
clause_tokenize(["ฉัน","นอน","และ","คุณ","เล่น","มือถือ","ส่วน","น้อง","เขียน","โปรแกรม"])
# [['ฉัน', 'นอน'],
# ['และ', 'คุณ', 'เล่น', 'มือถือ'],
# ['ส่วน', 'น้อง', 'เขียน', 'โปรแกรม']]
"""
from pythainlp.tokenize.crfcls import segment
return segment(doc)
def word_detokenize(
segments: Union[List[List[str]], List[str]], output: str = "str"
) -> Union[str, List[str]]:
"""
Word detokenizer.
    This function joins the list of words in each sentence back into running text.
    :param segments: a list of sentences, each given as a list of words
    :param str output: the output type ("str" or "list")
    :return: the Thai text
:rtype: Union[str,List[str]]
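    :Example:
    A minimal usage sketch (the output shown is illustrative)::
        from pythainlp.tokenize import word_detokenize
        word_detokenize(["ผม", "เลี้ยง", "5", "ตัว"])
        # output: 'ผมเลี้ยง 5 ตัว'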
"""
_list_all = []
if isinstance(segments[0], str):
segments = [segments]
from pythainlp import thai_characters
for i, s in enumerate(segments):
_list_sents = []
_add_index = []
_space_index = []
_mark_index = []
for j, w in enumerate(s):
if j > 0:
# previous word
p_w = s[j - 1]
                # if w is a number or non-Thai and is not a space
if (
w[0] not in thai_characters
and not w.isspace()
and not p_w.isspace()
):
_list_sents.append(" ")
_add_index.append(j)
                # if the previous word is a number or non-Thai and is not a space
elif p_w[0] not in thai_characters and not p_w.isspace():
_list_sents.append(" ")
_add_index.append(j)
# if word is Thai iteration mark
elif w == "ๆ":
if not p_w.isspace():
_list_sents.append(" ")
_mark_index.append(j)
elif w.isspace() and j - 1 not in _space_index:
_space_index.append(j)
elif j - 1 in _mark_index:
_list_sents.append(" ")
_list_sents.append(w)
_list_all.append(_list_sents)
if output == "list":
return _list_all
else:
_text = []
for i in _list_all:
_temp = ""
for j in i:
_temp += j
_text.append(_temp)
return " ".join(_text)
def word_tokenize(
text: str,
custom_dict: Trie = None,
engine: str = DEFAULT_WORD_TOKENIZE_ENGINE,
keep_whitespace: bool = True,
join_broken_num: bool = True,
) -> List[str]:
"""
Word tokenizer.
Tokenizes running text into words (list of strings).
:param str text: text to be tokenized
:param str engine: name of the tokenizer to be used
:param pythainlp.util.Trie custom_dict: dictionary trie
:param bool keep_whitespace: True to keep whitespaces, a common mark
for end of phrase in Thai.
Otherwise, whitespaces are omitted.
    :param bool join_broken_num: True to rejoin formatted numeric strings
                                 (e.g. time, decimals, IP addresses) that could be wrongly split.
                                 Otherwise, they are left as split.
:return: list of words
:rtype: List[str]
**Options for engine**
* *attacut* - wrapper for
`AttaCut <https://github.com/PyThaiNLP/attacut>`_.,
learning-based approach
* *deepcut* - wrapper for
`DeepCut <https://github.com/rkcosmos/deepcut>`_,
learning-based approach
* *icu* - wrapper for a word tokenizer in
`PyICU <https://gitlab.pyicu.org/main/pyicu>`_.,
from ICU (International Components for Unicode),
dictionary-based
* *longest* - dictionary-based, longest matching
* *mm* - "multi-cut", dictionary-based, maximum matching
* *nercut* - dictionary-based, maximal matching,
constrained with Thai Character Cluster (TCC) boundaries,
combining tokens that are parts of the same named-entity
* *newmm* (default) - "new multi-cut",
dictionary-based, maximum matching,
      constrained with Thai Character Cluster (TCC) boundaries,
      with improved TCC rules.
* *newmm-safe* - newmm, with a mechanism to avoid long
processing time for text with continuous ambiguous breaking points
* *nlpo3* - wrapper for a word tokenizer in
`nlpO3 <https://github.com/PyThaiNLP/nlpo3>`_.,
newmm adaptation in Rust (2.5x faster)
* *oskut* - wrapper for
`OSKut <https://github.com/mrpeerat/OSKut>`_.,
Out-of-domain StacKed cut for Word Segmentation
* *sefr_cut* - wrapper for
`SEFR CUT <https://github.com/mrpeerat/SEFR_CUT>`_.,
Stacked Ensemble Filter and Refine for Word Segmentation
* *tltk* - wrapper for
`TLTK <https://pypi.org/project/tltk/>`_.,
maximum collocation approach
:Note:
- The **custom_dict** parameter only works for \
*deepcut*, *longest*, *newmm*, and *newmm-safe* engines.
:Example:
Tokenize text with different tokenizer::
from pythainlp.tokenize import word_tokenize
text = "โอเคบ่พวกเรารักภาษาบ้านเกิด"
word_tokenize(text, engine="newmm")
# output: ['โอเค', 'บ่', 'พวกเรา', 'รัก', 'ภาษา', 'บ้านเกิด']
word_tokenize(text, engine='attacut')
# output: ['โอเค', 'บ่', 'พวกเรา', 'รัก', 'ภาษา', 'บ้านเกิด']
    Tokenize text by omitting whitespaces::
text = "วรรณกรรม ภาพวาด และการแสดงงิ้ว "
word_tokenize(text, engine="newmm")
# output:
# ['วรรณกรรม', ' ', 'ภาพวาด', ' ', 'และ', 'การแสดง', 'งิ้ว', ' ']
word_tokenize(text, engine="newmm", keep_whitespace=False)
# output: ['วรรณกรรม', 'ภาพวาด', 'และ', 'การแสดง', 'งิ้ว']
Join broken formatted numeric (e.g. time, decimals, IP address)::
text = "เงิน1,234บาท19:32น 127.0.0.1"
word_tokenize(text, engine="attacut", join_broken_num=False)
# output:
# ['เงิน', '1', ',', '234', 'บาท', '19', ':', '32น', ' ',
# '127', '.', '0', '.', '0', '.', '1']
word_tokenize(text, engine="attacut", join_broken_num=True)
# output:
# ['เงิน', '1,234', 'บาท', '19:32น', ' ', '127.0.0.1']
Tokenize with default and custom dictionary::
from pythainlp.corpus.common import thai_words
from pythainlp.tokenize import dict_trie
text = 'ชินโซ อาเบะ เกิด 21 กันยายน'
word_tokenize(text, engine="newmm")
# output:
# ['ชิน', 'โซ', ' ', 'อา', 'เบะ', ' ',
# 'เกิด', ' ', '21', ' ', 'กันยายน']
        custom_dict_japanese_name = set(thai_words())
custom_dict_japanese_name.add('ชินโซ')
custom_dict_japanese_name.add('อาเบะ')
trie = dict_trie(dict_source=custom_dict_japanese_name)
word_tokenize(text, engine="newmm", custom_dict=trie))
# output:
# ['ชินโซ', ' ', 'อาเบะ', ' ',
# 'เกิด', ' ', '21', ' ', 'กันยายน']
"""
if not text or not isinstance(text, str):
return []
segments = []
if engine == "newmm" or engine == "onecut":
from pythainlp.tokenize.newmm import segment
segments = segment(text, custom_dict)
elif engine == "newmm-safe":
from pythainlp.tokenize.newmm import segment
segments = segment(text, custom_dict, safe_mode=True)
elif engine == "attacut":
from pythainlp.tokenize.attacut import segment
segments = segment(text)
elif engine == "longest":
from pythainlp.tokenize.longest import segment
segments = segment(text, custom_dict)
elif engine == "mm" or engine == "multi_cut":
from pythainlp.tokenize.multi_cut import segment
segments = segment(text, custom_dict)
elif engine == "deepcut": # deepcut can optionally use dictionary
from pythainlp.tokenize.deepcut import segment
if custom_dict:
custom_dict = list(custom_dict)
segments = segment(text, custom_dict)
else:
segments = segment(text)
elif engine == "icu":
from pythainlp.tokenize.pyicu import segment
segments = segment(text)
elif engine == "nercut":
from pythainlp.tokenize.nercut import segment
segments = segment(text)
elif engine == "sefr_cut":
from pythainlp.tokenize.sefr_cut import segment
segments = segment(text)
elif engine == "tltk":
from pythainlp.tokenize.tltk import segment
segments = segment(text)
elif engine == "oskut":
from pythainlp.tokenize.oskut import segment
segments = segment(text)
elif engine == "nlpo3":
from pythainlp.tokenize.nlpo3 import segment
if isinstance(custom_dict, str):
segments = segment(text, custom_dict=custom_dict)
elif not isinstance(custom_dict, str) and custom_dict is not None:
raise ValueError(
f"""Tokenizer \"{engine}\":
custom_dict must be a str.
It is a dictionary name as assigned with load_dict().
See pythainlp.tokenize.nlpo3.load_dict()"""
)
else:
segments = segment(text)
else:
raise ValueError(
f"""Tokenizer \"{engine}\" not found.
It might be a typo; if not, please consult our document."""
)
postprocessors = []
if join_broken_num:
postprocessors.append(rejoin_formatted_num)
if not keep_whitespace:
postprocessors.append(strip_whitespace)
segments = apply_postprocessors(segments, postprocessors)
return segments
def sent_tokenize(
text: str,
engine: str = DEFAULT_SENT_TOKENIZE_ENGINE,
keep_whitespace: bool = True,
) -> List[str]:
"""
Sentence tokenizer.
Tokenizes running text into "sentences"
:param str text: the text to be tokenized
    :param str engine: choose among *'crfcut'*, *'thaisum'*, *'tltk'*, \
    *'wtp'*, *'whitespace'*, *'whitespace+newline'*
    :return: list of split sentences
:rtype: list[str]
**Options for engine**
* *crfcut* - (default) split by CRF trained on TED dataset
    * *thaisum* - the sentence segmenter implementation from \
      Nakhun Chumpolsathien, 2020
    * *tltk* - split by `TLTK <https://pypi.org/project/tltk/>`_.,
    * *wtp* - split by `wtpsplit <https://github.com/bminixhofer/wtpsplit>`_. \
      It supports several model sizes: ``wtp`` alone uses the mini model, \
      ``wtp-tiny`` uses the ``wtp-bert-tiny`` model, \
      ``wtp-mini`` uses the ``wtp-bert-mini`` model, \
      ``wtp-base`` uses the ``wtp-canine-s-1l`` model, \
      and ``wtp-large`` uses the ``wtp-canine-s-12l`` model.
* *whitespace+newline* - split by whitespaces and newline.
    * *whitespace* - split by whitespaces. Specifically, with \
:class:`regex` pattern ``r" +"``
:Example:
Split the text based on *whitespace*::
from pythainlp.tokenize import sent_tokenize
sentence_1 = "ฉันไปประชุมเมื่อวันที่ 11 มีนาคม"
sentence_2 = "ข้าราชการได้รับการหมุนเวียนเป็นระยะ \\
และได้รับมอบหมายให้ประจำในระดับภูมิภาค"
sent_tokenize(sentence_1, engine="whitespace")
# output: ['ฉันไปประชุมเมื่อวันที่', '11', 'มีนาคม']
sent_tokenize(sentence_2, engine="whitespace")
# output: ['ข้าราชการได้รับการหมุนเวียนเป็นระยะ',
# '\\nและได้รับมอบหมายให้ประจำในระดับภูมิภาค']
Split the text based on *whitespace* and *newline*::
sentence_1 = "ฉันไปประชุมเมื่อวันที่ 11 มีนาคม"
sentence_2 = "ข้าราชการได้รับการหมุนเวียนเป็นระยะ \\
และได้รับมอบหมายให้ประจำในระดับภูมิภาค"
sent_tokenize(sentence_1, engine="whitespace+newline")
# output: ['ฉันไปประชุมเมื่อวันที่', '11', 'มีนาคม']
sent_tokenize(sentence_2, engine="whitespace+newline")
# output: ['ข้าราชการได้รับการหมุนเวียนเป็นระยะ',
        # '\\nและได้รับมอบหมายให้ประจำในระดับภูมิภาค']
Split the text using CRF trained on TED dataset::
sentence_1 = "ฉันไปประชุมเมื่อวันที่ 11 มีนาคม"
sentence_2 = "ข้าราชการได้รับการหมุนเวียนเป็นระยะ \\
และเขาได้รับมอบหมายให้ประจำในระดับภูมิภาค"
sent_tokenize(sentence_1, engine="crfcut")
# output: ['ฉันไปประชุมเมื่อวันที่ 11 มีนาคม']
sent_tokenize(sentence_2, engine="crfcut")
# output: ['ข้าราชการได้รับการหมุนเวียนเป็นระยะ ',
        # 'และเขาได้รับมอบหมายให้ประจำในระดับภูมิภาค']
"""
if not text or not isinstance(text, str):
return []
segments = []
if engine == "crfcut":
from pythainlp.tokenize.crfcut import segment
segments = segment(text)
elif engine == "whitespace":
segments = re.split(r" +", text, re.U)
elif engine == "whitespace+newline":
segments = text.split()
elif engine == "tltk":
from pythainlp.tokenize.tltk import sent_tokenize as segment
segments = segment(text)
elif engine == "thaisum":
from pythainlp.tokenize.thaisumcut import (
ThaiSentenceSegmentor as segmentor,
)
segment = segmentor()
segments = segment.split_into_sentences(text)
elif engine.startswith("wtp"):
if "-" not in engine:
_size="mini"
else:
_size = engine.split("-")[-1]
from pythainlp.tokenize.wtsplit import tokenize as segment
segments = segment(text,size=_size,tokenize="sentence")
else:
raise ValueError(
f"""Tokenizer \"{engine}\" not found.
It might be a typo; if not, please consult our document."""
)
if not keep_whitespace:
segments = strip_whitespace(segments)
return segments
def paragraph_tokenize(text: str, engine: str = "wtp-mini", paragraph_threshold:float=0.5) -> List[List[str]]:
"""
Paragraph tokenizer.
Tokenizes text into paragraph.
:param str text: text to be tokenized
    :param str engine: the name of the paragraph tokenizer
    :return: list of paragraphs
:rtype: List[List[str]]
**Options for engine**
    * *wtp* - split by `wtpsplit <https://github.com/bminixhofer/wtpsplit>`_. \
      It supports several model sizes: ``wtp`` alone uses the mini model, \
      ``wtp-tiny`` uses the ``wtp-bert-tiny`` model, \
      ``wtp-mini`` uses the ``wtp-bert-mini`` model (the default here), \
      ``wtp-base`` uses the ``wtp-canine-s-1l`` model, \
      and ``wtp-large`` uses the ``wtp-canine-s-12l`` model.
:Example:
Split the text based on *wtp*::
from pythainlp.tokenize import paragraph_tokenize
sent = (
"(1) บทความนี้ผู้เขียนสังเคราะห์ขึ้นมาจากผลงานวิจัยที่เคยทำมาในอดีต"
+" มิได้ทำการศึกษาค้นคว้าใหม่อย่างกว้างขวางแต่อย่างใด"
+" จึงใคร่ขออภัยในความบกพร่องทั้งปวงมา ณ ที่นี้"
)
paragraph_tokenize(sent)
# output: [
# ['(1) '],
# [
# 'บทความนี้ผู้เขียนสังเคราะห์ขึ้นมาจากผลงานวิจัยที่เคยทำมาในอดีต ',
# 'มิได้ทำการศึกษาค้นคว้าใหม่อย่างกว้างขวางแต่อย่างใด ',
# 'จึงใคร่ขออภัยในความบกพร่องทั้งปวงมา ',
# 'ณ ที่นี้'
# ]]
"""
if engine.startswith("wtp"):
if "-" not in engine:
_size="mini"
else:
_size = engine.split("-")[-1]
from pythainlp.tokenize.wtsplit import tokenize as segment
segments = segment(text,size=_size,tokenize="paragraph",paragraph_threshold=paragraph_threshold)
else:
raise ValueError(
f"""Tokenizer \"{engine}\" not found.
It might be a typo; if not, please consult our document."""
)
return segments
def subword_tokenize(
text: str,
engine: str = DEFAULT_SUBWORD_TOKENIZE_ENGINE,
keep_whitespace: bool = True,
) -> List[str]:
"""
Subword tokenizer. Can be smaller than syllable.
Tokenizes text into inseparable units of
Thai contiguous characters namely
`Thai Character Clusters (TCCs) \
<https://www.researchgate.net/publication/2853284_Character_Cluster_Based_Thai_Information_Retrieval>`_
    TCCs are units based on Thai spelling features that cannot be
    separated into any smaller characters, such as 'ก็', 'จะ', 'ไม่', and 'ฝา'.
    If these units were split further, they could not be spelled out.
    This function applies the TCC rules to tokenize the text into
    the smallest units.
For example, the word 'ขนมชั้น' would be tokenized
into 'ข', 'น', 'ม', and 'ชั้น'.
:param str text: text to be tokenized
    :param str engine: the name of the subword tokenizer
:return: list of subwords
:rtype: list[str]
**Options for engine**
* *dict* - newmm word tokenizer with a syllable dictionary
* *etcc* - Enhanced Thai Character Cluster (Inrut et al. 2001)
* *ssg* - CRF syllable segmenter for Thai
* *tcc* (default) - Thai Character Cluster (Theeramunkong et al. 2000)
    * *tcc_p* - Thai Character Cluster with the improved rules used in newmm
* *tltk* - syllable tokenizer from tltk
* *wangchanberta* - SentencePiece from wangchanberta model
:Example:
Tokenize text into subword based on *tcc*::
from pythainlp.tokenize import subword_tokenize
text_1 = "ยุคเริ่มแรกของ ราชวงศ์หมิง"
text_2 = "ความแปลกแยกและพัฒนาการ"
subword_tokenize(text_1, engine='tcc')
# output: ['ยุ', 'ค', 'เริ่ม', 'แร', 'ก',
# 'ข', 'อ', 'ง', ' ', 'รา', 'ช', 'ว', 'ง',
# 'ศ', '์', 'ห', 'มิ', 'ง']
subword_tokenize(text_2, engine='tcc')
# output: ['ค', 'วา', 'ม', 'แป', 'ล', 'ก', 'แย', 'ก',
        # 'และ', 'พัฒ', 'นา', 'กา', 'ร']
Tokenize text into subword based on *etcc*::
text_1 = "ยุคเริ่มแรกของ ราชวงศ์หมิง"
text_2 = "ความแปลกแยกและพัฒนาการ"
subword_tokenize(text_1, engine='etcc')
# output: ['ยุคเริ่มแรกของ ราชวงศ์หมิง']
subword_tokenize(text_2, engine='etcc')
# output: ['ความแปลกแยกและ', 'พัฒ', 'นาการ']
Tokenize text into subword based on *wangchanberta*::
text_1 = "ยุคเริ่มแรกของ ราชวงศ์หมิง"
text_2 = "ความแปลกแยกและพัฒนาการ"
subword_tokenize(text_1, engine='wangchanberta')
# output: ['▁', 'ยุค', 'เริ่มแรก', 'ของ', '▁', 'ราชวงศ์', 'หมิง']
subword_tokenize(text_2, engine='wangchanberta')
# output: ['▁ความ', 'แปลก', 'แยก', 'และ', 'พัฒนาการ']
"""
if not text or not isinstance(text, str):
return []
segments = []
if engine == "tcc":
from pythainlp.tokenize.tcc import segment
elif engine == "tcc_p":
from pythainlp.tokenize.tcc_p import segment
elif engine == "etcc":
from pythainlp.tokenize.etcc import segment
elif engine == "wangchanberta":
from pythainlp.wangchanberta import segment
elif engine == "dict": # use syllable dictionary
words = word_tokenize(text)
for word in words:
segments.extend(
word_tokenize(
text=word, custom_dict=DEFAULT_SYLLABLE_DICT_TRIE
)
)
elif engine == "ssg":
from pythainlp.tokenize.ssg import segment
elif engine == "tltk":
from pythainlp.tokenize.tltk import syllable_tokenize as segment
else:
raise ValueError(
f"""Tokenizer \"{engine}\" not found.
It might be a typo; if not, please consult our document."""
)
if segments == []:
segments = segment(text)
if not keep_whitespace:
segments = strip_whitespace(segments)
return segments
class Tokenizer:
"""
Tokenizer class, for a custom tokenizer.
    This class allows users to pre-define a custom dictionary along with
    a tokenizer and encapsulate them into one single object.
    It is a wrapper for two functions:
    :func:`pythainlp.tokenize.word_tokenize`
    and :func:`pythainlp.util.dict_trie`.
:Example:
Tokenizer object instantiated with :class:`pythainlp.util.Trie`::
from pythainlp.tokenize import Tokenizer
from pythainlp.corpus.common import thai_words
from pythainlp.util import dict_trie
custom_words_list = set(thai_words())
custom_words_list.add('อะเฟเซีย')
custom_words_list.add('Aphasia')
trie = dict_trie(dict_source=custom_words_list)
text = "อะเฟเซีย (Aphasia*) เป็นอาการผิดปกติของการพูด"
_tokenizer = Tokenizer(custom_dict=trie, engine='newmm')
_tokenizer.word_tokenize(text)
# output: ['อะเฟเซีย', ' ', '(', 'Aphasia', ')', ' ', 'เป็น', 'อาการ',
        # 'ผิดปกติ', 'ของ', 'การ', 'พูด']
Tokenizer object instantiated with a list of words::
text = "อะเฟเซีย (Aphasia) เป็นอาการผิดปกติของการพูด"
_tokenizer = Tokenizer(custom_dict=list(thai_words()), engine='newmm')
_tokenizer.word_tokenize(text)
# output:
# ['อะ', 'เฟเซีย', ' ', '(', 'Aphasia', ')', ' ', 'เป็น', 'อาการ',
# 'ผิดปกติ', 'ของ', 'การ', 'พูด']
Tokenizer object instantiated with a file path containing list of
word separated with *newline* and explicitly set a new tokenizer
after initiation::
        PATH_TO_CUSTOM_DICTIONARY = './custom_dictionary.txt'
# write a file
with open(PATH_TO_CUSTOM_DICTIONARY, 'w', encoding='utf-8') as f:
f.write('อะเฟเซีย\\nAphasia\\nผิด\\nปกติ')
text = "อะเฟเซีย (Aphasia) เป็นอาการผิดปกติของการพูด"
        # initiate an object from a file with `attacut` as the tokenizer
_tokenizer = Tokenizer(custom_dict=PATH_TO_CUSTOM_DICTIONARY, \\
engine='attacut')
_tokenizer.word_tokenize(text)
# output:
# ['อะเฟเซีย', ' ', '(', 'Aphasia', ')', ' ', 'เป็น', 'อาการ', 'ผิด',
# 'ปกติ', 'ของ', 'การ', 'พูด']
# change tokenizer to `newmm`
        _tokenizer.set_tokenize_engine(engine='newmm')
_tokenizer.word_tokenize(text)
# output:
# ['อะเฟเซีย', ' ', '(', 'Aphasia', ')', ' ', 'เป็นอาการ', 'ผิด',
# 'ปกติ', 'ของการพูด']
"""
def __init__(
self,
custom_dict: Union[Trie, Iterable[str], str] = None,
engine: str = "newmm",
keep_whitespace: bool = True,
join_broken_num: bool = True,
):
"""
Initialize tokenizer object.
        :param str custom_dict: a file path, a list of vocabularies to be
used to create a trie, or an instantiated
:class:`pythainlp.util.Trie` object.
        :param str engine: the tokenization engine to use
(i.e. *newmm*, *mm*, *longest*, *deepcut*)
:param bool keep_whitespace: True to keep whitespaces, a common mark
for end of phrase in Thai
"""
self.__trie_dict = None
if custom_dict:
self.__trie_dict = dict_trie(custom_dict)
else:
self.__trie_dict = DEFAULT_WORD_DICT_TRIE
self.__engine = engine
if self.__engine not in ["newmm", "mm", "longest", "deepcut"]:
raise NotImplementedError(
"""
            The Tokenizer class does not support %s as a custom tokenizer engine
"""
% self.__engine
)
self.__keep_whitespace = keep_whitespace
self.__join_broken_num = join_broken_num
def word_tokenize(self, text: str) -> List[str]:
"""
Main tokenization function.
:param str text: text to be tokenized
:return: list of words, tokenized from the text
:rtype: list[str]
"""
return word_tokenize(
text,
custom_dict=self.__trie_dict,
engine=self.__engine,
keep_whitespace=self.__keep_whitespace,
join_broken_num=self.__join_broken_num,
)
def set_tokenize_engine(self, engine: str) -> None:
"""
Set the tokenizer's engine.
        :param str engine: the tokenization engine to use
(i.e. *newmm*, *mm*, *longest*, *deepcut*)
"""
self.__engine = engine
| 25,995 | 34.320652 | 110 | py |
pythainlp-dev/pythainlp/tokenize/crfcls.py | pythainlp-dev/pythainlp/tokenize/crfcls.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Clause segmenter
"""
from typing import List
import pycrfsuite
from pythainlp.tag import pos_tag
from pythainlp.corpus import path_pythainlp_corpus
def _doc2features(doc, i):
# features from current word
curr_word = doc[i][0]
curr_pos = doc[i][1]
features = {
"word.curr_word": curr_word,
"word.curr_isspace": curr_word.isspace(),
"word.curr_isdigit": curr_word.isdigit(),
"word.curr_postag": curr_pos,
}
# features from previous word
if i > 0:
prev_word = doc[i - 1][0]
prev_pos = doc[i - 1][1]
features["word.prev_word"] = prev_word
features["word.prev_isspace"] = prev_word.isspace()
features["word.prev_isdigit"] = prev_word.isdigit()
features["word.prev_postag"] = prev_pos
else:
features["BOS"] = True # Beginning of Sequence
# features from next word
if i < len(doc) - 1:
next_word = doc[i + 1][0]
next_pos = doc[i + 1][1]
features["word.next_word"] = next_word
features["word.next_isspace"] = next_word.isspace()
features["word.next_isdigit"] = next_word.isdigit()
features["word.next_postag"] = next_pos
else:
features["EOS"] = True # End of Sequence
return features
def _extract_features(doc):
return [_doc2features(doc, i) for i in range(len(doc))]
_CORPUS_NAME = "blackboard-cls_v1.0.crfsuite"
tagger = pycrfsuite.Tagger()
tagger.open(path_pythainlp_corpus(_CORPUS_NAME))
def segment(doc: List[str]) -> List[List[str]]:
word_tags = pos_tag(doc, corpus="blackboard")
features = _extract_features(word_tags)
word_markers = list(zip(doc, tagger.tag(features)))
clauses = []
temp = []
len_doc = len(doc) - 1
for i, word_marker in enumerate(word_markers):
word, marker = word_marker
if marker == "E_CLS" or i == len_doc:
temp.append(word)
clauses.append(temp)
temp = []
else:
temp.append(word)
return clauses
| 2,650 | 29.125 | 74 | py |
pythainlp-dev/pythainlp/tokenize/crfcut.py | pythainlp-dev/pythainlp/tokenize/crfcut.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
CRFCut - Thai sentence segmenter.
Thai sentence segmentation using conditional random field,
default model trained on TED dataset
Performance:
- ORCHID - space-correct accuracy 87% vs 95% state-of-the-art
(Zhou et al, 2016; https://www.aclweb.org/anthology/C16-1031.pdf)
- TED dataset - space-correct accuracy 82%
See development notebooks at https://github.com/vistec-AI/ted_crawler;
POS features are not used due to unreliable POS tagging available
"""
import os
from typing import List
import pycrfsuite
from pythainlp.corpus import corpus_path
from pythainlp.tokenize import word_tokenize
_ENDERS = {
# ending honorifics
"ครับ",
"ค่ะ",
"คะ",
"นะคะ",
"นะ",
"จ้ะ",
"จ้า",
"จ๋า",
"ฮะ",
# enders
"ๆ",
"ได้",
"แล้ว",
"ด้วย",
"เลย",
"มาก",
"น้อย",
"กัน",
"เช่นกัน",
"เท่านั้น",
"อยู่",
"ลง",
"ขึ้น",
"มา",
"ไป",
"ไว้",
"เอง",
"อีก",
"ใหม่",
"จริงๆ",
"บ้าง",
"หมด",
"ทีเดียว",
"เดียว",
# demonstratives
"นั้น",
"นี้",
"เหล่านี้",
"เหล่านั้น",
# questions
"อย่างไร",
"ยังไง",
"หรือไม่",
"มั้ย",
"ไหน",
"ไหม",
"อะไร",
"ทำไม",
"เมื่อไหร่",
"เมื่อไร",
}
_STARTERS = {
# pronouns
"ผม",
"ฉัน",
"ดิฉัน",
"ชั้น",
"คุณ",
"มัน",
"เขา",
"เค้า",
"เธอ",
"เรา",
"พวกเรา",
"พวกเขา",
"กู",
"มึง",
"แก",
"ข้าพเจ้า",
# connectors
"และ",
"หรือ",
"แต่",
"เมื่อ",
"ถ้า",
"ใน",
"ด้วย",
"เพราะ",
"เนื่องจาก",
"ซึ่ง",
"ไม่",
"ตอนนี้",
"ทีนี้",
"ดังนั้น",
"เพราะฉะนั้น",
"ฉะนั้น",
"ตั้งแต่",
"ในที่สุด",
"ก็",
"กับ",
"แก่",
"ต่อ",
# demonstratives
"นั้น",
"นี้",
"เหล่านี้",
"เหล่านั้น",
}
def extract_features(
doc: List[str], window: int = 2, max_n_gram: int = 3
) -> List[List[str]]:
"""
Extract features for CRF by sliding `max_n_gram` of tokens
for +/- `window` from the current token
:param List[str] doc: tokens from which features are to be extracted from
:param int window: size of window before and after the current token
:param int max_n_gram: create n_grams from 1-gram to `max_n_gram`-gram \
within the `window`
:return: list of lists of features to be fed to CRF
"""
doc_features = []
doc = (
["xxpad" for i in range(window)]
+ doc
+ ["xxpad" for i in range(window)]
)
# add enders and starters
doc_ender = []
doc_starter = []
for i in range(len(doc)):
if doc[i] in _ENDERS:
doc_ender.append("ender")
else:
doc_ender.append("normal")
if doc[i] in _STARTERS:
doc_starter.append("starter")
else:
doc_starter.append("normal")
# for each word
for i in range(window, len(doc) - window):
# bias term
word_features = ["bias"]
# ngram features
for n_gram in range(1, min(max_n_gram + 1, 2 + window * 2)):
for j in range(i - window, i + window + 2 - n_gram):
feature_position = f"{n_gram}_{j-i}_{j-i+n_gram}"
word_ = f'{"|".join(doc[j:(j+n_gram)])}'
word_features += [f"word_{feature_position}={word_}"]
ender_ = f'{"|".join(doc_ender[j:(j+n_gram)])}'
word_features += [f"ender_{feature_position}={ender_}"]
starter_ = f'{"|".join(doc_starter[j:(j+n_gram)])}'
word_features += [f"starter_{feature_position}={starter_}"]
# append to feature per word
doc_features.append(word_features)
return doc_features
_CRFCUT_DATA_FILENAME = "sentenceseg_crfcut.model"
_tagger = pycrfsuite.Tagger()
_tagger.open(os.path.join(corpus_path(), _CRFCUT_DATA_FILENAME))
def segment(text: str) -> List[str]:
"""
CRF-based sentence segmentation.
:param str text: text to be tokenized to sentences
    :return: list of sentences, tokenized from the text
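    :Example:
    A minimal usage sketch (the output shown is illustrative)::
        from pythainlp.tokenize.crfcut import segment
        segment("ฉันไปประชุมเมื่อวันที่ 11 มีนาคม")
        # output: ['ฉันไปประชุมเมื่อวันที่ 11 มีนาคม']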
"""
if isinstance(text, str):
toks = word_tokenize(text)
else:
toks = text
feat = extract_features(toks)
labs = _tagger.tag(feat)
labs[-1] = "E" # make sure it cuts the last sentence
sentences = []
sentence = ""
for i, w in enumerate(toks):
sentence = sentence + w
if labs[i] == "E":
sentences.append(sentence)
sentence = ""
return sentences
| 5,136 | 22.13964 | 77 | py |
pythainlp-dev/pythainlp/tokenize/deepcut.py | pythainlp-dev/pythainlp/tokenize/deepcut.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Wrapper for deepcut Thai word segmentation. deepcut is a
Thai word segmentation library using 1D Convolution Neural Network.
Users need to install deepcut (and its dependency: tensorflow) by themselves.
:See Also:
* `GitHub repository <https://github.com/rkcosmos/deepcut>`_
"""
from typing import List, Union
try:
from deepcut import tokenize
except ImportError:
raise ImportError("Please install deepcut by pip install deepcut")
from pythainlp.util import Trie
def segment(
text: str, custom_dict: Union[Trie, List[str], str] = None
) -> List[str]:
if not text or not isinstance(text, str):
return []
if custom_dict:
if isinstance(custom_dict, Trie):
custom_dict = list(custom_dict)
return tokenize(text, custom_dict)
return tokenize(text)
| 1,432 | 29.489362 | 76 | py |
pythainlp-dev/pythainlp/tokenize/etcc.py | pythainlp-dev/pythainlp/tokenize/etcc.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Segmenting text to Enhanced Thai Character Cluster (ETCC)
Python implementation by Wannaphong Phatthiyaphaibun
This implementation relies on a dictionary of ETCC created from etcc.txt
in pythainlp/corpus.
Notebook:
https://colab.research.google.com/drive/1UTQgxxMRxOr9Jp1B1jcq1frBNvorhtBQ
:See Also:
Jeeragone Inrut, Patiroop Yuanghirun, Sarayut Paludkong, Supot Nitsuwat, and
Para Limmaneepraserth. "Thai word segmentation using combination of forward
and backward longest matching techniques." In International Symposium on
Communications and Information Technology (ISCIT), pp. 37-40. 2001.
"""
import re
from typing import List
from pythainlp import thai_follow_vowels
from pythainlp.corpus import get_corpus
from pythainlp.tokenize import Tokenizer
_cut_etcc = Tokenizer(get_corpus("etcc.txt"), engine="longest")
_PAT_ENDING_CHAR = f"[{thai_follow_vowels}ๆฯ]"
_RE_ENDING_CHAR = re.compile(_PAT_ENDING_CHAR)
def _cut_subword(tokens: List[str]) -> List[str]:
len_tokens = len(tokens)
i = 0
while True:
if i == len_tokens:
break
if _RE_ENDING_CHAR.search(tokens[i]) and i > 0 and len(tokens[i]) == 1:
tokens[i - 1] += tokens[i]
del tokens[i]
len_tokens -= 1
i += 1
return tokens
def segment(text: str) -> List[str]:
"""
Segmenting text into ETCCs.
Enhanced Thai Character Cluster (ETCC) is a kind of subword unit.
The concept was presented in Inrut, Jeeragone, Patiroop Yuanghirun,
Sarayut Paludkong, Supot Nitsuwat, and Para Limmaneepraserth.
"Thai word segmentation using combination of forward and backward
longest matching techniques." In International Symposium on Communications
and Information Technology (ISCIT), pp. 37-40. 2001.
:param str text: text to be tokenized to character clusters
:return: list of clusters, tokenized from the text
    :rtype: list[str]
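    :Example:
    A minimal usage sketch (the output shown is illustrative)::
        from pythainlp.tokenize.etcc import segment
        segment("ความแปลกแยกและพัฒนาการ")
        # output: ['ความแปลกแยกและ', 'พัฒ', 'นาการ']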
"""
if not text or not isinstance(text, str):
return []
return _cut_subword(_cut_etcc.word_tokenize(text))
| 2,661 | 33.128205 | 79 | py |
pythainlp-dev/pythainlp/tokenize/longest.py | pythainlp-dev/pythainlp/tokenize/longest.py | # -*- coding: utf-8 -*-
"""
Dictionary-based longest-matching Thai word segmentation. Implementation based
on the code from Patorn Utenpattanun.
:See Also:
* `GitHub Repository \
<https://github.com/patorn/thaitokenizer/blob/master/thaitokenizer/tokenizer.py>`_
"""
import re
from typing import List, Union
from pythainlp import thai_tonemarks
from pythainlp.tokenize import DEFAULT_WORD_DICT_TRIE
from pythainlp.util import Trie
_FRONT_DEP_CHAR = [
"ะ",
"ั",
"า ",
"ำ",
"ิ",
"ี",
"ึ",
"ื",
"ุ",
"ู",
"ๅ",
"็",
"์",
"ํ",
]
_REAR_DEP_CHAR = ["ั", "ื", "เ", "แ", "โ", "ใ", "ไ", "ํ"]
_TRAILING_CHAR = ["ๆ", "ฯ"]
_RE_NONTHAI = re.compile(r"[A-Za-z\d]*")
_KNOWN = True
_UNKNOWN = False
class LongestMatchTokenizer(object):
def __init__(self, trie: Trie):
self.__trie = trie
@staticmethod
def __search_nonthai(text: str) -> Union[None, str]:
match = _RE_NONTHAI.search(text)
if match.group(0):
return match.group(0).lower()
return None
def __is_next_word_valid(self, text: str, begin_pos: int) -> bool:
text = text[begin_pos:].strip()
if not text:
return True
match = self.__search_nonthai(text)
if match:
return True
for pos in range(len(text) + 1):
if text[0:pos] in self.__trie:
return True
return False
def __longest_matching(self, text: str, begin_pos: int) -> str:
text = text[begin_pos:]
match = self.__search_nonthai(text)
if match:
return match
word = None
word_valid = None
for pos in range(len(text) + 1):
w = text[0:pos]
if w in self.__trie:
word = w
if self.__is_next_word_valid(text, pos):
word_valid = w
if word:
if not word_valid:
word_valid = word
try:
len_word_valid = len(word_valid)
if text[len_word_valid] in _TRAILING_CHAR:
return text[0 : len_word_valid + 1]
else:
return word_valid
            except IndexError:  # word_valid spans to the end of the text
return word_valid
else:
return ""
def __segment(self, text: str):
begin_pos = 0
len_text = len(text)
tokens = []
token_statuses = []
while begin_pos < len_text:
match = self.__longest_matching(text, begin_pos)
if not match:
if (
begin_pos != 0
and not text[begin_pos].isspace()
and (
text[begin_pos] in _FRONT_DEP_CHAR
or text[begin_pos - 1] in _REAR_DEP_CHAR
or text[begin_pos] in thai_tonemarks
or (token_statuses and token_statuses[-1] == _UNKNOWN)
)
):
tokens[-1] += text[begin_pos]
token_statuses[-1] = _UNKNOWN
else:
tokens.append(text[begin_pos])
token_statuses.append(_UNKNOWN)
begin_pos += 1
else:
if begin_pos != 0 and text[begin_pos - 1] in _REAR_DEP_CHAR:
tokens[-1] += match
else:
tokens.append(match)
token_statuses.append(_KNOWN)
begin_pos += len(match)
return tokens
def tokenize(self, text: str) -> List[str]:
tokens = self.__segment(text)
return tokens
def segment(
text: str, custom_dict: Trie = DEFAULT_WORD_DICT_TRIE
) -> List[str]:
"""
Dictionary-based longest matching word segmentation.
:param str text: text to be tokenized to words
:param pythainlp.util.Trie custom_dict: dictionary for tokenization
:return: list of words, tokenized from the text
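    :Example:
    A minimal usage sketch (the output shown is illustrative and may vary
    with the dictionary used)::
        from pythainlp.tokenize.longest import segment
        segment("ป้ายอัจฉริยะ")
        # possible output: ['ป้าย', 'อัจฉริยะ']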
"""
if not text or not isinstance(text, str):
return []
if not custom_dict:
custom_dict = DEFAULT_WORD_DICT_TRIE
return LongestMatchTokenizer(custom_dict).tokenize(text)
| 4,237 | 25.822785 | 89 | py |
pythainlp-dev/pythainlp/tokenize/multi_cut.py | pythainlp-dev/pythainlp/tokenize/multi_cut.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Multi cut -- Thai word segmentation with maximum matching.
Original code from Korakot Chaovavanich.
:See Also:
* `Facebook post \
<https://www.facebook.com/groups/408004796247683/permalink/431283740586455/>`_
* `GitHub Gist \
<https://gist.github.com/korakot/fe26c65dc9eed467f4497f784a805716>`_
"""
import re
from collections import defaultdict
from typing import Iterator, List
from pythainlp.tokenize import DEFAULT_WORD_DICT_TRIE
from pythainlp.util import Trie
class LatticeString(str):
"""String that keeps possible tokenizations"""
def __new__(cls, value, multi=None, in_dict=True):
return str.__new__(cls, value)
def __init__(self, value, multi=None, in_dict=True):
self.unique = True
if multi:
self.multi = list(multi)
if len(self.multi) > 1:
self.unique = False
else:
self.multi = [value]
self.in_dict = in_dict # if in dictionary
_RE_NONTHAI = r"""(?x)
[-a-zA-Z]+| # Latin characters
\d+([,\.]\d+)*| # number
[ \t]+| # space
\r?\n # newline
"""
_PAT_NONTHAI = re.compile(_RE_NONTHAI)
def _multicut(
text: str, custom_dict: Trie = DEFAULT_WORD_DICT_TRIE
) -> Iterator[LatticeString]:
"""Return LatticeString"""
if not custom_dict:
custom_dict = DEFAULT_WORD_DICT_TRIE
len_text = len(text)
words_at = defaultdict(list) # main data structure
def serialize(p, p2): # helper function
for w in words_at[p]:
p_ = p + len(w)
if p_ == p2:
yield w
elif p_ < p2:
for path in serialize(p_, p2):
yield w + "/" + path
q = {0}
last_p = 0 # last position for yield
while min(q) < len_text:
p = min(q)
q -= {p} # q.pop, but for set
for w in custom_dict.prefixes(text[p:]):
words_at[p].append(w)
q.add(p + len(w))
len_q = len(q)
if len_q == 1:
q0 = min(q)
yield LatticeString(text[last_p:q0], serialize(last_p, q0))
last_p = q0
elif len_q == 0: # len(q) == 0 means not found in dictionary
m = _PAT_NONTHAI.match(text[p:])
            if m:  # non-Thai token
i = p + m.span()[1]
            else:  # Thai token not in dictionary, find minimum skip
for i in range(p, len_text):
ww = custom_dict.prefixes(text[i:])
m = _PAT_NONTHAI.match(text[i:])
if ww or m:
break
else:
i = len_text
w = text[p:i]
words_at[p].append(w)
yield LatticeString(w, in_dict=False)
last_p = i
q.add(i)
def mmcut(text: str) -> List[str]:
res = []
for w in _multicut(text):
mm = min(w.multi, key=lambda x: x.count("/"))
res.extend(mm.split("/"))
return res
def _combine(ww: List[LatticeString]) -> Iterator[str]:
if ww == []:
yield ""
else:
w = ww[0]
for tail in _combine(ww[1:]):
if w.unique:
yield w + "|" + tail
else:
for m in w.multi:
yield m.replace("/", "|") + "|" + tail
def segment(
text: str, custom_dict: Trie = DEFAULT_WORD_DICT_TRIE
) -> List[str]:
"""Dictionary-based maximum matching word segmentation.
:param text: text to be tokenized
:type text: str
:param custom_dict: tokenization dictionary,\
defaults to DEFAULT_WORD_DICT_TRIE
:type custom_dict: Trie, optional
:return: list of segmented tokens
:rtype: List[str]
"""
if not text or not isinstance(text, str):
return []
return list(_multicut(text, custom_dict=custom_dict))
def find_all_segment(
text: str, custom_dict: Trie = DEFAULT_WORD_DICT_TRIE
) -> List[str]:
"""Get all possible segment variations.
:param text: input string to be tokenized
:type text: str
:param custom_dict: tokenization dictionary,\
defaults to DEFAULT_WORD_DICT_TRIE
:type custom_dict: Trie, optional
:return: list of segment variations
:rtype: List[str]
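    :Example:
    A minimal usage sketch (the output shown is illustrative and truncated;
    actual variations depend on the dictionary)::
        from pythainlp.tokenize.multi_cut import find_all_segment
        find_all_segment("มีความเป็นไปได้อย่างไรบ้าง")
        # possible output (each variation joined by "|"):
        # ['มี|ความ|เป็น|ไป|ได้|อย่าง|ไร|บ้าง|',
        #  'มี|ความ|เป็นไป|ได้|อย่าง|ไร|บ้าง|',
        #  ...]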
"""
if not text or not isinstance(text, str):
return []
ww = list(_multicut(text, custom_dict=custom_dict))
return list(_combine(ww))
| 5,032 | 28.092486 | 86 | py |
pythainlp-dev/pythainlp/tokenize/nercut.py | pythainlp-dev/pythainlp/tokenize/nercut.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
nercut 0.2
Dictionary-based maximal matching word segmentation, constrained with
Thai Character Cluster (TCC) boundaries, and combining tokens that are
parts of the same named-entity.
Code by Wannaphong Phatthiyaphaibun
"""
from typing import Iterable, List
from pythainlp.tag.named_entity import NER
_thainer = NER(engine="thainer")
def segment(
text: str,
taglist: Iterable[str] = [
"ORGANIZATION",
"PERSON",
"PHONE",
"EMAIL",
"DATE",
"TIME",
],
tagger=_thainer,
) -> List[str]:
"""
Dictionary-based maximal matching word segmentation, constrained with
Thai Character Cluster (TCC) boundaries, and combining tokens that are
parts of the same named-entity.
:param str text: text to be tokenized to words
    :param list taglist: a list of named-entity tags to be used
    :param class tagger: NER tagger engine
:return: list of words, tokenized from the text
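    :Example:
    A minimal usage sketch (the output shown is illustrative; the actual
    result depends on the NER model)::
        from pythainlp.tokenize.nercut import segment
        segment("ทดสอบนายวรรณพงษ์ ภัททิยไพบูลย์")
        # possible output: ['ทดสอบ', 'นายวรรณพงษ์', ' ', 'ภัททิยไพบูลย์']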
"""
if not isinstance(text, str):
return []
tagged_words = tagger.tag(text, pos=False)
words = []
combining_word = ""
for idx, (curr_word, curr_tag) in enumerate(tagged_words):
if curr_tag != "O":
tag = curr_tag[2:]
else:
tag = "O"
if curr_tag.startswith("B-") and tag in taglist:
combining_word = curr_word
elif (
curr_tag.startswith("I-")
and combining_word != ""
and tag in taglist
):
combining_word += curr_word
elif curr_tag == "O" and combining_word != "":
words.append(combining_word)
combining_word = ""
words.append(curr_word)
else: # if tag is O
combining_word = ""
words.append(curr_word)
if idx + 1 == len(tagged_words):
if curr_tag.startswith("B-") and combining_word != "":
words.append(combining_word)
elif curr_tag.startswith("I-") and combining_word != "":
words.append(combining_word)
else:
pass
return words
| 2,731 | 29.355556 | 74 | py |
pythainlp-dev/pythainlp/tokenize/newmm.py | pythainlp-dev/pythainlp/tokenize/newmm.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Dictionary-based maximal matching word segmentation, constrained with
Thai Character Cluster (TCC) boundaries with improve the rules.
The code is based on the notebooks created by Korakot Chaovavanich,
with heuristic graph size limit added to avoid exponential wait time.
:See Also:
* \
https://colab.research.google.com/notebook#fileId=1V1Z657_5eSWPo8rLfVRwA0A5E4vkg7SI
* \
https://colab.research.google.com/drive/14Ibg-ngZXj15RKwjNwoZlOT32fQBOrBx#scrollTo=MYZ7NzAR7Dmw
"""
import re
from collections import defaultdict
from heapq import heappop, heappush
from typing import Generator, List
from pythainlp.tokenize import DEFAULT_WORD_DICT_TRIE
from pythainlp.util import Trie
from pythainlp.tokenize.tcc_p import tcc_pos
# match non-Thai tokens
_PAT_NONTHAI = re.compile(
r"""(?x)
[-a-zA-Z]+| # Latin characters
\d+([,\.]\d+)*| # number
[ \t]+| # space
\r?\n # newline
"""
)
# match 2-consonant Thai tokens
_PAT_THAI_TWOCHARS = re.compile("[ก-ฮ]{,2}$")
# maximum graph size before cutoff
_MAX_GRAPH_SIZE = 50
# window size for safe mode
_TEXT_SCAN_POINT = 120
_TEXT_SCAN_LEFT = 20
_TEXT_SCAN_RIGHT = 20
_TEXT_SCAN_BEGIN = _TEXT_SCAN_POINT - _TEXT_SCAN_LEFT
_TEXT_SCAN_END = _TEXT_SCAN_POINT + _TEXT_SCAN_RIGHT
del _TEXT_SCAN_POINT
del _TEXT_SCAN_LEFT
del _TEXT_SCAN_RIGHT
def _bfs_paths_graph(
graph: defaultdict, start: int, goal: int
) -> Generator[List[int], None, None]:
queue = [(start, [start])]
while queue:
(vertex, path) = queue.pop(0)
for pos in graph[vertex]:
if pos == goal:
yield path + [pos]
else:
queue.append((pos, path + [pos]))
def _onecut(text: str, custom_dict: Trie) -> Generator[str, None, None]:
# main data structure:
# - key is begin position (int)
# - value is possible end positions (List[int])
# if key is not found, value is empty list
graph = defaultdict(list)
graph_size = 0 # keep track of graph size, if too big will force cutoff
valid_poss = tcc_pos(text) # breaking positions that are TCC-valid
len_text = len(text)
pos_list = [0] # priority queue of possible breaking positions
end_pos = 0
while pos_list[0] < len_text:
begin_pos = heappop(pos_list)
for word in custom_dict.prefixes(text[begin_pos:]):
end_pos_candidate = begin_pos + len(word)
if end_pos_candidate in valid_poss:
graph[begin_pos].append(end_pos_candidate)
graph_size = graph_size + 1
if end_pos_candidate not in pos_list:
heappush(pos_list, end_pos_candidate)
if graph_size > _MAX_GRAPH_SIZE:
break
len_pos_list = len(pos_list)
if len_pos_list == 1: # one candidate, no longer ambiguous
end_pos_candidates = next(
_bfs_paths_graph(graph, end_pos, pos_list[0])
)
graph_size = 0
for pos in end_pos_candidates[1:]:
yield text[end_pos:pos]
end_pos = pos
elif len_pos_list == 0: # no candidate, deal with non-dictionary word
m = _PAT_NONTHAI.match(text[begin_pos:])
if m: # non-Thai token, skip to the end
end_pos = begin_pos + m.end()
else: # Thai token, find minimum skip
for pos in range(begin_pos + 1, len_text):
if pos in valid_poss:
prefix = text[pos:]
words = [
word
for word in custom_dict.prefixes(prefix)
if (
(pos + len(word) in valid_poss)
and not _PAT_THAI_TWOCHARS.match(word)
)
]
                    if words:  # is a Thai token that is longer than 2 chars
end_pos = pos
break
# is a non-Thai token
if _PAT_NONTHAI.match(prefix):
end_pos = pos
break
else:
end_pos = len_text
graph[begin_pos].append(end_pos)
graph_size = graph_size + 1
yield text[begin_pos:end_pos]
heappush(pos_list, end_pos)
def segment(
text: str,
custom_dict: Trie = DEFAULT_WORD_DICT_TRIE,
safe_mode: bool = False,
) -> List[str]:
"""Maximal-matching word segmentation, Thai Character Cluster constrained.
A dictionary-based word segmentation using maximal matching algorithm,
constrained to Thai Character Cluster boundaries.
A custom dictionary can be supplied.
:param text: text to be tokenized
:type text: str
:param custom_dict: tokenization dictionary,\
defaults to DEFAULT_WORD_DICT_TRIE
:type custom_dict: Trie, optional
:param safe_mode: reduce chance for long processing time in long text\
with many ambiguous breaking points, defaults to False
:type safe_mode: bool, optional
:return: list of tokens
:rtype: List[str]
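    :Example:
    A minimal usage sketch (the output shown is illustrative)::
        from pythainlp.tokenize.newmm import segment
        segment("โอเคบ่พวกเรารักภาษาบ้านเกิด")
        # output: ['โอเค', 'บ่', 'พวกเรา', 'รัก', 'ภาษา', 'บ้านเกิด']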
"""
if not text or not isinstance(text, str):
return []
if not custom_dict:
custom_dict = DEFAULT_WORD_DICT_TRIE
if not safe_mode or len(text) < _TEXT_SCAN_END:
return list(_onecut(text, custom_dict))
# if the text is longer than the limit,
# breaks them into smaller chunks then tokenizes each chunk
text_parts = []
while len(text) >= _TEXT_SCAN_END:
sample = text[_TEXT_SCAN_BEGIN:_TEXT_SCAN_END]
# find possible break positions
cut_pos = _TEXT_SCAN_END
# try to break by space first
space_idx = sample.rfind(" ")
if space_idx >= 0:
cut_pos = space_idx + 1
else:
tokens = list(_onecut(sample, custom_dict))
token_max_idx = 0
token_max_len = 0
for i, token in enumerate(tokens):
if len(token) >= token_max_len:
token_max_len = len(token)
token_max_idx = i
# choose the position that covers longest token
cut_pos = _TEXT_SCAN_BEGIN
for i in range(0, token_max_idx):
cut_pos = cut_pos + len(tokens[i])
text_parts.append(text[:cut_pos])
text = text[cut_pos:]
# append remaining text
if len(text):
text_parts.append(text)
# tokenizes each text parts
tokens = []
for text_part in text_parts:
tokens.extend(list(_onecut(text_part, custom_dict)))
return tokens
| 7,379 | 32.393665 | 103 | py |
pythainlp-dev/pythainlp/tokenize/nlpo3.py | pythainlp-dev/pythainlp/tokenize/nlpo3.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sys import stderr
from typing import List
from nlpo3 import segment as nlpo3_segment
from nlpo3 import load_dict as nlpo3_load_dict
from pythainlp.corpus.common import _THAI_WORDS_FILENAME
from pythainlp.corpus import path_pythainlp_corpus
_NLPO3_DEFAULT_DICT_NAME = "_67a47bf9"
_NLPO3_DEFAULT_DICT = nlpo3_load_dict(
path_pythainlp_corpus(_THAI_WORDS_FILENAME), _NLPO3_DEFAULT_DICT_NAME
)
def load_dict(file_path: str, dict_name: str) -> bool:
"""Load a dictionary file into an in-memory dictionary collection.
    The loaded dictionary will be accessible through the assigned dict_name.
    *** This function does not override an existing dict name. ***
    :param file_path: Path to a dictionary file
    :type file_path: str
    :param dict_name: A unique dictionary name, used for reference.
    :type dict_name: str
    :return: True if the dictionary was loaded successfully, False otherwise
    :rtype: bool
:See Also:
* \
https://github.com/PyThaiNLP/nlpo3
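    :Example:

    An illustrative sketch (the dictionary path below is a placeholder)::

        from pythainlp.tokenize.nlpo3 import load_dict

        load_dict("/path/to/my_dict.txt", "my_dict")
        # returns True if the dictionary was loaded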
"""
msg, success = nlpo3_load_dict(file_path=file_path, dict_name=dict_name)
    if not success:
print(msg, file=stderr)
return success
def segment(
text: str,
custom_dict: str = _NLPO3_DEFAULT_DICT_NAME,
safe_mode: bool = False,
parallel_mode: bool = False,
) -> List[str]:
"""Break text into tokens.
    Python binding for nlpO3, a Rust implementation of the newmm engine.
:param str text: text to be tokenized
:param str custom_dict: dictionary name, as assigned with load_dict(),\
defaults to pythainlp/corpus/common/words_th.txt
:param bool safe_mode: reduce chance for long processing time in long text\
with many ambiguous breaking points, defaults to False
:param bool parallel_mode: Use multithread mode, defaults to False
:return: list of tokens
:rtype: List[str]
:See Also:
* \
https://github.com/PyThaiNLP/nlpo3
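    :Example:

    A minimal sketch using the default dictionary (output is indicative)::

        from pythainlp.tokenize.nlpo3 import segment

        segment("ทดสอบการตัดคำภาษาไทย")
        # output (indicative): ['ทดสอบ', 'การ', 'ตัดคำ', 'ภาษาไทย']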
"""
return nlpo3_segment(
text=text,
dict_name=custom_dict,
safe=safe_mode,
parallel=parallel_mode,
)
| 2,614 | 31.283951 | 79 | py |
pythainlp-dev/pythainlp/tokenize/oskut.py | pythainlp-dev/pythainlp/tokenize/oskut.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Wrapper for OSKut (Out-of-domain StacKed cut for Word Segmentation),
from "Handling Cross- and Out-of-Domain Samples in Thai Word Segmentation"
(ACL 2021 Findings): a stacked ensemble framework with DeepCut as the baseline model.
:See Also:
* `GitHub repository <https://github.com/mrpeerat/OSKut>`_
"""
from typing import List
import oskut
DEFAULT_ENGINE = "ws"
oskut.load_model(engine=DEFAULT_ENGINE)
def segment(text: str, engine: str = "ws") -> List[str]:
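    """
    Word segmentation using OSKut.

    :param str text: text to be tokenized
    :param str engine: engine name passed to oskut.load_model(), defaults to "ws"
    :return: list of tokens
    :rtype: List[str]

    A minimal usage sketch (output is indicative)::

        from pythainlp.tokenize.oskut import segment

        segment("ทดสอบการตัดคำ")
    """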
global DEFAULT_ENGINE
if not text or not isinstance(text, str):
return []
if engine != DEFAULT_ENGINE:
DEFAULT_ENGINE = engine
oskut.load_model(engine=DEFAULT_ENGINE)
return oskut.OSKut(text)
| 1,299 | 32.333333 | 76 | py |
pythainlp-dev/pythainlp/tokenize/pyicu.py | pythainlp-dev/pythainlp/tokenize/pyicu.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Wrapper for PyICU word segmentation. This wrapper module uses
:class:`icu.BreakIterator` with a Thai :class:`icu.Locale`
to locate word boundaries in the text.
:See Also:
* `GitHub repository <https://github.com/ovalhub/pyicu>`_
"""
import re
from typing import Iterator, List
from icu import BreakIterator, Locale
def _gen_words(text: str) -> Iterator[str]:
bd = BreakIterator.createWordInstance(Locale("th"))
bd.setText(text)
p = bd.first()
for q in bd:
yield text[p:q]
p = q
def segment(text: str) -> List[str]:
"""
:param str text: text to be tokenized to words
:return: list of words, tokenized from the text
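    :rtype: List[str]

    A minimal sketch (word boundaries depend on the installed ICU version)::

        from pythainlp.tokenize.pyicu import segment

        segment("ทดสอบภาษาไทย")
        # output (indicative): ['ทดสอบ', 'ภาษา', 'ไทย']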
"""
if not text or not isinstance(text, str):
return []
text = re.sub("([^\u0E00-\u0E7F\n ]+)", " \\1 ", text)
return list(_gen_words(text))
| 1,444 | 28.489796 | 74 | py |
pythainlp-dev/pythainlp/tokenize/sefr_cut.py | pythainlp-dev/pythainlp/tokenize/sefr_cut.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Wrapper for SEFR CUT Thai word segmentation. SEFR CUT is a
Thai word segmentation model using a stacked ensemble.
:See Also:
* `GitHub repository <https://github.com/mrpeerat/SEFR_CUT>`_
"""
from typing import List
import sefr_cut
DEFAULT_ENGINE = "ws1000"
sefr_cut.load_model(engine=DEFAULT_ENGINE)
def segment(text: str, engine: str = "ws1000") -> List[str]:
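    """
    Word segmentation using SEFR CUT.

    :param str text: text to be tokenized
    :param str engine: engine name passed to sefr_cut.load_model(), defaults to "ws1000"
    :return: list of tokens
    :rtype: List[str]

    A minimal usage sketch (output is indicative)::

        from pythainlp.tokenize.sefr_cut import segment

        segment("ทดสอบการตัดคำ")
    """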
global DEFAULT_ENGINE
if not text or not isinstance(text, str):
return []
if engine != DEFAULT_ENGINE:
DEFAULT_ENGINE = engine
sefr_cut.load_model(engine=DEFAULT_ENGINE)
return sefr_cut.tokenize(text)[0]
| 1,231 | 31.421053 | 74 | py |
pythainlp-dev/pythainlp/tokenize/ssg.py | pythainlp-dev/pythainlp/tokenize/ssg.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
from ssg import syllable_tokenize
def segment(text: str) -> List[str]:
"""
Syllable tokenizer using ssg
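    :param str text: text to be tokenized to syllables
    :return: list of syllables
    :rtype: List[str]

    A minimal sketch (syllable boundaries shown are indicative)::

        from pythainlp.tokenize.ssg import segment

        segment("สวัสดีครับ")
        # output (indicative): ['สวัส', 'ดี', 'ครับ']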
"""
if not text or not isinstance(text, str):
return []
return syllable_tokenize(text)
| 861 | 29.785714 | 74 | py |
pythainlp-dev/pythainlp/tokenize/tcc.py | pythainlp-dev/pythainlp/tokenize/tcc.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The implementation of the tokenizer according to Thai Character Cluster (TCC)
rules proposed by `Theeramunkong et al. 2000. \
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.59.2548>`_
Credits:
* TCC: Jakkrit TeCho
* Grammar: Wittawat Jitkrittum (`link to the source file \
<https://github.com/wittawatj/jtcc/blob/master/TCC.g>`_)
* Python code: Korakot Chaovavanich
"""
import re
from typing import Iterator, List, Set
_RE_TCC = (
"""\
c[ั]([่-๋]c)?
c[ั]([่-๋]c)?k
เc็ck
เcctาะk
เccีtยะk
เccีtย(?=[เ-ไก-ฮ]|$)k
เc[ิีุู]tย(?=[เ-ไก-ฮ]|$)k
เcc็ck
เcิc์ck
เcิtck
เcีtยะ?k
เcืtอะk
เcื
เctา?ะ?k
c[ึื]tck
c[ะ-ู]tk
c[ิุู]์
cรรc์
c็
ct[ะาำ]?k
แc็ck
แcc์k
แctะk
แcc็ck
แccc์k
โctะk
[เ-ไ]ctk
ก็
อึ
หึ
""".replace(
"k", "(cc?[d|ิ]?[์])?"
)
.replace("c", "[ก-ฮ]")
.replace("t", "[่-๋]?")
.replace("d", "อูอุ".replace("อ", "")) # DSara: lower vowel
.split()
)
_PAT_TCC = re.compile("|".join(_RE_TCC))
def tcc(text: str) -> Iterator[str]:
"""
TCC generator, generates Thai Character Clusters
:param str text: text to be tokenized to character clusters
:return: subwords (character clusters)
:rtype: Iterator[str]
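
    A minimal sketch (clusters shown are indicative)::

        from pythainlp.tokenize.tcc import tcc

        list(tcc("ประเทศไทย"))
        # output (indicative): ['ป', 'ระ', 'เท', 'ศ', 'ไท', 'ย']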
"""
if not text or not isinstance(text, str):
return ""
len_text = len(text)
p = 0
while p < len_text:
m = _PAT_TCC.match(text[p:])
if m:
n = m.span()[1]
else:
n = 1
yield text[p : p + n]
p += n
def tcc_pos(text: str) -> Set[int]:
"""
TCC positions
:param str text: text to be tokenized to character clusters
    :return: set of the end positions of subwords
:rtype: set[int]
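
    A minimal sketch (positions shown are indicative and correspond to the
    clusters produced by tcc())::

        from pythainlp.tokenize.tcc import tcc_pos

        tcc_pos("ประเทศไทย")
        # output (indicative): {1, 3, 5, 6, 8, 9}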
"""
if not text or not isinstance(text, str):
return set()
p_set = set()
p = 0
for w in tcc(text):
p += len(w)
p_set.add(p)
return p_set
def segment(text: str) -> List[str]:
"""
Subword segmentation
:param str text: text to be tokenized to character clusters
:return: list of subwords (character clusters), tokenized from the text
:rtype: list[str]
"""
return list(tcc(text))
| 2,733 | 20.527559 | 77 | py |
pythainlp-dev/pythainlp/tokenize/tcc_p.py | pythainlp-dev/pythainlp/tokenize/tcc_p.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The implementation of the tokenizer according to Thai Character Cluster (TCC)
rules proposed by `Theeramunkong et al. 2000. \
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.59.2548>`_,
with improved rules that are used in newmm
Credits:
* TCC: Jakkrit TeCho
* Grammar: Wittawat Jitkrittum (`link to the source file \
<https://github.com/wittawatj/jtcc/blob/master/TCC.g>`_)
* Python code: Korakot Chaovavanich
"""
import re
from typing import Iterator, List, Set
_RE_TCC = (
"""\
เc็ck
เcctาะk
เccีtยะk
เccีtย(?=[เ-ไก-ฮ]|$)k
เcc็ck
เcิc์ck
เcิtck
เcีtยะ?k
เcืtอะ?k
เc[ิีุู]tย(?=[เ-ไก-ฮ]|$)k
เctา?ะ?k
cัtวะk
c[ัื]tc[ุิะ]?k
c[ิุู]์
c[ะ-ู]tk
cรรc์
c็
ct[ะาำ]?k
ck
แc็c
แcc์
แctะ
แcc็c
แccc์
โctะ
[เ-ไ]ct
ก็
อึ
หึ
""".replace(
"k", "(cc?[dิ]?[์])?"
)
.replace("c", "[ก-ฮ]")
.replace("t", "[่-๋]?")
.replace("d", "อูอุ".replace("อ", "")) # DSara: lower vowel
.split()
)
_PAT_TCC = re.compile("|".join(_RE_TCC))
def tcc(text: str) -> Iterator[str]:
"""
TCC generator, generates Thai Character Clusters
:param str text: text to be tokenized to character clusters
:return: subwords (character clusters)
:rtype: Iterator[str]
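
    A minimal sketch (tcc_p rules may merge clusters differently from tcc,
    so the output is indicative)::

        from pythainlp.tokenize.tcc_p import tcc

        list(tcc("ประเทศไทย"))
        # output (indicative): ['ป', 'ระ', 'เท', 'ศ', 'ไท', 'ย']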
"""
if not text or not isinstance(text, str):
return ""
len_text = len(text)
p = 0
while p < len_text:
m = _PAT_TCC.match(text[p:])
if m:
n = m.span()[1]
else:
n = 1
yield text[p : p + n]
p += n
def tcc_pos(text: str) -> Set[int]:
"""
TCC positions
:param str text: text to be tokenized to character clusters
    :return: set of the end positions of subwords
:rtype: set[int]
"""
if not text or not isinstance(text, str):
return set()
p_set = set()
p = 0
for w in tcc(text):
p += len(w)
p_set.add(p)
return p_set
def segment(text: str) -> List[str]:
"""
Subword segmentation
:param str text: text to be tokenized to character clusters
:return: list of subwords (character clusters), tokenized from the text
:rtype: list[str]
"""
return list(tcc(text))
| 2,749 | 20.653543 | 77 | py |
pythainlp-dev/pythainlp/tokenize/thaisumcut.py | pythainlp-dev/pythainlp/tokenize/thaisumcut.py | # -*- coding: utf-8 -*-
"""
The implementation of the sentence segmenter from Nakhun Chumpolsathien, 2020
original code from: https://github.com/nakhunchumpolsathien/ThaiSum
Cite:
@mastersthesis{chumpolsathien_2020,
title={Using Knowledge Distillation from Keyword Extraction to Improve the Informativeness of Neural Cross-lingual Summarization},
author={Chumpolsathien, Nakhun},
year={2020},
school={Beijing Institute of Technology}
}
**ThaiSum License**
Copyright [2020] [Nakhun Chumpolsathien]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import re
import operator
import math
from typing import List
from pythainlp.tokenize import word_tokenize
def list_to_string(str_list: List[str]) -> str:
    string = "".join(str_list)
    string = " ".join(string.split())
return string
def middle_cut(sentences: List[str]) -> List[str]:
new_text = ""
for sentence in sentences:
sentence_size = len(word_tokenize(sentence, keep_whitespace=False))
for k in range(0, len(sentence)):
if k == 0 or k + 1 >= len(sentence):
continue
if sentence[k].isdigit() and sentence[k - 1] == " ":
sentence = sentence[: k - 1] + sentence[k:]
if k + 2 <= len(sentence):
if sentence[k].isdigit() and sentence[k + 1] == " ":
sentence = sentence[: k + 1] + sentence[k + 2 :]
        fixed_text_length = 20
        if sentence_size > fixed_text_length:
            partition = math.floor(sentence_size / fixed_text_length)
tokens = word_tokenize(sentence, keep_whitespace=True)
for i in range(0, partition):
middle_space = sentence_size / (partition + 1) * (i + 1)
white_space_index = []
white_space_diff = {}
for j in range(len(tokens)):
if tokens[j] == " ":
white_space_index.append(j)
for white_space in white_space_index:
white_space_diff.update(
{white_space: abs(white_space - middle_space)}
)
if len(white_space_diff) > 0:
min_diff = min(
white_space_diff.items(), key=operator.itemgetter(1)
)
tokens.pop(min_diff[0])
tokens.insert(min_diff[0], "<stop>")
new_text = new_text + list_to_string(tokens) + "<stop>"
else:
new_text = new_text + sentence + "<stop>"
sentences = new_text.split("<stop>")
sentences = [s.strip() for s in sentences]
if "" in sentences:
sentences.remove("")
if "nan" in sentences:
sentences.remove("nan")
sentences = list(filter(None, sentences))
return sentences
class ThaiSentenceSegmentor:
def split_into_sentences(
self, text: str, isMiddleCut: bool = False
) -> List[str]:
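        """
        Split Thai text into sentences using rule-based heuristics.

        :param str text: input text
        :param bool isMiddleCut: if True, additionally split very long
            sentences near their middle (see middle_cut), defaults to False
        :return: list of sentences
        :rtype: List[str]

        A minimal usage sketch (sentence boundaries are heuristic)::

            from pythainlp.tokenize.thaisumcut import ThaiSentenceSegmentor

            segmentor = ThaiSentenceSegmentor()
            segmentor.split_into_sentences("ผมไปโรงเรียน แต่วันนี้ฝนตกหนัก")
        """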
# Declare Variables
th_alphabets = "([ก-๙])"
th_conjunction = "(ทำให้|โดย|เพราะ|นอกจากนี้|แต่|กรณีที่|หลังจากนี้|ต่อมา|ภายหลัง|นับตั้งแต่|หลังจาก|ซึ่งเหตุการณ์|ผู้สื่อข่าวรายงานอีก|ส่วนที่|ส่วนสาเหตุ|ฉะนั้น|เพราะฉะนั้น|เพื่อ|เนื่องจาก|จากการสอบสวนทราบว่า|จากกรณี|จากนี้|อย่างไรก็ดี)"
th_cite = "(กล่าวว่า|เปิดเผยว่า|รายงานว่า|ให้การว่า|เผยว่า|บนทวิตเตอร์ว่า|แจ้งว่า|พลเมืองดีว่า|อ้างว่า)"
th_ka_krub = "(ครับ|ค่ะ)"
th_stop_after = "(หรือไม่|โดยเร็ว|แล้ว|อีกด้วย)"
th_stop_before = "(ล่าสุด|เบื้องต้น|ซึ่ง|ทั้งนี้|แม้ว่า|เมื่อ|แถมยัง|ตอนนั้น|จนเป็นเหตุให้|จากนั้น|อย่างไรก็ตาม|และก็|อย่างใดก็ตาม|เวลานี้|เช่น|กระทั่ง)"
degit = "([0-9])"
th_title = "(นาย|นาง|นางสาว|เด็กชาย|เด็กหญิง|น.ส.|ด.ช.|ด.ญ.)"
text = f" {text} "
text = text.replace("\n", " ")
text = text.replace("", "")
text = text.replace("โดยเร็ว", "<rth_Doeirew>")
text = text.replace("เพื่อน", "<rth_friend>")
text = text.replace("แต่ง", "<rth_but>")
text = text.replace("โดยสาร", "<rth_passenger>")
text = text.replace("แล้วแต่", "<rth_leawtea>")
text = text.replace("หรือเปล่า", "<rth_repraw>")
text = text.replace("หรือไม่", "<rth_remai>")
text = text.replace("จึงรุ่งเรืองกิจ", "<rth_tanatorn_lastname>")
text = text.replace("ตั้งแต่", "<rth_tangtea>")
text = text.replace("แต่ละ", "<rth_teala>")
text = text.replace("วิตแล้ว", "<rth_chiwitleaw>")
text = text.replace("โดยประ", "<rth_doipra>")
text = text.replace("แต่หลังจากนั้น", "<rth_tealangjaknan>")
text = text.replace("พรรคเพื่อ", "<for_party>")
text = text.replace("แต่เนื่อง", "<rth_teaneung>")
text = text.replace("เพื่อทำให้", "เพื่อ<rth_tamhai>")
text = text.replace("ทำเพื่อ", "ทำ<rth_for>")
text = text.replace("จึงทำให้", "จึง<tamhai>")
text = text.replace("มาโดยตลอด", "<madoitalod>")
text = text.replace("แต่อย่างใด", "<teayangdaikptam>")
text = text.replace("แต่หลังจาก", "แต่<langjak>")
text = text.replace("คงทำให้", "<rth_kongtamhai>")
text = text.replace("แต่ทั้งนี้", "แต่<tangni>")
text = text.replace("มีแต่", "มี<tea>")
text = text.replace("เหตุที่ทำให้", "<hedteetamhai>")
text = text.replace("โดยหลังจาก", "โดย<langjak>")
text = text.replace("ซึ่งหลังจาก", "ซึ่ง<langjak>")
text = text.replace("ตั้งโดย", "<rth_tangdoi>")
text = text.replace("โดยตรง", "<rth_doitong>")
text = text.replace("นั้นหรือ", "<rth_nanhlor>")
text = text.replace("ซึ่งต้องทำให้", "ซึ่งต้อง<tamhai>")
text = text.replace("ชื่อต่อมา", "ชื่อ<tomar>")
text = text.replace("โดยเร่งด่วน", "<doi>เร่งด่วน")
text = text.replace("ไม่ได้ทำให้", "ไม่ได้<tamhai>")
text = text.replace("จะทำให้", "จะ<tamhai>")
text = text.replace("จนทำให้", "จน<tamhai>")
text = text.replace("เว้นแต่", "เว้น<rth_tea>")
text = text.replace("ก็ทำให้", "ก็<tamhai>")
text = text.replace(" ณ ตอนนั้น", " ณ <tonnan>")
text = text.replace("บางส่วน", "บาง<rth_suan>")
text = text.replace("หรือแม้แต่", "หรือ<rth_meatea>")
text = text.replace("โดยทำให้", "โดย<tamhai>")
text = text.replace("หรือเพราะ", "หรือ<rth_orbecause>")
text = text.replace("มาแต่", "มา<rth_tea>")
text = text.replace("แต่ไม่ทำให้", "แต่<maitamhai>")
text = text.replace("ฉะนั้นเมื่อ", "ฉะนั้น<rth_moe>")
text = text.replace("เพราะฉะนั้น", "เพราะ<rth_chanan>")
text = text.replace("เพราะหลังจาก", "เพราะ<rth_langjak>")
text = text.replace("สามารถทำให้", "สามารถ<rth_tamhai>")
text = text.replace("อาจทำ", "อาจ<rth_tam>")
text = text.replace("จะทำ", "จะ<rth_tam>")
text = text.replace("และนอกจากนี้", "นอกจากนี้")
text = text.replace("อีกทั้งเพื่อ", "อีกทั้ง<rth_for>")
text = text.replace("ทั้งนี้เพื่อ", "ทั้งนี้<rth_for>")
text = text.replace("เวลาต่อมา", "เวลา<rth_toma>")
text = text.replace("อย่างไรก็ตาม", "อย่างไรก็ตาม")
text = text.replace(
"อย่างไรก็ตามหลังจาก", "<stop>อย่างไรก็ตาม<rth_langjak>"
)
text = text.replace("ซึ่งทำให้", "ซึ่ง<rth_tamhai>")
text = text.replace("โดยประมาท", "<doi>ประมาท")
text = text.replace("โดยธรรม", "<doi>ธรรม")
text = text.replace("โดยสัจจริง", "<doi>สัจจริง")
if "และ" in text:
tokens = word_tokenize(text.strip(), keep_whitespace=True)
and_position = -1
nearest_space_position = -1
last_position = len(tokens)
pop_split_position = []
split_position = []
for i in range(len(tokens)):
if tokens[i] == "และ":
and_position = i
if (
and_position != -1
and i > and_position
and tokens[i] == " "
and nearest_space_position == -1
):
if i - and_position != 1:
nearest_space_position = i
if and_position != -1 and last_position - and_position == 3:
split_position.append(last_position)
and_position = -1
nearest_space_position = -1
if nearest_space_position != -1:
if nearest_space_position - and_position < 5:
pop_split_position.append(nearest_space_position)
else:
split_position.append(and_position)
and_position = -1
nearest_space_position = -1
for pop in pop_split_position:
tokens.pop(pop)
tokens.insert(pop, "<stop>")
for split in split_position:
tokens.insert(split, "<stop>")
text = list_to_string(tokens)
if "หรือ" in text:
tokens = word_tokenize(text.strip(), keep_whitespace=True)
or_position = -1
nearest_space_position = -1
last_position = len(tokens)
pop_split_position = []
split_position = []
for i in range(len(tokens)):
if tokens[i] == "หรือ":
or_position = i
if (
or_position != -1
and i > or_position
and tokens[i] == " "
and nearest_space_position == -1
):
if i - or_position != 1:
nearest_space_position = i
if or_position != -1 and last_position - or_position == 3:
split_position.append(last_position)
or_position = -1
nearest_space_position = -1
if nearest_space_position != -1:
if nearest_space_position - or_position < 4:
pop_split_position.append(nearest_space_position)
else:
split_position.append(or_position)
or_position = -1
nearest_space_position = -1
for pop in pop_split_position:
tokens.pop(pop)
tokens.insert(pop, "<stop>")
for split in split_position:
tokens.insert(split, "<stop>")
text = list_to_string(tokens)
if "จึง" in text:
tokens = word_tokenize(text.strip(), keep_whitespace=True)
cung_position = -1
nearest_space_position = -1
pop_split_position = []
last_position = len(tokens)
split_position = []
for i in range(len(tokens)):
if tokens[i] == "จึง":
cung_position = i
if (
cung_position != -1
and tokens[i] == " "
and i > cung_position
and nearest_space_position == -1
):
if i - cung_position != 1:
nearest_space_position = i
if cung_position != -1 and last_position - cung_position == 2:
split_position.append(last_position)
cung_position = -1
nearest_space_position = -1
if nearest_space_position != -1:
if nearest_space_position - cung_position < 3:
pop_split_position.append(nearest_space_position)
else:
split_position.append(cung_position)
cung_position = -1
nearest_space_position = -1
for pop in pop_split_position:
tokens.pop(pop)
tokens.insert(pop, "<stop>")
for split in split_position:
tokens.insert(split, "<stop>")
text = list_to_string(tokens)
text = re.sub(" " + th_stop_before, "<stop>\\1", text)
text = re.sub(th_ka_krub, "\\1<stop>", text)
text = re.sub(th_conjunction, "<stop>\\1", text)
text = re.sub(th_cite, "\\1<stop>", text)
text = re.sub(" " + degit + "[.]" + th_title, "<stop>\\1.\\2", text)
text = re.sub(
" " + degit + degit + "[.]" + th_title, "<stop>\\1\\2.\\3", text
)
text = re.sub(th_alphabets + th_stop_after + " ", "\\1\\2<stop>", text)
if "”" in text:
text = text.replace(".”", "”.")
if '"' in text:
text = text.replace('."', '".')
if "!" in text:
text = text.replace('!"', '"!')
if "?" in text:
text = text.replace('?"', '"?')
text = text.replace("<rth_Doeirew>", "โดยเร็ว")
text = text.replace("<rth_friend>", "เพื่อน")
text = text.replace("<rth_but>", "แต่ง")
text = text.replace("<rth_passenger>", "โดยสาร")
text = text.replace("<rth_leawtea>", "แล้วแต่")
text = text.replace("<rth_repraw>", "หรือเปล่า")
text = text.replace("<rth_remai>", "หรือไม่")
text = text.replace("<rth_tanatorn_lastname>", "จึงรุ่งเรืองกิจ")
text = text.replace("<rth_tangtea>", "ตั้งแต่")
text = text.replace("<rth_teala>", "แต่ละ")
text = text.replace("<rth_chiwitleaw>", "วิตแล้ว")
text = text.replace("<rth_doipra>", "โดยประ")
text = text.replace("<rth_tealangjaknan>", "แต่หลังจากนั้น")
text = text.replace("<for_party>", "พรรคเพื่อ")
text = text.replace("<rth_teaneung>", "แต่เนื่อง")
text = text.replace("เพื่อ<rth_tamhai>", "เพื่อทำให้")
text = text.replace("ทำ<rth_for>", "ทำเพื่อ")
text = text.replace("จึง<tamhai>", "จึงทำให้")
text = text.replace("<madoitalod>", "มาโดยตลอด")
text = text.replace("แต่<langjak>", "แต่หลังจาก")
text = text.replace("แต่<tangni>", "แต่ทั้งนี้")
text = text.replace("มี<tea>", "มีแต่")
text = text.replace("<teayangdaikptam>", "แต่อย่างใด")
text = text.replace("<rth_kongtamhai>", "คงทำให้")
text = text.replace("<hedteetamhai>", "เหตุที่ทำให้")
text = text.replace("โดย<langjak>", "โดยหลังจาก")
text = text.replace("ซึ่ง<langjak>", "ซึ่งหลังจาก")
text = text.replace("<rth_tangdoi>", "ตั้งโดย")
text = text.replace("<rth_doitong>", "โดยตรง")
text = text.replace("<rth_nanhlor>", "นั้นหรือ")
text = text.replace("ซึ่งต้อง<tamhai>", "ซึ่งต้องทำให้")
text = text.replace("ชื่อ<tomar>", "ชื่อต่อมา")
text = text.replace("<doi>เร่งด่วน", "โดยเร่งด่วน")
text = text.replace("ไม่ได้<tamhai>", "ไม่ได้ทำให้")
text = text.replace("จะ<tamhai>", "จะทำให้")
text = text.replace("จน<tamhai>", "จนทำให้")
text = text.replace("เว้น<rth_tea>", "เว้นแต่")
text = text.replace("ก็<tamhai>", "ก็ทำให้")
text = text.replace(" ณ <tonnan>", " ณ ตอนนั้น")
text = text.replace("บาง<rth_suan>", "บางส่วน")
text = text.replace("หรือ<rth_meatea>", "หรือแม้แต่")
text = text.replace("โดย<tamhai>", "โดยทำให้")
text = text.replace("หรือ<rth_orbecause>", "หรือเพราะ")
text = text.replace("มา<rth_tea>", "มาแต่")
text = text.replace("แต่<maitamhai>", "แต่ไม่ทำให้")
text = text.replace("ฉะนั้น<rth_moe>", "ฉะนั้นเมื่อ")
text = text.replace("เพราะ<rth_chanan>", "เพราะฉะนั้น")
text = text.replace("เพราะ<rth_langjak>", "เพราะหลังจาก")
text = text.replace("สามารถ<rth_tamhai>", "สามารถทำให้")
text = text.replace("อาจ<rth_tam>", "อาจทำ")
text = text.replace("จะ<rth_tam>", "จะทำ")
text = text.replace("อีกทั้ง<rth_for>", "อีกทั้งเพื่อ")
text = text.replace("ทั้งนี้<rth_for>", "ทั้งนี้เพื่อ")
text = text.replace("เวลา<rth_toma>", "เวลาต่อมา")
text = text.replace(
"อย่างไรก็ตาม<rth_langjak>",
"อย่างไรก็ตามหลังจาก",
)
text = text.replace("ซึ่ง<rth_tamhai>", "ซึ่งทำให้")
text = text.replace("<doi>ประมาท", "โดยประมาท")
text = text.replace("<doi>ธรรม", "โดยธรรม")
text = text.replace("<doi>สัจจริง", "โดยสัจจริง")
text = text.replace("?", "?<stop>")
text = text.replace("!", "!<stop>")
text = text.replace("<prd>", ".")
sentences = text.split("<stop>")
sentences = [s.strip() for s in sentences]
if "" in sentences:
sentences.remove("")
if "nan" in sentences:
sentences.remove("nan")
sentences = list(filter(None, sentences))
if isMiddleCut:
return middle_cut(sentences)
else:
return sentences
| 17,291 | 43.112245 | 246 | py |
pythainlp-dev/pythainlp/tokenize/tltk.py | pythainlp-dev/pythainlp/tokenize/tltk.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
try:
from tltk.nlp import word_segment as tltk_segment
from tltk.nlp import syl_segment
except ImportError:
raise ImportError("Not found tltk! Please install tltk by pip install tltk")
def segment(text: str) -> List[str]:
if not text or not isinstance(text, str):
return []
text = text.replace(" ", "<u/>")
_temp = tltk_segment(text).replace("<u/>", " ").replace("<s/>", "")
_temp = _temp.split("|")
if _temp[-1] == "":
del _temp[-1]
return _temp
def syllable_tokenize(text: str) -> List[str]:
if not text or not isinstance(text, str):
return []
_temp = syl_segment(text)
_temp = _temp.split("~")
if _temp[-1] == "<s/>":
del _temp[-1]
return _temp
def sent_tokenize(text: str) -> List[str]:
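    """
    Sentence tokenizer using tltk.

    :param str text: input text
    :return: list of sentences
    :rtype: List[str]

    A minimal usage sketch (sentence boundaries are indicative)::

        from pythainlp.tokenize.tltk import sent_tokenize

        sent_tokenize("ผมไปโรงเรียน วันนี้ฝนตกหนักมาก")
    """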
text = text.replace(" ", "<u/>")
_temp = tltk_segment(text).replace("<u/>", " ").replace("|", "")
_temp = _temp.split("<s/>")
if _temp[-1] == "":
del _temp[-1]
return _temp
| 1,624 | 30.862745 | 80 | py |
pythainlp-dev/pythainlp/tokenize/wtsplit.py | pythainlp-dev/pythainlp/tokenize/wtsplit.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Where's the Point? Self-Supervised Multilingual Punctuation-Agnostic Sentence Segmentation
GitHub: https://github.com/bminixhofer/wtpsplit
"""
from typing import List
from wtpsplit import WtP
_MODEL = None
_MODEL_NAME = None
def _tokenize(
    text: str,
    lang_code: str = "th",
    model: str = "wtp-bert-mini",
    tokenize: str = "sentence",
    paragraph_threshold: float = 0.5,
) -> List[str]:
    global _MODEL_NAME, _MODEL
    if _MODEL_NAME != model:
        _MODEL = WtP(model_name_or_model=model)
        _MODEL_NAME = model
    if tokenize == "sentence":
        return _MODEL.split(text, lang_code=lang_code)
    else:  # paragraph segmentation
        return _MODEL.split(
            text,
            lang_code=lang_code,
            do_paragraph_segmentation=True,
            paragraph_threshold=paragraph_threshold,
        )
def tokenize(text: str, size: str = "mini", tokenize: str = "sentence", paragraph_threshold: float = 0.5) -> List[str]:
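    """
    Sentence (or paragraph) tokenizer using wtpsplit (WtP).

    :param str text: input text
    :param str size: model size, one of "tiny", "mini", "base", "large";
        defaults to "mini"
    :param str tokenize: "sentence" or "paragraph", defaults to "sentence"
    :param float paragraph_threshold: threshold used for paragraph
        segmentation, defaults to 0.5
    :return: list of sentences (or paragraphs)
    :rtype: List[str]

    A minimal usage sketch (the model is downloaded on first use)::

        from pythainlp.tokenize.wtsplit import tokenize

        tokenize("ผมไปโรงเรียน วันนี้ฝนตกหนักมาก", size="mini")
    """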
_model_load=""
if size=="tiny":
_model_load="wtp-bert-tiny"
elif size=="base":
_model_load="wtp-canine-s-1l"
elif size=="large":
_model_load="wtp-canine-s-12l"
else: # mini
_model_load="wtp-bert-mini"
return _tokenize(text, model=_model_load,tokenize=tokenize,paragraph_threshold=paragraph_threshold)
| 1,929 | 31.166667 | 108 | py |
pythainlp-dev/pythainlp/tools/__init__.py | pythainlp-dev/pythainlp/tools/__init__.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = [
"PYTHAINLP_DEFAULT_DATA_DIR",
"get_full_data_path",
"get_pythainlp_data_path",
"get_pythainlp_path",
]
from pythainlp.tools.path import (
PYTHAINLP_DEFAULT_DATA_DIR,
get_full_data_path,
get_pythainlp_data_path,
get_pythainlp_path,
)
| 892 | 30.892857 | 74 | py |
pythainlp-dev/pythainlp/tools/misspell.py | pythainlp-dev/pythainlp/tools/misspell.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
import numpy as np
THAI_CHARACTERS_WITHOUT_SHIFT = [
"ผปแอิืทมใฝ",
"ฟหกดเ้่าสวง",
"ๆไำพะัีรนยบลฃ",
"ๅ/_ภถุึคตจขช",
]
THAI_CHARACTERS_WITH_SHIFT = [
"()ฉฮฺ์?ฒฬฦ",
"ฤฆฏโฌ็๋ษศซ.",
'๐"ฎฑธํ๊ณฯญฐ,',
"+๑๒๓๔ู฿๕๖๗๘๙",
]
ENGLISH_CHARACTERS_WITHOUT_SHIFT = [
"1234567890-=",
"qwertyuiop[]\\",
"asdfghjkl;'",
"zxcvbnm,./",
]
ENGLISH_CHARACTERS_WITH_SHIFT = [
"!@#$%^&*()_+",
"QWERTYUIOP{}|",
'ASDFGHJKL:"',
"ZXCVBNM<>?",
]
ALL_CHARACTERS = [
THAI_CHARACTERS_WITHOUT_SHIFT + THAI_CHARACTERS_WITH_SHIFT,
ENGLISH_CHARACTERS_WITHOUT_SHIFT + ENGLISH_CHARACTERS_WITH_SHIFT,
]
def search_location_of_character(char: str):
for language_ix in [0, 1]:
for ix, row in enumerate(ALL_CHARACTERS[language_ix]):
if char in row:
return (language_ix, ix // 4, ix % 4, row.index(char))
def find_neighbour_locations(
loc: tuple,
char: str,
kernel: List = [(-1, -1), (-1, 0), (1, 1), (0, 1), (0, -1), (1, 0)],
):
language_ix, is_shift, row, pos = loc
valid_neighbours = []
for kr, ks in kernel:
_row, _pos = row + kr, pos + ks
        if 0 <= _row <= 3 and 0 <= _pos < len(
ALL_CHARACTERS[language_ix][is_shift * 4 + _row]
):
valid_neighbours.append((language_ix, is_shift, _row, _pos, char))
return valid_neighbours
def find_misspell_candidates(char: str, verbose: bool = False):
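    """
    Find keyboard-adjacent characters that could replace ``char``.

    Returns a list of neighbouring characters taken from the layout tables
    defined above, or None if the character is not found in those tables.

    A minimal sketch (the neighbours returned depend on the layout tables)::

        from pythainlp.tools.misspell import find_misspell_candidates

        find_misspell_candidates("ด")
        # e.g. a few characters adjacent to "ด", such as 'ก' and 'เ'
    """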
loc = search_location_of_character(char)
if loc is None:
return None
valid_neighbours = find_neighbour_locations(loc, char)
chars = []
printing_locations = ["▐"] * 3 + [char] + ["▐"] * 3
for language_ix, is_shift, row, pos, char in valid_neighbours:
try:
char = ALL_CHARACTERS[language_ix][is_shift * 4 + row][pos]
chars.append(char)
kernel = (row - loc[1], pos - loc[2])
if kernel == (-1, -1):
ix = 5
elif kernel == (-1, 0):
ix = 6
elif kernel[0] == 0:
ix = 3 + kernel[1]
elif kernel == (1, 0):
ix = 0
elif kernel == (1, 1):
ix = 1
else:
continue
printing_locations[ix] = char
except IndexError as e:
continue
except Exception as e:
print("Something wrong with: ", char)
raise e
return chars
def misspell(sentence: str, ratio: float = 0.05):
"""
    Simulate some misspellings for the input sentence.
    The number of misspelled positions is governed by ratio.
    :param str sentence: sentence to be misspelled
    :param float ratio: fraction of characters to misspell, defaults to 0.05
    :return: sentence containing some misspelled characters
:rtype: str
:Example:
::
from pythainlp.tools.misspell import misspell
sentence = "ภาษาไทยปรากฏครั้งแรกในพุทธศักราช 1826"
        misspell(sentence, ratio=0.1)
# output:
ภาษาไทยปรากฏครั้งแรกในกุทธศักราช 1727
"""
num_misspells = np.floor(len(sentence) * ratio).astype(int)
positions = np.random.choice(
len(sentence), size=num_misspells, replace=False
)
# convert strings to array of characters
misspelled = list(sentence)
for pos in positions:
potential_candidates = find_misspell_candidates(sentence[pos])
if potential_candidates is None:
continue
candidate = np.random.choice(potential_candidates)
misspelled[pos] = candidate
return "".join(misspelled)
| 4,233 | 26.316129 | 78 | py |
pythainlp-dev/pythainlp/tools/path.py | pythainlp-dev/pythainlp/tools/path.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
PyThaiNLP data tools
For text processing and text conversion, see pythainlp.util
"""
import os
from pythainlp import __file__ as pythainlp_file
PYTHAINLP_DEFAULT_DATA_DIR = "pythainlp-data"
def get_full_data_path(path: str) -> str:
"""
    This function joins the path of the :mod:`pythainlp` data directory
    with the given path, and returns the full path.
    :return: full path for the given dataset file name
:rtype: str
:Example:
::
from pythainlp.tools import get_full_data_path
get_full_data_path('ttc_freq.txt')
# output: '/root/pythainlp-data/ttc_freq.txt'
"""
return os.path.join(get_pythainlp_data_path(), path)
def get_pythainlp_data_path() -> str:
"""
Returns the full path where PyThaiNLP keeps its (downloaded) data.
If the directory does not yet exist, it will be created.
The path can be specified through the environment variable
:envvar:`PYTHAINLP_DATA_DIR`. By default, `~/pythainlp-data`
will be used.
:return: full path of directory for :mod:`pythainlp` downloaded data
:rtype: str
:Example:
::
from pythainlp.tools import get_pythainlp_data_path
get_pythainlp_data_path()
# output: '/root/pythainlp-data'
"""
pythainlp_data_dir = os.getenv(
"PYTHAINLP_DATA_DIR", os.path.join("~", PYTHAINLP_DEFAULT_DATA_DIR)
)
path = os.path.expanduser(pythainlp_data_dir)
os.makedirs(path, exist_ok=True)
return path
def get_pythainlp_path() -> str:
"""
This function returns full path of PyThaiNLP code
:return: full path of :mod:`pythainlp` code
:rtype: str
:Example:
::
from pythainlp.tools import get_pythainlp_path
get_pythainlp_path()
# output: '/usr/local/lib/python3.6/dist-packages/pythainlp'
"""
return os.path.dirname(pythainlp_file)
| 2,479 | 26.865169 | 75 | py |
pythainlp-dev/pythainlp/translate/__init__.py | pythainlp-dev/pythainlp/translate/__init__.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Language translation.
"""
__all__ = ["ThZhTranslator", "ZhThTranslator", "Translate"]
from pythainlp.translate.core import Translate
from pythainlp.translate.zh_th import (
ThZhTranslator,
ZhThTranslator,
)
| 836 | 30 | 74 | py |
pythainlp-dev/pythainlp/translate/core.py | pythainlp-dev/pythainlp/translate/core.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class Translate:
"""
Machine Translation
:param str src_lang: source language
:param str target_lang: target language
:param bool use_gpu: load model to gpu (Default is False)
**Options for source & target language**
* *th* - *en* - Thai to English
* *en* - *th* - English to Thai
* *th* - *zh* - Thai to Chinese
* *zh* - *th* - Chinese to Thai
* *th* - *fr* - Thai to French
:Example:
Translate text from Thai to English::
from pythainlp.translate import Translate
th2en = Translate('th', 'en')
th2en.translate("ฉันรักแมว")
# output: I love cat.
"""
def __init__(
self, src_lang: str, target_lang: str, engine: str="default", use_gpu: bool = False
) -> None:
"""
:param str src_lang: source language
:param str target_lang: target language
:param str engine: Machine Translation engine
:param bool use_gpu: load model to gpu (Default is False)
        **Options for engine**
            * *default* - the default engine for each language pair
* *small100* - A multilingual machine translation model (covering 100 languages)
**Options for source & target language**
* *th* - *en* - Thai to English
* *en* - *th* - English to Thai
* *th* - *zh* - Thai to Chinese
* *zh* - *th* - Chinese to Thai
* *th* - *fr* - Thai to French
* *th* - *xx* - Thai to xx (xx is language code). It uses small100 model.
* *xx* - *th* - xx to Thai (xx is language code). It uses small100 model.
:Example:
Translate text from Thai to English::
from pythainlp.translate import Translate
th2en = Translate('th', 'en')
th2en.translate("ฉันรักแมว")
# output: I love cat.
"""
self.model = None
self.engine = engine
self.src_lang = src_lang
self.use_gpu = use_gpu
self.target_lang = target_lang
self.load_model()
def load_model(self):
src_lang = self.src_lang
target_lang = self.target_lang
use_gpu = self.use_gpu
if self.engine == "small100":
from .small100 import Small100Translator
self.model = Small100Translator(use_gpu)
elif src_lang == "th" and target_lang == "en":
from pythainlp.translate.en_th import ThEnTranslator
self.model = ThEnTranslator(use_gpu)
elif src_lang == "en" and target_lang == "th":
from pythainlp.translate.en_th import EnThTranslator
self.model = EnThTranslator(use_gpu)
elif src_lang == "th" and target_lang == "zh":
from pythainlp.translate.zh_th import ThZhTranslator
self.model = ThZhTranslator(use_gpu)
elif src_lang == "zh" and target_lang == "th":
from pythainlp.translate.zh_th import ZhThTranslator
self.model = ZhThTranslator(use_gpu)
elif src_lang == "th" and target_lang == "fr":
from pythainlp.translate.th_fr import ThFrTranslator
self.model = ThFrTranslator(use_gpu)
else:
raise ValueError("Not support language!")
def translate(self, text) -> str:
"""
Translate text
:param str text: input text in source language
:return: translated text in target language
:rtype: str
"""
if self.engine == "small100":
return self.model.translate(text, tgt_lang=self.target_lang)
return self.model.translate(text)
| 4,257 | 33.33871 | 92 | py |
pythainlp-dev/pythainlp/translate/en_th.py | pythainlp-dev/pythainlp/translate/en_th.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
English-Thai Machine Translation
from VISTEC-depa Thailand Artificial Intelligence Research Institute
Website: https://airesearch.in.th/releases/machine-translation-models/
"""
import os
import tarfile
from collections import defaultdict
from pythainlp.corpus import download, get_corpus_path
from pythainlp.tools import get_full_data_path, get_pythainlp_data_path
from fairseq.models.transformer import TransformerModel
from sacremoses import MosesTokenizer
_EN_TH_MODEL_NAME = "scb_1m_en-th_moses"
# SCB_1M-MT_OPUS+TBASE_en-th_moses-spm_130000-16000_v1.0.tar.gz
_EN_TH_FILE_NAME = "SCB_1M-MT_OPUS+TBASE_en-th_moses-spm_130000-16000_v1.0"
_TH_EN_MODEL_NAME = "scb_1m_th-en_spm"
# SCB_1M-MT_OPUS+TBASE_th-en_spm-spm_32000-joined_v1.0.tar.gz
_TH_EN_FILE_NAME = "SCB_1M-MT_OPUS+TBASE_th-en_spm-spm_32000-joined_v1.0"
def _get_translate_path(model: str, *path: str) -> str:
return os.path.join(get_corpus_path(model, version="1.0"), *path)
def _download_install(name: str) -> None:
if get_corpus_path(name) is None:
download(name, force=True, version="1.0")
def download_model_all() -> None:
"""
    Download all translation models in advance
"""
_download_install(_EN_TH_MODEL_NAME)
_download_install(_TH_EN_MODEL_NAME)
class EnThTranslator:
"""
English-Thai Machine Translation
from VISTEC-depa Thailand Artificial Intelligence Research Institute
Website: https://airesearch.in.th/releases/machine-translation-models/
:param bool use_gpu : load model to gpu (Default is False)
"""
def __init__(self, use_gpu: bool = False):
self._tokenizer = MosesTokenizer("en")
self._model_name = _EN_TH_MODEL_NAME
_download_install(self._model_name)
self._model = TransformerModel.from_pretrained(
model_name_or_path=_get_translate_path(
self._model_name,
_EN_TH_FILE_NAME,
"models",
),
checkpoint_file="checkpoint.pt",
data_name_or_path=_get_translate_path(
self._model_name,
_EN_TH_FILE_NAME,
"vocab",
),
)
if use_gpu:
self._model = self._model.cuda()
def translate(self, text: str) -> str:
"""
Translate text from English to Thai
:param str text: input text in source language
:return: translated text in target language
:rtype: str
:Example:
Translate text from English to Thai::
from pythainlp.translate import EnThTranslator
enth = EnThTranslator()
enth.translate("I love cat.")
# output: ฉันรักแมว
"""
tokens = " ".join(self._tokenizer.tokenize(text))
translated = self._model.translate(tokens)
return translated.replace(" ", "").replace("▁", " ").strip()
class ThEnTranslator:
"""
Thai-English Machine Translation
from VISTEC-depa Thailand Artificial Intelligence Research Institute
Website: https://airesearch.in.th/releases/machine-translation-models/
:param bool use_gpu : load model to gpu (Default is False)
"""
def __init__(self, use_gpu: bool = False):
self._model_name = _TH_EN_MODEL_NAME
_download_install(self._model_name)
self._model = TransformerModel.from_pretrained(
model_name_or_path=_get_translate_path(
self._model_name,
_TH_EN_FILE_NAME,
"models",
),
checkpoint_file="checkpoint.pt",
data_name_or_path=_get_translate_path(
self._model_name,
_TH_EN_FILE_NAME,
"vocab",
),
bpe="sentencepiece",
sentencepiece_model=_get_translate_path(
self._model_name,
_TH_EN_FILE_NAME,
"bpe",
"spm.th.model",
),
)
if use_gpu:
self._model.cuda()
def translate(self, text: str) -> str:
"""
Translate text from Thai to English
:param str text: input text in source language
:return: translated text in target language
:rtype: str
:Example:
Translate text from Thai to English::
from pythainlp.translate import ThEnTranslator
then = ThEnTranslator()
then.translate("ฉันรักแมว")
# output: I love cat.
"""
return self._model.translate(text)
| 5,169 | 28.375 | 75 | py |
pythainlp-dev/pythainlp/translate/small100.py | pythainlp-dev/pythainlp/translate/small100.py | from transformers import M2M100ForConditionalGeneration
from .tokenization_small100 import SMALL100Tokenizer
class Small100Translator:
"""
Machine Translation with small100 model
- Huggingface https://huggingface.co/alirezamsh/small100
:param bool use_gpu : load model to gpu (Default is False)
"""
def __init__(
self,
use_gpu: bool = False,
pretrained: str = "alirezamsh/small100",
) -> None:
self.pretrained = pretrained
self.model = M2M100ForConditionalGeneration.from_pretrained(self.pretrained)
self.tgt_lang = None
if use_gpu:
self.model = self.model.cuda()
def translate(self, text: str, tgt_lang: str="en") -> str:
"""
Translate text from X to X
:param str text: input text in source language
:param str tgt_lang: target language
:return: translated text in target language
:rtype: str
:Example:
::
from pythainlp.translate.small100 import Small100Translator
mt = Small100Translator()
# Translate text from Thai to English
mt.translate("ทดสอบระบบ", tgt_lang="en")
# output: 'Testing system'
# Translate text from Thai to Chinese
mt.translate("ทดสอบระบบ", tgt_lang="zh")
# output: '系统测试'
# Translate text from Thai to French
mt.translate("ทดสอบระบบ", tgt_lang="fr")
# output: 'Test du système'
"""
if tgt_lang!=self.tgt_lang:
self.tokenizer = SMALL100Tokenizer.from_pretrained(self.pretrained, tgt_lang=tgt_lang)
self.tgt_lang = tgt_lang
self.translated = self.model.generate(
**self.tokenizer(text, return_tensors="pt")
)
return self.tokenizer.batch_decode(self.translated, skip_special_tokens=True)[0]
| 1,897 | 30.114754 | 98 | py |
pythainlp-dev/pythainlp/translate/th_fr.py | pythainlp-dev/pythainlp/translate/th_fr.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Thai-French Machine Translation
Trained by OPUS Corpus
Model from Language Technology Research Group at the University of Helsinki
BLEU 20.4
- Huggingface https://huggingface.co/Helsinki-NLP/opus-mt-th-fr
"""
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
class ThFrTranslator:
"""
Thai-French Machine Translation
Trained by OPUS Corpus
Model from Language Technology Research Group at the University of Helsinki
BLEU 20.4
- Huggingface https://huggingface.co/Helsinki-NLP/opus-mt-th-fr
:param bool use_gpu : load model to gpu (Default is False)
"""
def __init__(
self,
use_gpu: bool = False,
pretrained: str = "Helsinki-NLP/opus-mt-th-fr",
) -> None:
self.tokenizer_thzh = AutoTokenizer.from_pretrained(pretrained)
self.model_thzh = AutoModelForSeq2SeqLM.from_pretrained(pretrained)
if use_gpu:
self.model_thzh = self.model_thzh.cuda()
def translate(self, text: str) -> str:
"""
Translate text from Thai to French
:param str text: input text in source language
:return: translated text in target language
:rtype: str
:Example:
Translate text from Thai to French::
from pythainlp.translate.th_fr import ThFrTranslator
thfr = ThFrTranslator()
thfr.translate("ทดสอบระบบ")
# output: "Test du système."
"""
self.translated = self.model_thzh.generate(
**self.tokenizer_thzh(text, return_tensors="pt", padding=True)
)
return [
self.tokenizer_thzh.decode(t, skip_special_tokens=True)
for t in self.translated
][0]
| 2,344 | 27.950617 | 79 | py |
pythainlp-dev/pythainlp/translate/tokenization_small100.py | pythainlp-dev/pythainlp/translate/tokenization_small100.py | # Copyright (c) 2022 Idiap Research Institute, http://www.idiap.ch/
# Written by Alireza Mohammadshahi <[email protected]>
# This is a modified version of https://github.com/huggingface/transformers/blob/main/src/transformers/models/m2m_100/tokenization_m2m_100.py
# which owns by Fariseq Authors and The HuggingFace Inc. team.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for SMALL100."""
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from transformers.tokenization_utils import BatchEncoding, PreTrainedTokenizer
from transformers.utils import logging
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"spm_file": "sentencepiece.bpe.model",
"tokenizer_config_file": "tokenizer_config.json",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"alirezamsh/small100": "https://huggingface.co/alirezamsh/small100/resolve/main/vocab.json",
},
"spm_file": {
"alirezamsh/small100": "https://huggingface.co/alirezamsh/small100/resolve/main/sentencepiece.bpe.model",
},
"tokenizer_config_file": {
"alirezamsh/small100": "https://huggingface.co/alirezamsh/small100/resolve/main/tokenizer_config.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"alirezamsh/small100": 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
"m2m100": ["af", "am", "ar", "ast", "az", "ba", "be", "bg", "bn", "br", "bs", "ca", "ceb", "cs", "cy", "da", "de", "el", "en", "es", "et", "fa", "ff", "fi", "fr", "fy", "ga", "gd", "gl", "gu", "ha", "he", "hi", "hr", "ht", "hu", "hy", "id", "ig", "ilo", "is", "it", "ja", "jv", "ka", "kk", "km", "kn", "ko", "lb", "lg", "ln", "lo", "lt", "lv", "mg", "mk", "ml", "mn", "mr", "ms", "my", "ne", "nl", "no", "ns", "oc", "or", "pa", "pl", "ps", "pt", "ro", "ru", "sd", "si", "sk", "sl", "so", "sq", "sr", "ss", "su", "sv", "sw", "ta", "th", "tl", "tn", "tr", "uk", "ur", "uz", "vi", "wo", "xh", "yi", "yo", "zh", "zu"]
}
# fmt: on
class SMALL100Tokenizer(PreTrainedTokenizer):
"""
Construct an SMALL100 tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece).
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
Path to the vocabulary file.
spm_file (`str`):
Path to [SentencePiece](https://github.com/google/sentencepiece) file (generally has a .spm extension) that
contains the vocabulary.
tgt_lang (`str`, *optional*):
A string representing the target language.
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
sep_token (`str`, *optional*, defaults to `"</s>"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
language_codes (`str`, *optional*):
What language codes to use. Should be `"m2m100"`.
sp_model_kwargs (`dict`, *optional*):
Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
to set:
- `enable_sampling`: Enable subword regularization.
- `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
- `nbest_size = {0,1}`: No sampling is performed.
- `nbest_size > 1`: samples from the nbest_size results.
- `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
using forward-filtering-and-backward-sampling algorithm.
- `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
BPE-dropout.
Examples:
```python
>>> from tokenization_small100 import SMALL100Tokenizer
>>> tokenizer = SMALL100Tokenizer.from_pretrained("alirezamsh/small100", tgt_lang="ro")
>>> src_text = " UN Chief Says There Is No Military Solution in Syria"
>>> tgt_text = "Şeful ONU declară că nu există o soluţie militară în Siria"
>>> model_inputs = tokenizer(src_text, text_target=tgt_text, return_tensors="pt")
>>> model(**model_inputs) # should work
```"""
vocab_files_names = VOCAB_FILES_NAMES
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
model_input_names = ["input_ids", "attention_mask"]
prefix_tokens: List[int] = []
suffix_tokens: List[int] = []
def __init__(
self,
vocab_file,
spm_file,
tgt_lang=None,
bos_token="<s>",
eos_token="</s>",
sep_token="</s>",
pad_token="<pad>",
unk_token="<unk>",
language_codes="m2m100",
sp_model_kwargs: Optional[Dict[str, Any]] = None,
num_madeup_words=8,
**kwargs,
) -> None:
self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
self.language_codes = language_codes
fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes]
self.lang_code_to_token = {lang_code: f"__{lang_code}__" for lang_code in fairseq_language_code}
kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
kwargs["additional_special_tokens"] += [
self.get_lang_token(lang_code)
for lang_code in fairseq_language_code
if self.get_lang_token(lang_code) not in kwargs["additional_special_tokens"]
]
super().__init__(
tgt_lang=tgt_lang,
bos_token=bos_token,
eos_token=eos_token,
sep_token=sep_token,
unk_token=unk_token,
pad_token=pad_token,
language_codes=language_codes,
sp_model_kwargs=self.sp_model_kwargs,
num_madeup_words=num_madeup_words,
**kwargs,
)
self.vocab_file = vocab_file
self.encoder = load_json(vocab_file)
self.decoder = {v: k for k, v in self.encoder.items()}
self.spm_file = spm_file
self.sp_model = load_spm(spm_file, self.sp_model_kwargs)
self.encoder_size = len(self.encoder)
self.lang_token_to_id = {
self.get_lang_token(lang_code): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)
}
self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)}
self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()}
self._tgt_lang = tgt_lang if tgt_lang is not None else "en"
self.cur_lang_id = self.get_lang_id(self._tgt_lang)
self.set_lang_special_tokens(self._tgt_lang)
self.num_madeup_words = num_madeup_words
@property
def vocab_size(self) -> int:
return len(self.encoder) + len(self.lang_token_to_id) + self.num_madeup_words
@property
def tgt_lang(self) -> str:
return self._tgt_lang
@tgt_lang.setter
def tgt_lang(self, new_tgt_lang: str) -> None:
self._tgt_lang = new_tgt_lang
self.set_lang_special_tokens(self._tgt_lang)
def _tokenize(self, text: str) -> List[str]:
return self.sp_model.encode(text, out_type=str)
def _convert_token_to_id(self, token):
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(token, self.encoder[self.unk_token])
def _convert_id_to_token(self, index: int) -> str:
"""Converts an index (integer) in a token (str) using the decoder."""
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(index, self.unk_token)
def convert_tokens_to_string(self, tokens: List[str]) -> str:
"""Converts a sequence of tokens (strings for sub-words) in a single string."""
return self.sp_model.decode(tokens)
def get_special_tokens_mask(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
) -> List[int]:
"""
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` method.
Args:
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
)
prefix_ones = [1] * len(self.prefix_tokens)
suffix_ones = [1] * len(self.suffix_tokens)
if token_ids_1 is None:
return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
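    # Editor's illustrative note (added; not in the original file): with the
    # source-mode special tokens set in __init__ (prefix_tokens == [tgt_lang_id],
    # suffix_tokens == [eos_id]), a three-token `token_ids_0` yields the mask
    # [1] + [0, 0, 0] + [1].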
def build_inputs_with_special_tokens(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
        Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating
        and adding special tokens. A SMALL-100 sequence has the following format, where `X` represents the sequence:
        - `input_ids` (for encoder) `[tgt_lang_code] X [eos]`
        - `decoder_input_ids`: (for decoder) `X [eos]`
BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a
separator.
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
"""
if token_ids_1 is None:
if self.prefix_tokens is None:
return token_ids_0 + self.suffix_tokens
else:
return self.prefix_tokens + token_ids_0 + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
if self.prefix_tokens is None:
return token_ids_0 + token_ids_1 + self.suffix_tokens
else:
return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
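    # Editor's illustrative note (added; not in the original file): after
    # set_lang_special_tokens("ro"), encoder inputs become
    # [__ro__] <tokens> </s>; after _switch_to_target_mode() the prefix is
    # dropped and only </s> is appended.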
def get_vocab(self) -> Dict:
vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__(self) -> Dict:
state = self.__dict__.copy()
state["sp_model"] = None
return state
def __setstate__(self, d: Dict) -> None:
self.__dict__ = d
# for backward compatibility
if not hasattr(self, "sp_model_kwargs"):
self.sp_model_kwargs = {}
self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
save_dir = Path(save_directory)
if not save_dir.is_dir():
raise OSError(f"{save_directory} should be a directory")
vocab_save_path = save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
)
spm_save_path = save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
)
save_json(self.encoder, vocab_save_path)
if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
copyfile(self.spm_file, spm_save_path)
elif not os.path.isfile(self.spm_file):
with open(spm_save_path, "wb") as fi:
content_spiece_model = self.sp_model.serialized_model_proto()
fi.write(content_spiece_model)
return (str(vocab_save_path), str(spm_save_path))
def prepare_seq2seq_batch(
self,
src_texts: List[str],
tgt_texts: Optional[List[str]] = None,
tgt_lang: str = "ro",
**kwargs,
) -> BatchEncoding:
self.tgt_lang = tgt_lang
self.set_lang_special_tokens(self.tgt_lang)
return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
def _build_translation_inputs(self, raw_inputs, tgt_lang: Optional[str], **extra_kwargs):
"""Used by translation pipeline, to prepare inputs for the generate function"""
if tgt_lang is None:
raise ValueError("Translation requires a `tgt_lang` for this model")
self.tgt_lang = tgt_lang
inputs = self(raw_inputs, add_special_tokens=True, **extra_kwargs)
return inputs
def _switch_to_input_mode(self):
self.set_lang_special_tokens(self.tgt_lang)
def _switch_to_target_mode(self):
self.prefix_tokens = None
self.suffix_tokens = [self.eos_token_id]
def set_lang_special_tokens(self, src_lang: str) -> None:
"""Reset the special tokens to the tgt lang setting. No prefix and suffix=[eos, tgt_lang_code]."""
lang_token = self.get_lang_token(src_lang)
self.cur_lang_id = self.lang_token_to_id[lang_token]
self.prefix_tokens = [self.cur_lang_id]
self.suffix_tokens = [self.eos_token_id]
def get_lang_token(self, lang: str) -> str:
return self.lang_code_to_token[lang]
def get_lang_id(self, lang: str) -> int:
lang_token = self.get_lang_token(lang)
return self.lang_token_to_id[lang_token]
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
spm.Load(str(path))
return spm
def load_json(path: str) -> Union[Dict, List]:
with open(path, "r") as f:
return json.load(f)
def save_json(data, path: str) -> None:
with open(path, "w") as f:
json.dump(data, f, indent=2)
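# --- Editor's illustrative usage sketch; not part of the original file. ---
# It mirrors the class docstring example above and assumes the
# "alirezamsh/small100" checkpoint is reachable. Nothing below runs on import.
if __name__ == "__main__":
    tokenizer = SMALL100Tokenizer.from_pretrained("alirezamsh/small100", tgt_lang="ro")
    src_text = " UN Chief Says There Is No Military Solution in Syria"
    tgt_text = "Şeful ONU declară că nu există o soluţie militară în Siria"
    model_inputs = tokenizer(src_text, text_target=tgt_text, return_tensors="pt")
    # The target-language token __ro__ is prepended and </s> appended to the
    # encoder input via prefix_tokens/suffix_tokens above.
    print(model_inputs["input_ids"])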
| 15,955 | 42.955923 | 617 | py |
pythainlp-dev/pythainlp/translate/zh_th.py | pythainlp-dev/pythainlp/translate/zh_th.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Lalita Chinese-Thai Machine Translation
from AI builder
- GitHub: https://github.com/LalitaDeelert/lalita-mt-zhth
- Facebook post https://web.facebook.com/aibuildersx/posts/166736255494822
"""
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
class ThZhTranslator:
"""
Thai-Chinese Machine Translation
from Lalita @ AI builder
- GitHub: https://github.com/LalitaDeelert/lalita-mt-zhth
- Facebook post https://web.facebook.com/aibuildersx/posts/166736255494822
    :param bool use_gpu: load model to GPU (default is False)
"""
def __init__(
self,
use_gpu: bool = False,
pretrained: str = "Lalita/marianmt-th-zh_cn",
) -> None:
self.tokenizer_thzh = AutoTokenizer.from_pretrained(pretrained)
self.model_thzh = AutoModelForSeq2SeqLM.from_pretrained(pretrained)
if use_gpu:
self.model_thzh = self.model_thzh.cuda()
def translate(self, text: str) -> str:
"""
Translate text from Thai to Chinese
:param str text: input text in source language
:return: translated text in target language
:rtype: str
:Example:
Translate text from Thai to Chinese::
from pythainlp.translate import ThZhTranslator
thzh = ThZhTranslator()
thzh.translate("ผมรักคุณ")
# output: 我爱你
"""
self.translated = self.model_thzh.generate(
**self.tokenizer_thzh(text, return_tensors="pt", padding=True)
)
return [
self.tokenizer_thzh.decode(t, skip_special_tokens=True)
for t in self.translated
][0]
class ZhThTranslator:
"""
Chinese-Thai Machine Translation
from Lalita @ AI builder
- GitHub: https://github.com/LalitaDeelert/lalita-mt-zhth
- Facebook post https://web.facebook.com/aibuildersx/posts/166736255494822
    :param bool use_gpu: load model to GPU (default is False)
"""
def __init__(
self,
use_gpu: bool = False,
pretrained: str = "Lalita/marianmt-zh_cn-th",
) -> None:
self.tokenizer_zhth = AutoTokenizer.from_pretrained(pretrained)
self.model_zhth = AutoModelForSeq2SeqLM.from_pretrained(pretrained)
if use_gpu:
self.model_zhth.cuda()
def translate(self, text: str) -> str:
"""
Translate text from Chinese to Thai
:param str text: input text in source language
:return: translated text in target language
:rtype: str
:Example:
Translate text from Chinese to Thai::
from pythainlp.translate import ZhThTranslator
zhth = ZhThTranslator()
zhth.translate("我爱你")
# output: ผมรักคุณนะ
"""
self.translated = self.model_zhth.generate(
**self.tokenizer_zhth(text, return_tensors="pt", padding=True)
)
return [
self.tokenizer_zhth.decode(t, skip_special_tokens=True)
for t in self.translated
][0]
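# --- Editor's illustrative usage sketch; not part of the original module. ---
# Instantiating either translator downloads the corresponding Lalita MarianMT
# checkpoint from the Hugging Face Hub.
if __name__ == "__main__":
    zhth = ZhThTranslator()  # pass use_gpu=True to move the model to CUDA
    print(zhth.translate("我爱你"))  # expected (per docstring): ผมรักคุณนะ
    thzh = ThZhTranslator()
    print(thzh.translate("ผมรักคุณ"))  # expected (per docstring): 我爱你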
| 3,677 | 28.190476 | 78 | py |
pythainlp-dev/pythainlp/transliterate/__init__.py | pythainlp-dev/pythainlp/transliterate/__init__.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Transliteration.
"""
__all__ = ["romanize", "transliterate", "pronunciate", "puan"]
from pythainlp.transliterate.core import romanize, transliterate, pronunciate
from pythainlp.transliterate.spoonerism import puan
| 834 | 35.304348 | 77 | py |
pythainlp-dev/pythainlp/transliterate/core.py | pythainlp-dev/pythainlp/transliterate/core.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
DEFAULT_ROMANIZE_ENGINE = "royin"
DEFAULT_TRANSLITERATE_ENGINE = "thaig2p"
DEFAULT_PRONUNCIATE_ENGINE = "w2p"
def romanize(
text: str,
engine: str = DEFAULT_ROMANIZE_ENGINE,
fallback_engine: str = DEFAULT_ROMANIZE_ENGINE,
) -> str:
"""
This function renders Thai words in the Latin alphabet or "romanization",
using the Royal Thai General System of Transcription (RTGS)
[#rtgs_transcription]_. RTGS is the official system published
by the Royal Institute of Thailand. (Thai: ถอดเสียงภาษาไทยเป็นอักษรละติน)
:param str text: Thai text to be romanized
:param str engine: One of 'royin' (default), 'thai2rom', 'thai2rom_onnx, 'tltk', and 'lookup'. See more in options for engine section.
:param str fallback_engine: If engine equals 'lookup', use `fallback_engine` for words that are not in the transliteration dict.
No effect on other engines. Default to 'royin'.
:return: A string of Thai words rendered in the Latin alphabet.
:rtype: str
:Options for engines:
* *royin* - (default) based on the Royal Thai General System of
Transcription issued by Royal Institute of Thailand.
* *thai2rom* - a deep learning-based Thai romanization engine
(require PyTorch).
* *thai2rom_onnx* - a deep learning-based Thai romanization engine with ONNX runtime
* *tltk* - TLTK: Thai Language Toolkit
* *lookup* - Look up on Thai-English Transliteration dictionary v1.4 compiled by Wannaphong.
:Example:
::
from pythainlp.transliterate import romanize
romanize("สามารถ", engine="royin")
# output: 'samant'
romanize("สามารถ", engine="thai2rom")
# output: 'samat'
romanize("สามารถ", engine="tltk")
# output: 'samat'
romanize("ภาพยนตร์", engine="royin")
# output: 'phapn'
romanize("ภาพยนตร์", engine="thai2rom")
# output: 'phapphayon'
romanize("ภาพยนตร์", engine="thai2rom_onnx")
# output: 'phapphayon'
romanize("ก็อปปี้", engine="lookup")
# output: 'copy'
"""
def select_romanize_engine(engine: str):
if engine == "thai2rom":
from pythainlp.transliterate.thai2rom import romanize
elif engine == "thai2rom_onnx":
from pythainlp.transliterate.thai2rom_onnx import romanize
elif engine == "tltk":
from pythainlp.transliterate.tltk import romanize
else: # use default engine "royin"
from pythainlp.transliterate.royin import romanize
return romanize
if not text or not isinstance(text, str):
return ""
if engine == "lookup":
from pythainlp.transliterate.lookup import romanize
fallback = select_romanize_engine(fallback_engine)
return romanize(text, fallback_func=fallback)
else:
return select_romanize_engine(engine)(text)
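# Editor's note (added): with engine="lookup", each space-separated word is
# looked up in the Thai-English transliteration dictionary and any word missing
# from it is passed to `fallback_engine`, e.g.
#     romanize("ก็อปปี้ ภาพยนตร์", engine="lookup", fallback_engine="thai2rom")
# uses the dictionary where possible and thai2rom for out-of-dictionary words.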
def transliterate(
text: str, engine: str = DEFAULT_TRANSLITERATE_ENGINE
) -> str:
"""
This function transliterates Thai text.
:param str text: Thai text to be transliterated
:param str engine: 'icu', 'ipa', or 'thaig2p' (default)
:return: A string of phonetic alphabets indicating
how the input text should be pronounced.
:rtype: str
:Options for engines:
* *thaig2p* - (default) Thai Grapheme-to-Phoneme,
output is IPA (require PyTorch)
* *icu* - pyicu, based on International Components for Unicode (ICU)
* *ipa* - epitran, output is International Phonetic Alphabet (IPA)
* *tltk_g2p* - Thai Grapheme-to-Phoneme from\
`TLTK <https://pypi.org/project/tltk/>`_.,
* *iso_11940* - Thai text into Latin characters with ISO 11940.
* *tltk_ipa* - tltk, output is International Phonetic Alphabet (IPA)
:Example:
::
from pythainlp.transliterate import transliterate
transliterate("สามารถ", engine="icu")
# output: 's̄āmārt̄h'
transliterate("สามารถ", engine="ipa")
# output: 'saːmaːrot'
transliterate("สามารถ", engine="thaig2p")
# output: 's aː ˩˩˦ . m aː t̚ ˥˩'
transliterate("สามารถ", engine="tltk_ipa")
# output: 'saː5.maːt3'
transliterate("สามารถ", engine="tltk_g2p")
# output: 'saa4~maat2'
transliterate("สามารถ", engine="iso_11940")
# output: 's̄āmārt̄h'
transliterate("ภาพยนตร์", engine="icu")
# output: 'p̣hāphyntr̒'
transliterate("ภาพยนตร์", engine="ipa")
# output: 'pʰaːpjanot'
transliterate("ภาพยนตร์", engine="thaig2p")
# output: 'pʰ aː p̚ ˥˩ . pʰ a ˦˥ . j o n ˧'
transliterate("ภาพยนตร์", engine="iso_11940")
# output: 'p̣hāphyntr'
"""
if not text or not isinstance(text, str):
return ""
if engine == "icu" or engine == "pyicu":
from pythainlp.transliterate.pyicu import transliterate
elif engine == "ipa":
from pythainlp.transliterate.ipa import transliterate
elif engine == "tltk_g2p":
from pythainlp.transliterate.tltk import tltk_g2p as transliterate
elif engine == "tltk_ipa":
from pythainlp.transliterate.tltk import tltk_ipa as transliterate
elif engine == "iso_11940":
from pythainlp.transliterate.iso_11940 import transliterate
else: # use default engine: "thaig2p"
from pythainlp.transliterate.thaig2p import transliterate
return transliterate(text)
def pronunciate(word: str, engine: str = DEFAULT_PRONUNCIATE_ENGINE) -> str:
"""
    This function produces the Thai pronunciation of a Thai word.
    :param str word: Thai word to be pronounced
:param str engine: 'w2p' (default)
:return: A string of Thai letters indicating
how the input text should be pronounced.
:rtype: str
:Options for engines:
* *w2p* - Thai Word-to-Phoneme
:Example:
::
from pythainlp.transliterate import pronunciate
pronunciate("สามารถ", engine="w2p")
# output: 'สา-มาด'
pronunciate("ภาพยนตร์", engine="w2p")
# output: 'พาบ-พะ-ยน'
"""
if not word or not isinstance(word, str):
return ""
# if engine == "w2p": # has only one engine
from pythainlp.transliterate.w2p import pronunciate
return pronunciate(word)
| 6,969 | 32.033175 | 138 | py |
pythainlp-dev/pythainlp/transliterate/ipa.py | pythainlp-dev/pythainlp/transliterate/ipa.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Transliterating text to International Phonetic Alphabet (IPA)
Using epitran
:See Also:
* `GitHub \
<https://github.com/dmort27/epitran>`_
"""
from typing import List
import epitran
_EPI_THA = epitran.Epitran("tha-Thai")
def transliterate(text: str) -> str:
return _EPI_THA.transliterate(text)
def trans_list(text: str) -> List[str]:
return _EPI_THA.trans_list(text)
def xsampa_list(text: str) -> List[str]:
return _EPI_THA.xsampa_list(text)
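# --- Editor's illustrative usage sketch; not part of the original module. ---
# Requires the `epitran` package with its Thai (tha-Thai) support installed.
if __name__ == "__main__":
    print(transliterate("สามารถ"))  # expected (per the core.py docstring): 'saːmaːrot'
    print(trans_list("สามารถ"))     # the same pronunciation as a list of IPA segments
    print(xsampa_list("สามารถ"))    # X-SAMPA representation of the segments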
| 1,092 | 26.325 | 74 | py |
pythainlp-dev/pythainlp/transliterate/iso_11940.py | pythainlp-dev/pythainlp/transliterate/iso_11940.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Transliterating Thai text with ISO 11940
:See Also:
* `Wikipedia \
<https://en.wikipedia.org/wiki/ISO_11940>`_
"""
_consonants = {
"ก": "k",
"ข": "k̄h",
"ฃ": "ḳ̄h",
"ค": "kh",
"ฅ": "k̛h",
"ฆ": "ḳh",
"ง": "ng",
"จ": "c",
"ฉ": "c̄h",
"ช": "ch",
"ซ": "s",
"ฌ": "c̣h",
"ญ": "ỵ",
"ฎ": "ḍ",
"ฏ": "ṭ",
"ฐ": "ṭ̄h",
"ฑ": "ṯh",
"ฒ": "t̛h",
"ณ": "ṇ",
"ด": "d",
"ต": "t",
"ถ": "t̄h",
"ท": "th",
"ธ": "ṭh",
"น": "n",
"บ": "b",
"ป": "p",
"ผ": "p̄h",
"ฝ": "f̄",
"พ": "ph",
"ฟ": "f",
"ภ": "p̣h",
"ม": "m",
"ย": "y",
"ร": "r",
"ฤ": "v",
"ล": "l",
"ฦ": "ł",
"ว": "w",
"ศ": "ṣ̄",
"ษ": "s̛̄",
"ส": "s̄",
"ห": "h̄",
"ฬ": "ḷ",
"อ": "x",
"ฮ": "ḥ",
}
_vowels = {
"ะ": "a",
"ั": "ạ",
"า": "ā",
"ำ": "å",
"ิ": "i",
"ี": "ī",
"ึ": "ụ",
"ื": "ụ̄",
"ุ": "u",
"ู": "ū",
"เ": "e",
"แ": "æ",
"โ": "o",
"ใ": "ı",
"ไ": "ị",
"ฤ": "v",
"ฤๅ": "vɨ",
"ฦ": "ł",
"ฦๅ": "łɨ",
"ย": "y",
"ว": "w",
"อ": "x",
}
_tone_marks = {
"่": "–̀".replace("–", ""),
"้": "–̂".replace("–", ""),
"๊": "–́".replace("–", ""),
"๋": "–̌".replace("–", ""),
"็": "–̆".replace("–", ""),
"์": "–̒".replace("–", ""),
"–๎".replace("–", ""): "~",
"–ํ".replace("–", ""): "–̊".replace("–", ""),
"–ฺ".replace("–", ""): "–̥".replace("–", ""),
}
_punctuation_and_digits = {
"ๆ": "«",
"ฯ": "ǂ",
"๏": "§",
"ฯ": "ǀ",
"๚": "ǁ",
"๛": "»",
"๐": "0",
"๑": "1",
"๒": "2",
"๓": "3",
"๔": "4",
"๕": "5",
"๖": "6",
"๗": "7",
"๘": "8",
"๙": "9",
}
_all_dict = {
**_consonants,
**_vowels,
**_tone_marks,
**_punctuation_and_digits,
}
_list_k = _all_dict.keys()
def transliterate(word: str) -> str:
"""
Use ISO 11940 for transliteration
    :param str word: Thai text to be transliterated.
    :return: A string of Latin characters transliterated according to ISO 11940.
"""
_new = ""
for i in word:
if i in _list_k:
_new += _all_dict[i]
else:
_new += i
return _new
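# --- Editor's illustrative usage sketch; not part of the original module. ---
# The conversion is a character-by-character lookup in _all_dict, so results
# follow directly from the tables above; unmapped characters pass through as-is.
if __name__ == "__main__":
    print(transliterate("แมว"))     # 'æmw' from the vowel/consonant tables
    print(transliterate("สามารถ"))  # expected (per the core.py docstring): 's̄āmārt̄h'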
| 2,860 | 18.201342 | 74 | py |
pythainlp-dev/pythainlp/transliterate/lookup.py | pythainlp-dev/pythainlp/transliterate/lookup.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Look up romanized Thai words in a predefined dictionary compiled by Wannaphong, 2022.
Wannaphong Phatthiyaphaibun. (2022).
wannaphong/thai-english-transliteration-dictionary: v1.4 (v1.4).
Zenodo. https://doi.org/10.5281/zenodo.6716672
"""
from typing import Callable, Optional
from pythainlp.corpus.th_en_translit import (
TRANSLITERATE_DICT,
TRANSLITERATE_EN,
TRANSLITERATE_FOLLOW_RTSG,
)
_TRANSLITERATE_IDX = 0
def follow_rtgs(text: str) -> Optional[bool]:
"""
    Check if the `text` follows the romanization defined by the Royal Thai General System of Transcription (RTGS).
:param str text: Text to look up. Must be a self-contained word.
:return: True if text follows the definition by RTGS, False otherwise.
`None` means unverified or unknown word.
:rtype: Optional[bool]
"""
try:
follow = TRANSLITERATE_DICT[text][TRANSLITERATE_FOLLOW_RTSG][
_TRANSLITERATE_IDX
]
except IndexError:
return None
else:
return follow
def _romanize(text: str, fallback_func: Callable[[str], str]) -> str:
"""
Romanize one word. Look up first, call `fallback_func` if not found.
"""
try:
# try to get 0-th idx of look up result, simply ignore other possible variations.
# not found means no mapping.
lookup = TRANSLITERATE_DICT[text][TRANSLITERATE_EN][_TRANSLITERATE_IDX]
except IndexError:
return fallback_func(text)
except TypeError as e:
raise TypeError(f"`fallback_engine` is not callable. {e}")
else:
return lookup
def romanize(text: str, fallback_func: Callable[[str], str]) -> str:
"""
Render Thai words in Latin alphabet by looking up
Thai-English transliteration dictionary.
:param str text: Thai text to be romanized
:param Callable[[str], str] fallback_func: Callable
:return: A string of Thai words rendered in the Latin alphabet
:rtype: str
"""
# split by whitespace. word_tokenizer may break text into subwords.
# e.g. กาแลกซี่ -> ["กา", "แลก", "ซี่"]
words = text.strip().split()
romanized_words = [_romanize(word, fallback_func) for word in words]
return " ".join(romanized_words)
| 2,815 | 32.52381 | 89 | py |
pythainlp-dev/pythainlp/transliterate/pyicu.py | pythainlp-dev/pythainlp/transliterate/pyicu.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Transliterating text to International Phonetic Alphabet (IPA)
Using International Components for Unicode (ICU)
:See Also:
* `GitHub \
<https://github.com/ovalhub/pyicu>`_
"""
from icu import Transliterator
_ICU_THAI_TO_LATIN = Transliterator.createInstance("Thai-Latin")
def transliterate(text: str) -> str:
"""
Use ICU (International Components for Unicode) for transliteration
:param str text: Thai text to be transliterated.
    :return: A string of International Phonetic Alphabet characters indicating how the text should be pronounced.
"""
return _ICU_THAI_TO_LATIN.transliterate(text)
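# --- Editor's illustrative usage sketch; not part of the original module. ---
# Requires PyICU with the built-in "Thai-Latin" transform.
if __name__ == "__main__":
    print(transliterate("สามารถ"))  # expected (per the core.py docstring): 's̄āmārt̄h'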
| 1,236 | 34.342857 | 103 | py |
pythainlp-dev/pythainlp/transliterate/royin.py | pythainlp-dev/pythainlp/transliterate/royin.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The Royal Thai General System of Transcription (RTGS)
is the official system for rendering Thai words in the Latin alphabet.
It was published by the Royal Institute of Thailand.
:See Also:
* `Wikipedia <https://en.wikipedia.org/wiki/Royal_Thai_General_System_of_Transcription>`_
"""
import re
from pythainlp import thai_consonants, word_tokenize
# vowel
_vowel_patterns = """เ*ียว,\\1iao
แ*็ว,\\1aeo
เ*ือย,\\1ueai
แ*ว,\\1aeo
เ*็ว,\\1eo
เ*ว,\\1eo
*ิว,\\1io
*วย,\\1uai
เ*ย,\\1oei
*อย,\\1oi
โ*ย,\\1oi
*ุย,\\1ui
*าย,\\1ai
ไ*ย,\\1ai
*ัย,\\1ai
ไ**,\\1\\2ai
ไ*,\\1ai
ใ*,\\1ai
*ว*,\\1ua\\2
*ัวะ,\\1ua
*ัว,\\1ua
เ*ือะ,\\1uea
เ*ือ,\\1uea
เ*ียะ,\\1ia
เ*ีย,\\1ia
เ*อะ,\\1oe
เ*อ,\\1oe
เ*ิ,\\1oe
*อ,\\1o
เ*าะ,\\1o
เ*็,\\1e
โ*ะ,\\1o
โ*,\\1o
แ*ะ,\\1ae
แ*,\\1ae
เ*าะ,\\1e
*าว,\\1ao
เ*า,\\1ao
เ*,\\1e
*ู,\\1u
*ุ,\\1u
*ื,\\1ue
*ึ,\\1ue
*ี,\\1i
*ิ,\\1i
*ำ,\\1am
*า,\\1a
*ั,\\1a
*ะ,\\1a
#ฤ,\\1rue
$ฤ,\\1ri"""
_vowel_patterns = _vowel_patterns.replace("*", f"([{thai_consonants}])")
_vowel_patterns = _vowel_patterns.replace("#", "([คนพมห])")
_vowel_patterns = _vowel_patterns.replace("$", "([กตทปศส])")
_VOWELS = [x.split(",") for x in _vowel_patterns.split("\n")]
# Consonants: [initial, final] romanization (พยัญชนะ ต้น สะกด)
_CONSONANTS = {
"ก": ["k", "k"],
"ข": ["kh", "k"],
"ฃ": ["kh", "k"],
"ค": ["kh", "k"],
"ฅ": ["kh", "k"],
"ฆ": ["kh", "k"],
"ง": ["ng", "ng"],
"จ": ["ch", "t"],
"ฉ": ["ch", "t"],
"ช": ["ch", "t"],
"ซ": ["s", "t"],
"ฌ": ["ch", "t"],
"ญ": ["y", "n"],
"ฎ": ["d", "t"],
"ฏ": ["t", "t"],
"ฐ": ["th", "t"],
    # ฑ as an initial consonant can also be "d" (ฑ พยัญชนะต้น เป็น d ได้)
"ฑ": ["th", "t"],
"ฒ": ["th", "t"],
"ณ": ["n", "n"],
"ด": ["d", "t"],
"ต": ["t", "t"],
"ถ": ["th", "t"],
"ท": ["th", "t"],
"ธ": ["th", "t"],
"น": ["n", "n"],
"บ": ["b", "p"],
"ป": ["p", "p"],
"ผ": ["ph", "p"],
"ฝ": ["f", "p"],
"พ": ["ph", "p"],
"ฟ": ["f", "p"],
"ภ": ["ph", "p"],
"ม": ["m", "m"],
"ย": ["y", ""],
"ร": ["r", "n"],
"ฤ": ["rue", ""],
"ล": ["l", "n"],
"ว": ["w", ""],
"ศ": ["s", "t"],
"ษ": ["s", "t"],
"ส": ["s", "t"],
"ห": ["h", ""],
"ฬ": ["l", "n"],
"อ": ["", ""],
"ฮ": ["h", ""],
}
_THANTHAKHAT = "\u0e4c"
_RE_CONSONANT = re.compile(f"[{thai_consonants}]")
_RE_NORMALIZE = re.compile(
f"จน์|มณ์|ณฑ์|ทร์|ตร์|[{thai_consonants}]{_THANTHAKHAT}|"
f"[{thai_consonants}][\u0e30-\u0e39]{_THANTHAKHAT}"
# Paiyannoi, Maiyamok, Tonemarks, Thanthakhat, Nikhahit, other signs
r"|[\u0e2f\u0e46\u0e48-\u0e4f\u0e5a\u0e5b]"
)
def _normalize(word: str) -> str:
"""
    Remove silent, unpronounced characters (Karan/Thanthakhat, Paiyannoi,
    Maiyamok) and tone marks.
    ตัดอักษรที่ไม่ออกเสียง (การันต์ ไปยาลน้อย ไม้ยมก*) และวรรณยุกต์ทิ้ง
"""
return _RE_NORMALIZE.sub("", word)
def _replace_vowels(word: str) -> str:
for vowel in _VOWELS:
word = re.sub(vowel[0], vowel[1], word)
return word
def _replace_consonants(word: str, consonants: str) -> str:
_HO_HIP = "\u0e2b" # ห
_RO_RUA = "\u0e23" # ร
_DOUBLE_RO_RUA = _RO_RUA + _RO_RUA
if not consonants:
return word
skip = False
mod_chars = []
j = 0 # j is the index of consonants
for i in range(len(word)):
if skip:
skip = False
j += 1
elif word[i] not in _CONSONANTS: # word[i] is not a Thai consonant.
mod_chars.append(word[i])
elif (
len(mod_chars) == 0 and word[i] == _HO_HIP and len(consonants) != 1
        ):  # Skip HO HIP unless it is the only consonant
j += 1
elif (
len(mod_chars) == 0
): # The first character must be an initial consonant.
mod_chars.append(_CONSONANTS[consonants[j]][0])
j += 1
        elif word[i:] == _DOUBLE_RO_RUA:  # Double RO RUA at the end of the word
skip = True
mod_chars.append("a")
mod_chars.append("n")
j += 1
elif word[i : i + 2] == _DOUBLE_RO_RUA:
skip = True
mod_chars.append("a")
j += 1
else: # Assume that the rest are final consonants.
mod_chars.append(_CONSONANTS[consonants[j]][1])
j += 1
return "".join(mod_chars)
# support function for romanize()
def _romanize(word: str) -> str:
word = _replace_vowels(_normalize(word))
consonants = _RE_CONSONANT.findall(word)
# 2-character word, all consonants
if len(word) == 2 and len(consonants) == 2:
word = list(word)
word.insert(1, "o")
word = "".join(word)
word = _replace_consonants(word, consonants)
return word
def romanize(text: str) -> str:
"""Render Thai words in Latin alphabet, using RTGS
Royal Thai General System of Transcription (RTGS),
is the official system by the Royal Institute of Thailand.
:param text: Thai text to be romanized
:type text: str
:return: A string of Thai words rendered in the Latin alphabet
:rtype: str
"""
words = word_tokenize(text)
romanized_words = [_romanize(word) for word in words]
return "".join(romanized_words)
| 5,720 | 23.659483 | 93 | py |
pythainlp-dev/pythainlp/transliterate/spoonerism.py | pythainlp-dev/pythainlp/transliterate/spoonerism.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pythainlp.transliterate import pronunciate
from pythainlp import thai_consonants
_list_consonants = list(thai_consonants.replace("ห", ""))
def puan(word: str, show_pronunciation: bool = True) -> str:
"""
Thai Spoonerism
This function converts Thai word to spoonerism word.
:param str word: Thai word to be spoonerized
:param bool show_pronunciation: True (default) or False
:return: A string of Thai spoonerism word.
:rtype: str
:Example:
::
from pythainlp.transliterate import puan
puan("นาริน")
# output: 'นิน-รา'
puan("นาริน", False)
# output: 'นินรา'
"""
word = pronunciate(word, engine="w2p")
_list_char = []
_list_pron = word.split("-")
_mix_list = ""
if len(_list_pron) == 1:
return word
if show_pronunciation:
_mix_list = "-"
for i in _list_pron:
for j in i:
if j in _list_consonants:
_list_char.append(j)
break
elif "ห" == j and "หฺ" not in i and len(i) == 2:
_list_char.append(j)
break
list_w_char = list(zip(_list_pron, _list_char))
_list_w = []
if len(list_w_char) == 2:
_list_w.append(
list_w_char[1][0].replace(list_w_char[1][1], list_w_char[0][1], 1)
)
_list_w.append(
list_w_char[0][0].replace(list_w_char[0][1], list_w_char[1][1], 1)
)
elif len(list_w_char) == 3:
_list_w.append(_list_pron[0])
_list_w.append(
list_w_char[2][0].replace(list_w_char[2][1], list_w_char[1][1], 1)
)
_list_w.append(
list_w_char[1][0].replace(list_w_char[1][1], list_w_char[2][1], 1)
)
else: # > 3 syllables
_list_w.append(
_list_pron[0].replace(list_w_char[0][1], list_w_char[-1][1], 1)
)
for i in range(1, len(list_w_char) - 1):
_list_w.append(_list_pron[i])
_list_w.append(
_list_pron[-1].replace(list_w_char[-1][1], list_w_char[0][1], 1)
)
if not show_pronunciation:
_list_w = [i.replace("หฺ", "").replace("ฺ", "") for i in _list_w]
return _mix_list.join(_list_w)
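# --- Editor's illustrative usage sketch; not part of the original module. ---
# puan() calls pronunciate(), which may download the Thai W2P model data on
# first use.
if __name__ == "__main__":
    print(puan("นาริน"))         # expected (per docstring): 'นิน-รา'
    print(puan("นาริน", False))  # expected (per docstring): 'นินรา'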
| 2,846 | 30.633333 | 78 | py |
pythainlp-dev/pythainlp/transliterate/thai2rom.py | pythainlp-dev/pythainlp/transliterate/thai2rom.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Romanization of Thai words based on machine-learnt engine ("thai2rom")
"""
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
from pythainlp.corpus import get_corpus_path
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
_MODEL_NAME = "thai2rom-pytorch-attn"
class ThaiTransliterator:
def __init__(self):
"""
Transliteration of Thai words.
Now supports Thai to Latin (romanization)
"""
# get the model, will download if it's not available locally
self.__model_filename = get_corpus_path(_MODEL_NAME)
loader = torch.load(self.__model_filename, map_location=device)
INPUT_DIM, E_EMB_DIM, E_HID_DIM, E_DROPOUT = loader["encoder_params"]
OUTPUT_DIM, D_EMB_DIM, D_HID_DIM, D_DROPOUT = loader["decoder_params"]
self._maxlength = 100
self._char_to_ix = loader["char_to_ix"]
self._ix_to_char = loader["ix_to_char"]
self._target_char_to_ix = loader["target_char_to_ix"]
self._ix_to_target_char = loader["ix_to_target_char"]
# encoder/ decoder
# Restore the model and construct the encoder and decoder.
self._encoder = Encoder(INPUT_DIM, E_EMB_DIM, E_HID_DIM, E_DROPOUT)
self._decoder = AttentionDecoder(
OUTPUT_DIM, D_EMB_DIM, D_HID_DIM, D_DROPOUT
)
self._network = Seq2Seq(
self._encoder,
self._decoder,
self._target_char_to_ix["<start>"],
self._target_char_to_ix["<end>"],
self._maxlength,
).to(device)
self._network.load_state_dict(loader["model_state_dict"])
self._network.eval()
def _prepare_sequence_in(self, text: str):
"""
Prepare input sequence for PyTorch
"""
idxs = []
for ch in text:
if ch in self._char_to_ix:
idxs.append(self._char_to_ix[ch])
else:
idxs.append(self._char_to_ix["<UNK>"])
idxs.append(self._char_to_ix["<end>"])
tensor = torch.tensor(idxs, dtype=torch.long)
return tensor.to(device)
def romanize(self, text: str) -> str:
"""
:param str text: Thai text to be romanized
:return: English (more or less) text that spells out how the Thai text
should be pronounced.
"""
input_tensor = self._prepare_sequence_in(text).view(1, -1)
input_length = torch.Tensor([len(text) + 1]).int()
target_tensor_logits = self._network(
input_tensor, input_length, None, 0
)
# Seq2seq model returns <END> as the first token,
# As a result, target_tensor_logits.size() is torch.Size([0])
if target_tensor_logits.size(0) == 0:
target = ["<PAD>"]
else:
target_tensor = (
torch.argmax(target_tensor_logits.squeeze(1), 1)
.cpu()
.detach()
.numpy()
)
target = [self._ix_to_target_char[t] for t in target_tensor]
return "".join(target)
class Encoder(nn.Module):
def __init__(
self, vocabulary_size, embedding_size, hidden_size, dropout=0.5
):
"""Constructor"""
super(Encoder, self).__init__()
self.hidden_size = hidden_size
self.character_embedding = nn.Embedding(
vocabulary_size, embedding_size
)
self.rnn = nn.LSTM(
input_size=embedding_size,
hidden_size=hidden_size // 2,
bidirectional=True,
batch_first=True,
)
self.dropout = nn.Dropout(dropout)
def forward(self, sequences, sequences_lengths):
# sequences: (batch_size, sequence_length=MAX_LENGTH)
# sequences_lengths: (batch_size)
batch_size = sequences.size(0)
self.hidden = self.init_hidden(batch_size)
sequences_lengths = torch.flip(
torch.sort(sequences_lengths).values, dims=(0,)
)
index_sorted = torch.sort(-1 * sequences_lengths).indices
index_unsort = torch.sort(index_sorted).indices # to unsorted sequence
sequences = sequences.index_select(0, index_sorted.to(device))
sequences = self.character_embedding(sequences)
sequences = self.dropout(sequences)
sequences_packed = nn.utils.rnn.pack_padded_sequence(
sequences, sequences_lengths.clone(), batch_first=True
)
sequences_output, self.hidden = self.rnn(sequences_packed, self.hidden)
sequences_output, _ = nn.utils.rnn.pad_packed_sequence(
sequences_output, batch_first=True
)
sequences_output = sequences_output.index_select(
0, index_unsort.clone().detach()
)
return sequences_output, self.hidden
def init_hidden(self, batch_size):
h_0 = torch.zeros(
[2, batch_size, self.hidden_size // 2], requires_grad=True
).to(device)
c_0 = torch.zeros(
[2, batch_size, self.hidden_size // 2], requires_grad=True
).to(device)
return (h_0, c_0)
class Attn(nn.Module):
def __init__(self, method, hidden_size):
super(Attn, self).__init__()
self.method = method
self.hidden_size = hidden_size
if self.method == "general":
self.attn = nn.Linear(self.hidden_size, hidden_size)
elif self.method == "concat":
self.attn = nn.Linear(self.hidden_size * 2, hidden_size)
self.other = nn.Parameter(torch.FloatTensor(1, hidden_size))
def forward(self, hidden, encoder_outputs, mask):
# Calculate energies for each encoder output
if self.method == "dot":
attn_energies = torch.bmm(
encoder_outputs, hidden.transpose(1, 2)
).squeeze(2)
elif self.method == "general":
attn_energies = self.attn(
encoder_outputs.view(-1, encoder_outputs.size(-1))
) # (batch_size * sequence_len, hidden_size)
attn_energies = torch.bmm(
attn_energies.view(*encoder_outputs.size()),
hidden.transpose(1, 2),
).squeeze(
2
) # (batch_size, sequence_len)
elif self.method == "concat":
attn_energies = self.attn(
torch.cat(
(hidden.expand(*encoder_outputs.size()), encoder_outputs),
2,
)
) # (batch_size, sequence_len, hidden_size)
attn_energies = torch.bmm(
attn_energies,
self.other.unsqueeze(0).expand(*hidden.size()).transpose(1, 2),
).squeeze(2)
attn_energies = attn_energies.masked_fill(mask == 0, -1e10)
# Normalize energies to weights in range 0 to 1
return F.softmax(attn_energies, 1)
class AttentionDecoder(nn.Module):
def __init__(
self, vocabulary_size, embedding_size, hidden_size, dropout=0.5
):
"""Constructor"""
super(AttentionDecoder, self).__init__()
self.vocabulary_size = vocabulary_size
self.hidden_size = hidden_size
self.character_embedding = nn.Embedding(
vocabulary_size, embedding_size
)
self.rnn = nn.LSTM(
input_size=embedding_size + self.hidden_size,
hidden_size=hidden_size,
bidirectional=False,
batch_first=True,
)
self.attn = Attn(method="general", hidden_size=self.hidden_size)
self.linear = nn.Linear(hidden_size, vocabulary_size)
self.dropout = nn.Dropout(dropout)
def forward(self, input_character, last_hidden, encoder_outputs, mask):
""" "Defines the forward computation of the decoder"""
# input_character: (batch_size, 1)
# last_hidden: (batch_size, hidden_dim)
# encoder_outputs: (batch_size, sequence_len, hidden_dim)
# mask: (batch_size, sequence_len)
hidden = last_hidden.permute(1, 0, 2)
attn_weights = self.attn(hidden, encoder_outputs, mask)
context_vector = attn_weights.unsqueeze(1).bmm(encoder_outputs)
context_vector = torch.sum(context_vector, dim=1)
context_vector = context_vector.unsqueeze(1)
embedded = self.character_embedding(input_character)
embedded = self.dropout(embedded)
rnn_input = torch.cat((context_vector, embedded), -1)
output, hidden = self.rnn(rnn_input)
output = output.view(-1, output.size(2))
x = self.linear(output)
return x, hidden[0], attn_weights
class Seq2Seq(nn.Module):
def __init__(
self,
encoder,
decoder,
target_start_token,
target_end_token,
max_length,
):
super().__init__()
self.encoder = encoder
self.decoder = decoder
self.pad_idx = 0
self.target_start_token = target_start_token
self.target_end_token = target_end_token
self.max_length = max_length
assert encoder.hidden_size == decoder.hidden_size
def create_mask(self, source_seq):
mask = source_seq != self.pad_idx
return mask
def forward(
self, source_seq, source_seq_len, target_seq, teacher_forcing_ratio=0.5
):
# source_seq: (batch_size, MAX_LENGTH)
# source_seq_len: (batch_size, 1)
# target_seq: (batch_size, MAX_LENGTH)
batch_size = source_seq.size(0)
start_token = self.target_start_token
end_token = self.target_end_token
max_len = self.max_length
target_vocab_size = self.decoder.vocabulary_size
outputs = torch.zeros(max_len, batch_size, target_vocab_size).to(
device
)
if target_seq is None:
assert teacher_forcing_ratio == 0, "Must be zero during inference"
inference = True
else:
inference = False
encoder_outputs, encoder_hidden = self.encoder(
source_seq, source_seq_len
)
decoder_input = (
torch.tensor([[start_token] * batch_size])
.view(batch_size, 1)
.to(device)
)
encoder_hidden_h_t = torch.cat(
[encoder_hidden[0][0], encoder_hidden[0][1]], dim=1
).unsqueeze(dim=0)
decoder_hidden = encoder_hidden_h_t
max_source_len = encoder_outputs.size(1)
mask = self.create_mask(source_seq[:, 0:max_source_len])
for di in range(max_len):
decoder_output, decoder_hidden, _ = self.decoder(
decoder_input, decoder_hidden, encoder_outputs, mask
)
topv, topi = decoder_output.topk(1)
outputs[di] = decoder_output.to(device)
teacher_force = random.random() < teacher_forcing_ratio
decoder_input = (
target_seq[:, di].reshape(batch_size, 1)
if teacher_force
else topi.detach()
)
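            # Editor's note (added): the next line unconditionally overwrites the
            # teacher-forcing choice made just above; during inference
            # (teacher_forcing_ratio=0, target_seq=None) both assignments give
            # the same value, topi.detach().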
decoder_input = topi.detach()
if inference and decoder_input == end_token:
return outputs[:di]
return outputs
_THAI_TO_ROM = ThaiTransliterator()
def romanize(text: str) -> str:
return _THAI_TO_ROM.romanize(text)
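# --- Editor's illustrative usage sketch; not part of the original module. ---
# Importing this module already instantiates ThaiTransliterator, which downloads
# the "thai2rom-pytorch-attn" weights via get_corpus_path() if not cached.
if __name__ == "__main__":
    print(romanize("ภาพยนตร์"))  # expected (per the core.py docstring): 'phapphayon'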
| 11,993 | 31.416216 | 79 | py |
pythainlp-dev/pythainlp/transliterate/thai2rom_onnx.py | pythainlp-dev/pythainlp/transliterate/thai2rom_onnx.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Romanization of Thai words based on machine-learnt engine in ONNX runtime ("thai2rom")
"""
from pythainlp.corpus import get_corpus_path
import numpy as np
import json
from onnxruntime import InferenceSession
_MODEL_ENCODER_NAME = "thai2rom_encoder_onnx"
_MODEL_DECODER_NAME = "thai2rom_decoder_onnx"
_MODEL_CONFIG_NAME = "thai2rom_config_onnx"
class ThaiTransliterator_ONNX:
def __init__(self):
"""
Transliteration of Thai words.
Now supports Thai to Latin (romanization)
"""
# get the model, will download if it's not available locally
self.__encoder_filename = get_corpus_path(_MODEL_ENCODER_NAME)
self.__decoder_filename = get_corpus_path(_MODEL_DECODER_NAME)
self.__config_filename = get_corpus_path(_MODEL_CONFIG_NAME)
# loader = torch.load(self.__model_filename, map_location=device)
with open(str(self.__config_filename)) as f:
loader = json.load(f)
OUTPUT_DIM = loader["output_dim"]
self._maxlength = 100
self._char_to_ix = loader["char_to_ix"]
self._ix_to_char = loader["ix_to_char"]
self._target_char_to_ix = loader["target_char_to_ix"]
self._ix_to_target_char = loader["ix_to_target_char"]
# encoder/ decoder
# Load encoder decoder onnx models.
self._encoder = InferenceSession(self.__encoder_filename)
self._decoder = InferenceSession(self.__decoder_filename)
self._network = Seq2Seq_ONNX(
self._encoder,
self._decoder,
self._target_char_to_ix["<start>"],
self._target_char_to_ix["<end>"],
self._maxlength,
target_vocab_size=OUTPUT_DIM,
)
def _prepare_sequence_in(self, text: str):
"""
Prepare input sequence for ONNX
"""
idxs = []
for ch in text:
if ch in self._char_to_ix:
idxs.append(self._char_to_ix[ch])
else:
idxs.append(self._char_to_ix["<UNK>"])
idxs.append(self._char_to_ix["<end>"])
return np.array(idxs)
def romanize(self, text: str) -> str:
"""
:param str text: Thai text to be romanized
:return: English (more or less) text that spells out how the Thai text
should be pronounced.
"""
input_tensor = self._prepare_sequence_in(text).reshape(1, -1)
input_length = [len(text) + 1]
target_tensor_logits = self._network.run(input_tensor, input_length)
        # Seq2seq model may return <END> as the first token;
        # in that case target_tensor_logits has shape (0, ...) (numpy here, not torch)
if target_tensor_logits.shape[0] == 0:
target = ["<PAD>"]
else:
target_tensor = np.argmax(target_tensor_logits.squeeze(1), 1)
target = [self._ix_to_target_char[str(t)] for t in target_tensor]
return "".join(target)
class Seq2Seq_ONNX:
def __init__(
self,
encoder,
decoder,
target_start_token,
target_end_token,
max_length,
target_vocab_size,
):
super().__init__()
self.encoder = encoder
self.decoder = decoder
self.pad_idx = 0
self.target_start_token = target_start_token
self.target_end_token = target_end_token
self.max_length = max_length
self.target_vocab_size = target_vocab_size
def create_mask(self, source_seq):
mask = source_seq != self.pad_idx
return mask
def run(self, source_seq, source_seq_len):
# source_seq: (batch_size, MAX_LENGTH)
# source_seq_len: (batch_size, 1)
# target_seq: (batch_size, MAX_LENGTH)
batch_size = source_seq.shape[0]
start_token = self.target_start_token
end_token = self.target_end_token
max_len = self.max_length
# target_vocab_size = self.decoder.vocabulary_size
outputs = np.zeros((max_len, batch_size, self.target_vocab_size))
expected_encoder_outputs = list(
map(lambda output: output.name, self.encoder.get_outputs())
)
encoder_outputs, encoder_hidden, _ = self.encoder.run(
input_feed={
"input_tensor": source_seq,
"input_lengths": source_seq_len,
},
output_names=expected_encoder_outputs,
)
decoder_input = np.array([[start_token] * batch_size]).reshape(
batch_size, 1
)
encoder_hidden_h_t = np.expand_dims(
np.concatenate(
# [encoder_hidden_1, encoder_hidden_2], dim=1
(encoder_hidden[0], encoder_hidden[1]),
axis=1,
),
axis=0,
)
decoder_hidden = encoder_hidden_h_t
max_source_len = encoder_outputs.shape[1]
mask = self.create_mask(source_seq[:, 0:max_source_len])
for di in range(max_len):
decoder_output, decoder_hidden = self.decoder.run(
input_feed={
"decoder_input": decoder_input.astype("int32"),
"decoder_hidden_1": decoder_hidden,
"encoder_outputs": encoder_outputs,
"mask": mask.tolist(),
},
output_names=[
self.decoder.get_outputs()[0].name,
self.decoder.get_outputs()[1].name,
],
)
topi = np.argmax(decoder_output, axis=1)
outputs[di] = decoder_output
decoder_input = np.array([topi])
if decoder_input == end_token:
return outputs[:di]
return outputs
_THAI_TO_ROM_ONNX = ThaiTransliterator_ONNX()
def romanize(text: str) -> str:
return _THAI_TO_ROM_ONNX.romanize(text)
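# --- Editor's illustrative usage sketch; not part of the original module. ---
# Importing this module instantiates ThaiTransliterator_ONNX, which fetches the
# ONNX encoder/decoder and config files via get_corpus_path() if not cached.
if __name__ == "__main__":
    print(romanize("ภาพยนตร์"))  # expected (per the core.py docstring): 'phapphayon'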
| 6,477 | 31.717172 | 86 | py |
pythainlp-dev/pythainlp/transliterate/thaig2p.py | pythainlp-dev/pythainlp/transliterate/thaig2p.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Thai Grapheme-to-Phoneme (Thai G2P)
GitHub : https://github.com/wannaphong/thai-g2p
"""
import random
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from pythainlp.corpus import get_corpus_path
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
_MODEL_NAME = "thai-g2p"
class ThaiG2P:
"""
Latin transliteration of Thai words, using International Phonetic Alphabet
"""
def __init__(self):
# get the model, will download if it's not available locally
self.__model_filename = get_corpus_path(_MODEL_NAME)
loader = torch.load(self.__model_filename, map_location=device)
INPUT_DIM, E_EMB_DIM, E_HID_DIM, E_DROPOUT = loader["encoder_params"]
OUTPUT_DIM, D_EMB_DIM, D_HID_DIM, D_DROPOUT = loader["decoder_params"]
self._maxlength = 100
self._char_to_ix = loader["char_to_ix"]
self._ix_to_char = loader["ix_to_char"]
self._target_char_to_ix = loader["target_char_to_ix"]
self._ix_to_target_char = loader["ix_to_target_char"]
# encoder/ decoder
# Restore the model and construct the encoder and decoder.
self._encoder = Encoder(INPUT_DIM, E_EMB_DIM, E_HID_DIM, E_DROPOUT)
self._decoder = AttentionDecoder(
OUTPUT_DIM, D_EMB_DIM, D_HID_DIM, D_DROPOUT
)
self._network = Seq2Seq(
self._encoder,
self._decoder,
self._target_char_to_ix["<start>"],
self._target_char_to_ix["<end>"],
self._maxlength,
).to(device)
self._network.load_state_dict(loader["model_state_dict"])
self._network.eval()
def _prepare_sequence_in(self, text: str):
"""
Prepare input sequence for PyTorch.
"""
idxs = []
for ch in text:
if ch in self._char_to_ix:
idxs.append(self._char_to_ix[ch])
else:
idxs.append(self._char_to_ix["<UNK>"])
idxs.append(self._char_to_ix["<end>"])
tensor = torch.tensor(idxs, dtype=torch.long)
return tensor.to(device)
def g2p(self, text: str) -> str:
"""
        :param str text: Thai text to be converted to phonemes
        :return: International Phonetic Alphabet (IPA) string that spells out
        how the Thai text should be pronounced.
"""
input_tensor = self._prepare_sequence_in(text).view(1, -1)
input_length = [len(text) + 1]
target_tensor_logits = self._network(
input_tensor, input_length, None, 0
)
# Seq2seq model returns <END> as the first token,
# As a result, target_tensor_logits.size() is torch.Size([0])
if target_tensor_logits.size(0) == 0:
target = ["<PAD>"]
else:
target_tensor = (
torch.argmax(target_tensor_logits.squeeze(1), 1)
.cpu()
.detach()
.numpy()
)
target = [self._ix_to_target_char[t] for t in target_tensor]
return "".join(target)
class Encoder(nn.Module):
def __init__(
self, vocabulary_size, embedding_size, hidden_size, dropout=0.5
):
"""Constructor"""
super(Encoder, self).__init__()
self.hidden_size = hidden_size
self.character_embedding = nn.Embedding(
vocabulary_size, embedding_size
)
self.rnn = nn.LSTM(
input_size=embedding_size,
hidden_size=hidden_size // 2,
bidirectional=True,
batch_first=True,
)
self.dropout = nn.Dropout(dropout)
def forward(self, sequences, sequences_lengths):
# sequences: (batch_size, sequence_length=MAX_LENGTH)
# sequences_lengths: (batch_size)
batch_size = sequences.size(0)
self.hidden = self.init_hidden(batch_size)
sequences_lengths = np.sort(sequences_lengths)[::-1]
index_sorted = np.argsort(
-sequences_lengths
) # use negation in sort in descending order
index_unsort = np.argsort(index_sorted) # to unsorted sequence
index_sorted = torch.from_numpy(index_sorted)
sequences = sequences.index_select(0, index_sorted.to(device))
sequences = self.character_embedding(sequences)
sequences = self.dropout(sequences)
sequences_packed = nn.utils.rnn.pack_padded_sequence(
sequences, sequences_lengths.copy(), batch_first=True
)
sequences_output, self.hidden = self.rnn(sequences_packed, self.hidden)
sequences_output, _ = nn.utils.rnn.pad_packed_sequence(
sequences_output, batch_first=True
)
index_unsort = torch.from_numpy(index_unsort).to(device)
sequences_output = sequences_output.index_select(
0, index_unsort.clone().detach()
)
return sequences_output, self.hidden
def init_hidden(self, batch_size):
h_0 = torch.zeros(
[2, batch_size, self.hidden_size // 2], requires_grad=True
).to(device)
c_0 = torch.zeros(
[2, batch_size, self.hidden_size // 2], requires_grad=True
).to(device)
return (h_0, c_0)
class Attn(nn.Module):
def __init__(self, method, hidden_size):
super(Attn, self).__init__()
self.method = method
self.hidden_size = hidden_size
if self.method == "general":
self.attn = nn.Linear(self.hidden_size, hidden_size)
elif self.method == "concat":
self.attn = nn.Linear(self.hidden_size * 2, hidden_size)
self.other = nn.Parameter(torch.FloatTensor(1, hidden_size))
def forward(self, hidden, encoder_outputs, mask):
# Calculate energies for each encoder output
if self.method == "dot":
attn_energies = torch.bmm(
encoder_outputs, hidden.transpose(1, 2)
).squeeze(2)
elif self.method == "general":
attn_energies = self.attn(
encoder_outputs.view(-1, encoder_outputs.size(-1))
) # (batch_size * sequence_len, hidden_size)
attn_energies = torch.bmm(
attn_energies.view(*encoder_outputs.size()),
hidden.transpose(1, 2),
).squeeze(
2
) # (batch_size, sequence_len)
elif self.method == "concat":
attn_energies = self.attn(
torch.cat(
(hidden.expand(*encoder_outputs.size()), encoder_outputs),
2,
)
) # (batch_size, sequence_len, hidden_size)
attn_energies = torch.bmm(
attn_energies,
self.other.unsqueeze(0).expand(*hidden.size()).transpose(1, 2),
).squeeze(2)
attn_energies = attn_energies.masked_fill(mask == 0, -1e10)
# Normalize energies to weights in range 0 to 1
return F.softmax(attn_energies, 1)
class AttentionDecoder(nn.Module):
def __init__(
self, vocabulary_size, embedding_size, hidden_size, dropout=0.5
):
"""Constructor"""
super(AttentionDecoder, self).__init__()
self.vocabulary_size = vocabulary_size
self.hidden_size = hidden_size
self.character_embedding = nn.Embedding(
vocabulary_size, embedding_size
)
self.rnn = nn.LSTM(
input_size=embedding_size + self.hidden_size,
hidden_size=hidden_size,
bidirectional=False,
batch_first=True,
)
self.attn = Attn(method="general", hidden_size=self.hidden_size)
self.linear = nn.Linear(hidden_size, vocabulary_size)
self.dropout = nn.Dropout(dropout)
def forward(self, input_character, last_hidden, encoder_outputs, mask):
""" "Defines the forward computation of the decoder"""
# input_character: (batch_size, 1)
# last_hidden: (batch_size, hidden_dim)
# encoder_outputs: (batch_size, sequence_len, hidden_dim)
# mask: (batch_size, sequence_len)
hidden = last_hidden.permute(1, 0, 2)
attn_weights = self.attn(hidden, encoder_outputs, mask)
context_vector = attn_weights.unsqueeze(1).bmm(encoder_outputs)
context_vector = torch.sum(context_vector, dim=1)
context_vector = context_vector.unsqueeze(1)
embedded = self.character_embedding(input_character)
embedded = self.dropout(embedded)
rnn_input = torch.cat((context_vector, embedded), -1)
output, hidden = self.rnn(rnn_input)
output = output.view(-1, output.size(2))
x = self.linear(output)
return x, hidden[0], attn_weights
class Seq2Seq(nn.Module):
def __init__(
self,
encoder,
decoder,
target_start_token,
target_end_token,
max_length,
):
super().__init__()
self.encoder = encoder
self.decoder = decoder
self.pad_idx = 0
self.target_start_token = target_start_token
self.target_end_token = target_end_token
self.max_length = max_length
assert encoder.hidden_size == decoder.hidden_size
def create_mask(self, source_seq):
mask = source_seq != self.pad_idx
return mask
def forward(
self, source_seq, source_seq_len, target_seq, teacher_forcing_ratio=0.5
):
# source_seq: (batch_size, MAX_LENGTH)
# source_seq_len: (batch_size, 1)
# target_seq: (batch_size, MAX_LENGTH)
batch_size = source_seq.size(0)
start_token = self.target_start_token
end_token = self.target_end_token
max_len = self.max_length
target_vocab_size = self.decoder.vocabulary_size
outputs = torch.zeros(max_len, batch_size, target_vocab_size).to(
device
)
if target_seq is None:
assert teacher_forcing_ratio == 0, "Must be zero during inference"
inference = True
else:
inference = False
encoder_outputs, encoder_hidden = self.encoder(
source_seq, source_seq_len
)
decoder_input = (
torch.tensor([[start_token] * batch_size])
.view(batch_size, 1)
.to(device)
)
encoder_hidden_h_t = torch.cat(
[encoder_hidden[0][0], encoder_hidden[0][1]], dim=1
).unsqueeze(dim=0)
decoder_hidden = encoder_hidden_h_t
max_source_len = encoder_outputs.size(1)
mask = self.create_mask(source_seq[:, 0:max_source_len])
for di in range(max_len):
decoder_output, decoder_hidden, _ = self.decoder(
decoder_input, decoder_hidden, encoder_outputs, mask
)
topv, topi = decoder_output.topk(1)
outputs[di] = decoder_output.to(device)
teacher_force = random.random() < teacher_forcing_ratio
decoder_input = (
target_seq[:, di].reshape(batch_size, 1)
if teacher_force
else topi.detach()
)
if inference and decoder_input == end_token:
return outputs[:di]
return outputs
_THAI_G2P = ThaiG2P()
def transliterate(text: str) -> str:
global _THAI_G2P
return _THAI_G2P.g2p(text)
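# A minimal usage sketch of the module-level helper above. The output is
# produced by the bundled ThaiG2P model (downloaded on first use), so the
# exact phoneme string depends on the model version:
#
#     transliterate("สวัสดี")
#     # -> a phoneme string for the word, in the model's phoneme alphabet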
| 12,039 | 31.021277 | 79 | py |
pythainlp-dev/pythainlp/transliterate/tltk.py | pythainlp-dev/pythainlp/transliterate/tltk.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
from tltk.nlp import g2p, th2ipa, th2roman
except ImportError:
    raise ImportError("tltk not found! Please install it with: pip install tltk")
def romanize(text: str) -> str:
"""
    Transliterating Thai text into the Latin alphabet with tltk.
:param str text: Thai text to be romanized
:return: A string of Thai words rendered in the Latin alphabet.
:rtype: str
"""
_temp = th2roman(text)
return _temp[: _temp.rfind(" <s/>")].replace("<s/>", "")
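# A minimal usage sketch; the romanization itself is produced by the
# installed tltk package, so the exact output may vary between versions:
#
#     romanize("ภาษาไทย")  # -> a Latin-alphabet rendering of the input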
def tltk_g2p(text: str) -> str:
_temp = g2p(text).split("<tr/>")[1].replace("|<s/>", "").replace("|", " ")
return _temp.replace("<s/>", "")
def tltk_ipa(text: str) -> str:
_temp = th2ipa(text)
return _temp[: _temp.rfind(" <s/>")].replace("<s/>", "")
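# A minimal sketch of the two wrappers above; both delegate to tltk, so
# the exact transcriptions depend on the installed tltk version:
#
#     tltk_g2p("ภาษาไทย")   # grapheme-to-phoneme string from tltk.nlp.g2p
#     tltk_ipa("ภาษาไทย")   # IPA transcription from tltk.nlp.th2ipa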
| 1,370 | 32.439024 | 80 | py |
pythainlp-dev/pythainlp/transliterate/w2p.py | pythainlp-dev/pythainlp/transliterate/w2p.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Thai Word-to-Phoneme (Thai W2P)
GitHub : https://github.com/wannaphong/Thai_W2P
"""
from typing import Union
import numpy as np
from pythainlp.corpus import download, get_corpus_path
_GRAPHEMES = list(
"พจใงต้ืฮแาฐฒฤๅูศฅถฺฎหคสุขเึดฟำฝยลอ็ม"
+ " ณิฑชฉซทรฏฬํัฃวก่ป์ผฆบี๊ธญฌษะไ๋นโภ?"
)
_PHONEMES = list(
"-พจใงต้ืฮแาฐฒฤูศฅถฺฎหคสุขเึดฟำฝยลอ็ม"
+ " ณิฑชฉซทรํฬฏ–ัฃวก่ปผ์ฆบี๊ธฌญะไษ๋นโภ?"
)
_MODEL_NAME = "thai_w2p"
class _Hparams:
batch_size = 256
enc_maxlen = 30 * 2
dec_maxlen = 40 * 2
num_epochs = 50 * 2
hidden_units = 64 * 8
emb_units = 64 * 4
graphemes = ["<pad>", "<unk>", "</s>"] + _GRAPHEMES
phonemes = ["<pad>", "<unk>", "<s>", "</s>"] + _PHONEMES
lr = 0.001
hp = _Hparams()
def _load_vocab():
g2idx = {g: idx for idx, g in enumerate(hp.graphemes)}
idx2g = {idx: g for idx, g in enumerate(hp.graphemes)}
p2idx = {p: idx for idx, p in enumerate(hp.phonemes)}
idx2p = {idx: p for idx, p in enumerate(hp.phonemes)}
# note that g and p mean grapheme and phoneme, respectively.
return g2idx, idx2g, p2idx, idx2p
class Thai_W2P(object):
def __init__(self):
super().__init__()
self.graphemes = hp.graphemes
self.phonemes = hp.phonemes
self.g2idx, self.idx2g, self.p2idx, self.idx2p = _load_vocab()
self.checkpoint = get_corpus_path(_MODEL_NAME, version="0.2")
if self.checkpoint is None:
download(_MODEL_NAME, version="0.2")
self.checkpoint = get_corpus_path(_MODEL_NAME)
self._load_variables()
def _load_variables(self):
self.variables = np.load(self.checkpoint, allow_pickle=True)
# (29, 64). (len(graphemes), emb)
self.enc_emb = self.variables.item().get("encoder.emb.weight")
# (3*128, 64)
self.enc_w_ih = self.variables.item().get("encoder.rnn.weight_ih_l0")
# (3*128, 128)
self.enc_w_hh = self.variables.item().get("encoder.rnn.weight_hh_l0")
# (3*128,)
self.enc_b_ih = self.variables.item().get("encoder.rnn.bias_ih_l0")
# (3*128,)
self.enc_b_hh = self.variables.item().get("encoder.rnn.bias_hh_l0")
# (74, 64). (len(phonemes), emb)
self.dec_emb = self.variables.item().get("decoder.emb.weight")
# (3*128, 64)
self.dec_w_ih = self.variables.item().get("decoder.rnn.weight_ih_l0")
# (3*128, 128)
self.dec_w_hh = self.variables.item().get("decoder.rnn.weight_hh_l0")
# (3*128,)
self.dec_b_ih = self.variables.item().get("decoder.rnn.bias_ih_l0")
# (3*128,)
self.dec_b_hh = self.variables.item().get("decoder.rnn.bias_hh_l0")
# (74, 128)
self.fc_w = self.variables.item().get("decoder.fc.weight")
# (74,)
self.fc_b = self.variables.item().get("decoder.fc.bias")
def _sigmoid(self, x):
return 1 / (1 + np.exp(-x))
def _grucell(self, x, h, w_ih, w_hh, b_ih, b_hh):
rzn_ih = np.matmul(x, w_ih.T) + b_ih
rzn_hh = np.matmul(h, w_hh.T) + b_hh
rz_ih, n_ih = (
rzn_ih[:, : rzn_ih.shape[-1] * 2 // 3],
rzn_ih[:, rzn_ih.shape[-1] * 2 // 3 :],
)
rz_hh, n_hh = (
rzn_hh[:, : rzn_hh.shape[-1] * 2 // 3],
rzn_hh[:, rzn_hh.shape[-1] * 2 // 3 :],
)
rz = self._sigmoid(rz_ih + rz_hh)
r, z = np.split(rz, 2, -1)
n = np.tanh(n_ih + r * n_hh)
h = (1 - z) * n + z * h
return h
def _gru(self, x, steps, w_ih, w_hh, b_ih, b_hh, h0=None) -> np.ndarray:
if h0 is None:
h0 = np.zeros((x.shape[0], w_hh.shape[1]), np.float32)
h = h0 # initial hidden state
outputs = np.zeros((x.shape[0], steps, w_hh.shape[1]), np.float32)
for t in range(steps):
h = self._grucell(x[:, t, :], h, w_ih, w_hh, b_ih, b_hh) # (b, h)
outputs[:, t, ::] = h
return outputs
def _encode(self, word: str) -> np.ndarray:
chars = list(word) + ["</s>"]
x = [self.g2idx.get(char, self.g2idx["<unk>"]) for char in chars]
x = np.take(self.enc_emb, np.expand_dims(x, 0), axis=0)
return x
def _short_word(self, word: str) -> Union[str, None]:
self.word = word
if self.word.endswith("."):
self.word = self.word.replace(".", "")
self.word = "-".join([i + "อ" for i in list(self.word)])
return self.word
return None
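    # An illustrative trace of _short_word (following the code above): for
    # an abbreviation ending with ".", each letter is spelled with "อ" and
    # joined with "-", e.g. _short_word("กทม.") returns "กอ-ทอ-มอ"; for any
    # other input it returns None and _predict falls through to the
    # encoder-decoder model.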
    def _predict(self, word: str) -> Union[str, list]:
short_word = self._short_word(word)
if short_word is not None:
return short_word
# encoder
enc = self._encode(word)
enc = self._gru(
enc,
len(word) + 1,
self.enc_w_ih,
self.enc_w_hh,
self.enc_b_ih,
self.enc_b_hh,
h0=np.zeros((1, self.enc_w_hh.shape[-1]), np.float32),
)
last_hidden = enc[:, -1, :]
# decoder
dec = np.take(self.dec_emb, [2], axis=0) # 2: <s>
h = last_hidden
preds = []
for _ in range(20):
h = self._grucell(
dec,
h,
self.dec_w_ih,
self.dec_w_hh,
self.dec_b_ih,
self.dec_b_hh,
) # (b, h)
logits = np.matmul(h, self.fc_w.T) + self.fc_b
pred = logits.argmax()
if pred == 3:
break
preds.append(pred)
dec = np.take(self.dec_emb, [pred], axis=0)
preds = [self.idx2p.get(idx, "<unk>") for idx in preds]
return preds
def __call__(self, word: str) -> str:
if not any(letter in word for letter in self.graphemes):
pron = [word]
else: # predict for oov
pron = self._predict(word)
return "".join(pron)
_THAI_W2P = Thai_W2P()
def pronunciate(text: str) -> str:
"""
Convert a Thai word to its pronunciation in Thai letters.
Input should be one single word.
    :param str text: Thai text to be pronounced
:return: A string of Thai letters indicating
how the input text should be pronounced.
"""
global _THAI_W2P
return _THAI_W2P(text)
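# A minimal usage sketch; the result comes from the bundled Thai W2P model
# (downloaded on first use), so exact outputs depend on the model version:
#
#     pronunciate("โทรศัพท์")
#     # -> a Thai-letter pronunciation string for the word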
| 6,890 | 30.180995 | 78 | py |
pythainlp-dev/pythainlp/transliterate/wunsen.py | pythainlp-dev/pythainlp/transliterate/wunsen.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Transliterating Japanese/Korean/Mandarin/Vietnamese romanization text
to Thai text
By Wunsen
:See Also:
* `GitHub \
<https://github.com/cakimpei/wunsen>`_
"""
from wunsen import ThapSap
class WunsenTransliterate:
"""
Transliterating Japanese/Korean/Mandarin/Vietnamese romanization text
to Thai text
by Wunsen
:See Also:
* `GitHub \
<https://github.com/cakimpei/wunsen>`_
"""
def __init__(self) -> None:
self.thap_value = None
self.lang = None
self.jp_input = None
self.zh_sandhi = None
self.system = None
def transliterate(
self,
text: str,
lang: str,
jp_input: str = None,
zh_sandhi: bool = None,
system: str = None,
):
"""
Use Wunsen for transliteration
        :param str text: text to be transliterated into Thai text.
:param str lang: source language
:param str jp_input: japanese input method (for japanese only)
:param bool zh_sandhi: mandarin third tone sandhi option
(for mandarin only)
:param str system: transliteration system (for japanese and
mandarin only)
:return: Thai text
:rtype: str
:Options for lang:
* *jp* - Japanese (from Hepburn romanization)
* *ko* - Korean (from Revised Romanization)
* *vi* - Vietnamese (Latin script)
* *zh* - Mandarin (from Hanyu Pinyin)
:Options for jp_input:
* *Hepburn-no diacritic* - Hepburn-no diacritic (without macron)
:Options for zh_sandhi:
* *True* - apply third tone sandhi rule
* *False* - do not apply third tone sandhi rule
:Options for system:
* *ORS61* - for Japanese หลักเกณฑ์การทับศัพท์ภาษาญี่ปุ่น
(สำนักงานราชบัณฑิตยสภา พ.ศ. 2561)
* *RI35* - for Japanese หลักเกณฑ์การทับศัพท์ภาษาญี่ปุ่น
(ราชบัณฑิตยสถาน พ.ศ. 2535)
* *RI49* - for Mandarin หลักเกณฑ์การทับศัพท์ภาษาจีน
(ราชบัณฑิตยสถาน พ.ศ. 2549)
* *THC43* - for Mandarin เกณฑ์การถ่ายทอดเสียงภาษาจีนแมนดาริน
ด้วยอักขรวิธีไทย (คณะกรรมการสืบค้นประวัติศาสตร์ไทยในเอกสาร
ภาษาจีน พ.ศ. 2543)
:Example:
::
from pythainlp.transliterate.wunsen import WunsenTransliterate
wt = WunsenTransliterate()
wt.transliterate("ohayō", lang="jp")
# output: 'โอฮาโย'
wt.transliterate(
"ohayou",
lang="jp",
jp_input="Hepburn-no diacritic"
)
# output: 'โอฮาโย'
wt.transliterate("ohayō", lang="jp", system="RI35")
# output: 'โอะฮะโย'
wt.transliterate("annyeonghaseyo", lang="ko")
# output: 'อันนย็องฮาเซโย'
wt.transliterate("xin chào", lang="vi")
# output: 'ซีน จ่าว'
wt.transliterate("ni3 hao3", lang="zh")
# output: 'หนี เห่า'
wt.transliterate("ni3 hao3", lang="zh", zh_sandhi=False)
# output: 'หนี่ เห่า'
wt.transliterate("ni3 hao3", lang="zh", system="RI49")
# output: 'หนี ห่าว'
"""
if (
self.lang != lang
or self.jp_input != jp_input
or self.zh_sandhi != zh_sandhi
or self.system != system
):
if lang == "jp":
self.jp_input = jp_input
self.zh_sandhi = None
self.system = system
elif lang == "zh":
self.jp_input = None
self.zh_sandhi = zh_sandhi
self.system = system
elif lang == "ko" or lang == "vi":
self.jp_input = None
self.zh_sandhi = None
self.system = None
else:
raise NotImplementedError(
"The %s language is not implemented." % lang
)
self.lang = lang
input_lang = lang
if input_lang == "jp":
input_lang = "ja"
setting = {}
if self.jp_input is not None:
setting.update({"input": self.jp_input})
if self.zh_sandhi is not None:
setting.update({"option": {"sandhi": self.zh_sandhi}})
if self.system is not None:
setting.update({"system": self.system})
self.thap_value = ThapSap(input_lang, **setting)
return self.thap_value.thap(text)
| 5,232 | 32.33121 | 76 | py |
pythainlp-dev/pythainlp/ulmfit/__init__.py | pythainlp-dev/pythainlp/ulmfit/__init__.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Universal Language Model Fine-tuning for Text Classification (ULMFiT).
Code by Charin Polpanumas
https://github.com/cstorm125/thai2fit/
Some pre-processing functions are from fastai (Apache 2.0)
https://github.com/fastai/fastai/blob/master/fastai/text/transform.py
Universal Language Model Fine-tuning for Text Classification
https://arxiv.org/abs/1801.06146
"""
__all__ = [
"THWIKI_LSTM",
"ThaiTokenizer",
"document_vector",
"merge_wgts",
"post_rules_th",
"post_rules_th_sparse",
"pre_rules_th",
"pre_rules_th_sparse",
"process_thai",
"fix_html",
"lowercase_all",
"remove_space",
"replace_rep_after",
"replace_rep_nonum",
"replace_url",
"replace_wrep_post",
"replace_wrep_post_nonum",
"rm_brackets",
"rm_useless_newlines",
"rm_useless_spaces",
"spec_add_spaces",
"ungroup_emoji",
]
from pythainlp.ulmfit.core import (
THWIKI_LSTM,
document_vector,
merge_wgts,
post_rules_th,
post_rules_th_sparse,
pre_rules_th,
pre_rules_th_sparse,
process_thai,
)
from pythainlp.ulmfit.preprocess import (
fix_html,
lowercase_all,
remove_space,
replace_rep_after,
replace_rep_nonum,
replace_url,
replace_wrep_post,
replace_wrep_post_nonum,
rm_brackets,
rm_useless_newlines,
rm_useless_spaces,
spec_add_spaces,
ungroup_emoji,
)
from pythainlp.ulmfit.tokenizer import ThaiTokenizer
| 2,060 | 25.088608 | 74 | py |
pythainlp-dev/pythainlp/ulmfit/core.py | pythainlp-dev/pythainlp/ulmfit/core.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Universal Language Model Fine-tuning for Text Classification (ULMFiT).
"""
import collections
from typing import Callable, Collection
import numpy as np
import torch
from pythainlp.corpus import get_corpus_path
from pythainlp.tokenize import THAI2FIT_TOKENIZER
from pythainlp.ulmfit.preprocess import (
fix_html,
lowercase_all,
remove_space,
replace_rep_after,
replace_rep_nonum,
replace_url,
replace_wrep_post,
replace_wrep_post_nonum,
rm_brackets,
rm_useless_newlines,
rm_useless_spaces,
spec_add_spaces,
ungroup_emoji,
)
from pythainlp.util import reorder_vowels
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
_MODEL_NAME_LSTM = "wiki_lm_lstm"
_ITOS_NAME_LSTM = "wiki_itos_lstm"
# Pretrained model paths
THWIKI_LSTM = dict(
wgts_fname=get_corpus_path(_MODEL_NAME_LSTM),
itos_fname=get_corpus_path(_ITOS_NAME_LSTM),
)
# Preprocessing rules for Thai text
# dense features
pre_rules_th = [
replace_rep_after,
fix_html,
reorder_vowels,
spec_add_spaces,
rm_useless_spaces,
rm_useless_newlines,
rm_brackets,
replace_url,
]
post_rules_th = [replace_wrep_post, ungroup_emoji, lowercase_all]
# sparse features
pre_rules_th_sparse = pre_rules_th[1:] + [replace_rep_nonum]
post_rules_th_sparse = post_rules_th[1:] + [
replace_wrep_post_nonum,
remove_space,
]
def process_thai(
text: str,
pre_rules: Collection = pre_rules_th_sparse,
tok_func: Callable = THAI2FIT_TOKENIZER.word_tokenize,
post_rules: Collection = post_rules_th_sparse,
) -> Collection[str]:
"""
Process Thai texts for models (with sparse features as default)
:param str text: text to be cleaned
:param list[func] pre_rules: rules to apply before tokenization.
:param func tok_func: tokenization function (by default, **tok_func** is
:func:`pythainlp.tokenize.word_tokenize`)
    :param list[func] post_rules: rules to apply after tokenization
:return: a list of cleaned tokenized texts
:rtype: list[str]
:Note:
- The default **pre-rules** consists of :func:`fix_html`,
:func:`pythainlp.util.normalize`,
:func:`spec_add_spaces`,
:func:`rm_useless_spaces`,
:func:`rm_useless_newlines`,
:func:`rm_brackets`
and :func:`replace_rep_nonum`.
- The default **post-rules** consists of :func:`ungroup_emoji`,
:func:`lowercase_all`, :func:`replace_wrep_post_nonum`,
and :func:`remove_space`.
:Example:
1. Use default pre-rules and post-rules:
>>> from pythainlp.ulmfit import process_thai
>>> text = "บ้านนนนน () อยู่นานนานนาน 😂🤣😃😄😅 PyThaiNLP amp; "
>>> process_thai(text)
        ['บ้าน', 'xxrep', ' ', 'อยู่', 'xxwrep', 'นาน', '😂', '🤣',
'😃', '😄', '😅', 'pythainlp', '&']
    2. Modify pre_rules and post_rules arguments with
rules provided in :mod:`pythainlp.ulmfit`:
>>> from pythainlp.ulmfit import (
process_thai,
replace_rep_after,
fix_html,
ungroup_emoji,
replace_wrep_post,
remove_space)
>>>
>>> text = "บ้านนนนน () อยู่นานนานนาน 😂🤣😃😄😅 PyThaiNLP amp; "
>>> process_thai(text,
pre_rules=[replace_rep_after, fix_html],
post_rules=[ungroup_emoji,
replace_wrep_post,
remove_space]
)
['บ้าน', 'xxrep', '5', '()', 'อยู่', 'xxwrep', '2', 'นาน', '😂', '🤣',
'😃', '😄', '😅', 'PyThaiNLP', '&']
"""
res = text
for rule in pre_rules:
res = rule(res)
res = tok_func(res)
for rule in post_rules:
res = rule(res)
return res
def document_vector(text: str, learn, data, agg: str = "mean"):
"""
    This function vectorizes Thai input text into a 400-dimension vector using
:class:`fastai` language model and data bunch.
:meth: `document_vector` get document vector using fastai language model
and data bunch
:param str text: text to be vectorized with :class:`fastai` language model.
:param learn: :class:`fastai` language model learner
:param data: :class:`fastai` data bunch
:param str agg: name of aggregation methods for word embeddings
        The available methods are "mean" and "sum"
:return: :class:`numpy.array` of document vector sized 400 based on
the encoder of the model
:rtype: :class:`numpy.ndarray((1, 400))`
:Example:
    >>> from pythainlp.ulmfit import document_vector
>>> from fastai import *
>>> from fastai.text import *
>>>
>>> # Load Data Bunch
>>> data = load_data(MODEL_PATH, 'thwiki_lm_data.pkl')
>>>
>>> # Initialize language_model_learner
>>> config = dict(emb_sz=400, n_hid=1550, n_layers=4, pad_token=1,
qrnn=False, tie_weights=True, out_bias=True, output_p=0.25,
hidden_p=0.1, input_p=0.2, embed_p=0.02, weight_p=0.15)
>>> trn_args = dict(drop_mult=0.9, clip=0.12, alpha=2, beta=1)
>>> learn = language_model_learner(data, AWD_LSTM, config=config,
pretrained=False, **trn_args)
>>> document_vector('วันนี้วันดีปีใหม่', learn, data)
:See Also:
* A notebook showing how to train `ulmfit` language model and its
usage, `Jupyter Notebook \
<https://github.com/cstorm125/thai2fit/blob/master/thwiki_lm/word2vec_examples.ipynb>`_
"""
s = THAI2FIT_TOKENIZER.word_tokenize(text)
t = torch.tensor(data.vocab.numericalize(s), requires_grad=False).to(
device
)
m = learn.model[0].encoder.to(device)
res = m(t).cpu().detach().numpy()
if agg == "mean":
res = res.mean(0)
elif agg == "sum":
res = res.sum(0)
else:
raise ValueError("Aggregate by mean or sum")
return res
def merge_wgts(em_sz, wgts, itos_pre, itos_new):
"""
This function is to insert new vocab into an existing model named `wgts`
and update the model's weights for new vocab with the average embedding.
:meth: `merge_wgts` insert pretrained weights and vocab into a new set
of weights and vocab; use average if vocab not in pretrained vocab
:param int em_sz: embedding size
:param wgts: torch model weights
:param list itos_pre: pretrained list of vocab
:param list itos_new: list of new vocab
:return: merged torch model weights
:Example:
::
from pythainlp.ulmfit import merge_wgts
import torch
wgts = {'0.encoder.weight': torch.randn(5,3)}
itos_pre = ["แมว", "คน", "หนู"]
itos_new = ["ปลา", "เต่า", "นก"]
em_sz = 3
merge_wgts(em_sz, wgts, itos_pre, itos_new)
# output:
# {'0.encoder.weight': tensor([[0.5952, 0.4453, 0.0011],
# [0.5952, 0.4453, 0.0011],
# [0.5952, 0.4453, 0.0011]]),
# '0.encoder_dp.emb.weight': tensor([[0.5952, 0.4453, 0.0011],
# [0.5952, 0.4453, 0.0011],
# [0.5952, 0.4453, 0.0011]]),
# '1.decoder.weight': tensor([[0.5952, 0.4453, 0.0011],
# [0.5952, 0.4453, 0.0011],
# [0.5952, 0.4453, 0.0011]])}
"""
vocab_size = len(itos_new)
enc_wgts = wgts["0.encoder.weight"].numpy()
# Average weight of encoding
row_m = enc_wgts.mean(0)
stoi_pre = collections.defaultdict(
lambda: -1, {v: k for k, v in enumerate(itos_pre)}
)
# New embedding based on classification dataset
new_w = np.zeros((vocab_size, em_sz), dtype=np.float32)
for i, w in enumerate(itos_new):
r = stoi_pre[w]
        # Use pretrained embedding if present; else use the average
new_w[i] = enc_wgts[r] if r >= 0 else row_m
wgts["0.encoder.weight"] = torch.tensor(new_w)
wgts["0.encoder_dp.emb.weight"] = torch.tensor(np.copy(new_w))
wgts["1.decoder.weight"] = torch.tensor(np.copy(new_w))
return wgts
| 8,731 | 31.340741 | 97 | py |
pythainlp-dev/pythainlp/ulmfit/preprocess.py | pythainlp-dev/pythainlp/ulmfit/preprocess.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Preprocessing for ULMFiT
"""
import html
import re
from typing import Collection, List
import emoji
_TK_UNK = "xxunk"
_TK_REP = "xxrep"
_TK_WREP = "xxwrep"
_TK_END = "xxend"
_TK_URL = "xxurl"
def replace_url(text: str) -> str:
"""
Replace url in `text` with TK_URL
:param str text: text to replace url
:return: text where urls are replaced
:rtype: str
:Example:
>>> from pythainlp.ulmfit import replace_url
>>> replace_url("go to github.com")
go to xxurl
"""
URL_PATTERN = r"""(?i)\b((?:https?:(?:/{1,3}|[a-z0-9%])|[a-z0-9.\-]+[.](?:com|net|org|edu|gov|mil|aero|asia|biz|cat|coop|info|int|jobs|mobi|museum|name|post|pro|tel|travel|xxx|ac|ad|ae|af|ag|ai|al|am|an|ao|aq|ar|as|at|au|aw|ax|az|ba|bb|bd|be|bf|bg|bh|bi|bj|bm|bn|bo|br|bs|bt|bv|bw|by|bz|ca|cc|cd|cf|cg|ch|ci|ck|cl|cm|cn|co|cr|cs|cu|cv|cx|cy|cz|dd|de|dj|dk|dm|do|dz|ec|ee|eg|eh|er|es|et|eu|fi|fj|fk|fm|fo|fr|ga|gb|gd|ge|gf|gg|gh|gi|gl|gm|gn|gp|gq|gr|gs|gt|gu|gw|gy|hk|hm|hn|hr|ht|hu|id|ie|il|im|in|io|iq|ir|is|it|je|jm|jo|jp|ke|kg|kh|ki|km|kn|kp|kr|kw|ky|kz|la|lb|lc|li|lk|lr|ls|lt|lu|lv|ly|ma|mc|md|me|mg|mh|mk|ml|mm|mn|mo|mp|mq|mr|ms|mt|mu|mv|mw|mx|my|mz|na|nc|ne|nf|ng|ni|nl|no|np|nr|nu|nz|om|pa|pe|pf|pg|ph|pk|pl|pm|pn|pr|ps|pt|pw|py|qa|re|ro|rs|ru|rw|sa|sb|sc|sd|se|sg|sh|si|sj|Ja|sk|sl|sm|sn|so|sr|ss|st|su|sv|sx|sy|sz|tc|td|tf|tg|th|tj|tk|tl|tm|tn|to|tp|tr|tt|tv|tw|tz|ua|ug|uk|us|uy|uz|va|vc|ve|vg|vi|vn|vu|wf|ws|ye|yt|yu|za|zm|zw)/)(?:[^\s()<>{}\[\]]+|\([^\s()]*?\([^\s()]+\)[^\s()]*?\)|\([^\s]+?\))+(?:\([^\s()]*?\([^\s()]+\)[^\s()]*?\)|\([^\s]+?\)|[^\s`!()\[\]{};:'".,<>?«»“”‘’])|(?:(?<!@)[a-z0-9]+(?:[.\-][a-z0-9]+)*[.](?:com|net|org|edu|gov|mil|aero|asia|biz|cat|coop|info|int|jobs|mobi|museum|name|post|pro|tel|travel|xxx|ac|ad|ae|af|ag|ai|al|am|an|ao|aq|ar|as|at|au|aw|ax|az|ba|bb|bd|be|bf|bg|bh|bi|bj|bm|bn|bo|br|bs|bt|bv|bw|by|bz|ca|cc|cd|cf|cg|ch|ci|ck|cl|cm|cn|co|cr|cs|cu|cv|cx|cy|cz|dd|de|dj|dk|dm|do|dz|ec|ee|eg|eh|er|es|et|eu|fi|fj|fk|fm|fo|fr|ga|gb|gd|ge|gf|gg|gh|gi|gl|gm|gn|gp|gq|gr|gs|gt|gu|gw|gy|hk|hm|hn|hr|ht|hu|id|ie|il|im|in|io|iq|ir|is|it|je|jm|jo|jp|ke|kg|kh|ki|km|kn|kp|kr|kw|ky|kz|la|lb|lc|li|lk|lr|ls|lt|lu|lv|ly|ma|mc|md|me|mg|mh|mk|ml|mm|mn|mo|mp|mq|mr|ms|mt|mu|mv|mw|mx|my|mz|na|nc|ne|nf|ng|ni|nl|no|np|nr|nu|nz|om|pa|pe|pf|pg|ph|pk|pl|pm|pn|pr|ps|pt|pw|py|qa|re|ro|rs|ru|rw|sa|sb|sc|sd|se|sg|sh|si|sj|Ja|sk|sl|sm|sn|so|sr|ss|st|su|sv|sx|sy|sz|tc|td|tf|tg|th|tj|tk|tl|tm|tn|to|tp|tr|tt|tv|tw|tz|ua|ug|uk|us|uy|uz|va|vc|ve|vg|vi|vn|vu|wf|ws|ye|yt|yu|za|zm|zw)\b/?(?!@)))"""
return re.sub(URL_PATTERN, _TK_URL, text)
def fix_html(text: str) -> str:
"""
    Replace HTML artifacts in `text`. (code from `fastai`)
:param str text: text to replace html string
:return: text where html strings are replaced
:rtype: str
:Example:
>>> from pythainlp.ulmfit import fix_html
>>> fix_html("Anbsp;amp;nbsp;B @.@ ")
A & B.
"""
re1 = re.compile(r" +")
text = (
text.replace("#39;", "'")
.replace("amp;", "&")
.replace("#146;", "'")
.replace("nbsp;", " ")
.replace("#36;", "$")
.replace("\\n", "\n")
.replace("quot;", "'")
.replace("<br />", "\n")
.replace('\\"', '"')
.replace("<unk>", _TK_UNK)
.replace(" @.@ ", ".")
.replace(" @-@ ", "-")
.replace(" @,@ ", ",")
.replace("\\", " \\ ")
)
return re1.sub(" ", html.unescape(text))
def rm_useless_spaces(text: str) -> str:
"""Remove multiple spaces in `text`. (code from `fastai`)"""
return re.sub(" {2,}", " ", text)
def spec_add_spaces(text: str) -> str:
"""Add spaces around / and # in `text`. \n (code from `fastai`)"""
return re.sub(r"([/#\n])", r" \1 ", text)
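# Small illustrative traces of the two helpers above (derived directly from
# their regular expressions):
#
#     rm_useless_spaces("ไทย    ไทย")   # -> "ไทย ไทย"
#     spec_add_spaces("ก/ข#ค")          # -> "ก / ข # ค"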
def replace_rep_after(text: str) -> str:
"""
Replace repetitions at the character level in `text` after the repetition.
    This is done to prevent such cases as 'น้อยยยยยยยย' becoming 'น้อ xxrep 8 ย';
    instead it will retain the word as 'น้อย xxrep 8'.
:param str text: input text to replace character repetition
:return: text with repetitive token **xxrep** and the counter
after character repetition
:rtype: str
:Example:
>>> from pythainlp.ulmfit import replace_rep_after
>>>
>>> text = "กาาาาาาา"
>>> replace_rep_after(text)
'กาxxrep7 '
"""
def _replace_rep(m):
c, cc = m.groups()
return f"{c}{_TK_REP}{len(cc)+1} "
re_rep = re.compile(r"(\S)(\1{3,})")
return re_rep.sub(_replace_rep, text)
def replace_wrep_post(toks: Collection[str]) -> List[str]:
"""
    Replace repetitive words post tokenization;
fastai `replace_wrep` does not work well with Thai.
:param list[str] toks: list of tokens
:return: list of tokens where **xxwrep** token and the counter
is added in front of repetitive words.
:rtype: list[str]
:Example:
    >>> from pythainlp.ulmfit import replace_wrep_post
>>>
>>> toks = ["กา", "น้ำ", "น้ำ", "น้ำ", "น้ำ"]
>>> replace_wrep_post(toks)
['กา', 'xxwrep', '3', 'น้ำ']
"""
previous_word = None
rep_count = 0
res = []
for current_word in toks + [_TK_END]:
if current_word == previous_word:
rep_count += 1
elif (current_word != previous_word) & (rep_count > 0):
res += [_TK_WREP, str(rep_count), previous_word]
rep_count = 0
else:
res.append(previous_word)
previous_word = current_word
return res[1:]
def rm_useless_newlines(text: str) -> str:
"Remove multiple newlines in `text`."
return re.sub(r"[\n]{2,}", " ", text)
def rm_brackets(text: str) -> str:
"Remove all empty brackets and artifacts within brackets from `text`."
# remove empty brackets
new_line = re.sub(r"\(\)", "", text)
new_line = re.sub(r"\{\}", "", new_line)
new_line = re.sub(r"\[\]", "", new_line)
    # brackets with only punctuation
new_line = re.sub(r"\([^a-zA-Z0-9ก-๙]+\)", "", new_line)
new_line = re.sub(r"\{[^a-zA-Z0-9ก-๙]+\}", "", new_line)
new_line = re.sub(r"\[[^a-zA-Z0-9ก-๙]+\]", "", new_line)
    # artifacts after (
new_line = re.sub(
r"(?<=\()[^a-zA-Z0-9ก-๙]+(?=[a-zA-Z0-9ก-๙])", "", new_line
)
new_line = re.sub(
r"(?<=\{)[^a-zA-Z0-9ก-๙]+(?=[a-zA-Z0-9ก-๙])", "", new_line
)
new_line = re.sub(
r"(?<=\[)[^a-zA-Z0-9ก-๙]+(?=[a-zA-Z0-9ก-๙])", "", new_line
)
# artifacts before )
new_line = re.sub(
r"(?<=[a-zA-Z0-9ก-๙])[^a-zA-Z0-9ก-๙]+(?=\))", "", new_line
)
new_line = re.sub(
r"(?<=[a-zA-Z0-9ก-๙])[^a-zA-Z0-9ก-๙]+(?=\})", "", new_line
)
new_line = re.sub(
r"(?<=[a-zA-Z0-9ก-๙])[^a-zA-Z0-9ก-๙]+(?=\])", "", new_line
)
return new_line
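# A small illustrative trace of rm_brackets (derived from the regular
# expressions above): empty brackets and bracket contents made only of
# punctuation are dropped, while normally bracketed text is kept:
#
#     rm_brackets("()ข้อความ( )(กข)")   # -> "ข้อความ(กข)"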
def ungroup_emoji(toks: Collection[str]) -> List[str]:
"""
    Ungroup Zero Width Joiner (ZWJ) emojis
See https://emojipedia.org/emoji-zwj-sequence/
"""
res = []
for tok in toks:
if emoji.emoji_count(tok) == len(tok):
res.extend(list(tok))
else:
res.append(tok)
return res
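# A small illustrative trace of ungroup_emoji: a token made up purely of
# emoji is split into individual characters, other tokens pass through
# unchanged (the check relies on the installed `emoji` package):
#
#     ungroup_emoji(["🤣🤣", "ไทย"])   # -> ["🤣", "🤣", "ไทย"]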
def lowercase_all(toks: Collection[str]) -> List[str]:
"""
Lowercase all English words;
English words in Thai texts don't usually have nuances of capitalization.
"""
return [tok.lower() for tok in toks]
def replace_rep_nonum(text: str) -> str:
"""
Replace repetitions at the character level in `text` after the repetition.
    This is done to prevent such cases as 'น้อยยยยยยยย' becoming 'น้อ xxrep ย';
instead it will retain the word as 'น้อย xxrep '
:param str text: input text to replace character repetition
:return: text with repetitive token **xxrep** after
character repetition
:rtype: str
:Example:
>>> from pythainlp.ulmfit import replace_rep_nonum
>>>
>>> text = "กาาาาาาา"
>>> replace_rep_nonum(text)
'กา xxrep '
"""
def _replace_rep(m):
c, _ = m.groups()
return f"{c} {_TK_REP} "
re_rep = re.compile(r"(\S)(\1{3,})")
return re_rep.sub(_replace_rep, text)
def replace_wrep_post_nonum(toks: Collection[str]) -> List[str]:
"""
    Replace repetitive words post tokenization;
fastai `replace_wrep` does not work well with Thai.
:param list[str] toks: list of tokens
:return: list of tokens where **xxwrep** token is added in front of
repetitive words.
:rtype: list[str]
:Example:
>>> from pythainlp.ulmfit import replace_wrep_post_nonum
>>>
>>> toks = ["กา", "น้ำ", "น้ำ", "น้ำ", "น้ำ"]
>>> replace_wrep_post_nonum(toks)
['กา', 'xxwrep', 'น้ำ']
"""
previous_word = None
rep_count = 0
res = []
for current_word in toks + [_TK_END]:
if current_word == previous_word:
rep_count += 1
elif (current_word != previous_word) & (rep_count > 0):
res += [_TK_WREP, previous_word]
rep_count = 0
else:
res.append(previous_word)
previous_word = current_word
return res[1:]
def remove_space(toks: Collection[str]) -> List[str]:
"""
    Do not include spaces for bag-of-words models.
:param list[str] toks: list of tokens
:return: list of tokens where space tokens (" ") are filtered out
:rtype: list[str]
"""
res = []
for t in toks:
t = t.strip()
if t:
res.append(t)
return res
| 10,201 | 32.781457 | 2,015 | py |
pythainlp-dev/pythainlp/ulmfit/tokenizer.py | pythainlp-dev/pythainlp/ulmfit/tokenizer.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tokenizer classes for ULMFiT
"""
from typing import Collection, List
from pythainlp.tokenize import THAI2FIT_TOKENIZER
class BaseTokenizer:
"""Basic class for a tokenizer function. (code from `fastai`)"""
def __init__(self, lang: str):
self.lang = lang
def tokenizer(self, t: str) -> List[str]:
return t.split(" ")
def add_special_cases(self, toks: Collection[str]):
pass
class ThaiTokenizer(BaseTokenizer):
"""
Wrapper around a frozen newmm tokenizer to make it a
:class:`fastai.BaseTokenizer`.
(see: https://docs.fast.ai/text.transform#BaseTokenizer)
"""
def __init__(self, lang: str = "th"):
self.lang = lang
@staticmethod
def tokenizer(text: str) -> List[str]:
"""
This function tokenizes text with *newmm* engine and the dictionary
specifically for `ulmfit` related functions
        (see: `Dictionary file (.txt) \
<https://github.com/PyThaiNLP/pythainlp/blob/dev/pythainlp/corpus/words_th_thai2fit_201810.txt>`_).
:meth: tokenize text with a frozen newmm engine
:param str text: text to tokenize
:return: tokenized text
:rtype: list[str]
:Example:
Using :func:`pythainlp.ulmfit.ThaiTokenizer.tokenizer` is
similar to :func:`pythainlp.tokenize.word_tokenize`
with *ulmfit* engine.
>>> from pythainlp.ulmfit import ThaiTokenizer
>>> from pythainlp.tokenize import word_tokenize
>>>
>>> text = "อาภรณ์, จินตมยปัญญา ภาวนามยปัญญา"
>>> ThaiTokenizer.tokenizer(text)
['อาภรณ์', ',', ' ', 'จิน', 'ตม', 'ย', 'ปัญญา',
' ', 'ภาวนามยปัญญา']
>>>
>>> word_tokenize(text, engine='ulmfit')
['อาภรณ์', ',', ' ', 'จิน', 'ตม', 'ย', 'ปัญญา',
' ', 'ภาวนามยปัญญา']
"""
return THAI2FIT_TOKENIZER.word_tokenize(text)
def add_special_cases(self, toks):
pass
| 2,623 | 31.395062 | 107 | py |
pythainlp-dev/pythainlp/util/__init__.py | pythainlp-dev/pythainlp/util/__init__.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utility functions, like date conversion and digit conversion
"""
__all__ = [
"Trie",
"arabic_digit_to_thai_digit",
"bahttext",
"convert_years",
"collate",
"countthai",
"count_thai_chars",
"dict_trie",
"digit_to_text",
"display_thai_char",
"emoji_to_thai",
"eng_to_thai",
"find_keyword",
"is_native_thai",
"isthai",
"isthaichar",
"normalize",
"now_reign_year",
"num_to_thaiword",
"rank",
"reign_year_to_ad",
"remove_dangling",
"remove_dup_spaces",
"remove_repeat_vowels",
"remove_tonemark",
"remove_zw",
"reorder_vowels",
"text_to_arabic_digit",
"text_to_thai_digit",
"thai_digit_to_arabic_digit",
"thai_keyboard_dist",
"thai_strptime",
"thai_strftime",
"thai_to_eng",
"thai_word_tone_detector",
"thaiword_to_date",
"thaiword_to_num",
"thaiword_to_time",
"time_to_thaiword",
"text_to_num",
"tone_detector",
"words_to_num",
"sound_syllable",
"syllable_length",
"syllable_open_close_detector",
"nectec_to_ipa",
"ipa_to_rtgs",
"remove_tone_ipa",
"tis620_to_utf8",
"spell_words",
]
from pythainlp.util.collate import collate
from pythainlp.util.date import (
now_reign_year,
reign_year_to_ad,
thaiword_to_date,
convert_years,
thai_strptime,
)
from pythainlp.util.digitconv import (
arabic_digit_to_thai_digit,
digit_to_text,
text_to_arabic_digit,
text_to_thai_digit,
thai_digit_to_arabic_digit,
)
from pythainlp.util.keyboard import (
eng_to_thai,
thai_keyboard_dist,
thai_to_eng,
)
from pythainlp.util.emojiconv import emoji_to_thai
from pythainlp.util.keywords import find_keyword, rank
from pythainlp.util.normalize import (
normalize,
maiyamok,
remove_dangling,
remove_dup_spaces,
remove_repeat_vowels,
remove_tonemark,
remove_zw,
reorder_vowels,
)
from pythainlp.util.numtoword import bahttext, num_to_thaiword
from pythainlp.util.strftime import thai_strftime
from pythainlp.util.thai import (
countthai,
count_thai_chars,
display_thai_char,
isthai,
isthaichar,
thai_word_tone_detector,
)
from pythainlp.util.thaiwordcheck import is_native_thai
from pythainlp.util.time import thaiword_to_time, time_to_thaiword
from pythainlp.util.trie import Trie, dict_trie
from pythainlp.util.wordtonum import thaiword_to_num, text_to_num, words_to_num
from pythainlp.util.syllable import (
sound_syllable,
tone_detector,
syllable_length,
syllable_open_close_detector,
)
from pythainlp.util.phoneme import nectec_to_ipa, ipa_to_rtgs, remove_tone_ipa
from pythainlp.util.encoding import tis620_to_utf8
import pythainlp.util.spell_words as spell_words
| 3,444 | 25.914063 | 79 | py |
pythainlp-dev/pythainlp/util/collate.py | pythainlp-dev/pythainlp/util/collate.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Thai collation (sort according to Thai dictionary order)
Simple implementation using regular expressions
"""
import re
from typing import Iterable, List
_RE_TONE = re.compile(r"[็-์]")
_RE_LV_C = re.compile(r"([เ-ไ])([ก-ฮ])")
def _thkey(word: str) -> str:
cv = _RE_TONE.sub("", word) # remove tone
cv = _RE_LV_C.sub("\\2\\1", cv) # switch lead vowel
tone = _RE_TONE.sub(" ", word) # just tone
return cv + tone
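# A small illustrative trace of the sort key (derived from the two regular
# expressions above): for "ไก่" the tone mark is stripped and the leading
# vowel is swapped behind its consonant, so _thkey("ไก่") == "กไ" + "ไก " ==
# "กไไก ", which lets "ไก่" sort under "ก" as in a Thai dictionary.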
def collate(data: Iterable, reverse: bool = False) -> List[str]:
"""
This function sorts strings (almost) according to Thai dictionary.
Important notes: this implementation ignores tone marks and symbols
:param data: a list of words to be sorted
:type data: Iterable
:param reverse: If `reverse` is set to **True** the result will be
sorted in descending order. Otherwise, the result
will be sorted in ascending order, defaults to False
:type reverse: bool, optional
:return: a list of strings, sorted alphabetically, (almost) according to
Thai dictionary
:rtype: List[str]
:Example:
::
from pythainlp.util import collate
collate(['ไก่', 'เกิด', 'กาล', 'เป็ด', 'หมู', 'วัว', 'วันที่'])
# output: ['กาล', 'เกิด', 'ไก่', 'เป็ด', 'วันที่', 'วัว', 'หมู']
collate(['ไก่', 'เกิด', 'กาล', 'เป็ด', 'หมู', 'วัว', 'วันที่'], \\
reverse=True)
# output: ['หมู', 'วัว', 'วันที่', 'เป็ด', 'ไก่', 'เกิด', 'กาล']
"""
return sorted(data, key=_thkey, reverse=reverse)
| 2,176 | 33.555556 | 77 | py |
pythainlp-dev/pythainlp/util/date.py | pythainlp-dev/pythainlp/util/date.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Thai date/time conversion.
Note: Does not take into account the change of new year's day in Thailand
"""
# BE is the Buddhist Era (พ.ศ.)
# AD is Anno Domini (ค.ศ.)
# AH is the Islamic (Hijri) year; add 1122 to convert it to the Buddhist Era
# Years before Thailand changed its New Year's Day are not supported
__all__ = [
"convert_years",
"thai_abbr_months",
"thai_abbr_weekdays",
"thai_full_months",
"thai_full_weekdays",
"thai_strptime",
"thaiword_to_date",
]
from datetime import datetime, timedelta
from typing import Union
import re
try:
from zoneinfo import ZoneInfo
except ImportError:
from backports.zoneinfo import ZoneInfo
thai_abbr_weekdays = ["จ", "อ", "พ", "พฤ", "ศ", "ส", "อา"]
thai_full_weekdays = [
"วันจันทร์",
"วันอังคาร",
"วันพุธ",
"วันพฤหัสบดี",
"วันศุกร์",
"วันเสาร์",
"วันอาทิตย์",
]
thai_abbr_months = [
"ม.ค.",
"ก.พ.",
"มี.ค.",
"เม.ย.",
"พ.ค.",
"มิ.ย.",
"ก.ค.",
"ส.ค.",
"ก.ย.",
"ต.ค.",
"พ.ย.",
"ธ.ค.",
]
thai_full_months = [
"มกราคม",
"กุมภาพันธ์",
"มีนาคม",
"เมษายน",
"พฤษภาคม",
"มิถุนายน",
"กรกฎาคม",
"สิงหาคม",
"กันยายน",
"ตุลาคม",
"พฤศจิกายน",
"ธันวาคม",
]
thai_full_month_lists = [
["มกราคม", "มกรา", "ม.ค.", "01", "1"],
["กุมภาพันธ์", "กุมภา", "ก.พ.", "02", "2"],
["มีนาคม", "มีนา", "มี.ค.", "03", "3"],
["เมษายน", "เมษา", "เม.ย.", "04", "4"],
["พฤษภาคม", "พฤษภา", "พ.ค.", "05", "5"],
["มิถุนายน", "มิถุนา", "มิ.ย.", "06", "6"],
["กรกฎาคม", "ก.ค.", "07", "7"],
["สิงหาคม", "สิงหา", "ส.ค.", "08", "8"],
["กันยายน", "กันยา", "ก.ย.", "09", "9"],
["ตุลาคม", "ตุลา", "ต.ค.", "10"],
["พฤศจิกายน", "พฤศจิกา", "พ.ย.", "11"],
["ธันวาคม", "ธันวา", "ธ.ค.", "12"]
]
thai_full_month_lists_regex = "(" + '|'.join(
[str('|'.join([j for j in i])) for i in thai_full_month_lists]
) + ")"
year_all_regex = r"(\d\d\d\d|\d\d)"
dates_list = "(" + '|'.join(
[str(i) for i in range(32, 0, -1)] + [
"0" + str(i) for i in range(1, 10)
]
) + ")"
_DAY = {
"วันนี้": 0,
"คืนนี้": 0,
"พรุ่งนี้": 1,
"วันพรุ่งนี้": 1,
"คืนถัดจากนี้": 1,
"คืนหน้า": 1,
"มะรืน": 2,
"มะรืนนี้": 2,
"วันมะรืนนี้": 2,
"ถัดจากพรุ่งนี้": 2,
"ถัดจากวันพรุ่งนี้": 2,
"เมื่อวาน": -1,
"เมื่อวานนี้": -1,
"วานนี้": -1,
"เมื่อคืน": -1,
"เมื่อคืนนี้": -1,
"วานซืน": -2,
"เมื่อวานซืน": -2,
"เมื่อวานของเมื่อวาน": -2,
}
def convert_years(year: str, src="be", target="ad") -> str:
"""
Convert years
    :param str year: year to convert
    :param str src: era of the input year
    :param str target: era to convert the year to
    :return: the converted year
    :rtype: str
    **Options for src and target**
* *be* - Buddhist calendar
* *ad* - Anno Domini
* *re* - Rattanakosin era
* *ah* - Anno Hejira
    **Warning**: This function works properly only for years after 1941 \
    because Thailand changed its calendar in 1941.
    If you are a time traveler or a historian, \
    you should take care to use the correct calendar.
"""
output_year = None
if src == "be":
# พ.ศ. - 543 = ค.ศ.
if target == "ad":
output_year = str(int(year) - 543)
# พ.ศ. - 2324 = ร.ศ.
elif target == "re":
output_year = str(int(year) - 2324)
# พ.ศ. - 1122 = ฮ.ศ.
elif target == "ah":
output_year = str(int(year) - 1122)
elif src == "ad":
# ค.ศ. + 543 = พ.ศ.
if target == "be":
output_year = str(int(year) + 543)
# ค.ศ. + 543 - 2324 = ร.ศ.
elif target == "re":
output_year = str(int(year) + 543 - 2324)
# ค.ศ. +543- 1122 = ฮ.ศ.
elif target == "ah":
output_year = str(int(year) + 543 - 1122)
elif src == "re":
# ร.ศ. + 2324 = พ.ศ.
if target == "be":
output_year = str(int(year) + 2324)
# ร.ศ. + 2324 - 543 = ค.ศ.
elif target == "ad":
output_year = str(int(year) + 2324 - 543)
# ร.ศ. + 2324 - 1122 = ฮ.ศ.
elif target == "ah":
output_year = str(int(year) + 2324 - 1122)
elif src == "ah":
# ฮ.ศ. + 1122 = พ.ศ.
if target == "be":
output_year = str(int(year) + 1122)
# ฮ.ศ. +1122 - 543= ค.ศ.
elif target == "ad":
output_year = str(int(year) + 1122 - 543)
# ฮ.ศ. +1122 - 2324 = ร.ศ.
elif target == "re":
output_year = str(int(year) + 1122 - 2324)
if output_year is None:
raise NotImplementedError(
f"This function doesn't support {src} to {target}"
)
return output_year
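# A few illustrative conversions (straight from the arithmetic above):
#
#     convert_years("2566", src="be", target="ad")   # -> "2023"
#     convert_years("2023", src="ad", target="be")   # -> "2566"
#     convert_years("2566", src="be", target="re")   # -> "242"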
def _find_month(text):
for i, m in enumerate(thai_full_month_lists):
for j in m:
if j in text:
return i + 1
def thai_strptime(
text: str,
fmt: str,
year: str = "be",
add_year: int = None,
tzinfo=ZoneInfo("Asia/Bangkok")
):
"""
Thai strptime
:param str text: text
:param str fmt: string containing date and time directives
    :param str year: calendar of the year in the text \
    (ad is Anno Domini and be is Buddhist calendar)
    :param int add_year: base year added to a two-digit year \
    (default: 2500 for be, 2000 for ad)
    :param object tzinfo: tzinfo (default is Asia/Bangkok)
    :return: the parsed date and time as datetime.datetime
    :rtype: datetime.datetime
    Supported format directives:
* *%d* - Day (1 - 31)
* *%B* - Thai month (03, 3, มี.ค., or มีนาคม)
* *%Y* - Year (66, 2566, or 2023)
* *%H* - Hour (0 - 23)
* *%M* - Minute (0 - 59)
* *%S* - Second (0 - 59)
* *%f* - Microsecond
:Example:
::
from pythainlp.util import thai_strptime
thai_strptime("15 ก.ค. 2565 09:00:01","%d %B %Y %H:%M:%S")
# output:
# datetime.datetime(
# 2022,
# 7,
# 15,
# 9,
# 0,
# 1,
# tzinfo=backports.zoneinfo.ZoneInfo(key='Asia/Bangkok')
# )
"""
d = ""
m = ""
y = ""
fmt = fmt.replace("%-m", "%m")
fmt = fmt.replace("%-d", "%d")
fmt = fmt.replace("%b", "%B")
fmt = fmt.replace("%-y", "%y")
data = {}
_old = fmt
if "%d" in fmt:
fmt = fmt.replace("%d", dates_list)
if "%B" in fmt:
fmt = fmt.replace("%B", thai_full_month_lists_regex)
if "%Y" in fmt:
fmt = fmt.replace("%Y", year_all_regex)
if "%H" in fmt:
fmt = fmt.replace("%H", r"(\d\d|\d)")
if "%M" in fmt:
fmt = fmt.replace("%M", r"(\d\d|\d)")
if "%S" in fmt:
fmt = fmt.replace("%S", r"(\d\d|\d)")
if "%f" in fmt:
fmt = fmt.replace("%f", r"(\d+)")
keys = [
i.strip().strip('-').strip(':').strip('.')
for i in _old.split("%") if i != ''
]
y = re.findall(fmt, text)
data = {i: ''.join(list(j)) for i, j in zip(keys, y[0])}
H = 0
M = 0
S = 0
f = 0
d = data['d']
m = _find_month(data['B'])
y = data['Y']
if "H" in keys:
H = data['H']
if "M" in keys:
M = data['M']
if "S" in keys:
S = data['S']
if "f" in keys:
f = data['f']
if int(y) < 100 and year == "be":
if add_year is None:
y = str(2500 + int(y))
else:
y = str(int(add_year) + int(y))
elif int(y) < 100 and year == "ad":
if add_year is None:
y = str(2000 + int(y))
else:
y = str(int(add_year) + int(y))
if year == "be":
y = convert_years(y, src="be", target="ad")
return datetime(
year=int(y),
month=int(m),
day=int(d),
hour=int(H),
minute=int(M),
second=int(S),
microsecond=int(f),
tzinfo=tzinfo
)
def now_reign_year() -> int:
"""
Return the reign year of the 10th King of Chakri dynasty.
:return: reign year of the 10th King of Chakri dynasty.
:rtype: int
:Example:
::
from pythainlp.util import now_reign_year
text = "เป็นปีที่ {reign_year} ในรัชกาลปัจจุบัน"\\
.format(reign_year=now_reign_year())
print(text)
        # output: เป็นปีที่ 4 ในรัชกาลปัจจุบัน
"""
now_ = datetime.now()
return now_.year - 2015
def reign_year_to_ad(reign_year: int, reign: int) -> int:
"""
    Convert reign year to AD.
Return AD year according to the reign year for
the 7th to 10th King of Chakri dynasty, Thailand.
For instance, the AD year of the 4th reign year of the 10th King is 2019.
:param int reign_year: reign year of the King
:param int reign: the reign of the King (i.e. 7, 8, 9, and 10)
:return: the year in AD of the King given the reign and reign year.
:rtype: int
:Example:
::
from pythainlp.util import reign_year_to_ad
print("The 4th reign year of the King Rama X is in", \\
reign_year_to_ad(4, 10))
# output: The 4th reign year of the King Rama X is in 2019
print("The 1st reign year of the King Rama IX is in", \\
reign_year_to_ad(1, 9))
        # output: The 1st reign year of the King Rama IX is in 1946
"""
if int(reign) == 10:
ad = int(reign_year) + 2015
elif int(reign) == 9:
ad = int(reign_year) + 1945
elif int(reign) == 8:
ad = int(reign_year) + 1928
    elif int(reign) == 7:
        ad = int(reign_year) + 1924
    else:
        raise ValueError("reign must be 7, 8, 9, or 10")
    return ad
def thaiword_to_date(
text: str, date: datetime = None
) -> Union[datetime, None]:
"""
Convert Thai relative date to :class:`datetime.datetime`.
:param str text: Thai text contains relative date
:param datetime.datetime date: date (default is datetime.datetime.now())
:return: datetime object, if it can be calculated. Otherwise, None.
:rtype: datetime.datetime
:Example:
thaiword_to_date("พรุ่งนี้")
# output:
# datetime of tomorrow
"""
if text not in _DAY:
return None
day_num = _DAY.get(text)
if not date:
date = datetime.now()
return date + timedelta(days=day_num)
| 10,729 | 25.234719 | 77 | py |
pythainlp-dev/pythainlp/util/digitconv.py | pythainlp-dev/pythainlp/util/digitconv.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Convert digits
"""
_arabic_thai = {
"0": "๐",
"1": "๑",
"2": "๒",
"3": "๓",
"4": "๔",
"5": "๕",
"6": "๖",
"7": "๗",
"8": "๘",
"9": "๙",
}
_thai_arabic = {
"๐": "0",
"๑": "1",
"๒": "2",
"๓": "3",
"๔": "4",
"๕": "5",
"๖": "6",
"๗": "7",
"๘": "8",
"๙": "9",
}
_digit_spell = {
"0": "ศูนย์",
"1": "หนึ่ง",
"2": "สอง",
"3": "สาม",
"4": "สี่",
"5": "ห้า",
"6": "หก",
"7": "เจ็ด",
"8": "แปด",
"9": "เก้า",
}
_spell_digit = {
"ศูนย์": "0",
"หนึ่ง": "1",
"สอง": "2",
"สาม": "3",
"สี่": "4",
"ห้า": "5",
"หก": "6",
"เจ็ด": "7",
"แปด": "8",
"เก้า": "9",
}
_arabic_thai_translate_table = str.maketrans(_arabic_thai)
_thai_arabic_translate_table = str.maketrans(_thai_arabic)
_digit_spell_translate_table = str.maketrans(_digit_spell)
def thai_digit_to_arabic_digit(text: str) -> str:
"""
    This function converts Thai digits (e.g. ๑, ๓, ๑๐) to Arabic digits
    (e.g. 1, 3, 10).
:param str text: Text with Thai digits such as '๑', '๒', '๓'
:return: Text with Thai digits being converted to Arabic digits
such as '1', '2', '3'
:rtype: str
:Example:
::
from pythainlp.util import thai_digit_to_arabic_digit
text = 'เป็นจำนวน ๑๒๓,๔๐๐.๒๕ บาท'
thai_digit_to_arabic_digit(text)
# output: เป็นจำนวน 123,400.25 บาท
"""
if not text or not isinstance(text, str):
return ""
return text.translate(_thai_arabic_translate_table)
def arabic_digit_to_thai_digit(text: str) -> str:
"""
    This function converts Arabic digits (e.g. 1, 3, 10) to Thai digits
    (e.g. ๑, ๓, ๑๐).
:param str text: Text with Arabic digits such as '1', '2', '3'
:return: Text with Arabic digits being converted to Thai digits
such as '๑', '๒', '๓'
:rtype: str
:Example:
::
from pythainlp.util import arabic_digit_to_thai_digit
text = 'เป็นจำนวน 123,400.25 บาท'
arabic_digit_to_thai_digit(text)
# output: เป็นจำนวน ๑๒๓,๔๐๐.๒๕ บาท
"""
if not text or not isinstance(text, str):
return ""
# Convert Arabic to Thai numerals
return text.translate(_arabic_thai_translate_table)
def digit_to_text(text: str) -> str:
"""
:param str text: Text with digits such as '1', '2', '๓', '๔'
:return: Text with digits being spelled out in Thai
"""
if not text or not isinstance(text, str):
return ""
# Convert Thai numerals to Arabic
text = text.translate(_thai_arabic_translate_table)
# Spell out Arabic numerals in Thai text
text = text.translate(_digit_spell_translate_table)
return text
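# A small illustrative trace (both translation tables above are applied in
# sequence, so Thai and Arabic digits may be mixed in the input):
#
#     digit_to_text("๑2๓")   # -> "หนึ่งสองสาม"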
def text_to_arabic_digit(text: str) -> str:
"""
    This function converts Thai spelled-out digits to Arabic digits.
:param text: A digit spelled out in Thai
:return: An Arabic digit such as '1', '2', '3' if the text is
Thai digit spelled out (ศูนย์, หนึ่ง, สอง, ..., เก้า).
Otherwise, it returns an empty string.
:rtype: str
:Example:
::
from pythainlp.util import text_to_arabic_digit
text_to_arabic_digit("ศูนย์")
# output: 0
text_to_arabic_digit("หนึ่ง")
# output: 1
text_to_arabic_digit("แปด")
# output: 8
text_to_arabic_digit("เก้า")
# output: 9
# For text that is not Thai digit spelled out
text_to_arabic_digit("สิบ") == ""
# output: True
text_to_arabic_digit("เก้าร้อย") == ""
# output: True
"""
if not text or text not in _spell_digit:
return ""
return _spell_digit[text]
def text_to_thai_digit(text: str) -> str:
"""
    This function converts Thai spelled-out digits to Thai digits.
:param text: A digit spelled out in Thai
:return: A Thai digit such as '๑', '๒', '๓' if the text is Thai digit
spelled out (ศูนย์, หนึ่ง, สอง, ..., เก้า).
Otherwise, it returns an empty string.
:rtype: str
:Example:
::
from pythainlp.util import text_to_thai_digit
text_to_thai_digit("ศูนย์")
# output: ๐
text_to_thai_digit("หนึ่ง")
# output: ๑
text_to_thai_digit("แปด")
# output: ๘
text_to_thai_digit("เก้า")
# output: ๙
# For text that is not Thai digit spelled out
text_to_thai_digit("สิบ") == ""
# output: True
text_to_thai_digit("เก้าร้อย") == ""
# output: True
"""
return arabic_digit_to_thai_digit(text_to_arabic_digit(text))
| 5,249 | 23.881517 | 74 | py |
pythainlp-dev/pythainlp/util/emojiconv.py | pythainlp-dev/pythainlp/util/emojiconv.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Convert emojis
"""
import re
_emoji_th = {
"😀": "หน้ายิ้มยิงฟัน",
"😁": "ยิ้มยิงฟันตายิ้ม",
"😂": "ร้องไห้ดีใจ",
"😃": "หน้ายิ้มอ้าปาก",
"😄": "หน้ายิ้มตายิ้ม_อ้าปาก",
"😅": "ยิ้มเหงื่อตก",
"😆": "ยิ้มตาหยี",
"😇": "ยิ้มเทวดา",
"😉": "ขยิบตา",
"😊": "หน้ายิ้มตายิ้ม",
"🙂": "หน้ายิ้มบางๆ",
"🙃": "หน้ากลับหัว",
"🤣": "ขำกลิ้ง",
"☺": "หน้ายิ้ม",
"😍": "ตาหัวใจ",
"😗": "หน้าจุ๊บ",
"😘": "หน้าส่งจุ๊บ",
"😙": "หน้ายิ้มส่งจุ๊บ",
"😚": "หยีตาส่งจุ๊บ",
"🤩": "หน้าตาเป็นประกาย",
"🥰": "หน้ายิ้มพร้อมกับหัวใจหลายดวง",
"🥲": "ใบหน้ายิ้มทั้งน้ำตา",
"😋": "แลบลิ้นมุมปาก",
"😛": "แลบลิ้น",
"😜": "แลบลิ้นหน้าทะเล้น",
"😝": "แลบลิ้นตาหยี",
"🤑": "หน้าเห็นแก่เงิน",
"🤪": "หน้าเพี้ยน",
"🤔": "หน้าครุ่นคิด",
"🤗": "ยิ้มกอด",
"🤫": "หน้าบอกให้เงียบ",
"🤭": "เอามือปิดปาก",
"😏": "แสยะยิ้ม",
"😐": "หน้าเฉยๆ",
"😑": "หน้าเย็นชา",
"😒": "หน้าหน่าย",
"😬": "เบะปาก",
"😶": "หน้าไม่มีปาก",
"🙄": "กลอกตา",
"🤐": "รูดซิปปาก",
"🤥": "ขี้โกหก",
"🤨": "หน้าประหลาดใจกับยักคิ้ว",
"😌": "โล่งใจ",
"😔": "คิดไม่ตก",
"😪": "ง่วง",
"😴": "หลับ",
"🤤": "น้ำลายไหล",
"😵": "หน้ามึน",
"😷": "ผ้าคาดปาก",
"🤒": "อมปรอท",
"🤕": "หัวแตก",
"🤢": "หน้าเขียว",
"🤧": "จาม",
"🤮": "หน้าอาเจียน",
"🤯": "ช็อค",
"🥴": "หน้างงงวย",
"🥵": "ร้อนมาก",
"🥶": "หนาวสั่น",
"🤠": "คาวบอยสวมหมวก",
"🥳": "ไปปาร์ตี้",
"🥸": "ปลอมตัว",
"😎": "หน้ายิ้มใส่แว่น",
"🤓": "เด็กเนิร์ด",
"🧐": "หน้ากับแว่นเลนส์เดียว",
"☹": "หน้าบึ้ง",
"😓": "เหงื่อตก",
"😕": "หน้าสับสน",
"😖": "หน้ารำคาญ",
"😞": "หน้าผิดหวัง",
"😟": "หน้ากังวล",
"😢": "ร้องไห้",
"😣": "อดทน",
"😥": "โล่งอก",
"😦": "หน้าบึ้งอ้าปาก",
"😧": "หน้าเจ็บปวด",
"😨": "หวาดกลัว",
"😩": "หน้าอิดโรย",
"😫": "เหนื่อย",
"😭": "ร้องไห้โฮ",
"😮": "อ้าปาก",
"😯": "หน้าจุ๊ๆ",
"😰": "กังวลเหงื่อตก",
"😱": "กลัวกรีดร้อง",
"😲": "หน้าประหลาดใจ",
"😳": "อายหน้าแดง",
"🙁": "หน้าบึ้งเล็กน้อย",
"🥱": "หน้ากำลังหาว",
"🥺": "หน้าอ้อนวอน",
"☠": "กะโหลกไขว้",
"👿": "หน้าบึ้งมีเขา",
"💀": "หัวกระโหลก",
"😈": "ยิ้มมีเขา",
"😠": "หน้าโกรธ",
"😡": "โกรธมาก",
"😤": "หน้าข่มอารมณ์",
"🤬": "หน้ากำลังด่า",
"👹": "ยักษ์ญี่ปุ่น",
"👺": "ปีศาจญี่ปุ่น",
"👻": "ผี",
"👽": "เอเลี่ยน",
"👾": "สัตว์ประหลาดเอเลี่ยน",
"💩": "อุนจิ",
"🤖": "หุ่นยนต์",
"🤡": "หน้าตลก",
"😸": "แมวยิ้มอ้าปาก_ยิ้มออกตา",
"😹": "แมวร้องไห้ดีใจ",
"😺": "แมวยิ้มอ้าปาก",
"😻": "แมวยิ้มมีตารูปหัวใจ",
"😼": "แมวยิ้มเจ้าเล่ห์",
"😽": "แมวส่งจุ๊บ",
"😾": "แมวโกรธ",
"😿": "แมวร้องไห้",
"🙀": "แมวตกใจ",
"🙈": "ลิงปิดตา",
"🙉": "ลิงปิดหู",
"🙊": "ลิงปิดปาก",
"❣": "เครื่องหมายอัศเจรีย์รูปหัวใจ",
"❤": "หัวใจสีแดง",
"💋": "รอยจูบ",
"💌": "จดหมายรัก",
"💓": "หัวใจเต้น",
"💔": "อกหัก",
"💕": "ใจ_2_ดวง",
"💖": "หัวใจวิบวับ",
"💗": "ใจพองโต",
"💘": "ศรปักใจ",
"💙": "หัวใจสีน้ำเงิน",
"💚": "หัวใจสีเขียว",
"💛": "หัวใจสีเหลือง",
"💜": "หัวใจสีม่วง",
"💝": "หัวใจผูกริบบิ้น",
"💞": "หัวใจโคจร",
"💟": "หัวใจประดับ",
"💢": "สัญลักษณ์ความโกรธ",
"💣": "ระเบิด",
"💤": "หลับปุ๋ย",
"💥": "การปะทะ",
"💦": "เหงื่อหยด",
"💨": "วิ่งฉิว",
"💫": "มึนหัว",
"💬": "พูดไม่ออก",
"💭": "ลูกโป่งความคิด",
"💯": "คะแนนเต็ม",
"🕳": "หลุม",
"🖤": "ใจดำ",
"🗨": "ฟองคำพูด",
"🗯": "ฟองคำพูดรุนแรง",
"🤍": "หัวใจสีขาว",
"🤎": "หัวใจสีน้ำตาล",
"🧡": "หัวใจสีส้ม",
"✋": "ตั้งฝ่ามือ",
"👋": "โบกมือ",
"🖐": "ชูมือกางนิ้ว",
"🖖": "ชูนิ้วแบบวัลแคน",
"🤚": "ยกมือ",
"✌": "ชู_2_นิ้ว",
"👌": "ทำมือโอเค",
"🤌": "หุบนิ้ว",
"🤏": "ทำมือบีบนิ้วเข้าหากัน",
"🤘": "ชูนิ้วชาวร็อก",
"🤙": "มือโทร",
"🤞": "นิ้วไขว้",
"🤟": "ทำมือ_‘ฉันรักเธอ’",
"☝": "นิ้วชี้ขึ้น",
"👆": "หลังมือนิ้วชี้ขึ้น",
"👇": "นิ้วชี้ลง",
"👈": "นิ้วชี้ทางซ้าย",
"👉": "นิ้วชี้ทางขวา",
"🖕": "ชูนิ้วกลาง",
"✊": "กำมือ",
"👊": "กำปั้น",
"👍": "ชูนิ้วโป้งขึ้น",
"👎": "คว่ำนิ้วโป้งลง",
"🤛": "กำปั้นขวา",
"🤜": "กำปั้นซ้าย",
"👏": "ตบมือ",
"👐": "แบมือ",
"🙌": "ชู_2_มือ",
"🙏": "พนมมือ",
"🤝": "จับมือ",
"🤲": "แบสองมือ",
"✍": "เขียนหนังสือ",
"💅": "สีทาเล็บ",
"🤳": "เซลฟี่",
"👀": "ตา_2_ข้าง",
"👁": "ตาข้างเดียว",
"👂": "หู",
"👃": "จมูก",
"👄": "ปาก",
"👅": "ลิ้น",
"💪": "เบ่งกล้าม",
"🦴": "กระดูก",
"🦵": "ขา",
"🦶": "เท้า",
"🦷": "ฟัน",
"🦻": "หูใส่อุปกรณ์ช่วยฟัง",
"🦾": "แขนกล",
"🦿": "ขากล",
"🧠": "สมอง",
"🫀": "หัวใจ",
"🫁": "ปอด",
"👦": "เด็กชาย",
"👧": "เด็กหญิง",
"👨": "ผู้ชาย",
"👩": "ผู้หญิง",
"👱": "คนผมทอง",
"👴": "ชายแก่",
"👵": "หญิงแก่",
"👶": "ทารก",
"🧑": "คน",
"🧒": "เด็ก",
"🧓": "คนชรา",
"🧔": "ผู้ชายมีเครา",
"💁": "โต๊ะสอบถาม",
"🙅": "มือทำท่าไม่โอเค",
"🙆": "ทำท่าโอเค",
"🙇": "ท่าขอโทษ",
"🙋": "ยกมือขึ้น",
"🙍": "ขมวดคิ้ว",
"🙎": "ปากยื่น",
"🤦": "หมดกัน",
"🤷": "ยักไหล่",
"🧏": "คนหูหนวก",
"👮": "เจ้าหน้าที่ตำรวจ",
"👰": "เจ้าสาว",
"👲": "ชายจีน",
"👳": "ชายโพกหัว",
"👷": "คนงานก่อสร้าง",
"👸": "เจ้าหญิง",
"💂": "การ์ดคุ้มกัน",
"🕵": "นักสืบ",
"🤰": "คนท้อง",
"🤱": "ให้นม",
"🤴": "เจ้าชาย",
"🤵": "คนหล่อ",
"🥷": "นินจา",
"🧕": "ผู้หญิงโพกศีรษะ",
"🎅": "ซานต้า",
"👼": "นางฟ้าเด็ก",
"🤶": "นางซานต้า",
"🦸": "ซุปเปอร์ฮีโร่",
"🦹": "ยอดมนุษย์",
"🧙": "นักเวทย์",
"🧚": "นางฟ้า",
"🧛": "แวมไพร์",
"🧜": "ครึ่งคนครึ่งปลา",
"🧝": "เอลฟ์",
"🧞": "ยักษ์จีนี่",
"🧟": "ซอมบี้",
"🏃": "คนวิ่ง",
"👯": "คนในชุดหูกระต่าย",
"💃": "นักเต้น",
"💆": "นวดหน้า",
"💇": "ตัดผม",
"🕴": "คนใส่สูทลอยได้",
"🕺": "คนเต้น",
"🚶": "คนเดิน",
"🧍": "คนกำลังยืน",
"🧎": "คนกำลังคุกเข่า",
"🧖": "คนในห้องอบไอน้ำ",
"🧗": "นักไต่เขา",
"⛷": "นักสกี",
"⛹": "คนเล่นบอล",
"🏂": "นักสโนว์บอร์ด",
"🏄": "นักโต้คลื่น",
"🏇": "แข่งม้า",
"🏊": "นักว่ายน้ำ",
"🏋": "นักยกน้ำหนัก",
"🏌": "นักกอล์ฟ",
"🚣": "นักพายเรือ",
"🚴": "นักปั่นจักรยาน",
"🚵": "นักปั่นจักรยานเสือภูเขา",
"🤸": "คนตีลังกา",
"🤹": "คนเล่นจั๊กกลิ้ง",
"🤺": "นักฟันดาบ",
"🤼": "นักมวยปล้ำ",
"🤽": "นักโปโลน้ำ",
"🤾": "นักแฮนด์บอล",
"🛀": "คนนอนแช่น้ำในอ่าง",
"🛌": "คนนอนหลับ",
"🧘": "คนนั่งสมาธิ",
"👪": "ครอบครัว",
"👫": "ชาย-หญิงจับมือ",
"👬": "ชาย-ชายจับมือ",
"👭": "หญิง-หญิงจับมือ",
"💏": "จูบ",
"💑": "คู่รัก",
"👣": "รอยเท้า",
"👤": "เงาครึ่งตัวคนเดียว",
"👥": "เงาครึ่งตัว_2_คน",
"🗣": "เงาคนกำลังพูด",
"🫂": "คนกอดกัน",
"🏻": "โทนผิวสีขาว",
"🏼": "โทนผิวสีขาวเหลือง",
"🏽": "โทนผิวสีเหลือง",
"🏾": "โทนผิวสีแทน",
"🏿": "โทนผิวสีเข้ม",
"🦰": "ผมแดง",
"🦱": "ผมหยิก",
"🦲": "หัวล้าน",
"🦳": "ผมขาว",
"🐀": "หนูตัวใหญ่",
"🐁": "หนูตัวเล็ก",
"🐂": "วัวตัวผู้",
"🐃": "ควาย",
"🐄": "วัว",
"🐅": "เสือ",
"🐆": "เสือดาว",
"🐇": "กระต่าย",
"🐈": "แมว",
"🐎": "ม้า",
"🐏": "แกะตัวผู้",
"🐐": "แพะ",
"🐑": "แกะ",
"🐒": "ลิง",
"🐕": "สุนัข",
"🐖": "หมู",
"🐗": "หมูป่าตัวผู้",
"🐘": "ช้าง",
"🐨": "โคอาล่า",
"🐩": "พุดเดิ้ล",
"🐪": "อูฐโหนกเดียว",
"🐫": "อูฐสองโหนก",
"🐭": "หน้าหนู",
"🐮": "หน้าวัว",
"🐯": "หน้าเสือ",
"🐰": "หน้ากระต่าย",
"🐱": "หน้าแมว",
"🐴": "หน้าม้า",
"🐵": "หน้าลิง",
"🐶": "หน้าสุนัข",
"🐷": "หน้าหมู",
"🐹": "หนูแฮมสเตอร์",
"🐺": "หมาป่า",
"🐻": "หมี",
"🐼": "แพนด้า",
"🐽": "จมูกหมู",
"🐾": "รอยเท้าสัตว์",
"🐿": "ชิปมังก์",
"🦁": "สิงโต",
"🦄": "ยูนิคอร์น",
"🦇": "ค้างคาว",
"🦊": "จิ้งจอก",
"🦌": "กวาง",
"🦍": "กอริลล่า",
"🦏": "แรด",
"🦒": "ยีราฟ",
"🦓": "ม้าลาย",
"🦔": "เฮดจ์ฮ็อก",
"🦘": "จิงโจ้",
"🦙": "ลามะ",
"🦛": "ฮิปโปโปเตมัส",
"🦝": "แรคคูน",
"🦡": "แบดเจอร์",
"🦣": "ช้างแมมมอธ",
"🦥": "สลอธ",
"🦦": "ตัวนาก",
"🦧": "อุรังอุตัง",
"🦨": "สกังก์",
"🦫": "บีเวอร์",
"🦬": "ควายไบซัน",
"🦮": "สุนัขนำทาง",
"🐓": "ไก่ตัวผู้",
"🐔": "ไก่",
"🐣": "ลูกเจี๊ยบออกจากไข่",
"🐤": "ลูกเจี๊ยบ",
"🐥": "ลูกเจี๊ยบยืนหันหน้า",
"🐦": "นก",
"🐧": "เพนกวิน",
"🕊": "นกพิราบขาว",
"🦃": "ไก่งวง",
"🦅": "อินทรี",
"🦆": "เป็ด",
"🦉": "นกฮูก",
"🦚": "นกยูง",
"🦜": "นกแก้ว",
"🦢": "หงส์",
"🦤": "นกโดโด",
"🦩": "นกฟลามิงโก",
"🪶": "ขนนก",
"🐸": "กบ",
"🐉": "มังกร",
"🐊": "จระเข้",
"🐍": "งู",
"🐢": "เต่า",
"🐲": "หน้ามังกร",
"🦎": "จิ้งจก",
"🦕": "ไดโนเสาร์",
"🦖": "ทีเร็กซ์",
"🐋": "ปลาวาฬ",
"🐙": "ปลาหมึกยักษ์",
"🐚": "หอย",
"🐟": "ปลา",
"🐠": "ปลาเขตร้อน",
"🐡": "ปลาปักเป้า",
"🐬": "ปลาโลมา",
"🐳": "ปลาวาฬพ่นน้ำ",
"🦈": "ฉลาม",
"🦭": "แมวน้ำ",
"🐌": "หอยทาก",
"🐛": "แมลง",
"🐜": "มด",
"🐝": "ผึ้ง",
"🐞": "เต่าทอง",
"🕷": "แมงมุม",
"🕸": "ใยแมงมุม",
"🦂": "แมงป่อง",
"🦋": "ผีเสื้อ",
"🦗": "จิ้งหรีด",
"🦟": "ยุง",
"🦠": "จุลินทรีย์",
"🪰": "แมลงวัน",
"🪱": "หนอน",
"🪲": "ด้วง",
"🪳": "แมลงสาบ",
"🌷": "ทิวลิป",
"🌸": "ดอกซากุระ",
"🌹": "ดอกกุหลาบ",
"🌺": "ดอกชบา",
"🌻": "ดอกทานตะวัน",
"🌼": "ดอกไม้บาน",
"🏵": "ลายดอกกุหลาบ",
"💐": "ช่อดอกไม้",
"💮": "ตราดอกไม้",
"🥀": "ดอกไม้เหี่ยว",
"☘": "ใบโคลเวอร์",
"🌱": "ต้นอ่อน",
"🌲": "ต้นสน",
"🌳": "ต้นไม้ร่มรื่น",
"🌴": "ต้นมะพร้าว",
"🌵": "ตะบองเพชร",
"🌾": "รวงข้าว",
"🌿": "สมุนไพร",
"🍀": "ใบโคลเวอร์_4_แฉก",
"🍁": "ใบเมเปิ้ล",
"🍂": "ใบไม้ร่วง",
"🍃": "ใบไม้ปลิว",
"🪴": "ไม้กระถาง",
"🍅": "มะเขือเทศ",
"🍇": "องุ่น",
"🍈": "เมลอน",
"🍉": "แตงโม",
"🍊": "ส้ม",
"🍋": "เลมอน",
"🍌": "กล้วย",
"🍍": "สับปะรด",
"🍎": "แอปเปิ้ลแดง",
"🍏": "แอปเปิ้ลเขียว",
"🍐": "ลูกแพร์",
"🍑": "ลูกพีช",
"🍒": "เชอร์รี่",
"🍓": "สตรอว์เบอร์รี่",
"🥝": "กีวี",
"🥥": "มะพร้าว",
"🥭": "มะม่วง",
"🫐": "บลูเบอร์รี่",
"🫒": "มะกอก",
"🌰": "เกาลัด",
"🌶": "พริก",
"🌽": "ข้าวโพด",
"🍄": "เห็ด",
"🍆": "มะเขือยาว",
"🥑": "อาโวคาโด",
"🥒": "แตงกวา",
"🥔": "มันฝรั่ง",
"🥕": "แครอท",
"🥜": "ถั่ว",
"🥦": "บรอกโคลี",
"🥬": "ผักใบเขียว",
"🧄": "กระเทียม",
"🧅": "หอมหัวใหญ่",
"🫑": "พริกหยวก",
"🌭": "ฮอทด็อก",
"🌮": "ทาโก้",
"🌯": "เบอร์ริโต",
"🍔": "แฮมเบอร์เกอร์",
"🍕": "พิซซ่า_1_ชิ้น",
"🍖": "เนื้อ",
"🍗": "น่องไก่",
"🍞": "ขนมปัง",
"🍟": "เฟรนช์ฟราย",
"🍲": "สตูว์",
"🍳": "ทำอาหาร",
"🍿": "ป๊อปคอร์น",
"🥐": "ครัวซอง",
"🥓": "เบคอน",
"🥖": "ขนมปังฝรั่งเศส",
"🥗": "สลัด",
"🥘": "กระทะ",
"🥙": "เคบับ",
"🥚": "ไข่",
"🥞": "แพนเค้ก",
"🥣": "ชามพร้อมช้อน",
"🥨": "เพรตเซล",
"🥩": "เนื้อหั่นชิ้น",
"🥪": "แซนด์วิช",
"🥫": "อาหารกระป๋อง",
"🥯": "เบเกิล",
"🧀": "เนยแข็ง",
"🧂": "เกลือ",
"🧆": "ฟาลาเฟล",
"🧇": "วาฟเฟิล",
"🧈": "เนย",
"🫓": "แฟลตเบรด",
"🫔": "ทามาเล่",
"🫕": "ฟองดูว์",
"🍘": "ข้าวอบกรอบ",
"🍙": "ข้าวปั้น",
"🍚": "ข้าวสวย",
"🍛": "ข้าวแกงกะหรี่",
"🍜": "ราเมน",
"🍝": "สปาเก็ตตี้",
"🍠": "มันเผา",
"🍡": "ดังโงะ",
"🍢": "โอเด้ง",
"🍣": "ซูชิ",
"🍤": "กุ้งทอด",
"🍥": "ลูกชิ้นปลา",
"🍱": "กล่องเบนโตะ",
"🥟": "เกี๊ยว",
"🥠": "คุกกี้เสี่ยงทาย",
"🥡": "อาหารกล่องซื้อกลับบ้าน",
"🥮": "ขนมไหว้พระจันทร์",
"🦀": "ปู",
"🦐": "กุ้ง",
"🦑": "หมึก",
"🦞": "กุ้งมังกร",
"🦪": "หอยนางรม",
"🍦": "ซอฟต์ครีม",
"🍧": "น้ำแข็งไส",
"🍨": "ไอศกรีม",
"🍩": "โดนัท",
"🍪": "คุกกี้",
"🍫": "ช็อกโกแลต",
"🍬": "ลูกอม",
"🍭": "อมยิ้ม",
"🍮": "คัสตาร์ด",
"🍯": "โถน้ำผึ้ง",
"🍰": "เค้ก",
"🎂": "เค้กวันเกิด",
"🥧": "พาย",
"🧁": "คัพเค้ก",
"☕": "เครื่องดื่มร้อน",
"🍵": "ถ้วยชา",
"🍶": "สาเก",
"🍷": "ไวน์",
"🍸": "ค็อกเทล",
"🍹": "เครื่องดื่มผสมน้ำผลไม้",
"🍺": "เบียร์",
"🍻": "เหยือกเบียร์ชนกัน",
"🍼": "ขวดนม",
"🍾": "แชมเปญ",
"🥂": "ชนแก้ว",
"🥃": "แก้วเหล้า",
"🥛": "แก้วนม",
"🥤": "แก้วพร้อมหลอด",
"🧃": "เครื่องดื่มแบบกล่อง",
"🧉": "ชามาเต",
"🧊": "ก้อนน้ำแข็ง",
"🧋": "ชาไข่มุก",
"🫖": "กาน้ำชา",
"🍴": "ส้อม_มีด",
"🍽": "จานพร้อมส้อม_มีด",
"🏺": "โถโบราณ",
"🔪": "มีดทำครัว",
"🥄": "ช้อน",
"🥢": "ตะเกียบ",
"🌍": "ลูกโลกแสดงทวีปยุโรป_แอฟริกา",
"🌎": "ลูกโลกแสดงทวีปอเมริกา",
"🌏": "ลูกโลกแสดงทวีปเอเชีย_ออสเตรเลีย",
"🌐": "ลูกโลกแสดงเส้นเมริเดียน",
"🗺": "แผนที่โลก",
"🗾": "แผนที่ญี่ปุ่น",
"🧭": "เข็มทิศ",
"⛰": "ภูเขา",
"🌋": "ภูเขาไฟ",
"🏔": "ภูเขามีหิมะ",
"🏕": "ตั้งแคมป์",
"🏖": "ร่มชายหาด",
"🏜": "ทะเลทราย",
"🏝": "เกาะ",
"🏞": "อุทยาน",
"🗻": "ภูเขาไฟฟูจิ",
"🏗": "ก่อสร้างอาคาร",
"🏘": "บ้านหลายหลัง",
"🏚": "บ้านร้าง",
"🏛": "อาคารสไตล์คลาสสิก",
"🏟": "สนามกีฬา",
"🏠": "บ้านเดี่ยว",
"🏡": "บ้านพร้อมสวน",
"🏢": "ตึกสำนักงาน",
"🏣": "ไปรษณีย์ญี่ปุ่น",
"🏤": "ไปรษณีย์ยุโรป",
"🏥": "โรงพยาบาล",
"🏦": "ธนาคาร",
"🏨": "โรงแรม",
"🏩": "ม่านรูด",
"🏪": "ร้านสะดวกซื้อ",
"🏫": "โรงเรียน",
"🏬": "ห้างสรรพสินค้า",
"🏭": "โรงงาน",
"🏯": "ปราสาทญี่ปุ่น",
"🏰": "ปราสาทยุโรป",
"💒": "งานแต่งงาน",
"🗼": "โตเกียวทาวเวอร์",
"🗽": "เทพีเสรีภาพ",
"🛖": "กระท่อม",
"🧱": "ก้อนอิฐ",
"🪨": "หิน",
"🪵": "ไม้",
"⛩": "ศาลเจ้าชินโต",
"⛪": "โบสถ์",
"🕋": "วิหารกะอ์บะฮ์",
"🕌": "มัสยิด",
"🕍": "โบสถ์ยิว",
"🛕": "วัดฮินดู",
"♨": "น้ำพุร้อน",
"⛲": "น้ำพุ",
"⛺": "เต็นท์",
"🌁": "หมอกลง",
"🌃": "ดาว",
"🌄": "ดวงอาทิตย์โผล่พ้นเขา",
"🌅": "ดวงอาทิตย์ขึ้น",
"🌆": "เมืองยามโพล้เพล้",
"🌇": "ดวงอาทิตย์ตก",
"🌉": "สะพานกลางคืน",
"🎠": "ม้าหมุน",
"🎡": "ชิงช้าสวรรค์",
"🎢": "รถไฟเหาะ",
"🎪": "ละครสัตว์",
"🏙": "หมู่ตึกสูง",
"💈": "ร้านตัดผม",
"⛽": "ปั๊มน้ำมัน",
"🏍": "มอเตอร์ไซค์",
"🏎": "รถแข่ง",
"🚂": "หัวรถจักรไอน้ำ",
"🚃": "ตู้รถไฟ",
"🚄": "ชินคันเซ็น",
"🚅": "รถไฟความเร็วสูง",
"🚆": "รถไฟ",
"🚇": "รถไฟใต้ดิน",
"🚈": "รถไฟรางเบา",
"🚉": "สถานีรถไฟ",
"🚊": "รถรางบนราง",
"🚋": "ตู้รถราง",
"🚌": "รถบัส",
"🚍": "รถบัสกำลังมา",
"🚎": "รถราง",
"🚏": "ป้ายรถบัส",
"🚐": "มินิบัส",
"🚑": "รถพยาบาล",
"🚒": "รถดับเพลิง",
"🚓": "รถตำรวจ",
"🚔": "รถตำรวจกำลังมา",
"🚕": "แท็กซี่",
"🚖": "แท็กซี่กำลังมา",
"🚗": "รถ",
"🚘": "รถกำลังมา",
"🚙": "รถบ้าน",
"🚚": "รถขนส่ง",
"🚛": "รถบรรทุก",
"🚜": "แทร็กเตอร์",
"🚝": "รถไฟรางเดี่ยว",
"🚞": "รางรถไฟภูเขา",
"🚥": "ไฟจราจรแนวนอน",
"🚦": "ไฟจราจรแนวตั้ง",
"🚧": "ป้ายไซต์ก่อสร้าง",
"🚨": "สัญญาณไฟตำรวจ",
"🚲": "จักรยาน",
"🛑": "เครื่องหมายหยุด",
"🛢": "ถังน้ำมัน",
"🛣": "ทางด่วน",
"🛤": "รางรถไฟ",
"🛴": "สกู๊ตเตอร์",
"🛵": "รถสกู๊ตเตอร์",
"🛹": "สเก็ตบอร์ด",
"🛺": "รถสามล้อ",
"🛻": "รถกระบะ",
"🛼": "รองเท้าสเก็ต",
"🦼": "วีลแชร์ไฟฟ้า",
"🦽": "วีลแชร์ธรรมดา",
"⚓": "สมอเรือ",
"⛴": "เรือเฟอร์รี",
"⛵": "เรือใบ",
"🚢": "เรือ",
"🚤": "เรือด่วน",
"🛥": "เรือยนต์",
"🛳": "เรือโดยสาร",
"🛶": "แคนู",
"✈": "เครื่องบิน",
"💺": "ที่นั่ง",
"🚀": "จรวด",
"🚁": "เฮลิคอปเตอร์",
"🚟": "รถไฟสะพานแขวน",
"🚠": "เคเบิลคาร์",
"🚡": "รถกระเช้าลอยฟ้า",
"🛩": "เครื่องบินเล็ก",
"🛫": "เครื่องบินขึ้น",
"🛬": "เครื่องบินลง",
"🛰": "ดาวเทียม",
"🛸": "จานบิน",
"🪂": "ร่มชูชีพ",
"🛎": "กระดิ่งโรงแรม",
"🧳": "กระเป๋าเดินทาง",
"⌚": "นาฬิกาข้อมือ",
"⌛": "นาฬิกาทราย",
"⏰": "นาฬิกาปลุก",
"⏱": "นาฬิกาจับเวลา",
"⏲": "นาฬิกานับถอยหลัง",
"⏳": "นาฬิกาทรายจับเวลา",
"🕐": "หนึ่งนาฬิกา",
"🕑": "สองนาฬิกา",
"🕒": "สามนาฬิกา",
"🕓": "สี่นาฬิกา",
"🕔": "ห้านาฬิกา",
"🕕": "หกนาฬิกา",
"🕖": "เจ็ดนาฬิกา",
"🕗": "แปดนาฬิกา",
"🕘": "เก้านาฬิกา",
"🕙": "สิบนาฬิกา",
"🕚": "สิบเอ็ดนาฬิกา",
"🕛": "สิบสองนาฬิกา",
"🕜": "หนึ่งนาฬิกาครึ่ง",
"🕝": "สองนาฬิกาครึ่ง",
"🕞": "สามนาฬิกาครึ่ง",
"🕟": "สี่นาฬิกาครึ่ง",
"🕠": "ห้านาฬิกาครึ่ง",
"🕡": "หกนาฬิกาครึ่ง",
"🕢": "เจ็ดนาฬิกาครึ่ง",
"🕣": "แปดนาฬิกาครึ่ง",
"🕤": "เก้านาฬิกาครึ่ง",
"🕥": "สิบนาฬิกาครึ่ง",
"🕦": "สิบเอ็ดนาฬิกาครึ่ง",
"🕧": "สิบสองนาฬิกาครึ่ง",
"🕰": "นาฬิกาบนหิ้ง",
"☀": "พระอาทิตย์",
"☁": "เมฆ",
"☂": "ร่ม",
"☃": "สโนว์แมน_หิมะ",
"☄": "ดาวหาง",
"☔": "ร่ม_หยดน้ำฝน",
"⚡": "ไฟฟ้าแรงสูง",
"⛄": "สโนว์แมน",
"⛅": "ดวงอาทิตย์หลังเมฆ",
"⛈": "ฝนฟ้าคะนอง",
"⛱": "ร่มปักดิน",
"❄": "เกล็ดหิมะ",
"⭐": "ดาวสีขาวขนาดกลาง",
"🌀": "ไซโคลน",
"🌂": "ร่มหุบ",
"🌈": "รุ้ง",
"🌊": "คลื่น",
"🌌": "ทางช้างเผือก",
"🌑": "จันทร์ดับ",
"🌒": "พระจันทร์เสี้ยวข้างขึ้น",
"🌓": "พระจันทร์ครึ่งซีกขวา",
"🌔": "ข้างขึ้น",
"🌕": "พระจันทร์เต็มดวง",
"🌖": "ข้างแรม",
"🌗": "พระจันทร์ครึ่งซีกซ้าย",
"🌘": "พระจันทร์เสี้ยวข้างแรม",
"🌙": "พระจันทร์เสี้ยว",
"🌚": "หน้าพระจันทร์ดับ",
"🌛": "หน้าพระจันทร์เสี้ยวขวา",
"🌜": "หน้าพระจันทร์เสี้ยวซ้าย",
"🌝": "หน้าพระจันทร์เต็มดวง",
"🌞": "หน้าพระอาทิตย์",
"🌟": "ดาวส่องแสง",
"🌠": "ดาวตก",
"🌡": "เครื่องวัดอุณหภูมิ",
"🌤": "เมฆน้อยบดบังพระอาทิตย์",
"🌥": "เมฆก้อนใหญ่บังพระอาทิตย์",
"🌦": "เมฆฝนบดบังพระอาทิตย์",
"🌧": "เมฆฝน",
"🌨": "เมฆ_หิมะ",
"🌩": "เมฆ_ฟ้าแลบ",
"🌪": "พายุทอร์นาโด",
"🌫": "หมอก",
"🌬": "พ่นลม",
"💧": "หยดน้ำ",
"🔥": "ไฟ",
"🪐": "ดาวเคราะห์ที่มีวงแหวน",
"✨": "ประกายวิบวับ",
"🎀": "ริบบิ้น",
"🎁": "ของขวัญ",
"🎃": "ฟักทองฮาโลวีน",
"🎄": "ต้นคริสต์มาส",
"🎆": "พลุ",
"🎇": "ดอกไม้ไฟ",
"🎈": "ลูกโป่ง",
"🎉": "ปาร์ตี้",
"🎊": "ลูกบอลใส่เศษกระดาษงานปาร์ตี้",
"🎋": "ต้นไม้ประดับคำอวยพร",
"🎍": "ต้นสนประดับ",
"🎎": "ตุ๊กตาญี่ปุ่น",
"🎏": "ธงปลาคาร์พ",
"🎐": "โมบายล์กระดิ่ง",
"🎑": "ไหว้พระจันทร์",
"🎗": "ริบบิ้นรำลึก",
"🎟": "ตั๋วเข้าชม",
"🎫": "ตั๋ว",
"🧧": "อั่งเปา",
"🧨": "ประทัด",
"🎖": "เหรียญกล้าหาญ",
"🏅": "เหรียญรางวัล",
"🏆": "ถ้วยรางวัล",
"🥇": "เหรียญทอง",
"🥈": "เหรียญเงิน",
"🥉": "เหรียญทองแดง",
"⚽": "ลูกฟุตบอล",
"⚾": "เบสบอล",
"⛳": "ธงในหลุม",
"⛸": "สเก็ตน้ำแข็ง",
"🎣": "ตกปลา",
"🎳": "โบว์ลิ่ง",
"🎽": "เสื้อวิ่ง",
"🎾": "เทนนิส",
"🎿": "สกี",
"🏀": "บาสเกตบอล",
"🏈": "อเมริกันฟุตบอล",
"🏉": "รักบี้",
"🏏": "คริกเก็ต",
"🏐": "วอลเลย์บอล",
"🏑": "ฮอกกี้",
"🏒": "ไม้ฮอกกี้",
"🏓": "ปิงปอง",
"🏸": "แบดมินตัน",
"🛷": "เลื่อนหิมะ",
"🤿": "หน้ากากดำน้ำ",
"🥅": "โกล",
"🥊": "นวม",
"🥋": "ชุดยูโด",
"🥌": "ลูกกลิ้งหิน",
"🥍": "ลาครอส",
"🥎": "ซอฟต์บอล",
"🥏": "จานร่อน",
"♟": "หมากรุก",
"♠": "โพดำ",
"♣": "ดอกจิก",
"♥": "โพแดง",
"♦": "ข้าวหลามตัด",
"🀄": "ไพ่นกกระจอกมังกรแดง",
"🃏": "ไพ่โจ๊กเกอร์",
"🎮": "วิดีโอเกม",
"🎯": "กลางเป้า",
"🎰": "สล็อตแมชชีน",
"🎱": "บิลเลียด",
"🎲": "ลูกเต๋า",
"🎴": "ไพ่ดอกไม้",
"🔮": "ลูกแก้ววิเศษ",
"🕹": "จอยสติ๊ก",
"🧩": "จิ๊กซอว์",
"🧸": "ตุ๊กตาหมี",
"🧿": "เครื่องราง",
"🪀": "โยโย่",
"🪁": "ว่าว",
"🪄": "ไม้กายสิทธิ์",
"🪅": "ปิญญาตา",
"🪆": "ตุ๊กตาแม่ลูกดก",
"🎨": "จานสีวาดรูป",
"🎭": "หน้ากาก",
"🖼": "รูปใส่กรอบ",
"🧵": "ด้าย",
"🧶": "ไหมพรม",
"🪡": "เข็มเย็บผ้า",
"🪢": "เงื่อน",
"⛑": "หมวกนิรภัยมีกากบาทขาว",
"🎒": "เป้นักเรียน",
"🎓": "หมวกรับปริญญา",
"🎩": "หมวกสูง",
"👑": "มงกุฎ",
"👒": "หมวกผู้หญิง",
"👓": "แว่นตา",
"👔": "เนคไท",
"👕": "เสื้อยืด",
"👖": "ยีนส์",
"👗": "ชุดกระโปรง",
"👘": "กิโมโน",
"👙": "บิกินี",
"👚": "เสื้อผู้หญิง",
"👛": "กระเป๋าใส่เงิน",
"👜": "กระเป๋าถือ",
"👝": "กระเป๋าใบเล็ก",
"👞": "รองเท้าชาย",
"👟": "รองเท้ากีฬา",
"👠": "รองเท้าส้นสูง",
"👡": "รองเท้าแตะผู้หญิง",
"👢": "รองเท้าบู๊ตผู้หญิง",
"💄": "ลิปสติก",
"💍": "แหวน",
"💎": "อัญมณี",
"📿": "ลูกประคำ",
"🕶": "แว่นกันแดด",
"🛍": "ถุงช็อปปิ้ง",
"🥻": "ชุดส่าหรี",
"🥼": "เสื้อกาวน์",
"🥽": "แว่นตากันลม",
"🥾": "รองเท้าปีนเขา",
"🥿": "รองเท้าส้นเตี้ย",
"🦺": "เสื้อนิรภัย",
"🧢": "หมวกแก๊ป",
"🧣": "ผ้าพันคอ",
"🧤": "ถุงมือ",
"🧥": "เสื้อโค้ต",
"🧦": "ถุงเท้า",
"🩰": "รองเท้าบัลเล่ต์",
"🩱": "ชุดว่ายน้ำวันพีซ",
"🩲": "กางเกงชั้นในชาย",
"🩳": "กางเกงขาสั้น",
"🩴": "รองเท้าลำลอง",
"🪖": "หมวกทหาร",
"📢": "เครื่องขยายเสียง",
"📣": "โทรโข่ง",
"📯": "แตรส่งสาร",
"🔇": "ไม่ใช้เสียง",
"🔈": "ลำโพงเสียงเบา",
"🔉": "ลำโพงเสียงปานกลาง",
"🔊": "ลำโพงเสียงดัง",
"🔔": "กระดิ่ง",
"🔕": "ไม่มีกระดิ่ง",
"🎙": "ไมค์สตูดิโอ",
"🎚": "ที่ปรับระดับเสียง",
"🎛": "ปุ่มควบคุมเสียง",
"🎤": "ไมโครโฟน",
"🎧": "หูฟัง",
"🎵": "โน้ตดนตรี",
"🎶": "โน้ตดนตรีหลายตัว",
"🎼": "บรรทัดห้าเส้น",
"📻": "วิทยุ",
"🎷": "แซ็กโซโฟน",
"🎸": "กีต้าร์",
"🎹": "เปียโน",
"🎺": "ทรัมเป็ต",
"🎻": "ไวโอลิน",
"🥁": "กลอง",
"🪕": "แบนโจ",
"🪗": "แอคคอร์เดียน",
"🪘": "กลองยาว",
"☎": "โทรศัพท์",
"📞": "หูโทรศัพท์",
"📟": "เพจเจอร์",
"📠": "แฟกซ์",
"📱": "โทรศัพท์มือถือ",
"📲": "โทรศัพท์มือถือพร้อมลูกศรชี้",
"⌨": "แป้นพิมพ์",
"💻": "แล็ปท็อป",
"💽": "แผ่นดิสก์",
"💾": "ฟลอปปี้ดิสก์",
"💿": "บลูเรย์",
"📀": "ดีวีดี",
"🔋": "แบตเตอรี่",
"🔌": "ปลั๊กไฟ",
"🖥": "คอมพิวเตอร์เดสก์ท็อป",
"🖨": "เครื่องพิมพ์",
"🖱": "เมาส์",
"🖲": "แทร็กบอล",
"🧮": "ลูกคิด",
"🎞": "เฟรมภาพยนตร์",
"🎥": "กล้องถ่ายภาพยนตร์",
"🎬": "สเลท",
"🏮": "โคมไฟแดง",
"💡": "หลอดไฟ",
"📷": "กล้อง",
"📸": "กล้องเปิดแฟลช",
"📹": "กล้องวิดีโอ",
"📺": "ทีวี",
"📼": "วิดีโอเทป",
"📽": "เครื่องฉายหนัง",
"🔍": "แว่นขยายเอียงซ้าย",
"🔎": "แว่นขยายเอียงขวา",
"🔦": "ไฟฉาย",
"🕯": "เทียน",
"🪔": "ตะเกียงดิยา",
"🏷": "ป้าย",
"📃": "เอกสารม้วนปลาย",
"📄": "เอกสาร",
"📑": "แถบคั่นหน้า",
"📒": "สมุดโน้ตเจาะรูข้าง",
"📓": "สมุดโน้ต",
"📔": "สมุดโน้ตมีลาย",
"📕": "หนังสือปิด",
"📖": "หนังสือเปิด",
"📗": "หนังสือสีเขียว",
"📘": "หนังสือสีน้ำเงิน",
"📙": "หนังสือสีส้ม",
"📚": "หนังสือ",
"📜": "ม้วนกระดาษ",
"📰": "หนังสือพิมพ์",
"🔖": "ที่คั่นหนังสือ",
"🗞": "ม้วนหนังสือพิมพ์",
"💰": "ถุงเงิน",
"💳": "บัตรเครดิต",
"💴": "ธนบัตรเยน",
"💵": "ธนบัตรดอลลาร์",
"💶": "ธนบัตรยูโร",
"💷": "ธนบัตรปอนด์",
"💸": "เงินบิน",
"💹": "ตลาดขึ้น",
"🧾": "ใบเสร็จ",
"🪙": "เหรียญ",
"✉": "ซองจดหมาย",
"📤": "กล่องขาออก",
"📥": "กล่องขาเข้า",
"📦": "พัสดุ",
"📧": "อีเมล",
"📨": "จดหมายเข้า",
"📩": "จดหมายออก",
"📪": "กล่องจดหมายปิดคว่ำธง",
"📫": "กล่องจดหมายปิดยกธง",
"📬": "กล่องจดหมายมีจดหมาย",
"📭": "กล่องจดหมายว่าง",
"📮": "ตู้ไปรษณีย์",
"🗳": "หีบบัตรลงคะแนน",
"✏": "ดินสอ",
"✒": "ปลายปากกาสีดำ",
"📝": "จดบันทึก",
"🖊": "ปากกา",
"🖋": "ปากกาหมึกซึม",
"🖌": "แปรงทาสี",
"🖍": "ดินสอสี",
"✂": "กรรไกร",
"💼": "กระเป๋าเอกสาร",
"📁": "แฟ้มเอกสาร",
"📂": "เปิดเอกสาร",
"📅": "ปฏิทิน",
"📆": "ปฏิทินแบบฉีกออก",
"📇": "ที่ใส่บัตร",
"📈": "แนวโน้มขึ้น",
"📉": "แนวโน้มลง",
"📊": "กราฟแท่ง",
"📋": "คลิปบอร์ด",
"📌": "หมุดปัก",
"📍": "หมุดหัวกลม",
"📎": "คลิปหนีบกระดาษ",
"📏": "ไม้บรรทัดตรง",
"📐": "ไม้บรรทัดสามเหลี่ยม",
"🖇": "คลิปหนีบกระดาษคู่",
"🗂": "ที่คั่นบัตรรายการ",
"🗃": "กล่องใส่แฟ้ม",
"🗄": "ตู้เอกสาร",
"🗑": "ตะกร้าขยะ",
"🗒": "สมุดโน้ตสันห่วง",
"🗓": "ปฏิทินสันห่วง",
"🔏": "ล็อคด้วยปากกา",
"🔐": "ล็อคด้วยกุญแจ",
"🔑": "กุญแจ",
"🔒": "ล็อคปิด",
"🔓": "ล็อคเปิด",
"🗝": "กุญแจเก่า",
"⚒": "ค้อน_จอบ",
"⚔": "ดาบไขว้",
"⚖": "ตราชั่ง",
"⚙": "เฟือง",
"⛏": "จอบ",
"⛓": "โซ่",
"🏹": "ธนู_ลูกศร",
"🔗": "สัญลักษณ์การลิงก์",
"🔧": "ประแจ",
"🔨": "ค้อน",
"🔩": "สลัก_น็อต",
"🔫": "ปืนฉีดน้ำ",
"🗜": "เครื่องบีบอัด",
"🗡": "ดาบสั้น",
"🛠": "ค้อน_ประแจ",
"🛡": "โล่",
"🦯": "ไม้เท้านำทาง",
"🧰": "กล่องเครื่องมือ",
"🧲": "แม่เหล็ก",
"🪃": "บูมเมอแรง",
"🪓": "ขวาน",
"🪚": "เลื่อย",
"🪛": "ไขควง",
"🪜": "บันไดปีน",
"🪝": "ตะขอ",
"⚗": "อุปกรณ์กลั่น",
"📡": "จานดาวเทียม",
"🔬": "กล้องจุลทรรศน์",
"🔭": "กล้องโทรทรรศน์",
"🧪": "หลอดทดลอง",
"🧫": "จานเพาะเชื้อ",
"🧬": "ดีเอ็นเอ",
"💉": "กระบอกฉีดยา",
"💊": "ยาเม็ด",
"🩸": "หยดเลือด",
"🩹": "พลาสเตอร์ปิดแผล",
"🩺": "เครื่องฟังตรวจ",
"🚪": "ประตู",
"🚽": "โถส้วม",
"🚿": "ฝักบัว",
"🛁": "อ่างอาบน้ำ",
"🛋": "โซฟากับโคมไฟ",
"🛏": "เตียง",
"🛒": "รถเข็น",
"🛗": "ลิฟต์",
"🧯": "ที่ดับเพลิง",
"🧴": "ขวดโลชั่น",
"🧷": "เข็มกลัดซ่อนปลาย",
"🧹": "ไม้กวาด",
"🧺": "ตะกร้า",
"🧻": "กระดาษชำระ",
"🧼": "สบู่",
"🧽": "ฟองน้ำ",
"🪑": "เก้าอี้",
"🪒": "ใบมีดโกน",
"🪞": "กระจก",
"🪟": "หน้าต่าง",
"🪠": "ที่ปั๊มชักโครก",
"🪣": "ถัง",
"🪤": "กับดักหนู",
"🪥": "แปรงสีฟัน",
"⚰": "โลงศพ",
"⚱": "โกศกระดูก",
"🗿": "รูปปั้นโมไอ",
"🚬": "ป้ายสูบบุหรี่",
"🪦": "แผ่นหินจารึก",
"🪧": "ป้ายประกาศ",
"♿": "รถเข็นผู้ป่วย",
"🏧": "เอทีเอ็ม",
"🚮": "ทิ้งขยะให้ลงถัง",
"🚰": "น้ำดื่ม",
"🚹": "ห้องน้ำชาย",
"🚺": "ห้องน้ำหญิง",
"🚻": "ห้องน้ำ",
"🚼": "ป้ายทารก",
"🚾": "ป้ายห้องน้ำ",
"🛂": "ตรวจพาสปอร์ต",
"🛃": "ศุลกากร",
"🛄": "รับสัมภาระ",
"🛅": "บริการฝากกระเป๋า",
"☢": "กัมมันตรังสี",
"☣": "เชื้อโรคอันตราย",
"⚠": "ป้ายระวัง",
"⛔": "ป้ายห้ามเข้า",
"📵": "ห้ามใช้โทรศัพท์",
"🔞": "ห้ามอายุต่ำกว่า_18_ปี",
"🚫": "ป้ายหวงห้าม",
"🚭": "ป้ายห้ามสูบบุหรี่",
"🚯": "ห้ามทิ้งขยะ",
"🚱": "ห้ามดื่มน้ำ",
"🚳": "ห้ามจักรยาน",
"🚷": "ห้ามคนเดินเท้า",
"🚸": "เด็กๆ_ข้ามถนน",
"↔": "ลูกศรชี้ซ้ายขวา",
"↕": "ลูกศรชี้ขึ้นลง",
"↖": "ลูกศรชี้มุมซ้ายบน",
"↗": "ลูกศรชี้มุมขวาบน",
"↘": "ลูกศรชี้มุมขวาล่าง",
"↙": "ลูกศรชี้มุุมซ้ายล่าง",
"↩": "ลูกศรวนซ้าย",
"↪": "ลูกศรวนขวา",
"➡": "ลูกศรชี้ไปทางขวา",
"⤴": "ลูกศรโค้งขึ้น",
"⤵": "ลูกศรโค้งลง",
"⬅": "ลูกศรชี้ไปทางซ้าย",
"⬆": "ลูกศรชี้ขึ้น",
"⬇": "ลูกศรชี้ลง",
"🔃": "สัญลักษณ์โหลดซ้ำ",
"🔄": "ลูกศรทวนเข็มนาฬิกา",
"🔙": "กลับ",
"🔚": "สิ้นสุด",
"🔛": "เปิด",
"🔜": "สัญลักษณ์เร็วๆ_นี้",
"🔝": "สัญลักษณ์บน",
"☦": "ไม้กางเขนออร์โธดอกซ์",
"☪": "พระจันทร์เสี้ยว_ดาว",
"☮": "เครื่องหมายสันติภาพ",
"☯": "หยินหยาง",
"☸": "ธรรมจักร",
"⚛": "อะตอม",
"✝": "ไม้กางเขนละติน",
"✡": "สตาร์ออฟเดวิด",
"🔯": "ดาว_6_แฉก",
"🕉": "เครื่องหมายโอม",
"🕎": "เชิงเทียน_7_กิ่ง",
"🛐": "ที่บูชา",
"♈": "ราศีเมษ",
"♉": "ราศีพฤษภ",
"♊": "ราศีเมถุน",
"♋": "ราศีกรกฎ",
"♌": "ราศีสิงห์",
"♍": "ราศีกันย์",
"♎": "ราศีตุลย์",
"♏": "ราศีพิจิก",
"♐": "ราศีธนู",
"♑": "ราศีมังกร",
"♒": "ราศีกุมภ์",
"♓": "ราศีมีน",
"⛎": "กลุ่มดาวคนแบกงู",
"⏏": "ปุ่มดีดออก",
"⏩": "เร่งไปข้างหน้า",
"⏪": "ถอยกลับ",
"⏫": "ลูกศรขึ้น",
"⏬": "ลูกศรลง",
"⏭": "เล่นแทร็กถัดไป",
"⏮": "ปุ่มแทร็กก่อนหน้า",
"⏯": "เล่นหรือหยุดชั่วคราว",
"⏸": "ปุ่มหยุุดชั่วคราว",
"⏹": "ปุ่มหยุด",
"⏺": "ปุ่มอัด",
"▶": "เล่น",
"◀": "ย้อนกลับ",
"🎦": "โรงภาพยนตร์",
"📳": "โหมดสั่น",
"📴": "ปิดมือถือ",
"📶": "สัญญาณมือถือ",
"🔀": "ลูกศรไขว้",
"🔁": "เล่นซ้ำ",
"🔂": "เล่นซ้ำเพลงเดียว",
"🔅": "แสงสว่างน้อย",
"🔆": "แสงสว่างมาก",
"🔼": "ปุ่มสามเหลี่ยมขึ้น",
"🔽": "ปุ่มสามเหลี่ยมลง",
"♀": "สัญลักษณ์เพศหญิง",
"♂": "สัญลักษณ์เพศชาย",
"⚧": "สัญลักษณ์คนข้ามเพศ",
"©": "ลิขสิทธิ์",
"®": "จดทะเบียน",
"‼": "เครื่องหมายอัศเจรีย์คู่",
"⁉": "เครื่องหมายอัศเจรีย์_คำถาม",
"™": "เครื่องหมายการค้า",
"☑": "กล่องกาเครื่องหมายมีเครื่องหมายถูก",
"♻": "สัญลักษณ์รีไซเคิล",
"♾": "ไม่มีที่สิ้นสุด",
"⚕": "เครื่องหมายการแพทย์",
"⚜": "สัญลักษณ์ดอกลิลลี่",
"✅": "ปุ่มเครื่องหมายถูก",
"✔": "เครื่องหมายถูก",
"✖": "คูณ",
"✳": "ดอกจัน_8_ซี่",
"✴": "ดาว_8_แฉก",
"❇": "เปล่งประกาย",
"❌": "เครื่องหมายกากบาท",
"❎": "ปุ่มเครื่องหมายกากบาท",
"❓": "เครื่องหมายคำถาม",
"❔": "เครื่องหมายคำถามสีขาว",
"❕": "เครื่องหมายอัศเจรีย์สีขาว",
"❗": "เครื่องหมายอัศเจรีย์สีแดง",
"➕": "บวก",
"➖": "ลบ",
"➗": "หาร",
"➰": "ห่วง",
"➿": "ห่วง_2_รู",
"⭕": "วงกลมกลวงสีแดง",
"〰": "เส้นคลื่น",
"〽": "เครื่องหมายเปลี่ยนท่อน",
"💱": "การแลกเปลี่ยนเงิน",
"💲": "สัญลักษณ์ดอลลาร์",
"📛": "ป้ายชื่อ",
"🔰": "สัญลักษณ์มือใหม่หัดขับ",
"🔱": "ฉมวก",
"🔟": "ปุ่มกดเลข_10",
"ℹ": "แหล่งข้อมูล",
"Ⓜ": "ตัวเอ็มในวงกลม",
"㊗": "ภาษาญี่ปุ่นคำว่า_“แสดงความยินดี”",
"㊙": "ภาษาญี่ปุ่นคำว่า_“ความลับ”",
"🅰": "เลือดกรุ๊ปเอ",
"🅱": "เลือดกรุ๊ปบี",
"🅾": "เลือดกรุ๊ปโอ",
"🅿": "ที่จอดรถ",
"🆎": "เลือดกรุ๊ปเอบี",
"🆑": "ลบข้อมูล",
"🆒": "เย็น",
"🆓": "ฟรี",
"🆔": "หมายเลขประจำตัว",
"🆕": "ใหม่",
"🆖": "ปุ่มเอ็นจี",
"🆗": "โอเค",
"🆘": "ช่วยด้วย",
"🆙": "ขึ้น",
"🆚": "ต่อสู้กับ",
"🈁": "ภาษาญี่ปุ่นคำว่า_“ที่นี่”",
"🈂": "ภาษาญี่ปุ่นคำว่า_“ค่าบริการ”",
"🈚": "ภาษาญี่ปุ่นคำว่า_“ไม่มี”",
"🈯": "ภาษาญี่ปุ่นคำว่า_“จองแล้ว”",
"🈲": "ภาษาญี่ปุ่นคำว่า_“ห้าม”",
"🈳": "ภาษาจีนว่างเปล่า",
"🈴": "ภาษาญี่ปุ่นคำว่า_“ผ่าน”",
"🈵": "ภาษาญี่ปุ่นคำว่า_“เต็ม”",
"🈶": "ภาษาญี่ปุ่นคำว่า_“คิดค่าใช้จ่าย”",
"🈷": "ภาษาญี่ปุ่นคำว่า_“จำนวนต่อเดือน”",
"🈸": "ภาษาญี่ปุ่นคำว่า_“ใบสมัคร”",
"🈹": "ภาษาญี่ปุ่นคำว่า_“ส่วนลด”",
"🈺": "ภาษาญี่ปุ่นคำว่า_“เปิดทำการ”",
"🉐": "ภาษาญี่ปุ่นคำว่า_“ราคาถูก”",
"🉑": "ภาษาญี่ปุ่นคำว่า_“ยอมรับได้”",
"🔠": "อักษรตัวพิมพ์ใหญ่",
"🔡": "อักษรตัวพิมพ์เล็ก",
"🔢": "หมายเลข",
"🔣": "สัญลักษณ์",
"🔤": "ตัวอักษรภาษาอังกฤษ",
"▪": "สี่เหลี่ยมเล็กสีดำ",
"▫": "สี่เหลี่ยมเล็กสีขาว",
"◻": "สี่เหลี่ยมขนาดกลางสีขาว",
"◼": "สี่เหลี่ยมขนาดกลางสีดำ",
"◽": "สี่เหลี่ยมเล็กปานกลางสีขาว",
"◾": "สี่เหลี่ยมเล็กปานกลางสีดำ",
"⚪": "วงกลมสีขาว",
"⚫": "วงกลมสีดำ",
"⬛": "สี่เหลี่ยมใหญ่สีดำ",
"⬜": "สี่เหลี่ยมใหญ่สีขาว",
"💠": "ข้าวหลามตัดมีจุดตรงกลาง",
"🔘": "ปุ่มวิทยุ",
"🔲": "ปุ่มสี่เหลี่ยมขอบดำ",
"🔳": "ปุ่มสี่เหลี่ยมขอบขาว",
"🔴": "วงกลมสีแดง",
"🔵": "วงกลมสีน้ำเงิน",
"🔶": "เพชรใหญ่สีส้ม",
"🔷": "เพชรใหญ่สีน้ำเงิน",
"🔸": "เพชรเล็กสีส้ม",
"🔹": "เพชรเล็กสีน้ำเงิน",
"🔺": "สามเหลี่ยมหงายสีแดง",
"🔻": "สามเหลี่ยมคว่ำสีแดง",
"🟠": "วงกลมสีส้ม",
"🟡": "วงกลมสีเหลือง",
"🟢": "วงกลมสีเขียว",
"🟣": "วงกลมสีม่วง",
"🟤": "วงกลมสีน้ำตาล",
"🟥": "สี่เหลี่ยมสีแดง",
"🟦": "สี่เหลี่ยมสีน้ำเงิน",
"🟧": "สี่เหลี่ยมสีส้ม",
"🟨": "สี่เหลี่ยมสีเหลือง",
"🟩": "สี่เหลี่ยมสีเขียว",
"🟪": "สี่เหลี่ยมสีม่วง",
"🟫": "สี่เหลี่ยมสีน้ำตาล",
"🎌": "ธงไขว้",
"🏁": "ธงตราหมากรุก",
"🏳": "ธงขาว",
"🏴": "ธงดำ",
"🚩": "ธงปักตำแหน่ง",
"#⃣": "ปุ่มกดเลข_#",
"*⃣": "ปุ่มกดเลข_*",
"0⃣": "ปุ่มกดเลข_0",
"1⃣": "ปุ่มกดเลข_1",
"2⃣": "ปุ่มกดเลข_2",
"3⃣": "ปุ่มกดเลข_3",
"4⃣": "ปุ่มกดเลข_4",
"5⃣": "ปุ่มกดเลข_5",
"6⃣": "ปุ่มกดเลข_6",
"7⃣": "ปุ่มกดเลข_7",
"8⃣": "ปุ่มกดเลข_8",
"9⃣": "ปุ่มกดเลข_9",
"🇦🇨": "ธง_เกาะแอสเซนชัน",
"🇦🇩": "ธง_อันดอร์รา",
"🇦🇪": "ธง_สหรัฐอาหรับเอมิเรตส์",
"🇦🇫": "ธง_อัฟกานิสถาน",
"🇦🇬": "ธง_แอนติกา_บาร์บูดา",
"🇦🇮": "ธง_แองกวิลลา",
"🇦🇱": "ธง_แอลเบเนีย",
"🇦🇲": "ธง_อาร์เมเนีย",
"🇦🇴": "ธง_แองโกลา",
"🇦🇶": "ธง_แอนตาร์กติกา",
"🇦🇷": "ธง_อาร์เจนตินา",
"🇦🇸": "ธง_อเมริกันซามัว",
"🇦🇹": "ธง_ออสเตรีย",
"🇦🇺": "ธง_ออสเตรเลีย",
"🇦🇼": "ธง_อารูบา",
"🇦🇽": "ธง_หมู่เกาะโอลันด์",
"🇦🇿": "ธง_อาเซอร์ไบจาน",
"🇧🇦": "ธง_บอสเนีย_เฮอร์เซโกวีนา",
"🇧🇧": "ธง_บาร์เบโดส",
"🇧🇩": "ธง_บังกลาเทศ",
"🇧🇪": "ธง_เบลเยียม",
"🇧🇫": "ธง_บูร์กินาฟาโซ",
"🇧🇬": "ธง_บัลแกเรีย",
"🇧🇭": "ธง_บาห์เรน",
"🇧🇮": "ธง_บุรุนดี",
"🇧🇯": "ธง_เบนิน",
"🇧🇱": "ธง_เซนต์บาร์เธเลมี",
"🇧🇲": "ธง_เบอร์มิวดา",
"🇧🇳": "ธง_บรูไน",
"🇧🇴": "ธง_โบลิเวีย",
"🇧🇶": "ธง_เนเธอร์แลนด์แคริบเบียน",
"🇧🇷": "ธง_บราซิล",
"🇧🇸": "ธง_บาฮามาส",
"🇧🇹": "ธง_ภูฏาน",
"🇧🇻": "ธง_เกาะบูเว",
"🇧🇼": "ธง_บอตสวานา",
"🇧🇾": "ธง_เบลารุส",
"🇧🇿": "ธง_เบลีซ",
"🇨🇦": "ธง_แคนาดา",
"🇨🇨": "ธง_หมู่เกาะโคโคส_(คีลิง)",
"🇨🇩": "ธง_คองโก_-_กินชาซา",
"🇨🇫": "ธง_สาธารณรัฐแอฟริกากลาง",
"🇨🇬": "ธง_คองโก_-_บราซซาวิล",
"🇨🇭": "ธง_สวิตเซอร์แลนด์",
"🇨🇮": "ธง_โกตดิวัวร์",
"🇨🇰": "ธง_หมู่เกาะคุก",
"🇨🇱": "ธง_ชิลี",
"🇨🇲": "ธง_แคเมอรูน",
"🇨🇳": "ธง_จีน",
"🇨🇴": "ธง_โคลอมเบีย",
"🇨🇵": "ธง_เกาะคลิปเปอร์ตัน",
"🇨🇷": "ธง_คอสตาริกา",
"🇨🇺": "ธง_คิวบา",
"🇨🇻": "ธง_เคปเวิร์ด",
"🇨🇼": "ธง_คูราเซา",
"🇨🇽": "ธง_เกาะคริสต์มาส",
"🇨🇾": "ธง_ไซปรัส",
"🇨🇿": "ธง_เช็ก",
"🇩🇪": "ธง_เยอรมนี",
"🇩🇬": "ธง_ดิเอโกการ์เซีย",
"🇩🇯": "ธง_จิบูตี",
"🇩🇰": "ธง_เดนมาร์ก",
"🇩🇲": "ธง_โดมินิกา",
"🇩🇴": "ธง_สาธารณรัฐโดมินิกัน",
"🇩🇿": "ธง_แอลจีเรีย",
"🇪🇦": "ธง_เซวตา_เมลียา",
"🇪🇨": "ธง_เอกวาดอร์",
"🇪🇪": "ธง_เอสโตเนีย",
"🇪🇬": "ธง_อียิปต์",
"🇪🇭": "ธง_ซาฮาราตะวันตก",
"🇪🇷": "ธง_เอริเทรีย",
"🇪🇸": "ธง_สเปน",
"🇪🇹": "ธง_เอธิโอเปีย",
"🇪🇺": "ธง_สหภาพยุโรป",
"🇫🇮": "ธง_ฟินแลนด์",
"🇫🇯": "ธง_ฟิจิ",
"🇫🇰": "ธง_หมู่เกาะฟอล์กแลนด์",
"🇫🇲": "ธง_ไมโครนีเซีย",
"🇫🇴": "ธง_หมู่เกาะแฟโร",
"🇫🇷": "ธง_ฝรั่งเศส",
"🇬🇦": "ธง_กาบอง",
"🇬🇧": "ธง_สหราชอาณาจักร",
"🇬🇩": "ธง_เกรเนดา",
"🇬🇪": "ธง_จอร์เจีย",
"🇬🇫": "ธง_เฟรนช์เกียนา",
"🇬🇬": "ธง_เกิร์นซีย์",
"🇬🇭": "ธง_กานา",
"🇬🇮": "ธง_ยิบรอลตาร์",
"🇬🇱": "ธง_กรีนแลนด์",
"🇬🇲": "ธง_แกมเบีย",
"🇬🇳": "ธง_กินี",
"🇬🇵": "ธง_กวาเดอลูป",
"🇬🇶": "ธง_อิเควทอเรียลกินี",
"🇬🇷": "ธง_กรีซ",
"🇬🇸": "ธง_เกาะเซาท์จอร์เจีย_หมู่เกาะเซาท์แซนด์วิช",
"🇬🇹": "ธง_กัวเตมาลา",
"🇬🇺": "ธง_กวม",
"🇬🇼": "ธง_กินี-บิสเซา",
"🇬🇾": "ธง_กายอานา",
"🇭🇰": "ธง_เขตปกครองพิเศษฮ่องกงแห่งสาธารณรัฐประชาชนจีน",
"🇭🇲": "ธง_เกาะเฮิร์ด_หมู่เกาะแมกดอนัลด์",
"🇭🇳": "ธง_ฮอนดูรัส",
"🇭🇷": "ธง_โครเอเชีย",
"🇭🇹": "ธง_เฮติ",
"🇭🇺": "ธง_ฮังการี",
"🇮🇨": "ธง_หมู่เกาะคานารี",
"🇮🇩": "ธง_อินโดนีเซีย",
"🇮🇪": "ธง_ไอร์แลนด์",
"🇮🇱": "ธง_อิสราเอล",
"🇮🇲": "ธง_เกาะแมน",
"🇮🇳": "ธง_อินเดีย",
"🇮🇴": "ธง_บริติชอินเดียนโอเชียนเทร์ริทอรี",
"🇮🇶": "ธง_อิรัก",
"🇮🇷": "ธง_อิหร่าน",
"🇮🇸": "ธง_ไอซ์แลนด์",
"🇮🇹": "ธง_อิตาลี",
"🇯🇪": "ธง_เจอร์ซีย์",
"🇯🇲": "ธง_จาเมกา",
"🇯🇴": "ธง_จอร์แดน",
"🇯🇵": "ธง_ญี่ปุ่น",
"🇰🇪": "ธง_เคนยา",
"🇰🇬": "ธง_คีร์กีซสถาน",
"🇰🇭": "ธง_กัมพูชา",
"🇰🇮": "ธง_คิริบาส",
"🇰🇲": "ธง_คอโมโรส",
"🇰🇳": "ธง_เซนต์คิตส์_เนวิส",
"🇰🇵": "ธง_เกาหลีเหนือ",
"🇰🇷": "ธง_เกาหลีใต้",
"🇰🇼": "ธง_คูเวต",
"🇰🇾": "ธง_หมู่เกาะเคย์แมน",
"🇰🇿": "ธง_คาซัคสถาน",
"🇱🇦": "ธง_ลาว",
"🇱🇧": "ธง_เลบานอน",
"🇱🇨": "ธง_เซนต์ลูเซีย",
"🇱🇮": "ธง_ลิกเตนสไตน์",
"🇱🇰": "ธง_ศรีลังกา",
"🇱🇷": "ธง_ไลบีเรีย",
"🇱🇸": "ธง_เลโซโท",
"🇱🇹": "ธง_ลิทัวเนีย",
"🇱🇺": "ธง_ลักเซมเบิร์ก",
"🇱🇻": "ธง_ลัตเวีย",
"🇱🇾": "ธง_ลิเบีย",
"🇲🇦": "ธง_โมร็อกโก",
"🇲🇨": "ธง_โมนาโก",
"🇲🇩": "ธง_มอลโดวา",
"🇲🇪": "ธง_มอนเตเนโกร",
"🇲🇫": "ธง_เซนต์มาร์ติน",
"🇲🇬": "ธง_มาดากัสการ์",
"🇲🇭": "ธง_หมู่เกาะมาร์แชลล์",
"🇲🇰": "ธง_มาซิโดเนียเหนือ",
"🇲🇱": "ธง_มาลี",
"🇲🇲": "ธง_เมียนมา_(พม่า)",
"🇲🇳": "ธง_มองโกเลีย",
"🇲🇴": "ธง_เขตปกครองพิเศษมาเก๊าแห่งสาธารณรัฐประชาชนจีน",
"🇲🇵": "ธง_หมู่เกาะนอร์เทิร์นมาเรียนา",
"🇲🇶": "ธง_มาร์ตินีก",
"🇲🇷": "ธง_มอริเตเนีย",
"🇲🇸": "ธง_มอนต์เซอร์รัต",
"🇲🇹": "ธง_มอลตา",
"🇲🇺": "ธง_มอริเชียส",
"🇲🇻": "ธง_มัลดีฟส์",
"🇲🇼": "ธง_มาลาวี",
"🇲🇽": "ธง_เม็กซิโก",
"🇲🇾": "ธง_มาเลเซีย",
"🇲🇿": "ธง_โมซัมบิก",
"🇳🇦": "ธง_นามิเบีย",
"🇳🇨": "ธง_นิวแคลิโดเนีย",
"🇳🇪": "ธง_ไนเจอร์",
"🇳🇫": "ธง_เกาะนอร์ฟอล์ก",
"🇳🇬": "ธง_ไนจีเรีย",
"🇳🇮": "ธง_นิการากัว",
"🇳🇱": "ธง_เนเธอร์แลนด์",
"🇳🇴": "ธง_นอร์เวย์",
"🇳🇵": "ธง_เนปาล",
"🇳🇷": "ธง_นาอูรู",
"🇳🇺": "ธง_นีอูเอ",
"🇳🇿": "ธง_นิวซีแลนด์",
"🇴🇲": "ธง_โอมาน",
"🇵🇦": "ธง_ปานามา",
"🇵🇪": "ธง_เปรู",
"🇵🇫": "ธง_เฟรนช์โปลินีเซีย",
"🇵🇬": "ธง_ปาปัวนิวกินี",
"🇵🇭": "ธง_ฟิลิปปินส์",
"🇵🇰": "ธง_ปากีสถาน",
"🇵🇱": "ธง_โปแลนด์",
"🇵🇲": "ธง_แซงปีแยร์_มีเกอลง",
"🇵🇳": "ธง_หมู่เกาะพิตแคร์น",
"🇵🇷": "ธง_เปอร์โตริโก",
"🇵🇸": "ธง_ดินแดนปาเลสไตน์",
"🇵🇹": "ธง_โปรตุเกส",
"🇵🇼": "ธง_ปาเลา",
"🇵🇾": "ธง_ปารากวัย",
"🇶🇦": "ธง_กาตาร์",
"🇷🇪": "ธง_เรอูนียง",
"🇷🇴": "ธง_โรมาเนีย",
"🇷🇸": "ธง_เซอร์เบีย",
"🇷🇺": "ธง_รัสเซีย",
"🇷🇼": "ธง_รวันดา",
"🇸🇦": "ธง_ซาอุดีอาระเบีย",
"🇸🇧": "ธง_หมู่เกาะโซโลมอน",
"🇸🇨": "ธง_เซเชลส์",
"🇸🇩": "ธง_ซูดาน",
"🇸🇪": "ธง_สวีเดน",
"🇸🇬": "ธง_สิงคโปร์",
"🇸🇭": "ธง_เซนต์เฮเลนา",
"🇸🇮": "ธง_สโลวีเนีย",
"🇸🇯": "ธง_สฟาลบาร์_ยานไมเอน",
"🇸🇰": "ธง_สโลวะเกีย",
"🇸🇱": "ธง_เซียร์ราลีโอน",
"🇸🇲": "ธง_ซานมาริโน",
"🇸🇳": "ธง_เซเนกัล",
"🇸🇴": "ธง_โซมาเลีย",
"🇸🇷": "ธง_ซูรินาเม",
"🇸🇸": "ธง_ซูดานใต้",
"🇸🇹": "ธง_เซาตูเม_ปรินซิปี",
"🇸🇻": "ธง_เอลซัลวาดอร์",
"🇸🇽": "ธง_ซินต์มาร์เทน",
"🇸🇾": "ธง_ซีเรีย",
"🇸🇿": "ธง_เอสวาตีนี",
"🇹🇦": "ธง_ทริสตันดาคูนา",
"🇹🇨": "ธง_หมู่เกาะเติกส์_หมู่เกาะเคคอส",
"🇹🇩": "ธง_ชาด",
"🇹🇫": "ธง_เฟรนช์เซาเทิร์นเทร์ริทอรีส์",
"🇹🇬": "ธง_โตโก",
"🇹🇭": "ธง_ไทย",
"🇹🇯": "ธง_ทาจิกิสถาน",
"🇹🇰": "ธง_โตเกเลา",
"🇹🇱": "ธง_ติมอร์-เลสเต",
"🇹🇲": "ธง_เติร์กเมนิสถาน",
"🇹🇳": "ธง_ตูนิเซีย",
"🇹🇴": "ธง_ตองกา",
"🇹🇷": "ธง_ตุรกี",
"🇹🇹": "ธง_ตรินิแดด_โตเบโก",
"🇹🇻": "ธง_ตูวาลู",
"🇹🇼": "ธง_ไต้หวัน",
"🇹🇿": "ธง_แทนซาเนีย",
"🇺🇦": "ธง_ยูเครน",
"🇺🇬": "ธง_ยูกันดา",
"🇺🇲": "ธง_หมู่เกาะรอบนอกของสหรัฐอเมริกา",
"🇺🇳": "ธง_สหประชาชาติ",
"🇺🇸": "ธง_สหรัฐอเมริกา",
"🇺🇾": "ธง_อุรุกวัย",
"🇺🇿": "ธง_อุซเบกิสถาน",
"🇻🇦": "ธง_นครวาติกัน",
"🇻🇨": "ธง_เซนต์วินเซนต์_เกรนาดีนส์",
"🇻🇪": "ธง_เวเนซุเอลา",
"🇻🇬": "ธง_หมู่เกาะบริติชเวอร์จิน",
"🇻🇮": "ธง_หมู่เกาะเวอร์จินของสหรัฐอเมริกา",
"🇻🇳": "ธง_เวียดนาม",
"🇻🇺": "ธง_วานูอาตู",
"🇼🇫": "ธง_วาลลิส_ฟุตูนา",
"🇼🇸": "ธง_ซามัว",
"🇽🇰": "ธง_โคโซโว",
"🇾🇪": "ธง_เยเมน",
"🇾🇹": "ธง_มายอต",
"🇿🇦": "ธง_แอฟริกาใต้",
"🇿🇲": "ธง_แซมเบีย",
"🇿🇼": "ธง_ซิมบับเว",
"👁🗨": "ตาในลูกโป่งคำพูด",
"👨🦰": "ผู้ชาย_ผมแดง",
"👨🦱": "ผู้ชาย_ผมหยิก",
"👨🦲": "ผู้ชาย_หัวล้าน",
"👨🦳": "ผู้ชาย_ผมขาว",
"👩🦰": "ผู้หญิง_ผมแดง",
"👩🦱": "ผู้หญิง_ผมหยิก",
"👩🦲": "ผู้หญิง_หัวล้าน",
"👩🦳": "ผู้หญิง_ผมขาว",
"👱♀": "ผู้หญิงผมทอง",
"👱♂": "ผู้ชายผมทอง",
"🧑🦰": "คน_ผมแดง",
"🧑🦱": "คน_ผมหยิก",
"🧑🦲": "คน_หัวล้าน",
"🧑🦳": "คน_ผมขาว",
"💁♀": "ผู้หญิงแบมือ",
"💁♂": "ผู้ชายแบมือ",
"🙅♀": "ผู้หญิงทำท่าไม่โอเค",
"🙅♂": "ผู้ชายทำท่าไม่โอเค",
"🙆♀": "ผู้หญิงทำท่าโอเค",
"🙆♂": "ผู้ชายทำท่าโอเค",
"🙇♀": "ผู้หญิงหมอบคำนับ",
"🙇♂": "ผู้ชายหมอบคำนับ",
"🙋♀": "ผู้หญิงยกมือ",
"🙋♂": "ผู้ชายยกมือ",
"🙍♀": "ผู้หญิงหน้าบึ้ง",
"🙍♂": "ผู้ชายหน้าบึ้ง",
"🙎♀": "ผู้หญิงโกรธ",
"🙎♂": "ผู้ชายโกรธ",
"🤦♀": "ผู้หญิงเอามือก่ายหน้าผาก",
"🤦♂": "ผู้ชายเอามือก่ายหน้าผาก",
"🤷♀": "ผู้หญิงยักไหล่",
"🤷♂": "ผู้ชายยักไหล่",
"🧏♀": "ผู้หญิงหูหนวก",
"🧏♂": "ผู้ชายหูหนวก",
"👨⚕": "หมอชาย",
"👨⚖": "ผู้พิพากษาชาย",
"👨✈": "นักบินชาย",
"👨🌾": "ชาวนาชาย",
"👨🍳": "พ่อครัว",
"👨🍼": "ผู้ชายให้นมลูก",
"👨🎓": "นักเรียนชาย",
"👨🎤": "นักร้องชาย",
"👨🎨": "ศิลปินชาย",
"👨🏫": "ครูชาย",
"👨🏭": "พนักงานโรงงานชาย",
"👨💻": "ผู้เชี่ยวชาญด้านเทคโนโลยีชาย",
"👨💼": "พนักงานบริษัทชาย",
"👨🔧": "ช่างซ่อมชาย",
"👨🔬": "นักวิทยาศาสตร์ชาย",
"👨🚀": "นักบินอวกาศชาย",
"👨🚒": "พนักงานดับเพลิงชาย",
"👩⚕": "หมอหญิง",
"👩⚖": "ผู้พิพากษาหญิง",
"👩✈": "นักบินหญิง",
"👩🌾": "ชาวนาหญิง",
"👩🍳": "แม่ครัว",
"👩🍼": "ผู้หญิงให้นมลูก",
"👩🎓": "นักเรียนหญิง",
"👩🎤": "นักร้องหญิง",
"👩🎨": "ศิลปินหญิง",
"👩🏫": "ครูหญิง",
"👩🏭": "พนักงานโรงงานหญิง",
"👩💻": "ผู้เชี่ยวชาญด้านเทคโนโลยีหญิง",
"👩💼": "พนักงานบริษัทหญิง",
"👩🔧": "ช่างซ่อมหญิง",
"👩🔬": "นักวิทยาศาสตร์หญิง",
"👩🚀": "นักบินอวกาศหญิง",
"👩🚒": "พนักงานดับเพลิงหญิง",
"👮♀": "ตำรวจหญิง",
"👮♂": "ตำรวจชาย",
"👰♀": "ผู้หญิงที่มีผ้าคลุมหน้า",
"👰♂": "ผู้ชายที่มีผ้าคลุมหน้า",
"👳♀": "ผู้หญิงโพกหัว",
"👳♂": "ผู้ชายโพกหัว",
"👷♀": "พนักงานก่อสร้างหญิง",
"👷♂": "พนักงานก่อสร้างชาย",
"💂♀": "องครักษ์หญิง",
"💂♂": "องครักษ์ชาย",
"🕵♀": "นักสืบหญิง",
"🕵♂": "นักสืบชาย",
"🤵♀": "ผู้หญิงใส่ทักซิโด้",
"🤵♂": "ผู้ชายใส่ทักซิโด้",
"🧑⚕": "หมอ",
"🧑⚖": "ผู้พิพากษา",
"🧑✈": "นักบิน",
"🧑🌾": "ชาวนา",
"🧑🍳": "กุ๊ก",
"🧑🍼": "คนให้นมลูก",
"🧑🎓": "บัณฑิต",
"🧑🎤": "นักร้อง",
"🧑🎨": "ศิลปิน",
"🧑🏫": "ครู",
"🧑🏭": "พนักงานโรงงาน",
"🧑💻": "ผู้เชี่ยวชาญด้านเทคโนโลยี",
"🧑💼": "พนักงานออฟฟิศ",
"🧑🔧": "ช่างกล",
"🧑🔬": "นักวิทยาศาสตร์",
"🧑🚀": "นักบินอวกาศ",
"🧑🚒": "พนักงานดับเพลิง",
"🦸♀": "ยอดหญิง",
"🦸♂": "พระเอก",
"🦹♀": "นางร้าย",
"🦹♂": "ตัวโกง",
"🧑🎄": "ซานตาคลอส",
"🧙♀": "แม่มด",
"🧙♂": "พ่อมด",
"🧚♀": "เทพธิดา",
"🧚♂": "เทพบุตร",
"🧛♀": "แวมไพร์ผู้หญิง",
"🧛♂": "แวมไพร์ผู้ชาย",
"🧜♀": "เงือก",
"🧜♂": "เงือกชาย",
"🧝♀": "เอลฟ์ผู้หญิง",
"🧝♂": "เอลฟ์ผู้ชาย",
"🧞♀": "ยักษ์จีนี่หญิง",
"🧞♂": "ยักษ์จีนี่ชาย",
"🧟♀": "ซอมบี้ผู้หญิง",
"🧟♂": "ซอมบี้ผู้ชาย",
"🏃♀": "ผู้หญิงวิ่ง",
"🏃♂": "ผู้ชายวิ่ง",
"👨🦯": "ผู้ชายเดินถือไม้เท้านำทาง",
"👨🦼": "ผู้ชายนั่งวีลแชร์ไฟฟ้า",
"👨🦽": "ผู้ชายนั่งวีลแชร์ธรรมดา",
"👩🦯": "ผู้หญิงเดินถือไม้เท้านำทาง",
"👩🦼": "ผู้หญิงนั่งวีลแชร์ไฟฟ้า",
"👩🦽": "ผู้หญิงนั่งวีลแชร์ธรรมดา",
"👯♀": "ผู้หญิงในชุดหูกระต่าย",
"👯♂": "ผู้ชายในชุดหูกระต่าย",
"💆♀": "ผู้หญิงกำลังนวดหน้า",
"💆♂": "ผู้ชายกำลังนวดหน้า",
"💇♀": "ผู้หญิงกำลังตัดผม",
"💇♂": "ผู้ชายกำลังตัดผม",
"🚶♀": "ผู้หญิงเดิน",
"🚶♂": "ผู้ชายเดิน",
"🧍♀": "ผู้หญิงกำลังยืน",
"🧍♂": "ผู้ชายกำลังยืน",
"🧎♀": "ผู้หญิงกำลังคุกเข่า",
"🧎♂": "ผู้ชายกำลังคุกเข่า",
"🧑🦯": "คนเดินถือไม้เท้านำทาง",
"🧑🦼": "คนนั่งวีลแชร์ไฟฟ้า",
"🧑🦽": "คนนั่งวีลแชร์ธรรมดา",
"🧖♀": "ผู้หญิงในห้องอบไอน้ำ",
"🧖♂": "ผู้ชายในห้องอบไอน้ำ",
"🧗♀": "ผู้หญิงไต่เขา",
"🧗♂": "ผู้ชายไต่เขา",
"⛹♀": "ผู้หญิงเล่นบอล",
"⛹♂": "ผู้ชายเล่นบอล",
"🏄♀": "ผู้หญิงโต้คลื่น",
"🏄♂": "ผู้ชายโต้คลื่น",
"🏊♀": "ผู้หญิงว่ายน้ำ",
"🏊♂": "ผู้ชายว่ายน้ำ",
"🏋♀": "ผู้หญิงยกน้ำหนัก",
"🏋♂": "ผู้ชายยกน้ำหนัก",
"🏌♀": "ผู้หญิงตีกอล์ฟ",
"🏌♂": "ผู้ชายตีกอล์ฟ",
"🚣♀": "ผู้หญิงพายเรือ",
"🚣♂": "ผู้ชายพายเรือ",
"🚴♀": "ผู้หญิงปั่นจักรยาน",
"🚴♂": "ผู้ชายปั่นจักรยาน",
"🚵♀": "ผู้หญิงปั่นจักรยานเสือภูเขา",
"🚵♂": "ผู้ชายปั่นจักรยานเสือภูเขา",
"🤸♀": "ผู้หญิงตีลังกา",
"🤸♂": "ผู้ชายตีลังกา",
"🤹♀": "ผู้หญิงเล่นจั๊กกลิ้ง",
"🤹♂": "ผู้ชายเล่นจั๊กกลิ้ง",
"🤼♀": "ผู้หญิงเล่นมวยปล้ำ",
"🤼♂": "ผู้ชายเล่นมวยปล้ำ",
"🤽♀": "ผู้หญิงเล่นโปโลน้ำ",
"🤽♂": "ผู้ชายเล่นโปโลน้ำ",
"🤾♀": "ผู้หญิงเล่นแฮนด์บอล",
"🤾♂": "ผู้ชายเล่นแฮนด์บอล",
"🧘♀": "ผู้หญิงนั่งสมาธิ",
"🧘♂": "ผู้ชายนั่งสมาธิ",
"👨👦": "ครอบครัว_ผู้ชาย_เด็กชาย",
"👨👧": "ครอบครัว_ผู้ชาย_เด็กหญิง",
"👩👦": "ครอบครัว_ผู้หญิง_เด็กชาย",
"👩👧": "ครอบครัว_ผู้หญิง_เด็กหญิง",
"🐈⬛": "แมวดำ",
"🐕🦺": "สุนัขบริการ",
"🐻❄": "หมีขั้วโลก",
"🏳⚧": "ธงคนข้ามเพศ",
"🏳🌈": "ธงสีรุ้ง",
"🏴☠": "ธงโจรสลัด",
"👨❤👨": "คู่รัก_ผู้ชาย_ผู้ชาย",
"👨👦👦": "ครอบครัว_ผู้ชาย_เด็กชาย_เด็กชาย",
"👨👧👦": "ครอบครัว_ผู้ชาย_เด็กหญิง_เด็กชาย",
"👨👧👧": "ครอบครัว_ผู้ชาย_เด็กหญิง_เด็กหญิง",
"👨👨👦": "ครอบครัว_ผู้ชาย_ผู้ชาย_เด็กชาย",
"👨👨👧": "ครอบครัว_ผู้ชาย_ผู้ชาย_เด็กหญิง",
"👨👩👦": "ครอบครัว_ผู้ชาย_ผู้หญิง_เด็กชาย",
"👨👩👧": "ครอบครัว_ผู้ชาย_ผู้หญิง_เด็กหญิง",
"👩❤👨": "คู่รัก_ผู้หญิง_ผู้ชาย",
"👩❤👩": "คู่รัก_ผู้หญิง_ผู้หญิง",
"👩👦👦": "ครอบครัว_ผู้หญิง_เด็กชาย_เด็กชาย",
"👩👧👦": "ครอบครัว_ผู้หญิง_เด็กหญิง_เด็กชาย",
"👩👧👧": "ครอบครัว_ผู้หญิง_เด็กหญิง_เด็กหญิง",
"👩👩👦": "ครอบครัว_ผู้หญิง_ผู้หญิง_เด็กชาย",
"👩👩👧": "ครอบครัว_ผู้หญิง_ผู้หญิง_เด็กหญิง",
"🧑🤝🧑": "คนจับมือกัน",
"👨❤💋👨": "จูบ_ผู้ชาย_ผู้ชาย",
"👨👨👦👦": "ครอบครัว_ผู้ชาย_ผู้ชาย_เด็กชาย_เด็กชาย",
"👨👨👧👦": "ครอบครัว_ผู้ชาย_ผู้ชาย_เด็กหญิง_เด็กชาย",
"👨👨👧👧": "ครอบครัว_ผู้ชาย_ผู้ชาย_เด็กหญิง_เด็กหญิง",
"👨👩👦👦": "ครอบครัว_ผู้ชาย_ผู้หญิง_เด็กชาย_เด็กชาย",
"👨👩👧👦": "ครอบครัว_ผู้ชาย_ผู้หญิง_เด็กหญิง_เด็กชาย",
"👨👩👧👧": "ครอบครัว_ผู้ชาย_ผู้หญิง_เด็กหญิง_เด็กหญิง",
"👩❤💋👨": "จูบ_ผู้หญิง_ผู้ชาย",
"👩❤💋👩": "จูบ_ผู้หญิง_ผู้หญิง",
"👩👩👦👦": "ครอบครัว_ผู้หญิง_ผู้หญิง_เด็กชาย_เด็กชาย",
"👩👩👧👦": "ครอบครัว_ผู้หญิง_ผู้หญิง_เด็กหญิง_เด็กชาย",
"👩👩👧👧": "ครอบครัว_ผู้หญิง_ผู้หญิง_เด็กหญิง_เด็กหญิง",
"🏴": "ธง_อังกฤษ",
"🏴": "ธง_สกอตแลนด์",
"🏴": "ธง_เวลส์",
}
_th_emoji = {v: k for k, v in _emoji_th.items()}
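# Build a regex that matches any known emoji; sorting the keys by length
# (longest first) ensures multi-codepoint emoji sequences match before
# their shorter single-codepoint components.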
_emojis = sorted(_emoji_th.keys(), key=len, reverse=True)
_emoji_regex = re.compile("|".join(map(re.escape, _emojis)))
_delimiter = ":"
def emoji_to_thai(text: str, delimiters=(_delimiter, _delimiter)) -> str:
"""
This function converts emoji to their Thai meanings.
:param str text: text containing emoji
:param tuple delimiters: a pair of strings placed before and after each converted meaning; defaults to (":", ":")
:return: text with emoji converted to their Thai meanings
:rtype: str
:Example:
::
from pythainlp.util import emoji_to_thai
emoji_to_thai("จะมานั่งรถเมล์เหมือนผมก็ได้นะครับ ใกล้ชิดประชาชนดี 😀")
# output: จะมานั่งรถเมล์เหมือนผมก็ได้นะครับ
ใกล้ชิดประชาชนดี :หน้ายิ้มยิงฟัน:
emoji_to_thai("หิวข้าวอยากกินอาหารญี่ปุ่น 🍣")
# output: หิวข้าวอยากกินอาหารญี่ปุ่น :ซูชิ:
emoji_to_thai("🇹🇭 นี่คือธงประเทศไทย")
# output: :ธง_ไทย: นี่คือธงประเทศไทย
"""
return _emoji_regex.sub(
lambda match: delimiters[0]
+ _emoji_th[match.group(0)]
+ delimiters[1],
text,
)
| 46,483 | 23.791467 | 77 | py |
pythainlp-dev/pythainlp/util/encoding.py | pythainlp-dev/pythainlp/util/encoding.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
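# Note: the function below repairs mojibake rather than transcoding bytes:
# a TIS-620 string that was wrongly decoded as cp1252 is re-encoded to its
# original bytes and then decoded again with the TIS-620 codec.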
def tis620_to_utf8(text: str) -> str:
"""
Convert TIS-620 text that was mistakenly decoded as cp1252 back to proper Thai (UTF-8) text.
:param str text: garbled text whose TIS-620 bytes were decoded as cp1252
:return: the recovered Thai text
:rtype: str
:Example:
::
from pythainlp.util import tis620_to_utf8
tis620_to_utf8("¡ÃзÃÇ§ÍØµÊÒË¡ÃÃÁ")
# output: 'กระทรวงอุตสาหกรรม'
"""
return text.encode("cp1252", "ignore").decode("tis-620")
| 1,022 | 30.96875 | 74 | py |
pythainlp-dev/pythainlp/util/keyboard.py | pythainlp-dev/pythainlp/util/keyboard.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Functions related to keyboard layout.
"""
EN_TH_KEYB_PAIRS = {
"Z": "(",
"z": "ผ",
"X": ")",
"x": "ป",
"C": "ฉ",
"c": "แ",
"V": "ฮ",
"v": "อ",
"B": "\u0e3a", # พินทุ
"b": "\u0e34", # สระอุ
"N": "\u0e4c", # การันต์
"n": "\u0e37", # สระอือ
"M": "?",
"m": "ท",
"<": "ฒ",
",": "ม",
">": "ฬ",
".": "ใ",
"?": "ฦ",
"/": "ฝ",
"A": "ฤ",
"a": "ฟ",
"S": "ฆ",
"s": "ห",
"D": "ฏ",
"d": "ก",
"F": "โ",
"f": "ด",
"G": "ฌ",
"g": "เ",
"H": "\u0e47", # ไม้ไต่คู้
"h": "\u0e49", # ไม้โท
"J": "\u0e4b", # ไม้จัตวา
"j": "\u0e48", # ไม้เอก
"K": "ษ",
"k": "า",
"L": "ศ",
"l": "ส",
":": "ซ",
";": "ว",
'"': ".",
"'": "ง",
"Q": "๐",
"q": "ๆ",
"W": '"',
"w": "ไ",
"E": "ฎ",
"e": "\u0e33", # สระอำ
"R": "ฑ",
"r": "พ",
"T": "ธ",
"t": "ะ",
"Y": "\u0e4d", # นิคหิต
"y": "\u0e31", # ไม้หันอากาศ
"U": "\u0e4a", # ไม้ตรี
"u": "\u0e35", # สระอ ี
"I": "ณ",
"i": "ร",
"O": "ฯ",
"o": "น",
"P": "ญ",
"p": "ย",
"{": "ฐ",
"[": "บ",
"}": ",",
"]": "ล",
"|": "ฅ",
"\\": "ฃ",
"~": "%",
"`": "_",
"@": "๑",
"2": "/",
"#": "๒",
"3": "-",
"$": "๓",
"4": "ภ",
"%": "๔",
"5": "ถ",
"^": "\u0e39", # สระอู
"6": "\u0e38", # สระอุ
"&": "฿",
"7": "\u0e36", # สระอึ
"*": "๕",
"8": "ค",
"(": "๖",
"9": "ต",
")": "๗",
"0": "จ",
"_": "๘",
"-": "ข",
"+": "๙",
"=": "ช",
}
TH_EN_KEYB_PAIRS = {v: k for k, v in EN_TH_KEYB_PAIRS.items()}
EN_TH_TRANSLATE_TABLE = str.maketrans(EN_TH_KEYB_PAIRS)
TH_EN_TRANSLATE_TABLE = str.maketrans(TH_EN_KEYB_PAIRS)
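# Key positions on the modified TIS 820-2531 (Kedmanee-based) layout,
# one inner list per physical keyboard row, for the unshifted and shifted
# layers respectively. thai_keyboard_dist() below looks characters up here
# to get their (row, column) coordinates.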
TIS_820_2531_MOD = [
["-", "ๅ", "/", "", "_", "ภ", "ถ", "ุ", "ึ", "ค", "ต", "จ", "ข", "ช"],
["ๆ", "ไ", "ำ", "พ", "ะ", "ั", "ี", "ร", "น", "ย", "บ", "ล", "ฃ"],
["ฟ", "ห", "ก", "ด", "เ", "้", "่", "า", "ส", "ว", "ง"],
["ผ", "ป", "แ", "อ", "ิ", "ื", "ท", "ม", "ใ", "ฝ"],
]
TIS_820_2531_MOD_SHIFT = [
["%", "+", "๑", "๒", "๓", "๔", "ู", "฿", "๕", "๖", "๗", "๘", "๙"],
["๐", '"', "ฎ", "ฑ", "ธ", "ํ", "๊", "ณ", "ฯ", "ญ", "ฐ", ",", "ฅ"],
["ฤ", "ฆ", "ฏ", "โ", "ฌ", "็", "๋", "ษ", "ศ", "ซ", "."],
["(", ")", "ฉ", "ฮ", "ฺ", "์", "?", "ฒ", "ฬ", "ฦ"],
]
def eng_to_thai(text: str) -> str:
"""
Corrects the given text that was incorrectly typed using English-US
Qwerty keyboard layout to the originally intended keyboard layout
that is the Thai Kedmanee keyboard.
:param str text: incorrect text input (type Thai with English keyboard)
:return: Thai text where incorrect typing with
a keyboard layout is corrected
:rtype: str
:Example:
Intentionally type "ธนาคารแห่งประเทศไทย", but got "Tok8kicsj'xitgmLwmp"::
from pythainlp.util import eng_to_thai
eng_to_thai("Tok8kicsj'xitgmLwmp")
# output: ธนาคารแห่งประเทศไทย
"""
return text.translate(EN_TH_TRANSLATE_TABLE)
def thai_to_eng(text: str) -> str:
"""
Corrects the given text that was incorrectly typed using Thai Kedmanee
keyboard layout to the originally intended keyboard layout
that is the English-US Qwerty keyboard.
:param str text: incorrect text input (type English with Thai keyboard)
:return: English text where incorrect typing with
a keyboard layout is corrected
:rtype: str
:Example:
Intentionally type "Bank of Thailand", but got "ฺฟืา นด ธ้ฟรสฟืก"::
from pythainlp.util import thai_to_eng
thai_to_eng("ฺฟืา นด ธ้ฟรสฟืก")
# output: 'Bank of Thailand'
"""
return text.translate(TH_EN_TRANSLATE_TABLE)
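# Illustrative round trip: thai_to_eng("ไทย") gives "wmp", and
# eng_to_thai("wmp") gives "ไทย" back.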
def thai_keyboard_dist(c1: str, c2: str, shift_dist: float = 0.0) -> float:
"""
Calculate euclidean distance between two Thai characters
according to their location on a Thai keyboard layout.
A modified TIS 820-2531 standard keyboard layout, which is developed
from Kedmanee layout and is the most commonly used Thai keyboard layout,
is used in distance calculation.
The modified TIS 820-2531 is TIS 820-2531 with few key extensions
proposed in TIS 820-2536 draft. See Figure 4, notice grey keys, in
https://www.nectec.or.th/it-standards/keyboard_layout/thai-key.html
Noted that the latest TIS 820-2538 has slight changes in layout from
TIS 820-2531. See Figure 2, notice the Thai Baht sign and ฅ-ฃ pair, in
https://www.nectec.or.th/it-standards/std820/std820.html
Since TIS 820-2538 is not widely adopted by keyboard manufacturer,
this function uses the de facto standard modified TIS 820-2531 instead.
:param str c1: first character
:param str c2: second character
:param float shift_dist: distance to return when the two characters occupy the same key position but differ only by the Shift state
:return: euclidean distance between two characters
:rtype: float
:Example:
from pythainlp.util import thai_keyboard_dist
thai_keyboard_dist("ด", "ะ")
# output: 1.4142135623730951
thai_keyboard_dist("ฟ", "ฤ")
# output: 0.0
thai_keyboard_dist("ฟ", "ห")
# output: 1.0
thai_keyboard_dist("ฟ", "ก")
# output: 2.0
thai_keyboard_dist("ฟ", "ฤ", 0.5)
# output: 0.5
"""
def get_char_coord(
ch: str, layouts=[TIS_820_2531_MOD, TIS_820_2531_MOD_SHIFT]
):
for layout in layouts:
for row in layout:
if ch in row:
r = layout.index(row)
c = row.index(ch)
return (r, c)
raise ValueError(ch + " not found in given keyboard layout")
coord1 = get_char_coord(c1)
coord2 = get_char_coord(c2)
distance = (
(coord1[0] - coord2[0]) ** 2 + (coord1[1] - coord2[1]) ** 2
) ** (0.5)
if distance == 0 and c1 != c2:
return shift_dist
return distance
| 6,537 | 26.470588 | 77 | py |
pythainlp-dev/pythainlp/util/keywords.py | pythainlp-dev/pythainlp/util/keywords.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import Counter
from typing import Dict, List
from pythainlp.corpus import thai_stopwords
_STOPWORDS = thai_stopwords()
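# Thai stopword set, loaded once at import time and shared by rank()
# and find_keyword() below.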
def rank(words: List[str], exclude_stopwords: bool = False) -> Counter:
"""
Count word frequency given a list of Thai words, with an option
to exclude stopwords.
:param list words: a list of words
:param bool exclude_stopwords: If set to **True**, stopwords are
excluded from the count.
Otherwise, stopwords are counted as well.
By default, `exclude_stopwords` is
set to **False**.
:return: a Counter object representing word frequency from the text
:rtype: :class:`collections.Counter`
:Example:
Include stopwords in counting word frequency::
from pythainlp.util import rank
words = ["บันทึก", "เหตุการณ์", " ", "มี", "การ", "บันทึก", \\
"เป็น", " ", "ลายลักษณ์อักษร"]
rank(words)
# output:
# Counter(
# {
# ' ': 2,
# 'การ': 1,
# 'บันทึก': 2,
# 'มี': 1,
# 'ลายลักษณ์อักษร': 1,
# 'เป็น': 1,
# 'เหตุการณ์': 1
# })
Exclude stopwords in counting word frequency::
from pythainlp.util import rank
words = ["บันทึก", "เหตุการณ์", " ", "มี", "การ", "บันทึก", \\
"เป็น", " ", "ลายลักษณ์อักษร"]
rank(words, exclude_stopwords=True)
# output:
# Counter(
# {
# ' ': 2,
# 'บันทึก': 2,
# 'ลายลักษณ์อักษร': 1,
# 'เหตุการณ์': 1
# })
"""
if not words:
return None
if exclude_stopwords:
words = [word for word in words if word not in _STOPWORDS]
return Counter(words)
def find_keyword(word_list: List[str], min_len: int = 3) -> Dict[str, int]:
"""
This function counts the frequency of words in the list,
with stopwords excluded, and returns the result as a frequency dictionary.
:param list word_list: a list of words
:param int min_len: the minimum frequency a word must have to be included
:return: a dictionary object with key-value pair as word and its raw count
:rtype: dict[str, int]
:Example:
::
from pythainlp.util import find_keyword
words = ["บันทึก", "เหตุการณ์", "บันทึก", "เหตุการณ์",
" ", "มี", "การ", "บันทึก", "เป็น", " ", "ลายลักษณ์อักษร"
"และ", "การ", "บันทึก","เสียง","ใน","เหตุการณ์"]
find_keyword(words)
# output: {'บันทึก': 4, 'เหตุการณ์': 3}
find_keyword(words, min_len=1)
# output: {' ': 2, 'บันทึก': 4, 'ลายลักษณ์อักษรและ': 1,
'เสียง': 1, 'เหตุการณ์': 3}
"""
word_list = rank(word_list, exclude_stopwords=True)
return {k: v for k, v in word_list.items() if v >= min_len}
| 3,592 | 30.243478 | 78 | py |
pythainlp-dev/pythainlp/util/normalize.py | pythainlp-dev/pythainlp/util/normalize.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Text normalization
"""
import re
from typing import List, Union
from pythainlp import thai_above_vowels as above_v
from pythainlp import thai_below_vowels as below_v
from pythainlp import thai_follow_vowels as follow_v
from pythainlp import thai_lead_vowels as lead_v
from pythainlp import thai_tonemarks as tonemarks
from pythainlp.tokenize import word_tokenize
_DANGLING_CHARS = f"{above_v}{below_v}{tonemarks}\u0e3a\u0e4c\u0e4d\u0e4e"
_RE_REMOVE_DANGLINGS = re.compile(f"^[{_DANGLING_CHARS}]+")
_ZERO_WIDTH_CHARS = "\u200b\u200c" # ZWSP, ZWNJ
_REORDER_PAIRS = [
("\u0e40\u0e40", "\u0e41"), # Sara E + Sara E -> Sara Ae
(
f"([{tonemarks}\u0e4c]+)([{above_v}{below_v}]+)",
"\\2\\1",
), # TONE/Thanthakhat + ABV/BLW VOWEL -> ABV/BLW VOWEL + TONE/Thanthakhat
(
f"\u0e4d([{tonemarks}]*)\u0e32",
"\\1\u0e33",
), # Nikhahit + TONEMARK + Sara Aa -> TONEMARK + Sara Am
(
f"([{follow_v}]+)([{tonemarks}]+)",
"\\2\\1",
), # FOLLOW VOWEL + TONEMARK+ -> TONEMARK + FOLLOW VOWEL
("([^\u0e24\u0e26])\u0e45", "\\1\u0e32"), # Lakkhangyao -> Sara Aa
]
# VOWELS + Phinthu, Thanthakhat, Nikhahit, Yamakkan
_NOREPEAT_CHARS = (
f"{follow_v}{lead_v}{above_v}{below_v}\u0e3a\u0e4c\u0e4d\u0e4e"
)
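# Each pattern below matches a run of the same vowel or sign (optionally
# separated by spaces) and is used to collapse that run into a single character.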
_NOREPEAT_PAIRS = list(
zip([f"({ch}[ ]*)+{ch}" for ch in _NOREPEAT_CHARS], _NOREPEAT_CHARS)
)
_RE_TONEMARKS = re.compile(f"[{tonemarks}]+")
_RE_REMOVE_NEWLINES = re.compile("[ \n]*\n[ \n]*")
def _last_char(matchobj): # to be used with _RE_TONEMARKS
return matchobj.group(0)[-1]
def remove_dangling(text: str) -> str:
"""
Remove Thai non-base characters at the beginning of text.
This is a common "typo", especially for input fields in forms,
as these non-base characters can be visually hidden from the user,
who may have accidentally typed them in.
A character to be removed should be both:
* tone mark, above vowel, below vowel, or non-base sign AND
* located at the beginning of the text
:param str text: input text
:return: text without dangling Thai characters at the beginning
:rtype: str
:Example:
::
from pythainlp.util import remove_dangling
remove_dangling('๊ก')
# output: 'ก'
"""
return _RE_REMOVE_DANGLINGS.sub("", text)
def remove_dup_spaces(text: str) -> str:
"""
Remove duplicate spaces. Replace multiple spaces with one space.
Multiple newline characters and empty lines will be replaced
with one newline character.
:param str text: input text
:return: text without duplicated spaces and newlines
:rtype: str
:Example:
::
from pythainlp.util import remove_dup_spaces
remove_dup_spaces('ก ข ค')
# output: 'ก ข ค'
"""
while " " in text:
text = text.replace(" ", " ")
text = _RE_REMOVE_NEWLINES.sub("\n", text)
text = text.strip()
return text
def remove_tonemark(text: str) -> str:
"""
Remove all Thai tone marks from the text.
Thai script has four tone marks indicating four tones as follows:
* Down tone (Thai: ไม้เอก _่ )
* Falling tone (Thai: ไม้โท _้ )
* High tone (Thai: ไม้ตรี _๊ )
* Rising tone (Thai: ไม้จัตวา _๋ )
Putting wrong tone mark is a common mistake in Thai writing.
By removing tone marks from the string, it can be used
for approximate string matching.
:param str text: input text
:return: text without Thai tone marks
:rtype: str
:Example:
::
from pythainlp.util import remove_tonemark
remove_tonemark('สองพันหนึ่งร้อยสี่สิบเจ็ดล้านสี่แสนแปดหมื่นสามพันหกร้อยสี่สิบเจ็ด')
# output: สองพันหนึงรอยสีสิบเจ็ดลานสีแสนแปดหมืนสามพันหกรอยสีสิบเจ็ด
"""
for ch in tonemarks:
while ch in text:
text = text.replace(ch, "")
return text
def remove_zw(text: str) -> str:
"""
Remove zero-width characters.
These non-visible characters may cause unexpected results from the
user's point of view. Removing them can make string matching more robust.
Characters to be removed:
* Zero-width space (ZWSP)
* Zero-width non-joiner (ZWNJ)
:param str text: input text
:return: text without zero-width characters
:rtype: str
"""
for ch in _ZERO_WIDTH_CHARS:
while ch in text:
text = text.replace(ch, "")
return text
def reorder_vowels(text: str) -> str:
"""
Reorder vowels and tone marks to the standard logical order/spelling.
Characters in input text will be reordered/transformed,
according to these rules:
* Sara E + Sara E -> Sara Ae
* Nikhahit + Sara Aa -> Sara Am
* tone mark + non-base vowel -> non-base vowel + tone mark
* follow vowel + tone mark -> tone mark + follow vowel
:param str text: input text
:return: text with vowels and tone marks in the standard logical order
:rtype: str
"""
for pair in _REORDER_PAIRS:
text = re.sub(pair[0], pair[1], text)
return text
def remove_repeat_vowels(text: str) -> str:
"""
Remove repeating vowels, tone marks, and signs.
This function will call reorder_vowels() first, to make sure that
double Sara E will be converted to Sara Ae and not be removed.
:param str text: input text
:return: text without repeating Thai vowels, tone marks, and signs
:rtype: str
"""
text = reorder_vowels(text)
for pair in _NOREPEAT_PAIRS:
text = re.sub(pair[0], pair[1], text)
# remove repeating tone marks, use last tone mark
text = _RE_TONEMARKS.sub(_last_char, text)
return text
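# Illustrative effects of the helpers above: reorder_vowels("เเปลก") returns
# "แปลก" and remove_repeat_vowels("นานาาา") returns "นานา"; normalize() below
# applies both, among other clean-ups.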
def normalize(text: str) -> str:
"""
Normalize and clean Thai text with normalizing rules as follows:
* Remove zero-width spaces
* Remove duplicate spaces
* Reorder tone marks and vowels to standard order/spelling
* Remove duplicate vowels and signs
* Remove duplicate tone marks
* Remove dangling non-base characters at the beginning of text
normalize() simply call remove_zw(), remove_dup_spaces(),
remove_repeat_vowels(), and remove_dangling(), in that order.
If a user wants to customize the selection or the order of rules
to be applied, they can choose to call those functions by themselves.
Note: for Unicode normalization, see unicodedata.normalize().
:param str text: input text
:return: normalized text according to the rules
:rtype: str
:Example:
::
from pythainlp.util import normalize
normalize('เเปลก') # starts with two Sara E
# output: แปลก
normalize('นานาาา')
# output: นานา
"""
text = remove_zw(text)
text = remove_dup_spaces(text)
text = remove_repeat_vowels(text)
text = remove_dangling(text)
return text
def maiyamok(sent: Union[str, List[str]]) -> List[str]:
"""
Thai MaiYaMok
Mai Yamok (ๆ) is the repetition mark in Thai; it indicates that the preceding word is repeated.
This function expands Mai Yamok by duplicating the preceding word in a Thai sentence.
:param Union[str, List[str]] sent: input sentence (list or str)
:return: List of words
:rtype: List[str]
:Example:
::
from pythainlp.util import maiyamok
maiyamok("เด็กๆชอบไปโรงเรียน")
# output: ['เด็ก', 'เด็ก', 'ชอบ', 'ไป', 'โรงเรียน']
maiyamok(["ทำไม","คน","ดี"," ","ๆ","ๆ"," ","ถึง","ทำ","ไม่ได้"])
# output: ['ทำไม', 'คน', 'ดี', 'ดี', 'ดี', ' ', 'ถึง', 'ทำ', 'ไม่ได้']
"""
if isinstance(sent, str):
sent = word_tokenize(sent)
_list_word = []
i = 0
for j, text in enumerate(sent):
if text.isspace() and j + 1 < len(sent) and "ๆ" in sent[j + 1]:
continue
if " ๆ" in text:
text = text.replace(" ๆ", "ๆ")
if "ๆ" == text:
text = _list_word[i - 1]
elif "ๆ" in text:
text = text.replace("ๆ", "")
_list_word.append(text)
i += 1
_list_word.append(text)
i += 1
return _list_word
| 8,695 | 27.69967 | 92 | py |
pythainlp-dev/pythainlp/util/numtoword.py | pythainlp-dev/pythainlp/util/numtoword.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Convert numeric values to their Thai spelled-out readings
Adapted from
http://justmindthought.blogspot.com/2012/12/code-php.html
https://suksit.com/post/writing-bahttext-in-php/
"""
__all__ = ["bahttext", "num_to_thaiword"]
_VALUES = [
"",
"หนึ่ง",
"สอง",
"สาม",
"สี่",
"ห้า",
"หก",
"เจ็ด",
"แปด",
"เก้า",
]
_PLACES = ["", "สิบ", "ร้อย", "พัน", "หมื่น", "แสน", "ล้าน"]
_EXCEPTIONS = {"หนึ่งสิบ": "สิบ", "สองสิบ": "ยี่สิบ", "สิบหนึ่ง": "สิบเอ็ด"}
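# Irregular readings in Thai numbers: "หนึ่งสิบ" is read "สิบ",
# "สองสิบ" is read "ยี่สิบ", and "สิบหนึ่ง" is read "สิบเอ็ด".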
def bahttext(number: float) -> str:
"""
This function converts a number to Thai text and adds
a suffix "บาท" (Baht).
The precision will be fixed at two decimal places (0.00)
to fit the "สตางค์" (Satang) unit.
This function works similarly to the `BAHTTEXT` function in Microsoft Excel.
:param float number: number to be converted into Thai Baht currency format
:return: text representing the amount of money in the format
of Thai currency
:rtype: str
:Example:
::
from pythainlp.util import bahttext
bahttext(1)
# output: หนึ่งบาทถ้วน
bahttext(21)
# output: ยี่สิบเอ็ดบาทถ้วน
bahttext(200)
# output: สองร้อยบาทถ้วน
"""
ret = ""
if number is None:
pass
elif number == 0:
ret = "ศูนย์บาทถ้วน"
else:
num_int, num_dec = "{:.2f}".format(number).split(".")
num_int = int(num_int)
num_dec = int(num_dec)
baht = num_to_thaiword(num_int)
if baht:
ret = "".join([ret, baht, "บาท"])
satang = num_to_thaiword(num_dec)
if satang and satang != "ศูนย์":
ret = "".join([ret, satang, "สตางค์"])
else:
ret = "".join([ret, "ถ้วน"])
return ret
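# Worked example for num_to_thaiword() below (illustrative): 21 is first
# built digit by digit as "สองสิบหนึ่ง", then the exception table rewrites
# it to "ยี่สิบเอ็ด".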
def num_to_thaiword(number: int) -> str:
"""
This function converts an integer to Thai text
:param int number: an integer number to be converted to Thai text
:return: text representing the number in Thai
:rtype: str
:Example:
::
from pythainlp.util import num_to_thaiword
num_to_thaiword(1)
# output: หนึ่ง
num_to_thaiword(11)
# output: สิบเอ็ด
"""
output = ""
number_temp = number
if number is None:
return ""
elif number == 0:
output = "ศูนย์"
number = str(abs(number))
for place, value in enumerate(list(number[::-1])):
if place % 6 == 0 and place > 0:
output = _PLACES[6] + output
if value != "0":
output = _VALUES[int(value)] + _PLACES[place % 6] + output
for search, replac in _EXCEPTIONS.items():
output = output.replace(search, replac)
if number_temp < 0:
output = "ลบ" + output
return output
| 3,355 | 24.233083 | 78 | py |
pythainlp-dev/pythainlp/util/phoneme.py | pythainlp-dev/pythainlp/util/phoneme.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Phoneme utilities
"""
import unicodedata
from pythainlp.util.trie import Trie
from pythainlp.tokenize import Tokenizer
consonants_ipa_nectec = [
("k","k","k^"),
("kʰ","kh"),
("ŋ","ng","ng^"),
("tɕ","c"),
("tɕʰ","ch"),
("s","s"),
("j","j","j^"),
("d","d"),
("t","y","t^"),
("tʰ","th"),
("n","n","n^"),
("b","b"),
("p","p","p^"),
("pʰ","ph"),
("f","f"),
("m","m","m^"),
("r","r"),
("l","l"),
("w","w","w^"),
("h","h"),
("?","z","z^")
]
# ipa, initial, final
monophthong_ipa_nectec = [
("i","i"),
("e","e"),
("ɛ","x"),
("ɤ","q"),
("a","a"),
("am","am^"),
("aj","aj^"),
("aw","aw^"),
("u","u"),
("o","o"),
("ɔ","@"),
("ii","ii"),
("ee","ee"),
("ɛɛ","xx"),
("ɯɯ","vv"),
("ɤɤ","qq"),
("aa","aa"),
("uu","uu"),
("oo","oo"),
("","@@"), #-อ long
]
diphthong_ipa_nectec = [
("ia","ia"),
("ɯa","va"),
("ua","ua"),
("iia","iia"),
("ɯɯa","vva"),
("uua","uua"),
]
tones_ipa_nectec = [
("˧","0"),
("˨˩","1"),
("˥˩","2"),
("˦˥","3"),
("˩˩˦","4"),
]
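# Map every NECTEC transcription symbol to its IPA value; the extra "^"
# entries are the final-position (syllable-coda) variants of the consonants.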
dict_nectec_to_ipa = {i[1]:i[0] for i in consonants_ipa_nectec+monophthong_ipa_nectec+diphthong_ipa_nectec+tones_ipa_nectec}
dict_nectec_to_ipa.update({i[2]:i[0] for i in consonants_ipa_nectec if len(i)>2})
def nectec_to_ipa(pronunciation: str) -> str:
"""
Convert a NECTEC phonetic transcription to IPA.
:param str pronunciation: NECTEC phoneme string, with symbols separated by "-"
:return: the converted IPA string
:rtype: str
:Example:
::
from pythainlp.util import nectec_to_ipa
print(nectec_to_ipa("kl-uua-j^-2"))
# output : 'kl uua j ˥˩'
References
----------
Pornpimon Palingoon, Sumonmas Thatphithakkul. Chapter 4 Speech processing and Speech corpus. In: Handbook of Thai Electronic Corpus. 1st ed. p. 122–56.
"""
pronunciation = pronunciation.split("-")
_temp = []
for i in pronunciation:
if i in dict_nectec_to_ipa.keys():
_temp.append(dict_nectec_to_ipa[i])
else:
_temp.append(i)
return ' '.join(_temp)
dict_ipa_rtgs = {
"b":"b",
"d":"d",
"f":"f",
"h":"h",
"j":"y",
"k":"k",
"kʰ":"kh",
"l":"l",
"m":"m",
"n":"n",
"ŋ":"ng",
"p":"p",
"pʰ":"ph",
"r":"r",
"s":"s",
"t":"t",
"tʰ":"th",
"tɕ":"ch",
"tɕʰ":"ch",
"w":"w",
"ʔ":"",
"j":"i",
"a":"a",
"e":"e",
"ɛ":"ae",
"i":"i",
"o":"o",
"ɔ":"o",
"u":"u",
"ɯ":"ue",
"ɤ":"oe",
"aː":"a",
"eː":"e",
"ɛː":"ae",
"iː":"i",
"oː":"o",
"ɔː":"o",
"uː":"u",
"ɯː":"ue",
"ɤː":"oe",
"ia":"ia",
"ua":"ua",
"ɯa":"uea",
"aj":"ai",
"aw":"ao",
"ew":"eo",
"ɛw":"aeo",
"iw":"io",
"ɔj":"io",
"uj":"ui",
"aːj":"ai",
"aːw":"ao",
"eːw":"eo",
"ɛːw":"aeo",
"oːj":"oi",
"ɔːj":"oi",
"ɤːj":"oei",
"iaw":"iao",
"uaj":"uai",
"ɯaj":"ueai",
".":".",
}
dict_ipa_rtgs_final = {
"w":"o"
}
trie = Trie(list(dict_ipa_rtgs.keys())+list(dict_ipa_rtgs_final.keys()))
ipa_cut = Tokenizer(custom_dict=trie, engine="newmm")
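# The Trie above holds every IPA symbol (plus the final-only symbols) as a
# dictionary entry, so the "newmm" maximal-matching tokenizer can segment an
# IPA string into its longest known symbols before the per-symbol RTGS lookup
# in ipa_to_rtgs(), e.g. "kluaj" is expected to split as "k" + "l" + "uaj".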
def ipa_to_rtgs(ipa: str) -> str:
"""
    Convert from IPA to the Royal Thai General System of Transcription (RTGS)
    Docs: https://en.wikipedia.org/wiki/Help:IPA/Thai
    :param str ipa: IPA phoneme
    :return: the converted RTGS romanization
:rtype: str
:Example:
::
from pythainlp.util import ipa_to_rtgs
print(ipa_to_rtgs("kluaj"))
# output : 'kluai'
"""
_temp = []
_list_ipa = ipa_cut.word_tokenize(ipa)
for i,p in enumerate(_list_ipa):
if i == len(_list_ipa) -1 and p in list(dict_ipa_rtgs_final.keys()):
_temp.append(dict_ipa_rtgs_final[p])
elif p in list(dict_ipa_rtgs.keys()):
_temp.append(dict_ipa_rtgs[p])
else:
_temp.append(p)
_text = ''.join(_temp)
_text = unicodedata.normalize('NFKD', _text).encode('ascii', 'ignore')
return _text.decode("utf-8")
def remove_tone_ipa(ipa: str) -> str:
"""
    Remove Thai tone marks from an IPA string
    :param str ipa: IPA phoneme
    :return: IPA phoneme with tone marks removed
:rtype: str
:Example:
::
from pythainlp.util import remove_tone_ipa
print(remove_tone_ipa("laː˦˥.sa˨˩.maj˩˩˦"))
# output : laː.sa.maj
"""
_list_tone = ["˩˩˦", "˥˩", "˨˩", "˦˥", "˧"]
for tone in _list_tone:
ipa = ipa.replace(tone, "")
return ipa
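# A minimal usage sketch (runs only when this module is executed directly);
# the expected outputs are the ones shown in the docstrings above.
if __name__ == "__main__":
    print(nectec_to_ipa("kl-uua-j^-2"))  # kl uua j ˥˩
    print(ipa_to_rtgs("kluaj"))  # kluai
    print(remove_tone_ipa("laː˦˥.sa˨˩.maj˩˩˦"))  # laː.sa.maj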
| 5,191 | 19.935484 | 155 | py |
pythainlp-dev/pythainlp/util/spell_words.py | pythainlp-dev/pythainlp/util/spell_words.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from typing import List
from pythainlp import (
thai_letters,
thai_consonants,
thai_lead_vowels,
thai_follow_vowels,
thai_above_vowels,
thai_below_vowels,
thai_tonemarks
)
from pythainlp.tokenize import Tokenizer
from pythainlp.tokenize import subword_tokenize
_r1=["เ-ย","เ-ะ","แ-ะ","โ-ะ","เ-าะ","เ-อะ","เ-อ","เ-า"]
_r2=["–ั:วะ","เ–ี:ยะ","เ–ือะ","–ั:ว","เ–ี:ย","เ–ื:อ","–ื:อ"]
tonemarks={i:"ไม้"+j for i,j in zip(list(thai_tonemarks),["เอก","โท","ตรี","จัตวา"])}
rule1=[i.replace("-",f"([{thai_letters}][{thai_tonemarks}]?)") for i in _r1]
rule2=[i.replace("–",f"([{thai_letters}])").replace(":",f"") for i in _r2]
rule3=[i.replace("–",f"([{thai_letters}])").replace(":",f"([{thai_tonemarks}])") for i in _r2]
dict_vowel_ex={}
for i in _r1+_r2:
dict_vowel_ex[i.replace("-","อ").replace("–","อ").replace(":","")]=i.replace("-","อ").replace(":","").replace("–","อ")
dict_vowel={}
for i in _r1+_r2:
dict_vowel[i.replace("-","อ").replace("–","อ").replace(":","")]=i.replace("-","อ").replace(":","").replace("–","อ")
for i in thai_lead_vowels:
dict_vowel[i]=i+"อ"
for i in thai_follow_vowels:
dict_vowel[i]="อ"+i
for i in thai_above_vowels:
dict_vowel[i]="อ"+i
for i in thai_below_vowels:
dict_vowel[i]="อ"+i
_cut=Tokenizer(list(dict_vowel.keys())+list(thai_consonants),engine="mm")
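# The rules above normalize syllables whose vowel is written around the
# consonant (เ-ะ, เ-าะ, เ-ือ, ...): _clean() below matches rule3/rule2/rule1 in
# turn, keeps the consonant (and any tone mark) and appends the vowel's
# canonical อ-written form, i.e. the same keys stored in dict_vowel. The
# maximal-matching tokenizer _cut can then split the normalized syllable into
# consonant and vowel units for spelling.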
def _clean(w):
if bool(re.match('|'.join(rule3),w)):
for r in rule3:
if bool(re.match(r,w)):
_w=re.sub(r,"\\1==\\2==",w)
_temp=_w.split("==")
w=_temp[0]+r.replace(f"([{thai_letters}])","อ").replace(f"([{thai_tonemarks}])","")+_temp[1]
elif bool(re.match('|'.join(rule2),w)):
for r in rule2:
if bool(re.match(r,w)):
w=re.sub(r,"\\1",w)+r.replace(f"([{thai_letters}])","อ")
elif bool(re.match('|'.join(rule1),w)):
for r in rule1:
if bool(re.match(r,w)):
                w=re.sub(r,"\\1",w)+r.replace(f"([{thai_letters}][{thai_tonemarks}]?)","อ")
return w
def spell_syllable(s: str)-> List[str]:
"""
    Spell out a Thai syllable using the Thai word distribution form.
:param str s: Thai syllable only
:return: List of spell syllable
:rtype: List[str]
:Example:
::
from pythainlp.util.spell_words import spell_syllable
print(spell_syllable("แมว"))
# output: ['มอ', 'วอ', 'แอ', 'แมว']
"""
_t=s
s=_cut.word_tokenize(_clean(s))
_c_only = [i+"อ" for i in s if i in set(thai_consonants)]
_v_only = [dict_vowel[i] for i in s if i in set(dict_vowel.keys())]
_t_only = [tonemarks[i] for i in s if i in set(tonemarks.keys())]
_out=_c_only+_v_only+_t_only
_out.append(_t)
return _out
def spell_word(w: str)-> List[str]:
"""
    Spell out a Thai word using the Thai word distribution form.
:param str w: Thai word only
:return: List of spell word
:rtype: List[str]
:Example:
::
from pythainlp.util.spell_words import spell_word
print(spell_word("คนดี"))
# output: ['คอ', 'นอ', 'คน', 'ดอ', 'อี', 'ดี', 'คนดี']
"""
_r=[]
_temp=subword_tokenize(w,engine="ssg")
for i in _temp:
_r.extend(spell_syllable(i))
if len(_temp)>1:
_r.append(w)
    return _r
| 3,873 | 31.016529 | 122 | py |
pythainlp-dev/pythainlp/util/strftime.py | pythainlp-dev/pythainlp/util/strftime.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Thai date/time formatting.
"""
import warnings
from datetime import datetime
from string import digits
from pythainlp import thai_digits
from pythainlp.util.date import (
thai_abbr_months,
thai_abbr_weekdays,
thai_full_months,
thai_full_weekdays,
)
__all__ = [
"thai_strftime",
]
_HA_TH_DIGITS = str.maketrans(digits, thai_digits)
_BE_AD_DIFFERENCE = 543
_NEED_L10N = "AaBbCcDFGgvXxYy+" # flags that need localization
_EXTENSIONS = "EO-_0^#" # extension flags
def _std_strftime(dt_obj: datetime, fmt_char: str) -> str:
"""
Standard datetime.strftime() with normalization and exception handling.
"""
str_ = ""
try:
str_ = dt_obj.strftime(f"%{fmt_char}")
if not str_ or str_ == "%{}".format(fmt_char):
# normalize outputs for unsupported directives
# in different platforms
# "%Q" may result "%Q", "Q", or "", make it "Q"
str_ = fmt_char
except ValueError as err:
# Unsupported directives may raise ValueError on Windows,
# in that case just use the fmt_char
warnings.warn(
(
f"String format directive unknown/not support: %{fmt_char}"
f"The system raises this ValueError: {err}"
),
UserWarning,
)
str_ = fmt_char
return str_
def _thai_strftime(dt_obj: datetime, fmt_char: str) -> str:
"""
Conversion support for thai_strftime().
    The fmt_char should be in _NEED_L10N when calling this function.
"""
str_ = ""
if fmt_char == "A":
# National representation of the full weekday name
str_ = thai_full_weekdays[dt_obj.weekday()]
elif fmt_char == "a":
# National representation of the abbreviated weekday
str_ = thai_abbr_weekdays[dt_obj.weekday()]
elif fmt_char == "B":
# National representation of the full month name
str_ = thai_full_months[dt_obj.month - 1]
elif fmt_char == "b":
# National representation of the abbreviated month name
str_ = thai_abbr_months[dt_obj.month - 1]
elif fmt_char == "C":
# Thai Buddhist century (AD+543)/100 + 1 as decimal number;
str_ = str(int((dt_obj.year + _BE_AD_DIFFERENCE) / 100) + 1).zfill(2)
elif fmt_char == "c":
# Locale’s appropriate date and time representation
# Wed 6 Oct 01:40:00 1976
# พ 6 ต.ค. 01:40:00 2519 <-- left-aligned weekday, right-aligned day
str_ = "{:<2} {:>2} {} {} {}".format(
thai_abbr_weekdays[dt_obj.weekday()],
dt_obj.day,
thai_abbr_months[dt_obj.month - 1],
dt_obj.strftime("%H:%M:%S"),
str(dt_obj.year + _BE_AD_DIFFERENCE).zfill(4),
)
elif fmt_char == "D":
# Equivalent to ``%m/%d/%y''
str_ = "{}/{}".format(
dt_obj.strftime("%m/%d"),
(str(dt_obj.year + _BE_AD_DIFFERENCE)[-2:]).zfill(2),
)
elif fmt_char == "F":
# Equivalent to ``%Y-%m-%d''
str_ = "{}-{}".format(
str(dt_obj.year + _BE_AD_DIFFERENCE).zfill(4),
dt_obj.strftime("%m-%d"),
)
elif fmt_char == "G":
# ISO 8601 year with century representing the year that contains
# the greater part of the ISO week (%V). Monday as the first day
# of the week.
str_ = str(int(dt_obj.strftime("%G")) + _BE_AD_DIFFERENCE).zfill(4)
elif fmt_char == "g":
# Same year as in ``%G'',
# but as a decimal number without century (00-99).
str_ = (
str(int(dt_obj.strftime("%G")) + _BE_AD_DIFFERENCE)[-2:]
).zfill(2)
elif fmt_char == "v":
# BSD extension, ' 6-Oct-1976'
str_ = "{:>2}-{}-{}".format(
dt_obj.day,
thai_abbr_months[dt_obj.month - 1],
str(dt_obj.year + _BE_AD_DIFFERENCE).zfill(4),
)
elif fmt_char == "X":
# Locale’s appropriate time representation.
str_ = dt_obj.strftime("%H:%M:%S")
elif fmt_char == "x":
# Locale’s appropriate date representation.
str_ = "{}/{}/{}".format(
str(dt_obj.day).zfill(2),
str(dt_obj.month).zfill(2),
str(dt_obj.year + _BE_AD_DIFFERENCE).zfill(4),
)
elif fmt_char == "Y":
# Year with century
str_ = (str(dt_obj.year + _BE_AD_DIFFERENCE)).zfill(4)
elif fmt_char == "y":
# Year without century
str_ = (str(dt_obj.year + _BE_AD_DIFFERENCE)[-2:]).zfill(2)
elif fmt_char == "+":
# National representation of the date and time
# (the format is similar to that produced by date(1))
# Wed 6 Oct 1976 01:40:00
str_ = "{:<2} {:>2} {} {} {}".format(
thai_abbr_weekdays[dt_obj.weekday()],
dt_obj.day,
thai_abbr_months[dt_obj.month - 1],
dt_obj.year + _BE_AD_DIFFERENCE,
dt_obj.strftime("%H:%M:%S"),
)
else:
# No known localization available, use Python's default
str_ = _std_strftime(dt_obj, fmt_char)
return str_
def thai_strftime(
dt_obj: datetime,
fmt: str = "%-d %b %y",
thaidigit: bool = False,
) -> str:
"""
Convert :class:`datetime.datetime` into Thai date and time format.
    The formatting directives are similar to :func:`datetime.strftime`.
This function uses Thai names and Thai Buddhist Era for these directives:
* **%a** - abbreviated weekday name
(i.e. "จ", "อ", "พ", "พฤ", "ศ", "ส", "อา")
* **%A** - full weekday name
(i.e. "วันจันทร์", "วันอังคาร", "วันเสาร์", "วันอาทิตย์")
* **%b** - abbreviated month name
(i.e. "ม.ค.","ก.พ.","มี.ค.","เม.ย.","พ.ค.","มิ.ย.", "ธ.ค.")
* **%B** - full month name
(i.e. "มกราคม", "กุมภาพันธ์", "พฤศจิกายน", "ธันวาคม",)
* **%y** - year without century (i.e. "56", "10")
* **%Y** - year with century (i.e. "2556", "2410")
* **%c** - date and time representation
(i.e. "พ 6 ต.ค. 01:40:00 2519")
* **%v** - short date representation
(i.e. " 6-ม.ค.-2562", "27-ก.พ.-2555")
Other directives will be passed to datetime.strftime()
:Note:
* The Thai Buddhist Era (BE) year is simply converted from AD
by adding 543. This is certainly not accurate for years
before 1941 AD, due to the change in Thai New Year's Day.
        * This is meant to be an interim solution, since
Python standard's locale module (which relied on C's strftime())
does not support "th" or "th_TH" locale yet. If supported,
we can just locale.setlocale(locale.LC_TIME, "th_TH")
and then use native datetime.strftime().
    We try to make this platform-independent and support extensions
as many as possible. See these links for strftime() extensions
in POSIX, BSD, and GNU libc:
* Python
https://docs.python.org/3/library/datetime.html#strftime-strptime-behavior
* C http://www.cplusplus.com/reference/ctime/strftime/
* GNU https://metacpan.org/pod/POSIX::strftime::GNU
* Linux https://linux.die.net/man/3/strftime
* OpenBSD https://man.openbsd.org/strftime.3
* FreeBSD https://www.unix.com/man-page/FreeBSD/3/strftime/
* macOS
https://developer.apple.com/library/archive/documentation/System/Conceptual/ManPages_iPhoneOS/man3/strftime.3.html
* PHP https://secure.php.net/manual/en/function.strftime.php
* JavaScript's implementation https://github.com/samsonjs/strftime
* strftime() quick reference http://www.strftime.net/
    :param datetime dt_obj: an instantiated object of
:mod:`datetime.datetime`
:param str fmt: string containing date and time directives
:param bool thaidigit: If `thaidigit` is set to **False** (default),
number will be represented in Arabic digit.
If it is set to **True**, it will be represented
in Thai digit.
:return: Date and time text, with month in Thai name and year in
Thai Buddhist era. The year is simply converted from AD
             by adding 543 (which will not be accurate for years before 1941 AD,
due to change in Thai New Year's Day).
:rtype: str
:Example:
::
from datetime import datetime
from pythainlp.util import thai_strftime
datetime_obj = datetime(year=2019, month=6, day=9, \\
hour=5, minute=59, second=0, microsecond=0)
print(datetime_obj)
# output: 2019-06-09 05:59:00
thai_strftime(datetime_obj, "%A %d %B %Y")
# output: 'วันอาทิตย์ 09 มิถุนายน 2562'
thai_strftime(datetime_obj, "%a %-d %b %y") # no padding
# output: 'อา 9 มิ.ย. 62'
thai_strftime(datetime_obj, "%a %_d %b %y") # space padding
# output: 'อา 9 มิ.ย. 62'
thai_strftime(datetime_obj, "%a %0d %b %y") # zero padding
# output: 'อา 09 มิ.ย. 62'
thai_strftime(datetime_obj, "%-H นาฬิกา %-M นาที", thaidigit=True)
# output: '๕ นาฬิกา ๕๙ นาที'
thai_strftime(datetime_obj, "%D (%v)")
# output: '06/09/62 ( 9-มิ.ย.-2562)'
thai_strftime(datetime_obj, "%c")
# output: 'อา 9 มิ.ย. 05:59:00 2562'
thai_strftime(datetime_obj, "%H:%M %p")
# output: '01:40 AM'
thai_strftime(datetime_obj, "%H:%M %#p")
# output: '01:40 am'
"""
thaidate_parts = []
i = 0
fmt_len = len(fmt)
while i < fmt_len:
str_ = ""
if fmt[i] == "%":
j = i + 1
if j < fmt_len:
fmt_char = fmt[j]
if fmt_char in _NEED_L10N: # requires localization?
str_ = _thai_strftime(dt_obj, fmt_char)
elif fmt_char in _EXTENSIONS:
fmt_char_ext = fmt_char
k = j + 1
if k < fmt_len:
fmt_char = fmt[k]
if fmt_char in _NEED_L10N:
str_ = _thai_strftime(dt_obj, fmt_char)
else:
str_ = _std_strftime(dt_obj, fmt_char)
if fmt_char_ext == "-":
# GNU libc extension,
# no padding
if str_[0] and str_[0] in " 0":
str_ = str_[1:]
elif fmt_char_ext == "_":
# GNU libc extension,
# explicitly specify space (" ") for padding
if str_[0] and str_[0] == "0":
str_ = " " + str_[1:]
elif fmt_char_ext == "0":
# GNU libc extension,
# explicitly specify zero ("0") for padding
if str_[0] and str_[0] == " ":
str_ = "0" + str_[1:]
elif fmt_char_ext == "^":
# GNU libc extension,
# convert to upper case
str_ = str_.upper()
elif fmt_char_ext == "#":
# GNU libc extension,
# swap case - useful for %Z
str_ = str_.swapcase()
elif fmt_char_ext == "E":
# POSIX extension,
# uses the locale's alternative representation
# Not implemented yet
pass
elif fmt_char_ext == "O":
# POSIX extension,
# uses the locale's alternative numeric symbols
str_ = str_.translate(_HA_TH_DIGITS)
i = i + 1 # consume char after format char
else:
# format char at string's end has no meaning
str_ = fmt_char_ext
else: # not in _NEED_L10N nor _EXTENSIONS
# no known localization available, use Python's default
str_ = _std_strftime(dt_obj, fmt_char)
i = i + 1 # consume char after "%"
else:
# % char at string's end has no meaning
str_ = "%"
else:
str_ = fmt[i]
thaidate_parts.append(str_)
i = i + 1
thaidate_text = "".join(thaidate_parts)
if thaidigit:
thaidate_text = thaidate_text.translate(_HA_TH_DIGITS)
return thaidate_text
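# A minimal usage sketch (runs only when this module is executed directly);
# the first two expected outputs mirror the docstring examples above.
if __name__ == "__main__":
    dt = datetime(year=2019, month=6, day=9, hour=5, minute=59, second=0)
    print(thai_strftime(dt, "%A %d %B %Y"))  # วันอาทิตย์ 09 มิถุนายน 2562
    print(thai_strftime(dt, "%a %-d %b %y"))  # อา 9 มิ.ย. 62
    print(thai_strftime(dt, "%c", thaidigit=True))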
| 13,440 | 37.402857 | 124 | py |
pythainlp-dev/pythainlp/util/syllable.py | pythainlp-dev/pythainlp/util/syllable.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Syllable tools
"""
import re
from pythainlp import thai_consonants, thai_tonemarks
spelling_class = {
"กง": list("ง"),
"กม": list("ม"),
"เกย": list("ย"),
"เกอว": list("ว"),
"กน": list("นญณรลฬ"),
"กก": list("กขคฆ"),
"กด": list("ดจชซฎฏฐฑฒตถทธศษส"),
"กบ": list("บปภพฟ"),
}
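# spelling_class lists the Thai final-consonant classes (มาตราตัวสะกด): the
# sonorant-final classes กง, กน, กม, เกย, เกอว close live syllables, while the
# stop-final classes กก, กด, กบ close dead syllables (see _check_1/_check_2
# below).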
thai_consonants_all = list(thai_consonants)
thai_consonants_all.remove("อ")
_temp = list(
"".join(["".join(spelling_class[i]) for i in spelling_class.keys()])
)
not_spelling_class = [j for j in thai_consonants_all if j not in _temp]
# vowel's short sound
short = "ะัิึุ"
re_short = re.compile("เ(.*)ะ|แ(.*)ะ|เ(.*)อะ|โ(.*)ะ|เ(.*)าะ", re.U)
pattern = re.compile("เ(.*)า", re.U) # เ-า is live syllable
_check_1 = []
# these spelling consonant are live syllable.
for i in ["กง", "กน", "กม", "เกย", "เกอว"]:
_check_1.extend(spelling_class[i])
# these spelling consonant are dead syllable.
_check_2 = spelling_class["กก"] + spelling_class["กบ"] + spelling_class["กด"]
thai_low_sonorants = list("งนมยรลว")
thai_low_aspirates = list("คชซทพฟฮ")
thai_low_irregular = list("ฆญณธภฅฌฑฒฬ")
thai_mid_plains = list("กจดตบปอฎฏ")
thai_high_aspirates = list("ขฉถผฝสห")
thai_high_irregular = list("ศษฃฐ")
thai_initial_consonant_type = {
"low": thai_low_sonorants + thai_low_aspirates + thai_low_irregular,
"mid": thai_mid_plains,
"high": thai_high_aspirates + thai_high_irregular,
}
thai_initial_consonant_to_type = {}
for k, v in thai_initial_consonant_type.items():
for i in v:
thai_initial_consonant_to_type[i] = k
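# Flattened lookup from an initial consonant to its class, e.g.
# thai_initial_consonant_to_type["ก"] == "mid",
# thai_initial_consonant_to_type["ข"] == "high",
# thai_initial_consonant_to_type["ค"] == "low".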
def sound_syllable(syllable: str) -> str:
"""
    Sound syllable classification
    This function classifies a Thai syllable as a live syllable
    or a dead syllable.
    :param str syllable: Thai syllable
    :return: syllable's type ("live" or "dead")
:rtype: str
:Example:
::
from pythainlp.util import sound_syllable
print(sound_syllable("มา"))
# output: live
print(sound_syllable("เลข"))
# output: dead
"""
# get consonants
consonants = [i for i in syllable if i in list(thai_consonants_all)]
# get spelling consonants
spelling_consonant = consonants[-1]
# if len of syllable < 2
if len(syllable) < 2:
return "dead"
elif (spelling_consonant in _check_2) and (
any((c in set("าีืแูาเโ")) for c in syllable) == False
and any((c in set("ำใไ")) for c in syllable) == False
and bool(pattern.search(syllable)) != True
):
return "dead"
elif any((c in set("าีืแูาโ")) for c in syllable): # in syllable:
if (
spelling_consonant in _check_1
and bool(re_short.search(syllable)) != True
):
return "live"
elif (
spelling_consonant != syllable[-1]
and bool(re_short.search(syllable)) != True
):
return "live"
elif spelling_consonant in _check_2:
return "dead"
elif bool(re_short.search(syllable)) or any(
(c in set(short)) for c in syllable
):
return "dead"
return "live"
elif any((c in set("ำใไ")) for c in syllable):
return "live" # if these vowel's long sound are live syllable
elif bool(pattern.search(syllable)): # if it is เ-า
return "live"
elif spelling_consonant in _check_1:
if (
bool(re_short.search(syllable))
or any((c in set(short)) for c in syllable)
) and len(consonants) < 2:
return "dead"
return "live"
elif bool(
re_short.search(syllable)
) or any( # if found vowel's short sound
(c in set(short)) for c in syllable
): # consonant in short
return "dead"
else:
return "dead"
def syllable_open_close_detector(syllable: str) -> str:
"""
Thai syllable open/close detector
    This function detects whether a Thai syllable has an open or a closed sound.
    :param str syllable: Thai syllable
    :return: "open" or "close"
:rtype: str
:Example:
::
from pythainlp.util import syllable_open_close_detector
print(syllable_open_close_detector("มาก"))
# output: close
print(syllable_open_close_detector("คะ"))
# output: open
"""
consonants = [i for i in syllable if i in list(thai_consonants)]
if len(consonants) < 2:
return "open"
elif len(consonants) == 2 and consonants[-1] == "อ":
return "open"
return "close"
def syllable_length(syllable: str) -> str:
"""
Thai syllable length
    This function is used to find a syllable's length (long or short).
:param str syllable: Thai syllable
:return: syllable's length (long or short)
:rtype: str
:Example:
::
from pythainlp.util import syllable_length
print(syllable_length("มาก"))
# output: long
print(syllable_length("คะ"))
# output: short
"""
consonants = [i for i in syllable if i in list(thai_consonants)]
if len(consonants) < 3 and any((c in set(short)) for c in syllable):
return "short"
elif bool(re_short.search(syllable)):
return "short"
else:
return "long"
def _tone_mark_detector(syllable: str) -> str:
tone_mark = [i for i in syllable if i in list(thai_tonemarks)]
if tone_mark == []:
return ""
else:
return tone_mark[0]
def _check_sonorant_syllable(syllable: str) -> bool:
_sonorant = [i for i in syllable if i in thai_low_sonorants]
consonants = [i for i in syllable if i in list(thai_consonants)]
if _sonorant[-1] == consonants[-2]:
return True
elif _sonorant[-1] == consonants[-1]:
return True
return False
def tone_detector(syllable: str) -> str:
"""
Thai tone detector for syllables
:param str syllable: Thai syllable
    :return: syllable's tone (l, m, h, r, f, or an empty string if the tone cannot be detected)
:rtype: str
:Example:
::
from pythainlp.util import tone_detector
print(tone_detector("มา"))
# output: m
print(tone_detector("ไม้"))
# output: h
"""
s = sound_syllable(syllable)
# get consonants
consonants = [i for i in syllable if i in list(thai_consonants)]
initial_consonant = consonants[0]
tone_mark = _tone_mark_detector(syllable)
syllable_check = syllable_open_close_detector(syllable)
    syllable_check_length = syllable_length(syllable)
initial_consonant_type = thai_initial_consonant_to_type[initial_consonant]
# r for store value
r = ""
if len(consonants) > 1 and (
initial_consonant == "อ" or initial_consonant == "ห"
):
consonant_ending = _check_sonorant_syllable(syllable)
if (
initial_consonant == "อ"
and consonant_ending
and s == "live"
and tone_mark == "่"
):
r = "l"
elif initial_consonant == "อ" and consonant_ending and s == "dead":
r = "l"
elif (
initial_consonant == "ห"
and consonant_ending
and s == "live"
and tone_mark == "่"
):
r = "l"
elif (
initial_consonant == "ห"
and consonant_ending
and s == "live"
and tone_mark == "้"
):
r = "f"
elif initial_consonant == "ห" and consonant_ending and s == "dead":
r = "l"
elif initial_consonant == "ห" and consonant_ending and s == "live":
r = "r"
elif (
initial_consonant_type == "low"
        and syllable_check_length == "short"
and syllable_check == "close"
and s == "dead"
):
r = "h"
elif (
initial_consonant_type == "low"
        and syllable_check_length == "long"
and syllable_check == "close"
and s == "dead"
):
r = "f"
elif (
initial_consonant_type == "low"
        and syllable_check_length == "short"
and syllable_check == "open"
):
r = "h"
elif initial_consonant_type == "high" and s == "live" and tone_mark == "่":
r = "l"
elif initial_consonant_type == "mid" and s == "live" and tone_mark == "่":
r = "l"
elif initial_consonant_type == "low" and tone_mark == "้":
r = "h"
elif initial_consonant_type == "mid" and tone_mark == "๋":
r = "r"
elif initial_consonant_type == "mid" and tone_mark == "๊":
r = "h"
elif initial_consonant_type == "low" and tone_mark == "่":
r = "f"
elif initial_consonant_type == "mid" and tone_mark == "้":
r = "f"
elif initial_consonant_type == "high" and tone_mark == "้":
r = "f"
elif initial_consonant_type == "mid" and s == "dead":
r = "l"
elif initial_consonant_type == "high" and s == "dead":
r = "l"
elif initial_consonant_type == "low" and s == "live":
r = "m"
elif initial_consonant_type == "mid" and s == "live":
r = "m"
elif initial_consonant_type == "high" and s == "live":
r = "r"
return r
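# A minimal usage sketch (runs only when this module is executed directly);
# expected outputs follow the docstring examples above.
if __name__ == "__main__":
    print(sound_syllable("มา"), sound_syllable("เลข"))  # live dead
    print(syllable_open_close_detector("มาก"), syllable_open_close_detector("คะ"))  # close open
    print(syllable_length("มาก"), syllable_length("คะ"))  # long short
    print(tone_detector("มา"), tone_detector("ไม้"))  # m h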
| 9,765 | 28.683891 | 79 | py |