repo | file | code | file_length | avg_line_length | max_line_length | extension_type
---|---|---|---|---|---|---
deep-translator | deep-translator-master/deep_translator/qcri.py | __copyright__ = "Copyright (C) 2020 Nidhal Baccouri"
import os
from typing import List, Optional
import requests
from deep_translator.base import BaseTranslator
from deep_translator.constants import (
BASE_URLS,
QCRI_ENV_VAR,
QCRI_LANGUAGE_TO_CODE,
)
from deep_translator.exceptions import (
ApiKeyException,
ServerException,
TranslationNotFound,
)
from deep_translator.validate import request_failed
class QcriTranslator(BaseTranslator):
"""
    class that wraps functions which use the QCRI translator under the hood to translate word(s)
"""
def __init__(
self,
source: str = "en",
target: str = "en",
api_key: Optional[str] = os.getenv(QCRI_ENV_VAR, None),
**kwargs,
):
"""
        @param api_key: your QCRI API key.
Get one for free here https://mt.qcri.org/api/v1/ref
"""
if not api_key:
raise ApiKeyException(QCRI_ENV_VAR)
self.api_key = api_key
self.api_endpoints = {
"get_languages": "getLanguagePairs",
"get_domains": "getDomains",
"translate": "translate",
}
self.params = {"key": self.api_key}
super().__init__(
base_url=BASE_URLS.get("QCRI"),
source=source,
target=target,
languages=QCRI_LANGUAGE_TO_CODE,
**kwargs,
)
def _get(
self,
endpoint: str,
params: Optional[dict] = None,
return_text: bool = True,
):
if not params:
params = self.params
try:
res = requests.get(
self._base_url.format(endpoint=self.api_endpoints[endpoint]),
params=params,
)
return res.text if return_text else res
except Exception as e:
raise e
@property
def languages(self):
return self.get_supported_languages()
def get_domains(self):
domains = self._get("get_domains")
return domains
@property
def domains(self):
return self.get_domains()
def translate(self, text: str, **kwargs) -> str:
params = {
"key": self.api_key,
"langpair": f"{self._source}-{self._target}",
"domain": kwargs["domain"],
"text": text,
}
try:
response = self._get("translate", params=params, return_text=False)
except ConnectionError:
raise ServerException(503)
else:
if request_failed(status_code=response.status_code):
                raise ServerException(response.status_code)
else:
res = response.json()
translation = res.get("translatedText")
if not translation:
raise TranslationNotFound(text)
return translation
def translate_file(self, path: str, **kwargs) -> str:
return self._translate_file(path, **kwargs)
def translate_batch(self, batch: List[str], **kwargs) -> List[str]:
"""
translate a batch of texts
        @param domain: translation domain (passed via kwargs to translate)
@param batch: list of texts to translate
@return: list of translations
"""
return self._translate_batch(batch, **kwargs)
| 3,300 | 26.508333 | 97 | py |
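A minimal usage sketch for the QcriTranslator class above. The API key and the "general" domain are placeholder assumptions (not values taken from this repository), and running it performs live HTTP requests against the QCRI API:

```python
# Hypothetical usage of QcriTranslator; the key and domain below are placeholders.
from deep_translator import QcriTranslator

translator = QcriTranslator(
    source="en",
    target="ar",
    api_key="your-qcri-api-key",  # or export the environment variable named by QCRI_ENV_VAR
)

print(translator.domains)  # raw domain list returned by the API
# translate() reads a required "domain" keyword argument, as seen above.
print(translator.translate("hello", domain="general"))
```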
deep-translator | deep-translator-master/deep_translator/tencent.py | """
Tencent translator API
"""
__copyright__ = "Copyright (C) 2020 Nidhal Baccouri"
import base64
import hashlib
import hmac
import os
import time
from typing import List, Optional
import requests
from deep_translator.base import BaseTranslator
from deep_translator.constants import (
BASE_URLS,
TENCENT_LANGUAGE_TO_CODE,
TENCENT_SECRET_ID_ENV_VAR,
TENCENT_SECRET_KEY_ENV_VAR,
)
from deep_translator.exceptions import (
ApiKeyException,
ServerException,
TencentAPIerror,
TranslationNotFound,
)
from deep_translator.validate import is_empty, is_input_valid
class TencentTranslator(BaseTranslator):
"""
    class that wraps functions which use the Tencent translator
under the hood to translate word(s)
"""
def __init__(
self,
source: str = "en",
target: str = "zh",
secret_id: Optional[str] = os.getenv(TENCENT_SECRET_ID_ENV_VAR, None),
secret_key: Optional[str] = os.getenv(
TENCENT_SECRET_KEY_ENV_VAR, None
),
**kwargs
):
"""
@param secret_id: your tencent cloud api secret id.
Get one here: https://console.cloud.tencent.com/capi
@param secret_key: your tencent cloud api secret key.
@param source: source language
@param target: target language
"""
if not secret_id:
raise ApiKeyException(env_var=TENCENT_SECRET_ID_ENV_VAR)
if not secret_key:
raise ApiKeyException(env_var=TENCENT_SECRET_KEY_ENV_VAR)
self.secret_id = secret_id
self.secret_key = secret_key
url = BASE_URLS.get("TENENT")
super().__init__(
base_url=url,
source=source,
target=target,
languages=TENCENT_LANGUAGE_TO_CODE,
**kwargs
)
def translate(self, text: str, **kwargs) -> str:
"""
@param text: text to translate
@return: translated text
"""
if is_input_valid(text):
if self._same_source_target() or is_empty(text):
return text
# Create the request parameters.
translate_endpoint = self._base_url.replace("https://", "")
params = {
"Action": "TextTranslate",
"Nonce": 11886,
"ProjectId": 0,
"Region": "ap-guangzhou",
"SecretId": self.secret_id,
"Source": self.source,
"SourceText": text,
"Target": self.target,
"Timestamp": int(time.time()),
"Version": "2018-03-21",
}
s = "GET" + translate_endpoint + "/?"
query_str = "&".join(
"%s=%s" % (k, params[k]) for k in sorted(params)
)
hmac_str = hmac.new(
self.secret_key.encode("utf8"),
(s + query_str).encode("utf8"),
hashlib.sha1,
).digest()
params["Signature"] = base64.b64encode(hmac_str)
# Do the request and check the connection.
try:
response = requests.get(self._base_url, params=params)
except ConnectionError:
raise ServerException(503)
# If the answer is not success, raise server exception.
if response.status_code != 200:
raise ServerException(response.status_code)
# Get the response and check is not empty.
res = response.json()
if not res:
raise TranslationNotFound(text)
# Process and return the response.
if "Error" in res["Response"]:
raise TencentAPIerror(res["Response"]["Error"]["Code"])
return res["Response"]["TargetText"]
def translate_file(self, path: str, **kwargs) -> str:
return self._translate_file(path, **kwargs)
def translate_batch(self, batch: List[str], **kwargs) -> List[str]:
"""
@param batch: list of texts to translate
@return: list of translations
"""
return self._translate_batch(batch, **kwargs)
| 4,180 | 30.674242 | 78 | py |
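The translate() method above authenticates requests by building a "GET host/?key=value&..." string from the alphabetically sorted parameters and signing it with HMAC-SHA1. The fragment below isolates just that signing step, using made-up credentials and a reduced parameter set so it can be run offline as an illustration:

```python
import base64
import hashlib
import hmac

# Placeholder credentials and endpoint; real requests need your own
# Tencent SecretId/SecretKey and the base URL configured in this package.
secret_key = "example-secret-key"
endpoint = "tmt.tencentcloudapi.com"
params = {
    "Action": "TextTranslate",
    "Nonce": 11886,
    "SecretId": "example-secret-id",
    "SourceText": "hello",
}

# Same construction as in TencentTranslator.translate():
# "GET" + host + "/?" + sorted key=value pairs joined by "&".
string_to_sign = "GET" + endpoint + "/?" + "&".join(
    "%s=%s" % (k, params[k]) for k in sorted(params)
)
digest = hmac.new(
    secret_key.encode("utf8"), string_to_sign.encode("utf8"), hashlib.sha1
).digest()
print(base64.b64encode(digest))  # value sent as the "Signature" parameter
```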
deep-translator | deep-translator-master/deep_translator/validate.py | __copyright__ = "Copyright (C) 2020 Nidhal Baccouri"
from typing import Optional
from deep_translator.exceptions import NotValidLength, NotValidPayload
def is_empty(text: str) -> bool:
return text == ""
def request_failed(status_code: int) -> bool:
"""Check if a request has failed or not.
    A request is considered successful if the status code is in the 2xx range.
Args:
status_code (int): status code of the request
Returns:
bool: indicates request failure
"""
if status_code > 299 or status_code < 200:
return True
return False
def is_input_valid(
text: str, min_chars: int = 0, max_chars: Optional[int] = None
) -> bool:
"""
validate the target text to translate
@param min_chars: min characters
@param max_chars: max characters
@param text: text to translate
@return: bool
"""
if not isinstance(text, str):
raise NotValidPayload(text)
if max_chars and (not min_chars <= len(text) < max_chars):
raise NotValidLength(text, min_chars, max_chars)
return True
| 1,088 | 23.75 | 79 | py |
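A few quick checks of the helpers above, assuming deep-translator is importable. The expected results follow directly from the definitions: only 2xx status codes count as success, and is_input_valid raises for non-string or over-length input:

```python
from deep_translator.exceptions import NotValidLength, NotValidPayload
from deep_translator.validate import is_empty, is_input_valid, request_failed

assert is_empty("") is True
assert request_failed(status_code=200) is False   # 2xx -> success
assert request_failed(status_code=299) is False
assert request_failed(status_code=404) is True    # anything else -> failure

assert is_input_valid("hello", min_chars=0, max_chars=50) is True
try:
    is_input_valid("a" * 51, min_chars=0, max_chars=50)
except NotValidLength:
    pass  # raised as expected: length falls outside [min_chars, max_chars)
try:
    is_input_valid(["not", "a", "string"])
except NotValidPayload:
    pass  # raised as expected: only str payloads are accepted
```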
deep-translator | deep-translator-master/deep_translator/yandex.py | """
Yandex translator API
"""
__copyright__ = "Copyright (C) 2020 Nidhal Baccouri"
import os
from typing import List, Optional
import requests
from deep_translator.base import BaseTranslator
from deep_translator.constants import BASE_URLS, YANDEX_ENV_VAR
from deep_translator.exceptions import (
ApiKeyException,
RequestError,
ServerException,
TooManyRequests,
TranslationNotFound,
)
from deep_translator.validate import is_input_valid, request_failed
class YandexTranslator(BaseTranslator):
"""
    class that wraps functions which use the Yandex translator
under the hood to translate word(s)
"""
def __init__(
self,
source: str = "en",
target: str = "de",
api_key: Optional[str] = os.getenv(YANDEX_ENV_VAR, None),
**kwargs
):
"""
@param api_key: your yandex api key
"""
if not api_key:
raise ApiKeyException(YANDEX_ENV_VAR)
self.api_key = api_key
self.api_version = "v1.5"
self.api_endpoints = {
"langs": "getLangs",
"detect": "detect",
"translate": "translate",
}
super().__init__(
base_url=BASE_URLS.get("YANDEX"),
source=source,
target=target,
**kwargs
)
def _get_supported_languages(self):
return set(x.split("-")[0] for x in self.dirs)
@property
def languages(self):
return self.get_supported_languages()
@property
def dirs(self, proxies: Optional[dict] = None):
try:
url = self._base_url.format(
version=self.api_version, endpoint="getLangs"
)
print("url: ", url)
response = requests.get(
url, params={"key": self.api_key}, proxies=proxies
)
except requests.exceptions.ConnectionError:
raise ServerException(503)
else:
data = response.json()
if request_failed(status_code=response.status_code):
raise ServerException(response.status_code)
return data.get("dirs")
def detect(self, text: str, proxies: Optional[dict] = None):
response = None
params = {
"text": text,
"format": "plain",
"key": self.api_key,
}
try:
url = self._base_url.format(
version=self.api_version, endpoint="detect"
)
response = requests.post(url, data=params, proxies=proxies)
except RequestError:
raise
except ConnectionError:
raise ServerException(503)
except ValueError:
raise ServerException(response.status_code)
else:
response = response.json()
language = response["lang"]
status_code = response["code"]
if status_code != 200:
raise RequestError()
elif not language:
raise ServerException(501)
return language
def translate(
self, text: str, proxies: Optional[dict] = None, **kwargs
) -> str:
if is_input_valid(text):
params = {
"text": text,
"format": "plain",
"lang": self._target
if self._source == "auto"
else "{}-{}".format(self._source, self._target),
"key": self.api_key,
}
try:
url = self._base_url.format(
version=self.api_version, endpoint="translate"
)
response = requests.post(url, data=params, proxies=proxies)
except ConnectionError:
raise ServerException(503)
else:
response = response.json()
if response["code"] == 429:
raise TooManyRequests()
if response["code"] != 200:
raise ServerException(response["code"])
if not response["text"]:
raise TranslationNotFound()
return response["text"]
def translate_file(self, path: str, **kwargs) -> str:
"""
translate from a file
@param path: path to file
@return: translated text
"""
return self._translate_file(path, **kwargs)
def translate_batch(self, batch: List[str], **kwargs) -> List[str]:
"""
translate a batch of texts
@param batch: list of texts to translate
@return: list of translations
"""
return self._translate_batch(batch, **kwargs)
| 4,618 | 28.050314 | 75 | py |
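The _get_supported_languages() helper above reduces Yandex's translation directions ("dirs", strings like "en-de") to the set of source-language codes. A standalone illustration with made-up direction pairs, no API call involved:

```python
# Illustrative "dirs" values; a real list comes from the getLangs endpoint.
dirs = ["en-de", "en-ru", "de-en", "ru-en", "th-en"]

# Mirrors YandexTranslator._get_supported_languages():
supported_sources = set(x.split("-")[0] for x in dirs)
print(supported_sources)  # {'en', 'de', 'ru', 'th'}
```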
deep-translator | deep-translator-master/docs/conf.py | #!/usr/bin/env python
#
# deep_translator documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 9 13:47:02 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath(".."))
# import deep_translator
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
html_logo = "../assets/icon.jpg"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ["sphinx.ext.autodoc", "sphinx.ext.viewcode"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "deep_translator"
copyright = "2020, Nidhal Baccouri"
author = "Nidhal Baccouri"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
import toml
with open("../pyproject.toml", "r") as f:
tom = toml.load(f)
version = tom["tool"]["poetry"]["version"]
# The full version, including alpha/beta/rc tags.
# release = deep_translator.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "default"
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# -- Options for HTMLHelp output ---------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "deep_translatordoc"
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto, manual, or own class]).
latex_documents = [
(
master_doc,
"deep_translator.tex",
"deep_translator Documentation",
"Nidhal Baccouri",
"manual",
),
]
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
master_doc,
"deep_translator",
"deep_translator Documentation",
[author],
1,
)
]
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"deep_translator",
"deep_translator Documentation",
author,
"deep_translator",
"A flexible free and unlimited python tool to translate between different languages in a simple way using multiple translators.", # noqa: E501
"Miscellaneous",
),
]
| 5,225 | 28.862857 | 151 | py |
deep-translator | deep-translator-master/examples/libre.py | from deep_translator import LibreTranslator
res = LibreTranslator(source="de", target="en").translate("laufen")
print(res)
| 125 | 20 | 67 | py |
deep-translator | deep-translator-master/examples/linguee.py | from deep_translator import LingueeTranslator
res = LingueeTranslator(source="german", target="english").translate(
"laufen", return_all=False
)
print(res)
| 162 | 19.375 | 69 | py |
deep-translator | deep-translator-master/examples/mymemory.py | from deep_translator import MyMemoryTranslator
res = MyMemoryTranslator(source="ar", target="en").translate("آخُذ اَلْباص.")
print(res)
| 138 | 22.166667 | 77 | py |
deep-translator | deep-translator-master/examples/pons.py | from deep_translator import PonsTranslator
res = PonsTranslator(source="en", target="de").translate(
"good", return_all=False
)
print(res)
| 145 | 17.25 | 57 | py |
deep-translator | deep-translator-master/examples/trans.py | from deep_translator import GoogleTranslator, LingueeTranslator, PonsTranslator
# examples using google translate
english_text = "happy coding"
chinese_text = "這很好"
translator = GoogleTranslator(source="auto", target="german")
result1 = translator.translate(text=english_text)
result2 = translator.translate(text=chinese_text)
print(f"original english text: {english_text} | translated text: {result1}")
print(f"original chinese text: {chinese_text} | translated text: {result2}")
# file translation
result_file = translator.translate_file("./test.txt")
print("file translation: ", result_file)
# examples using linguee:
text = "cute"
translated = LingueeTranslator(source="english", target="german").translate(
word=text
)
print("Using Linguee ==> the translated text: ", translated)
# examples using pons:
text = "good"
translated = PonsTranslator(source="en", target="ar").translate(word=text)
print("using Pons ==> the translated text: ", translated)
| 965 | 32.310345 | 79 | py |
deep-translator | deep-translator-master/tests/__init__.py | """Unit test package for deep_translator."""
| 45 | 22 | 44 | py |
deep-translator | deep-translator-master/tests/test_baidu.py | from unittest.mock import Mock, patch
import pytest
from deep_translator import BaiduTranslator
from deep_translator.exceptions import BaiduAPIerror
@patch("deep_translator.baidu.requests")
def test_simple_translation(mock_requests):
translator = BaiduTranslator(
appid="this-is-an-valid-appid",
appkey="this-is-an-valid-appkey",
source="en",
target="zh",
)
# Set the request response mock.
mock_response = Mock()
mock_response.status_code = 200
mock_response.json.return_value = {
"from": "en",
"to": "zh",
"trans_result": [{"src": "hello", "dst": "你好"}],
}
mock_requests.post.return_value = mock_response
translation = translator.translate("hello")
assert translation == "你好"
@patch("deep_translator.baidu.requests.get")
def test_wrong_api_key(mock_requests):
translator = BaiduTranslator(
appid="this-is-a-wrong-appid",
appkey="this-is-a-wrong-appkey",
source="en",
target="zh",
)
# Set the response status_code only.
mock_response = Mock()
mock_response.status_code = 200
mock_response.json.return_value = {
"error_code": "54001",
"error_msg": "Invalid Sign",
}
mock_requests.post.return_value = mock_response
with pytest.raises(BaiduAPIerror):
translator.translate("Hello")
# the remaining tests are actual requests to Baidu translator API and use appid and appkey
# if appid and appkey variable is None, they are skipped
appid = None
appkey = None
@pytest.mark.skipif(
appid is None or appkey is None,
reason="appid or appkey is not provided",
)
def test_baidu_successful_post_onetarget():
posted = BaiduTranslator(
appid=appid, appkey=appkey, source="en", target="zh"
).translate("Hello! How are you?")
assert isinstance(posted, str)
| 1,868 | 27.318182 | 90 | py |
deep-translator | deep-translator-master/tests/test_cli.py | #!/usr/bin/env python
"""Tests for the CLI interface."""
import sys
import pytest
from deep_translator.cli import CLI
@pytest.fixture
def mock_args():
sys.argv[1:] = ["--source", "en", "--target", "de", "--text", "hello"]
return CLI(sys.argv[1:]).parse_args()
def test_source(mock_args):
assert mock_args.source == "en"
def test_target(mock_args):
assert mock_args.target == "de"
| 406 | 15.958333 | 74 | py |
deep-translator | deep-translator-master/tests/test_data.py | test_text_standard = "Hello world."
TRANSLATED_RESULTS = {
"afrikaans": "Hello Wêreld.",
"albanian": "Përshendetje Botë.",
"amharic": "ሰላም ልዑል.",
"arabic": "مرحبا بالعالم.",
"armenian": "Բարեւ աշխարհ.",
"azerbaijani": "Salam dünya.",
"basque": "Kaixo Mundua.",
"belarusian": "Прывітанне Сусвет.",
"bengali": "ওহে বিশ্ব.",
"bosnian": "Zdravo svijete.",
"bulgarian": "Здравей свят.",
"catalan": "Hola món.",
"cebuano": "Kumusta kalibutan.",
"chichewa": "Moni Dziko Lapansi.",
"chinese (simplified)": "你好,世界。",
"chinese (traditional)": "你好,世界。",
"corsican": "Bonghjornu mondu.",
"croatian": "Pozdrav svijete.",
"czech": "Ahoj světe.",
"danish": "Hej Verden.",
"dutch": "Hallo Wereld.",
"esperanto": "Saluton mondo.",
"estonian": "Tere, Maailm.",
"filipino": "Kamusta mundo",
"finnish": "Hei maailma.",
"french": "Bonjour le monde.",
"frisian": "Hallo wrâld.",
"galician": "Ola mundo.",
"georgian": "Გამარჯობა მსოფლიო.",
"german": "Hallo Welt.",
"greek": "Γειά σου Κόσμε.",
"gujarati": "હેલો વર્લ્ડ.",
"haitian creole": "Bonjou mond.",
"hausa": "Sannu Duniya.",
"hawaiian": "Aloha honua.",
"hebrew": "שלום עולם.",
"hindi": "नमस्ते दुनिया।",
"hmong": "Nyob zoo ntiaj teb.",
"hungarian": "Helló Világ.",
"icelandic": "Halló heimur.",
"igbo": "Ndewo Ụwa.",
"indonesian": "Halo Dunia.",
"irish": "Dia duit ar domhan.",
"italian": "Ciao mondo.",
"japanese": "こんにちは世界。",
"javanese": "Halo jagad.",
"kannada": "ಹಲೋ ವಿಶ್ವ.",
"kazakh": "Сәлем Әлем.",
"khmer": "សួស្តីពិភពលោក។",
"kinyarwanda": "Mwaramutse isi.",
"korean": "안녕하세요 세계입니다.",
"kurdish": "Hello cîhanê.",
"kyrgyz": "Салам дүйнө.",
"lao": "ສະບາຍດີຊາວໂລກ.",
"latin": "Salve mundi.",
"latvian": "Sveika pasaule.",
"lithuanian": "Labas pasauli.",
"luxembourgish": "Moien Welt.",
"macedonian": "Здраво свету.",
"malagasy": "Hello World.",
"malay": "Hai dunia.",
"malayalam": "ഹലോ വേൾഡ്.",
"maltese": "Hello dinja.",
"maori": "Kia ora te ao.",
"marathi": "नमस्कार जग.",
"mongolian": "Сайн уу ертөнц.",
"myanmar": "မင်္ဂလာပါကမ္ဘာလောက။",
"nepali": "नमस्कार संसार।",
"norwegian": "Hei Verden.",
"odia": "ନମସ୍କାର ବିଶ୍ୱବାସି।",
"pashto": "سلام نړی.",
"persian": "سلام دنیا.",
"polish": "Witaj świecie.",
"portuguese": "Olá Mundo.",
"punjabi": "ਸਤਿ ਸ੍ਰੀ ਅਕਾਲ ਦੁਨਿਆ.",
"romanian": "Salut Lume.",
"russian": "Привет, мир.",
"samoan": "Talofa lalolagi.",
"scots gaelic": "Hàlo a Shaoghail.",
"serbian": "Здраво Свете.",
"sesotho": "Lefatše Lumela.",
"shona": "Mhoro nyika.",
"sindhi": "هيلو دنيا.",
"sinhala": "හෙලෝ වර්ල්ඩ්.",
"slovak": "Ahoj svet.",
"slovenian": "Pozdravljen, svet.",
"somali": "Salaamu calaykum.",
"spanish": "Hola Mundo.",
"sundanese": "Halo Dunya.",
"swahili": "Salamu, Dunia.",
"swedish": "Hej världen.",
"tajik": "Салом Ҷаҳон.",
"tamil": "வணக்கம் உலகம்.",
"tatar": "Сәлам, Дөнья.",
"telugu": "హలో వరల్డ్.",
"thai": "สวัสดีชาวโลก.",
"turkish": "Selam Dünya.",
"turkmen": "Salam dünýä.",
"ukrainian": "Привіт Світ.",
"urdu": "سلام دنیا۔",
"uyghur": "ياخشىمۇسىز دۇنيا.",
"uzbek": "Salom Dunyo.",
"vietnamese": "Chào thế giới.",
"welsh": "Helo Byd.",
"xhosa": "Molo Lizwe.",
"yiddish": "העלא וועלט.",
"yoruba": "Mo ki O Ile Aiye.",
"zulu": "Sawubona Mhlaba.",
}
| 3,552 | 30.442478 | 40 | py |
deep-translator | deep-translator-master/tests/test_deepl.py | from unittest.mock import Mock, patch
import pytest
from deep_translator.deepl import DeeplTranslator
from deep_translator.exceptions import AuthorizationException
@patch("deep_translator.deepl.requests")
def test_simple_translation(mock_requests):
translator = DeeplTranslator(
api_key="imagine-this-is-an-valid-api-key", source="en", target="es"
)
# Set the request response mock.
mock_response = Mock()
mock_response.status_code = 200
mock_response.json.return_value = {"translations": [{"text": "hola"}]}
mock_requests.get.return_value = mock_response
translation = translator.translate("hello")
assert translation == "hola"
@patch("deep_translator.deepl.requests.get")
def test_wrong_api_key(mock_requests):
translator = DeeplTranslator(
api_key="this-is-a-wrong-api-key!", source="en", target="es"
)
# Set the response status_code only.
mock_requests.return_value = Mock(status_code=403)
with pytest.raises(AuthorizationException):
translator.translate("Hello")
| 1,054 | 31.96875 | 76 | py |
deep-translator | deep-translator-master/tests/test_google.py | #!/usr/bin/env python
"""Tests for `deep_translator` package."""
import pytest
from deep_translator import GoogleTranslator, exceptions
from deep_translator.constants import GOOGLE_LANGUAGES_TO_CODES
@pytest.fixture
def google_translator():
"""Sample pytest fixture.
See more at: http://doc.pytest.org/en/latest/fixture.html
"""
return GoogleTranslator(target="en")
def test_content(google_translator):
"""Sample pytest test function with the pytest fixture as an argument."""
assert google_translator.translate(text="좋은") == "good"
def test_abbreviations_and_languages_mapping():
for abb, lang in GOOGLE_LANGUAGES_TO_CODES.items():
g1 = GoogleTranslator(abb)
g2 = GoogleTranslator(lang)
assert g1._source == g2._source
def test_inputs():
with pytest.raises(exceptions.InvalidSourceOrTargetLanguage):
GoogleTranslator(source="", target="")
with pytest.raises(exceptions.InvalidSourceOrTargetLanguage):
GoogleTranslator(source="auto", target="")
with pytest.raises(exceptions.InvalidSourceOrTargetLanguage):
GoogleTranslator(source="", target="en")
def test_empty_text(google_translator):
empty_txt = ""
res = google_translator.translate(text=empty_txt)
assert res == empty_txt
def test_payload(google_translator):
with pytest.raises(exceptions.NotValidPayload):
google_translator.translate(text={})
with pytest.raises(exceptions.NotValidPayload):
google_translator.translate(text=[])
with pytest.raises(exceptions.NotValidLength):
google_translator.translate("a" * 5001)
def test_one_character_words():
assert (
GoogleTranslator(source="es", target="en").translate("o") is not None
)
| 1,757 | 26.904762 | 77 | py |
deep-translator | deep-translator-master/tests/test_libre.py | #!/usr/bin/env python
"""Tests for `deep_translator` package."""
import pytest
from deep_translator import LibreTranslator, exceptions
from deep_translator.constants import LIBRE_LANGUAGES_TO_CODES
@pytest.fixture
def libre():
return LibreTranslator(source="en", target="fr", api_key="some_key")
def test_inputs():
with pytest.raises(exceptions.InvalidSourceOrTargetLanguage):
LibreTranslator(source="", target="", api_key="some_key")
with pytest.raises(exceptions.InvalidSourceOrTargetLanguage):
LibreTranslator(source="auto", target="", api_key="some_key")
with pytest.raises(exceptions.InvalidSourceOrTargetLanguage):
LibreTranslator(source="", target="en", api_key="some_key")
def test_abbreviations_and_languages_mapping():
for abb, lang in LIBRE_LANGUAGES_TO_CODES.items():
l1 = LibreTranslator(source=abb, api_key="some_key")
l2 = LibreTranslator(source=lang, api_key="some_key")
assert l1._source == l2._source
def test_payload(libre):
with pytest.raises(exceptions.NotValidPayload):
libre.translate({})
with pytest.raises(exceptions.NotValidPayload):
libre.translate([])
| 1,186 | 28.675 | 72 | py |
deep-translator | deep-translator-master/tests/test_linguee.py | #!/usr/bin/env python
"""Tests for `deep_translator` package."""
import pytest
from deep_translator import LingueeTranslator, exceptions
@pytest.fixture
def linguee():
return LingueeTranslator(source="english", target="german")
def test_content(linguee):
"""Sample pytest test function with the pytest fixture as an argument."""
# from bs4 import BeautifulSoup
# assert 'GitHub' in BeautifulSoup(response.content).title.string
assert linguee.translate(word="good") is not None
def test_inputs():
with pytest.raises(exceptions.InvalidSourceOrTargetLanguage):
LingueeTranslator(source="", target="")
with pytest.raises(exceptions.InvalidSourceOrTargetLanguage):
LingueeTranslator(source="auto", target="")
with pytest.raises(exceptions.InvalidSourceOrTargetLanguage):
LingueeTranslator(source="", target="en")
with pytest.raises(exceptions.LanguageNotSupportedException):
LingueeTranslator(source="en", target="fr")
ling_translate = LingueeTranslator("english", "french")
assert ling_translate._source == "english"
assert ling_translate._target == "french"
def test_payload(linguee):
with pytest.raises(exceptions.NotValidPayload):
linguee.translate({})
with pytest.raises(exceptions.NotValidPayload):
linguee.translate([])
with pytest.raises(exceptions.NotValidLength):
linguee.translate("a" * 51)
| 1,430 | 28.204082 | 77 | py |
deep-translator | deep-translator-master/tests/test_microsoft_trans.py | #!/usr/bin/env python
"""Tests for `deep_translator` package."""
from unittest.mock import patch
import pytest
import requests
from deep_translator import MicrosoftTranslator, exceptions
# mocked request.post
@patch.object(requests, "post")
def test_microsoft_successful_post_mock(mock_request_post):
returned_json = [
{"translations": [{"text": "See you later!", "to": "en"}]}
]
def res():
r = requests.Response()
def json_func():
return returned_json
r.json = json_func
return r
mock_request_post.return_value = res()
assert (
MicrosoftTranslator(
api_key="an_api_key", source="de", target="en"
).translate("auf wiedersehen!")
== "See you later!"
)
def test_MicrosoftAPIerror():
with pytest.raises(exceptions.MicrosoftAPIerror):
MicrosoftTranslator(
api_key="empty", source="de", target="en"
).translate("text")
# the remaining tests are actual requests to Microsoft API and use an api key
# if APIkey variable is None, they are skipped
APIkey = None
@pytest.mark.skipif(APIkey is None, reason="api_key is not provided")
def test_microsoft_successful_post_onetarget():
posted = MicrosoftTranslator(api_key=APIkey, target="en").translate(
"auf wiedersehen!"
)
assert isinstance(posted, str)
@pytest.mark.skipif(APIkey is None, reason="api_key is not provided")
def test_microsoft_successful_post_twotargets():
posted = MicrosoftTranslator(
api_key=APIkey, target=["en", "ru"]
).translate("auf wiedersehen!")
assert isinstance(posted, str)
@pytest.mark.skipif(APIkey is None, reason="api_key is not provided")
def test_incorrect_target_attributes():
with pytest.raises(exceptions.ServerException):
MicrosoftTranslator(api_key=APIkey, target="")
with pytest.raises(exceptions.ServerException):
MicrosoftTranslator(api_key="", target="nothing")
@pytest.mark.skipif(APIkey is None, reason="api_key is not provided")
def test_abbreviations():
m1 = MicrosoftTranslator(api_key=APIkey, source="en", target="fr")
m2 = MicrosoftTranslator(api_key=APIkey, source="English", target="French")
assert "".join(m1._source) == "".join(m2._source)
assert "".join(m1._target) == "".join(m2._target)
| 2,329 | 27.765432 | 79 | py |
deep-translator | deep-translator-master/tests/test_mymemory.py | #!/usr/bin/env python
"""Tests for `deep_translator` package."""
import pytest
from deep_translator import MyMemoryTranslator, exceptions
@pytest.fixture
def mymemory():
return MyMemoryTranslator(source="en-GB", target="fr-FR")
def test_content(mymemory):
"""Sample pytest test function with the pytest fixture as an argument."""
# from bs4 import BeautifulSoup
# assert 'GitHub' in BeautifulSoup(response.content).title.string
assert mymemory.translate(text="good") is not None
def test_inputs():
with pytest.raises(exceptions.InvalidSourceOrTargetLanguage):
MyMemoryTranslator(source="", target="")
with pytest.raises(exceptions.InvalidSourceOrTargetLanguage):
MyMemoryTranslator(source="auto", target="")
with pytest.raises(exceptions.InvalidSourceOrTargetLanguage):
MyMemoryTranslator(source="", target="en-GB")
m1 = MyMemoryTranslator("en-GB", "fr-FR")
m2 = MyMemoryTranslator("english", "french")
assert m1._source == m2._source
assert m1._target == m2._target
def test_payload(mymemory):
with pytest.raises(exceptions.NotValidPayload):
mymemory.translate(text={})
with pytest.raises(exceptions.NotValidPayload):
mymemory.translate(text=[])
with pytest.raises(exceptions.NotValidLength):
mymemory.translate(text="a" * 501)
| 1,353 | 27.808511 | 77 | py |
deep-translator | deep-translator-master/tests/test_pons.py | #!/usr/bin/env python
"""Tests for `deep_translator` package."""
import pytest
from deep_translator import PonsTranslator, exceptions
@pytest.fixture
def pons():
return PonsTranslator(source="english", target="french")
def test_content(pons):
"""Sample pytest test function with the pytest fixture as an argument."""
# from bs4 import BeautifulSoup
# assert 'GitHub' in BeautifulSoup(response.content).title.string
assert pons.translate(word="good") is not None
def test_inputs():
with pytest.raises(exceptions.InvalidSourceOrTargetLanguage):
PonsTranslator(source="", target="")
with pytest.raises(exceptions.InvalidSourceOrTargetLanguage):
PonsTranslator(source="auto", target="")
with pytest.raises(exceptions.InvalidSourceOrTargetLanguage):
PonsTranslator(source="", target="en")
l1 = PonsTranslator("en", "fr")
l2 = PonsTranslator("english", "french")
assert l1._source == l2._source
assert l1._target == l2._target
def test_payload(pons):
with pytest.raises(exceptions.NotValidPayload):
pons.translate({})
with pytest.raises(exceptions.NotValidPayload):
pons.translate([])
with pytest.raises(exceptions.NotValidLength):
pons.translate("a" * 51)
| 1,275 | 26.148936 | 77 | py |
deep-translator | deep-translator-master/tests/test_tencent.py | from unittest.mock import Mock, patch
import pytest
from deep_translator import TencentTranslator
from deep_translator.exceptions import TencentAPIerror
@patch("deep_translator.tencent.requests")
def test_simple_translation(mock_requests):
translator = TencentTranslator(
secret_id="imagine-this-is-an-valid-secret-id",
secret_key="imagine-this-is-an-valid-secret-key",
source="en",
target="zh",
)
# Set the request response mock.
mock_response = Mock()
mock_response.status_code = 200
mock_response.json.return_value = {
"Response": {
"TargetText": "你好",
"Source": "en",
"Target": "zh",
"RequestId": "000ee211-f19e-4a34-a214-e2bb1122d248",
}
}
mock_requests.get.return_value = mock_response
translation = translator.translate("hello")
assert translation == "你好"
@patch("deep_translator.tencent.requests")
def test_wrong_api_key(mock_requests):
translator = TencentTranslator(
secret_id="imagine-this-is-a-wrong-secret-id",
secret_key="imagine-this-is-a-wrong-secret-id",
source="en",
target="zh",
)
mock_response = Mock()
mock_response.status_code = 200
mock_response.json.return_value = {
"Response": {
"Error": {
"Code": "AuthFailure.SignatureFailure",
"Message": "The provided credentials could not be validated. \
Please check your signature is correct.",
},
"RequestId": "ed93f3cb-f35e-473f-b9f3-0d451b8b79c6",
}
}
mock_requests.get.return_value = mock_response
with pytest.raises(TencentAPIerror):
translator.translate("Hello")
# the remaining tests are actual requests to Tencent translator API and use secret_id and secret_key
# if secret_id and secret_key variable is None, they are skipped
secret_id = None
secret_key = None
@pytest.mark.skipif(
secret_id is None or secret_key is None,
reason="secret_id or secret_key is not provided",
)
def test_tencent_successful_post_onetarget():
posted = TencentTranslator(
secret_id=secret_id, secret_key=secret_key, source="en", target="zh"
).translate("Hello! How are you?")
assert isinstance(posted, str)
| 2,307 | 29.773333 | 100 | py |
pythainlp-dev/.pep8speaks.yml | pythainlp-dev/.pep8speaks.yml | scanner:
diff_only: True # If False, the entire file touched by the Pull Request is scanned for errors. If True, only the diff is scanned.
linter: pycodestyle # Other option is flake8
pycodestyle: # Same as scanner.linter value. Other option is flake8
max-line-length: 100 # Default is 79 in PEP 8
ignore: # Errors and warnings to ignore
- W504 # line break after binary operator
- E402 # module level import not at top of file
- E731 # do not assign a lambda expression, use a def
no_blank_comment: True # If True, no comment is made on PR without any errors.
| 611 | 50 | 134 | yml |
pythainlp-dev/.pyup.yml | pythainlp-dev/.pyup.yml | # autogenerated pyup.io config file
# see https://pyup.io/docs/configuration/ for all available options
schedule: ''
update: false
requirements:
- requirements.txt:
# update all dependencies and pin them
update: all
pin: True
- docker_requirements.txt:
# don't update dependencies, use global 'pin' default
update: False
| 357 | 24.571429 | 67 | yml |
pythainlp-dev/CODE_OF_CONDUCT.md | pythainlp-dev/CODE_OF_CONDUCT.md | # Contributor Covenant Code of Conduct
## Our Pledge
In the interest of fostering an open and welcoming environment, we as
contributors and maintainers pledge to making participation in our project and
our community a harassment-free experience for everyone, regardless of age, body
size, disability, ethnicity, sex characteristics, gender identity and expression,
level of experience, education, socio-economic status, nationality, personal
appearance, race, religion, or sexual identity and orientation.
## Our Standards
Examples of behavior that contributes to creating a positive environment
include:
* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members
Examples of unacceptable behavior by participants include:
* The use of sexualized language or imagery and unwelcome sexual attention or
advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic
address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Our Responsibilities
Project maintainers are responsible for clarifying the standards of acceptable
behavior and are expected to take appropriate and fair corrective action in
response to any instances of unacceptable behavior.
Project maintainers have the right and responsibility to remove, edit, or
reject comments, commits, code, wiki edits, issues, and other contributions
that are not aligned to this Code of Conduct, or to ban temporarily or
permanently any contributor for other behaviors that they deem inappropriate,
threatening, offensive, or harmful.
## Scope
This Code of Conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community. Examples of
representing a project or community include using an official project e-mail
address, posting via an official social media account, or acting as an appointed
representative at an online or offline event. Representation of a project may be
further defined and clarified by project maintainers.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported by contacting the project team at [email protected]. All
complaints will be reviewed and investigated and will result in a response that
is deemed necessary and appropriate to the circumstances. The project team is
obligated to maintain confidentiality with regard to the reporter of an incident.
Further details of specific enforcement policies may be posted separately.
Project maintainers who do not follow or enforce the Code of Conduct in good
faith may face temporary or permanent repercussions as determined by other
members of the project's leadership.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
[homepage]: https://www.contributor-covenant.org
For answers to common questions about this code of conduct, see
https://www.contributor-covenant.org/faq
| 3,352 | 42.545455 | 87 | md |
pythainlp-dev/CONTRIBUTING.md | pythainlp-dev/CONTRIBUTING.md | # Contributing to PyThaiNLP
Hi! Thanks for your interest in contributing to [PyThaiNLP](https://github.com/PyThaiNLP/pythainlp).
Please refer to our [Contributor Covenant Code of Conduct](https://github.com/PyThaiNLP/pythainlp/blob/dev/CODE_OF_CONDUCT.md).
## Issue Report and Discussion
- Discussion: https://github.com/PyThaiNLP/pythainlp/discussions
- GitHub issues (problems and suggestions): https://github.com/PyThaiNLP/pythainlp/issues
- Facebook group (not specific to PyThaiNLP, can be Thai NLP discussion in general): https://www.facebook.com/groups/thainlp
## Code
## Code Guidelines
- Follows [PEP8](http://www.python.org/dev/peps/pep-0008/), use [black](https://github.com/ambv/black) with `--line-length` = 79;
- Name identifiers (variables, classes, functions, module names) with meaningful
and pronounceable names (`x` is always wrong);
- Please follow this [naming convention](https://namingconvention.org/python/). For example, global constant variables must be in `ALL_CAPS`;
<img src="https://i.stack.imgur.com/uBr10.png" />
- Write tests for your new features. Test suites are in `tests/` directory. (see "Testing" section below);
- Run all tests before pushing (just execute `tox`) so you will know if your
changes broke something;
- Commented code is [dead
code](http://www.codinghorror.com/blog/2008/07/coding-without-comments.html);
- All `#TODO` comments should be turned into [issues](https://github.com/pythainlp/pythainlp/issues) in GitHub;
- When appropriate, use [f-String](https://www.python.org/dev/peps/pep-0498/)
  (use `f"{a} = {b}"`, instead of `"{} = {}".format(a, b)` or `"%s = %s" % (a, b)`);
- All text files, including source code, must be ended with one empty line. This is [to please git](https://stackoverflow.com/questions/5813311/no-newline-at-end-of-file#5813359) and [to keep up with POSIX standard](https://stackoverflow.com/questions/729692/why-should-text-files-end-with-a-newline).
### Version Control System
- We use [Git](http://git-scm.com/) as our [version control system](http://en.wikipedia.org/wiki/Revision_control),
so it may be a good idea to familiarize yourself with it.
- You can start with the [Pro Git book](http://git-scm.com/book/) (free!).
### Commit Comment
- [How to Write a Git Commit Message](https://chris.beams.io/posts/git-commit/)
- [Commit Verbs 101: why I like to use this and why you should also like it.](https://chris.beams.io/posts/git-commit/)
### Pull Request
- We use the famous [gitflow](http://nvie.com/posts/a-successful-git-branching-model/)
to manage our branches.
- When you do pull request on GitHub, Travis CI and AppVeyor will run tests
and several checks automatically. Click the "Details" link at the end of
each check to see what needs to be fixed.
## Documentation
- We use [Sphinx](https://www.sphinx-doc.org/en/master/) to generate API document
automatically from "docstring" comments in source code. This means the comment
section in the source code is important for the quality of documentation.
- A docstring should start with a one-line summary that ends with a full stop (period), followed by a blank line before the next paragraph starts (see the example after this list).
- A commit to release branches (e.g. `2.2`, `2.1`) with a title **"(build and deploy docs)"** (without quotes) will trigger the system to rebuild the documentation files and upload them to the website https://pythainlp.github.io/docs
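For illustration, a docstring in the described style might look like this (the function itself is hypothetical and exists only to demonstrate the format):

```python
def normalize_spaces(text: str) -> str:
    """Collapse consecutive whitespace in the input text into single spaces.

    The first line is a one-sentence summary ending with a full stop,
    followed by a blank line and then the longer description, as the
    guideline above describes.
    """
    return " ".join(text.split())
```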
## Testing
We use standard Python `unittest`. Test suites are in `tests/` directory.
To run unit tests locally together with code coverage test:
(from main `pythainlp/` directory)
```sh
coverage run -m unittest discover
```
See code coverage test:
```sh
coverage report
```
Generate code coverage test in HTML (files will be available in `htmlcov/` directory):
```sh
coverage html
```
Make sure the same tests pass on Travis CI and AppVeyor.
## Releasing
- We use [semantic versioning](https://semver.org/): MAJOR.MINOR.PATCH, with development build suffix: MAJOR.MINOR.PATCH-devBUILD
- Use [`bumpversion`](https://github.com/c4urself/bump2version/#installation) to manage versioning.
- `bumpversion [major|minor|patch|release|build]`
- Example:
```
#current_version = 2.3.3-dev0
bumpversion build
#current_version = 2.3.3-dev1
bumpversion build
#current_version = 2.3.3-dev2
bumpversion release
#current_version = 2.3.3-beta0
bumpversion release
#current_version = 2.3.3
bumpversion patch
#current_version = 2.3.4-dev0
bumpversion minor
#current_version = 2.4.0-dev0
bumpversion build
#current_version = 2.3.1-dev1
bumpversion major
#current_version = 3.0.0-dev0
bumpversion release
#current_version = 3.0.0-beta0
bumpversion release
#current_version = 3.0.0
```
## Credits
<a href="https://github.com/PyThaiNLP/pythainlp/graphs/contributors">
<img src="https://contributors-img.firebaseapp.com/image?repo=PyThaiNLP/pythainlp" />
</a>
Thanks to all the [contributors](https://github.com/PyThaiNLP/pythainlp/graphs/contributors). (Image made with [contributors-img](https://contributors-img.firebaseapp.com))
### Development Lead
- Wannaphong Phatthiyaphaibun <[email protected]> - founder, distribution and maintainance
- Korakot Chaovavanich - initial tokenization and soundex code
- Charin Polpanumas - classification and benchmarking
- Peeradej Tanruangporn - documentation
- Arthit Suriyawongkul - refactoring, packaging, distribution, and maintenance
- Chakri Lowphansirikul - documentation
- Pattarawat Chormai - benchmarking
- Thanathip Suntorntip - nlpO3 maintenance, Rust developer
- Can Udomcharoenchaikit - documentation and code
### Maintainers
- Arthit Suriyawongkul
- Wannaphong Phatthiyaphaibun
## References
- **[Maximum Matching]** -- Manabu Sassano. Deterministic Word Segmentation Using Maximum Matching with Fully Lexicalized Rules. Retrieved from http://www.aclweb.org/anthology/E14-4016
- **[MetaSound]** -- Snae & Brückner. (2009). Novel Phonetic Name Matching Algorithm with a Statistical Ontology for Analysing Names Given in Accordance with Thai Astrology. Retrieved from https://pdfs.semanticscholar.org/3983/963e87ddc6dfdbb291099aa3927a0e3e4ea6.pdf
- **[Thai Character Cluster]** -- T. Teeramunkong, V. Sornlertlamvanich, T. Tanhermhong and W. Chinnan, “Character cluster based Thai information retrieval,” in IRAL '00 Proceedings of the fifth international workshop on on Information retrieval with Asian languages, 2000.
- **[Enhanced Thai Character Cluster]** -- Jeeragone Inrut, Patiroop Yuanghirun, Sarayut Paludkong, Supot Nitsuwat, and Para Limmaneepraserth. “Thai word segmentation using combination of forward and backward longest matching techniques.” In International Symposium on Communications and Information Technology (ISCIT), pp. 37-40. 2001.
- เพ็ญศิริ ลี้ตระกูล. การเลือกประโยคสำคัญในการสรุปความภาษาไทย โดยใช้แบบจำลองแบบลำดับชั้น (Selection of Important Sentences in Thai Text Summarization Using a Hierarchical Model). Retrieved from http://digi.library.tu.ac.th/thesis/st/0192/
| 7,030 | 43.783439 | 336 | md |
pythainlp-dev/INTHEWILD.md | pythainlp-dev/INTHEWILD.md | # Who uses PyThaiNLP?
We'd like to keep track of who is using the package. Please send a PR with your company name or @githubhandle or company name with @githubhandle.
Currently, officially using PyThaiNLP:
1. [Hope Data Annotations Co., Ltd.](https://hopedata.org) ([@hopedataannotations](https://github.com/hopedataannotaions))
2. [Codustry (Thailand) Co., Ltd.](https://codustry.com) ([@codustry](https://github.com/codustry))
| 433 | 47.222222 | 145 | md |
pythainlp-dev/README.md | pythainlp-dev/README.md | <div align="center">
<img src="https://avatars0.githubusercontent.com/u/32934255?s=200&v=4"/>
<h1>PyThaiNLP: Thai Natural Language Processing in Python</h1>
<a href="https://pypi.python.org/pypi/pythainlp"><img alt="pypi" src="https://img.shields.io/pypi/v/pythainlp.svg"/></a>
<a href="https://www.python.org/downloads/release/python-370/"><img alt="Python 3.7" src="https://img.shields.io/badge/python-3.7-blue.svg"/></a>
<a href="https://opensource.org/licenses/Apache-2.0"><img alt="License" src="https://img.shields.io/badge/License-Apache%202.0-blue.svg"/></a>
<a href="https://pepy.tech/project/pythainlp"><img alt="Download" src="https://pepy.tech/badge/pythainlp/month"/></a>
<a href="https://github.com/PyThaiNLP/pythainlp/actions/workflows/test.yml"><img alt="Unit test and code coverage" src="https://github.com/PyThaiNLP/pythainlp/actions/workflows/test.yml/badge.svg"/></a>
<a href="https://coveralls.io/github/PyThaiNLP/pythainlp?branch=dev"><img alt="Coverage Status" src="https://coveralls.io/repos/github/PyThaiNLP/pythainlp/badge.svg?branch=dev"/></a>
<a href="https://www.codacy.com/gh/PyThaiNLP/pythainlp/dashboard?utm_source=github.com&utm_medium=referral&utm_content=PyThaiNLP/pythainlp&utm_campaign=Badge_Grade"><img src="https://app.codacy.com/project/badge/Grade/5821a0de122041c79999bbb280230ffb"/></a>
<a href="https://colab.research.google.com/github/PyThaiNLP/tutorials/blob/master/source/notebooks/pythainlp_get_started.ipynb"><img alt="Google Colab Badge" src="https://badgen.net/badge/Launch%20Quick%20Start%20Guide/on%20Google%20Colab/blue?icon=terminal"/></a>
<a href="https://zenodo.org/badge/latestdoi/61813823"><img alt="DOI" src="https://zenodo.org/badge/61813823.svg"/></a>
<a href="https://matrix.to/#/#thainlp:matrix.org" rel="noopener" target="_blank"><img src="https://matrix.to/img/matrix-badge.svg" alt="Chat on Matrix"></a>
</div>
PyThaiNLP is a Python package for text processing and linguistic analysis, similar to [NLTK](https://www.nltk.org/), with a focus on the Thai language.
PyThaiNLP เป็นไลบารีภาษาไพทอนสำหรับประมวลผลภาษาธรรมชาติ คล้ายกับ NLTK โดยเน้นภาษาไทย [ดูรายละเอียดภาษาไทยได้ที่ README_TH.MD](https://github.com/PyThaiNLP/pythainlp/blob/dev/README_TH.md)
**News**
> Now, You can contact or ask any questions with the PyThaiNLP team. <a href="https://matrix.to/#/#thainlp:matrix.org" rel="noopener" target="_blank"><img src="https://matrix.to/img/matrix-badge.svg" alt="Chat on Matrix"></a>
| Version | Description | Status |
|:------:|:--:|:------:|
| [4.0](https://github.com/PyThaiNLP/pythainlp/releases) | Stable | [Change Log](https://github.com/PyThaiNLP/pythainlp/issues/714) |
| [`dev`](https://github.com/PyThaiNLP/pythainlp/tree/dev) | Release Candidate for 4.1 | [Change Log](https://github.com/PyThaiNLP/pythainlp/issues/788) |
## Getting Started
- PyThaiNLP 2 requires Python 3.6+. Python 2.7 users can use PyThaiNLP 1.6. See [2.0 change log](https://github.com/PyThaiNLP/pythainlp/issues/118) | [Upgrading from 1.7](https://pythainlp.github.io/docs/2.0/notes/pythainlp-1_7-2_0.html) | [Upgrading ThaiNER from 1.7](https://github.com/PyThaiNLP/pythainlp/wiki/Upgrade-ThaiNER-from-PyThaiNLP-1.7-to-PyThaiNLP-2.0)
- [PyThaiNLP Get Started notebook](https://pythainlp.github.io/tutorials/notebooks/pythainlp_get_started.html) | [API document](https://pythainlp.github.io/docs) | [Tutorials](https://pythainlp.github.io/tutorials)
- [Official website](https://pythainlp.github.io/) | [PyPI](https://pypi.org/project/pythainlp/) | [Facebook page](https://www.facebook.com/pythainlp/)
- [Who uses PyThaiNLP?](https://github.com/PyThaiNLP/pythainlp/blob/dev/INTHEWILD.md)
- [Model cards](https://github.com/PyThaiNLP/pythainlp/wiki/Model-Cards) - for technical details, caveats, and ethical considerations of the models developed and used in PyThaiNLP
## Capabilities
PyThaiNLP provides standard NLP functions for Thai, for example part-of-speech tagging, linguistic unit segmentation (syllable, word, or sentence). Some of these functions are also available via command-line interface.
<details>
<summary>List of Features</summary>
- Convenient character and word classes, like Thai consonants (`pythainlp.thai_consonants`), vowels (`pythainlp.thai_vowels`), digits (`pythainlp.thai_digits`), and stop words (`pythainlp.corpus.thai_stopwords`) -- comparable to constants like `string.letters`, `string.digits`, and `string.punctuation`
- Thai linguistic unit segmentation/tokenization, including sentence (`sent_tokenize`), word (`word_tokenize`), and subword segmentations based on Thai Character Cluster (`subword_tokenize`)
- Thai part-of-speech tagging (`pos_tag`)
- Thai spelling suggestion and correction (`spell` and `correct`)
- Thai transliteration (`transliterate`)
- Thai soundex (`soundex`) with three engines (`lk82`, `udom83`, `metasound`)
- Thai collation (sort by dictionary order) (`collate`)
- Spell out numbers as Thai words (`bahttext`, `num_to_thaiword`)
- Thai datetime formatting (`thai_strftime`)
- Fix text typed with a mis-switched Thai-English keyboard layout (`eng_to_thai`, `thai_to_eng`)
- Command-line interface for basic functions, like tokenization and pos tagging (run `thainlp` in your shell)
</details>
## Installation
```sh
pip install --upgrade pythainlp
```
This will install the latest stable release of PyThaiNLP.
Install different releases:
- Stable release: `pip install --upgrade pythainlp`
- Pre-release (near ready): `pip install --upgrade --pre pythainlp`
- Development (likely to break things): `pip install https://github.com/PyThaiNLP/pythainlp/archive/dev.zip`
### Installation Options
Some functionalities, like Thai WordNet, may require extra packages. To install those requirements, specify a set of `[name]` immediately after `pythainlp`:
```sh
pip install pythainlp[extra1,extra2,...]
```
<details>
<summary>List of possible `extras`</summary>
- `full` (install everything)
- `attacut` (to support attacut, a fast and accurate tokenizer)
- `benchmarks` (for [word tokenization benchmarking](tokenization-benchmark.md))
- `icu` (for ICU, International Components for Unicode, support in transliteration and tokenization)
- `ipa` (for IPA, International Phonetic Alphabet, support in transliteration)
- `ml` (to support ULMFiT models for classification)
- `thai2fit` (for Thai word vector)
- `thai2rom` (for machine-learnt romanization)
- `wordnet` (for Thai WordNet API)
</details>
For dependency details, look at `extras` variable in [`setup.py`](https://github.com/PyThaiNLP/pythainlp/blob/dev/setup.py).
## Data directory
- Some additional data, like word lists and language models, may be downloaded automatically at runtime.
- PyThaiNLP caches these data under the directory `~/pythainlp-data` by default.
- Data directory can be changed by specifying the environment variable `PYTHAINLP_DATA_DIR` (see the sketch after this list).
- See the data catalog (`db.json`) at https://github.com/PyThaiNLP/pythainlp-corpus
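A short sketch of overriding the data directory from Python; the path is a placeholder, and setting the variable before the first PyThaiNLP import or download is the safe order:

```python
import os

# Placeholder path; downloaded corpora and models will be cached here
# instead of the default ~/pythainlp-data.
os.environ["PYTHAINLP_DATA_DIR"] = "/path/to/my-pythainlp-data"

import pythainlp  # noqa: E402  (imported after setting the variable)
```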
## Command-Line Interface
Some of PyThaiNLP's functionalities can be used from the command line via the `thainlp` command.
For example, displaying a catalog of datasets:
```sh
thainlp data catalog
```
Showing how to use:
```sh
thainlp help
```
## Licenses
| | License |
|:---|:----|
| PyThaiNLP Source Code and Notebooks | [Apache Software License 2.0](https://github.com/PyThaiNLP/pythainlp/blob/dev/LICENSE) |
| Corpora, datasets, and documentations created by PyThaiNLP | [Creative Commons Zero 1.0 Universal Public Domain Dedication License (CC0)](https://creativecommons.org/publicdomain/zero/1.0/)|
| Language models created by PyThaiNLP | [Creative Commons Attribution 4.0 International Public License (CC-by)](https://creativecommons.org/licenses/by/4.0/) |
| Other corpora and models that may included with PyThaiNLP | See [Corpus License](https://github.com/PyThaiNLP/pythainlp/blob/dev/pythainlp/corpus/corpus_license.md) |
## Contribute to PyThaiNLP
- Please do fork and create a pull request :)
- For style guide and other information, including references to algorithms we use, please refer to our [contributing](https://github.com/PyThaiNLP/pythainlp/blob/dev/CONTRIBUTING.md) page.
## Who uses PyThaiNLP?
You can read [INTHEWILD.md](https://github.com/PyThaiNLP/pythainlp/blob/dev/INTHEWILD.md).
## Citations
If you use `PyThaiNLP` in your project or publication, please cite the library as follows
```
Wannaphong Phatthiyaphaibun, Korakot Chaovavanich, Charin Polpanumas, Arthit Suriyawongkul, Lalita Lowphansirikul, & Pattarawat Chormai. (2016, Jun 27). PyThaiNLP: Thai Natural Language Processing in Python. Zenodo. http://doi.org/10.5281/zenodo.3519354
```
or BibTeX entry:
``` bib
@misc{pythainlp,
author = {Wannaphong Phatthiyaphaibun and Korakot Chaovavanich and Charin Polpanumas and Arthit Suriyawongkul and Lalita Lowphansirikul and Pattarawat Chormai},
title = {{PyThaiNLP: Thai Natural Language Processing in Python}},
month = Jun,
year = 2016,
doi = {10.5281/zenodo.3519354},
publisher = {Zenodo},
url = {http://doi.org/10.5281/zenodo.3519354}
}
```
## Sponsors
| Logo | Description |
| --- | ----------- |
| [](https://airesearch.in.th/) | Since 2019, our contributors Korakot Chaovavanich and Lalita Lowphansirikul have been supported by [VISTEC-depa Thailand Artificial Intelligence Research Institute](https://airesearch.in.th/). |
| [](https://www.macstadium.com) | We receive a free Mac Mini M1 from [MacStadium](https://www.macstadium.com) for running our build CI. |
------
<div align="center">
Made with ❤️ | PyThaiNLP Team 💻 | "We build Thai NLP" 🇹🇭
</div>
------
<div align="center">
<strong>We have only one official repository at https://github.com/PyThaiNLP/pythainlp and another mirror at https://gitlab.com/pythainlp/pythainlp</strong>
</div>
<div align="center">
<strong>Beware of malware if you use code from mirrors other than the official two at GitHub and GitLab.</strong>
</div>
| 10,381 | 54.817204 | 374 | md |
pythainlp-dev/README_TH.md | pythainlp-dev/README_TH.md | <div align="center">
<img src="https://avatars0.githubusercontent.com/u/32934255?s=200&v=4"/>
<h1>PyThaiNLP: Thai Natural Language Processing in Python</h1>
<a href="https://pypi.python.org/pypi/pythainlp"><img alt="pypi" src="https://img.shields.io/pypi/v/pythainlp.svg"/></a>
<a href="https://www.python.org/downloads/release/python-370/"><img alt="Python 3.7" src="https://img.shields.io/badge/python-3.7-blue.svg"/></a>
<a href="https://opensource.org/licenses/Apache-2.0"><img alt="License" src="https://img.shields.io/badge/License-Apache%202.0-blue.svg"/></a>
<a href="https://pepy.tech/project/pythainlp"><img alt="Download" src="https://pepy.tech/badge/pythainlp/month"/></a>
<a href="https://ci.appveyor.com/project/wannaphongcom/pythainlp-9y1ch"><img alt="Build status" src="https://ci.appveyor.com/api/projects/status/9g3mfcwchi8em40x?svg=true"/></a>
<a href="https://coveralls.io/github/PyThaiNLP/pythainlp?branch=dev"><img alt="Coverage Status" src="https://coveralls.io/repos/github/PyThaiNLP/pythainlp/badge.svg?branch=dev"/></a>
<a href="https://www.codacy.com/app/pythainlp/pythainlp_2"><img alt="Codacy Badge" src="https://api.codacy.com/project/badge/Grade/cb946260c87a4cc5905ca608704406f7"/></a>
<a href="https://app.fossa.io/projects/git%2Bgithub.com%2FPyThaiNLP%2Fpythainlp"><img alt="FOSSA Status" src="https://app.fossa.io/api/projects/git%2Bgithub.com%2FPyThaiNLP%2Fpythainlp.svg?type=shield"/></a>
<a href="https://colab.research.google.com/github/PyThaiNLP/tutorials/blob/master/source/notebooks/pythainlp_get_started.ipynb"><img alt="Google Colab Badge" src="https://badgen.net/badge/Launch%20Quick%20Start%20Guide/on%20Google%20Colab/blue?icon=terminal"/></a>
<a href="https://zenodo.org/badge/latestdoi/61813823"><img alt="DOI" src="https://zenodo.org/badge/61813823.svg"/></a>
</div>
PyThaiNLP เป็นไลบารีภาษาไพทอนสำหรับประมวลผลภาษาธรรมชาติ โดยเน้นภาษาไทย
**ข่าวสาร**
> คุณสามารถพูดคุยหรือแชทกับทีม PyThaiNLP หรือผู้สนับสนุนคนอื่น ๆ ได้ที่ <a href="https://matrix.to/#/#thainlp:matrix.org" rel="noopener" target="_blank"><img src="https://matrix.to/img/matrix-badge.svg" alt="Chat on Matrix"></a>
| รุ่น | คำอธิบาย | สถานะ |
|:------:|:--:|:------:|
| [4.0](https://github.com/PyThaiNLP/pythainlp/releases) | Stable | [Change Log](https://github.com/PyThaiNLP/pythainlp/issues/714) |
| [`dev`](https://github.com/PyThaiNLP/pythainlp/tree/dev) | Release Candidate for 4.1 | [Change Log](https://github.com/PyThaiNLP/pythainlp/issues/788) |
ติดตามพวกเราบน [PyThaiNLP Facebook page](https://www.facebook.com/pythainlp/) เพื่อรับข่าวสารเพิ่มเติม
## เริ่มต้นกับ PyThaiNLP
พวกเราได้จัดทำ [PyThaiNLP Get Started Tutorial](https://pythainlp.github.io/tutorials/notebooks/pythainlp_get_started.html) สำหรับสำรวจความสามารถของ PyThaiNLP; พวกเรามีเอกสารสอนใช้งาน สามารถศึกษาได้ที่ [หน้า tutorial](https://pythainlp.github.io/tutorials).
อ่านเอกสารล่าสุดได้ที่ [https://pythainlp.github.io/docs](https://pythainlp.github.io/docs).
พวกเราพยายามทำให้โมดูลใช้งานได้ง่ายที่สุดเท่าที่จะเป็นไปได้; ตัวอย่างเช่น บางชุดข้อมูล (เช่น รายการคำและตัวแบบภาษา) จะถูกดาวน์โหลดอัตโนมัติเมื่อมีการเรียกใช้งาน โดย PyThaiNLP จะจัดเก็บข้อมูลเหล่านั้นไว้ในโฟลเดอร์ `~/pythainlp-data` เป็นค่าเริ่มต้น แต่ผู้ใช้งานสามารถระบุตำแหน่งที่ต้องการได้เองผ่านค่า environment variable `PYTHAINLP_DATA_DIR` อ่านรายละเอียดคลังข้อมูลเพิ่มเติมได้ที่ [PyThaiNLP/pythainlp-corpus](https://github.com/PyThaiNLP/pythainlp-corpus).
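ตัวอย่างการกำหนดตำแหน่งเก็บข้อมูลเองผ่าน environment variable (ตำแหน่ง `/path/to/pythainlp-data` เป็นเพียงตัวอย่างสมมุติ):
```sh
export PYTHAINLP_DATA_DIR=/path/to/pythainlp-data
```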
## ความสามารถ
PyThaiNLP มีความสามารถพื้นฐานสำหรับการประมวลผลภาษาไทย ตัวอย่างเช่นการกำกับหน้าที่ของคำ (part-of-speech tagging) การแบ่งหน่วยของข้อความตามหลักภาษาศาสตร์ (พยางค์ คำ และประโยค) บางความสามารถสามารถใช้งานได้ผ่านทางคอมมานด์ไลน์
<details>
<summary>รายการความสามารถ</summary>
- ชุดตัวอักขระและคำภาษาไทยที่เรียกใช้ได้สะดวก เช่น พยัญชนะ (`pythainlp.thai_consonants`), สระ (`pythainlp.thai_vowels`), ตัวเลข (`pythainlp.thai_digits`), และคำหยุด (stop word) (`pythainlp.corpus.thai_stopwords`) -- ซึ่งเทียบได้กับค่าคงที่มาตรฐานในไพทอนอย่าง `string.letters`, `string.digits`, และ `string.punctuation`
- Thai linguistic unit segmentation/tokenization, including sentence (`sent_tokenize`), word (`word_tokenize`), and subword segmentations based on Thai Character Cluster (`subword_tokenize`)
- Thai part-of-speech taggers (`pos_tag`)
- Thai spelling suggestion and correction (`spell` and `correct`)
- Thai transliteration (`transliterate`)
- Thai soundex (`soundex`) with three engines (`lk82`, `udom83`, `metasound`)
- Thai collation (sort by dictionary order) (`collate`)
- Read out number to Thai words (`bahttext`, `num_to_thaiword`)
- Thai datetime formatting (`thai_strftime`)
- Thai-English keyboard layout mis-switch fix (`eng_to_thai`, `thai_to_eng`)
- Command-line interface for basic functions, like tokenization and pos tagging (run `thainlp` in your shell)
</details>
อ่านรายละเอียดได้ที่ [tutorials](https://pythainlp.github.io/tutorials)
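ตัวอย่างการเรียกใช้งานเบื้องต้น (เป็นเพียงโค้ดตัวอย่างประกอบ ไม่ได้แสดงผลลัพธ์จริง):
```python
from pythainlp import word_tokenize, pos_tag
words = word_tokenize("ผมรักภาษาไทย")  # ตัดคำ / word segmentation
print(words)
print(pos_tag(words))  # กำกับหน้าที่ของคำ / part-of-speech tagging
```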
## การติดตั้ง
```sh
pip install --upgrade pythainlp
```
วิธีดังกล่าวเป็นการติดตั้งรุ่นเสถียรของ PyThaiNLP
PyThaiNLP ใช้ pip สำหรับจัดการโมดูลและใช้ PyPI เป็นช่องทางหลักในการแจกจ่ายโมดูล อ่านรายละเอียดได้ที่ [https://pypi.org/project/pythainlp/](https://pypi.org/project/pythainlp/)
ความแตกต่างในแต่ละรุ่น:
- รุ่นเสถียร: `pip install --upgrade pythainlp`
- รุ่นก่อนเสถียร (near ready): `pip install --upgrade --pre pythainlp`
- รุ่นที่กำลังพัฒนา (likely to break things): `pip install https://github.com/PyThaiNLP/pythainlp/archive/dev.zip`
### ตัวเลือกการติดตั้ง
บางความสามารถ เช่น Thai WordNet ต้องการโมดูลภายนอกในการทำงานนอกจาก PyThaiNLP ซึ่งในตอนติดตั้ง คุณจะต้องติดตั้งส่วนขยายพิเศษที่จำเป็นหรือ "extras" โดยระบุชื่อลงใน `[name]` ต่อท้าย `pythainlp`:
```sh
pip install pythainlp[extra1,extra2,...]
```
<details>
<summary>รายการสำหรับติดตั้งผ่าน `extras`</summary>
- `full` (ติดตั้งทุกอย่าง)
- `attacut` (เพื่อสนับสนุน attacut ซึ่งเป็นตัวตัดคำที่ทำงานได้รวดเร็วและมีประสิทธิภาพ)
- `benchmarks` (สำหรับ [word tokenization benchmarking](tokenization-benchmark.md))
- `icu` (สำหรับการรองรับ ICU หรือ International Components for Unicode ในการถอดเสียงเป็นอักษรและการตัดแบ่งคำ)
- `ipa` (สำหรับการรองรับ IPA หรือ International Phonetic Alphabet ในการถอดเสียงเป็นอักษร)
- `ml` (เพื่อให้สนับสนุนตัวแบบภาษา ULMFiT สำหรับการจำแนกข้อความ)
- `thai2fit` (สำหรับ Thai word vector)
- `thai2rom` (สำหรับการถอดอักษรไทยเป็นอักษรโรมัน)
- `wordnet` (สำหรับ Thai WordNet API)
</details>
สำหรับโมดูลที่ต้องการ สามารถดูรายละเอียดได้ที่ตัวแปร `extras` ใน [`setup.py`](https://github.com/PyThaiNLP/pythainlp/blob/dev/setup.py).
## Command-line
บางความสามารถของ PyThaiNLP สามารถใช้งานผ่าน command line ได้โดยใช้ `thainlp`
ตัวอย่าง, แสดงรายละเอียดของชุดข้อมูล:
```sh
thainlp data catalog
```
แสดงวิธีใช้งาน:
```sh
thainlp help
```
## ผู้ใช้งาน Python 2
- PyThaiNLP 2 สนับสนุน Python 3.6 ขึ้นไป บางความสามารถ สามารถใช้งานกับ Python 3 รุ่นก่อนหน้าได้ แต่ไม่ได้มีการทดสอบว่าใช้งานได้หรือไม่ อ่านเพิ่มเติม [1.7 -> 2.0 change log](https://github.com/PyThaiNLP/pythainlp/issues/118).
- [Upgrading from 1.7](https://pythainlp.github.io/docs/2.0/notes/pythainlp-1_7-2_0.html)
- [Upgrade ThaiNER from 1.7](https://github.com/PyThaiNLP/pythainlp/wiki/Upgrade-ThaiNER-from-PyThaiNLP-1.7-to-PyThaiNLP-2.0)
- ผู้ใช้งาน Python 2.7 สามารถใช้งาน PyThaiNLP 1.6
## การอ้างอิง
ถ้าคุณใช้ `PyThaiNLP` ในโปรเจคหรืองานวิจัยของคุณ คุณสามารถอ้างอิงได้ตามนี้
```
Wannaphong Phatthiyaphaibun, Korakot Chaovavanich, Charin Polpanumas, Arthit Suriyawongkul, Lalita Lowphansirikul, & Pattarawat Chormai. (2016, Jun 27). PyThaiNLP: Thai Natural Language Processing in Python. Zenodo. http://doi.org/10.5281/zenodo.3519354
```
หรือ BibTeX entry:
``` bib
@misc{pythainlp,
  author = {Wannaphong Phatthiyaphaibun and Korakot Chaovavanich and Charin Polpanumas and Arthit Suriyawongkul and Lalita Lowphansirikul and Pattarawat Chormai},
title = {{PyThaiNLP: Thai Natural Language Processing in Python}},
month = Jun,
year = 2016,
doi = {10.5281/zenodo.3519354},
publisher = {Zenodo},
url = {http://doi.org/10.5281/zenodo.3519354}
}
```
## ร่วมสนับสนุน PyThaiNLP
- กรุณา fork แล้วพัฒนาต่อ จากนั้นสร้าง pull request กลับมา :)
- สำหรับเอกสารแนะนำและอื่น ๆ รวมถึงการอ้างอิงขั้นตอนที่เราใช้งาน สามารถเข้าไปศึกษาเพิ่มเติมได้ที่หน้า [contributing](https://github.com/PyThaiNLP/pythainlp/blob/dev/CONTRIBUTING.md)
## ใครใช้ PyThaiNLP?
คุณสามารถอ่านได้ที่ [INTHEWILD.md](https://github.com/PyThaiNLP/pythainlp/blob/dev/INTHEWILD.md)
## สัญญาอนุญาต
| | สัญญาอนุญาต |
|:---|:----|
| PyThaiNLP Source Code and Notebooks | [Apache Software License 2.0](https://github.com/PyThaiNLP/pythainlp/blob/dev/LICENSE) |
| Corpora, datasets, and documentations created by PyThaiNLP | [Creative Commons Zero 1.0 Universal Public Domain Dedication License (CC0)](https://creativecommons.org/publicdomain/zero/1.0/)|
| Language models created by PyThaiNLP | [Creative Commons Attribution 4.0 International Public License (CC-by)](https://creativecommons.org/licenses/by/4.0/) |
| Other corpora and models that may be included with PyThaiNLP | See [Corpus License](https://github.com/PyThaiNLP/pythainlp/blob/dev/pythainlp/corpus/corpus_license.md) |
## บัตรโมเดล
สำหรับรายละเอียดทางเทคนิค ข้อควรระวัง และข้อคำนึงทางจริยธรรมของตัวแบบ (โมเดล) ที่ใช้ใน PyThaiNLP กรุณาดูที่ [Model cards](https://github.com/PyThaiNLP/pythainlp/wiki/Model-Cards)
## ผู้สนับสนุน
[](https://airesearch.in.th/)
ตั้งแต่ปี 2562 การสมทบพัฒนา PyThaiNLP โดย กรกฎ เชาวะวณิช และ ลลิตา โล่พันธุ์ศิริกุล สนับสนุนโดย [VISTEC-depa Thailand Artificial Intelligence Research Institute](https://airesearch.in.th/)
------
<div align="center">
สร้างด้วย ❤️ | ทีม PyThaiNLP 💻 | "เราสร้างการประมวลผลภาษาไทย" 🇹🇭
</div>
------
<div align="center">
<strong>เรามีที่เก็บข้อมูลอย่างเป็นทางการที่เดียวที่ https://github.com/PyThaiNLP/pythainlp และมีที่เก็บสำเนาอีกแห่งที่ https://gitlab.com/pythainlp/pythainlp</strong>
</div>
<div align="center">
<strong>โปรดระมัดระวังซอฟต์แวร์ประสงค์ร้ายหรือมัลแวร์ ถ้าคุณใช้โค้ดจากที่เก็บข้อมูลอื่นนอกเหนือจากที่ GitHub และ GitLab ข้างต้น</strong>
</div>
| 10,391 | 53.125 | 459 | md |
pythainlp-dev/SECURITY.md | pythainlp-dev/SECURITY.md | # Security Policy
## Supported Versions
| Version | Supported |
| ------- | ------------------ |
| 4.0.x | :white_check_mark: |
| 3.1.x | :white_check_mark: |
| 3.0.x | :x: |
| 2.3.x | :x: |
| 2.2.x | :x: |
| 2.1.x | :x: |
| 2.0.x | :x: |
| < 2.0 | :x: |
| 372 | 23.866667 | 32 | md |
pythainlp-dev/appveyor.yml | pythainlp-dev/appveyor.yml | #---------------------------------#
# general configuration #
#---------------------------------#
#skip_commits:
# message: /[skip ci]/ # skip if the commit message contains "(skip ci)"
#---------------------------------#
# environment configuration #
#---------------------------------#
image: Visual Studio 2019
skip_branch_with_pr: true
# scripts that are called at very beginning, before repo cloning
init:
# If there is a newer build queued for the same PR, cancel this one.
# The AppVeyor 'rollout builds' option is supposed to serve the same
# purpose but it is problematic because it tends to cancel builds pushed
# directly to master instead of just PR builds (or the converse).
# credits: JuliaLang developers.
- ps: if ($env:APPVEYOR_PULL_REQUEST_NUMBER -and $env:APPVEYOR_BUILD_NUMBER -ne ((Invoke-RestMethod `
https://ci.appveyor.com/api/projects/$env:APPVEYOR_ACCOUNT_NAME/$env:APPVEYOR_PROJECT_SLUG/history?recordsNumber=50).builds | `
Where-Object pullRequestId -eq $env:APPVEYOR_PULL_REQUEST_NUMBER)[0].buildNumber) { `
throw "There are newer queued builds for this pull request, skipping build." }
- ps: |
If (($env:SKIP_NOTAG -eq "true") -and ($env:APPVEYOR_REPO_TAG -ne "true")) {
Write-Host "Skipping build, not at a tag."
Exit-AppveyorBuild
}
- ECHO %APPVEYOR_BUILD_WORKER_IMAGE%
- "ECHO Python %PYTHON_VERSION% (%PYTHON_ARCH%bit) from %PYTHON%"
- ECHO %PYTHONIOENCODING%
- ECHO %ICU_VERSION%
- ECHO "Installed SDKs:"
- ps: "ls C:/Python*"
- ps: "ls \"C:/Program Files (x86)/Microsoft SDKs/Windows\""
# fetch repository as zip archive
# https://www.appveyor.com/docs/how-to/repository-shallow-clone/
shallow_clone: true
# set clone depth
clone_depth: 5 # clone entire repository history if not defined
environment:
global:
APPVEYOR_SAVE_CACHE_ON_ERROR: false
APPVEYOR_SKIP_FINALIZE_ON_EXIT: true
CMD_IN_ENV: "cmd /E:ON /V:ON /C .\\appveyor\\run_with_env.cmd"
PYTHONIOENCODING: "utf-8"
ICU_VERSION: "64.2"
DISTUTILS_USE_SDK: "1"
PYTHAINLP_DATA_DIR: "%LOCALAPPDATA%/pythainlp-data"
matrix:
# - PYTHON: "C:/Python36"
# PYTHON_VERSION: "3.6"
# PYTHON_ARCH: "32"
# PYICU_PKG: "https://www.dropbox.com/s/pahorbq29y9cura/PyICU-2.3.1-cp36-cp36m-win32.whl?dl=1"
- PYTHON: "C:\\Miniconda36-x64"
PYTHON_VERSION: "3.6"
PYTHON_ARCH: "64"
PYICU_PKG: "https://www.dropbox.com/s/7t0rrxwckqbgivi/PyICU-2.3.1-cp36-cp36m-win_amd64.whl?dl=1"
# - PYTHON: "C:/Python37"
# PYTHON_VERSION: "3.7"
# PYTHON_ARCH: "32"
# PYICU_PKG: "https://www.dropbox.com/s/3xwdnwhdcu619x4/PyICU-2.3.1-cp37-cp37m-win32.whl?dl=1"
# - PYTHON: "C:/Python37-x64"
# PYTHON_VERSION: "3.7"
# PYTHON_ARCH: "64"
# PYICU_PKG: "https://www.dropbox.com/s/le5dckc3231opqt/PyICU-2.3.1-cp37-cp37m-win_amd64.whl?dl=1"
# - PYTHON: "C:\\Miniconda38-x64"
# PYTHON_VERSION: "3.8"
# PYTHON_ARCH: "64"
# PYICU_PKG: "https://www.dropbox.com/s/o6p2sj5z50iim1e/PyICU-2.3.1-cp38-cp38-win_amd64.whl?dl=1"
matrix:
fast_finish: true
#cache:
# - "%LOCALAPPDATA%/pip/Cache"
# - "%APPDATA%/nltk_data"
# - "%LOCALAPPDATA%/pythainlp-data"
install:
- chcp 65001
- "SET PATH=%PYTHON%;%PYTHON%\\Scripts;%PATH%"
# - '"C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat" %PLATFORM%'
# - '"C:\Program Files (x86)\Microsoft Visual Studio\2017\Community\VC\Auxiliary\Build\vcvarsall.bat" %PLATFORM%'
- '"C:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Auxiliary\Build\vcvarsall.bat" %PLATFORM%'
- ps: if (-not(Test-Path($env:PYTHON))) { & appveyor\install.ps1 }
- ECHO %PATH%
- python --version
- python -m pip install --disable-pip-version-check --user --upgrade pip setuptools
- python -m pip --version
- python -m pip install pyyaml
- python -m pip install -U "h5py>=2.10.0,<3" "tensorflow>=2.3.1,<3" deepcut
- python -m pip install %PYICU_PKG%
- conda install -y -c conda-forge fairseq
- conda remove --force -y pytorch
- python -m pip install torch==1.7.1+cpu -f https://download.pytorch.org/whl/torch_stable.html
- python -m pip install -e .[full]
#---------------------------------#
# build configuration #
#---------------------------------#
platform:
- x64
# Skip .NET project specific build phase.
build: off
#---------------------------------#
# tests configuration #
#---------------------------------#
test_script:
- python -m unittest discover
#---------------------------------#
# global handlers #
#---------------------------------#
#on_success:
# # Remove old or huge cache files to hopefully not exceed the 1GB cache limit.
# #
# # If the cache limit is reached, the cache will not be updated (of not even
# # created in the first run). So this is a trade of between keeping the cache
# # current and having a cache at all.
# # NB: This is done only `on_success` since the cache in uploaded only on
# # success anyway.
# # Note: Cygwin is not available on Visual Studio 2019, can try Msys2.
# - "ECHO Remove old or huge cache"
# - C:\cygwin\bin\find "%LOCALAPPDATA%/pip" -type f -mtime +360 -delete
# - C:\cygwin\bin\find "%LOCALAPPDATA%/pip" -type f -size +50M -delete
# - C:\cygwin\bin\find "%LOCALAPPDATA%/pip" -empty -delete
# # Show size of cache
# - C:\cygwin\bin\du -hs "%LOCALAPPDATA%/pip/Cache"
# - C:\cygwin\bin\du -hs "%APPDATA%/nltk_data"
# - C:\cygwin\bin\du -hs "%LOCALAPPDATA%/pythainlp-data"
| 5,572 | 36.402685 | 135 | yml |
pythainlp-dev/setup.py | pythainlp-dev/setup.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Setup script for PyThaiNLP.
https://github.com/PyThaiNLP/pythainlp
"""
from setuptools import find_packages, setup
readme = """

PyThaiNLP is a Python library for Thai natural language processing.
The library provides functions like word tokenization, part-of-speech tagging,
transliteration, soundex generation, spell checking, and
date and time parsing/formatting.
Website: [pythainlp.github.io](https://pythainlp.github.io/)
# Install
For stable version:
```sh
pip install pythainlp
```
For development version:
```sh
pip install --upgrade --pre pythainlp
```
Some functionalities, like named-entity recognition, require extra packages.
See https://github.com/PyThaiNLP/pythainlp for installation options.
"""
requirements = [
"requests>=2.22.0",
"backports.zoneinfo; python_version<'3.9'"
]
extras = {
"attacut": ["attacut>=1.0.6"],
"benchmarks": ["PyYAML>=5.3.1", "numpy>=1.22", "pandas>=0.24"],
"icu": ["pyicu>=2.3"],
"ipa": ["epitran>=1.1"],
"ml": ["numpy>=1.22", "torch>=1.0.0"],
"ssg": ["ssg>=0.0.8"],
"thai2fit": ["emoji>=0.5.1", "gensim>=4.0.0", "numpy>=1.22"],
"thai2rom": ["numpy>=1.22", "torch>=1.0.0"],
"translate": [
"fairseq>=0.10.0",
"sacremoses>=0.0.41",
"sentencepiece>=0.1.91",
"torch>=1.0.0",
"transformers>=4.6.0",
],
"wunsen": ["wunsen>=0.0.1"],
"textaugment": [
"bpemb",
"gensim>=4.0.0"
],
"wangchanberta": [
"transformers>=4.6.0",
"sentencepiece>=0.1.91"
],
"mt5": ["transformers>=4.6.0", "sentencepiece>=0.1.91"],
"wtp": ["transformers>=4.6.0", "wtpsplit>=1.0.1"],
"wordnet": ["nltk>=3.3"],
"generate": ["fastai<2.0"],
"sefr_cut": ["sefr_cut>=1.1"],
"spell": [
"phunspell>=0.1.6",
"spylls>=0.1.5",
"symspellpy>=6.7.6"
],
"oskut": ["oskut>=1.3"],
"nlpo3": ["nlpo3>=1.2.2"],
"onnx": [
"sentencepiece>=0.1.91",
"numpy>=1.22",
"onnxruntime>=1.10.0"
],
"thai_nner": ["thai_nner"],
"esupar": [
"esupar>=1.3.8",
"numpy",
"transformers>=4.22.1",
],
"spacy_thai": ["spacy_thai>=0.7.1"],
"transformers_ud": [
"ufal.chu-liu-edmonds>=1.0.2",
"transformers>=4.22.1",
],
"dependency_parsing": [
"spacy_thai>=0.7.1",
"ufal.chu-liu-edmonds>=1.0.2",
"transformers>=4.22.1",
],
"coreference_resolution":{
"spacy>=3.0",
"fastcoref>=2.1.5",
},
"word_approximation":{
"panphon>=0.20.0"
},
"wangchanglm": [
"transformers>=4.6.0",
"sentencepiece>=0.1.91",
"pandas>=0.24"
],
"wsd":{
"sentence-transformers>=2.2.2"
},
"full": [
"PyYAML>=5.3.1",
"attacut>=1.0.4",
"emoji>=0.5.1",
"epitran>=1.1",
"fairseq>=0.10.0",
"gensim>=4.0.0",
"nltk>=3.3",
"numpy>=1.22",
"pandas>=0.24",
"pyicu>=2.3",
"sacremoses>=0.0.41",
"sentencepiece>=0.1.91",
"ssg>=0.0.8",
"torch>=1.0.0",
"fastai<2.0",
"bpemb>=0.3.2",
"transformers>=4.22.1",
"sefr_cut>=1.1",
"phunspell>=0.1.6",
"spylls>=0.1.5",
"symspellpy>=6.7.6",
"oskut>=1.3",
"nlpo3>=1.2.2",
"onnxruntime>=1.10.0",
"thai_nner",
"wunsen>=0.0.3",
"wtpsplit>=1.0.1",
"spacy_thai>=0.7.1",
"spacy>=3.0",
"fastcoref>=2.1.5",
"ufal.chu-liu-edmonds>=1.0.2",
"panphon>=0.20.0",
"sentence-transformers>=2.2.2",
],
}
setup(
name="pythainlp",
version="4.1.0dev0",
description="Thai Natural Language Processing library",
long_description=readme,
long_description_content_type="text/markdown",
author="PyThaiNLP",
author_email="[email protected]",
url="https://github.com/PyThaiNLP/pythainlp",
packages=find_packages(exclude=["tests", "tests.*"]),
test_suite="tests",
python_requires=">=3.7",
package_data={
"pythainlp": [
"corpus/*",
],
},
include_package_data=True,
install_requires=requirements,
extras_require=extras,
license="Apache Software License 2.0",
zip_safe=False,
keywords=[
"pythainlp",
"NLP",
"natural language processing",
"text analytics",
"text processing",
"localization",
"computational linguistics",
"ThaiNLP",
"Thai NLP",
"Thai language",
],
classifiers=[
"Development Status :: 5 - Production/Stable",
"Programming Language :: Python :: 3",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Natural Language :: Thai",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Text Processing",
"Topic :: Text Processing :: General",
"Topic :: Text Processing :: Linguistic",
],
entry_points={
"console_scripts": [
"thainlp = pythainlp.__main__:main",
],
},
project_urls={
"Documentation": "https://pythainlp.github.io/docs/4.0/",
"Tutorials": "https://pythainlp.github.io/tutorials/",
"Source Code": "https://github.com/PyThaiNLP/pythainlp",
"Bug Tracker": "https://github.com/PyThaiNLP/pythainlp/issues",
},
)
# TODO: Check extras and decide to download additional data, like model files
| 6,234 | 26.834821 | 78 | py |
pythainlp-dev/tokenization-benchmark.md | pythainlp-dev/tokenization-benchmark.md | # Word Tokenisation Benchmark for Thai (obsolete)
A framework for benchmarking tokenisation algorithms for Thai.
It has a command-line interface that allows users to conveniently execute the benchmarks
as well as a module interface for later use in their development pipelines.
## Metrics
<div align="center">
<img src="https://i.imgur.com/jVBOLa2.png"/>
</div>
### Character-Level (CL)
- True Positive (TP): no. of starting characters that are correctly predicted.
- True Negative (TN): no. of non-starting characters that are correctly predicted.
- False Positive (FP): no. of non-starting characters that are wrongly predicted as starting characters.
- False Negative (FN): no. of starting characters that are wrongly predicted as non-starting characters.
- Precision: TP / (TP + FP)
- Recall: TP / (TP+FN)
- f1: 2 × Precision × Recall / (Precision + Recall)
### Word-Level (WL)
- Correctly Tokenised Words (CTW): no. of words in reference that are correctly tokenised.
- Precision: CTW / no. of words in the sample (the tokenised output)
- Recall: CTW / no. of words in the reference solution
- f1: 2 × Precision × Recall / (Precision + Recall)
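To make the formulas above concrete, here is a minimal, illustrative Python sketch (not the benchmark implementation itself) that turns the raw counts into scores; the example counts are hypothetical:
```
def precision_recall_f1(tp, fp, fn):
    # At character level: tp/fp/fn are the counts defined above.
    # At word level: tp = CTW, tp + fp = no. of words in the sample,
    # and tp + fn = no. of words in the reference.
    precision = tp / (tp + fp) if (tp + fp) else 0.0
    recall = tp / (tp + fn) if (tp + fn) else 0.0
    f1 = 2 * precision * recall / (precision + recall) if (precision + recall) else 0.0
    return precision, recall, f1
print(precision_recall_f1(tp=48, fp=1, fn=1))
```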
## Benchmark Results
| Vendor | Approach | Datasets |
|---|---|---|
| DeepCut | CNN | [-yellow.svg)][res-BEST-val-DeepCut] [-yellow.svg)][res-THNC-DeepCut] [-yellow.svg)][res-Orchid-DeepCut] [-yellow.svg)][res-WiseSight160-DeepCut] |
| PyThaiNLP-newmm | dictionary-based | [-yellow.svg)][res-BEST-val-PyThaiNLP-newmm] [-yellow.svg)][res-THNC-PyThaiNLP-newmm] [-yellow.svg)][res-Orchid-PyThaiNLP-newmm] [-yellow.svg)][res-WiseSight160-PyThaiNLP-newmm] |
| Sertis-BiGRU | Bi-directional RNN | [-yellow.svg)][res-BEST-val-Sertis-BiGRU] [-yellow.svg)][res-WiseSight160-Sertis-BiGRU] |
[res-BEST-val-DeepCut]: https://pythainlp.github.io/tokenization-benchmark-visualization/?experiment-name=BEST-val-DeepCut
[res-THNC-DeepCut]: https://pythainlp.github.io/tokenization-benchmark-visualization/?experiment-name=THNC-DeepCut
[res-Orchid-DeepCut]: https://pythainlp.github.io/tokenization-benchmark-visualization/?experiment-name=Orchid-DeepCut
[res-WiseSight160-DeepCut]: https://pythainlp.github.io/tokenization-benchmark-visualization/?experiment-name=WiseSight160-DeepCut
[res-BEST-val-PyThaiNLP-newmm]: https://pythainlp.github.io/tokenization-benchmark-visualization/?experiment-name=BEST-val-PyThaiNLP-newmm
[res-THNC-PyThaiNLP-newmm]: https://pythainlp.github.io/tokenization-benchmark-visualization/?experiment-name=THNC-PyThaiNLP-newmm
[res-Orchid-PyThaiNLP-newmm]: https://pythainlp.github.io/tokenization-benchmark-visualization/?experiment-name=Orchid-PyThaiNLP-newmm
[res-WiseSight160-PyThaiNLP-newmm]: https://pythainlp.github.io/tokenization-benchmark-visualization/?experiment-name=WiseSight160-PyThaiNLP-newmm
[res-BEST-val-Sertis-BiGRU]: https://pythainlp.github.io/tokenization-benchmark-visualization/?experiment-name=BEST-val-Sertis-BiGRU
[res-WiseSight160-Sertis-BiGRU]: https://pythainlp.github.io/tokenization-benchmark-visualization/?experiment-name=WiseSight160-Sertis-BiGRU
## Installation (WIP)
```
pip ...
```
## Usages (to be updated)
1. Command-line Interface
```
PYTHONPATH=`pwd` python scripts/thai-tokenisation-benchmark.py \
--test-file ./data/best-2010/TEST_100K_ANS.txt \
--input ./data/best-2010-syllable.txt
# Sample output
Benchmarking ./data/best-2010-deepcut.txt against ./data/best-2010/TEST_100K_ANS.txt with 2252 samples in total
============== Benchmark Result ==============
metric mean±std min max
char_level:tp 47.82±47.22 1.000000 354.0
char_level:tn 144.19±145.97 1.000000 887.0
char_level:fp 1.34±2.02 0.000000 23.0
char_level:fn 0.70±1.19 0.000000 14.0
char_level:precision 0.96±0.08 0.250000 1.0
char_level:recall 0.98±0.04 0.500000 1.0
char_level:f1 0.97±0.06 0.333333 1.0
word_level:precision 0.92±0.14 0.000000 1.0
word_level:recall 0.93±0.12 0.000000 1.0
word_level:f1 0.93±0.13 0.000000 1.0
```
2. Module Interface
```
from pythainlp.benchmarks import word_tokenisation as bwt
ref_samples = [...]        # list of reference tokenised samples
tokenised_samples = [...]  # list of tokenised samples, i.e. from your algorithm
# dataframe contains metrics for each sample
df = bwt.benchmark(ref_samples, tokenised_samples)
```
## Related Work
- [Thai Tokenisers Docker][docker]: collection of pre-built Thai tokenisers Docker containers.
## Development
```
# unit tests
$ TEST_VERBOSE=1 PYTHONPATH=. python tests/__init__.py
```
## Acknowledgement
This project was initially started by [Pattarawat Chormai][pat], while he was interning at [Dr. Attapol Thamrongrattanarit][ate]'s lab.
[docker]: https://github.com/PyThaiNLP/docker-thai-tokenizers
[ate]: https://attapol.github.io
[pat]: https://pat.chormai.org
| 5,427 | 49.728972 | 436 | md |
pythainlp-dev/.github/pull_request_template.md | pythainlp-dev/.github/pull_request_template.md | ### What does this changes
Brief summary of the changes
### What was wrong
Description of what was the root cause of the issue.
### How this fixes it
Description of how the changes fix the issue.
Fixes #...
### Your checklist for this pull request
🚨Please review the [guidelines for contributing](https://github.com/PyThaiNLP/pythainlp/blob/dev/CONTRIBUTING.md) to this repository.
- [ ] Passed code styles and structures
- [ ] Passed code linting checks and unit test
| 477 | 22.9 | 133 | md |
pythainlp-dev/.github/ISSUE_TEMPLATE/config.yml | pythainlp-dev/.github/ISSUE_TEMPLATE/config.yml | ---
blank_issues_enabled: true
| 31 | 9.666667 | 26 | yml |
pythainlp-dev/.github/ISSUE_TEMPLATE/feature_request.md | pythainlp-dev/.github/ISSUE_TEMPLATE/feature_request.md | ---
name: Feature request
about: Propose a change or an addition เสนอความสามารถใหม่
---
## Detailed description
<!--- Provide a detailed description of the change or addition you are proposing -->
## Context
<!--- Why is this change important to you? How would you use it? -->
<!--- How can it benefit other users? -->
## Possible implementation
<!--- Not obligatory, but suggest an idea for implementing addition or change -->
## Your environment
* PyThaiNLP version:
* Python version:
* Operating system and version (distro, 32/64-bit):
* More info (Docker, VM, etc.):
| 575 | 26.428571 | 84 | md |
pythainlp-dev/.github/ISSUE_TEMPLATE/issue_report.yml | pythainlp-dev/.github/ISSUE_TEMPLATE/issue_report.yml | name: "Issue report"
description: "Create a report to help us improve รายงานปัญหา"
title: "bug: "
labels: ["NEED_TO_LABEL", "type:bug"]
body:
- type: textarea
attributes:
label: "Description"
description: "Why you consider it to be an issue or a bug?"
validations:
required: true
- type: textarea
attributes:
label: "Expected results"
description: "Tell us what should happen."
validations:
required: true
- type: textarea
attributes:
label: "Current results"
description: "Tell us what happens instead."
validations:
required: true
- type: textarea
attributes:
label: "Steps to reproduce"
description: "Steps to reproduce the behavior."
validations:
required: true
- type: input
id: "version"
attributes:
label: "PyThaiNLP version"
validations:
required: true
- type: input
id: "python"
attributes:
label: "Python version"
validations:
required: true
- type: input
id: "os"
attributes:
label: "Operating system and version"
validations:
required: true
- type: textarea
attributes:
label: More info
description: (Docker, VM, etc.)
validations:
required: false
- type: textarea
attributes:
label: "Possible solution"
description: "(Optional) Suggest a fix for the issue, or ideas how to implement the change."
validations:
required: false
- type: textarea
attributes:
label: "Files"
description: "(Optional) A list of relevant files for this issue."
validations:
required: false
| 1,534 | 21.910448 | 96 | yml |
pythainlp-dev/.github/workflows/codeql-analysis.yml | pythainlp-dev/.github/workflows/codeql-analysis.yml | # For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
# You may wish to alter this file to override the set of languages analyzed,
# or to provide custom queries or build logic.
#
# ******** NOTE ********
# We have attempted to detect the languages in your repository. Please check
# the `language` matrix defined below to confirm you have the correct set of
# supported CodeQL languages.
#
name: "CodeQL"
on:
push:
branches: [ dev ]
pull_request:
# The branches below must be a subset of the branches above
branches: [ dev ]
schedule:
- cron: '17 9 * * 5'
jobs:
analyze:
name: Analyze
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
language: [ 'python' ]
# CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
# Learn more:
# https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed
steps:
- name: Checkout repository
uses: actions/checkout@v2
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v1
with:
languages: ${{ matrix.language }}
# If you wish to specify custom queries, you can do so here or in a config file.
# By default, queries listed here will override any specified in a config file.
# Prefix the list here with "+" to use these queries and those in the config file.
# queries: ./path/to/local/query, your-org/your-repo/queries@main
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
# If this step fails, then you should remove it and run the build manually (see below)
- name: Autobuild
uses: github/codeql-action/autobuild@v1
# ℹ️ Command-line programs to run using the OS shell.
# 📚 https://git.io/JvXDl
# ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
# and modify them (or add more) to build your code if your project
# uses a compiled language
#- run: |
# make bootstrap
# make release
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v1
| 2,343 | 33.470588 | 188 | yml |
pythainlp-dev/.github/workflows/deploy_docs.yml | pythainlp-dev/.github/workflows/deploy_docs.yml | name: Deploy Docs dev
on:
push:
branches:
- dev
paths-ignore:
- '**.md'
- '**.yml'
jobs:
release:
name: Build
runs-on: ubuntu-20.04
steps:
- uses: actions/checkout@v1
- name: Set up Python 3.8
uses: actions/setup-python@v1
with:
python-version: 3.8
- name: Install dependencies
env:
SKLEARN_ALLOW_DEPRECATED_SKLEARN_PACKAGE_INSTALL: True
run: |
python -m pip install --upgrade pip
pip install pytest coverage coveralls
if [ -f docker_requirements.txt ]; then pip install -r docker_requirements.txt; fi
pip install deepcut
pip install .[full]
pip install boto smart_open sphinx sphinx-rtd-theme
python -m nltk.downloader omw-1.4
- name: Build sphinx documentation
run: |
cd docs && make html
cd ..
- name: Deploy
uses: peaceiris/actions-gh-pages@v3
with:
personal_token: ${{ secrets.PERSONAL_TOKEN }}
publish_dir: ./docs/_build/html
external_repository: PyThaiNLP/dev-docs
user_name: 'github-actions[bot]'
user_email: 'github-actions[bot]@users.noreply.github.com'
| 1,195 | 26.813953 | 90 | yml |
pythainlp-dev/.github/workflows/greetings.yml | pythainlp-dev/.github/workflows/greetings.yml | name: Greetings
on: [pull_request, issues]
jobs:
greeting:
runs-on: ubuntu-latest
steps:
- uses: actions/first-interaction@v1
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
pr-message: 'Hello @${{ github.actor }}, thank you for submitting a PR! We will respond as soon as possible.'
issue-message: |
Hello @${{ github.actor }}, thank you for your interest in our work!
If this is a bug report, please provide screenshots and **minimum viable code to reproduce your issue**, otherwise we can not help you.
| 572 | 32.705882 | 145 | yml |
pythainlp-dev/.github/workflows/lint.yml | pythainlp-dev/.github/workflows/lint.yml | name: Lint
on:
push:
branches:
- dev
paths-ignore:
- '**.md'
- '**.yml'
- 'docs/**'
pull_request:
branches:
- dev
paths-ignore:
- '**.md'
- '**.yml'
- 'docs/**'
jobs:
build:
runs-on: ubuntu-20.04
strategy:
matrix:
python-version: [3.8]
steps:
- uses: actions/checkout@v2
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python-version }}
- name: Check Code Format with Black
uses: psf/black@stable
with:
options: "--check --verbose --line-length=79"
src: "./pythainlp"
version: "~= 22.0"
- name: Install dependencies
run: |
python -m pip install --upgrade pip
if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
pip install "h5py>=2.10.0,<3" "tensorflow>=2.3.1,<3"
pip install torch==1.7.0+cpu -f https://download.pytorch.org/whl/torch_stable.html
pip install deepcut
pip install .[full]
pip install flake8 flake8-commas flake8-comprehensions flake8-tidy-imports
- name: Lint with flake8
run: |
# stop the build if there are Python syntax errors or undefined names
flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
# exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
| 1,554 | 27.796296 | 91 | yml |
pythainlp-dev/.github/workflows/macos-test.yml | pythainlp-dev/.github/workflows/macos-test.yml | name: macOS Unit test and code coverage
on:
push:
paths-ignore:
- '**.md'
- 'docs/**'
# - '**.yml'
pull_request:
branches:
- dev
paths-ignore:
- '**.md'
- '**.yml'
- 'docs/**'
jobs:
build:
runs-on: ${{ matrix.os }}
strategy:
fail-fast: false
matrix:
os: [macos-latest, self-hosted]
python-version: [3.8]
steps:
- uses: actions/checkout@v2
- uses: conda-incubator/setup-miniconda@v2
with:
python-version: ${{ matrix.python-version }}
auto-activate-base: false
auto-update-conda: true
if: matrix.os == 'macos-latest'
# - name: Install mac m1
# run: |
# mkdir -p ~/miniconda3
# wget https://repo.anaconda.com/miniconda/Miniconda3-py38_4.12.0-MacOSX-arm64.sh
# chmod +x Miniconda3-py38_4.12.0-MacOSX-arm64.sh
# bash Miniconda3-py38_4.12.0-MacOSX-arm64.sh -b -u -p ~/miniconda3
# ~/miniconda3/bin/conda init bash
# ~/miniconda3/bin/conda init zsh
# if: matrix.os == 'self-hosted'
- name: Test PyThaiNLP - M1
shell: bash -l {0}
run: |
source ~/miniconda3/etc/profile.d/conda.sh
conda activate pythainlp38
conda info
conda list
python -m pip install --upgrade pip
pip uninstall --y pythainlp
pip install --no-deps fastai==1.0.61
pip install PyYAML attacut emoji epitran gensim nltk numpy pandas sacremoses sentencepiece ssg bpemb transformers sefr_cut phunspell spylls symspellpy tltk oskut nlpo3 onnxruntime thai_nner wunsen spacy_thai ufal.chu-liu-edmonds
pip install -e .
python -m nltk.downloader omw-1.4
python -m unittest discover
if: matrix.os == 'self-hosted'
- shell: bash -l {0}
run: |
conda info
conda list
if: matrix.os == 'self-hosted'
- name: Install PyTorch
shell: bash -l {0}
run: |
pip install torch==1.10.0
if: matrix.os != 'self-hosted'
- name: Install dependencies
shell: bash -l {0}
run: |
python -m pip install --upgrade pip
pip install pytest coverage coveralls
conda install -c conda-forge icu
conda install -c conda-forge pyicu
if [ -f docker_requirements.txt ]; then SKLEARN_ALLOW_DEPRECATED_SKLEARN_PACKAGE_INSTALL=True pip install -r docker_requirements.txt; fi
pip install deepcut tltk
pip install .[full]
python -m nltk.downloader omw-1.4
if: matrix.os != 'self-hosted'
- name: Test
shell: bash -l {0}
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
COVERALLS_SERVICE_NAME: github
run: |
coverage run -m unittest discover
coveralls
if: matrix.os != 'self-hosted'
| 2,815 | 30.288889 | 236 | yml |
pythainlp-dev/.github/workflows/pypi-publish.yml | pythainlp-dev/.github/workflows/pypi-publish.yml | name: Upload package to PyPI
on:
release:
types: [created]
jobs:
deploy:
runs-on: ubuntu-20.04
strategy:
matrix:
python-version: [3.8]
steps:
- uses: actions/checkout@v2
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v1
with:
python-version: ${{ matrix.python-version }}
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install setuptools wheel twine
python setup.py sdist bdist_wheel
- name: Publish a Python distribution to PyPI
uses: pypa/gh-action-pypi-publish@release/v1
with:
user: __token__
password: ${{ secrets.PYPI_API_TOKEN }}
| 728 | 22.516129 | 54 | yml |
pythainlp-dev/.github/workflows/pypi-test.yml | pythainlp-dev/.github/workflows/pypi-test.yml | name: PyPI Unit test
on:
schedule:
- cron: '0 0 * * *' # Once per day
jobs:
build:
runs-on: ubuntu-20.04
strategy:
matrix:
python-version: [3.8]
steps:
- uses: actions/checkout@v2
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v1
with:
python-version: ${{ matrix.python-version }}
- name: Install dependencies
env:
SKLEARN_ALLOW_DEPRECATED_SKLEARN_PACKAGE_INSTALL: True
run: |
python -m pip install --upgrade pip
pip install deepcut tltk
SKLEARN_ALLOW_DEPRECATED_SKLEARN_PACKAGE_INSTALL=True pip install -r https://raw.githubusercontent.com/PyThaiNLP/pythainlp/dev/docker_requirements.txt
pip install pythainlp[full]
python -m nltk.downloader omw-1.4
- name: Test
run: |
mkdir pythainlp_test
cd pythainlp_test
pip download --no-binary=:all: --no-dependencies pythainlp
file="find . -name *.tar.gz"
file=$(eval "$file")
tar -xvzf $file --one-top-level
second="/"
path=${file//.tar.gz/$second}
cd $path
ls
cd tests
mkdir tests
mv data tests/
python -m unittest discover
| 1,251 | 26.217391 | 158 | yml |
pythainlp-dev/.github/workflows/stale.yml | pythainlp-dev/.github/workflows/stale.yml | # workflows from https://github.com/faster-cpython/cpython/blob/main/.github/workflows/stale.yml
name: Mark stale pull requests
on:
schedule:
- cron: "0 0 * * *"
permissions:
pull-requests: write
jobs:
stale:
if: github.repository_owner == 'PyThaiNLP'
runs-on: ubuntu-latest
steps:
- name: "Check PRs"
uses: actions/stale@v6
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
stale-pr-message: 'This PR is stale because it has been open for 30 days with no activity.'
stale-pr-label: 'stale'
days-before-issue-stale: -1
days-before-pr-stale: 30
days-before-close: -1
ascending: true
operations-per-run: 120
| 707 | 23.413793 | 99 | yml |
pythainlp-dev/.github/workflows/test.yml | pythainlp-dev/.github/workflows/test.yml | name: Unit test and code coverage
on:
push:
paths-ignore:
- '**.md'
- '**.yml'
- 'docs/**'
pull_request:
paths-ignore:
- '**.md'
- '**.yml'
- 'docs/**'
jobs:
build:
runs-on: ubuntu-20.04
strategy:
matrix:
python-version: [3.8]
steps:
- uses: actions/checkout@v2
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v1
with:
python-version: ${{ matrix.python-version }}
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install pytest coverage coveralls
if [ -f docker_requirements.txt ]; then SKLEARN_ALLOW_DEPRECATED_SKLEARN_PACKAGE_INSTALL=True pip install -r docker_requirements.txt; fi
pip install deepcut tltk
pip install .[full]
python -m nltk.downloader omw-1.4
- name: Test
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
COVERALLS_SERVICE_NAME: github
run: |
coverage run -m unittest discover
coveralls
| 1,076 | 23.477273 | 144 | yml |
pythainlp-dev/.github/workflows/windows-test.yml | pythainlp-dev/.github/workflows/windows-test.yml | name: Windows Unit test and code coverage
on:
push:
paths-ignore:
- '**.md'
- '**.yml'
- 'docs/**'
pull_request:
branches:
- dev
paths-ignore:
- '**.md'
- '**.yml'
- 'docs/**'
jobs:
build:
runs-on: ${{ matrix.os }}
strategy:
fail-fast: false
matrix:
os: [windows-latest]
python-version: [3.8]
steps:
- uses: actions/checkout@v2
- uses: conda-incubator/setup-miniconda@v2
with:
python-version: ${{ matrix.python-version }}
auto-activate-base: true
auto-update-conda: true
- shell: powershell
run: |
conda info
conda list
- name: Install PyTorch
shell: powershell
run: |
pip install torch==1.8.1
- name: Install dependencies
shell: powershell
env:
SKLEARN_ALLOW_DEPRECATED_SKLEARN_PACKAGE_INSTALL: True
run: |
python -m pip install --disable-pip-version-check --user --upgrade pip setuptools
python -m pip install backports.zoneinfo[tzdata]
python -m pip --version
python -m pip install pytest coverage coveralls
conda install -y -c conda-forge fairseq
python -m pip install https://www.dropbox.com/s/o6p2sj5z50iim1e/PyICU-2.3.1-cp38-cp38-win_amd64.whl?dl=1
python -m pip install -r docker_requirements.txt
python -m pip install .[full]
python -m nltk.downloader omw-1.4
python -m pip install spacy deepcut tltk
- name: Test
shell: powershell
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
COVERALLS_SERVICE_NAME: github
run: |
coverage run -m unittest discover
coveralls
| 1,726 | 25.569231 | 112 | yml |
pythainlp-dev/docs/build_docs.sh | pythainlp-dev/docs/build_docs.sh | #!/bin/bash
make html
| 22 | 6.666667 | 11 | sh |
pythainlp-dev/docs/clean_directory.sh | pythainlp-dev/docs/clean_directory.sh | #!/bin/bash
# Delete all files and folders in the directory: /pythainlp/docs/<version>
# $1 : FTP_USER
# $2 : FTP_PASSWORD
# $3 : FTP_HOST
# $4 : Brnach name
FTP_USER=$1
FTP_PASSWORD=$2
FTP_HOST=$3
BRANCH_NAME=$4
remove_all_files()
{
# DIRECTORY=$1
echo "Delete files in: $1"
for f in `curl --list-only --ftp-create-dirs --ipv4 ftp://$FTP_USER:$FTP_PASSWORD@$FTP_HOST/$1/`; do
if [[ -d "$f" ]] || [[ "$f" = _* ]] || [[ "$f" = .doctree ]] || [[ "$f" != *"."* ]]; then
echo "--- deleting files in folder: $1/$f";
remove_all_files $1/$f
else
echo "Delete a file: $f"
curl --ipv4 ftp://$FTP_USER:$FTP_PASSWORD@$FTP_HOST -Q "DELE $1/$f"
fi
done
}
remove_empty_folders()
{
echo "Delete empty folders in: $1"
for f in `curl --list-only --ftp-create-dirs --ipv4 ftp://$FTP_USER:$FTP_PASSWORD@$FTP_HOST/$1/`; do
if [[ -d "$f" ]] || [[ "$f" = _* ]] || [[ "$f" = fonts ]] || [[ "$f" = pythainlp ]] || [[ "$f" = .doctree ]] || [[ "$f" != *"."* ]]; then
echo "--- Deleting folders in: $1/$f";
remove_empty_folders $1/$f
curl --ipv4 ftp://$FTP_USER:$FTP_PASSWORD@$FTP_HOST -Q "RMD $1/$f"
else
echo "Delete a folder: $f"
curl --ipv4 ftp://$FTP_USER:$FTP_PASSWORD@$FTP_HOST -Q "RMD $1/$f"
fi
done
}
echo "Start removing all files within: public_html/pythainlp/docs/$BRANCH_NAME/";
remove_all_files public_html/pythainlp/docs/$BRANCH_NAME;
echo "Start removing all empty folders within: public_html/pythainlp/docs/$BRANCH_NAME/";
remove_empty_folders public_html/pythainlp/docs/$BRANCH_NAME;
echo "Done.";
| 1,682 | 29.6 | 145 | sh |
pythainlp-dev/docs/conf.py | pythainlp-dev/docs/conf.py | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
# http://www.sphinx-doc.org/en/master/config
import os
import sys
import traceback
from datetime import datetime
import pythainlp
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(".."))
# -- Project information -----------------------------------------------------
project = "PyThaiNLP"
copyright = "2019, pythainlp_builders"
author = "pythainlp_builders"
curyear = datetime.today().year
copyright = f"2017-{curyear}, {project} (Apache Software License 2.0)"
# -- Get version information and date from Git ----------------------------
try:
from subprocess import check_output, STDOUT
current_branch = (
os.environ["CURRENT_BRANCH"]
if "CURRENT_BRANCH" in os.environ
else check_output(
["git", "symbolic-ref", "HEAD"], shell=False, stderr=STDOUT
)
.decode()
.strip()
.split("/")[-1]
)
release = (
os.environ["RELEASE"]
if "RELEASE" in os.environ
else check_output(
["git", "describe", "--tags", "--always"],
shell=False,
stderr=STDOUT,
)
.decode()
.strip()
.split("-")[0]
)
today = (
os.environ["TODAY"]
if "TODAY" in os.environ
else check_output(
["git", "show", "-s", "--format=%ad", "--date=short"],
shell=False,
stderr=STDOUT,
)
.decode()
.strip()
)
except Exception as e:
traceback.print_exc()
release = pythainlp.__version__
today = "<unknown date>"
current_branch = "<unknown>"
# The short X.Y version
version = f"{current_branch} ({release}) <br /> Published date: {today}"
# The full version, including alpha/beta/rc tags
# release = release
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.doctest",
"sphinx.ext.intersphinx",
"sphinx.ext.todo",
"sphinx.ext.coverage",
"sphinx.ext.mathjax",
"sphinx.ext.ifconfig",
"sphinx.ext.viewcode",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {"display_version": True}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Custom css file
html_css_files = ["style.css"]
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "pythainlpdoc"
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
master_doc,
"pythainlp.tex",
"PyThaiNLP Documentation",
"pythainlp\\_builders",
"manual",
)
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "pythainlp", "PyThaiNLP Documentation", [author], 1)]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"pythainlp",
"PyThaiNLP Documentation",
author,
"pythainlp",
"Python library for Thai language processing",
"Manual",
)
]
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
"python": ("https://docs.python.org/", None),
"NLTK": ("https://www.nltk.org", None),
}
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Markdown
# from recommonmark.parser import CommonMarkParser
# source_parsers = {'.md': CommonMarkParser}
# source_suffix = ['.rst', '.md']
autodoc_default_options = {
'members': True,
'member-order': 'bysource',
'special-members': '__init__',
'undoc-members': True,
'exclude-members': '__weakref__'
}
| 7,237 | 27.952 | 79 | py |
pythainlp-dev/docs/_static/style.css | pythainlp-dev/docs/_static/style.css | .wy-nav-content {
max-width: none;
}
| 41 | 9.5 | 20 | css |
pythainlp-dev/pythainlp/__init__.py | pythainlp-dev/pythainlp/__init__.py | # -*- coding: utf-8 -*-
# PyThaiNLP: Thai Natural Language Processing in Python
#
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# URL: <https://pythainlp.github.io/>
# For license information, see LICENSE
__version__ = "4.1.0dev0"
thai_consonants = "กขฃคฅฆงจฉชซฌญฎฏฐฑฒณดตถทธนบปผฝพฟภมยรลวศษสหฬอฮ" # 44 chars
thai_vowels = (
"\u0e24\u0e26\u0e30\u0e31\u0e32\u0e33\u0e34\u0e35\u0e36\u0e37"
+ "\u0e38\u0e39\u0e40\u0e41\u0e42\u0e43\u0e44\u0e45\u0e4d\u0e47"
) # 20
thai_lead_vowels = "\u0e40\u0e41\u0e42\u0e43\u0e44" # 5
thai_follow_vowels = "\u0e30\u0e32\u0e33\u0e45" # 4
thai_above_vowels = "\u0e31\u0e34\u0e35\u0e36\u0e37\u0e4d\u0e47" # 7
thai_below_vowels = "\u0e38\u0e39" # 2
thai_tonemarks = "\u0e48\u0e49\u0e4a\u0e4b" # 4
# Paiyannoi, Maiyamok, Phinthu, Thanthakhat, Nikhahit, Yamakkan:
# These signs can be part of a word
thai_signs = "\u0e2f\u0e3a\u0e46\u0e4c\u0e4d\u0e4e" # 6 chars
# Any Thai character that can be part of a word
thai_letters = "".join(
[thai_consonants, thai_vowels, thai_tonemarks, thai_signs]
) # 74
# Fongman, Angkhankhu, Khomut:
# These characters are section markers
thai_punctuations = "\u0e4f\u0e5a\u0e5b" # 3 chars
thai_digits = "๐๑๒๓๔๕๖๗๘๙" # 10
thai_symbols = "\u0e3f" # Thai Bath ฿
# All Thai characters that presented in Unicode
thai_characters = "".join(
[thai_letters, thai_punctuations, thai_digits, thai_symbols]
)
from pythainlp.soundex import soundex
from pythainlp.spell import correct, spell
from pythainlp.tag import pos_tag
from pythainlp.tokenize import (
Tokenizer,
sent_tokenize,
subword_tokenize,
word_tokenize,
)
from pythainlp.transliterate import romanize, transliterate
from pythainlp.util import collate, thai_strftime
| 2,271 | 32.411765 | 76 | py |
pythainlp-dev/pythainlp/__main__.py | pythainlp-dev/pythainlp/__main__.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import sys
from pythainlp import cli
def main(argv=None):
"""ThaiNLP command line."""
if not argv:
argv = sys.argv
parser = argparse.ArgumentParser(
prog="thainlp",
description="Thai natural language processing.",
usage=(
"thainlp <command> [options]\n\n"
"Example:\n\n"
"thainlp data catalog\n\n"
"--"
),
)
parser.add_argument(
"command",
type=str,
choices=cli.COMMANDS,
help="text processing action",
)
args = parser.parse_args(argv[1:2])
cli.exit_if_empty(args.command, parser)
if hasattr(cli, args.command):
command = getattr(cli, args.command)
command.App(argv)
if __name__ == "__main__":
main(argv=sys.argv)
| 1,428 | 25.962264 | 74 | py |
pythainlp-dev/pythainlp/augment/__init__.py | pythainlp-dev/pythainlp/augment/__init__.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Thai text augment
"""
__all__ = ["WordNetAug"]
from pythainlp.augment.wordnet import WordNetAug
| 716 | 31.590909 | 74 | py |
pythainlp-dev/pythainlp/augment/wordnet.py | pythainlp-dev/pythainlp/augment/wordnet.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Thanks to https://dev.to/ton_ami/text-data-augmentation-synonym-replacement-4h8l
"""
__all__ = [
"WordNetAug",
"postype2wordnet",
]
from pythainlp.corpus import wordnet
from collections import OrderedDict
from pythainlp.tokenize import word_tokenize
from pythainlp.tag import pos_tag
from typing import List
from nltk.corpus import wordnet as wn
import itertools
orchid = {
"": "",
# NOUN
"NOUN": wn.NOUN,
"NCMN": wn.NOUN,
"NTTL": wn.NOUN,
"CNIT": wn.NOUN,
"CLTV": wn.NOUN,
"CMTR": wn.NOUN,
"CFQC": wn.NOUN,
"CVBL": wn.NOUN,
# VERB
"VACT": wn.VERB,
"VSTA": wn.VERB,
# PROPN
"PROPN": "",
"NPRP": "",
# ADJ
"ADJ": wn.ADJ,
"NONM": wn.ADJ,
"VATT": wn.ADJ,
"DONM": wn.ADJ,
# ADV
"ADV": wn.ADV,
"ADVN": wn.ADV,
"ADVI": wn.ADV,
"ADVP": wn.ADV,
"ADVS": wn.ADV,
# INT
"INT": "",
# PRON
"PRON": "",
"PPRS": "",
"PDMN": "",
"PNTR": "",
# DET
"DET": "",
"DDAN": "",
"DDAC": "",
"DDBQ": "",
"DDAQ": "",
"DIAC": "",
"DIBQ": "",
"DIAQ": "",
# NUM
"NUM": "",
"NCNM": "",
"NLBL": "",
"DCNM": "",
# AUX
"AUX": "",
"XVBM": "",
"XVAM": "",
"XVMM": "",
"XVBB": "",
"XVAE": "",
# ADP
"ADP": "",
"RPRE": "",
# CCONJ
"CCONJ": "",
"JCRG": "",
# SCONJ
"SCONJ": "",
"PREL": "",
"JSBR": "",
"JCMP": "",
# PART
"PART": "",
"FIXN": "",
"FIXV": "",
"EAFF": "",
"EITT": "",
"AITT": "",
"NEG": "",
# PUNCT
"PUNCT": "",
"PUNC": "",
}
def postype2wordnet(pos: str, corpus: str):
"""
    Convert a part-of-speech tag to the corresponding wordnet POS constant.
    :param str pos: POS tag
    :param str corpus: name of the POS tagset that the tag comes from
    **Options for corpus**
        * *orchid* - Orchid Corpus
    :return: wordnet POS constant, an empty string if the tag has no mapping,
        or ``None`` if the corpus is not supported
"""
if corpus not in ["orchid"]:
return None
return orchid[pos]
class WordNetAug:
"""
Text Augment using wordnet
"""
def __init__(self):
pass
def find_synonyms(
self, word: str, pos: str = None, postag_corpus: str = "orchid"
) -> List[str]:
"""
Find synonyms from wordnet
:param str word: word
:param str pos: part-of-speech type
:param str postag_corpus: postag corpus name
:return: list of synonyms
:rtype: List[str]
"""
self.synonyms = []
if pos is None:
self.list_synsets = wordnet.synsets(word)
else:
self.p2w_pos = postype2wordnet(pos, postag_corpus)
if self.p2w_pos != "":
self.list_synsets = wordnet.synsets(word, pos=self.p2w_pos)
else:
self.list_synsets = wordnet.synsets(word)
for self.synset in wordnet.synsets(word):
for self.syn in self.synset.lemma_names(lang="tha"):
self.synonyms.append(self.syn)
self.synonyms_without_duplicates = list(
OrderedDict.fromkeys(self.synonyms)
)
return self.synonyms_without_duplicates
def augment(
self,
sentence: str,
tokenize: object = word_tokenize,
max_syn_sent: int = 6,
postag: bool = True,
postag_corpus: str = "orchid",
) -> List[List[str]]:
"""
Text Augment using wordnet
:param str sentence: thai sentence
        :param object tokenize: word tokenization function
        :param int max_syn_sent: maximum number of synonymous sentences to generate
        :param bool postag: use part-of-speech tagging when finding synonyms
        :param str postag_corpus: postag corpus name
        :return: list of synonymous sentences
:rtype: List[Tuple[str]]
:Example:
::
from pythainlp.augment import WordNetAug
aug = WordNetAug()
aug.augment("เราชอบไปโรงเรียน")
# output: [('เรา', 'ชอบ', 'ไป', 'ร.ร.'),
('เรา', 'ชอบ', 'ไป', 'รร.'),
('เรา', 'ชอบ', 'ไป', 'โรงเรียน'),
('เรา', 'ชอบ', 'ไป', 'อาคารเรียน'),
('เรา', 'ชอบ', 'ไปยัง', 'ร.ร.'),
('เรา', 'ชอบ', 'ไปยัง', 'รร.')]
"""
new_sentences = []
self.list_words = tokenize(sentence)
self.list_synonym = []
self.p_all = 1
if postag:
self.list_pos = pos_tag(self.list_words, corpus=postag_corpus)
for word, pos in self.list_pos:
self.temp = self.find_synonyms(word, pos, postag_corpus)
if self.temp == []:
self.list_synonym.append([word])
else:
self.list_synonym.append(self.temp)
self.p_all *= len(self.temp)
else:
for word in self.list_words:
self.temp = self.find_synonyms(word)
if self.temp == []:
self.list_synonym.append([word])
else:
self.list_synonym.append(self.temp)
self.p_all *= len(self.temp)
if max_syn_sent > self.p_all:
max_syn_sent = self.p_all
for x in list(itertools.product(*self.list_synonym))[0:max_syn_sent]:
new_sentences.append(x)
return new_sentences
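# --- Hedged usage sketch added by the editor; not part of the original module. ---
# It assumes the NLTK WordNet data (including Thai lemmas) has been downloaded;
# the example word and the printed synonyms are illustrative only.
if __name__ == "__main__":
    print(postype2wordnet("NCMN", "orchid"))  # Orchid noun tag maps to the wordnet NOUN type
    aug = WordNetAug()
    print(aug.find_synonyms("โรงเรียน"))  # e.g. ['โรงเรียน', 'ร.ร.', 'รร.', ...]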
| 5,868 | 24.854626 | 77 | py |
pythainlp-dev/pythainlp/augment/lm/__init__.py | pythainlp-dev/pythainlp/augment/lm/__init__.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
LM
"""
__all__ = [
"FastTextAug",
"Thai2transformersAug",
]
from pythainlp.augment.lm.fasttext import FastTextAug
from pythainlp.augment.lm.wangchanberta import Thai2transformersAug
| 810 | 30.192308 | 74 | py |
pythainlp-dev/pythainlp/augment/lm/fasttext.py | pythainlp-dev/pythainlp/augment/lm/fasttext.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Tuple
from gensim.models.fasttext import FastText as FastText_gensim
from pythainlp.tokenize import word_tokenize
from gensim.models.keyedvectors import KeyedVectors
import itertools
class FastTextAug:
"""
Text Augment from FastText
:param str model_path: path of model file
"""
def __init__(self, model_path: str):
"""
:param str model_path: path of model file
"""
if model_path.endswith(".bin"):
self.model = FastText_gensim.load_facebook_vectors(model_path)
elif model_path.endswith(".vec"):
self.model = KeyedVectors.load_word2vec_format(model_path)
else:
self.model = FastText_gensim.load(model_path)
self.dict_wv = list(self.model.key_to_index.keys())
def tokenize(self, text: str) -> List[str]:
"""
        Tokenize Thai text for fastText
:param str text: thai text
:return: list of word
:rtype: List[str]
"""
return word_tokenize(text, engine="icu")
def modify_sent(self, sent: str, p: float = 0.7) -> List[List[str]]:
"""
:param str sent: text sentence
:param float p: probability
:rtype: List[List[str]]
"""
list_sent_new = []
for i in sent:
if i in self.dict_wv:
w = [j for j, v in self.model.most_similar(i) if v >= p]
if w == []:
list_sent_new.append([i])
else:
list_sent_new.append(w)
else:
list_sent_new.append([i])
return list_sent_new
def augment(
self, sentence: str, n_sent: int = 1, p: float = 0.7
) -> List[Tuple[str]]:
"""
Text Augment from FastText
        You need to download a Thai model
        from https://fasttext.cc/docs/en/crawl-vectors.html first.
        :param str sentence: thai sentence
        :param int n_sent: number of sentences to generate
:param float p: Probability of word
:return: list of synonyms
:rtype: List[Tuple[str]]
"""
self.sentence = self.tokenize(sentence)
self.list_synonym = self.modify_sent(self.sentence, p=p)
new_sentences = []
for x in list(itertools.product(*self.list_synonym))[0:n_sent]:
new_sentences.append(x)
return new_sentences
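# --- Hedged usage sketch added by the editor; not part of the original module. ---
# The model file name below is an assumption for illustration: a Thai fastText
# model (.bin or .vec) must be downloaded separately, e.g. from
# https://fasttext.cc/docs/en/crawl-vectors.html
if __name__ == "__main__":
    aug = FastTextAug("cc.th.300.vec")  # hypothetical local path to the vectors
    print(aug.augment("ผมเรียน", n_sent=2, p=0.7))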
| 3,003 | 31.652174 | 74 | py |
pythainlp-dev/pythainlp/augment/lm/wangchanberta.py | pythainlp-dev/pythainlp/augment/lm/wangchanberta.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from transformers import (
CamembertTokenizer,
pipeline,
)
import random
from typing import List
model_name = "airesearch/wangchanberta-base-att-spm-uncased"
class Thai2transformersAug:
def __init__(self):
self.model_name = "airesearch/wangchanberta-base-att-spm-uncased"
self.target_tokenizer = CamembertTokenizer
self.tokenizer = CamembertTokenizer.from_pretrained(
self.model_name, revision="main"
)
self.tokenizer.additional_special_tokens = [
"<s>NOTUSED",
"</s>NOTUSED",
"<_>",
]
self.fill_mask = pipeline(
task="fill-mask",
tokenizer=self.tokenizer,
model=f"{self.model_name}",
revision="main",
)
self.MASK_TOKEN = self.tokenizer.mask_token
def generate(self, sentence: str, num_replace_tokens: int = 3):
self.sent2 = []
self.input_text = sentence
sent = [
i for i in self.tokenizer.tokenize(self.input_text) if i != "▁"
]
if len(sent) < num_replace_tokens:
num_replace_tokens = len(sent)
masked_text = self.input_text
for i in range(num_replace_tokens):
replace_token = [
sent.pop(random.randrange(len(sent))) for _ in range(1)
][0]
masked_text = masked_text + self.MASK_TOKEN
self.sent2 += [
str(j["sequence"]).replace("<s> ", "").replace("</s>", "")
for j in self.fill_mask(masked_text)
if j["sequence"] not in self.sent2
]
masked_text = self.input_text
return self.sent2
def augment(self, sentence: str, num_replace_tokens: int = 3) -> List[str]:
"""
Text Augment from wangchanberta
:param str sentence: thai sentence
        :param int num_replace_tokens: number of tokens to replace
        :return: list of augmented texts
:rtype: List[str]
:Example:
::
from pythainlp.augment.lm import Thai2transformersAug
aug=Thai2transformersAug()
aug.augment("ช้างมีทั้งหมด 50 ตัว บน")
# output: ['ช้างมีทั้งหมด 50 ตัว บนโลกใบนี้',
'ช้างมีทั้งหมด 50 ตัว บนสุด',
'ช้างมีทั้งหมด 50 ตัว บนบก',
'ช้างมีทั้งหมด 50 ตัว บนนั้น',
'ช้างมีทั้งหมด 50 ตัว บนหัว']
"""
self.sent2 = []
self.sent2 = self.generate(sentence, num_replace_tokens)
return self.sent2
| 3,157 | 32.595745 | 79 | py |
pythainlp-dev/pythainlp/augment/word2vec/__init__.py | pythainlp-dev/pythainlp/augment/word2vec/__init__.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Word2Vec
"""
__all__ = ["Word2VecAug", "Thai2fitAug", "LTW2VAug"]
from pythainlp.augment.word2vec.core import Word2VecAug
from pythainlp.augment.word2vec.thai2fit import Thai2fitAug
from pythainlp.augment.word2vec.ltw2v import LTW2VAug
| 856 | 34.708333 | 74 | py |
pythainlp-dev/pythainlp/augment/word2vec/bpemb_wv.py | pythainlp-dev/pythainlp/augment/word2vec/bpemb_wv.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pythainlp.augment.word2vec.core import Word2VecAug
from typing import List, Tuple
class BPEmbAug:
"""
Thai Text Augment using word2vec from BPEmb
BPEmb:
`github.com/bheinzerling/bpemb <https://github.com/bheinzerling/bpemb>`_
"""
def __init__(self, lang: str = "th", vs: int = 100000, dim: int = 300):
from bpemb import BPEmb
self.bpemb_temp = BPEmb(lang=lang, dim=dim, vs=vs)
self.model = self.bpemb_temp.emb
self.load_w2v()
def tokenizer(self, text: str) -> List[str]:
"""
:param str text: thai text
:rtype: List[str]
"""
return self.bpemb_temp.encode(text)
def load_w2v(self):
"""
Load BPEmb model
"""
self.aug = Word2VecAug(
self.model, tokenize=self.tokenizer, type="model"
)
def augment(
self, sentence: str, n_sent: int = 1, p: float = 0.7
) -> List[Tuple[str]]:
"""
Text Augment using word2vec from BPEmb
:param str sentence: thai sentence
        :param int n_sent: number of sentences to generate
:param float p: Probability of word
:return: list of synonyms
:rtype: List[str]
:Example:
::
from pythainlp.augment.word2vec.bpemb_wv import BPEmbAug
aug = BPEmbAug()
aug.augment("ผมเรียน", n_sent=2, p=0.5)
# output: ['ผมสอน', 'ผมเข้าเรียน']
"""
self.sentence = sentence.replace(" ", "▁")
self.temp = self.aug.augment(self.sentence, n_sent, p=p)
self.temp_new = []
for i in self.temp:
self.t = ""
for j in i:
self.t += j.replace("▁", "")
self.temp_new.append(self.t)
return self.temp_new
| 2,394 | 29.316456 | 76 | py |
pythainlp-dev/pythainlp/augment/word2vec/core.py | pythainlp-dev/pythainlp/augment/word2vec/core.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Tuple
import itertools
class Word2VecAug:
def __init__(
self, model: str, tokenize: object, type: str = "file"
) -> None:
"""
        :param str model: path of the model file, or a loaded model object
        :param object tokenize: tokenize function
        :param str type: model type (file, binary, model)
"""
import gensim.models.keyedvectors as word2vec
self.tokenizer = tokenize
if type == "file":
self.model = word2vec.KeyedVectors.load_word2vec_format(model)
elif type == "binary":
self.model = word2vec.KeyedVectors.load_word2vec_format(
model, binary=True, unicode_errors="ignore"
)
else:
self.model = model
self.dict_wv = list(self.model.key_to_index.keys())
def modify_sent(self, sent: str, p: float = 0.7) -> List[List[str]]:
"""
:param str sent: text sentence
:param float p: probability
:rtype: List[List[str]]
"""
list_sent_new = []
for i in sent:
if i in self.dict_wv:
w = [j for j, v in self.model.most_similar(i) if v >= p]
if w == []:
list_sent_new.append([i])
else:
list_sent_new.append(w)
else:
list_sent_new.append([i])
return list_sent_new
def augment(
self, sentence: str, n_sent: int = 1, p: float = 0.7
) -> List[Tuple[str]]:
"""
:param str sentence: text sentence
        :param int n_sent: maximum number of synonymous sentences to generate
        :param float p: minimum similarity score for replacement words
:return: list of synonyms
:rtype: List[Tuple[str]]
"""
self.sentence = self.tokenizer(sentence)
self.list_synonym = self.modify_sent(self.sentence, p=p)
new_sentences = []
for x in list(itertools.product(*self.list_synonym))[0:n_sent]:
new_sentences.append(x)
return new_sentences
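# --- Hedged usage sketch added by the editor; not part of the original module. ---
# Word2VecAug is normally constructed by the wrappers in thai2fit.py, ltw2v.py
# and bpemb_wv.py; the model file name below is an assumption for illustration.
if __name__ == "__main__":
    from pythainlp.tokenize import word_tokenize
    aug = Word2VecAug("my_thai_word2vec.bin", tokenize=word_tokenize, type="binary")
    print(aug.augment("ผมเรียน", n_sent=2, p=0.7))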
| 2,616 | 33.434211 | 74 | py |
pythainlp-dev/pythainlp/augment/word2vec/ltw2v.py | pythainlp-dev/pythainlp/augment/word2vec/ltw2v.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pythainlp.augment.word2vec.core import Word2VecAug
from pythainlp.corpus import get_corpus_path
from pythainlp.tokenize import word_tokenize
from typing import List, Tuple
class LTW2VAug:
"""
Text Augment using word2vec from LTW2V
LTW2V:
`github.com/PyThaiNLP/large-thaiword2vec <https://github.com/PyThaiNLP/large-thaiword2vec>`_
"""
def __init__(self):
self.ltw2v_wv = get_corpus_path("ltw2v")
self.load_w2v()
def tokenizer(self, text: str) -> List[str]:
"""
:param str text: thai text
:rtype: List[str]
"""
return word_tokenize(text, engine="newmm")
def load_w2v(self): # insert substitute
"""
Load ltw2v word2vec model
"""
self.aug = Word2VecAug(self.ltw2v_wv, self.tokenizer, type="binary")
def augment(
self, sentence: str, n_sent: int = 1, p: float = 0.7
) -> List[Tuple[str]]:
"""
        Text Augment using word2vec from LTW2V
        :param str sentence: thai sentence
        :param int n_sent: number of sentences to generate
        :param float p: Probability of word
        :return: list of augmented texts
:rtype: List[Tuple[str]]
:Example:
::
from pythainlp.augment.word2vec import LTW2VAug
aug = LTW2VAug()
aug.augment("ผมเรียน", n_sent=2, p=0.5)
# output: [('เขา', 'เรียนหนังสือ'), ('เขา', 'สมัครเรียน')]
"""
return self.aug.augment(sentence, n_sent, p)
| 2,122 | 29.768116 | 96 | py |
pythainlp-dev/pythainlp/augment/word2vec/thai2fit.py | pythainlp-dev/pythainlp/augment/word2vec/thai2fit.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pythainlp.augment.word2vec.core import Word2VecAug
from pythainlp.corpus import get_corpus_path
from pythainlp.tokenize import THAI2FIT_TOKENIZER
from typing import List, Tuple
class Thai2fitAug:
"""
Text Augment using word2vec from Thai2Fit
Thai2Fit:
`github.com/cstorm125/thai2fit <https://github.com/cstorm125/thai2fit>`_
"""
def __init__(self):
self.thai2fit_wv = get_corpus_path("thai2fit_wv")
self.load_w2v()
def tokenizer(self, text: str) -> List[str]:
"""
:param str text: thai text
:rtype: List[str]
"""
return THAI2FIT_TOKENIZER.word_tokenize(text)
def load_w2v(self):
"""
Load thai2fit word2vec model
"""
self.aug = Word2VecAug(self.thai2fit_wv, self.tokenizer, type="binary")
def augment(
self, sentence: str, n_sent: int = 1, p: float = 0.7
) -> List[Tuple[str]]:
"""
Text Augment using word2vec from Thai2Fit
:param str sentence: thai sentence
        :param int n_sent: number of sentences to generate
        :param float p: Probability of word
        :return: list of augmented texts
:rtype: List[Tuple[str]]
:Example:
::
from pythainlp.augment.word2vec import Thai2fitAug
aug = Thai2fitAug()
aug.augment("ผมเรียน", n_sent=2, p=0.5)
# output: [('พวกเรา', 'เรียน'), ('ฉัน', 'เรียน')]
"""
return self.aug.augment(sentence, n_sent, p)
| 2,110 | 29.594203 | 79 | py |
pythainlp-dev/pythainlp/benchmarks/__init__.py | pythainlp-dev/pythainlp/benchmarks/__init__.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Performance benchmarking.
"""
__all__ = ["benchmark"]
from pythainlp.benchmarks.word_tokenization import benchmark
| 735 | 32.454545 | 74 | py |
pythainlp-dev/pythainlp/benchmarks/word_tokenization.py | pythainlp-dev/pythainlp/benchmarks/word_tokenization.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import sys
from typing import List, Tuple
import numpy as np
import pandas as pd
SEPARATOR = "|"
# regex for removing to a space surrounded by separators, i.e. | |
SURROUNDING_SEPS_RX = re.compile(
"{sep}? ?{sep}$".format(sep=re.escape(SEPARATOR))
)
# regex for removing repeated separators, i.e. ||||
MULTIPLE_SEPS_RX = re.compile("{sep}+".format(sep=re.escape(SEPARATOR)))
# regex for removing tags, i.e. <NE>, </NE>
TAG_RX = re.compile(r"<\/?[A-Z]+>")
# regex for tailing separator, i.e. a|dog| -> a|dog
TAILING_SEP_RX = re.compile("{sep}$".format(sep=re.escape(SEPARATOR)))
def _f1(precision: float, recall: float) -> float:
"""
Compute f1.
    :param float precision: precision value
    :param float recall: recall value
:return: f1
:rtype: float
"""
if precision == recall == 0:
return 0
return 2 * precision * recall / (precision + recall)
def _flatten_result(my_dict: dict, sep: str = ":") -> dict:
"""
Flatten two-level dictionary.
Use keys in the first level as a prefix for keys in the two levels.
For example,
my_dict = { "a": { "b": 7 } }
flatten(my_dict)
{ "a:b": 7 }
:param dict my_dict: contains stats dictionary
:param str sep: separator between the two keys (default: ":")
:return: a one-level dictionary with key combined
:rtype: dict[str, float | str]
"""
items = []
for k1, kv2 in my_dict.items():
for k2, v in kv2.items():
new_key = f"{k1}{sep}{k2}"
items.append((new_key, v))
return dict(items)
def benchmark(ref_samples: List[str], samples: List[str]) -> pd.DataFrame:
"""
    Performance benchmark of samples.
Please see :meth:`pythainlp.benchmarks.word_tokenization.compute_stats` for
metrics being computed.
:param list[str] ref_samples: ground truth samples
:param list[str] samples: samples that we want to evaluate
:return: dataframe with row x col = len(samples) x len(metrics)
:rtype: pandas.DataFrame
"""
results = []
for i, (r, s) in enumerate(zip(ref_samples, samples)):
try:
r, s = preprocessing(r), preprocessing(s)
if r and s:
stats = compute_stats(r, s)
stats = _flatten_result(stats)
stats["expected"] = r
stats["actual"] = s
results.append(stats)
except:
reason = """
[Error]
Reason: %s
Pair (i=%d)
--- label
%s
--- sample
%s
""" % (
sys.exc_info(),
i,
r,
s,
)
raise SystemExit(reason)
return pd.DataFrame(results)
def preprocessing(txt: str, remove_space: bool = True) -> str:
"""
Clean up text before performing evaluation.
    :param str txt: text to be preprocessed
    :param bool remove_space: whether to remove whitespaces
:return: preprocessed text
:rtype: str
"""
txt = re.sub(SURROUNDING_SEPS_RX, "", txt)
if remove_space:
txt = re.sub(r"\s+", "", txt)
txt = re.sub(MULTIPLE_SEPS_RX, SEPARATOR, txt)
txt = re.sub(TAG_RX, "", txt)
txt = re.sub(TAILING_SEP_RX, "", txt).strip()
return txt
def compute_stats(ref_sample: str, raw_sample: str) -> dict:
"""
Compute statistics for tokenization quality
    These statistics include:
**Character-Level**:
True Positive, False Positive, True Negative, False Negative, Precision, Recall, and f1
**Word-Level**:
Precision, Recall, and f1
**Other**:
    - Correct tokenization indicator: {0, 1} sequence indicating whether the corresponding
      word is tokenized correctly.
    :param str ref_sample: ground truth sample
    :param str raw_sample: sample that we want to evaluate
    :return: metrics at character level and word level, plus correctly tokenized word indicators
:rtype: dict[str, float | str]
"""
ref_sample = _binary_representation(ref_sample)
sample = _binary_representation(raw_sample)
# Compute charater-level statistics
c_pos_pred, c_neg_pred = np.argwhere(sample == 1), np.argwhere(sample == 0)
c_pos_pred = c_pos_pred[c_pos_pred < ref_sample.shape[0]]
c_neg_pred = c_neg_pred[c_neg_pred < ref_sample.shape[0]]
c_tp = np.sum(ref_sample[c_pos_pred] == 1)
c_fp = np.sum(ref_sample[c_pos_pred] == 0)
c_tn = np.sum(ref_sample[c_neg_pred] == 0)
c_fn = np.sum(ref_sample[c_neg_pred] == 1)
c_precision = c_tp / (c_tp + c_fp)
c_recall = c_tp / (c_tp + c_fn)
c_f1 = _f1(c_precision, c_recall)
# Compute word-level statistics
# Find correctly tokenized words in the reference sample
word_boundaries = _find_word_boudaries(ref_sample)
# Find correctly tokenized words in the sample
ss_boundaries = _find_word_boudaries(sample)
tokenization_indicators = _find_words_correctly_tokenised(
word_boundaries, ss_boundaries
)
correctly_tokenised_words = np.sum(tokenization_indicators)
tokenization_indicators = list(
map(lambda x: str(x), tokenization_indicators)
)
return {
"char_level": {
"tp": c_tp,
"fp": c_fp,
"tn": c_tn,
"fn": c_fn,
},
"word_level": {
"correctly_tokenised_words": correctly_tokenised_words,
"total_words_in_sample": np.sum(sample),
"total_words_in_ref_sample": np.sum(ref_sample),
},
"global": {
"tokenisation_indicators": "".join(tokenization_indicators)
},
}
def _binary_representation(txt: str, verbose: bool = False):
"""
    Transform text into a {0, 1} sequence,
    where 1 indicates that the corresponding character is the beginning of
    a word. For example, ผม|ไม่|ชอบ|กิน|ผัก -> 10100...
:param str txt: input text that we want to transform
:param bool verbose: for debugging purposes
:return: {0, 1} sequence
:rtype: str
"""
chars = np.array(list(txt))
boundary = np.argwhere(chars == SEPARATOR).reshape(-1)
boundary = boundary - np.array(range(boundary.shape[0]))
bin_rept = np.zeros(len(txt) - boundary.shape[0])
bin_rept[list(boundary) + [0]] = 1
sample_wo_seps = list(txt.replace(SEPARATOR, ""))
# sanity check
assert len(sample_wo_seps) == len(bin_rept)
if verbose:
for c, m in zip(sample_wo_seps, bin_rept):
print("%s -- %d" % (c, m))
return bin_rept
def _find_word_boudaries(bin_reps) -> list:
"""
Find start and end location of each word.
:param str bin_reps: binary representation of a text
:return: list of tuples (start, end)
:rtype: list[tuple(int, int)]
"""
boundary = np.argwhere(bin_reps == 1).reshape(-1)
start_idx = boundary
end_idx = boundary[1:].tolist() + [bin_reps.shape[0]]
return list(zip(start_idx, end_idx))
def _find_words_correctly_tokenised(
ref_boundaries: List[Tuple[int, int]],
predicted_boundaries: List[Tuple[int, int]],
) -> Tuple[int]:
"""
Find whether each word is correctly tokenized.
:param list[tuple(int, int)] ref_boundaries: word boundaries of reference tokenization
    :param list[tuple(int, int)] predicted_boundaries: word boundaries of predicted tokenization
:return: binary sequence where 1 indicates the corresponding word is tokenized correctly
:rtype: tuple[int]
"""
ref_b = dict(zip(ref_boundaries, [1] * len(ref_boundaries)))
labels = tuple(map(lambda x: ref_b.get(x, 0), predicted_boundaries))
return labels
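# --- Hedged usage sketch added by the editor; not part of the original module. ---
# Both the reference and the predicted tokenization use "|" as the word
# separator; the sample pair below is illustrative only.
if __name__ == "__main__":
    ref = ["ผม|ไม่|ชอบ|กิน|ผัก"]
    pred = ["ผม|ไม่|ชอบ|กินผัก"]
    df = benchmark(ref, pred)
    # one row per sample; columns include char_level:* and word_level:* metrics
    print(df[["char_level:tp", "word_level:correctly_tokenised_words"]])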
| 8,159 | 27.235294 | 97 | py |
pythainlp-dev/pythainlp/chat/__init__.py | pythainlp-dev/pythainlp/chat/__init__.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
pythainlp.chat
"""
__all__ = ["ChatBotModel"]
from pythainlp.chat.core import ChatBotModel
| 711 | 31.363636 | 74 | py |
pythainlp-dev/pythainlp/chat/core.py | pythainlp-dev/pythainlp/chat/core.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
class ChatBotModel:
def __init__(self):
"""
        Chatbot based on a generative language model
"""
self.history = []
def reset_chat(self):
"""
Reset chat by clean history
"""
self.history = []
def load_model(
self,
model_name:str="wangchanglm",
return_dict:bool=True,
load_in_8bit:bool=False,
device:str="cuda",
torch_dtype=torch.float16,
offload_folder:str="./",
low_cpu_mem_usage:bool=True
):
"""
Load model
:param str model_name: Model name (Now, we support wangchanglm only)
:param bool return_dict: return_dict
:param bool load_in_8bit: load model in 8bit
:param str device: device (cpu, cuda or other)
:param torch_dtype torch_dtype: torch_dtype
:param str offload_folder: offload folder
:param bool low_cpu_mem_usage: low cpu mem usage
"""
if model_name == "wangchanglm":
from pythainlp.generate.wangchanglm import WangChanGLM
self.model = WangChanGLM()
self.model.load_model(
model_path="pythainlp/wangchanglm-7.5B-sft-en-sharded",
return_dict=return_dict,
load_in_8bit=load_in_8bit,
offload_folder=offload_folder,
device=device,
torch_dtype=torch_dtype,
low_cpu_mem_usage=low_cpu_mem_usage
)
else:
raise NotImplementedError(f"We doesn't support {model_name}.")
def chat(self, text:str)->str:
"""
Chatbot
        :param str text: message to ask the chatbot.
        :return: the answer from the chatbot.
:rtype: str
"""
_temp=""
if self.history!=[]:
for h,b in self.history:
_temp+=self.model.PROMPT_DICT['prompt_chatbot'].format_map({"human":h,"bot":b})+self.model.stop_token
_temp+=self.model.PROMPT_DICT['prompt_chatbot'].format_map({"human":human,"bot":""})
_bot = self.model.gen_instruct(_temp)
self.history.append((text,_bot))
return _bot
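# --- Hedged usage sketch added by the editor; not part of the original module. ---
# Loading WangChanGLM downloads several GB of weights, so the calls are shown
# as comments only; arguments follow the defaults documented in load_model().
#
#     bot = ChatBotModel()
#     bot.load_model()            # defaults to wangchanglm on CUDA, float16
#     print(bot.chat("สวัสดี"))
#     bot.reset_chat()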
| 2,790 | 33.8875 | 117 | py |
pythainlp-dev/pythainlp/cli/__init__.py | pythainlp-dev/pythainlp/cli/__init__.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command line helpers."""
import sys
from argparse import ArgumentParser
from pythainlp.cli import data, soundex, tag, tokenize, benchmark
# a command should be a verb when possible
COMMANDS = sorted(["data", "soundex", "tag", "tokenize", "benchmark"])
CLI_NAME = "thainlp"
def make_usage(command: str) -> dict:
prog = f"{CLI_NAME} {command}"
return dict(prog=prog, usage=f"{prog} [options]")
def exit_if_empty(command: str, parser: ArgumentParser) -> None:
if not command:
parser.print_help()
sys.exit(0)
| 1,158 | 30.324324 | 74 | py |
pythainlp-dev/pythainlp/cli/benchmark.py | pythainlp-dev/pythainlp/cli/benchmark.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import os
import yaml
from pythainlp import cli
from pythainlp.benchmarks import word_tokenization
def _read_file(path):
with open(path, "r", encoding="utf-8") as f:
lines = map(lambda r: r.strip(), f.readlines())
return list(lines)
class App:
def __init__(self, argv):
parser = argparse.ArgumentParser(
prog="benchmark",
description=(
"Benchmark for various tasks;\n"
"currently, we have only for word tokenization."
),
usage=(
"thainlp benchmark [task] [task-options]\n\n"
"tasks:\n\n"
"word-tokenization benchmark word tokenization\n\n"
"--"
),
)
parser.add_argument("task", type=str, help="[word-tokenization]")
args = parser.parse_args(argv[2:3])
cli.exit_if_empty(args.task, parser)
task = str.lower(args.task)
task_argv = argv[3:]
if task == "word-tokenization":
WordTokenizationBenchmark(task, task_argv)
class WordTokenizationBenchmark:
def __init__(self, name, argv):
parser = argparse.ArgumentParser(**cli.make_usage("benchmark " + name))
parser.add_argument(
"--input-file",
action="store",
help="Path to input file to compare against the test file",
)
parser.add_argument(
"--test-file",
action="store",
help="Path to test file i.e. ground truth",
)
parser.add_argument(
"--save-details",
default=False,
action="store_true",
help=(
"Save comparison details to files (eval-XXX.json"
" and eval-details-XXX.json)"
),
)
args = parser.parse_args(argv)
actual = _read_file(args.input_file)
expected = _read_file(args.test_file)
assert len(actual) == len(
expected
), "Input and test files do not have the same number of samples"
print(
"Benchmarking %s against %s with %d samples in total"
% (args.input_file, args.test_file, len(actual))
)
df_raw = word_tokenization.benchmark(expected, actual)
columns = [
"char_level:tp",
"char_level:fp",
"char_level:tn",
"char_level:fn",
"word_level:correctly_tokenised_words",
"word_level:total_words_in_sample",
"word_level:total_words_in_ref_sample",
]
statistics = dict()
for c in columns:
statistics[c] = float(df_raw[c].sum())
statistics["char_level:precision"] = statistics["char_level:tp"] / (
statistics["char_level:tp"] + statistics["char_level:fp"]
)
statistics["char_level:recall"] = statistics["char_level:tp"] / (
statistics["char_level:tp"] + statistics["char_level:fn"]
)
statistics["word_level:precision"] = (
statistics["word_level:correctly_tokenised_words"]
/ statistics["word_level:total_words_in_sample"]
)
statistics["word_level:recall"] = (
statistics["word_level:correctly_tokenised_words"]
/ statistics["word_level:total_words_in_ref_sample"]
)
print("============== Benchmark Result ==============")
for c in ["tp", "fn", "tn", "fp", "precision", "recall"]:
c = f"char_level:{c}"
v = statistics[c]
print(f"{c:>40s} {v:.4f}")
for c in [
"total_words_in_sample",
"total_words_in_ref_sample",
"correctly_tokenised_words",
"precision",
"recall",
]:
c = f"word_level:{c}"
v = statistics[c]
print(f"{c:>40s} {v:.4f}")
if args.save_details:
dir_name = os.path.dirname(args.input_file)
file_name = args.input_file.split("/")[-1].split(".")[0]
res_path = "%s/eval-%s.yml" % (dir_name, file_name)
print("Evaluation result is saved to %s" % res_path)
with open(res_path, "w", encoding="utf-8") as outfile:
yaml.dump(statistics, outfile, default_flow_style=False)
res_path = "%s/eval-details-%s.json" % (dir_name, file_name)
print("Details of comparisons is saved to %s" % res_path)
with open(res_path, "w", encoding="utf-8") as f:
samples = []
for i, r in enumerate(df_raw.to_dict("records")):
expected, actual = r["expected"], r["actual"]
del r["expected"]
del r["actual"]
samples.append(
dict(metrics=r, expected=expected, actual=actual, id=i)
)
details = dict(metrics=statistics, samples=samples)
json.dump(details, f, ensure_ascii=False)
| 5,723 | 30.977654 | 79 | py |
pythainlp-dev/pythainlp/cli/data.py | pythainlp-dev/pythainlp/cli/data.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
thainlp dataset/corpus management command line.
"""
import argparse
from pythainlp import cli, corpus
from pythainlp.tools import get_pythainlp_data_path
class App:
def __init__(self, argv):
parser = argparse.ArgumentParser(
prog="data",
description="Manage dataset/corpus.",
usage=(
"thainlp data <subcommand>\n\n"
"subcommands:\n\n"
"catalog show list of available datasets\n"
"info <dataset_name> show information about the dataset\n"
"get <dataset_name> download the dataset\n"
"rm <dataset_name> remove the dataset\n"
"path show full path to data directory\n\n"
"Example:\n\n"
"thainlp data get thai2fit_wv\n\n"
"Current data path:\n\n"
f"{get_pythainlp_data_path()}\n\n"
"To change PyThaiNLP data path, set the operating system's\n"
"PYTHAINLP_DATA_DIR environment variable.\n\n"
"For more information about corpora that PyThaiNLP use, see:\n"
"https://github.com/PyThaiNLP/pythainlp-corpus/\n\n"
"--"
),
)
parser.add_argument(
"subcommand",
type=str,
choices=["catalog", "info", "get", "rm", "path"],
help="action on dataset/corpus",
)
args = parser.parse_args(argv[2:3])
getattr(self, args.subcommand)(argv)
def get(self, argv):
parser = argparse.ArgumentParser(
description="Download a dataset",
usage="thainlp data get <dataset_name>",
)
parser.add_argument(
"dataset_name",
type=str,
help="dataset/corpus's name",
)
args = parser.parse_args(argv[3:])
if corpus.download(args.dataset_name):
print("Downloaded successfully.")
else:
print("Not found.")
def rm(self, argv):
parser = argparse.ArgumentParser(
description="Remove a dataset",
usage="thainlp data rm <dataset_name>",
)
parser.add_argument(
"dataset_name",
type=str,
help="dataset/corpus's name",
)
args = parser.parse_args(argv[3:])
if corpus.remove(args.dataset_name):
print("Removed successfully.")
else:
print("Not found.")
def info(self, argv):
parser = argparse.ArgumentParser(
description="Print information about a dataset",
usage="thainlp data info <dataset_name>",
)
parser.add_argument(
"dataset_name",
type=str,
help="dataset/corpus's name",
)
args = parser.parse_args(argv[3:])
info = corpus.get_corpus_db_detail(args.dataset_name)
if info:
print(info)
else:
print("Not found.")
def catalog(self, argv):
"""Print dataset/corpus available for download."""
corpus_db = corpus.get_corpus_db(corpus.corpus_db_url())
corpus_db = corpus_db.json()
corpus_names = sorted(corpus_db.keys())
print("Dataset/corpus available for download:")
for name in corpus_names:
print(f"- {name} {corpus_db[name]['latest_version']}", end="")
corpus_info = corpus.get_corpus_db_detail(name)
if corpus_info:
print(f" (Local: {corpus_info['version']})")
else:
print()
print(
"\nUse subcommand 'get' to download a dataset.\n\n"
"Example: thainlp data get crfcut\n"
)
def path(self, argv):
"""Print path for local dataset."""
print(get_pythainlp_data_path())
| 4,521 | 34.328125 | 79 | py |
pythainlp-dev/pythainlp/cli/soundex.py | pythainlp-dev/pythainlp/cli/soundex.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
thainlp soundex command line.
Take input text from command line.
"""
import argparse
from pythainlp import cli
from pythainlp.soundex import DEFAULT_SOUNDEX_ENGINE, soundex
class App:
def __init__(self, argv):
parser = argparse.ArgumentParser(
prog="soundex",
description="Convert a text to its sound-based index.",
usage=(
"thainlp soundex [-a algorithm] <text>\n\n"
"algorithms:\n\n"
"udom83\n"
"lk82\n"
"metasound\n\n"
f"Default soundex algorithm is {DEFAULT_SOUNDEX_ENGINE}.\n\n"
"<text> should be inside double quotes.\n\n"
"Example:\n\n"
'thainlp soundex -a lk82 "มอเตอร์ไซค์"\n\n'
"--"
),
)
parser.add_argument(
"-a",
"--algo",
dest="algorithm",
type=str,
choices=["udom83", "lk82", "metasound"],
help="soundex algorithm",
default=DEFAULT_SOUNDEX_ENGINE,
)
parser.add_argument(
"text",
type=str,
help="input text",
)
args = parser.parse_args(argv[2:])
sdx = soundex(args.text, engine=args.algorithm)
print(sdx)
| 1,946 | 29.904762 | 77 | py |
pythainlp-dev/pythainlp/cli/tag.py | pythainlp-dev/pythainlp/cli/tag.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
thainlp tag command line.
"""
import argparse
from pythainlp import cli
from pythainlp.tag import locations, named_entity, pos_tag
class SubAppBase:
def __init__(self, name, argv):
parser = argparse.ArgumentParser(**cli.make_usage("tag " + name))
parser.add_argument(
"text",
type=str,
help="input text",
)
parser.add_argument(
"-s",
"--sep",
dest="separator",
type=str,
help=f"Token separator for input text. default: {self.separator}",
default=self.separator,
)
args = parser.parse_args(argv)
self.args = args
tokens = args.text.split(args.separator)
result = self.run(tokens)
for word, tag in result:
print(word, "/", tag)
class POSTaggingApp(SubAppBase):
def __init__(self, *args, **kwargs):
self.separator = "|"
self.run = pos_tag
super().__init__(*args, **kwargs)
class App:
def __init__(self, argv):
parser = argparse.ArgumentParser(
prog="tag",
description="Annotate a text with linguistic information",
usage=(
'thainlp tag <tag_type> [--sep "<separator>"] "<text>"\n\n'
"tag_type:\n\n"
"pos part-of-speech\n\n"
"<separator> and <text> should be inside double quotes.\n"
"<text> should be a tokenized text, "
"with tokens separated by <separator>.\n\n"
"Example:\n\n"
'thainlp tag pos -s " " "แรงดึงดูด เก็บ หัว คุณ ลง"\n\n'
"--"
),
)
parser.add_argument("tag_type", type=str, help="[pos]")
args = parser.parse_args(argv[2:3])
cli.exit_if_empty(args.tag_type, parser)
tag_type = str.lower(args.tag_type)
argv = argv[3:]
if tag_type == "pos":
POSTaggingApp("Part-of-Speech tagging", argv)
else:
print(f"Tag type not available: {tag_type}")
| 2,719 | 29.909091 | 78 | py |
pythainlp-dev/pythainlp/cli/tokenize.py | pythainlp-dev/pythainlp/cli/tokenize.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
thainlp tokenize command line.
"""
import argparse
from pythainlp import cli
from pythainlp.tokenize import (
DEFAULT_SENT_TOKENIZE_ENGINE,
DEFAULT_SUBWORD_TOKENIZE_ENGINE,
DEFAULT_SYLLABLE_TOKENIZE_ENGINE,
DEFAULT_WORD_TOKENIZE_ENGINE,
sent_tokenize,
subword_tokenize,
word_tokenize,
)
DEFAULT_SENT_TOKEN_SEPARATOR = "@@"
DEFAULT_SUBWORD_TOKEN_SEPARATOR = "/"
DEFAULT_SYLLABLE_TOKEN_SEPARATOR = "~"
DEFAULT_WORD_TOKEN_SEPARATOR = "|"
class SubAppBase:
def __init__(self, name, argv):
parser = argparse.ArgumentParser(**cli.make_usage("tokenize " + name))
parser.add_argument(
"text",
type=str,
nargs="?",
help="input text",
)
parser.add_argument(
"-s",
"--sep",
dest="separator",
type=str,
help=f"default: {self.separator}",
default=self.separator,
)
parser.add_argument(
"-a",
"--algo",
dest="algorithm",
type=str,
help=f"default: {self.algorithm}",
default=self.algorithm,
)
parser.add_argument(
"-w",
"--keep-whitespace",
dest="keep_whitespace",
action="store_true",
)
parser.add_argument(
"-nw",
"--no-whitespace",
dest="keep_whitespace",
action="store_false",
)
parser.set_defaults(keep_whitespace=True)
args = parser.parse_args(argv)
self.args = args
cli.exit_if_empty(args.text, parser)
result = self.run(
args.text,
engine=args.algorithm,
keep_whitespace=args.keep_whitespace,
)
print(args.separator.join(result) + args.separator)
class WordTokenizationApp(SubAppBase):
def __init__(self, *args, **kwargs):
self.keep_whitespace = True
self.algorithm = DEFAULT_WORD_TOKENIZE_ENGINE
self.separator = DEFAULT_WORD_TOKEN_SEPARATOR
self.run = word_tokenize
super().__init__(*args, **kwargs)
class SentenceTokenizationApp(SubAppBase):
def __init__(self, *args, **kwargs):
self.keep_whitespace = True
self.algorithm = DEFAULT_SENT_TOKENIZE_ENGINE
self.separator = DEFAULT_SENT_TOKEN_SEPARATOR
self.run = sent_tokenize
super().__init__(*args, **kwargs)
class SubwordTokenizationApp(SubAppBase):
def __init__(self, *args, **kwargs):
self.keep_whitespace = True
self.algorithm = DEFAULT_SUBWORD_TOKENIZE_ENGINE
self.separator = DEFAULT_SUBWORD_TOKEN_SEPARATOR
self.run = subword_tokenize
super().__init__(*args, **kwargs)
class App:
def __init__(self, argv):
parser = argparse.ArgumentParser(
prog="tokenize",
description="Break a text into small units (tokens).",
usage=(
'thainlp tokenize <token_type> [options] "<text>"\n\n'
"token_type:\n\n"
"subword subword (may not be a linguistic unit)\n"
"syllable syllable\n"
"word word\n"
"sent sentence\n\n"
"options:\n\n"
"--sep or -s <separator> specify custom separator\n"
" (default is a space)\n"
"--algo or -a <algorithm> tokenization algorithm\n"
" (see API doc for more info)\n"
"--keep-whitespace or -w keep whitespaces in output\n"
" (default)\n\n"
"<separator> and <text> should be inside double quotes.\n\n"
"Example:\n\n"
'thainlp tokenize word -s "|" "ใต้แสงนีออนเปลี่ยวเหงา"\n\n'
"--"
),
)
parser.add_argument(
"token_type",
type=str,
help="[subword|word|sent]",
)
args = parser.parse_args(argv[2:3])
cli.exit_if_empty(args.token_type, parser)
token_type = str.lower(args.token_type)
argv = argv[3:]
if token_type.startswith("w"):
WordTokenizationApp("word", argv)
elif token_type.startswith("su"):
SubwordTokenizationApp("subword", argv)
elif token_type.startswith("se"):
SentenceTokenizationApp("sent", argv)
else:
print(f"Token type not available: {token_type}")
| 5,231 | 31.7 | 78 | py |
pythainlp-dev/pythainlp/cls/__init__.py | pythainlp-dev/pythainlp/cls/__init__.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
pythainlp.cls
"""
__all__ = ["GzipModel"]
from pythainlp.cls.param_free import GzipModel
| 709 | 31.272727 | 74 | py |
pythainlp-dev/pythainlp/cls/param_free.py | pythainlp-dev/pythainlp/cls/param_free.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gzip
import numpy as np
from typing import Dict, List, Tuple, Union
class GzipModel:
"""
    This class is a reimplementation of “Low-Resource” Text Classification: A Parameter-Free Classification Method with Compressors (Jiang et al., Findings 2023)
:param list training_data: list [(text_sample,label)]
"""
def __init__(self, training_data: List[Tuple[str, str]]):
self.training_data = np.array(training_data)
self.Cx2_list = self.train()
def train(self):
Cx2_list = list()
for i in range(len(self.training_data)):
Cx2_list.append(
len(gzip.compress(self.training_data[i][0].encode("utf-8")))
)
return Cx2_list
def predict(self, x1: str, k: int = 1):
Cx1 = len(gzip.compress(x1.encode("utf-8")))
disance_from_x1 = []
for i in range(len(self.Cx2_list)):
x2 = self.training_data[i][0]
Cx2 = self.Cx2_list[i]
x1x2 = "".join([x1, x2])
Cx1x2 = len(gzip.compress(x1x2.encode("utf-8")))
# normalized compression distance
ncd = (Cx1x2 - min(Cx1, Cx2)) / max(Cx1, Cx2)
disance_from_x1.append(ncd)
sorted_idx = np.argsort(np.array(disance_from_x1))
top_k_class = self.training_data[sorted_idx[:k], 1]
_, counts = np.unique(top_k_class, return_counts=True)
predict_class = top_k_class[counts.argmax()]
return predict_class
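# --- Hedged usage sketch added by the editor; not part of the original module. ---
# The tiny training set and its labels below are made up for illustration; the
# k-nearest-neighbour vote over normalized compression distance needs no training
# beyond pre-compressing the stored samples.
if __name__ == "__main__":
    train = [
        ("หนังเรื่องนี้สนุกมาก", "pos"),
        ("บริการแย่มาก ไม่ประทับใจ", "neg"),
        ("อาหารอร่อย บรรยากาศดี", "pos"),
        ("รอนานมาก ไม่พอใจเลย", "neg"),
    ]
    model = GzipModel(train)
    print(model.predict("ร้านนี้อาหารอร่อยมาก", k=1))  # expected to lean towards "pos"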
| 2,095 | 36.428571 | 162 | py |
pythainlp-dev/pythainlp/coref/__init__.py | pythainlp-dev/pythainlp/coref/__init__.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
PyThaiNLP Coreference Resolution
"""
__all__ = ["coreference_resolution"]
from pythainlp.coref.core import coreference_resolution
| 748 | 36.45 | 74 | py |
pythainlp-dev/pythainlp/coref/_fastcoref.py | pythainlp-dev/pythainlp/coref/_fastcoref.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
import spacy
class FastCoref:
def __init__(self, model_name, nlp=spacy.blank("th"), device:str="cpu", type:str="FCoref") -> None:
if type == "FCoref":
from fastcoref import FCoref as _model
else:
from fastcoref import LingMessCoref as _model
self.model_name = model_name
self.nlp = nlp
self.model = _model(self.model_name,device=device,nlp=self.nlp)
def _to_json(self, _predict):
return {
"text":_predict.text,
"clusters_string":_predict.get_clusters(as_strings=True),
"clusters":_predict.get_clusters(as_strings=False)
}
def predict(self, texts:List[str])->dict:
return [self._to_json(i) for i in self.model.predict(texts=texts)]
| 1,418 | 35.384615 | 103 | py |
pythainlp-dev/pythainlp/coref/core.py | pythainlp-dev/pythainlp/coref/core.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
model = None
def coreference_resolution(texts:List[str], model_name:str="han-coref-v1.0", device:str="cpu"):
"""
Coreference Resolution
    :param List[str] texts: list of texts for coreference resolution
    :param str model_name: coreference resolution model
    :param str device: device for running the coreference resolution model (cpu, cuda, or other)
    :return: list of coreference resolution results
:rtype: List[dict]
:Options for model_name:
        * *han-coref-v1.0* - (default) Han-Coref: Thai coreference resolution by PyThaiNLP v1.0
:Example:
::
from pythainlp.coref import coreference_resolution
print(
coreference_resolution(
["Bill Gates ได้รับวัคซีน COVID-19 เข็มแรกแล้ว ระบุ ผมรู้สึกสบายมาก"]
)
)
# output:
# [
# {'text': 'Bill Gates ได้รับวัคซีน COVID-19 เข็มแรกแล้ว ระบุ ผมรู้สึกสบายมาก',
# 'clusters_string': [['Bill Gates', 'ผม']],
# 'clusters': [[(0, 10), (50, 52)]]}
# ]
"""
global model
if isinstance(texts, str):
texts = [texts]
    if model is None and model_name == "han-coref-v1.0":
from pythainlp.coref.han_coref import HanCoref
model = HanCoref(device=device)
return model.predict(texts) | 1,935 | 34.2 | 95 | py |
pythainlp-dev/pythainlp/coref/han_coref.py | pythainlp-dev/pythainlp/coref/han_coref.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pythainlp.coref._fastcoref import FastCoref
import spacy
class HanCoref(FastCoref):
def __init__(self,device:str="cpu",nlp=spacy.blank("th")) -> None:
super(self.__class__, self).__init__(
model_name="pythainlp/han-coref-v1.0",
device=device,
nlp=nlp
)
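# --- Hedged usage sketch added by the editor; not part of the original module. ---
# HanCoref downloads the pythainlp/han-coref-v1.0 model on first use, so the
# calls are shown as comments only; the output format matches the example in
# pythainlp.coref.core.coreference_resolution.
#
#     coref = HanCoref(device="cpu")
#     print(coref.predict(["Bill Gates ได้รับวัคซีน COVID-19 เข็มแรกแล้ว ระบุ ผมรู้สึกสบายมาก"]))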
| 930 | 34.807692 | 74 | py |
pythainlp-dev/pythainlp/corpus/__init__.py | pythainlp-dev/pythainlp/corpus/__init__.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Corpus related functions.
Access to dictionaries, word lists, and language models.
Including download manager.
"""
__all__ = [
"corpus_path",
"corpus_db_path",
"corpus_db_url",
"countries",
"download",
"get_corpus",
"get_corpus_db",
"get_corpus_db_detail",
"get_corpus_default_db",
"get_corpus_path",
"provinces",
"remove",
"thai_dict",
"thai_family_names",
"thai_female_names",
"thai_male_names",
"thai_negations",
"thai_stopwords",
"thai_syllables",
"thai_words",
"thai_wsd_dict",
"thai_orst_words",
"path_pythainlp_corpus",
"get_path_folder_corpus",
]
import os
from pythainlp.tools import get_full_data_path, get_pythainlp_path
# Remote and local corpus databases
_CORPUS_DIRNAME = "corpus"
_CORPUS_PATH = os.path.join(get_pythainlp_path(), _CORPUS_DIRNAME)
_CHECK_MODE = os.getenv("PYTHAINLP_READ_MODE")
# remote corpus catalog URL
_CORPUS_DB_URL = "https://pythainlp.github.io/pythainlp-corpus/db.json"
# local corpus catalog filename
_CORPUS_DB_FILENAME = "db.json"
# local corpus catalog full path
_CORPUS_DB_PATH = get_full_data_path(_CORPUS_DB_FILENAME)
# create a local corpus database if it does not already exist
if not os.path.exists(_CORPUS_DB_PATH) and _CHECK_MODE != "1":
with open(_CORPUS_DB_PATH, "w", encoding="utf-8") as f:
f.write(r'{"_default": {}}')
def corpus_path() -> str:
"""
Get path where corpus files are kept locally.
"""
return _CORPUS_PATH
def corpus_db_url() -> str:
"""
Get remote URL of corpus catalog.
"""
return _CORPUS_DB_URL
def corpus_db_path() -> str:
"""
Get local path of corpus catalog.
"""
return _CORPUS_DB_PATH
from pythainlp.corpus.core import (
download,
get_corpus,
get_corpus_db,
get_corpus_db_detail,
get_corpus_default_db,
get_corpus_path,
get_path_folder_corpus,
remove,
path_pythainlp_corpus,
) # these imports must come before other pythainlp.corpus.* imports
from pythainlp.corpus.common import (
countries,
provinces,
thai_family_names,
thai_female_names,
thai_male_names,
thai_negations,
thai_stopwords,
thai_syllables,
thai_words,
thai_orst_words,
thai_dict,
thai_wsd_dict
)
| 2,914 | 23.291667 | 74 | py |
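A quick sketch of the path helpers defined above; the paths shown in the comments are illustrative and depend on the local installation.

```python
from pythainlp.corpus import corpus_path, corpus_db_path, corpus_db_url

print(corpus_path())     # directory of word lists bundled with the package
print(corpus_db_path())  # local catalog file, e.g. ~/pythainlp-data/db.json
print(corpus_db_url())   # https://pythainlp.github.io/pythainlp-corpus/db.json
```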
pythainlp-dev/pythainlp/corpus/common.py | pythainlp-dev/pythainlp/corpus/common.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Common list of words.
"""
__all__ = [
"countries",
"provinces",
"thai_family_names",
"thai_female_names",
"thai_male_names",
"thai_negations",
"thai_stopwords",
"thai_syllables",
"thai_words",
"thai_dict",
"thai_wsd_dict",
]
from typing import FrozenSet, List, Union
from pythainlp.corpus import get_corpus, get_corpus_path
_THAI_COUNTRIES = set()
_THAI_COUNTRIES_FILENAME = "countries_th.txt"
_THAI_THAILAND_PROVINCES = set()
_THAI_THAILAND_PROVINCES_DETAILS = list()
_THAI_THAILAND_PROVINCES_FILENAME = "thailand_provinces_th.csv"
_THAI_SYLLABLES = set()
_THAI_SYLLABLES_FILENAME = "syllables_th.txt"
_THAI_WORDS = set()
_THAI_WORDS_FILENAME = "words_th.txt"
_THAI_STOPWORDS = set()
_THAI_STOPWORDS_FILENAME = "stopwords_th.txt"
_THAI_NEGATIONS = set()
_THAI_NEGATIONS_FILENAME = "negations_th.txt"
_THAI_FAMLIY_NAMES = set()
_THAI_FAMLIY_NAMES_FILENAME = "family_names_th.txt"
_THAI_FEMALE_NAMES = set()
_THAI_FEMALE_NAMES_FILENAME = "person_names_female_th.txt"
_THAI_MALE_NAMES = set()
_THAI_MALE_NAMES_FILENAME = "person_names_male_th.txt"
_THAI_ORST_WORDS = set()
_THAI_DICT = {}
_THAI_WSD_DICT = {}
def countries() -> FrozenSet[str]:
"""
Return a frozenset of country names in Thai such as "แคนาดา", "โรมาเนีย",
"แอลจีเรีย", and "ลาว".
\n(See: `dev/pythainlp/corpus/countries_th.txt\
<https://github.com/PyThaiNLP/pythainlp/blob/dev/pythainlp/corpus/countries_th.txt>`_)
:return: :class:`frozenset` containing countries names in Thai
:rtype: :class:`frozenset`
"""
global _THAI_COUNTRIES
if not _THAI_COUNTRIES:
_THAI_COUNTRIES = get_corpus(_THAI_COUNTRIES_FILENAME)
return _THAI_COUNTRIES
def provinces(details: bool = False) -> Union[FrozenSet[str], List[str]]:
"""
Return a frozenset of Thailand province names in Thai such as "กระบี่",
"กรุงเทพมหานคร", "กาญจนบุรี", and "อุบลราชธานี".
    \n(See: `dev/pythainlp/corpus/thailand_provinces_th.csv\
    <https://github.com/PyThaiNLP/pythainlp/blob/dev/pythainlp/corpus/thailand_provinces_th.csv>`_)
:param bool details: return details of provinces or not
:return: :class:`frozenset` containing province names of Thailand \
(if details is False) or :class:`list` containing :class:`dict` of \
province names and details such as \
[{'name_th': 'นนทบุรี', 'abbr_th': 'นบ', 'name_en': 'Nonthaburi', \
'abbr_en': 'NBI'}].
:rtype: :class:`frozenset` or :class:`list`
"""
global _THAI_THAILAND_PROVINCES, _THAI_THAILAND_PROVINCES_DETAILS
if not _THAI_THAILAND_PROVINCES or not _THAI_THAILAND_PROVINCES_DETAILS:
provs = set()
prov_details = list()
for line in get_corpus(_THAI_THAILAND_PROVINCES_FILENAME, as_is=True):
p = line.split(",")
prov = dict()
prov["name_th"] = p[0]
prov["abbr_th"] = p[1]
prov["name_en"] = p[2]
prov["abbr_en"] = p[3]
provs.add(prov["name_th"])
prov_details.append(prov)
_THAI_THAILAND_PROVINCES = frozenset(provs)
_THAI_THAILAND_PROVINCES_DETAILS = prov_details
if details:
return _THAI_THAILAND_PROVINCES_DETAILS
return _THAI_THAILAND_PROVINCES
def thai_syllables() -> FrozenSet[str]:
"""
Return a frozenset of Thai syllables such as "กรอบ", "ก็", "๑", "โมบ",
"โมน", "โม่ง", "กา", "ก่า", and, "ก้า".
\n(See: `dev/pythainlp/corpus/syllables_th.txt\
<https://github.com/PyThaiNLP/pythainlp/blob/dev/pythainlp/corpus/syllables_th.txt>`_)
    We use the Thai syllables list from `KUCut <https://github.com/Thanabhat/KUCut>`_.
:return: :class:`frozenset` containing syllables in Thai language.
:rtype: :class:`frozenset`
"""
global _THAI_SYLLABLES
if not _THAI_SYLLABLES:
_THAI_SYLLABLES = get_corpus(_THAI_SYLLABLES_FILENAME)
return _THAI_SYLLABLES
def thai_words() -> FrozenSet[str]:
"""
Return a frozenset of Thai words such as "กติกา", "กดดัน", "พิษ",
and "พิษภัย". \n(See: `dev/pythainlp/corpus/words_th.txt\
<https://github.com/PyThaiNLP/pythainlp/blob/dev/pythainlp/corpus/words_th.txt>`_)
:return: :class:`frozenset` containing words in Thai language.
:rtype: :class:`frozenset`
"""
global _THAI_WORDS
if not _THAI_WORDS:
_THAI_WORDS = get_corpus(_THAI_WORDS_FILENAME)
return _THAI_WORDS
def thai_orst_words() -> FrozenSet[str]:
"""
Return a frozenset of Thai words from Royal Society of Thailand
\n(See: `dev/pythainlp/corpus/thai_orst_words.txt\
<https://github.com/PyThaiNLP/pythainlp/blob/dev/pythainlp/corpus/thai_orst_words>`_)
:return: :class:`frozenset` containing words in Thai language.
:rtype: :class:`frozenset`
"""
global _THAI_ORST_WORDS
if not _THAI_ORST_WORDS:
_THAI_ORST_WORDS = get_corpus("thai_orst_words.txt")
return _THAI_ORST_WORDS
def thai_stopwords() -> FrozenSet[str]:
"""
Return a frozenset of Thai stopwords such as "มี", "ไป", "ไง", "ขณะ",
"การ", and "ประการหนึ่ง". \n(See: `dev/pythainlp/corpus/stopwords_th.txt\
<https://github.com/PyThaiNLP/pythainlp/blob/dev/pythainlp/corpus/stopwords_th.txt>`_)
    We use the stopword list from the thesis of เพ็ญศิริ ลี้ตระกูล.
:See Also:
เพ็ญศิริ ลี้ตระกูล . \
การเลือกประโยคสำคัญในการสรุปความภาษาไทยโดยใช้แบบจำลองแบบลำดับชั้น. \
กรุงเทพมหานคร : มหาวิทยาลัยธรรมศาสตร์; 2551.
:return: :class:`frozenset` containing stopwords.
:rtype: :class:`frozenset`
"""
global _THAI_STOPWORDS
if not _THAI_STOPWORDS:
_THAI_STOPWORDS = get_corpus(_THAI_STOPWORDS_FILENAME)
return _THAI_STOPWORDS
def thai_negations() -> FrozenSet[str]:
"""
Return a frozenset of Thai negation words including "ไม่" and "แต่".
\n(See: `dev/pythainlp/corpus/negations_th.txt\
<https://github.com/PyThaiNLP/pythainlp/blob/dev/pythainlp/corpus/negations_th.txt>`_)
:return: :class:`frozenset` containing negations in Thai language.
:rtype: :class:`frozenset`
"""
global _THAI_NEGATIONS
if not _THAI_NEGATIONS:
_THAI_NEGATIONS = get_corpus(_THAI_NEGATIONS_FILENAME)
return _THAI_NEGATIONS
def thai_family_names() -> FrozenSet[str]:
"""
Return a frozenset of Thai family names
\n(See: `dev/pythainlp/corpus/family_names_th.txt\
<https://github.com/PyThaiNLP/pythainlp/blob/dev/pythainlp/corpus/family_names_th.txt>`_)
:return: :class:`frozenset` containing Thai family names.
:rtype: :class:`frozenset`
"""
global _THAI_FAMLIY_NAMES
if not _THAI_FAMLIY_NAMES:
_THAI_FAMLIY_NAMES = get_corpus(_THAI_FAMLIY_NAMES_FILENAME)
return _THAI_FAMLIY_NAMES
def thai_female_names() -> FrozenSet[str]:
"""
Return a frozenset of Thai female names
\n(See: `dev/pythainlp/corpus/person_names_female_th.txt\
<https://github.com/PyThaiNLP/pythainlp/blob/dev/pythainlp/corpus/person_names_female_th.txt>`_)
:return: :class:`frozenset` containing Thai female names.
:rtype: :class:`frozenset`
"""
global _THAI_FEMALE_NAMES
if not _THAI_FEMALE_NAMES:
_THAI_FEMALE_NAMES = get_corpus(_THAI_FEMALE_NAMES_FILENAME)
return _THAI_FEMALE_NAMES
def thai_male_names() -> FrozenSet[str]:
"""
Return a frozenset of Thai male names
\n(See: `dev/pythainlp/corpus/person_names_male_th.txt\
<https://github.com/PyThaiNLP/pythainlp/blob/dev/pythainlp/corpus/person_names_male_th.txt>`_)
:return: :class:`frozenset` containing Thai male names.
:rtype: :class:`frozenset`
"""
global _THAI_MALE_NAMES
if not _THAI_MALE_NAMES:
_THAI_MALE_NAMES = get_corpus(_THAI_MALE_NAMES_FILENAME)
return _THAI_MALE_NAMES
def thai_dict() -> dict:
"""
Return Thai dictionary with definition from wiktionary.
\n(See: `thai_dict\
<https://pythainlp.github.io/pythainlp-corpus/thai_dict.html>`_)
:return: Thai word with part-of-speech type and definition
    :rtype: dict
"""
global _THAI_DICT
if _THAI_DICT == {}:
import csv
_THAI_DICT = {"word":[], "meaning":[]}
with open(get_corpus_path("thai_dict"), newline="\n", encoding="utf-8") as csvfile:
reader = csv.DictReader(csvfile, delimiter=",")
for row in reader:
_THAI_DICT["word"].append(row["word"])
_THAI_DICT["meaning"].append(row["meaning"])
return _THAI_DICT
def thai_wsd_dict() -> dict:
"""
Return Thai Word Sense Disambiguation dictionary with definition from wiktionary.
\n(See: `thai_dict\
<https://pythainlp.github.io/pythainlp-corpus/thai_dict.html>`_)
:return: Thai word with part-of-speech type and definition
:rtype: :class:`frozenset`
"""
global _THAI_WSD_DICT
if _THAI_WSD_DICT == {}:
_thai_wsd = thai_dict()
_THAI_WSD_DICT = {"word":[],"meaning":[]}
for i,j in zip(_thai_wsd["word"],_thai_wsd["meaning"]):
_all_value = list(eval(j).values())
_use = []
for k in _all_value:
_use.extend(k)
_use=list(set(_use))
if len(_use)>1:
_THAI_WSD_DICT["word"].append(i)
_THAI_WSD_DICT["meaning"].append(_use)
return _THAI_WSD_DICT | 9,930 | 30.932476 | 100 | py |
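A short sketch of how the word-list accessors above are typically used; the membership checks simply reuse example words from the docstrings.

```python
from pythainlp.corpus.common import (
    provinces,
    thai_negations,
    thai_stopwords,
    thai_words,
)

words = thai_words()  # frozenset, loaded once and cached in the module
print(len(words), "กดดัน" in words)
print("ไม่" in thai_negations())
print("การ" in thai_stopwords())

# With details=True, provinces() returns a list of dicts instead of a frozenset.
print(provinces(details=True)[0])
```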
pythainlp-dev/pythainlp/corpus/conceptnet.py | pythainlp-dev/pythainlp/corpus/conceptnet.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Get data from ConceptNet API at http://conceptnet.io
"""
import requests
def edges(word: str, lang: str = "th"):
"""
Get edges from `ConceptNet <http://api.conceptnet.io/>`_ API.
ConceptNet is a public semantic network, designed to help computers
understand the meanings of words that people use.
For example, the term "ConceptNet" is a "knowledge graph", and
"knowledge graph" has "common sense knowledge" which is a part of
"artificial inteligence". Also, "ConcepNet" is used for
"natural language understanding" which is a part of
"artificial intelligence".
| "ConceptNet" --is a--> "knowledge graph" --has--> "common sense"\
--a part of--> "artificial intelligence"
| "ConceptNet" --used for--> "natural language understanding"\
--a part of--> "artificial intelligence"
With this illustration, it shows relationships (represented as *Edge*)
between the terms (represented as *Node*)
:param str word: word to be sent to ConceptNet API
:param str lang: abbreviation of language (i.e. *th* for Thai, *en* for
                     English, or *ja* for Japanese). By default, it is *th*
(Thai).
:return: return edges of the given word according to the
ConceptNet network.
:rtype: list[dict]
:Example:
::
from pythainlp.corpus.conceptnet import edges
edges('hello', lang='en')
# output:
# [{
# '@id': '/a/[/r/IsA/,/c/en/hello/,/c/en/greeting/]',
# '@type': 'Edge',
# 'dataset': '/d/conceptnet/4/en',
# 'end': {'@id': '/c/en/greeting',
# '@type': 'Node',
# 'label': 'greeting',
# 'language': 'en',
# 'term': '/c/en/greeting'},
# 'license': 'cc:by/4.0',
# 'rel': {'@id': '/r/IsA', '@type': 'Relation', 'label': 'IsA'},
# 'sources': [
# {
# '@id': '/and/[/s/activity/omcs/vote/,/s/contributor/omcs/bmsacr/]',
# '@type': 'Source',
# 'activity': '/s/activity/omcs/vote',
# 'contributor': '/s/contributor/omcs/bmsacr'
# },
# {
# '@id': '/and/[/s/activity/omcs/vote/,/s/contributor/omcs/test/]',
# '@type': 'Source',
# 'activity': '/s/activity/omcs/vote',
# 'contributor': '/s/contributor/omcs/test'}
# ],
# 'start': {'@id': '/c/en/hello',
# '@type': 'Node',
# 'label': 'Hello',
# 'language': 'en',
# 'term': '/c/en/hello'},
# 'surfaceText': '[[Hello]] is a kind of [[greeting]]',
# 'weight': 3.4641016151377544
# }, ...]
edges('สวัสดี', lang='th')
# output:
# [{
# '@id': '/a/[/r/RelatedTo/,/c/th/สวัสดี/n/,/c/en/prosperity/]',
# '@type': 'Edge',
# 'dataset': '/d/wiktionary/en',
# 'end': {'@id': '/c/en/prosperity',
# '@type': 'Node',
# 'label': 'prosperity',
# 'language': 'en',
# 'term': '/c/en/prosperity'},
# 'license': 'cc:by-sa/4.0',
# 'rel': {
# '@id': '/r/RelatedTo', '@type': 'Relation',
# 'label': 'RelatedTo'},
# 'sources': [{
# '@id': '/and/[/s/process/wikiparsec/2/,/s/resource/wiktionary/en/]',
# '@type': 'Source',
# 'contributor': '/s/resource/wiktionary/en',
# 'process': '/s/process/wikiparsec/2'}],
# 'start': {'@id': '/c/th/สวัสดี/n',
# '@type': 'Node',
# 'label': 'สวัสดี',
# 'language': 'th',
# 'sense_label': 'n',
# 'term': '/c/th/สวัสดี'},
# 'surfaceText': None,
# 'weight': 1.0
# }, ...]
"""
obj = requests.get(f"https://api.conceptnet.io/c/{lang}/{word}").json()
return obj["edges"]
| 4,489 | 35.504065 | 79 | py |
pythainlp-dev/pythainlp/corpus/core.py | pythainlp-dev/pythainlp/corpus/core.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Corpus related functions.
"""
import os
from typing import Union
import json
from pythainlp.corpus import corpus_db_path, corpus_db_url, corpus_path
from pythainlp.tools import get_full_data_path
from pythainlp import __version__
_CHECK_MODE = os.getenv("PYTHAINLP_READ_MODE")
def get_corpus_db(url: str):
"""
Get corpus catalog from server.
    :param str url: URL of the corpus catalog
"""
import requests
corpus_db = None
try:
corpus_db = requests.get(url)
except requests.exceptions.HTTPError as http_err:
print(f"HTTP error occurred: {http_err}")
except Exception as err:
print(f"Non-HTTP error occurred: {err}")
return corpus_db
def get_corpus_db_detail(name: str, version: str = None) -> dict:
"""
Get details about a corpus, using information from local catalog.
    :param str name: corpus name
:return: details about a corpus
:rtype: dict
"""
with open(corpus_db_path(), "r", encoding="utf-8-sig") as f:
local_db = json.load(f)
if version is None:
for corpus in local_db["_default"].values():
if corpus["name"] == name:
return corpus
else:
for corpus in local_db["_default"].values():
if corpus["name"] == name and corpus["version"] == version:
return corpus
return dict()
def path_pythainlp_corpus(filename: str) -> str:
"""
    Get the path of a file in the bundled pythainlp/corpus directory
:param str filename: filename of the corpus to be read
    :return: path of the corpus file
:rtype: str
"""
return os.path.join(corpus_path(), filename)
def get_corpus(filename: str, as_is: bool = False) -> Union[frozenset, list]:
"""
Read corpus data from file and return a frozenset or a list.
Each line in the file will be a member of the set or the list.
By default, a frozenset will be return, with whitespaces stripped, and
empty values and duplicates removed.
If as_is is True, a list will be return, with no modifications
in member values and their orders.
:param str filename: filename of the corpus to be read
:return: :class:`frozenset` or :class:`list` consists of lines in the file
:rtype: :class:`frozenset` or :class:`list`
:Example:
::
from pythainlp.corpus import get_corpus
get_corpus('negations_th.txt')
# output:
# frozenset({'แต่', 'ไม่'})
get_corpus('ttc_freq.txt')
# output:
# frozenset({'โดยนัยนี้\\t1',
# 'ตัวบท\\t10',
# 'หยิบยื่น\\t3',
# ...})
"""
path = path_pythainlp_corpus(filename)
lines = []
with open(path, "r", encoding="utf-8-sig") as fh:
lines = fh.read().splitlines()
if as_is:
return lines
lines = [line.strip() for line in lines]
return frozenset(filter(None, lines))
def get_corpus_default_db(name: str, version: str = None) -> Union[str, None]:
"""
Get model path from default_db.json
:param str name: corpus name
    :return: path to the corpus or **None** if the corpus doesn't \
        exist on the device
:rtype: str
If you want edit default_db.json, \
you can edit in pythainlp/corpus/default_db.json
"""
default_db_path = path_pythainlp_corpus("default_db.json")
with open(default_db_path, encoding="utf-8-sig") as fh:
corpus_db = json.load(fh)
if name in list(corpus_db.keys()):
if version in list(corpus_db[name]["versions"].keys()):
return path_pythainlp_corpus(
corpus_db[name]["versions"][version]["filename"]
)
elif version is None: # load latest version
version = corpus_db[name]["latest_version"]
return path_pythainlp_corpus(
corpus_db[name]["versions"][version]["filename"]
)
def get_corpus_path(
name: str, version: str = None, force: bool = False
) -> Union[str, None]:
"""
Get corpus path.
:param str name: corpus name
:param str version: version
:param bool force: force download
    :return: path to the corpus or **None** if the corpus doesn't \
        exist on the device
:rtype: str
:Example:
(Please see the filename from
`this file
<https://pythainlp.github.io/pythainlp-corpus/db.json>`_
If the corpus already exists::
from pythainlp.corpus import get_corpus_path
print(get_corpus_path('ttc'))
# output: /root/pythainlp-data/ttc_freq.txt
If the corpus has not been downloaded yet::
from pythainlp.corpus import download, get_corpus_path
print(get_corpus_path('wiki_lm_lstm'))
# output: None
download('wiki_lm_lstm')
# output:
# Download: wiki_lm_lstm
# wiki_lm_lstm 0.32
# thwiki_lm.pth?dl=1: 1.05GB [00:25, 41.5MB/s]
# /root/pythainlp-data/thwiki_model_lstm.pth
print(get_corpus_path('wiki_lm_lstm'))
# output: /root/pythainlp-data/thwiki_model_lstm.pth
"""
    # To override the path of a corpus, add an entry to _CUSTOMIZE below.
    # A matching name is returned directly, skipping the lookup and download steps.
_CUSTOMIZE = {
# "the corpus name":"path"
}
if name in list(_CUSTOMIZE.keys()):
return _CUSTOMIZE[name]
default_path = get_corpus_default_db(name=name, version=version)
if default_path is not None:
return default_path
# check if the corpus is in local catalog, download if not
corpus_db_detail = get_corpus_db_detail(name, version=version)
if not corpus_db_detail or not corpus_db_detail.get("filename"):
download(name, version=version, force=force)
corpus_db_detail = get_corpus_db_detail(name, version=version)
if corpus_db_detail and corpus_db_detail.get("filename"):
# corpus is in the local catalog, get full path to the file
if corpus_db_detail.get("is_folder"):
path = get_full_data_path(corpus_db_detail.get("foldername"))
else:
path = get_full_data_path(corpus_db_detail.get("filename"))
# check if the corpus file actually exists, download if not
if not os.path.exists(path):
download(name, version=version, force=force)
if os.path.exists(path):
return path
return None
def _download(url: str, dst: str) -> int:
"""
Download helper.
@param: url to download file
@param: dst place to put the file
"""
_CHUNK_SIZE = 64 * 1024 # 64 KiB
import requests
from urllib.request import urlopen
file_size = int(urlopen(url).info().get("Content-Length", -1))
r = requests.get(url, stream=True)
with open(get_full_data_path(dst), "wb") as f:
pbar = None
try:
from tqdm.auto import tqdm
pbar = tqdm(total=int(r.headers["Content-Length"]))
except ImportError:
pbar = None
for chunk in r.iter_content(chunk_size=_CHUNK_SIZE):
if chunk:
f.write(chunk)
if pbar:
pbar.update(len(chunk))
if pbar:
pbar.close()
else:
print("Done.")
return file_size
def _check_hash(dst: str, md5: str) -> None:
"""
Check hash helper.
@param: dst place to put the file
    @param: md5 expected MD5 hash of the file
"""
if md5 and md5 != "-":
import hashlib
with open(get_full_data_path(dst), "rb") as f:
content = f.read()
file_md5 = hashlib.md5(content).hexdigest()
if md5 != file_md5:
raise Exception("Hash does not match expected.")
def _version2int(v: str) -> int:
"""
X.X.X => X0X0X
"""
if "-" in v:
v = v.split("-")[0]
if v.endswith(".*"):
v = v.replace(".*", ".0") # X.X.* => X.X.0
v_list = v.split(".")
if len(v_list) < 3:
v_list.append("0")
v_new = ""
for i, value in enumerate(v_list):
if i != 0:
if len(value) < 2:
v_new += "0" + value
else:
v_new += value
else:
v_new += value
return int(v_new)
def _check_version(cause: str) -> bool:
temp = cause
check = False
__version = __version__
if "dev" in __version:
__version = __version.split("dev")[0]
elif "beta" in __version:
__version = __version.split("beta")[0]
v = _version2int(__version)
if cause == "*":
check = True
elif cause.startswith("==") and ">" not in cause and "<" not in cause:
temp = cause.replace("==", "")
check = v == _version2int(temp)
elif cause.startswith(">=") and "<" not in cause:
temp = cause.replace(">=", "")
check = v >= _version2int(temp)
elif cause.startswith(">") and "<" not in cause:
temp = cause.replace(">", "")
check = v > _version2int(temp)
elif cause.startswith(">=") and "<=" not in cause and "<" in cause:
temp = cause.replace(">=", "").split("<")
check = v >= _version2int(temp[0]) and v < _version2int(temp[1])
elif cause.startswith(">=") and "<=" in cause:
temp = cause.replace(">=", "").split("<=")
check = v >= _version2int(temp[0]) and v <= _version2int(temp[1])
elif cause.startswith(">") and "<" in cause:
temp = cause.replace(">", "").split("<")
check = v > _version2int(temp[0]) and v < _version2int(temp[1])
elif cause.startswith("<="):
temp = cause.replace("<=", "")
        check = v <= _version2int(temp)
elif cause.startswith("<"):
temp = cause.replace("<", "")
        check = v < _version2int(temp)
return check
def download(
name: str, force: bool = False, url: str = None, version: str = None
) -> bool:
"""
Download corpus.
The available corpus names can be seen in this file:
https://pythainlp.github.io/pythainlp-corpus/db.json
:param str name: corpus name
:param bool force: force download
:param str url: URL of the corpus catalog
:param str version: Version of the corpus
    :return: **True** if the corpus is found and successfully downloaded.
Otherwise, it returns **False**.
:rtype: bool
:Example:
::
from pythainlp.corpus import download
download('wiki_lm_lstm', force=True)
# output:
# Corpus: wiki_lm_lstm
# - Downloading: wiki_lm_lstm 0.1
# thwiki_lm.pth: 26%|██▌ | 114k/434k [00:00<00:00, 690kB/s]
By default, downloaded corpus and model will be saved in
``$HOME/pythainlp-data/``
(e.g. ``/Users/bact/pythainlp-data/wiki_lm_lstm.pth``).
"""
if _CHECK_MODE == "1":
print("PyThaiNLP is read-only mode. It can't download.")
return False
if not url:
url = corpus_db_url()
corpus_db = get_corpus_db(url)
if not corpus_db:
print(f"Cannot download corpus catalog from: {url}")
return False
corpus_db = corpus_db.json()
# check if corpus is available
if name in corpus_db:
with open(corpus_db_path(), "r", encoding="utf-8-sig") as f:
local_db = json.load(f)
corpus = corpus_db[name]
print("Corpus:", name)
if version is None:
for v, file in corpus["versions"].items():
if _check_version(file["pythainlp_version"]):
version = v
# version may still be None here
if version not in corpus["versions"]:
print("Not found corpus")
return False
elif (
_check_version(corpus["versions"][version]["pythainlp_version"])
is False
):
print("Versions Corpus not support")
return False
corpus_versions = corpus["versions"][version]
file_name = corpus_versions["filename"]
found = ""
for i, item in local_db["_default"].items():
# Do not check version here
if item["name"] == name:
# Record corpus no. if found in local database
found = i
break
# If not found in local, download
if force or not found:
print(f"- Downloading: {name} {version}")
_download(
corpus_versions["download_url"],
file_name,
)
_check_hash(
file_name,
corpus_versions["md5"],
)
is_folder = False
foldername = None
if corpus_versions["is_tar_gz"] == "True":
import tarfile
is_folder = True
foldername = name + "_" + str(version)
if not os.path.exists(get_full_data_path(foldername)):
os.mkdir(get_full_data_path(foldername))
with tarfile.open(get_full_data_path(file_name)) as tar:
tar.extractall(path=get_full_data_path(foldername))
elif corpus_versions["is_zip"] == "True":
import zipfile
is_folder = True
foldername = name + "_" + str(version)
if not os.path.exists(get_full_data_path(foldername)):
os.mkdir(get_full_data_path(foldername))
with zipfile.ZipFile(
get_full_data_path(file_name), "r"
) as zip:
zip.extractall(path=get_full_data_path(foldername))
if found:
local_db["_default"][found]["version"] = version
local_db["_default"][found]["filename"] = file_name
local_db["_default"][found]["is_folder"] = is_folder
local_db["_default"][found]["foldername"] = foldername
else:
# This awkward behavior is for backward-compatibility with
# database files generated previously using TinyDB
if local_db["_default"]:
corpus_no = (
max((int(no) for no in local_db["_default"])) + 1
)
else:
corpus_no = 1
local_db["_default"][str(corpus_no)] = {
"name": name,
"version": version,
"filename": file_name,
"is_folder": is_folder,
"foldername": foldername,
}
with open(corpus_db_path(), "w", encoding="utf-8") as f:
json.dump(local_db, f, ensure_ascii=False)
# Check if versions match if the corpus is found in local database
# but a re-download is not forced
else:
current_ver = local_db["_default"][found]["version"]
if current_ver == version:
# Already has the same version
print("- Already up to date.")
else:
# Has the corpus but different version
print(f"- Existing version: {current_ver}")
print(f"- New version available: {version}")
print("- Use download(data_name, force=True) to update")
return True
print("Corpus not found:", name)
return False
def remove(name: str) -> bool:
"""
Remove corpus
:param str name: corpus name
    :return: **True** if the corpus is found and successfully removed.
Otherwise, it returns **False**.
:rtype: bool
:Example:
::
from pythainlp.corpus import remove, get_corpus_path, get_corpus
print(remove('ttc'))
# output: True
print(get_corpus_path('ttc'))
# output: None
get_corpus('ttc')
# output:
# FileNotFoundError: [Errno 2] No such file or directory:
# '/usr/local/lib/python3.6/dist-packages/pythainlp/corpus/ttc'
"""
if _CHECK_MODE == "1":
print("PyThaiNLP is read-only mode. It can't remove corpus.")
return False
with open(corpus_db_path(), "r", encoding="utf-8-sig") as f:
db = json.load(f)
data = [
corpus for corpus in db["_default"].values() if corpus["name"] == name
]
if data:
path = get_corpus_path(name)
if data[0].get("is_folder"):
import shutil
os.remove(get_full_data_path(data[0].get("filename")))
shutil.rmtree(path, ignore_errors=True)
else:
os.remove(path)
for i, corpus in db["_default"].copy().items():
if corpus["name"] == name:
del db["_default"][i]
with open(corpus_db_path(), "w", encoding="utf-8") as f:
json.dump(db, f, ensure_ascii=False)
return True
return False
def get_path_folder_corpus(name, version, *path):
return os.path.join(get_corpus_path(name, version), *path)
| 17,493 | 30.128114 | 85 | py |
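A small illustration of the private version helpers defined above, since their docstrings are terse; the expected values follow from the packing rule (every part after the first is zero-padded to two digits).

```python
from pythainlp.corpus.core import _check_version, _version2int

print(_version2int("2.3.1"))  # 20301
print(_version2int("2.3"))    # 20300 (a missing patch part is treated as 0)

# _check_version() tests a constraint string against the running PyThaiNLP version.
print(_check_version("*"))      # always True
print(_check_version(">=2.0"))  # True on PyThaiNLP 2.0 and later
```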
pythainlp-dev/pythainlp/corpus/corpus_license.md | pythainlp-dev/pythainlp/corpus/corpus_license.md | # Corpus License
- Corpora, datasets, and documentation created by PyThaiNLP project are released under [Creative Commons Zero 1.0 Universal Public Domain Dedication License](https://creativecommons.org/publicdomain/zero/1.0/) (CC0).
- Language models created by PyThaiNLP project are released under [Creative Commons Attribution 4.0 International Public License](https://creativecommons.org/licenses/by/4.0/) (CC-by).
- For more information about corpora that PyThaiNLP uses, see [https://github.com/PyThaiNLP/pythainlp-corpus/](https://github.com/PyThaiNLP/pythainlp-corpus/).
## Dictionaries and Word Lists
The following word lists are created by PyThaiNLP project and released under
**Creative Commons Zero 1.0 Universal Public Domain Dedication License**
https://creativecommons.org/publicdomain/zero/1.0/
Filename | Description
---------|------------
countries_th.txt | List of countries in Thai
etcc.txt | List of Enhanced Thai Character Clusters
negations_th.txt | Negation word list
stopwords_th.txt | Stop word list
syllables_th.txt | List of Thai syllables
thailand_provinces_th.csv | List of Thailand provinces in Thai
tnc_freq.txt | Words and their frequencies, from Thai National Corpus
ttc_freq.txt | Words and their frequencies, from Thai Textbook Corpus
words_th.txt | List of Thai words
words_th_thai2fit_201810.txt | List of Thai words (frozen for thai2fit)
The following word lists are from **Thai Male and Female Names Corpus**
https://github.com/korkeatw/thai-names-corpus/ by Korkeat Wannapat
and released under their original license, which is
**Creative Commons Attribution-ShareAlike 4.0 International Public License**
https://creativecommons.org/licenses/by-sa/4.0/
Filename | Description
---------|------------
family_names_th.txt | List of family names in Thailand
person_names_female_th.txt | List of female names in Thailand
person_names_male_th.txt | List of male names in Thailand
## Models
The following language models are created by PyThaiNLP project
and released under
**Creative Commons Attribution 4.0 International Public License**
https://creativecommons.org/licenses/by/4.0/
Filename | Description
---------|------------
pos_orchid_perceptron.pkl | Part-of-speech tagging model, trained from ORCHID data, using perceptron
pos_orchid_unigram.json | Part-of-speech tagging model, trained from ORCHID data, using unigram
pos_ud_perceptron.pkl | Part-of-speech tagging model, trained from Parallel Universal Dependencies treebank, using perceptron
pos_ud_unigram.json | Part-of-speech tagging model, trained from Parallel Universal Dependencies treebank, using unigram
sentenceseg_crfcut.model | Sentence segmentation model, trained from TED subtitles, using CRF
## Thai WordNet
Thai WordNet (wordnet_th.db) is created by Thai Computational Linguistic
Laboratory at National Institute of Information and Communications
Technology (NICT), Japan, and released under the following license:
```
Copyright: 2011 NICT
Thai WordNet
This software and database is being provided to you, the LICENSEE, by
the National Institute of Information and Communications Technology
under the following license. By obtaining, using and/or copying this
software and database, you agree that you have read, understood, and
will comply with these terms and conditions:
Permission to use, copy, modify and distribute this software and
database and its documentation for any purpose and without fee or
royalty is hereby granted, provided that you agree to comply with
the following copyright notice and statements, including the
disclaimer, and that the same appear on ALL copies of the software,
database and documentation, including modifications that you make
for internal use or for distribution.
Thai WordNet Copyright 2011 by the National Institute of
Information and Communications Technology (NICT). All rights
reserved.
THIS SOFTWARE AND DATABASE IS PROVIDED "AS IS" AND NICT MAKES NO
REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED. BY WAY OF EXAMPLE,
BUT NOT LIMITATION, NICT MAKES NO REPRESENTATIONS OR WARRANTIES OF
MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE USE
OF THE LICENSED SOFTWARE, DATABASE OR DOCUMENTATION WILL NOT INFRINGE
ANY THIRD PARTY PATENTS, COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS.
The name of the National Institute of Information and Communications
Technology may not be used in advertising or publicity pertaining to
distribution of the software and/or database. Title to copyright in
this software, database and any associated documentation shall at all
times remain with National Institute of Information and Communications
Technology and LICENSEE agrees to preserve same.
```
For more information about Thai WordNet, see
S. Thoongsup et al., ‘Thai WordNet construction’,
in Proceedings of the 7th Workshop on Asian Language Resources,
Suntec, Singapore, Aug. 2009, pp. 139–144.
https://www.aclweb.org/anthology/W09-3420.pdf
| 4,939 | 46.5 | 216 | md |
pythainlp-dev/pythainlp/corpus/oscar.py | pythainlp-dev/pythainlp/corpus/oscar.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Thai unigram word frequency from OSCAR Corpus (icu word tokenize)
Credit: Korakot Chaovavanich
https://web.facebook.com/groups/colab.thailand/permalink/1524070061101680/
"""
__all__ = ["word_freqs", "unigram_word_freqs"]
from collections import defaultdict
from typing import List, Tuple
from pythainlp.corpus import get_corpus_path
_FILENAME = "oscar_icu"
def word_freqs() -> List[Tuple[str, int]]:
"""
Get word frequency from OSCAR Corpus (icu word tokenize)
"""
word_freqs = []
_path = get_corpus_path(_FILENAME)
with open(_path, "r", encoding="utf-8-sig") as f:
_data = [i for i in f.readlines()]
del _data[0]
for line in _data:
_temp = line.strip().split(",")
if len(_temp) >= 2:
if _temp[0] != " " and '"' not in _temp[0]:
word_freqs.append((_temp[0], int(_temp[1])))
elif _temp[0] == " ":
word_freqs.append(("<s/>", int(_temp[1])))
return word_freqs
def unigram_word_freqs() -> defaultdict:
"""
Get unigram word frequency from OSCAR Corpus (icu word tokenize)
"""
_path = get_corpus_path(_FILENAME)
_word_freqs = defaultdict(int)
with open(_path, "r", encoding="utf-8-sig") as fh:
_data = [i for i in fh.readlines()]
del _data[0]
for i in _data:
_temp = i.strip().split(",")
if _temp[0] != " " and '"' not in _temp[0]:
_word_freqs[_temp[0]] = int(_temp[-1])
elif _temp[0] == " ":
_word_freqs["<s/>"] = int(_temp[-1])
return _word_freqs
| 2,240 | 31.478261 | 74 | py |
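A usage sketch for the OSCAR frequency helpers above. The underlying oscar_icu corpus is resolved through get_corpus_path(), so the first call may trigger a download; the word looked up is only an example.

```python
from pythainlp.corpus.oscar import unigram_word_freqs, word_freqs

pairs = word_freqs()           # list of (word, count); spaces appear as "<s/>"
print(pairs[:5])

counts = unigram_word_freqs()  # defaultdict(int) keyed by word
print(counts["ประเทศ"])         # 0 if the word is not present
```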
pythainlp-dev/pythainlp/corpus/th_en_translit.py | pythainlp-dev/pythainlp/corpus/th_en_translit.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Thai-English Transliteration Dictionary v1.4
Wannaphong Phatthiyaphaibun. (2022).
wannaphong/thai-english-transliteration-dictionary: v1.4 (v1.4).
Zenodo. https://doi.org/10.5281/zenodo.6716672
"""
__all__ = [
"get_transliteration_dict",
"TRANSLITERATE_EN",
"TRANSLITERATE_FOLLOW_RTSG",
]
from collections import defaultdict
from pythainlp.corpus import path_pythainlp_corpus
_FILE_NAME = "th_en_transliteration_v1.4.tsv"
TRANSLITERATE_EN = "en"
TRANSLITERATE_FOLLOW_RTSG = "follow_rtsg"
def get_transliteration_dict() -> defaultdict:
"""
Get transliteration dictionary for Thai to English.
    The returned dict maps each Thai word to {TRANSLITERATE_EN: List[str], TRANSLITERATE_FOLLOW_RTSG: List[Optional[bool]]}.
"""
path = path_pythainlp_corpus(_FILE_NAME)
if not path:
raise FileNotFoundError(
f"Unable to load transliteration dictionary. "
f"{_FILE_NAME} is not found under pythainlp/corpus."
)
# use list, one word can have multiple transliterations.
trans_dict = defaultdict(
lambda: {TRANSLITERATE_EN: [], TRANSLITERATE_FOLLOW_RTSG: []}
)
try:
with open(path, "r", encoding="utf-8") as f:
# assume first row contains column names, skipped.
for line in f.readlines()[1:]:
stripped = line.strip()
if stripped:
th, *en_checked = stripped.split("\t")
# replace in-between whitespaces to prevent mismatch results from different tokenizers.
# e.g. "บอยแบนด์"
# route 1: "บอยแบนด์" -> ["บอย", "แบนด์"] -> ["boy", "band"] -> "boyband"
# route 2: "บอยแบนด์" -> [""บอยแบนด์""] -> ["boy band"] -> "boy band"
en_translit = en_checked[0].replace(" ", "")
trans_dict[th][TRANSLITERATE_EN].append(en_translit)
en_follow_rtgs = (
bool(en_checked[1]) if len(en_checked) == 2 else None
)
trans_dict[th][TRANSLITERATE_FOLLOW_RTSG].append(
en_follow_rtgs
)
except ValueError:
raise ValueError(
f"Unable to parse {_FILE_NAME}."
f"Make sure it is a 3-column tab-separated file with header."
)
else:
return trans_dict
TRANSLITERATE_DICT = get_transliteration_dict()
| 3,039 | 34.764706 | 107 | py |
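A sketch of how the transliteration dictionary above can be queried. The lookup word is illustrative and may not be in the data; because the structure is a defaultdict, a missing word simply yields empty lists.

```python
from pythainlp.corpus.th_en_translit import (
    TRANSLITERATE_DICT,
    TRANSLITERATE_EN,
    TRANSLITERATE_FOLLOW_RTSG,
)

entry = TRANSLITERATE_DICT["คอมพิวเตอร์"]
print(entry[TRANSLITERATE_EN])           # list of English transliterations
print(entry[TRANSLITERATE_FOLLOW_RTSG])  # parallel list of RTGS-compliance flags
```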
pythainlp-dev/pythainlp/corpus/tnc.py | pythainlp-dev/pythainlp/corpus/tnc.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Thai National Corpus word frequency
"""
__all__ = [
"word_freqs",
"unigram_word_freqs",
"bigram_word_freqs",
"trigram_word_freqs",
]
from collections import defaultdict
from typing import List, Tuple
from pythainlp.corpus import get_corpus
from pythainlp.corpus import get_corpus_path
_FILENAME = "tnc_freq.txt"
_BIGRAM = "tnc_bigram_word_freqs"
_TRIGRAM = "tnc_trigram_word_freqs"
def word_freqs() -> List[Tuple[str, int]]:
"""
Get word frequency from Thai National Corpus (TNC)
\n(See: `dev/pythainlp/corpus/tnc_freq.txt\
<https://github.com/PyThaiNLP/pythainlp/blob/dev/pythainlp/corpus/tnc_freq.txt>`_)
Credit: Korakot Chaovavanich https://bit.ly/3wSkZsF
"""
lines = list(get_corpus(_FILENAME))
word_freqs = []
for line in lines:
word_freq = line.split("\t")
if len(word_freq) >= 2:
word_freqs.append((word_freq[0], int(word_freq[1])))
return word_freqs
def unigram_word_freqs() -> defaultdict:
"""
Get unigram word frequency from Thai National Corpus (TNC)
"""
lines = list(get_corpus(_FILENAME))
_word_freqs = defaultdict(int)
for i in lines:
        _temp = i.strip().split("\t")
if len(_temp) >= 2:
_word_freqs[_temp[0]] = int(_temp[-1])
return _word_freqs
def bigram_word_freqs() -> defaultdict:
"""
Get bigram word frequency from Thai National Corpus (TNC)
"""
_path = get_corpus_path(_BIGRAM)
_word_freqs = defaultdict(int)
with open(_path, "r", encoding="utf-8-sig") as fh:
for i in fh.readlines():
            _temp = i.strip().split("\t")
_word_freqs[(_temp[0], _temp[1])] = int(_temp[-1])
return _word_freqs
def trigram_word_freqs() -> defaultdict:
"""
Get trigram word frequency from Thai National Corpus (TNC)
"""
_path = get_corpus_path(_TRIGRAM)
_word_freqs = defaultdict(int)
with open(_path, "r", encoding="utf-8-sig") as fh:
for i in fh.readlines():
            _temp = i.strip().split("\t")
_word_freqs[(_temp[0], _temp[1], _temp[2])] = int(_temp[-1])
return _word_freqs
| 2,755 | 27.708333 | 86 | py |
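A usage sketch for the TNC frequency helpers above. The unigram counts ship with the package, while the bigram table is downloaded on first use; the specific keys are only examples, and a defaultdict returns 0 for unseen keys.

```python
from pythainlp.corpus.tnc import bigram_word_freqs, unigram_word_freqs

unigrams = unigram_word_freqs()
print(unigrams["ประเทศ"])

bigrams = bigram_word_freqs()  # downloads tnc_bigram_word_freqs if needed
print(bigrams[("ที่", "จะ")])
```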
pythainlp-dev/pythainlp/corpus/ttc.py | pythainlp-dev/pythainlp/corpus/ttc.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Thai Textbook Corpus (TTC) word frequency
Credit: Korakot Chaovavanich
https://www.facebook.com/photo.php?fbid=363640477387469&set=gm.434330506948445&type=3&permPage=1
"""
__all__ = ["word_freqs", "unigram_word_freqs"]
from collections import defaultdict
from typing import List, Tuple
from pythainlp.corpus import get_corpus
_FILENAME = "ttc_freq.txt"
def word_freqs() -> List[Tuple[str, int]]:
"""
Get word frequency from Thai Textbook Corpus (TTC)
\n(See: `dev/pythainlp/corpus/ttc_freq.txt\
<https://github.com/PyThaiNLP/pythainlp/blob/dev/pythainlp/corpus/ttc_freq.txt>`_)
"""
lines = list(get_corpus(_FILENAME))
word_freqs = []
for line in lines:
word_freq = line.split("\t")
if len(word_freq) >= 2:
word_freqs.append((word_freq[0], int(word_freq[1])))
return word_freqs
def unigram_word_freqs() -> defaultdict:
"""
Get unigram word frequency from Thai Textbook Corpus (TTC)
"""
lines = list(get_corpus(_FILENAME))
_word_freqs = defaultdict(int)
for i in lines:
        _temp = i.strip().split("\t")
if len(_temp) >= 2:
_word_freqs[_temp[0]] = int(_temp[-1])
return _word_freqs
| 1,824 | 29.416667 | 96 | py |
pythainlp-dev/pythainlp/corpus/util.py | pythainlp-dev/pythainlp/corpus/util.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tool for create word list
code is from Korakot Chaovavanich.
:See also:
* `Facebook post \
<https://www.facebook.com/groups/colab.thailand/permalink/1667821073393244>`_
* `Google Colab \
<https://colab.research.google.com/drive/19kY2jCHONuxmTJM0U8PIE_I5OK1rO-x_>`_
"""
from collections import Counter
from typing import Callable, Iterable, Iterator, List, Set, Tuple
from pythainlp.corpus import thai_words
from pythainlp.tokenize import newmm
from pythainlp.util import Trie
def index_pairs(words: List[str]) -> Iterator[Tuple[int, int]]:
"""
    Return beginning and ending index pairs of words
"""
i = 0
for w in words:
yield i, i + len(w)
i += len(w)
def find_badwords(
tokenize: Callable[[str], List[str]],
training_data: Iterable[Iterable[str]],
) -> Set[str]:
"""
Find words that do not work well with the `tokenize` function
for the provided `training_data`.
:param Callable[[str], List[str]] tokenize: a tokenize function
:param Iterable[Iterable[str]] training_data: tokenized text, to be used\
as a training set
    :return: words considered to make `tokenize` perform poorly
:rtype: Set[str]
"""
right = Counter()
wrong = Counter()
for train_words in training_data:
train_set = set(index_pairs(train_words))
test_words = tokenize("".join(train_words))
test_pairs = index_pairs(test_words)
for w, p in zip(test_words, test_pairs):
if p in train_set:
right[w] += 1
else:
wrong[w] += 1
# if wrong more than right, then it's a bad word
bad_words = []
for w, count in wrong.items():
if count > right[w]:
bad_words.append(w)
return set(bad_words)
def revise_wordset(
tokenize: Callable[[str], List[str]],
orig_words: Iterable[str],
training_data: Iterable[Iterable[str]],
) -> Set[str]:
"""
Revise a set of word that could improve tokenization performance of
a dictionary-based `tokenize` function.
    `orig_words` will be used as a base set for the dictionary.
    Words that do not perform well with `training_data` will be removed.
The remaining words will be returned.
:param Callable[[str], List[str]] tokenize: a tokenize function, can be\
any function that takes a string as input and returns a List[str]
:param Iterable[str] orig_words: words that used by the tokenize function,\
will be used as a base for revision
:param Iterable[Iterable[str]] training_data: tokenized text, to be used\
as a training set
    :return: the revised set of words, with poorly performing words removed
:rtype: Set[str]
:Example::
::
from pythainlp.corpus import thai_words
from pythainlp.corpus.util import revise_wordset
        from pythainlp.tokenize.longest import segment
        from pythainlp.util import Trie
base_words = thai_words()
more_words = {
"ถวิล อุดล", "ทองอินทร์ ภูริพัฒน์", "เตียง ศิริขันธ์", "จำลอง ดาวเรือง"
}
base_words = base_words.union(more_words)
        dict_trie = Trie(base_words)
tokenize = lambda text: segment(text, dict_trie)
training_data = [
            [str, str, str, ...],
[str, str, str, str, ...],
...
]
        revised_words = revise_wordset(tokenize, base_words, training_data)
"""
bad_words = find_badwords(tokenize, training_data)
return set(orig_words) - bad_words
def revise_newmm_default_wordset(
training_data: Iterable[Iterable[str]],
) -> Set[str]:
"""
Revise a set of word that could improve tokenization performance of
`pythainlp.tokenize.newmm`, a dictionary-based tokenizer and a default
tokenizer for PyThaiNLP.
Words from `pythainlp.corpus.thai_words()` will be used as a base set
    for the dictionary. Words that do not perform well with `training_data`
will be removed. The remaining words will be returned.
:param Iterable[Iterable[str]] training_data: tokenized text, to be used\
as a training set
    :return: the revised set of words, with poorly performing words removed
:rtype: Set[str]
"""
orig_words = thai_words()
trie = Trie(orig_words)
def tokenize(text):
return newmm.segment(text, custom_dict=trie)
revised_words = revise_wordset(tokenize, orig_words, training_data)
return revised_words
| 4,996 | 31.23871 | 85 | py |
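A small sketch of the helpers above using a toy training set; the sentences are illustrative, and a realistic revision would need a much larger gold-tokenized corpus.

```python
from pythainlp.corpus.util import index_pairs, revise_newmm_default_wordset

# index_pairs() yields the character span of each token in a gold tokenization.
print(list(index_pairs(["ผม", "กิน", "ข้าว"])))  # [(0, 2), (2, 5), (5, 9)]

training_data = [
    ["ผม", "กิน", "ข้าว"],
    ["แมว", "กิน", "ปลา"],
]
revised = revise_newmm_default_wordset(training_data)
print(len(revised))  # size of the revised default word set
```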
pythainlp-dev/pythainlp/corpus/wordnet.py | pythainlp-dev/pythainlp/corpus/wordnet.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
NLTK WordNet wrapper
API here is exactly the same as NLTK WordNet API,
except that lang (language) argument will be "tha" (Thai) by default.
For more on usage, see NLTK Howto:
https://www.nltk.org/howto/wordnet.html
"""
import nltk
try:
nltk.data.find("corpora/omw")
except LookupError:
nltk.download("omw")
try:
nltk.data.find("corpora/wordnet")
except LookupError:
nltk.download("wordnet")
from nltk.corpus import wordnet
def synsets(word: str, pos: str = None, lang: str = "tha"):
"""
This function return the synonym sets for all lemmas given the word
with an optional argument to constrain the part of speech of the word.
:param str word: word to find its synsets
:param str pos: the part of speech constraint (i.e. *n* for Noun, *v*
for Verb, *a* for Adjective, *s* for Adjective
satellites, and *r* for Adverb)
:param str lang: abbreviation of language (i.e. *eng*, *tha*).
By default, it is *tha*
:return: :class:`Synset` for all lemmas for the word constrained with
the argument *pos*.
:rtype: list[:class:`Synset`]
:Example:
>>> from pythainlp.corpus.wordnet import synsets
>>>
>>> synsets("ทำงาน")
[Synset('function.v.01'), Synset('work.v.02'),
Synset('work.v.01'), Synset('work.v.08')]
>>>
>>> synsets("บ้าน", lang="tha"))
[Synset('duplex_house.n.01'), Synset('dwelling.n.01'),
Synset('house.n.01'), Synset('family.n.01'), Synset('home.n.03'),
Synset('base.n.14'), Synset('home.n.01'),
Synset('houseful.n.01'), Synset('home.n.07')]
When specifying the part of speech constrain. For example,
the word "แรง" cound be interpreted as force (n.) or hard (adj.).
>>> from pythainlp.corpus.wordnet import synsets
>>> # By default, accept all part of speech
>>> synsets("แรง", lang="tha")
>>>
>>> # only Noun
>>> synsets("แรง", pos="n", lang="tha")
[Synset('force.n.03'), Synset('force.n.02')]
>>>
>>> # only Adjective
>>> synsets("แรง", pos="a", lang="tha")
[Synset('hard.s.10'), Synset('strong.s.02')]
"""
return wordnet.synsets(lemma=word, pos=pos, lang=lang)
def synset(name_synsets):
"""
This function return the synonym set (synset) given the name of synset
(i.e. 'dog.n.01', 'chase.v.01').
:param str name_synsets: name of the sysset
:return: :class:`Synset` of the given name
:rtype: :class:`Synset`
:Example:
>>> from pythainlp.corpus.wordnet import synset
>>>
>>> difficult = synset('difficult.a.01')
>>> difficult
Synset('difficult.a.01')
>>>
>>> difficult.definition()
'not easy; requiring great physical or mental effort to accomplish
or comprehend or endure'
"""
return wordnet.synset(name_synsets)
def all_lemma_names(pos: str = None, lang: str = "tha"):
"""
This function returns all lemma names for all synsets for the given
part of speech tag and language. If part of speech tag is not
specified, all synsets for all part of speech will be used.
:param str pos: the part of speech constraint (i.e. *n* for Noun,
*v* for Verb, *a* for Adjective, *s* for
Adjective satellites, and *r* for Adverb).
By default, *pos* is **None**.
:param str lang: abbreviation of language (i.e. *eng*, *tha*).
By default, it is *tha*.
    :return: :class:`Synset` of lemma names given the pos and language
:rtype: list[:class:`Synset`]
:Example:
>>> from pythainlp.corpus.wordnet import all_lemma_names
>>>
>>> all_lemma_names()
['อเมริโก_เวสปุชชี',
'เมืองชีย์เอนเน',
'การรับเลี้ยงบุตรบุญธรรม',
'ผู้กัด',
'ตกแต่งเรือด้วยธง',
'จิโอวานนิ_เวอร์จินิโอ',...]
>>>
>>> len(all_lemma_names())
80508
>>>
>>> all_lemma_names(pos="a")
['ซึ่งไม่มีแอลกอฮอล์',
'ซึ่งตรงไปตรงมา',
'ที่เส้นศูนย์สูตร',
'ทางจิตใจ',...]
>>>
>>> len(all_lemma_names(pos="a"))
5277
"""
return wordnet.all_lemma_names(pos=pos, lang=lang)
def all_synsets(pos: str = None):
"""
This function iterates over all synsets constrained by given
part of speech tag.
:param str pos: part of speech tag
:return: list of synsets constrained by given part of speech tag.
:rtype: Iterable[:class:`Synset`]
:Example:
>>> from pythainlp.corpus.wordnet import all_synsets
>>>
>>> generator = all_synsets(pos="n")
>>> next(generator)
Synset('entity.n.01')
>>> next(generator)
Synset('physical_entity.n.01')
>>> next(generator)
Synset('abstraction.n.06')
>>>
>>> generator = all_synsets()
>>> next(generator)
Synset('able.a.01')
>>> next(generator)
Synset('unable.a.01')
"""
return wordnet.all_synsets(pos=pos)
def langs():
"""
This function return a set of ISO-639 language codes.
:return: ISO-639 language codes
:rtype: list[str]
:Example:
>>> from pythainlp.corpus.wordnet import langs
>>> langs()
['eng', 'als', 'arb', 'bul', 'cat', 'cmn', 'dan',
'ell', 'eus', 'fas', 'fin', 'fra', 'glg', 'heb',
'hrv', 'ind', 'ita', 'jpn', 'nld', 'nno', 'nob',
'pol', 'por', 'qcn', 'slv', 'spa', 'swe', 'tha',
'zsm']
"""
return wordnet.langs()
def lemmas(word: str, pos: str = None, lang: str = "tha"):
"""
This function returns all lemmas given the word with an optional
argument to constrain the part of speech of the word.
    :param str word: word to find its lemmas
:param str pos: the part of speech constraint (i.e. *n* for Noun,
*v* for Verb, *a* for Adjective, *s* for
Adjective satellites, and *r* for Adverb)
:param str lang: abbreviation of language (i.e. *eng*, *tha*).
By default, it is *tha*.
    :return: :class:`Synset` for all lemmas for the word constrained
with the argument *pos*.
:rtype: list[:class:`Lemma`]
:Example:
>>> from pythainlp.corpus.wordnet import lemmas
>>>
>>> lemmas("โปรด")
[Lemma('like.v.03.โปรด'), Lemma('like.v.02.โปรด')]
>>> print(lemmas("พระเจ้า"))
[Lemma('god.n.01.พระเจ้า'), Lemma('godhead.n.01.พระเจ้า'),
Lemma('father.n.06.พระเจ้า'), Lemma('god.n.03.พระเจ้า')]
When specify the part of speech tag.
>>> from pythainlp.corpus.wordnet import lemmas
>>>
>>> lemmas("ม้วน")
[Lemma('roll.v.18.ม้วน'), Lemma('roll.v.17.ม้วน'),
Lemma('roll.v.08.ม้วน'), Lemma('curl.v.01.ม้วน'),
Lemma('roll_up.v.01.ม้วน'), Lemma('wind.v.03.ม้วน'),
Lemma('roll.n.11.ม้วน')]
>>>
>>> # only lammas with Noun as the part of speech
>>> lemmas("ม้วน", pos="n")
[Lemma('roll.n.11.ม้วน')]
"""
return wordnet.lemmas(word, pos=pos, lang=lang)
def lemma(name_synsets):
"""
This function return lemma object given the name.
.. note::
Support only English language (*eng*).
:param str name_synsets: name of the synset
:return: lemma object with the given name
:rtype: :class:`Lemma`
:Example:
>>> from pythainlp.corpus.wordnet import lemma
>>>
>>> lemma('practice.v.01.exercise')
Lemma('practice.v.01.exercise')
>>>
>>> lemma('drill.v.03.exercise')
Lemma('drill.v.03.exercise')
>>>
>>> lemma('exercise.n.01.exercise')
Lemma('exercise.n.01.exercise')
"""
return wordnet.lemma(name_synsets)
def lemma_from_key(key):
"""
This function returns lemma object given the lemma key.
    This is similar to :func:`lemma`, but it takes the key
    of the lemma instead of the name.
.. note::
Support only English language (*eng*).
:param str key: key of the lemma object
:return: lemma object with the given key
:rtype: :class:`Lemma`
:Example:
>>> from pythainlp.corpus.wordnet import lemma, lemma_from_key
>>>
>>> practice = lemma('practice.v.01.exercise')
>>> practice.key()
exercise%2:41:00::
>>> lemma_from_key(practice.key())
Lemma('practice.v.01.exercise')
"""
return wordnet.lemma_from_key(key)
def path_similarity(synsets1, synsets2):
"""
This function returns similarity between two synsets based on the
shortest path distance from the equation as follows.
.. math::
path\\_similarity = {1 \\over shortest\\_path\\_distance(synsets1,
synsets2) + 1}
The shortest path distance is calculated by the connection through
    the is-a (hypernym/hyponym) taxonomy. The score is in the range
0 to 1. Path similarity of 1 indicates identicality.
:param `Synset` synsets1: first synset supplied to measures
the path similarity
:param `Synset` synsets2: second synset supplied to measures
the path similarity
:return: path similarity between two synsets
:rtype: float
:Example:
>>> from pythainlp.corpus.wordnet import path_similarity, synset
>>>
>>> entity = synset('entity.n.01')
>>> obj = synset('object.n.01')
>>> cat = synset('cat.n.01')
>>>
>>> path_similarity(entity, obj)
0.3333333333333333
>>> path_similarity(entity, cat)
0.07142857142857142
>>> path_similarity(obj, cat)
0.08333333333333333
"""
return wordnet.path_similarity(synsets1, synsets2)
def lch_similarity(synsets1, synsets2):
"""
This function returns Leacock Chodorow similarity (LCH)
between two synsets, based on the shortest path distance
and the maximum depth of the taxonomy. The equation to
calculate LCH similarity is shown below:
.. math::
        lch\\_similarity = -log({shortest\\_path\\_distance(synsets1,
        synsets2) \\over 2 * taxonomy\\_depth})
:param `Synset` synsets1: first synset supplied to measures
the LCH similarity
:param `Synset` synsets2: second synset supplied to measures
the LCH similarity
:return: LCH similarity between two synsets
:rtype: float
:Example:
>>> from pythainlp.corpus.wordnet import lch_similarity, synset
>>>
>>> entity = synset('entity.n.01')
>>> obj = synset('object.n.01')
>>> cat = synset('cat.n.01')
>>>
>>> lch_similarity(entity, obj)
2.538973871058276
>>> lch_similarity(entity, cat)
0.9985288301111273
>>> lch_similarity(obj, cat)
1.1526795099383855
"""
return wordnet.lch_similarity(synsets1, synsets2)
def wup_similarity(synsets1, synsets2):
"""
This function returns Wu-Palmer similarity (WUP) between two synsets,
based on the depth of the two senses in the taxonomy and their
Least Common Subsumer (most specific ancestor node).
:param `Synset` synsets1: first synset supplied to measures
the WUP similarity
:param `Synset` synsets2: second synset supplied to measures
the WUP similarity
:return: WUP similarity between two synsets
:rtype: float
:Example:
>>> from pythainlp.corpus.wordnet import wup_similarity, synset
>>>
>>> entity = synset('entity.n.01')
>>> obj = synset('object.n.01')
>>> cat = synset('cat.n.01')
>>>
>>> wup_similarity(entity, obj)
0.5
>>> wup_similarity(entity, cat)
0.13333333333333333
>>> wup_similarity(obj, cat)
0.35294117647058826
"""
return wordnet.wup_similarity(synsets1, synsets2)
def morphy(form, pos: str = None):
"""
This function finds a possible base form for the given form,
with the given part of speech.
:param str form: the form to finds the base form
:param str pos: part of speech tag of words to be searched
:return: base form of the given form
:rtype: str
:Example:
>>> from pythainlp.corpus.wordnet import morphy
>>>
>>> morphy("dogs")
'dogs'
>>>
>>> morphy("thieves")
'thief'
>>>
>>> morphy("mixed")
'mix'
>>>
>>> morphy("calculated")
'calculate'
"""
return wordnet.morphy(form, pos=pos)
def custom_lemmas(tab_file, lang: str):
"""
This function reads a custom tab file
(see: http://compling.hss.ntu.edu.sg/omw/)
containing mappings of lemmas in the given language.
:param tab_file: Tab file as a file or file-like object
:param str lang: abbreviation of language (i.e. *eng*, *tha*).
"""
return wordnet.custom_lemmas(tab_file, lang)
| 13,894 | 29.605727 | 74 | py |
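The `custom_lemmas` wrapper above has no usage example in its docstring. A minimal sketch follows; the local file name `wn-data-tha.tab` is an assumption (any tab file in the Open Multilingual Wordnet format), not part of the library.

# Hedged sketch: load custom Thai lemma mappings from a hypothetical tab file
# "wn-data-tha.tab" in the Open Multilingual Wordnet format.
from pythainlp.corpus.wordnet import custom_lemmas, synsets

with open("wn-data-tha.tab", encoding="utf-8") as tab_file:  # assumed local path
    custom_lemmas(tab_file, lang="tha")

# After loading, Thai lemmas from the tab file are available for lookups.
print(synsets("หมา", lang="tha"))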
pythainlp-dev/pythainlp/generate/__init__.py | pythainlp-dev/pythainlp/generate/__init__.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Thai Text generate
"""
__all__ = ["Unigram", "Bigram", "Trigram"]
from pythainlp.generate.core import Unigram, Bigram, Trigram
| 747 | 33 | 74 | py |
pythainlp-dev/pythainlp/generate/core.py | pythainlp-dev/pythainlp/generate/core.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Text generator using n-gram language model
code from
https://towardsdatascience.com/understanding-word-n-grams-and-n-gram-probability-in-natural-language-processing-9d9eef0fa058
"""
import random
from pythainlp.corpus.tnc import unigram_word_freqs as tnc_word_freqs_unigram
from pythainlp.corpus.tnc import bigram_word_freqs as tnc_word_freqs_bigram
from pythainlp.corpus.tnc import trigram_word_freqs as tnc_word_freqs_trigram
from pythainlp.corpus.ttc import unigram_word_freqs as ttc_word_freqs_unigram
from pythainlp.corpus.oscar import (
unigram_word_freqs as oscar_word_freqs_unigram,
)
from typing import List, Union
class Unigram:
"""
Text generator using Unigram
:param str name: corpus name
* *tnc* - Thai National Corpus (default)
* *ttc* - Thai Textbook Corpus (TTC)
* *oscar* - OSCAR Corpus
"""
def __init__(self, name: str = "tnc"):
if name == "tnc":
self.counts = tnc_word_freqs_unigram()
elif name == "ttc":
self.counts = ttc_word_freqs_unigram()
elif name == "oscar":
self.counts = oscar_word_freqs_unigram()
self.word = list(self.counts.keys())
self.n = 0
for i in self.word:
self.n += self.counts[i]
self.prob = {i: self.counts[i] / self.n for i in self.word}
self._word_prob = {}
def gen_sentence(
self,
start_seq: str = None,
N: int = 3,
prob: float = 0.001,
output_str: bool = True,
duplicate: bool = False,
) -> Union[List[str], str]:
"""
:param str start_seq: word to start the sentence with; if None, a random word is chosen.
:param int N: number of words to generate.
:param float prob: minimum unigram probability for candidate words.
:param bool output_str: if True, return the result as a single string.
:param bool duplicate: if True, allow duplicate words in the sentence.
:return: list of words, or a string if ``output_str`` is True
:rtype: List[str], str
:Example:
::
from pythainlp.generate import Unigram
gen = Unigram()
gen.gen_sentence("แมว")
# output: 'แมวเวลานะนั้น'
"""
if start_seq is None:
start_seq = random.choice(self.word)
rand_text = start_seq.lower()
self._word_prob = {
i: self.counts[i] / self.n
for i in self.word
if self.counts[i] / self.n >= prob
}
return self._next_word(
rand_text, N, output_str, prob=prob, duplicate=duplicate
)
def _next_word(
self,
text: str,
N: int,
output_str: str,
prob: float,
duplicate: bool = False,
):
self.words = []
self.words.append(text)
self._word_list = list(self._word_prob.keys())
if N > len(self._word_list):
N = len(self._word_list)
for i in range(N):
self._word = random.choice(self._word_list)
if duplicate is False:
while self._word in self.words:
self._word = random.choice(self._word_list)
self.words.append(self._word)
if output_str:
return "".join(self.words)
return self.words
class Bigram:
"""
Text generator using Bigram
:param str name: corpus name
* *tnc* - Thai National Corpus (default)
"""
def __init__(self, name: str = "tnc"):
if name == "tnc":
self.uni = tnc_word_freqs_unigram()
self.bi = tnc_word_freqs_bigram()
self.uni_keys = list(self.uni.keys())
self.bi_keys = list(self.bi.keys())
self.words = [i[-1] for i in self.bi_keys]
def prob(self, t1: str, t2: str) -> float:
"""
Probability of the bigram (t1, t2), estimated as count(t1, t2) / count(t1).
:param str t1: first word
:param str t2: second word
:return: probability value
:rtype: float
"""
try:
v = self.bi[(t1, t2)] / self.uni[t1]
except ZeroDivisionError:
v = 0.0
return v
def gen_sentence(
self,
start_seq: str = None,
N: int = 4,
prob: float = 0.001,
output_str: bool = True,
duplicate: bool = False,
) -> Union[List[str], str]:
"""
:param str start_seq: word to start the sentence with; if None, a random word is chosen.
:param int N: maximum number of words to generate.
:param float prob: minimum bigram probability for the next word.
:param bool output_str: if True, return the result as a single string.
:param bool duplicate: if True, allow duplicate words in the sentence.
:return: list of words, or a string if ``output_str`` is True
:rtype: List[str], str
:Example:
::
from pythainlp.generate import Bigram
gen = Bigram()
gen.gen_sentence("แมว")
# output: 'แมวไม่ได้รับเชื้อมัน'
"""
if start_seq is None:
start_seq = random.choice(self.words)
self.late_word = start_seq
self.list_word = []
self.list_word.append(start_seq)
for i in range(N):
if duplicate:
self._temp = [
j for j in self.bi_keys if j[0] == self.late_word
]
else:
self._temp = [
j
for j in self.bi_keys
if j[0] == self.late_word and j[1] not in self.list_word
]
self._probs = [
self.prob(self.late_word, next_word[-1])
for next_word in self._temp
]
self._p2 = [j for j in self._probs if j >= prob]
if len(self._p2) == 0:
break
self.items = self._temp[self._probs.index(random.choice(self._p2))]
self.late_word = self.items[-1]
self.list_word.append(self.late_word)
if output_str:
return "".join(self.list_word)
return self.list_word
class Trigram:
"""
Text generator using Trigram
:param str name: corpus name
* *tnc* - Thai National Corpus (default)
"""
def __init__(self, name: str = "tnc"):
if name == "tnc":
self.uni = tnc_word_freqs_unigram()
self.bi = tnc_word_freqs_bigram()
self.ti = tnc_word_freqs_trigram()
self.uni_keys = list(self.uni.keys())
self.bi_keys = list(self.bi.keys())
self.ti_keys = list(self.ti.keys())
self.words = [i[-1] for i in self.bi_keys]
def prob(self, t1: str, t2: str, t3: str) -> float:
"""
Probability of the trigram (t1, t2, t3), estimated as count(t1, t2, t3) / count(t1, t2).
:param str t1: first word
:param str t2: second word
:param str t3: third word
:return: probability value
:rtype: float
"""
try:
v = self.ti[(t1, t2, t3)] / self.bi[(t1, t2)]
except ZeroDivisionError:
v = 0.0
return v
def gen_sentence(
self,
start_seq: str = None,
N: int = 4,
prob: float = 0.001,
output_str: bool = True,
duplicate: bool = False,
) -> Union[List[str], str]:
"""
:param str start_seq: starting sequence; if None, a random word pair from the corpus is chosen.
:param int N: maximum number of words to generate.
:param float prob: minimum trigram probability for the next word.
:param bool output_str: if True, return the result as a single string.
:param bool duplicate: if True, allow duplicate words in the sentence.
:return: list of words, or a string if ``output_str`` is True
:rtype: List[str], str
:Example:
::
from pythainlp.generate import Trigram
gen = Trigram()
gen.gen_sentence()
# output: 'ยังทำตัวเป็นเซิร์ฟเวอร์คือ'
"""
if start_seq is None:
start_seq = random.choice(self.bi_keys)
self.late_word = start_seq
self.list_word = []
self.list_word.append(start_seq)
for i in range(N):
if duplicate:
self._temp = [
j for j in self.ti_keys if j[:2] == self.late_word
]
else:
self._temp = [
j
for j in self.ti_keys
if j[:2] == self.late_word and j[1:] not in self.list_word
]
self._probs = [
self.prob(word[0], word[1], word[2]) for word in self._temp
]
self._p2 = [j for j in self._probs if j >= prob]
if len(self._p2) == 0:
break
self.items = self._temp[self._probs.index(random.choice(self._p2))]
self.late_word = self.items[1:]
self.list_word.append(self.late_word)
self.listdata = []
for i in self.list_word:
for j in i:
if j not in self.listdata:
self.listdata.append(j)
if output_str:
return "".join(self.listdata)
return self.listdata
| 9,224 | 28.951299 | 124 | py |
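The `prob` methods above implement the standard maximum-likelihood n-gram estimate. A small self-contained sketch with made-up counts (not taken from the TNC corpus) illustrates the same computation:

# Toy illustration of the bigram estimate P(w2 | w1) = count(w1, w2) / count(w1),
# the same formula used by Bigram.prob above. The counts are made-up numbers.
uni = {"แมว": 10, "กิน": 7}
bi = {("แมว", "กิน"): 3, ("กิน", "ปลา"): 5}

def bigram_prob(t1: str, t2: str) -> float:
    try:
        return bi[(t1, t2)] / uni[t1]
    except (KeyError, ZeroDivisionError):
        return 0.0

print(bigram_prob("แมว", "กิน"))  # 3 / 10 = 0.3
print(bigram_prob("แมว", "ปลา"))  # 0.0 (unseen bigram)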
pythainlp-dev/pythainlp/generate/thai2fit.py | pythainlp-dev/pythainlp/generate/thai2fit.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Thai2fit: Thai Wikipeida Language Model for Text Generation
Code from
https://github.com/PyThaiNLP/tutorials/blob/master/source/notebooks/text_generation.ipynb
"""
__all__ = ["gen_sentence"]
import pandas as pd
import random
import pickle
from typing import List, Union
# fastai
import fastai
from fastai.text import *
# pythainlp
from pythainlp.ulmfit import *
# get dummy data
imdb = untar_data(URLs.IMDB_SAMPLE)
dummy_df = pd.read_csv(imdb / "texts.csv")
# get vocab
thwiki = THWIKI_LSTM
thwiki_itos = pickle.load(open(thwiki["itos_fname"], "rb"))
thwiki_vocab = fastai.text.transform.Vocab(thwiki_itos)
# dummy databunch
tt = Tokenizer(
tok_func=ThaiTokenizer,
lang="th",
pre_rules=pre_rules_th,
post_rules=post_rules_th,
)
processor = [
TokenizeProcessor(tokenizer=tt, chunksize=10000, mark_fields=False),
NumericalizeProcessor(vocab=thwiki_vocab, max_vocab=60000, min_freq=3),
]
data_lm = (
TextList.from_df(dummy_df, imdb, cols=["text"], processor=processor)
.split_by_rand_pct(0.2)
.label_for_lm()
.databunch(bs=64)
)
data_lm.sanity_check()
config = dict(
emb_sz=400,
n_hid=1550,
n_layers=4,
pad_token=1,
qrnn=False,
tie_weights=True,
out_bias=True,
output_p=0.25,
hidden_p=0.1,
input_p=0.2,
embed_p=0.02,
weight_p=0.15,
)
trn_args = dict(drop_mult=0.9, clip=0.12, alpha=2, beta=1)
learn = language_model_learner(
data_lm, AWD_LSTM, config=config, pretrained=False, **trn_args
)
# load pretrained models
learn.load_pretrained(**thwiki)
def gen_sentence(
start_seq: str = None,
N: int = 4,
prob: float = 0.001,
output_str: bool = True,
) -> Union[List[str], str]:
"""
Text generator using Thai2fit
:param str start_seq: word to start the sentence with; if None, a random word is chosen.
:param int N: number of words to generate.
:param float prob: minimum probability (``min_p``) for predicted tokens.
:param bool output_str: if True, return the result as a single string.
:return: list of words, or a string if ``output_str`` is True
:rtype: List[str], str
:Example:
::
from pythainlp.generate.thai2fit import gen_sentence
gen_sentence()
# output: 'แคทรียา อิงลิช (นักแสดง'
gen_sentence("แมว")
# output: 'แมว คุณหลวง '
"""
if start_seq is None:
start_seq = random.choice(list(thwiki_itos))
list_word = learn.predict(
start_seq, N, temperature=0.8, min_p=prob, sep="-*-"
).split("-*-")
if output_str:
return "".join(list_word)
return list_word
| 3,070 | 23.373016 | 89 | py |
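Note that this module builds the dummy databunch and loads the thai2fit weights at import time, so the first import downloads data and can take a while. A minimal usage sketch:

# Minimal usage sketch; importing the module triggers the model download/load.
from pythainlp.generate.thai2fit import gen_sentence

print(gen_sentence("ประเทศไทย", N=5))       # continue from a given word
print(gen_sentence(N=5, output_str=False))  # random start word, list output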
pythainlp-dev/pythainlp/generate/wangchanglm.py | pythainlp-dev/pythainlp/generate/wangchanglm.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import torch
class WangChanGLM:
def __init__(self):
self.exclude_pattern = re.compile(r'[^ก-๙]+')
self.stop_token = "\n"
self.PROMPT_DICT = {
"prompt_input": (
"<context>: {input}\n<human>: {instruction}\n<bot>: "
),
"prompt_no_input": (
"<human>: {instruction}\n<bot>: "
),
"prompt_chatbot": (
"<human>: {human}\n<bot>: {bot}"
),
}
def is_exclude(self, text:str)->bool:
return bool(self.exclude_pattern.search(text))
def load_model(
self,
model_path:str="pythainlp/wangchanglm-7.5B-sft-en-sharded",
return_dict:bool=True,
load_in_8bit:bool=False,
device:str="cuda",
torch_dtype=torch.float16,
offload_folder:str="./",
low_cpu_mem_usage:bool=True
):
"""
Load the model and tokenizer.
:param str model_path: path or Hugging Face Hub name of the model
:param bool return_dict: whether the model returns a dict output
:param bool load_in_8bit: load the model weights in 8-bit mode
:param str device: device to run on (cpu, cuda, or other)
:param torch_dtype torch_dtype: torch data type (e.g. torch.float16)
:param str offload_folder: folder used for offloading model weights
:param bool low_cpu_mem_usage: reduce CPU memory usage while loading
"""
import pandas as pd
from transformers import AutoModelForCausalLM, AutoTokenizer
self.device = device
self.torch_dtype = torch_dtype
self.model_path = model_path
self.model = AutoModelForCausalLM.from_pretrained(
self.model_path,
return_dict=return_dict,
load_in_8bit=load_in_8bit,
device_map=device,
torch_dtype=torch_dtype,
offload_folder=offload_folder,
low_cpu_mem_usage=low_cpu_mem_usage
)
self.tokenizer = AutoTokenizer.from_pretrained(self.model_path)
self.df = pd.DataFrame(self.tokenizer.vocab.items(), columns=['text', 'idx'])
self.df['is_exclude'] = self.df.text.map(self.is_exclude)
self.exclude_ids = self.df[self.df.is_exclude==True].idx.tolist()
def gen_instruct(
self,
text:str,
max_new_tokens:int=512,
top_p:float=0.95,
temperature:float=0.9,
top_k:int=50,
no_repeat_ngram_size:int=2,
typical_p:float=1.,
thai_only:bool=True,
skip_special_tokens:bool=True
):
"""
Generate text from a fully formatted prompt.
:param str text: the formatted prompt text
:param int max_new_tokens: maximum number of new tokens to generate
:param float top_p: nucleus sampling probability
:param float temperature: sampling temperature
:param int top_k: top-k sampling cutoff
:param int no_repeat_ngram_size: size of n-grams that must not repeat
:param float typical_p: typical decoding probability
:param bool thai_only: restrict generation to Thai tokens
:param bool skip_special_tokens: skip special tokens when decoding
:return: the generated answer
:rtype: str
"""
batch = self.tokenizer(text, return_tensors="pt")
with torch.autocast(device_type=self.device, dtype=self.torch_dtype):
if thai_only:
output_tokens = self.model.generate(
input_ids=batch["input_ids"],
max_new_tokens=max_new_tokens, # 512
begin_suppress_tokens = self.exclude_ids,
no_repeat_ngram_size=no_repeat_ngram_size,
#oasst k50
top_k=top_k,
top_p=top_p, # 0.95
typical_p=typical_p,
temperature=temperature, # 0.9
)
else:
output_tokens = self.model.generate(
input_ids=batch["input_ids"],
max_new_tokens=max_new_tokens, # 512
no_repeat_ngram_size=no_repeat_ngram_size,
#oasst k50
top_k=top_k,
top_p=top_p, # 0.95
typical_p=typical_p,
temperature=temperature, # 0.9
)
return self.tokenizer.decode(output_tokens[0][len(batch["input_ids"][0]):], skip_special_tokens=skip_special_tokens)
def instruct_generate(
self,
instruct: str,
context: str = None,
max_new_tokens=512,
temperature: float =0.9,
top_p: float = 0.95,
top_k:int=50,
no_repeat_ngram_size:int=2,
typical_p:float=1,
thai_only:bool=True,
skip_special_tokens:bool=True
):
"""
Generate an answer for an instruction, optionally with context.
:param str instruct: the instruction text
:param str context: optional context for the instruction
:param int max_new_tokens: maximum number of new tokens to generate
:param float top_p: nucleus sampling probability
:param float temperature: sampling temperature
:param int top_k: top-k sampling cutoff
:param int no_repeat_ngram_size: size of n-grams that must not repeat
:param float typical_p: typical decoding probability
:param bool thai_only: restrict generation to Thai tokens
:param bool skip_special_tokens: skip special tokens when decoding
:return: the generated answer
:rtype: str
"""
if context is None or context == "":
prompt = self.PROMPT_DICT['prompt_no_input'].format_map(
{'instruction': instruct, 'input': ''}
)
else:
prompt = self.PROMPT_DICT['prompt_input'].format_map(
{'instruction': instruct, 'input': context}
)
result = self.gen_instruct(
prompt,
max_new_tokens=max_new_tokens,
top_p=top_p,
top_k=top_k,
temperature=temperature,
no_repeat_ngram_size=no_repeat_ngram_size,
typical_p=typical_p,
thai_only=thai_only,
skip_special_tokens=skip_special_tokens
)
return result
| 6,451 | 35.451977 | 124 | py |
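A minimal usage sketch for the class above, assuming a CUDA device with enough memory for the 7.5-billion-parameter checkpoint (otherwise pass device="cpu" and a suitable torch_dtype); the model path shown is the default used by load_model.

# Hedged usage sketch of WangChanGLM; assumes a CUDA device with enough memory.
import torch
from pythainlp.generate.wangchanglm import WangChanGLM

model = WangChanGLM()
model.load_model(
    model_path="pythainlp/wangchanglm-7.5B-sft-en-sharded",  # default checkpoint
    device="cuda",
    torch_dtype=torch.float16,
)
print(model.instruct_generate(instruct="แต่งกลอนวันแม่", max_new_tokens=256))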
pythainlp-dev/pythainlp/khavee/__init__.py | pythainlp-dev/pythainlp/khavee/__init__.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = ["KhaveeVerifier"]
from pythainlp.khavee.core import KhaveeVerifier
| 693 | 37.555556 | 74 | py |
pythainlp-dev/pythainlp/khavee/core.py | pythainlp-dev/pythainlp/khavee/core.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Union
from pythainlp.tokenize import subword_tokenize
from pythainlp.util import sound_syllable
from pythainlp.util import remove_tonemark
class KhaveeVerifier:
def __init__(self):
"""
KhaveeVerifier: Thai Poetry verifier
"""
pass
def check_sara(self, word: str)-> str:
"""
Check the vowels in the Thai word.
:param str word: Thai word
:return: name of the vowel of the word.
:rtype: str
:Example:
::
from pythainlp.khavee import KhaveeVerifier
kv = KhaveeVerifier()
print(kv.check_sara('เริง'))
# output: 'เออ'
"""
sara = []
countoa = 0
# In case of karan (cancellation mark ์)
if '์' in word[-1]:
word = word[:-2]
# In case of single vowels (สระเดี่ยว)
for i in word:
if i == 'ะ' or i == 'ั':
sara.append('อะ')
elif i == 'ิ':
sara.append('อิ')
elif i == 'ุ':
sara.append('อุ')
elif i == 'ึ':
sara.append('อึ')
elif i == 'ี':
sara.append('อี')
elif i == 'ู':
sara.append('อู')
elif i == 'ื':
sara.append('อือ')
elif i == 'เ':
sara.append('เอ')
elif i == 'แ':
sara.append('แอ')
elif i == 'า':
sara.append('อา')
elif i == 'โ':
sara.append('โอ')
elif i == 'ำ':
sara.append('อำ')
elif i == 'อ':
countoa += 1
sara.append('ออ')
elif i == 'ั' and 'ว' in word:
sara.append('อัว')
elif i == 'ไ' or i == 'ใ':
sara.append('ไอ')
elif i == '็':
sara.append('ออ')
elif 'รร' in word:
if self.check_marttra(word) == 'กม':
sara.append('อำ')
else:
sara.append('อะ')
# In case of ออ
if countoa == 1 and 'อ' in word[-1] and 'เ' not in word:
sara.remove('ออ')
# In case เอ เอ
countA = 0
for i in sara:
if i == 'เอ':
countA = countA + 1
if countA > 1:
sara.remove('เอ')
sara.remove('เอ')
sara.append('แ')
# In case of compound vowels (สระประสม)
if 'เอ' in sara and 'อะ' in sara:
sara.remove('เอ')
sara.remove('อะ')
sara.append('เอะ')
elif 'แอ' in sara and 'อะ' in sara:
sara.remove('แอ')
sara.remove('อะ')
sara.append('แอะ')
if 'เอะ' in sara and 'ออ' in sara:
sara.remove('เอะ')
sara.remove('ออ')
sara.append('เออะ')
elif 'เอ' in sara and 'อิ' in sara:
sara.remove('เอ')
sara.remove('อิ')
sara.append('เออ')
elif 'เอ' in sara and 'ออ' in sara and 'อ' in word[-1]:
sara.remove('เอ')
sara.remove('ออ')
sara.append('เออ')
elif 'โอ' in sara and 'อะ' in sara:
sara.remove('โอ')
sara.remove('อะ')
sara.append('โอะ')
elif 'เอ' in sara and 'อี' in sara:
sara.remove('เอ')
sara.remove('อี')
sara.append('เอีย')
elif 'เอ' in sara and 'อือ' in sara:
sara.remove('เอ')
sara.remove('อือ')
sara.append('อัว')
elif 'เอ' in sara and 'อา' in sara:
sara.remove('เอ')
sara.remove('อา')
sara.append('เอา')
elif 'เ' in word and 'า' in word and 'ะ' in word:
sara = []
sara.append('เอาะ')
if 'อือ' in sara and 'เออ' in sara:
sara.remove('เออ')
sara.remove('อือ')
sara.append('เอือ')
elif 'ออ' in sara and len(sara) > 1:
sara.remove('ออ')
elif 'ว' in word and len(sara) == 0:
sara.append('อัว')
if 'ั' in word and self.check_marttra(word) == 'กา':
sara = []
sara.append('ไอ')
# In case อ
if word == 'เออะ':
sara = []
sara.append('เออะ')
elif word == 'เออ':
sara = []
sara.append('เออ')
elif word == 'เอ':
sara = []
sara.append('เอ')
elif word == 'เอะ':
sara = []
sara.append('เอะ')
elif word == 'เอา':
sara = []
sara.append('เอา')
elif word == 'เอาะ':
sara = []
sara.append('เอาะ')
if 'ฤา' in word or 'ฦา' in word:
sara = []
sara.append('อือ')
elif 'ฤ' in word or 'ฦ' in word:
sara = []
sara.append('อึ')
# In case กน
if sara == [] and len(word) == 2:
if word[-1] != 'ร':
sara.append('โอะ')
else:
sara.append('ออ')
elif sara == [] and len(word) == 3:
sara.append('ออ')
# In case of the word บ่
if 'บ่' == word:
sara = []
sara.append('ออ')
if 'ํ' in word:
sara = []
sara.append('อำ')
if 'เ' in word and 'ื' in word and 'อ' in word:
sara = []
sara.append('เอือ')
if sara == []:
return 'Cant find Sara in this word'
else:
return sara[0]
def check_marttra(self, word: str) -> str:
"""
Check the Thai spelling section (matra) of the Thai word.
:param str word: Thai word
:return: name of the spelling section of the word.
:rtype: str
:Example:
::
from pythainlp.khavee import KhaveeVerifier
kv = KhaveeVerifier()
print(kv.check_marttra('สาว'))
# output: 'เกอว'
"""
if word[-1] == 'ร' and word[-2] in ['ต','ท'] :
word = word[:-1]
word = self.handle_karun_sound_silence(word)
word = remove_tonemark(word)
if 'ำ' in word or ('ํ' in word and 'า' in word) or 'ไ' in word or 'ใ' in word:
return 'กา'
elif word[-1] in ['า','ะ','ิ','ี','ุ','ู','อ'] or ('ี' in word and 'ย' in word[-1]) or ('ื' in word and 'อ' in word[-1]):
return 'กา'
elif word[-1] in ['ง']:
return 'กง'
elif word[-1] in ['ม']:
return 'กม'
elif word[-1] in ['ย']:
if 'ั' in word:
return 'กา'
else:
return 'เกย'
elif word[-1] in ['ว']:
return 'เกอว'
elif word[-1] in ['ก','ข','ค','ฆ']:
return 'กก'
elif word[-1] in ['จ','ช','ซ','ฎ','ฏ','ฐ','ฑ','ฒ','ด','ต','ถ','ท','ธ','ศ','ษ','ส'] :
return 'กด'
elif word[-1] in ['ญ',', ณ' ,'น' ,'ร' ,'ล' ,'ฬ']:
return 'กน'
elif word[-1] in ['บ', 'ป', 'พ', 'ฟ', 'ภ']:
return 'กบ'
else:
if '็' in word:
return 'กา'
else:
return 'Cant find Marttra in this word'
def is_sumpus(self, word1: str,word2: str) -> bool:
"""
Check the rhyme between two words.
:param str word1: Thai word
:param str word2: Thai word
:return: True if the two words rhyme, otherwise False
:rtype: bool
:Example:
::
from pythainlp.khavee import KhaveeVerifier
kv = KhaveeVerifier()
print(kv.is_sumpus('สรร','อัน'))
# output: True
print(kv.is_sumpus('สรร','แมว'))
# output: False
"""
marttra1 = self.check_marttra(word1)
marttra2 = self.check_marttra(word2)
sara1 = self.check_sara(word1)
sara2 = self.check_sara(word2)
if sara1 == 'อะ' and marttra1 == 'เกย':
sara1 = 'ไอ'
marttra1 = 'กา'
elif sara2 == 'อะ' and marttra2 == 'เกย':
sara2 = 'ไอ'
marttra2 = 'กา'
if sara1 == 'อำ' and marttra1 == 'กม':
sara1 = 'อำ'
marttra1 = 'กา'
elif sara2 == 'อำ' and marttra2 == 'กม':
sara2 = 'อำ'
marttra2 = 'กา'
if marttra1 == marttra2 and sara1 == sara2:
return True
else:
return False
def check_karu_lahu(self,text):
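"""
Check whether a Thai word/syllable is karu (heavy) or lahu (light).
:param str text: Thai word or syllable
:return: 'karu' or 'lahu'
:rtype: str
"""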
if (self.check_marttra(text) != 'กา' or (self.check_marttra(text) == 'กา' and self.check_sara(text) in ['อา','อี', 'อือ', 'อู', 'เอ', 'แอ', 'โอ', 'ออ', 'เออ', 'เอีย', 'เอือ' ,'อัว']) or self.check_sara(text) in ['อำ','ไอ','เอา']) and text not in ['บ่','ณ','ธ','ก็']:
return 'karu'
else:
return 'lahu'
def check_klon(self, text: str,k_type: int=8) -> Union[List[str], str]:
"""
Check the suitability of the poem according to Thai principles.
:param str text: Thai poem
:param int k_type: Type of Thai poem
:return: a message if the poem follows the principles, otherwise a list of detected violations.
:rtype: Union[List[str], str]
:Example:
::
from pythainlp.khavee import KhaveeVerifier
kv = KhaveeVerifier()
print(kv.check_klon('''ฉันชื่อหมูกรอบ ฉันชอบกินไก่ แล้วก็วิ่งไล่ หมาชื่อนํ้าทอง ลคคนเก่ง เอ๋งเอ๋งคะนอง มีคนจับจอง เขาชื่อน้องเธียร''', k_type=4))
# output: The poem is correct according to the principle.
print(kv.check_klon('''ฉันชื่อหมูกรอบ ฉันชอบกินไก่ แล้วก็วิ่งไล่ หมาชื่อนํ้าทอง ลคคนเก่ง เอ๋งเอ๋งเสียงหมา มีคนจับจอง เขาชื่อน้องเธียร''',k_type=4))
# # -> ["Cant find rhyme between paragraphs ('หมา', 'จอง')in paragraph 2", "Cant find rhyme between paragraphs ('หมา', 'ทอง')in paragraph 2"]
"""
if k_type == 8:
try:
error = []
list_sumpus_sent1 = []
list_sumpus_sent2h = []
list_sumpus_sent2l = []
list_sumpus_sent3 = []
list_sumpus_sent4 = []
for i, sent in enumerate(text.split()):
sub_sent = subword_tokenize(sent,engine='dict')
# print(i)
if len(sub_sent) > 10:
error.append('In the sentence'+str(i+2)+'there are more than 10 words.'+str(sub_sent))
if (i+1) % 4 == 1:
list_sumpus_sent1.append(sub_sent[-1])
elif (i+1) % 4 == 2:
list_sumpus_sent2h.append([sub_sent[1],sub_sent[2],sub_sent[3],sub_sent[4]])
list_sumpus_sent2l.append(sub_sent[-1])
elif (i+1) % 4 == 3:
list_sumpus_sent3.append(sub_sent[-1])
elif (i+1) % 4 == 0:
list_sumpus_sent4.append(sub_sent[-1])
if len(list_sumpus_sent1) != len(list_sumpus_sent2h) or len(list_sumpus_sent2h) != len(list_sumpus_sent2l) or len(list_sumpus_sent2l) != len(list_sumpus_sent3) or len(list_sumpus_sent3) != len(list_sumpus_sent4) or len(list_sumpus_sent4) != len(list_sumpus_sent1):
return 'The poem does not complete 4 sentences.'
else:
for i in range(len(list_sumpus_sent1)):
countwrong = 0
for j in list_sumpus_sent2h[i]:
if self.is_sumpus(list_sumpus_sent1[i],j) == False:
countwrong +=1
if countwrong > 3:
error.append('Cant find rhyme between paragraphs '+str((list_sumpus_sent1[i],list_sumpus_sent2h[i]))+'in paragraph '+str(i+1))
if self.is_sumpus(list_sumpus_sent2l[i],list_sumpus_sent3[i]) == False:
# print(sumpus_sent2l,sumpus_sent3)
error.append('Cant find rhyme between paragraphs '+str((list_sumpus_sent2l[i],list_sumpus_sent3[i]))+'in paragraph '+str(i+1))
if i > 0:
if self.is_sumpus(list_sumpus_sent2l[i],list_sumpus_sent4[i-1]) == False:
error.append('Cant find rhyme between paragraphs '+str((list_sumpus_sent2l[i],list_sumpus_sent4[i-1]))+'in paragraph '+str(i+1))
if error == []:
return 'The poem is correct according to the principle.'
else:
return error
except:
return 'Something went wrong Make sure you enter it in correct form of klon 8.'
elif k_type == 4:
try:
error = []
list_sumpus_sent1 = []
list_sumpus_sent2h = []
list_sumpus_sent2l = []
list_sumpus_sent3 = []
list_sumpus_sent4 = []
for i, sent in enumerate(text.split()):
sub_sent = subword_tokenize(sent,engine='dict')
if len(sub_sent) > 5:
error.append('In the sentence'+str(i+2)+'there are more than 4 words.'+str(sub_sent))
if (i+1) % 4 == 1:
list_sumpus_sent1.append(sub_sent[-1])
elif (i+1) % 4 == 2:
# print([sub_sent[1],sub_sent[2]])
list_sumpus_sent2h.append([sub_sent[1],sub_sent[2]])
list_sumpus_sent2l.append(sub_sent[-1])
elif (i+1) % 4 == 3:
list_sumpus_sent3.append(sub_sent[-1])
elif (i+1) % 4 == 0:
list_sumpus_sent4.append(sub_sent[-1])
if len(list_sumpus_sent1) != len(list_sumpus_sent2h) or len(list_sumpus_sent2h) != len(list_sumpus_sent2l) or len(list_sumpus_sent2l) != len(list_sumpus_sent3) or len(list_sumpus_sent3) != len(list_sumpus_sent4) or len(list_sumpus_sent4) != len(list_sumpus_sent1):
return 'The poem does not complete 4 sentences.'
else:
for i in range(len(list_sumpus_sent1)):
countwrong = 0
for j in list_sumpus_sent2h[i]:
# print(list_sumpus_sent1[i],j)
if self.is_sumpus(list_sumpus_sent1[i],j) == False:
countwrong +=1
if countwrong > 1:
error.append('Cant find rhyme between paragraphs '+str((list_sumpus_sent1[i],list_sumpus_sent2h[i]))+'in paragraph '+str(i+1))
if self.is_sumpus(list_sumpus_sent2l[i],list_sumpus_sent3[i]) == False:
# print(sumpus_sent2l,sumpus_sent3)
error.append('Cant find rhyme between paragraphs '+str((list_sumpus_sent2l[i],list_sumpus_sent3[i]))+'in paragraph '+str(i+1))
if i > 0:
if self.is_sumpus(list_sumpus_sent2l[i],list_sumpus_sent4[i-1]) == False:
error.append('Cant find rhyme between paragraphs '+str((list_sumpus_sent2l[i],list_sumpus_sent4[i-1]))+'in paragraph '+str(i+1))
if error == []:
return 'The poem is correct according to the principle.'
else:
return error
except:
return 'Something went wrong Make sure you enter it in correct form.'
else:
return 'Something went wrong Make sure you enter it in correct form.'
def check_aek_too(self, text: Union[List[str], str], dead_syllable_as_aek:bool = False) -> Union[List[bool], List[str], bool, str]:
"""
Thai tonal word checker
:param Union[List[str], str] text: Thai word or list of Thai words
:param bool dead_syllable_as_aek: if True, dead syllable will be considered as aek
:return: 'aek', 'too', or False (neither); or a list of such results if the input is a list
:rtype: Union[List[bool], List[str], bool, str]
:Example:
::
from pythainlp.khavee import KhaveeVerifier
kv = KhaveeVerifier()
# Checking aek/too tone marks
print(kv.check_aek_too('เอง'), kv.check_aek_too('เอ่ง'), kv.check_aek_too('เอ้ง'))
## -> False, aek, too
print(kv.check_aek_too(['เอง', 'เอ่ง', 'เอ้ง'])) # a list works as well
## -> [False, 'aek', 'too']
"""
if isinstance(text, list):
return [self.check_aek_too(t, dead_syllable_as_aek) for t in text]
if not isinstance(text, str):
raise TypeError('text must be str or iterable list[str]')
word_characters = [*text]
if '่' in word_characters and not '้' in word_characters:
return 'aek'
elif '้' in word_characters and not '่' in word_characters:
return 'too'
if dead_syllable_as_aek and sound_syllable(text) == 'dead':
return 'aek'
else:
return False
def handle_karun_sound_silence(self, word: str) -> str:
"""
Handle sound silence in Thai word using '์' character (Karun)
by stripping all the characters before the 'Karun' character that should be silenced
:param str word: Thai word
:return: Thai word with the silenced characters stripped
:rtype: str
"""
sound_silenced = True if word.endswith('์') else False
if not sound_silenced:
return word
thai_consonants = "กขฃคฅฆงจฉชซฌญฎฏฐฑฒณดตถทธนบปผฝพฟภมยรลวศษสหฬอฮ"
locate_silenced = word.rfind('์') - 1
can_silence_two = True if word[locate_silenced-2] in thai_consonants else False
cut_off = 2 if can_silence_two else 1
word = word[:locate_silenced + 1 - cut_off]
return word
| 18,749 | 38.473684 | 281 | py |
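The karan-handling helper above is not demonstrated in the example file that follows, so a small sketch is given here. The expected outputs in the comments are derived by tracing the stripping logic above and should be treated as assumptions.

# Sketch of the karan (sound-silence) helper; expected outputs are traced from
# the stripping logic above, not copied from the library's documentation.
from pythainlp.khavee import KhaveeVerifier

kv = KhaveeVerifier()
print(kv.handle_karun_sound_silence("จันทร์"))    # expected: 'จัน' (two characters silenced)
print(kv.handle_karun_sound_silence("โทรศัพท์"))  # expected: 'โทรศัพ' (one character silenced)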
pythainlp-dev/pythainlp/khavee/example.py | pythainlp-dev/pythainlp/khavee/example.py | # -*- coding: utf-8 -*-
import core
kv = core.KhaveeVerifier()
# Checking the vowel
print('เออ',kv.check_sara('เมอ'))
# 'เออ'
# Checking the spelling section (matra)
print('เทอว',kv.check_marttra('เทอว'))
# 'เกอว'
# Checking a correct rhyme
print('สรร อัน',kv.is_sumpus('สรร','อัน'))
# True
# Checking an incorrect rhyme
print('เพื่อน ล้วน',kv.is_sumpus('เพื่อน','ล้วน'))
# False
# Checking karu/lahu (heavy/light) classification
print('สรร',kv.check_karu_lahu('สรร'))
#karu
# Checking karu/lahu (heavy/light) classification
print('ชิชะ',kv.check_karu_lahu('ชิชะ'))
# lahu
# Checking a klon 8 poem with correct prosody
print(kv.check_klon('''ณรงค์วุฒิผู้เปี่ยมวุฒิสมสง่า มากวิชาหาความรู้ไปสู่ผล
เรื่องฟิสิกส์คณิตศาสตร์เอิร์นอดทน เล่นเกมเก่งลำดับต้นของโรงเรียน
ต่อมาหยกธนัชพรชอบนอนหลับ แต่ผลลัพธ์คือฉลาดเรื่องอ่านเขียน
เหมือนจะเล่นแต่เขายังพากเพียร ในการเรียนการเล่นบ้างคละกันไป
นรภัทรพุกกะมานป่านจอมแก่น ทั่วแว่นแคว้นโดนเขาแกล้งไม่สงสัย
เรื่องวิศวะเก่งกาจประหลาดใจ เรื่องฟิสิกส์ไร้ผู้ใดมาต่อกร
นริศราอีฟเก่งกว่าใครเพื่อน คอยช่วยเตือนเรื่องงานคอยสั่งสอน
อ่านตำราหาความรู้ไม่ละทอน เป็นคนดีศรีนครของจิตรลดา
ภัสนันท์นาคลออหรือมีมี่ เรื่องเกมนี้เก่งกาจไม่กังขา
เกมอะไรก็เล่นได้ไม่ลดวา สุดฉลาดมากปัญญามาครบครัน''',k_type=8))
# -> The poem is correct according to the principle.
# Checking a klon 8 poem with incorrect prosody
print(kv.check_klon('''ณรงค์วุฒิผู้เปี่ยมวุฒิสมสง่า มากวิชาหาความรู้ไปสู่ผล
เรื่องฟิสิกส์คณิตศาสตร์เอิร์นอดทน เล่นเกมเก่งลำดับต้นของโรงเรียน
ต่อมาหยกธนัชพรชอบนอนหลับ แต่ผลลัพธ์คือฉลาดเรื่องอ่านเขียน
เหมือนจะเล่นแต่เขายังพากเพียร ในการเรียนการเล่นบ้างคละกันไป
นรภัทรพุกกะมานป่านจอมแก่น ทั่วแว่นแคว้นโดนเขาแกล้งไม่สงสัย
เรื่องวิศวะเก่งกาจประหลาดใจ เรื่องฟิสิกส์ไร้ผู้ใดมาต่อไป
นริศราอีฟเก่งกว่าใครเพื่อน คอยช่วยเตือนเรื่องงานคอยสั่งสอน
อ่านตำราหาความรู้ไม่ละทอน เป็นคนดีศรีนครของจิตรลดา
ภัสนันท์นาคลออหรือมีมี่ เรื่องเกมเอ่อเก่งกาจไม่กังขา
เกมอะไรก็เล่นได้ไม่ลดวา สุดฉลาดมากปัญญามาครบครัน''',k_type=8))
# -> ["Cant find rhyme between paragraphs ('สอน', 'ไป')in paragraph 4", "Cant find rhyme between paragraphs ('มี่', ['เกม', 'เอ่อ', 'เก่ง', 'กาจ'])in paragraph 5"]
# Checking a klon 4 poem with correct prosody
print(kv.check_klon('''ฉันชื่อหมูกรอบ ฉันชอบกินไก่ แล้วก็วิ่งไล่ หมาชื่อนํ้าทอง ลคคนเก่ง เอ๋งเอ๋งคะนอง มีคนจับจอง เขาชื่อน้องเธียร''',k_type=4))
# -> The poem is correct according to the principle.
# Checking a klon 4 poem with incorrect prosody
print(kv.check_klon('''ฉันชื่อหมูกรอบ ฉันชอบกินไก่ แล้วก็วิ่งไล่ หมาชื่อนํ้าทอง ลคคนเก่ง เอ๋งเอ๋งเสียงหมา มีคนจับจอง เขาชื่อน้องเธียร''',k_type=4))
# -> ["Cant find rhyme between paragraphs ('หมา', 'จอง')in paragraph 2", "Cant find rhyme between paragraphs ('หมา', 'ทอง')in paragraph 2"]
# Checking aek/too tone marks
print(kv.check_aek_too('เอง'), kv.check_aek_too('เอ่ง'), kv.check_aek_too('เอ้ง'))
# -> False, aek, too
print(kv.check_aek_too(['เอง', 'เอ่ง', 'เอ้ง'])) # a list works as well
# -> [False, 'aek', 'too']
print(kv.check_aek_too(['ห๊ะ', 'เอ่ง', 'เอ้ง'], dead_syllable_as_aek=True)) # a list works as well; dead-syllable words can be counted as aek, as in poem prosody checking
# -> ['aek', 'aek', 'too']
| 2,982 | 40.430556 | 172 | py |
pythainlp-dev/pythainlp/parse/__init__.py | pythainlp-dev/pythainlp/parse/__init__.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
PyThaiNLP Parse
"""
__all__ = ["dependency_parsing"]
from pythainlp.parse.core import dependency_parsing
| 723 | 35.2 | 74 | py |
pythainlp-dev/pythainlp/parse/core.py | pythainlp-dev/pythainlp/parse/core.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Union
_tagger = None
_tagger_name = ""
def dependency_parsing(
text: str, model: str = None, tag: str = "str", engine: str = "esupar"
) -> Union[List[List[str]], str]:
"""
Dependency Parsing
:param str text: text to do dependency parsing
:param str model: model to use with the engine \
(for esupar and transformers_ud)
:param str tag: output type (str or list)
:param str engine: name of the dependency parser
:return: str (conllu) or List
:rtype: Union[List[List[str]], str]
**Options for engine**
* *esupar* (default) - Tokenizer POS-tagger and Dependency-parser \
with BERT/RoBERTa/DeBERTa model. `GitHub \
<https://github.com/KoichiYasuoka/esupar>`_
* *spacy_thai* - Tokenizer, POS-tagger, and dependency-parser \
for Thai language, working on Universal Dependencies. \
`GitHub <https://github.com/KoichiYasuoka/spacy-thai>`_
* *transformers_ud* - TransformersUD \
`GitHub <https://github.com/KoichiYasuoka/>`_
* *ud_goeswith* - POS-tagging and dependency-parsing with \
using `goeswith` for subwords
**Options for model (esupar engine)**
* *th* (default) - KoichiYasuoka/roberta-base-thai-spm-upos model \
`Huggingface \
<https://huggingface.co/KoichiYasuoka/roberta-base-thai-spm-upos>`_
* *KoichiYasuoka/deberta-base-thai-upos* - DeBERTa(V2) model \
pre-trained on Thai Wikipedia texts for POS-tagging and \
dependency-parsing `Huggingface \
<https://huggingface.co/KoichiYasuoka/deberta-base-thai-upos>`_
* *KoichiYasuoka/roberta-base-thai-syllable-upos* - RoBERTa model \
pre-trained on Thai Wikipedia texts for POS-tagging and \
dependency-parsing. (syllable level) `Huggingface \
<https://huggingface.co/KoichiYasuoka/roberta-base-thai-syllable-upos>`_
* *KoichiYasuoka/roberta-base-thai-char-upos* - RoBERTa model \
pre-trained on Thai Wikipedia texts for POS-tagging \
and dependency-parsing. (char level) `Huggingface \
<https://huggingface.co/KoichiYasuoka/roberta-base-thai-char-upos>`_
If you want to train model for esupar, you can read \
`Huggingface <https://github.com/KoichiYasuoka/esupar>`_
**Options for model (transformers_ud engine)**
* *KoichiYasuoka/deberta-base-thai-ud-head* (default) - \
DeBERTa(V2) model pretrained on Thai Wikipedia texts \
for dependency-parsing (head-detection on Universal \
Dependencies) as question-answering, derived from \
deberta-base-thai. \
trained by th_blackboard.conll. `Huggingface \
<https://huggingface.co/KoichiYasuoka/deberta-base-thai-ud-head>`_
* *KoichiYasuoka/roberta-base-thai-spm-ud-head* - \
roberta model pretrained on Thai Wikipedia texts \
for dependency-parsing. `Huggingface \
<https://huggingface.co/KoichiYasuoka/roberta-base-thai-spm-ud-head>`_
**Options for model (ud_goeswith engine)**
* *KoichiYasuoka/deberta-base-thai-ud-goeswith* (default) - \
This is a DeBERTa(V2) model pre-trained on Thai Wikipedia \
texts for POS-tagging and dependency-parsing (using goeswith for subwords) \
`Huggingface <https://huggingface.co/KoichiYasuoka/deberta-base-thai-ud-goeswith>`_
:Example:
::
from pythainlp.parse import dependency_parsing
print(dependency_parsing("ผมเป็นคนดี", engine="esupar"))
# output:
# 1 ผม _ PRON _ _ 3 nsubj _ SpaceAfter=No
# 2 เป็น _ VERB _ _ 3 cop _ SpaceAfter=No
# 3 คน _ NOUN _ _ 0 root _ SpaceAfter=No
# 4 ดี _ VERB _ _ 3 acl _ SpaceAfter=No
print(dependency_parsing("ผมเป็นคนดี", engine="spacy_thai"))
# output:
# 1 ผม PRON PPRS _ 2 nsubj _ SpaceAfter=No
# 2 เป็น VERB VSTA _ 0 ROOT _ SpaceAfter=No
# 3 คนดี NOUN NCMN _ 2 obj _ SpaceAfter=No
"""
global _tagger, _tagger_name
if _tagger_name != engine:
if engine == "esupar":
from pythainlp.parse.esupar_engine import Parse
_tagger = Parse(model=model)
elif engine == "transformers_ud":
from pythainlp.parse.transformers_ud import Parse
_tagger = Parse(model=model)
elif engine == "spacy_thai":
from pythainlp.parse.spacy_thai_engine import Parse
_tagger = Parse()
elif engine == "ud_goeswith":
from pythainlp.parse.ud_goeswith import Parse
_tagger = Parse(model=model)
else:
raise NotImplementedError("This engine is not supported.")
_tagger_name = engine
return _tagger(text, tag=tag)
| 5,798 | 44.661417 | 96 | py |
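A short sketch of the list-style output and of the module-level caching above (repeated calls with the same engine reuse one loaded model). The row shown in the comment is the docstring's own example reshaped into list form, not a newly verified run.

# Sketch of tag="list" output; each inner list is one CoNLL-U row, e.g.
# ['1', 'ผม', '_', 'PRON', '_', '_', '3', 'nsubj', '_', 'SpaceAfter=No'].
from pythainlp.parse import dependency_parsing

rows = dependency_parsing("ผมเป็นคนดี", engine="esupar", tag="list")
for row in rows:
    print(row)

# The parser is cached in module-level globals, so a second call with the same
# engine reuses the already-loaded model instead of loading it again.
print(dependency_parsing("แมวกินปลา", engine="esupar"))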
pythainlp-dev/pythainlp/parse/esupar_engine.py | pythainlp-dev/pythainlp/parse/esupar_engine.py | # -*- coding: utf-8 -*-
"""
esupar: Tokenizer POS-tagger and Dependency-parser with BERT/RoBERTa/DeBERTa models for Japanese and other languages
GitHub: https://github.com/KoichiYasuoka/esupar
"""
from typing import List, Union
try:
import esupar
except ImportError:
raise ImportError("esupar is not installed. Install it with: pip install esupar")
class Parse:
def __init__(self, model: str = "th") -> None:
if model is None:
model = "th"
self.nlp = esupar.load(model)
def __call__(
self, text: str, tag: str = "str"
) -> Union[List[List[str]], str]:
_data = str(self.nlp(text))
if tag == "list":
_temp = _data.splitlines()
_tag_data = []
for i in _temp:
_tag_data.append(i.split())
return _tag_data
return _data
| 853 | 25.6875 | 116 | py |
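The wrapper above can also be used directly; a minimal sketch ("th" is the default model alias resolved inside the class):

# Minimal direct-use sketch of the esupar wrapper above.
from pythainlp.parse.esupar_engine import Parse

parser = Parse(model="th")
print(parser("ผมเป็นคนดี"))              # CoNLL-U formatted string
print(parser("ผมเป็นคนดี", tag="list"))  # list of token rows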