pythainlp-dev/pythainlp/util/thai.py
# -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Check if it is Thai text
"""
import string
from typing import List, Tuple
from pythainlp import (
thai_lead_vowels,
thai_follow_vowels,
thai_above_vowels,
thai_below_vowels,
thai_consonants,
thai_vowels,
thai_tonemarks,
thai_signs,
thai_digits,
thai_punctuations,
)
from pythainlp.transliterate import pronunciate
from pythainlp.util.syllable import tone_detector
_DEFAULT_IGNORE_CHARS = string.whitespace + string.digits + string.punctuation
_TH_FIRST_CHAR_ASCII = 3584
_TH_LAST_CHAR_ASCII = 3711
def isthaichar(ch: str) -> bool:
"""Check if a character is a Thai character.
:param ch: input character
:type ch: str
    :return: True if ch is a Thai character, otherwise False.
:rtype: bool
:Example:
::
from pythainlp.util import isthaichar
isthaichar("ก") # THAI CHARACTER KO KAI
# output: True
isthaichar("๕") # THAI DIGIT FIVE
# output: True
"""
ch_val = ord(ch)
if ch_val >= _TH_FIRST_CHAR_ASCII and ch_val <= _TH_LAST_CHAR_ASCII:
return True
return False
def isthai(text: str, ignore_chars: str = ".") -> bool:
    """Check if every character in a string is a Thai character.
:param text: input text
:type text: str
:param ignore_chars: characters to be ignored, defaults to "."
:type ignore_chars: str, optional
    :return: True if every character in the input string is Thai,
        otherwise False.
:rtype: bool
:Example:
::
from pythainlp.util import isthai
isthai("กาลเวลา")
# output: True
isthai("กาลเวลา.")
# output: True
isthai("กาล-เวลา")
# output: False
isthai("กาล-เวลา +66", ignore_chars="01234567890+-.,")
# output: True
"""
if not ignore_chars:
ignore_chars = ""
for ch in text:
if ch not in ignore_chars and not isthaichar(ch):
return False
return True
def countthai(text: str, ignore_chars: str = _DEFAULT_IGNORE_CHARS) -> float:
"""Find proportion of Thai characters in a given text
:param text: input text
:type text: str
    :param ignore_chars: characters to be ignored, defaults to whitespace,\\
        digits, and punctuation marks.
:type ignore_chars: str, optional
:return: proportion of Thai characters in the text (percent)
:rtype: float
:Example:
::
from pythainlp.util import countthai
countthai("ไทยเอ็นแอลพี 3.0")
# output: 100.0
countthai("PyThaiNLP 3.0")
# output: 0.0
countthai("ใช้งาน PyThaiNLP 3.0")
# output: 40.0
countthai("ใช้งาน PyThaiNLP 3.0", ignore_chars="")
# output: 30.0
"""
if not text or not isinstance(text, str):
return 0.0
if not ignore_chars:
ignore_chars = ""
num_thai = 0
num_ignore = 0
for ch in text:
if ch in ignore_chars:
num_ignore += 1
elif isthaichar(ch):
num_thai += 1
num_count = len(text) - num_ignore
if num_count == 0:
return 0.0
return (num_thai / num_count) * 100
def display_thai_char(ch: str) -> str:
"""Prefix an underscore (_) to a high-position vowel or a tone mark,
to ease readability.
:param ch: input character
:type ch: str
:return: "_" + ch
:rtype: str
:Example:
::
from pythainlp.util import display_thai_char
display_thai_char("้")
# output: "_้"
"""
if (
ch in thai_above_vowels
or ch in thai_tonemarks
or ch in "\u0e33\u0e4c\u0e4d\u0e4e"
):
# last condition is Sra Aum, Thanthakhat, Nikhahit, Yamakkan
return "_" + ch
else:
return ch
def thai_word_tone_detector(word: str) -> List[Tuple[str, str]]:
"""
    Thai tone detector for a word.
    It uses pythainlp.transliterate.pronunciate to convert the word to\
    its pronunciation.
    :param str word: Thai word.
    :return: Thai pronunciation with the tone of each syllable\
    (l, m, h, r, f, or empty if the tone cannot be detected)
    :rtype: List[Tuple[str, str]]
:Example:
::
from pythainlp.util import thai_word_tone_detector
print(thai_word_tone_detector("คนดี"))
# output: [('คน', 'm'), ('ดี', 'm')]
print(thai_word_tone_detector("มือถือ"))
# output: [('มือ', 'm'), ('ถือ', 'r')]
"""
_pronunciate = pronunciate(word).split("-")
return [(i, tone_detector(i.replace("หฺ", "ห"))) for i in _pronunciate]
def count_thai_chars(text: str) -> dict:
"""
Count Thai characters by type
    This function counts the number of Thai characters by type\
(consonants, vowels, lead_vowels, follow_vowels, above_vowels,\
below_vowels, tonemarks, signs, thai_digits, punctuations, non_thai)
:param str text: Text
:return: Dict with numbers of Thai characters by type
:rtype: dict
:Example:
::
from pythainlp.util import count_thai_chars
count_thai_chars("ทดสอบภาษาไทย")
# output: {
# 'vowels': 3,
# 'lead_vowels': 1,
# 'follow_vowels': 2,
# 'above_vowels': 0,
# 'below_vowels': 0,
# 'consonants': 9,
# 'tonemarks': 0,
# 'signs': 0,
# 'thai_digits': 0,
# 'punctuations': 0,
# 'non_thai': 0
# }
"""
_dict = {
"vowels": 0,
"lead_vowels": 0,
"follow_vowels": 0,
"above_vowels": 0,
"below_vowels": 0,
"consonants": 0,
"tonemarks": 0,
"signs": 0,
"thai_digits": 0,
"punctuations": 0,
"non_thai": 0,
}
for c in text:
if c in thai_vowels:
_dict["vowels"] += 1
if c in thai_lead_vowels:
_dict["lead_vowels"] += 1
elif c in thai_follow_vowels:
_dict["follow_vowels"] += 1
elif c in thai_above_vowels:
_dict["above_vowels"] += 1
elif c in thai_below_vowels:
_dict["below_vowels"] += 1
elif c in thai_consonants:
_dict["consonants"] += 1
elif c in thai_tonemarks:
_dict["tonemarks"] += 1
elif c in thai_signs:
_dict["signs"] += 1
elif c in thai_digits:
_dict["thai_digits"] += 1
elif c in thai_punctuations:
_dict["punctuations"] += 1
else:
_dict["non_thai"] += 1
return _dict
pythainlp-dev/pythainlp/util/thaiwordcheck.py
# -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Check if a word is a "native Thai word"
Adapted from
https://github.com/wannaphong/open-thai-nlp-document/blob/master/check_thai_word.md
References
- ทีมงานทรูปลูกปัญญา 2015. ลักษณะของคำไทยแท้ http://www.trueplookpanya.com/learning/detail/30589-043067
- วารุณี บำรุงรส 2010. คำไทยแท้ https://www.gotoknow.org/posts/377619
"""
import re
_THANTHAKHAT_CHAR = "\u0e4c" # Thanthakhat (cancellation of sound)
# Non-native Thai characters
_TH_NON_NATIVE_CHARS = {
"ฆ",
"ณ",
"ฌ",
"ฎ",
"ฏ",
"ฐ",
"ฑ",
"ฒ",
"ธ",
"ศ",
"ษ",
"ฬ",
_THANTHAKHAT_CHAR,
}
# Native Thai final consonants
_TH_NATIVE_FINALS = {"ก", "ด", "บ", "น", "ง", "ม", "ย", "ว"}
# Known native Thai words (exceptions)
_TH_NATIVE_WORDS = {
"ฆ่า",
"เฆี่ยน",
"ศึก",
"ศอก",
"เศิก",
"เศร้า",
"ธ",
"ณ",
"ฯพณฯ",
"ใหญ่",
"หญ้า",
"ควาย",
"ความ",
"กริ่งเกรง",
"ผลิ",
}
# Diphthong prefixes (can start a native Thai word)
_TH_PREFIX_DIPHTHONG = {"กะ", "กระ", "ปะ", "ประ"}
# Thai consonant filter
# O ANG (U+0E2D) is omitted, as it can be considered a vowel
_TH_CONSONANTS_PATTERN = re.compile(r"[ก-ฬฮ]", re.U)
def is_native_thai(word: str) -> bool:
"""
    Check if a word is a "native Thai word" (Thai: "คำไทยแท้")
    This function is based on a simple heuristic algorithm
    and is not entirely reliable.
:param str word: word
:return: True or False
:rtype: bool
:Example:
English word::
from pythainlp.util import is_native_thai
is_native_thai("Avocado")
# output: False
Native Thai word::
is_native_thai("มะม่วง")
# output: True
is_native_thai("ตะวัน")
# output: True
Non-native Thai word::
is_native_thai("สามารถ")
# output: False
is_native_thai("อิสริยาภรณ์")
# output: False
"""
if not isinstance(word, str) or not word.strip():
return False
word = word.strip()
# Known native Thai words (exceptions)
if word in _TH_NATIVE_WORDS:
return True
# If a word contains non-Thai char, it is not a native Thai
if any(ch in word for ch in _TH_NON_NATIVE_CHARS):
return False
    # If the word does not contain any Thai consonant -> it cannot be Thai
chs = re.findall(_TH_CONSONANTS_PATTERN, word)
if not chs:
return False
    # If there's only one Thai consonant -> it can be a native Thai word
if len(chs) == 1:
return True
    # If a word ends with a native final consonant, it can be a native Thai word
if word[-1] in _TH_NATIVE_FINALS:
return True
    # Note: This check does not work as intended, as it checks the whole word,
    # not just the prefix. Prefix-sensitive tokenization is required
    # to be able to check this.
if word in _TH_PREFIX_DIPHTHONG:
return True
return False
pythainlp-dev/pythainlp/util/time.py
# -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Spell out time to Thai words.
Convert time string or time object to Thai words.
"""
from datetime import datetime, time
from typing import Union
from pythainlp.tokenize import Tokenizer
from pythainlp.util.numtoword import num_to_thaiword
from pythainlp.util.wordtonum import thaiword_to_num
_TIME_FORMAT_WITH_SEC = "%H:%M:%S"
_TIME_FORMAT_WITHOUT_SEC = "%H:%M"
_DICT_THAI_TIME = {
"ศูนย์": 0,
"หนึ่ง": 1,
"สอง": 2,
"ยี่": 2,
"สาม": 3,
"สี่": 4,
"ห้า": 5,
"หก": 6,
"เจ็ด": 7,
"แปด": 8,
"เก้า": 9,
"สิบ": 10,
"เอ็ด": 1,
# set the value of the time unit
"โมงเช้า": 6, # start counting at 7:00 a.m.
"โมงเย็น": 13,
"บ่าย": 13,
"บ่ายโมง": 13,
"ตี": 0,
"เที่ยงวัน": 12,
"เที่ยงคืน": 0,
"เที่ยง": 12,
"ทุ่ม": 18,
"นาฬิกา": 0,
"ครึ่ง": 30,
}
_THAI_TIME_CUT = Tokenizer(
custom_dict=list(_DICT_THAI_TIME.keys()), engine="newmm"
)
_THAI_TIME_AFFIX = [
"โมงเช้า",
"บ่ายโมง",
"โมงเย็น",
"โมง",
"นาฬิกา",
"ทุ่ม",
"ตี",
"เที่ยงคืน",
"เที่ยงวัน",
"เที่ยง",
]
def _format_6h(h: int) -> str:
"""Thai time (6-hour clock)."""
text = ""
if h == 0:
text += "เที่ยงคืน"
elif h < 7:
text += "ตี" + num_to_thaiword(h)
elif h < 12:
text += num_to_thaiword(h - 6) + "โมงเช้า"
elif h == 12:
text += "เที่ยง"
elif h < 18:
if h == 13:
text += "บ่ายโมง"
else:
text += "บ่าย" + num_to_thaiword(h - 12) + "โมง"
elif h == 18:
text += "หกโมงเย็น"
else:
text += num_to_thaiword(h - 18) + "ทุ่ม"
return text
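# Illustrative outputs of the 6-hour clock mapping above (a sketch for
# readability only, not part of the module; assumes num_to_thaiword behaves
# as in pythainlp.util):
#   _format_6h(0)  -> "เที่ยงคืน"
#   _format_6h(3)  -> "ตีสาม"
#   _format_6h(9)  -> "สามโมงเช้า"
#   _format_6h(13) -> "บ่ายโมง"
#   _format_6h(15) -> "บ่ายสามโมง"
#   _format_6h(21) -> "สามทุ่ม"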
def _format_m6h(h: int) -> str:
"""Thai time (modified 6-hour clock)."""
text = ""
if h == 0:
text += "เที่ยงคืน"
elif h < 6:
text += "ตี" + num_to_thaiword(h)
elif h < 12:
text += num_to_thaiword(h) + "โมง"
elif h == 12:
text += "เที่ยง"
elif h < 19:
text += num_to_thaiword(h - 12) + "โมง"
else:
text += num_to_thaiword(h - 18) + "ทุ่ม"
return text
def _format_24h(h: int) -> str:
"""Thai time (24-hour clock)."""
text = num_to_thaiword(h) + "นาฬิกา"
return text
def _format(
h: int,
m: int,
s: int,
fmt: str = "24h",
precision: Union[str, None] = None,
) -> str:
text = ""
if fmt == "6h":
text = _format_6h(h)
elif fmt == "m6h":
text = _format_m6h(h)
elif fmt == "24h":
text = _format_24h(h)
else:
raise NotImplementedError(f"Time format not supported: {fmt}")
if precision == "m" or precision == "s":
if (
m == 30
and (s == 0 or precision == "m")
and (fmt == "6h" or fmt == "m6h")
):
text += "ครึ่ง"
else:
text += num_to_thaiword(m) + "นาที"
if precision == "s":
text += num_to_thaiword(s) + "วินาที"
else:
if m:
if m == 30 and s == 0 and (fmt == "6h" or fmt == "m6h"):
text += "ครึ่ง"
else:
text += num_to_thaiword(m) + "นาที"
if s:
text += num_to_thaiword(s) + "วินาที"
return text
def time_to_thaiword(
time_data: Union[time, datetime, str],
fmt: str = "24h",
precision: Union[str, None] = None,
) -> str:
"""
Spell out time to Thai words.
:param str time_data: time input, can be a datetime.time object \
or a datetime.datetime object \
or a string (in H:M or H:M:S format, using 24-hour clock)
:param str fmt: time output format
* *24h* - 24-hour clock (default)
* *6h* - 6-hour clock
* *m6h* - Modified 6-hour clock
:param str precision: precision of the spell out
* *m* - always spell out to minute level
* *s* - always spell out to second level
* None - spell out only non-zero parts
:return: Time spell out in Thai words
:rtype: str
:Example:
::
time_to_thaiword("8:17")
# output:
# แปดนาฬิกาสิบเจ็ดนาที
time_to_thaiword("8:17", "6h")
# output:
# สองโมงเช้าสิบเจ็ดนาที
time_to_thaiword("8:17", "m6h")
# output:
# แปดโมงสิบเจ็ดนาที
time_to_thaiword("18:30", fmt="m6h")
# output:
# หกโมงครึ่ง
time_to_thaiword(datetime.time(12, 3, 0))
# output:
# สิบสองนาฬิกาสามนาที
time_to_thaiword(datetime.time(12, 3, 0), precision="s")
# output:
# สิบสองนาฬิกาสามนาทีศูนย์วินาที
"""
_time = None
if isinstance(time_data, time) or isinstance(time_data, datetime):
_time = time_data
else:
if not isinstance(time_data, str):
raise TypeError(
"Time data must be a datetime.time object, "
"a datetime.datetime object, or a string."
)
if not time_data:
raise ValueError("Time string cannot be empty.")
try:
_time = datetime.strptime(time_data, _TIME_FORMAT_WITH_SEC)
except ValueError:
try:
_time = datetime.strptime(time_data, _TIME_FORMAT_WITHOUT_SEC)
except ValueError:
pass
if not _time:
raise ValueError(
f"Time string '{time_data}' does not match H:M or H:M:S format."
)
text = _format(_time.hour, _time.minute, _time.second, fmt, precision)
return text
def thaiword_to_time(text: str, padding: bool = True) -> str:
"""
Convert Thai time in words into time (H:M).
:param str text: Thai time in words
:param bool padding: Zero padding the hour if True
:return: time string
:rtype: str
:Example:
::
        thaiword_to_time("บ่ายโมงครึ่ง")
# output:
# 13:30
"""
keys_dict = list(_DICT_THAI_TIME.keys())
text = text.replace("กว่า", "").replace("ๆ", "").replace(" ", "")
_i = ["ตีหนึ่ง", "ตีสอง", "ตีสาม", "ตีสี่", "ตีห้า"]
_time = ""
for affix in _THAI_TIME_AFFIX:
if affix in text and affix != "ตี":
_time = text.replace(affix, affix + "|")
break
elif affix in text and affix == "ตี":
for j in _i:
if j in text:
_time = text.replace(j, j + "|")
break
else:
pass
if "|" not in _time:
raise ValueError("Cannot find any Thai word for time affix.")
_LIST_THAI_TIME = _time.split("|")
del _time
hour = _THAI_TIME_CUT.word_tokenize(_LIST_THAI_TIME[0])
minute = _LIST_THAI_TIME[1]
if len(minute) > 1:
minute = _THAI_TIME_CUT.word_tokenize(minute)
else:
minute = 0
text = ""
# determine hour
if hour[-1] == "นาฬิกา" and hour[0] in keys_dict and hour[:-1]:
text += str(thaiword_to_num("".join(hour[:-1])))
elif hour[0] == "ตี" and hour[1] in keys_dict:
text += str(_DICT_THAI_TIME[hour[1]])
elif hour[-1] == "โมงเช้า" and hour[0] in keys_dict:
if _DICT_THAI_TIME[hour[0]] < 6:
text += str(_DICT_THAI_TIME[hour[0]] + 6)
else:
text += str(_DICT_THAI_TIME[hour[0]])
elif (hour[-1] == "โมงเย็น" or hour[-1] == "โมง") and hour[0] == "บ่าย":
text += str(_DICT_THAI_TIME[hour[1]] + 12)
elif (hour[-1] == "โมงเย็น" or hour[-1] == "โมง") and hour[0] in keys_dict:
text += str(_DICT_THAI_TIME[hour[0]] + 12)
elif hour[-1] == "เที่ยงคืน":
text += "0"
elif hour[-1] == "เที่ยงวัน" or hour[-1] == "เที่ยง":
text += "12"
elif hour[0] == "บ่ายโมง":
text += "13"
elif hour[-1] == "ทุ่ม":
if len(hour) == 1:
text += "19"
else:
text += str(_DICT_THAI_TIME[hour[0]] + 18)
if not text:
raise ValueError("Cannot find any Thai word for hour.")
if padding and len(text) == 1:
text = "0" + text
text += ":"
# determine minute
if minute:
n = 0
for affix in minute:
if affix in keys_dict:
if affix != "สิบ":
n += _DICT_THAI_TIME[affix]
elif affix == "สิบ" and n != 0:
n *= 10
elif affix == "สิบ" and n == 0:
n += 10
if n != 0 and n > 9:
text += str(n)
else:
text += "0" + str(n)
else:
text += "00"
return text
pythainlp-dev/pythainlp/util/trie.py
# -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Trie data structure.
Designed to be used for a tokenizer's dictionary, but can be used for other purposes.
"""
from typing import Iterable, List, Union
class Trie:
class Node(object):
__slots__ = "end", "children"
def __init__(self):
self.end = False
self.children = {}
def __init__(self, words: Iterable[str]):
self.words = set(words)
self.root = Trie.Node()
for word in words:
self.add(word)
def add(self, word: str) -> None:
"""
Add a word to the trie.
Spaces in front of and following the word will be removed.
        :param str word: a word
"""
word = word.strip()
self.words.add(word)
cur = self.root
for ch in word:
child = cur.children.get(ch)
if not child:
child = Trie.Node()
cur.children[ch] = child
cur = child
cur.end = True
def remove(self, word: str) -> None:
"""
Remove a word from the trie.
If the word is not found, do nothing.
        :param str word: a word
"""
# remove from set first
if word not in self.words:
return
self.words.remove(word)
# then remove from nodes
parent = self.root
data = [] # track path to leaf
for ch in word:
child = parent.children[ch]
data.append((parent, child, ch))
parent = child
# remove the last one
child.end = False
# prune up the tree
for parent, child, ch in reversed(data):
if child.end or child.children:
break
del parent.children[ch] # remove from parent dict
def prefixes(self, text: str) -> List[str]:
"""
        List all words in the trie that match an initial sequence of characters
        of the given text.
:param str text: a word
:return: a list of possible words
:rtype: List[str]
"""
res = []
cur = self.root
for i, ch in enumerate(text):
node = cur.children.get(ch)
if not node:
break
if node.end:
res.append(text[: i + 1])
cur = node
return res
def __contains__(self, key: str) -> bool:
return key in self.words
def __iter__(self) -> Iterable[str]:
yield from self.words
def __len__(self) -> int:
return len(self.words)
def dict_trie(dict_source: Union[str, Iterable[str], Trie]) -> Trie:
"""
Create a dictionary trie from a file or an iterable.
:param str|Iterable[str]|pythainlp.util.Trie dict_source: a path to
dictionary file or a list of words or a pythainlp.util.Trie object
:return: a trie object
:rtype: pythainlp.util.Trie
"""
trie = None
if isinstance(dict_source, str) and len(dict_source) > 0:
# dict_source is a path to dictionary text file
with open(dict_source, "r", encoding="utf8") as f:
_vocabs = f.read().splitlines()
trie = Trie(_vocabs)
elif isinstance(dict_source, Iterable) and not isinstance(
dict_source, str
):
        # Note: Since Trie and str are both Iterable,
        # the Iterable check should be here, at the very end,
        # because it is less specific.
trie = Trie(dict_source)
else:
raise TypeError(
"Type of dict_source must be pythainlp.util.Trie, "
"or Iterable[str], or non-empty str (path to source file)"
)
return trie
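# Usage sketch (illustrative only, not part of the module): build a trie from
# a small word list, query prefixes, and remove a word.
#
#   trie = dict_trie(["ทด", "ทดสอบ", "สอบ"])
#   trie.prefixes("ทดสอบระบบ")  # -> ["ทด", "ทดสอบ"]
#   "สอบ" in trie               # -> True
#   trie.remove("ทด")
#   trie.prefixes("ทดสอบระบบ")  # -> ["ทดสอบ"]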
pythainlp-dev/pythainlp/util/wordtonum.py
# -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Convert number in words to a computable number value
First version of the code adapted from Korakot Chaovavanich's notebook
https://colab.research.google.com/drive/148WNIeclf0kOU6QxKd6pcfwpSs8l-VKD#scrollTo=EuVDd0nNuI8Q
"""
import re
from typing import List
from pythainlp.tokenize import Tokenizer
from pythainlp.corpus import thai_words
_ptn_digits = r"(|หนึ่ง|เอ็ด|สอง|ยี่|สาม|สี่|ห้า|หก|เจ็ด|แปด|เก้า)"
_ptn_six_figures = (
rf"({_ptn_digits}แสน)?({_ptn_digits}หมื่น)?({_ptn_digits}พัน)?"
rf"({_ptn_digits}ร้อย)?({_ptn_digits}สิบ)?{_ptn_digits}?"
)
_ptn_thai_numerals = rf"(ลบ)?({_ptn_six_figures}ล้าน)*{_ptn_six_figures}"
_re_thai_numerals = re.compile(_ptn_thai_numerals)
_digits = {
# "ศูนย์" was excluded as a special case
"หนึ่ง": 1,
"เอ็ด": 1,
"สอง": 2,
"ยี่": 2,
"สาม": 3,
"สี่": 4,
"ห้า": 5,
"หก": 6,
"เจ็ด": 7,
"แปด": 8,
"เก้า": 9,
}
_powers_of_10 = {
"สิบ": 10,
"ร้อย": 100,
"พัน": 1000,
"หมื่น": 10000,
"แสน": 100000,
# "ล้าน" was excluded as a special case
}
_valid_tokens = (
set(_digits.keys()) | set(_powers_of_10.keys()) | {"ล้าน", "ลบ"}
)
_tokenizer = Tokenizer(custom_dict=_valid_tokens)
def _check_is_thainum(word: str):
for j in list(_digits.keys()):
if j in word:
return (True, "num")
for j in ["สิบ", "ร้อย", "พัน", "หมื่น", "แสน", "ล้าน", "จุด", "ลบ"]:
if j in word:
return (True, "unit")
return (False, None)
_dict_words = [i for i in list(thai_words()) if not _check_is_thainum(i)[0]]
_dict_words += list(_digits.keys())
_dict_words += ["สิบ", "ร้อย", "พัน", "หมื่น", "แสน", "ล้าน", "จุด"]
_tokenizer_thaiwords = Tokenizer(_dict_words)
def thaiword_to_num(word: str) -> int:
"""
Converts the spelled-out numerals in Thai scripts into an actual integer.
:param str word: Spelled-out numerals in Thai scripts
:return: Corresponding integer value of the input
:rtype: int
:Example:
::
from pythainlp.util import thaiword_to_num
thaiword_to_num("ศูนย์")
# output: 0
thaiword_to_num("สองล้านสามแสนหกร้อยสิบสอง")
# output: 2300612
"""
if not isinstance(word, str):
raise TypeError(f"The input must be a string; given {word!r}")
if not word:
raise ValueError("The input string cannot be empty")
if word == "ศูนย์":
return 0
if not _re_thai_numerals.fullmatch(word):
raise ValueError("The input string is not a valid Thai numeral")
tokens = _tokenizer.word_tokenize(word)
accumulated = 0
next_digit = 1
is_minus = False
if tokens[0] == "ลบ":
is_minus = True
tokens.pop(0)
for token in tokens:
if token in _digits:
next_digit = _digits[token]
elif token in _powers_of_10:
# Absent digit assumed 1 before all powers of 10 (except million)
accumulated += max(next_digit, 1) * _powers_of_10[token]
next_digit = 0
else: # token == "ล้าน"
# Absent digit assumed 0 before word million
accumulated = (accumulated + next_digit) * 1000000
next_digit = 0
# Cleaning up trailing digit
accumulated += next_digit
if is_minus:
accumulated = -accumulated
return accumulated
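# Worked trace of the accumulation above (illustration only, not part of the
# module): "สองล้านสามแสนหกร้อยสิบสอง" is tokenized into
# ["สอง", "ล้าน", "สาม", "แสน", "หก", "ร้อย", "สิบ", "สอง"]:
#   สอง      -> next_digit = 2
#   ล้าน      -> accumulated = (0 + 2) * 1,000,000 = 2,000,000
#   สาม, แสน -> accumulated += 3 * 100,000 -> 2,300,000
#   หก, ร้อย  -> accumulated += 6 * 100     -> 2,300,600
#   สิบ       -> accumulated += 1 * 10      -> 2,300,610  (absent digit assumed 1)
#   สอง      -> trailing digit, accumulated += 2 -> 2,300,612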
def _decimal_unit(words: list) -> float:
_num = 0.0
for i, v in enumerate(words):
_num += int(thaiword_to_num(v)) / (10 ** (i + 1))
return _num
def words_to_num(words: list) -> float:
"""
    Convert Thai number words to a float.
    :param list words: list of Thai number words
    :return: float value of the words
:rtype: float
:Example:
::
from pythainlp.util import words_to_num
words_to_num(["ห้า", "สิบ", "จุด", "เก้า", "ห้า"])
# output: 50.95
"""
num = 0
if "จุด" not in words:
num = thaiword_to_num("".join(words))
else:
words_int = "".join(words[: words.index("จุด")])
words_float = words[words.index("จุด") + 1 :]
num = thaiword_to_num(words_int)
if num <= -1:
num -= _decimal_unit(words_float)
else:
num += _decimal_unit(words_float)
return num
def text_to_num(text: str) -> List[str]:
"""
    Convert Thai text into a list of words, with spelled-out numbers converted
    to numeric strings.
    :param str text: Thai text with the spelled-out numerals
    :return: list of Thai words in which spelled-out numbers are replaced
        by their numeric values
:rtype: List[str]
:Example:
::
from pythainlp.util import text_to_num
text_to_num("เก้าร้อยแปดสิบจุดเก้าห้าบาทนี่คือจำนวนทั้งหมด")
# output: ['980.95', 'บาท', 'นี่', 'คือ', 'จำนวน', 'ทั้งหมด']
text_to_num("สิบล้านสองหมื่นหนึ่งพันแปดร้อยแปดสิบเก้าบาท")
# output: ['10021889', 'บาท']
"""
_temp = _tokenizer_thaiwords.word_tokenize(text)
thainum = []
last_index = -1
list_word_new = []
for i, word in enumerate(_temp):
if (
_check_is_thainum(word)[0]
and last_index + 1 == i
and i + 1 == len(_temp)
):
thainum.append(word)
list_word_new.append(str(words_to_num(thainum)))
elif _check_is_thainum(word)[0] and last_index + 1 == i:
thainum.append(word)
last_index = i
elif _check_is_thainum(word)[0]:
thainum.append(word)
last_index = i
elif (
not _check_is_thainum(word)[0]
and last_index + 1 == i
and last_index != -1
):
list_word_new.append(str(words_to_num(thainum)))
thainum = []
list_word_new.append(word)
else:
list_word_new.append(word)
last_index = -1
return list_word_new
pythainlp-dev/pythainlp/wangchanberta/__init__.py
# -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = [
"ThaiNameTagger",
"segment",
"NamedEntityRecognition",
]
from pythainlp.wangchanberta.core import ThaiNameTagger, segment, NamedEntityRecognition
pythainlp-dev/pythainlp/wangchanberta/core.py
# -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Tuple, Union
import re
from transformers import (
CamembertTokenizer,
pipeline,
)
import warnings
from pythainlp.tokenize import word_tokenize
_model_name = "wangchanberta-base-att-spm-uncased"
_tokenizer = CamembertTokenizer.from_pretrained(
f"airesearch/{_model_name}", revision="main"
)
if _model_name == "wangchanberta-base-att-spm-uncased":
_tokenizer.additional_special_tokens = ["<s>NOTUSED", "</s>NOTUSED", "<_>"]
class ThaiNameTagger:
def __init__(
self, dataset_name: str = "thainer", grouped_entities: bool = True
):
"""
        This class tags named entities from text, in IOB format.
Powered by wangchanberta from VISTEC-depa\
AI Research Institute of Thailand
:param str dataset_name:
* *thainer* - ThaiNER dataset
:param bool grouped_entities: grouped entities
"""
self.dataset_name = dataset_name
self.grouped_entities = grouped_entities
self.classify_tokens = pipeline(
task="ner",
tokenizer=_tokenizer,
model=f"airesearch/{_model_name}",
revision=f"finetuned@{self.dataset_name}-ner",
ignore_labels=[],
grouped_entities=self.grouped_entities,
)
def _IOB(self, tag):
if tag != "O":
return "B-" + tag
return "O"
def _clear_tag(self, tag):
return tag.replace("B-", "").replace("I-", "")
def get_ner(
self, text: str, pos: bool= False,tag: bool = False
) -> Union[List[Tuple[str, str]], str]:
"""
        This function tags named entities from text, in IOB format.
Powered by wangchanberta from VISTEC-depa\
AI Research Institute of Thailand
:param str text: text in Thai to be tagged
:param bool tag: output like html tag.
:return: a list of tuple associated with tokenized word group, NER tag, \
and output like html tag (if the parameter `tag` is \
specified as `True`). \
Otherwise, return a list of tuple associated with tokenized \
word and NER tag
        :rtype: Union[List[Tuple[str, str]], str]
"""
if pos:
            warnings.warn("This model does not support POS tagging and will not output POS tags.")
text = re.sub(" ", "<_>", text)
self.json_ner = self.classify_tokens(text)
self.output = ""
if self.grouped_entities and self.dataset_name == "thainer":
self.sent_ner = [
(
i["word"].replace("<_>", " ").replace("▁", ""),
self._IOB(i["entity_group"]),
)
for i in self.json_ner
]
elif self.dataset_name == "thainer":
self.sent_ner = [
(i["word"].replace("<_>", " ").replace("▁", ""), i["entity"])
for i in self.json_ner
if i["word"] != "▁"
]
else:
self.sent_ner = [
(
i["word"].replace("<_>", " ").replace("▁", ""),
i["entity"].replace("_", "-").replace("E-", "I-"),
)
for i in self.json_ner
]
if self.sent_ner[0][0] == "" and len(self.sent_ner) > 1:
self.sent_ner = self.sent_ner[1:]
for idx, (word, ner) in enumerate(self.sent_ner):
if idx > 0 and ner.startswith("B-"):
if self._clear_tag(ner) == self._clear_tag(
self.sent_ner[idx - 1][1]
):
self.sent_ner[idx] = (word, ner.replace("B-", "I-"))
if tag:
temp = ""
sent = ""
for idx, (word, ner) in enumerate(self.sent_ner):
if ner.startswith("B-") and temp != "":
sent += "</" + temp + ">"
temp = ner[2:]
sent += "<" + temp + ">"
elif ner.startswith("B-"):
temp = ner[2:]
sent += "<" + temp + ">"
elif ner == "O" and temp != "":
sent += "</" + temp + ">"
temp = ""
sent += word
if idx == len(self.sent_ner) - 1 and temp != "":
sent += "</" + temp + ">"
return sent
else:
return self.sent_ner
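# Usage sketch for ThaiNameTagger (illustrative only; the fine-tuned model is
# downloaded on first use and the exact tags depend on that model):
#
#   tagger = ThaiNameTagger(dataset_name="thainer", grouped_entities=True)
#   tagger.get_ner("ทดสอบระบบ")            # -> list of (word, IOB tag) tuples
#   tagger.get_ner("ทดสอบระบบ", tag=True)  # -> string with HTML-like tags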
class NamedEntityRecognition:
def __init__(self, model: str ="pythainlp/thainer-corpus-v2-base-model") -> None:
"""
        This class tags named entities from text, in IOB format.
Powered by wangchanberta from VISTEC-depa\
AI Research Institute of Thailand
        :param str model: The name of a wangchanberta-based pretrained model.
"""
from transformers import AutoTokenizer
from transformers import AutoModelForTokenClassification
self.tokenizer = AutoTokenizer.from_pretrained(model)
self.model = AutoModelForTokenClassification.from_pretrained(model)
def _fix_span_error(self, words, ner):
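        # Decode each subword id and pair it with its predicted tag, skipping
        # SentencePiece special tokens and mapping "<_>" back to a space.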
        _ner = ner
        _new_tag = []
        for i, j in zip(words, _ner):
            i = self.tokenizer.decode(i)
            if i.isspace() and j.startswith("B-"):
                j = "O"
            if i == "" or i == "<s>" or i == "</s>":
                continue
            if i == "<_>":
                i = " "
            _new_tag.append((i, j))
        return _new_tag
def get_ner(
self, text: str, pos: bool= False,tag: bool = False
) -> Union[List[Tuple[str, str]], str]:
"""
        This function tags named entities from text, in IOB format.
Powered by wangchanberta from VISTEC-depa\
AI Research Institute of Thailand
:param str text: text in Thai to be tagged
:param bool tag: output like html tag.
:return: a list of tuple associated with tokenized word group, NER tag, \
and output like html tag (if the parameter `tag` is \
specified as `True`). \
Otherwise, return a list of tuple associated with tokenized \
word and NER tag
        :rtype: Union[List[Tuple[str, str]], str]
"""
import torch
if pos:
            warnings.warn("This model does not support POS tagging and will not output POS tags.")
words_token = word_tokenize(text.replace(" ", "<_>"))
inputs=self.tokenizer(words_token,is_split_into_words=True,return_tensors="pt")
ids = inputs["input_ids"]
mask = inputs["attention_mask"]
# forward pass
outputs = self.model(ids, attention_mask=mask)
logits = outputs[0]
predictions = torch.argmax(logits, dim=2)
predicted_token_class = [self.model.config.id2label[t.item()] for t in predictions[0]]
ner_tag=self._fix_span_error(inputs['input_ids'][0],predicted_token_class)
if tag:
temp = ""
sent = ""
for idx, (word, ner) in enumerate(ner_tag):
if ner.startswith("B-") and temp != "":
sent += "</" + temp + ">"
temp = ner[2:]
sent += "<" + temp + ">"
elif ner.startswith("B-"):
temp = ner[2:]
sent += "<" + temp + ">"
elif ner == "O" and temp != "":
sent += "</" + temp + ">"
temp = ""
sent += word
if idx == len(ner_tag) - 1 and temp != "":
sent += "</" + temp + ">"
return sent
return ner_tag
def segment(text: str) -> List[str]:
"""
    Subword tokenization using the SentencePiece tokenizer from the wangchanberta model.
:param str text: text to be tokenized
:return: list of subwords
:rtype: list[str]
"""
if not text or not isinstance(text, str):
return []
return _tokenizer.tokenize(text)
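# Usage sketch for NamedEntityRecognition and segment (illustrative only; the
# models are downloaded from the Hugging Face Hub on first use):
#
#   ner = NamedEntityRecognition()  # defaults to pythainlp/thainer-corpus-v2-base-model
#   ner.get_ner("ทดสอบระบบ")        # -> list of (word, IOB tag) tuples
#   segment("ทดสอบระบบ")            # -> list of SentencePiece subword strings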
pythainlp-dev/pythainlp/word_vector/__init__.py
# -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
thai2fit - Thai word vector.
Initial code from https://github.com/cstorm125/thai2fit
"""
__all__ = [
"WordVector",
]
from pythainlp.word_vector.core import (
WordVector,
)
pythainlp-dev/pythainlp/word_vector/core.py
# -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Tuple
from gensim.models import KeyedVectors
from gensim.models.keyedvectors import Word2VecKeyedVectors
from numpy import ndarray, zeros
from pythainlp.corpus import get_corpus_path
from pythainlp.tokenize import THAI2FIT_TOKENIZER, word_tokenize
WV_DIM = 300 # word vector dimension
_MODEL_NAME = "thai2fit_wv"
_TK_SP = "xxspace"
_TK_EOL = "xxeol"
class WordVector:
"""
Word Vector class
:param str model_name: model name
**Options for model_name**
* *thai2fit_wv* (default) - word vector from thai2fit
* *ltw2v* - word vector from LTW2V: The Large Thai Word2Vec v0.1
* *ltw2v_v1.0_15_window* - word vector from LTW2V v1.0 and 15 window
* *ltw2v_v1.0_5_window* - word vector from LTW2V v1.0 and 5 window
"""
def __init__(self, model_name: str = "thai2fit_wv") -> None:
"""
Word Vector class
:param str model_name: model name
**Options for model_name**
* *thai2fit_wv* (default) - word vector from thai2fit
* *ltw2v* - word vector from LTW2V: The Large Thai Word2Vec
* *ltw2v_v1.0_15_window* - word2vec from LTW2V 1.0 and 15 window
* *ltw2v_v1.0_5_window* - word2vec from LTW2V v1.0 and 5 window
"""
self.load_wordvector(model_name)
def load_wordvector(self, model_name: str):
"""
Load word vector model.
:param str model_name: model name
"""
self.model_name = model_name
self.model = KeyedVectors.load_word2vec_format(
get_corpus_path(self.model_name),
binary=True,
unicode_errors="ignore",
)
self.WV_DIM = self.model.vector_size
if self.model_name == "thai2fit_wv":
self.tokenize = THAI2FIT_TOKENIZER.word_tokenize
else:
self.tokenize = word_tokenize
def get_model(self) -> Word2VecKeyedVectors:
"""
Get word vector model.
:return: `gensim` word2vec model
:rtype: gensim.models.keyedvectors.Word2VecKeyedVectors
"""
return self.model
def doesnt_match(self, words: List[str]) -> str:
"""
        This function returns the one word that is most unrelated to the other words
in the list. We use the function :func:`doesnt_match`
from :mod:`gensim`.
:param list words: a list of words
:raises KeyError: if there is any word in `positive` or `negative`
not in the vocabulary of the model.
        :return: the word that is most unrelated
        :rtype: str
:Note:
* If a word in `words` is not in the vocabulary, :class:`KeyError`
will be raised.
:Example:
Pick the word "พริกไทย" (name of food) out of the list of meals
("อาหารเช้า", "อาหารเที่ยง", "อาหารเย็น").
>>> from pythainlp.word_vector import WordVector
>>>
>>> wv = WordVector()
>>> words = ['อาหารเช้า', 'อาหารเที่ยง', 'อาหารเย็น', 'พริกไทย']
>>> wv.doesnt_match(words)
พริกไทย
Pick the word "เรือ" (name of vehicle) out of the list of words
related to occupation ("ดีไซน์เนอร์", "พนักงานเงินเดือน", "หมอ").
>>> from pythainlp.word_vector import WordVector
>>>
>>> wv = WordVector()
>>> words = ['ดีไซน์เนอร์', 'พนักงานเงินเดือน', 'หมอ', 'เรือ']
>>> wv.doesnt_match(words)
เรือ
"""
return self.model.doesnt_match(words)
def most_similar_cosmul(
self, positive: List[str], negative: List[str]
) -> List[Tuple[str, float]]:
"""
        This function finds the top-10 words that are most similar with respect
        to two lists of words labeled as positive and negative.
The top-10 most similar words are obtained using multiplication
combination objective from Omer Levy and Yoav Goldberg
[OmerLevy_YoavGoldberg_2014]_.
We use the function :func:`gensim.most_similar_cosmul` directly from
:mod:`gensim`.
:param list positive: a list of words to add
        :param list negative: a list of words to subtract
:raises KeyError: if there is any word in `positive` or `negative`
not in the vocabulary of the model.
:return: list of top-10 most similar words and its similarity score
:rtype: list[tuple[str,float]]
:Note:
* With a single word in the positive list, it will find the
most similar words to the word given (similar
to :func:`gensim.most_similar`)
* If a word in `positive` or `negative` is not in the vocabulary,
:class:`KeyError` will be raised.
:Example:
Find the **top-10** most similar words to the word: "แม่น้ำ".
>>> from pythainlp.word_vector import WordVector
>>>
>>> wv = WordVector()
>>> list_positive = ['แม่น้ำ']
>>> list_negative = []
>>> wv.most_similar_cosmul(list_positive, list_negative)
[('ลำน้ำ', 0.8206598162651062), ('ทะเลสาบ', 0.775945782661438),
('ลุ่มน้ำ', 0.7490593194961548), ('คลอง', 0.7471904754638672),
('ปากแม่น้ำ', 0.7354257106781006), ('ฝั่งแม่น้ำ', 0.7120099067687988),
('ทะเล', 0.7030453681945801), ('ริมแม่น้ำ', 0.7015200257301331),
('แหล่งน้ำ', 0.6997432112693787), ('ภูเขา', 0.6960948705673218)]
Find the **top-10** most similar words to the words: "นายก",
"รัฐมนตรี", and "ประเทศ".
>>> from pythainlp.word_vector import WordVector
>>>
>>> wv = WordVector()
>>> list_positive = ['นายก', 'รัฐมนตรี', 'ประเทศ']
>>> list_negative = []
>>> wv.most_similar_cosmul(list_positive, list_negative)
[('รองนายกรัฐมนตรี', 0.2730445861816406),
('เอกอัครราชทูต', 0.26500266790390015),
('นายกรัฐมนตรี', 0.2649088203907013),
('ผู้ว่าราชการจังหวัด', 0.25119125843048096),
('ผู้ว่าการ', 0.2510434687137604), ('เลขาธิการ', 0.24824175238609314),
('ผู้ว่า', 0.2453523576259613), ('ประธานกรรมการ', 0.24147476255893707),
('รองประธาน', 0.24123257398605347), ('สมาชิกวุฒิสภา',
0.2405330240726471)]
Find the **top-10** most similar words when having **only** positive
list and **both** positive and negative lists.
>>> from pythainlp.word_vector import WordVector
>>>
>>> wv = WordVector()
>>> list_positive = ['ประเทศ', 'ไทย', 'จีน', 'ญี่ปุ่น']
>>> list_negative = []
>>> wv.most_similar_cosmul(list_positive, list_negative)
[('ประเทศจีน', 0.22022421658039093), ('เกาหลี', 0.2196873426437378),
('สหรัฐอเมริกา', 0.21660110354423523),
('ประเทศญี่ปุ่น', 0.21205860376358032),
('ประเทศไทย', 0.21159221231937408), ('เกาหลีใต้',
0.20321202278137207),
('อังกฤษ', 0.19610872864723206), ('ฮ่องกง', 0.1928885132074356),
('ฝรั่งเศส', 0.18383873999118805), ('พม่า', 0.18369348347187042)]
>>>
>>> list_positive = ['ประเทศ', 'ไทย', 'จีน', 'ญี่ปุ่น']
>>> list_negative = ['อเมริกา']
>>> wv.most_similar_cosmul(list_positive, list_negative)
[('ประเทศไทย', 0.3278159201145172), ('เกาหลี', 0.3201899230480194),
('ประเทศจีน', 0.31755179166793823), ('พม่า', 0.30845439434051514),
('ประเทศญี่ปุ่น', 0.306713730096817),
('เกาหลีใต้', 0.3003999888896942),
('ลาว', 0.2995176911354065), ('คนไทย', 0.2885020673274994),
('เวียดนาม', 0.2878379821777344), ('ชาวไทย', 0.28480708599090576)]
        The function raises :class:`KeyError` when the term "เมนูอาหารไทย"
is not in the vocabulary.
>>> from pythainlp.word_vector import WordVector
>>>
>>> wv = WordVector()
>>> list_positive = ['เมนูอาหารไทย']
>>> list_negative = []
>>> wv.most_similar_cosmul(list_positive, list_negative)
KeyError: "word 'เมนูอาหารไทย' not in vocabulary"
"""
return self.model.most_similar_cosmul(
positive=positive, negative=negative
)
def similarity(self, word1: str, word2: str) -> float:
"""
        This function computes the cosine similarity between two words.
:param str word1: first word to be compared
:param str word2: second word to be compared
:raises KeyError: if either `word1` or `word2` is not in the
vocabulary of the model.
:return: the cosine similarity between the two word vectors
:rtype: float
:Note:
* If a word in `word1` or `word2` is not in the vocabulary,
:class:`KeyError` will be raised.
:Example:
        Compute the cosine similarity between two words: "รถไฟ" and "รถไฟฟ้า"
(train and electric train).
>>> from pythainlp.word_vector import WordVector
>>> wv = WordVector()
>>> wv.similarity('รถไฟ', 'รถไฟฟ้า')
0.43387136
        Compute the cosine similarity between two words: "เสือดาว" and "รถไฟฟ้า"
(leopard and electric train).
>>> from pythainlp.word_vector import WordVector
>>>
>>> wv = WordVector()
>>> wv.similarity('เสือดาว', 'รถไฟฟ้า')
0.04300258
"""
return self.model.similarity(word1, word2)
def sentence_vectorizer(self, text: str, use_mean: bool = True) -> ndarray:
"""
        This function converts a Thai sentence into a vector.
        Specifically, it first tokenizes the text and maps each token
        to a word vector from the model.
        Then, the word vectors are aggregated into one vector of 300 dimensions
        by calculating either the mean or the summation of all word vectors.
:param str text: text input
:param bool use_mean: if `True` aggregate word vectors with mean of all
word vectors. Otherwise, aggregate with
summation of all word vectors
:return: 300-dimension vector representing the given sentence
in form of :mod:`numpy` array
:rtype: :class:`numpy.ndarray((1,300))`
:Example:
Vectorize the sentence, "อ้วนเสี้ยวเข้ายึดแคว้นกิจิ๋ว ในปี พ.ศ. 735",
        into one sentence vector with two aggregation methods: mean
and summation.
>>> from pythainlp.word_vector import WordVector
>>>
>>> wv = WordVector()
>>> sentence = 'อ้วนเสี้ยวเข้ายึดแคว้นกิจิ๋ว ในปี พ.ศ. 735'
>>> wv.sentence_vectorizer(sentence, use_mean=True)
array([[-0.00421414, -0.08881307, 0.05081136, -0.05632929,
-0.06607185, 0.03059357, -0.113882 , -0.00074836, 0.05035743,
0.02914307,
...
0.02893357, 0.11327957, 0.04562086, -0.05015393, 0.11641257,
0.32304936, -0.05054322, 0.03639471, -0.06531371, 0.05048079]])
>>>
>>> wv.sentence_vectorizer(sentence, use_mean=False)
array([[-0.05899798, -1.24338295, 0.711359 , -0.78861002,
-0.92500597, 0.42831 , -1.59434797, -0.01047703, 0.705004
, 0.40800299,
...
0.40506999, 1.58591403, 0.63869202, -0.702155 , 1.62977601,
4.52269109, -0.70760502, 0.50952601, -0.914392 , 0.70673105]])
"""
vec = zeros((1, self.WV_DIM))
words = self.tokenize(text)
len_words = len(words)
if not len_words:
return vec
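        # Substitute thai2fit's special tokens for spaces and newlines, then sum
        # the vectors of in-vocabulary words (out-of-vocabulary words add zero).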
for word in words:
if word == " " and self.model_name == "thai2fit_wv":
word = _TK_SP
elif word == "\n" and self.model_name == "thai2fit_wv":
word = _TK_EOL
if word in self.model.index_to_key:
vec += self.model.get_vector(word)
if use_mean:
vec /= len_words
return vec
pythainlp-dev/pythainlp/wsd/__init__.py
# -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Thai Word Sense Disambiguation (WSD)
"""
__all__ = ["get_sense"]
from pythainlp.wsd.core import get_sense
pythainlp-dev/pythainlp/wsd/core.py
# -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
from typing import List, Tuple, Union
from pythainlp.corpus import thai_words
from pythainlp.tokenize import Tokenizer
from pythainlp.util.trie import Trie, dict_trie
from pythainlp.corpus import get_corpus_path, thai_wsd_dict
_wsd_dict = thai_wsd_dict()
_mean_all = {}
for i,j in zip(_wsd_dict["word"], _wsd_dict["meaning"]):
_mean_all[i]=j
_all_word = set(list(_mean_all.keys()))
_TRIE = Trie(list(_all_word))
_word_cut = Tokenizer(custom_dict=_TRIE)
class _SentenceTransformersModel:
def __init__(self, model:str="sentence-transformers/paraphrase-multilingual-mpnet-base-v2", device:str="cpu"):
from sentence_transformers import SentenceTransformer
self.device = device
self.model_name = model
self.model = SentenceTransformer(self.model_name, device=self.device)
def change_device(self, device: str):
from sentence_transformers import SentenceTransformer
self.device = device
self.model = SentenceTransformer(self.model_name, device=self.device)
def get_score(self, sentences1: str,sentences2: str)->float:
from sentence_transformers import util
embedding_1= self.model.encode(sentences1, convert_to_tensor=True)
embedding_2 = self.model.encode(sentences2, convert_to_tensor=True)
return 1-util.pytorch_cos_sim(embedding_1, embedding_2)[0][0].item()
_MODEL = None
def get_sense(
sentence: str,
word: str,
device:str="cpu",
custom_dict: Union[dict,None]=None,
custom_tokenizer: Tokenizer=_word_cut,
) -> Union[List[Tuple[str, float]], None]:
"""
    Get the word sense of a word from a sentence.
    This function returns each candidate definition of the word together with
    its distance from the context of the sentence.
:param str sentence: Thai sentence
:param str word: Thai word
:param str device: device for running model.
:param dict custom_dict: Thai dictionary {"word":["definition",..]}
    :param Tokenizer custom_tokenizer: Tokenizer used to tokenize the sentence into words.
    :return: list of (definition, distance) tuples, or None if the word is not in the dictionary
:rtype: Union[List[Tuple[str, float]], None]
    We get the idea from `Context-Aware Semantic Similarity Measurement for Unsupervised \
    Word Sense Disambiguation <https://arxiv.org/abs/2305.03520>`_ to build the get_sense function.
    For the Thai dictionary, we use the Thai dictionary from Wiktionary.
See more `thai_dict <https://pythainlp.github.io/pythainlp-corpus/thai_dict.html>`_.
For the model, We use Sentence Transformers model from \
sentence-transformers/paraphrase-multilingual-mpnet-base-v2 for \
Unsupervised Word Sense Disambiguation.
:Example:
::
from pythainlp.wsd import get_sense
print(get_sense("เขากำลังอบขนมคุกกี้","คุกกี้"))
# output:
# [('โปรแกรมคอมพิวเตอร์ใช้ในทางอินเทอร์เน็ตสำหรับเก็บข้อมูลของผู้ใช้งาน',
# 0.0974416732788086),
# ('ชื่อขนมชนิดหนึ่งจำพวกขนมเค้ก แต่ทำเป็นชิ้นเล็ก ๆ แบน ๆ แล้วอบให้กรอบ',
# 0.09319090843200684)]
print(get_sense("เว็บนี้ต้องการคุกกี้ในการทำงาน","คุกกี้"))
# output:
# [('โปรแกรมคอมพิวเตอร์ใช้ในทางอินเทอร์เน็ตสำหรับเก็บข้อมูลของผู้ใช้งาน',
# 0.1005704402923584),
# ('ชื่อขนมชนิดหนึ่งจำพวกขนมเค้ก แต่ทำเป็นชิ้นเล็ก ๆ แบน ๆ แล้วอบให้กรอบ',
# 0.12473666667938232)]
"""
global _MODEL
    if custom_dict is None:
custom_dict = _mean_all
_w = custom_tokenizer.word_tokenize(sentence)
if word not in set(custom_dict.keys()) or word not in sentence:
return None
    if _MODEL is None:
_MODEL = _SentenceTransformersModel(device=device)
    if _MODEL.device != device:
_MODEL.change_device(device=device)
_temp_mean = custom_dict[word]
_temp =[]
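    # For each candidate definition, rebuild the sentence with the target word
    # annotated by that definition, then score its distance to the original sentence.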
for i in _temp_mean:
_temp_2 = []
for j in _w:
if j == word:
j = word+f" ({word} ความหมาย '"+i.replace('(',"").replace(')',"")+"') "
_temp_2.append(j)
_temp.append((i,_MODEL.get_score(sentence,''.join(_temp_2))))
    return _temp
pythainlp-dev/tests/__init__.py
# -*- coding: utf-8 -*-
"""
Unit test.
Each file in tests/ is for each main package.
"""
import sys
import unittest
sys.path.append("../pythainlp")
loader = unittest.TestLoader()
testSuite = loader.discover("tests")
testRunner = unittest.TextTestRunner(verbosity=1)
testRunner.run(testSuite)
pythainlp-dev/tests/test_augment.py
# -*- coding: utf-8 -*-
import unittest
from pythainlp.augment import WordNetAug
from pythainlp.augment.wordnet import postype2wordnet
from pythainlp.augment.lm import Thai2transformersAug
from pythainlp.augment.word2vec.bpemb_wv import BPEmbAug
from pythainlp.augment.word2vec import (
Thai2fitAug,
LTW2VAug
)
import nltk
class TestTextaugmentPackage(unittest.TestCase):
def setUp(self):
self.text = "เรารักคุณมากที่สุดในโลก"
self.text2 = "เราอยู่ที่มหาวิทยาลัยขอนแก่น"
def test_WordNetAug(self):
nltk.download('omw-1.4', force=True) # load wordnet
wordnetaug = WordNetAug()
self.assertIsNotNone(wordnetaug.augment(self.text))
self.assertIsNotNone(wordnetaug.find_synonyms("ผม", pos=None))
self.assertIsNotNone(wordnetaug.augment(self.text, postag=False))
self.assertIsNone(postype2wordnet('n', 'abc'))
self.assertIsNotNone(postype2wordnet('NOUN', 'orchid'))
def test_Thai2fitAug(self):
_aug = Thai2fitAug()
self.assertIsNotNone(_aug.tokenizer(self.text))
self.assertIsNotNone(_aug.augment(self.text, n_sent=3, p=0.5))
def test_BPEmbAug(self):
_aug = BPEmbAug()
self.assertIsNotNone(_aug.tokenizer(self.text))
self.assertIsNotNone(_aug.augment(self.text, n_sent=3, p=0.5))
def test_LTW2VAug(self):
_aug = LTW2VAug()
self.assertIsNotNone(_aug.tokenizer(self.text))
self.assertIsNotNone(_aug.augment(self.text, n_sent=3, p=0.5))
def test_Thai2transformersAug(self):
_aug = Thai2transformersAug()
self.assertIsNotNone(_aug.augment(self.text2, num_replace_tokens=1))
pythainlp-dev/tests/test_benchmarks.py
import unittest
import numpy as np
import yaml
from pythainlp.benchmarks import word_tokenization
with open("./tests/data/sentences.yml", "r", encoding="utf8") as stream:
TEST_DATA = yaml.safe_load(stream)
class TestBenchmarksPackage(unittest.TestCase):
def test_preprocessing(self):
self.assertIsNotNone(
word_tokenization.preprocessing(
txt="ทดสอบ การ ทำ ความสะอาด ข้อมูล<tag>ok</tag>"
)
)
def test_benchmark_not_none(self):
self.assertIsNotNone(
word_tokenization.benchmark(
["วัน", "จัน", "ทร์", "สี", "เหลือง"],
["วัน", "จันทร์", "สี", "เหลือง"],
)
)
def test_binary_representation(self):
sentence = "อากาศ|ร้อน|มาก|ครับ"
rept = word_tokenization._binary_representation(sentence)
self.assertEqual(
[1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0], rept.tolist()
)
def test_compute_stats(self):
for pair in TEST_DATA["sentences"]:
exp, act = pair["expected"], pair["actual"]
result = word_tokenization.compute_stats(
word_tokenization.preprocessing(exp),
word_tokenization.preprocessing(act),
)
self.assertIsNotNone(result)
def test_benchmark(self):
expected = []
actual = []
for pair in TEST_DATA["sentences"]:
expected.append(pair["expected"])
actual.append(pair["actual"])
df = word_tokenization.benchmark(expected, actual)
self.assertIsNotNone(df)
def test_count_correctly_tokenised_words(self):
for d in TEST_DATA["binary_sentences"]:
sample = np.array(list(d["actual"])).astype(int)
ref_sample = np.array(list(d["expected"])).astype(int)
sb = list(word_tokenization._find_word_boudaries(sample))
rb = list(word_tokenization._find_word_boudaries(ref_sample))
# in binary [{0, 1}, ...]
correctly_tokenized_words = word_tokenization._find_words_correctly_tokenised(
rb, sb
)
self.assertEqual(
np.sum(correctly_tokenized_words), d["expected_count"]
)
def test_words_correctly_tokenised(self):
r = [(0, 2), (2, 10), (10, 12)]
s = [(0, 10), (10, 12)]
expected = "01"
labels = word_tokenization._find_words_correctly_tokenised(r, s)
self.assertEqual(expected, "".join(np.array(labels).astype(str)))
def test_flatten_result(self):
result = dict(key1=dict(v1=6), key2=dict(v2=7))
actual = word_tokenization._flatten_result(result)
self.assertEqual(actual, {"key1:v1": 6, "key2:v2": 7})
pythainlp-dev/tests/test_cli.py
# -*- coding: utf-8 -*-
import unittest
from argparse import ArgumentError
from types import ModuleType
from pythainlp import __main__, cli
class TestMainPackage(unittest.TestCase):
def test_cli_main(self):
# call with no argument, should exit with 2
with self.assertRaises(SystemExit) as ex:
__main__.main()
self.assertEqual(ex.exception.code, 2)
with self.assertRaises((ArgumentError, SystemExit)):
self.assertIsNone(__main__.main(["thainlp"]))
with self.assertRaises((ArgumentError, SystemExit)):
self.assertIsNone(
__main__.main(["thainlp", "NOT_EXIST", "command"])
)
self.assertIsNone(__main__.main(["thainlp", "data", "path"]))
def test_cli_benchmark(self):
self.assertIsInstance(getattr(cli, "benchmark"), ModuleType)
with self.assertRaises(SystemExit) as ex:
cli.data.App(["thainlp", "benchmark"])
self.assertEqual(ex.exception.code, 2)
self.assertIsNotNone(
cli.benchmark.App(
[
"thainlp",
"benchmark",
"word-tokenization",
"--input-file",
"./tests/data/input.txt",
"--test-file",
"./tests/data/test.txt",
"--save-details"
]
)
)
def test_cli_data(self):
self.assertIsInstance(getattr(cli, "data"), ModuleType)
with self.assertRaises(SystemExit) as ex:
cli.data.App(["thainlp", "data"])
self.assertEqual(ex.exception.code, 2)
self.assertIsNotNone(cli.data.App(["thainlp", "data", "catalog"]))
self.assertIsNotNone(cli.data.App(["thainlp", "data", "path"]))
self.assertIsNotNone(cli.data.App(["thainlp", "data", "get", "test"]))
self.assertIsNotNone(cli.data.App(["thainlp", "data", "info", "test"]))
self.assertIsNotNone(cli.data.App(["thainlp", "data", "rm", "test"]))
self.assertIsNotNone(
cli.data.App(["thainlp", "data", "get", "NOT_EXIST"])
)
self.assertIsNotNone(
cli.data.App(["thainlp", "data", "info", "NOT_EXIST"])
)
self.assertIsNotNone(
cli.data.App(["thainlp", "data", "rm", "NOT_EXIST"])
)
def test_cli_soundex(self):
self.assertIsInstance(getattr(cli, "soundex"), ModuleType)
with self.assertRaises(SystemExit) as ex:
cli.data.App(["thainlp", "soundex"])
self.assertEqual(ex.exception.code, 2)
self.assertIsNotNone(cli.soundex.App(["thainlp", "soundex", "ทดสอบ"]))
def test_cli_tag(self):
self.assertIsInstance(getattr(cli, "tag"), ModuleType)
with self.assertRaises(SystemExit) as ex:
cli.data.App(["thainlp", "tag"])
self.assertEqual(ex.exception.code, 2)
self.assertIsNotNone(
cli.tag.App(
[
"thainlp",
"tag",
"pos",
"-s",
" ",
"มอเตอร์ไซค์ ความว่างเปล่า",
]
)
)
self.assertIsNotNone(
cli.tag.App(
[
"thainlp",
"tag",
"role",
"-s",
" ",
"มอเตอร์ไซค์ ความว่างเปล่า",
]
)
)
def test_cli_tokenize(self):
self.assertIsInstance(getattr(cli, "tokenize"), ModuleType)
with self.assertRaises(SystemExit) as ex:
cli.data.App(["thainlp", "tokenize"])
self.assertEqual(ex.exception.code, 2)
self.assertIsNotNone(
cli.tokenize.App(
["thainlp", "tokenize", "NOT_EXIST", "ไม่มีอยู่ จริง"]
)
)
self.assertIsNotNone(
cli.tokenize.App(
[
"thainlp",
"tokenize",
"subword",
"-s",
"|",
"ถ้าฉันยิงกระต่ายได้ ฉันก็ยิงฟาสซิสต์ได้",
]
)
)
self.assertIsNotNone(
cli.tokenize.App(
[
"thainlp",
"tokenize",
"syllable",
"-s",
"|",
"-w",
"ถ้าฉันยิงกระต่ายได้ ฉันก็ยิงฟาสซิสต์ได้",
]
)
)
self.assertIsNotNone(
cli.tokenize.App(
[
"thainlp",
"tokenize",
"word",
"-nw",
"-a",
"newmm",
"-s",
"|",
"ถ้าฉันยิงกระต่ายได้ ฉันก็ยิงฟาสซิสต์ได้",
]
)
)
self.assertIsNotNone(
cli.tokenize.App(
[
"thainlp",
"tokenize",
"sent",
"-s",
"|",
(
"ถ้าฉันยิงกระต่ายได้ ฉันก็ยิงฟาสซิสต์ได้"
"กระสุนสำหรับสมองของคุณวันนี้"
"แต่คุณก็จะลืมมันไปทั้งหมดอีกครั้ง"
),
]
)
)
| 5,525 | 29.7 | 79 | py |
pythainlp-dev/tests/test_cls.py | pythainlp-dev/tests/test_cls.py | # -*- coding: utf-8 -*-
import unittest
from pythainlp.cls import GzipModel
class TestClsPackage(unittest.TestCase):
def test_GzipModel(self):
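        # GzipModel is a parameter-free, compression-based classifier: it labels
        # new text by nearest-neighbour search over gzip compression distance
        # to the labelled training examples (k sets the number of neighbours).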
training_data = [
("รายละเอียดตามนี้เลยค่าา ^^", "Neutral"),
("กลัวพวกมึงหาย อดกินบาบิก้อน", "Neutral"),
("บริการแย่มากก เป็นหมอได้ไง😤", "Negative"),
("ขับรถแย่มาก", "Negative"),
("ดีนะครับ", "Positive"),
("ลองแล้วรสนี้อร่อย... ชอบๆ", "Positive"),
("ฉันรู้สึกโกรธ เวลามือถือแบตหมด", "Negative"),
("เธอภูมิใจที่ได้ทำสิ่งดี ๆ และดีใจกับเด็ก ๆ", "Positive"),
("นี่เป็นบทความหนึ่ง", "Neutral")
]
model = GzipModel(training_data)
self.assertEqual(model.predict("ฉันดีใจ", k=1), "Positive")
| 757 | 35.095238 | 69 | py |
pythainlp-dev/tests/test_coref.py | pythainlp-dev/tests/test_coref.py | # -*- coding: utf-8 -*-
import unittest
from pythainlp.coref import coreference_resolution
class TestParsePackage(unittest.TestCase):
def test_coreference_resolution(self):
pass
# self.assertIsNotNone(
# coreference_resolution(
# "Bill Gates ได้รับวัคซีน COVID-19 เข็มแรกแล้ว ระบุ ผมรู้สึกสบายมาก"
# )
# ) | 376 | 25.928571 | 85 | py |
pythainlp-dev/tests/test_corpus.py | pythainlp-dev/tests/test_corpus.py | # -*- coding: utf-8 -*-
import unittest
from nltk.corpus import wordnet as wn
from pythainlp.corpus import (
conceptnet,
countries,
download,
get_corpus_db,
get_corpus_db_detail,
get_corpus_default_db,
get_corpus_path,
oscar,
provinces,
remove,
thai_family_names,
thai_female_names,
thai_male_names,
thai_negations,
thai_stopwords,
thai_syllables,
thai_words,
tnc,
ttc,
wordnet,
)
from pythainlp.corpus.util import revise_newmm_default_wordset
from requests import Response
import nltk
import os
class TestCorpusPackage(unittest.TestCase):
def test_conceptnet(self):
self.assertIsNotNone(conceptnet.edges("รัก"))
def test_corpus(self):
self.assertIsInstance(thai_negations(), frozenset)
self.assertIsInstance(thai_stopwords(), frozenset)
self.assertIsInstance(thai_syllables(), frozenset)
self.assertIsInstance(thai_words(), frozenset)
self.assertIsInstance(countries(), frozenset)
self.assertIsInstance(provinces(), frozenset)
self.assertIsInstance(provinces(details=True), list)
self.assertEqual(
len(provinces(details=False)), len(provinces(details=True))
)
self.assertIsInstance(thai_family_names(), frozenset)
self.assertIsInstance(list(thai_family_names())[0], str)
self.assertIsInstance(thai_female_names(), frozenset)
self.assertIsInstance(thai_male_names(), frozenset)
self.assertIsInstance(
get_corpus_db("https://example.com/XXXXXX0lkjasd/SXfmskdjKKXXX"),
Response,
) # URL does not exist, should get 404 response
self.assertIsNone(get_corpus_db("XXXlkja3sfdXX")) # Invalid URL
self.assertEqual(
get_corpus_db_detail("XXXmx3KSXX"), {}
) # corpus does not exist
self.assertEqual(
get_corpus_db_detail("XXXmx3KSXX", version="0.2"), {}
) # corpus does not exist
self.assertTrue(download("test")) # download the first time
self.assertTrue(download(name="test", force=True)) # force download
self.assertTrue(download(name="test")) # try download existing
self.assertFalse(
download(name="test", url="wrongurl")
) # URL not exist
self.assertFalse(
download(name="XxxXXxxx817d37sf")
) # corpus name not exist
self.assertIsNotNone(get_corpus_db_detail("test")) # corpus exists
self.assertIsNotNone(get_corpus_path("test")) # corpus exists
self.assertIsNone(get_corpus_default_db("test"))
self.assertIsNotNone(get_corpus_default_db("thainer", "1.5.1"))
self.assertIsNotNone(get_corpus_default_db("thainer"))
self.assertIsNone(get_corpus_default_db("thainer", "1.2"))
self.assertTrue(remove("test")) # remove existing
self.assertFalse(remove("test")) # remove non-existing
self.assertIsNone(get_corpus_path("XXXkdjfBzc")) # query non-existing
self.assertFalse(download(name="test", version="0.0"))
self.assertFalse(download(name="test", version="0.0.0"))
self.assertFalse(download(name="test", version="0.0.1"))
self.assertFalse(download(name="test", version="0.0.2"))
self.assertFalse(download(name="test", version="0.0.3"))
self.assertFalse(download(name="test", version="0.0.4"))
self.assertIsNotNone(download(name="test", version="0.0.5"))
self.assertTrue(download("test"))
self.assertIsNotNone(remove("test")) # remove existing
self.assertIsNotNone(download(name="test", version="0.0.6"))
self.assertIsNotNone(download(name="test", version="0.0.7"))
self.assertIsNotNone(download(name="test", version="0.0.8"))
self.assertIsNotNone(download(name="test", version="0.0.9"))
self.assertIsNotNone(download(name="test", version="0.0.10"))
with self.assertRaises(Exception) as context:
# Force re-downloading since the corpus already exists
self.assertIsNotNone(download(
name="test", version="0.0.11", force=True
))
self.assertTrue(
"Hash does not match expected."
in
str(context.exception)
)
self.assertIsNotNone(download(name="test", version="0.1"))
self.assertIsNotNone(remove("test"))
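    # OSCAR (web-crawl corpus), TNC (Thai National Corpus), and TTC (Thai
    # Textbook Corpus) expose word-frequency lists, exercised below.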
def test_oscar(self):
self.assertIsNotNone(oscar.word_freqs())
self.assertIsNotNone(oscar.unigram_word_freqs())
def test_tnc(self):
self.assertIsNotNone(tnc.word_freqs())
self.assertIsNotNone(tnc.unigram_word_freqs())
self.assertIsNotNone(tnc.bigram_word_freqs())
self.assertIsNotNone(tnc.trigram_word_freqs())
def test_ttc(self):
self.assertIsNotNone(ttc.word_freqs())
self.assertIsNotNone(ttc.unigram_word_freqs())
def test_wordnet(self):
nltk.download('omw-1.4', force=True) # load wordnet
self.assertIsNotNone(wordnet.langs())
self.assertIn("tha", wordnet.langs())
self.assertEqual(
wordnet.synset("spy.n.01").lemma_names("tha"), ["สปาย", "สายลับ"]
)
self.assertIsNotNone(wordnet.synsets("นก"))
self.assertIsNotNone(wordnet.all_synsets(pos=wn.ADJ))
self.assertIsNotNone(wordnet.lemmas("นก"))
self.assertIsNotNone(wordnet.all_lemma_names(pos=wn.ADV))
self.assertIsNotNone(wordnet.lemma("cat.n.01.cat"))
self.assertEqual(wordnet.morphy("dogs"), "dog")
bird = wordnet.synset("bird.n.01")
mouse = wordnet.synset("mouse.n.01")
self.assertEqual(
wordnet.path_similarity(bird, mouse), bird.path_similarity(mouse)
)
self.assertEqual(
wordnet.wup_similarity(bird, mouse), bird.wup_similarity(mouse)
)
self.assertEqual(
wordnet.lch_similarity(bird, mouse), bird.lch_similarity(mouse)
)
cat_key = wordnet.synsets("แมว")[0].lemmas()[0].key()
self.assertIsNotNone(wordnet.lemma_from_key(cat_key))
def test_revise_wordset(self):
training_data = [
["ถวิล อุดล", " ", "เป็น", "นักการเมือง", "หนึ่ง", "ใน"],
["สี่เสืออีสาน", " ", "ซึ่ง", "ประกอบ", "ด้วย", "ตัว", "นายถวิล"],
["เอง", " ", "นายทองอินทร์ ภูริพัฒน์", " ", "นายเตียง ศิริขันธ์"],
[" ", "และ", "นายจำลอง ดาวเรือง", " ", "และ", "เป็น", "รัฐมนตรี"],
["ที่", "ถูก", "สังหาร", "เมื่อ", "ปี", " ", "พ.ศ.", " ", "2492"],
]
self.assertIsInstance(revise_newmm_default_wordset(training_data), set)
def test_zip(self):
_p = get_corpus_path("test_zip")
self.assertEqual(os.path.isdir(_p), True)
self.assertEqual(remove("test_zip"), True)
| 6,821 | 38.433526 | 79 | py |
pythainlp-dev/tests/test_generate.py | pythainlp-dev/tests/test_generate.py | # -*- coding: utf-8 -*-
import unittest
from pythainlp.generate import Unigram, Bigram, Trigram
from pythainlp.generate.thai2fit import gen_sentence
class TestGeneratePackage(unittest.TestCase):
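    # Unigram/Bigram/Trigram generate text by sampling from n-gram frequencies
    # of the chosen corpus (tnc/ttc/oscar); thai2fit.gen_sentence instead uses
    # the thai2fit (ULMFiT-based) language model.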
def test_unigram(self):
_tnc_unigram = Unigram("tnc")
self.assertIsNotNone(_tnc_unigram.gen_sentence("ผม"))
self.assertIsNotNone(_tnc_unigram.gen_sentence("ผม", output_str=False))
self.assertIsNotNone(_tnc_unigram.gen_sentence())
self.assertIsNotNone(_tnc_unigram.gen_sentence(duplicate=True))
_ttc_unigram = Unigram("ttc")
self.assertIsNotNone(_ttc_unigram.gen_sentence("ผม"))
self.assertIsNotNone(_ttc_unigram.gen_sentence("ผม", output_str=False))
self.assertIsNotNone(_ttc_unigram.gen_sentence())
self.assertIsNotNone(_ttc_unigram.gen_sentence(duplicate=True))
_oscar_unigram = Unigram("oscar")
self.assertIsNotNone(_oscar_unigram.gen_sentence("ผม"))
self.assertIsNotNone(
_oscar_unigram.gen_sentence("ผม", output_str=False)
)
self.assertIsNotNone(_oscar_unigram.gen_sentence())
self.assertIsNotNone(_oscar_unigram.gen_sentence(duplicate=True))
def test_bigram(self):
_bigram = Bigram()
self.assertIsNotNone(_bigram.gen_sentence("ผม"))
self.assertIsNotNone(_bigram.gen_sentence("ผม", output_str=False))
self.assertIsNotNone(_bigram.gen_sentence())
self.assertIsNotNone(_bigram.gen_sentence(duplicate=True))
def test_trigram(self):
_trigram = Trigram()
self.assertIsNotNone(_trigram.gen_sentence("ผม"))
self.assertIsNotNone(_trigram.gen_sentence("ผม", output_str=False))
self.assertIsNotNone(_trigram.gen_sentence())
self.assertIsNotNone(_trigram.gen_sentence(duplicate=True))
def test_thai2fit(self):
self.assertIsNotNone(gen_sentence("กาลครั้งหนึ่งนานมาแล้ว"))
self.assertIsNotNone(gen_sentence("กาลครั้งหนึ่งนานมาแล้ว", output_str=False))
self.assertIsNotNone(gen_sentence())
| 2,043 | 42.489362 | 86 | py |
pythainlp-dev/tests/test_khavee.py | pythainlp-dev/tests/test_khavee.py | # -*- coding: utf-8 -*-
import unittest
from pythainlp.khavee import KhaveeVerifier
kv = KhaveeVerifier()
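# KhaveeVerifier checks Thai verse: check_sara() reports the vowel sound of a
# syllable, check_marttra() its spelling section (มาตรา), is_sumpus() whether two
# syllables rhyme, check_klon() whether a stanza follows the given klon form
# (k_type), and check_aek_too() the mai ek/mai tho tone mark of a syllable.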
class TestKhaveePackage(unittest.TestCase):
def test_check_sara(self):
self.assertEqual(kv.check_sara('เริง'), 'เออ')
def test_check_marttra(self):
self.assertEqual(kv.check_marttra('สาว'), 'เกอว')
def test_is_sumpus(self):
        self.assertTrue(kv.is_sumpus('สรร', 'อัน'))
        self.assertFalse(kv.is_sumpus('สรร', 'แมว'))
def test_check_klon(self):
self.assertEqual(
kv.check_klon('''ฉันชื่อหมูกรอบ ฉันชอบกินไก่ แล้วก็วิ่งไล่ หมาชื่อนํ้าทอง ลคคนเก่ง เอ๋งเอ๋งคะนอง มีคนจับจอง เขาชื่อน้องเธียร''',k_type=4),
'The poem is correct according to the principle.'
)
self.assertEqual(
kv.check_klon('''ฉันชื่อหมูกรอบ ฉันชอบกินไก่ แล้วก็วิ่งไล่ หมาชื่อนํ้าทอง ลคคนเก่ง เอ๋งเอ๋งเสียงหมา มีคนจับจอง เขาชื่อน้องเธียร''',k_type=4),
["Cant find rhyme between paragraphs ('หมา', 'จอง')in paragraph 2", "Cant find rhyme between paragraphs ('หมา', 'ทอง')in paragraph 2"]
)
def test_check_aek_too(self):
self.assertEqual(kv.check_aek_too('ไกด์'), False)
self.assertEqual(kv.check_aek_too('ไก่'), 'aek')
self.assertEqual(kv.check_aek_too('ไก้'), 'too')
        self.assertEqual(kv.check_aek_too(['หนม', 'หน่ม', 'หน้ม']), [False, 'aek', 'too'])
| 1,383 | 38.542857 | 153 | py |
pythainlp-dev/tests/test_misspell.py | pythainlp-dev/tests/test_misspell.py | # -*- coding: utf-8 -*-
import unittest
import numpy as np
from pythainlp.tools.misspell import misspell
def _count_difference(st1, st2):
# this assumes len(st1) == len(st2)
count = 0
for i in range(len(st1)):
if st1[i] != st2[i]:
count += 1
return count
class TestTextMisspellPackage(unittest.TestCase):
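    # misspell() randomly perturbs characters in the input text; `ratio` sets
    # the approximate fraction of characters to corrupt.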
def setUp(self):
self.texts = [
"เรารักคุณมากที่สุดในโลก",
"เราอยู่ที่มหาวิทยาลัยขอนแก่น"
]
def test_misspell_naive(self):
for text in self.texts:
misspelled = misspell(text, ratio=0.1)
self.assertEqual(len(text), len(misspelled))
diff = _count_difference(text, misspelled)
self.assertGreater(diff, 0, "we have some misspells.")
def test_misspell_with_ratio_0_percent(self):
for text in self.texts:
misspelled = misspell(text, ratio=0.0)
self.assertEqual(len(text), len(misspelled))
diff = _count_difference(text, misspelled)
self.assertEqual(
diff, 0,
"we shouldn't have any misspell with ratio=0."
)
def test_misspell_with_ratio_50_percent(self):
for text in self.texts:
misspelled = misspell(text, ratio=0.5)
self.assertEqual(len(text), len(misspelled))
diff = _count_difference(text, misspelled)
self.assertLessEqual(
np.abs(diff - 0.5 * len(text)),
2,
f"expect 0.5*len(text)±2 misspells with ratio=0.5. (Δ={diff})",
)
def test_misspell_with_ratio_100_percent(self):
for text in self.texts:
misspelled = misspell(text, ratio=1)
self.assertEqual(len(text), len(misspelled))
diff = _count_difference(text, misspelled)
self.assertLessEqual(
np.abs(diff - len(text)),
2,
f"expect len(text)-2 misspells with ratio=1.5. (Δ={diff})",
)
| 2,046 | 25.934211 | 79 | py |
pythainlp-dev/tests/test_parse.py | pythainlp-dev/tests/test_parse.py | # -*- coding: utf-8 -*-
import unittest
from pythainlp.parse import dependency_parsing
class TestParsePackage(unittest.TestCase):
def test_dependency_parsing(self):
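        # dependency_parsing() returns a CoNLL-U style string by default,
        # or a list of per-token rows when tag="list".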
self.assertIsNotNone(dependency_parsing("ผมเป็นคนดี", engine="esupar"))
self.assertIsNotNone(dependency_parsing("ผมเป็นคนดี", engine="esupar", tag="list"))
self.assertIsNotNone(dependency_parsing("ผมเป็นคนดี", engine="transformers_ud"))
self.assertIsNotNone(dependency_parsing("ผมเป็นคนดี", engine="transformers_ud", tag="list"))
self.assertIsNotNone(dependency_parsing("ผมเป็นคนดี", engine="spacy_thai"))
self.assertIsNotNone(dependency_parsing("ผมเป็นคนดี", engine="spacy_thai", tag="list"))
self.assertIsNotNone(dependency_parsing("ผมเป็นคนดี", engine="ud_goeswith"))
self.assertIsNotNone(dependency_parsing("ผมเป็นคนดี", engine="ud_goeswith", tag="list"))
| 896 | 51.764706 | 100 | py |
pythainlp-dev/tests/test_soundex.py | pythainlp-dev/tests/test_soundex.py | # -*- coding: utf-8 -*-
import unittest
from pythainlp.soundex import lk82, metasound, soundex, udom83, prayut_and_somchaip
from pythainlp.soundex.sound import word_approximation, audio_vector
class TestSoundexPackage(unittest.TestCase):
def test_soundex(self):
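        # lk82 and udom83 are classic Thai soundex algorithms, metasound is a
        # Metaphone-style variant, and prayut_and_somchaip supports Thai-English
        # cross-script matching (e.g. "vp" vs "วีพี").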
self.assertIsNotNone(soundex("a", engine="lk82"))
self.assertIsNotNone(soundex("a", engine="udom83"))
self.assertIsNotNone(soundex("a", engine="metasound"))
self.assertEqual(
soundex("vp", engine="prayut_and_somchaip"),
soundex("วีพี", engine="prayut_and_somchaip")
)
self.assertIsNotNone(soundex("a", engine="XXX"))
self.assertEqual(lk82(None), "")
self.assertEqual(lk82(""), "")
self.assertEqual(lk82("เหตุ"), lk82("เหด"))
self.assertEqual(lk82("รถ"), "ร3000")
self.assertIsNotNone(lk82("เกาะ"))
self.assertIsNotNone(lk82("อุยกูร์"))
self.assertIsNotNone(lk82("หยากไย่"))
self.assertIsNotNone(lk82("หอ"))
self.assertIsNotNone(lk82("อยู่"))
self.assertIsNotNone(lk82("อู่"))
self.assertIsNotNone(lk82("อย่าง"))
self.assertIsNotNone(lk82("เหย้า"))
self.assertIsNotNone(lk82("หยุด"))
self.assertIsNotNone(lk82("หืออือ"))
self.assertEqual(lk82("น์"), "")
self.assertEqual(udom83(None), "")
self.assertEqual(udom83(""), "")
self.assertEqual(udom83("เหตุ"), udom83("เหด"))
self.assertEqual(udom83("รถ"), "ร800000")
self.assertEqual(udom83("น์"), "")
self.assertEqual(metasound(None), "")
self.assertEqual(metasound(""), "")
self.assertEqual(metasound("เหตุ"), metasound("เหด"))
self.assertEqual(metasound("รักษ์"), metasound("รัก"))
self.assertEqual(metasound("บูรณะ"), "บ550")
self.assertEqual(metasound("คน"), "ค500")
self.assertEqual(metasound("คนA"), "ค500")
self.assertEqual(metasound("ดา"), "ด000")
self.assertIsNotNone(metasound("จะ"))
self.assertIsNotNone(metasound("ปา"))
self.assertIsNotNone(metasound("งง"))
self.assertIsNotNone(metasound("ลา"))
self.assertIsNotNone(metasound("มา"))
self.assertIsNotNone(metasound("ยา"))
self.assertIsNotNone(metasound("วา"))
self.assertIsNotNone(metasound("บูชา"))
self.assertIsNotNone(metasound("กมลา"))
self.assertIsNotNone(metasound("กาโวกาโว"))
self.assertIsNotNone(metasound("สุวรรณา"))
self.assertIsNotNone(metasound("ดอยบอย"))
self.assertEqual(prayut_and_somchaip(None), "")
self.assertEqual(prayut_and_somchaip(""), "")
self.assertEqual(prayut_and_somchaip("vp"), "11")
self.assertIsNotNone(prayut_and_somchaip("บา"))
self.assertIsNotNone(prayut_and_somchaip("go"))
self.assertIsNotNone(prayut_and_somchaip("อด"))
self.assertIsNotNone(prayut_and_somchaip("ลน"))
self.assertIsNotNone(prayut_and_somchaip("มอ"))
self.assertIsNotNone(prayut_and_somchaip("รอ"))
self.assertIsNotNone(prayut_and_somchaip("ขอ"))
self.assertIsNotNone(prayut_and_somchaip("บน"))
self.assertIsNotNone(prayut_and_somchaip("ณาญ"))
self.assertIsNotNone(prayut_and_somchaip("กาง"))
self.assertIsNotNone(prayut_and_somchaip("ว้าว"))
def test_word_approximation(self):
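        # word_approximation() scores candidate words by phonetic closeness
        # to the target word.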
        self.assertIsNotNone(word_approximation("รถ", ["รส", "รด", "คน"]))
def test_audio_vector(self):
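        # audio_vector() returns a phonetic feature vector for the word.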
self.assertIsNotNone(audio_vector("คน"))
| 3,526 | 41.493976 | 83 | py |
pythainlp-dev/tests/test_spell.py | pythainlp-dev/tests/test_spell.py | # -*- coding: utf-8 -*-
import unittest
from pythainlp.spell import (
NorvigSpellChecker,
correct,
spell,
spell_sent,
correct_sent,
symspellpy,
)
class TestSpellPackage(unittest.TestCase):
def test_spell(self):
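        # spell() returns a list of ranked correction candidates; engines
        # include the default Norvig-style checker ("pn"), phunspell,
        # symspellpy, and tltk.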
self.assertEqual(spell(None), [""])
self.assertEqual(spell(""), [""])
result = spell("เน้ร")
self.assertIsInstance(result, list)
self.assertGreater(len(result), 0)
result = spell("เกสมร์")
self.assertIsInstance(result, list)
self.assertGreater(len(result), 0)
result = spell("เน้ร", engine="phunspell")
self.assertIsInstance(result, list)
self.assertGreater(len(result), 0)
result = spell("เกสมร์", engine="phunspell")
self.assertIsInstance(result, list)
self.assertGreater(len(result), 0)
result = spell("เน้ร", engine="symspellpy")
self.assertIsInstance(result, list)
self.assertGreater(len(result), 0)
result = spell("เกสมร์", engine="symspellpy")
self.assertIsInstance(result, list)
self.assertGreater(len(result), 0)
result = spell("เน้ร", engine="tltk")
self.assertIsInstance(result, list)
self.assertGreater(len(result), 0)
result = spell("เดก", engine="tltk")
self.assertIsInstance(result, list)
self.assertGreater(len(result), 0)
def test_word_correct(self):
self.assertEqual(correct(None), "")
self.assertEqual(correct(""), "")
self.assertEqual(correct("1"), "1")
self.assertEqual(correct("05"), "05")
self.assertEqual(correct("56"), "56")
self.assertEqual(correct("1.01"), "1.01")
result = correct("ทดสอง")
self.assertIsInstance(result, str)
self.assertNotEqual(result, "")
result = correct("ทดสอง", engine="phunspell")
self.assertIsInstance(result, str)
self.assertNotEqual(result, "")
result = correct("ทดสอง", engine="symspellpy")
self.assertIsInstance(result, str)
self.assertNotEqual(result, "")
def test_norvig_spell_checker(self):
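        # NorvigSpellChecker builds its dictionary from (word, freq) pairs and
        # filters out entries that are too long, too rare, non-Thai, or have
        # non-positive counts (see the per-entry comments below).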
checker = NorvigSpellChecker(dict_filter=None)
self.assertTrue(len(checker.dictionary()) > 0)
self.assertGreaterEqual(checker.prob("มี"), 0)
user_dict = [
("การงาน", 31), # longer than max_len
("กาม", 1), # fewer than min_freq
("กาล0", 64), # has digit
("๒๔๗๕", 64), # has digit
("hello", 8), # not Thai
("ลบ", -1), # negative count
("การ", 42), # OK
]
checker = NorvigSpellChecker(
custom_dict=user_dict, min_freq=2, max_len=5
)
self.assertEqual(len(checker.dictionary()), 1)
user_dict = [
"เอกราช",
"ปลอดภัย",
"เศรษฐกิจ",
"เสมอภาค",
"เสรีภาพ",
"การศึกษา",
]
checker = NorvigSpellChecker(custom_dict=user_dict)
self.assertEqual(len(checker.dictionary()), len(user_dict))
user_dict = {
"พหลโยธิน": 1,
"ขีตตะสังคะ": 2,
"พนมยงค์": 3,
"ภมรมนตรี": 4,
"มิตรภักดี": 5,
"ลพานุกรม": 6,
"สิงหเสนี": 7,
}
checker = NorvigSpellChecker(custom_dict=user_dict)
# "พหลโยธิน" will be removed,
# as it has frequency less than default min_freq (2)
self.assertEqual(len(checker.dictionary()), len(user_dict) - 1)
user_dict = [24, 6, 2475]
with self.assertRaises(TypeError):
checker = NorvigSpellChecker(custom_dict=user_dict)
def test_spell_sent(self):
self.spell_sent = ["เด็", "อินอร์เน็ต", "แรง"]
self.assertIsNotNone(spell_sent(self.spell_sent))
self.assertIsNotNone(spell_sent(self.spell_sent, engine="pn"))
self.assertIsNotNone(spell_sent(self.spell_sent, engine="phunspell"))
self.assertIsNotNone(spell_sent(self.spell_sent, engine="symspellpy"))
def test_correct_sent(self):
self.spell_sent = ["เด็", "อินอร์เน็ต", "แรง"]
self.assertIsNotNone(correct_sent(self.spell_sent))
self.assertIsNotNone(correct_sent(self.spell_sent, engine="pn"))
self.assertIsNotNone(correct_sent(self.spell_sent, engine="phunspell"))
self.assertIsNotNone(
correct_sent(self.spell_sent, engine="symspellpy")
)
self.assertIsNotNone(
symspellpy.correct_sent(self.spell_sent)
)
| 4,554 | 32.007246 | 79 | py |
pythainlp-dev/tests/test_summarize.py | pythainlp-dev/tests/test_summarize.py | # -*- coding: utf-8 -*-
import unittest
from pythainlp.summarize import summarize, extract_keywords
class TestSummarizePackage(unittest.TestCase):
def test_summarize(self):
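        # The default engine is extractive (it returns sentences taken from the
        # input, as asserted below); the mt5-* engines are abstractive
        # transformer models.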
text = (
"อาหาร หมายถึง ของแข็งหรือของเหลว "
"ที่กินหรือดื่มเข้าสู่ร่างกายแล้ว "
"จะทำให้เกิดพลังงานและความร้อนแก่ร่างกาย "
"ทำให้ร่างกายเจริญเติบโต "
"ซ่อมแซมส่วนที่สึกหรอ ควบคุมการเปลี่ยนแปลงต่างๆ ในร่างกาย "
"ช่วยทำให้อวัยวะต่างๆ ทำงานได้อย่างปกติ "
"อาหารจะต้องไม่มีพิษและไม่เกิดโทษต่อร่างกาย"
)
self.assertEqual(
summarize(text=text, n=1),
["อาหารจะต้องไม่มีพิษและไม่เกิดโทษต่อร่างกาย"],
)
self.assertIsNotNone(summarize(text, engine="mt5-small"))
self.assertIsNotNone(summarize([]))
self.assertIsNotNone(summarize(text, 1, engine="mt5-small"))
self.assertIsNotNone(summarize(text, 1, engine="mt5-cpe-kmutt-thai-sentence-sum"))
self.assertIsNotNone(summarize(text, 1, engine="XX"))
with self.assertRaises(ValueError):
self.assertIsNotNone(summarize(text, 1, engine="mt5-cat"))
def test_keyword_extraction(self):
text = (
"อาหาร หมายถึง ของแข็งหรือของเหลว "
"ที่กินหรือดื่มเข้าสู่ร่างกายแล้ว "
"จะทำให้เกิดพลังงานและความร้อนแก่ร่างกาย "
"ทำให้ร่างกายเจริญเติบโต "
"ซ่อมแซมส่วนที่สึกหรอ ควบคุมการเปลี่ยนแปลงต่างๆ ในร่างกาย "
"ช่วยทำให้อวัยวะต่างๆ ทำงานได้อย่างปกติ "
"อาหารจะต้องไม่มีพิษและไม่เกิดโทษต่อร่างกาย"
)
self.assertEqual(extract_keywords(""), [])
self.assertEqual(extract_keywords(" "), [])
# test default engine, common case
keywords = extract_keywords(text)
expected = ["ซ่อมแซมส่วน", "เจริญเติบโต", "อวัยวะต่างๆ", "ควบคุมการเปลี่ยนแปลง"]
for exp_kw in expected:
self.assertIn(exp_kw, keywords)
# test another engine
for max_kw in (5, 10):
keywords = extract_keywords(text, engine="frequency", max_keywords=max_kw)
self.assertEqual(len(keywords), max_kw)
# test invalid engine
with self.assertRaises(ValueError):
extract_keywords(text, engine="random engine")
# test different tokenizer
keywords = extract_keywords(text, tokenizer="attacut")
expected = ["อวัยวะต่างๆ", "ซ่อมแซมส่วน", "เจริญเติบโต", "เกิดพลังงาน"]
for exp_kw in expected:
self.assertIn(exp_kw, keywords)
# test overriding stop words
stpw = "เจริญเติบโต"
keywords = extract_keywords(text, stop_words=[stpw])
self.assertNotIn(stpw, keywords)
def test_keybert(self):
text = (
"อาหาร หมายถึง ของแข็งหรือของเหลว "
"ที่กินหรือดื่มเข้าสู่ร่างกายแล้ว "
"จะทำให้เกิดพลังงานและความร้อนแก่ร่างกาย "
"ทำให้ร่างกายเจริญเติบโต "
"ซ่อมแซมส่วนที่สึกหรอ ควบคุมการเปลี่ยนแปลงต่างๆ ในร่างกาย "
"ช่วยทำให้อวัยวะต่างๆ ทำงานได้อย่างปกติ "
"อาหารจะต้องไม่มีพิษและไม่เกิดโทษต่อร่างกาย"
)
from pythainlp.summarize.keybert import KeyBERT
from pythainlp.tokenize import word_tokenize
keybert = KeyBERT()
# test ngram range
ng_ranges = [(1, 1), (1, 2), (2, 2), (3, 3)]
for ng_min, ng_max in ng_ranges:
keywords = keybert.extract_keywords(text, keyphrase_ngram_range=(ng_min, ng_max))
for kw in keywords:
self.assertTrue(ng_min <= len(word_tokenize(kw)) <= ng_max)
# test max_keywords
max_kws = 10
keywords = keybert.extract_keywords(text, max_keywords=max_kws)
self.assertLessEqual(len(keywords), max_kws)
text_short = "เฮลโหล"
keywords = keybert.extract_keywords(text_short, max_keywords=max_kws)
self.assertLessEqual(len(keywords), max_kws)
| 3,956 | 37.794118 | 93 | py |
pythainlp-dev/tests/test_tag.py | pythainlp-dev/tests/test_tag.py | # -*- coding: utf-8 -*-
from pythainlp import corpus
import unittest
from os import path
from pythainlp import tag
from pythainlp.tag import (
chunk_parse,
PerceptronTagger,
perceptron,
pos_tag,
pos_tag_sents,
unigram,
tltk,
NER,
NNER,
)
from pythainlp.tag.locations import tag_provinces
from pythainlp.tag.thainer import ThaiNameTagger
class TestTagPackage(unittest.TestCase):
    # ### pythainlp.tag.chunk_parse
def test_chunk_parse(self):
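        # chunk_parse() assigns shallow-parse (chunk) tags to (word, POS) pairs.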
tokens = ["ผม", "รัก", "คุณ"]
w_p = pos_tag(tokens, engine="perceptron", corpus="orchid")
self.assertIsNotNone(chunk_parse(w_p))
# ### pythainlp.tag.pos_tag
def test_pos_tag(self):
tokens = ["ผม", "รัก", "คุณ"]
self.assertEqual(pos_tag(None), [])
self.assertEqual(pos_tag([]), [])
self.assertEqual(
pos_tag(["นักเรียน", "ถาม", "ครู"]),
[("นักเรียน", "NCMN"), ("ถาม", "VACT"), ("ครู", "NCMN")],
)
self.assertEqual(
len(pos_tag(["การ", "เดินทาง", "มี", "ความ", "ท้าทาย"])), 5
)
self.assertEqual(unigram.tag(None, corpus="pud"), [])
self.assertEqual(unigram.tag([], corpus="pud"), [])
self.assertEqual(unigram.tag(None, corpus="orchid"), [])
self.assertEqual(unigram.tag([], corpus="orchid"), [])
self.assertEqual(unigram.tag(None, corpus="blackboard"), [])
self.assertEqual(unigram.tag([], corpus="blackboard"), [])
self.assertIsNotNone(
pos_tag(tokens, engine="unigram", corpus="orchid")
)
self.assertIsNotNone(
pos_tag(tokens, engine="unigram", corpus="orchid_ud")
)
self.assertIsNotNone(pos_tag(tokens, engine="unigram", corpus="pud"))
self.assertIsNotNone(pos_tag([""], engine="unigram", corpus="pud"))
self.assertIsNotNone(pos_tag(tokens, engine="unigram", corpus="blackboard"))
self.assertIsNotNone(pos_tag([""], engine="unigram", corpus="blackboard"))
self.assertIsNotNone(
pos_tag([""], engine="unigram", corpus="blackboard_ud")
)
self.assertEqual(
pos_tag(["คุณ", "กำลัง", "ประชุม"], engine="unigram"),
[("คุณ", "PPRS"), ("กำลัง", "XVBM"), ("ประชุม", "VACT")],
)
self.assertTrue(
pos_tag(["การ", "รัฐประหาร"], corpus="orchid_ud")[0][1], "NOUN"
)
self.assertTrue(
pos_tag(["ความ", "พอเพียง"], corpus="orchid_ud")[0][1], "NOUN"
)
self.assertEqual(perceptron.tag(None, corpus="orchid"), [])
self.assertEqual(perceptron.tag([], corpus="orchid"), [])
self.assertEqual(perceptron.tag(None, corpus="orchid_ud"), [])
self.assertEqual(perceptron.tag([], corpus="orchid_ud"), [])
self.assertEqual(perceptron.tag(None, corpus="pud"), [])
self.assertEqual(perceptron.tag([], corpus="pud"), [])
self.assertEqual(perceptron.tag(None, corpus="blackboard"), [])
self.assertEqual(perceptron.tag([], corpus="blackboard"), [])
self.assertIsNotNone(
pos_tag(tokens, engine="perceptron", corpus="orchid")
)
self.assertIsNotNone(
pos_tag(tokens, engine="perceptron", corpus="orchid_ud")
)
self.assertIsNotNone(
pos_tag(tokens, engine="perceptron", corpus="pud")
)
self.assertIsNotNone(
pos_tag(tokens, engine="perceptron", corpus="blackboard")
)
self.assertIsNotNone(
pos_tag(tokens, engine="perceptron", corpus="blackboard_ud")
)
self.assertIsNotNone(
pos_tag(tokens, engine="tltk")
)
self.assertEqual(pos_tag_sents(None), [])
self.assertEqual(pos_tag_sents([]), [])
self.assertEqual(
pos_tag_sents([["ผม", "กิน", "ข้าว"], ["แมว", "วิ่ง"]]),
[
[("ผม", "PPRS"), ("กิน", "VACT"), ("ข้าว", "NCMN")],
[("แมว", "NCMN"), ("วิ่ง", "VACT")],
],
)
with self.assertRaises(ValueError):
self.assertIsNotNone(
tltk.pos_tag(tokens, corpus="blackboard")
)
# ### pythainlp.tag.PerceptronTagger
def test_perceptron_tagger(self):
tagger = PerceptronTagger()
# train data, with "กิน" > 20 instances to trigger conditions
# in _make_tagdict()
data = [
[("คน", "N"), ("เดิน", "V")],
[("ฉัน", "N"), ("เดิน", "V")],
[("แมว", "N"), ("เดิน", "V")],
[("คน", "N"), ("วิ่ง", "V")],
[("ปลา", "N"), ("ว่าย", "V")],
[("นก", "N"), ("บิน", "V")],
[("คน", "N"), ("พูด", "V")],
[("C-3PO", "N"), ("พูด", "V")],
[("คน", "N"), ("กิน", "V")],
[("แมว", "N"), ("กิน", "V")],
[("นก", "N"), ("กิน", "V")],
[("นก", "N"), ("นก", "V")],
[("คน", "N"), ("นก", "V")],
[("คน", "N"), ("กิน", "V"), ("นก", "N")],
[("คน", "N"), ("กิน", "V"), ("ปลา", "N")],
[("นก", "N"), ("กิน", "V"), ("ปลา", "N")],
[("คน", "N"), ("กิน", "V"), ("กาแฟ", "N")],
[("คน", "N"), ("คน", "V"), ("กาแฟ", "N")],
[("พระ", "N"), ("ฉัน", "V"), ("กาแฟ", "N")],
[("พระ", "N"), ("คน", "V"), ("กาแฟ", "N")],
[("พระ", "N"), ("ฉัน", "V"), ("ข้าว", "N")],
[("ฉัน", "N"), ("กิน", "V"), ("ข้าว", "N")],
[("เธอ", "N"), ("กิน", "V"), ("ปลา", "N")],
[("ปลา", "N"), ("กิน", "V"), ("แมลง", "N")],
[("แมวน้ำ", "N"), ("กิน", "V"), ("ปลา", "N")],
[("หนู", "N"), ("กิน", "V")],
[("เสือ", "N"), ("กิน", "V")],
[("ยีราฟ", "N"), ("กิน", "V")],
[("แรด", "N"), ("กิน", "V")],
[("หมู", "N"), ("กิน", "V")],
[("แมลง", "N"), ("กิน", "V")],
[("สิงโต", "N"), ("กิน", "V")],
[("เห็บ", "N"), ("กิน", "V")],
[("เหา", "N"), ("กิน", "V")],
[("เต่า", "N"), ("กิน", "V")],
[("กระต่าย", "N"), ("กิน", "V")],
[("จิ้งจก", "N"), ("กิน", "V")],
[("หมี", "N"), ("กิน", "V")],
[("หมา", "N"), ("กิน", "V")],
[("ตะพาบ", "N"), ("กิน", "V")],
[("เม่น", "N"), ("กิน", "V")],
[("หนอน", "N"), ("กิน", "V")],
[("ปี", "N"), ("2021", "N")],
]
filename = "ptagger_temp4XcDf.json"
tagger.train(data, save_loc=filename)
self.assertTrue(path.exists(filename))
words = ["นก", "เดิน"]
word_tags = tagger.tag(words)
self.assertEqual(len(words), len(word_tags))
words2, _ = zip(*word_tags)
self.assertEqual(words, list(words2))
with self.assertRaises(IOError):
tagger.load("ptagger_notexistX4AcOcX.pkl") # file does not exist
# ### pythainlp.tag.locations
def test_ner_locations(self):
self.assertEqual(
tag_provinces(["หนองคาย", "น่าอยู่"]),
[("หนองคาย", "B-LOCATION"), ("น่าอยู่", "O")],
)
# ### pythainlp.tag.named_entity
def test_ner(self):
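        # ThaiNameTagger is a CRF-based named-entity tagger trained on the
        # ThaiNER corpus; `version` selects the model release.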
ner = ThaiNameTagger(version="1.5")
self.assertEqual(ner.get_ner(""), [])
self.assertIsNotNone(ner.get_ner("แมวทำอะไรตอนห้าโมงเช้า"))
self.assertIsNotNone(ner.get_ner("แมวทำอะไรตอนห้าโมงเช้า", pos=False))
self.assertIsNotNone(
ner.get_ner(
"""คณะวิทยาศาสตร์ประยุกต์และวิศวกรรมศาสตร์ มหาวิทยาลัยขอนแก่น
วิทยาเขตหนองคาย 112 หมู่ 7 บ้านหนองเดิ่น ตำบลหนองกอมเกาะ อำเภอเมือง
จังหวัดหนองคาย 43000"""
)
)
self.assertIsNotNone(
ner.get_ner(
"""คณะวิทยาศาสตร์ประยุกต์และวิศวกรรมศาสตร์ มหาวิทยาลัยขอนแก่น
วิทยาเขตหนองคาย 112 หมู่ 7 บ้านหนองเดิ่น ตำบลหนองกอมเกาะ อำเภอเมือง
จังหวัดหนองคาย 43000""",
tag=True,
)
)
        # argument `tag` is True
self.assertIsNotNone(
ner.get_ner("วันที่ 15 ก.ย. 61 ทดสอบระบบเวลา 14:49 น.", tag=True)
)
ner = ThaiNameTagger(version="1.4")
self.assertEqual(ner.get_ner(""), [])
self.assertIsNotNone(ner.get_ner("แมวทำอะไรตอนห้าโมงเช้า"))
self.assertIsNotNone(ner.get_ner("แมวทำอะไรตอนห้าโมงเช้า", pos=False))
self.assertIsNotNone(
ner.get_ner(
"""คณะวิทยาศาสตร์ประยุกต์และวิศวกรรมศาสตร์ มหาวิทยาลัยขอนแก่น
วิทยาเขตหนองคาย 112 หมู่ 7 บ้านหนองเดิ่น
ตำบลหนองกอมเกาะ อำเภอเมือง
จังหวัดหนองคาย 43000"""
)
)
self.assertIsNotNone(
ner.get_ner(
"""คณะวิทยาศาสตร์ประยุกต์และวิศวกรรมศาสตร์ มหาวิทยาลัยขอนแก่น
วิทยาเขตหนองคาย 112 หมู่ 7 บ้านหนองเดิ่น
ตำบลหนองกอมเกาะ อำเภอเมือง
จังหวัดหนองคาย 43000""",
tag=True,
)
)
        # argument `tag` is True
self.assertEqual(
ner.get_ner("วันที่ 15 ก.ย. 61 ทดสอบระบบเวลา 14:49 น.", tag=True),
"วันที่ <DATE>15 ก.ย. 61</DATE> "
"ทดสอบระบบเวลา <TIME>14:49 น.</TIME>",
)
self.assertEqual(
ner.get_ner(
"url = https://thainlp.org/pythainlp/docs/2.0/", tag=True
),
"url = <URL>https://thainlp.org/pythainlp/docs/2.0/</URL>",
)
self.assertEqual(
ner.get_ner("[email protected]", tag=True),
"<EMAIL>[email protected]</EMAIL>",
)
self.assertEqual(
ner.get_ner("รหัสไปรษณีย์ 19130", tag=True),
"รหัสไปรษณีย์ <ZIP>19130</ZIP>",
)
self.assertEqual(
ner.get_ner("อาจารย์เอกพล ประจำคณะวิศวกรรมศาสตร์ ", tag=True),
"<PERSON>อาจารย์เอกพล</PERSON> ประจำ<ORGANIZATION>"
"คณะวิศวกรรมศาสตร์</ORGANIZATION> ",
)
'''self.assertEqual(
ner.get_ner(
"มาตรา 80 ปพพ ให้ใช้อัตราภาษีร้อยละ 10.0"
" ในการคำนวณภาษีมูลค่าเพิ่ม",
tag=True,
),
"<LAW>มาตรา 80 ปพพ</LAW> "
"ให้ใช้อัตราภาษี<PERCENT>ร้อยละ 10.0</PERCENT>"
" ในการคำนวณภาษีมูลค่าเพิ่ม",
)'''
self.assertEqual(
ner.get_ner("ยาว 20 เซนติเมตร", tag=True),
"ยาว <LEN>20 เซนติเมตร</LEN>",
)
self.assertEqual(
ner.get_ner("1 บาท", pos=True, tag=True), "<MONEY>1 บาท</MONEY>"
)
self.assertEqual(
ner.get_ner("ไทย", pos=False, tag=True), "<LOCATION>ไทย</LOCATION>"
)
self.assertIsNotNone(
ner.get_ner("บางแสนกรุงเทพ", pos=False, tag=True)
)
        # argument `tag` is False and `pos` is True
self.assertEqual(
ner.get_ner("ไทย", pos=True, tag=False),
[('ไทย', 'PROPN', 'B-LOCATION')],
)
        # argument `tag` is False and `pos` is False
self.assertIsNotNone(
ner.get_ner(
"วันที่ 15 ก.ย. 61 ทดสอบระบบเวลา 14:49 น.",
pos=False,
tag=False,
)
)
def test_tltk_ner(self):
self.assertEqual(tltk.get_ner(""), [])
self.assertIsNotNone(tltk.get_ner("แมวทำอะไรตอนห้าโมงเช้า"))
self.assertIsNotNone(tltk.get_ner("แมวทำอะไรตอนห้าโมงเช้า", pos=False))
self.assertIsNotNone(
tltk.get_ner(
"พลเอกประยุกธ์ จันทร์โอชา ประกาศในฐานะหัวหน้า"
)
)
self.assertIsNotNone(
tltk.get_ner(
"พลเอกประยุกธ์ จันทร์โอชา ประกาศในฐานะหัวหน้า",
tag=True,
)
)
self.assertIsNotNone(
tltk.get_ner(
"""คณะวิทยาศาสตร์ประยุกต์และวิศวกรรมศาสตร์ มหาวิทยาลัยขอนแก่น
จังหวัดหนองคาย 43000"""
)
)
self.assertIsNotNone(
tltk.get_ner(
"""คณะวิทยาศาสตร์ประยุกต์และวิศวกรรมศาสตร์ มหาวิทยาลัยขอนแก่น
จังหวัดหนองคาย 43000""",
tag=True,
)
)
def test_NER_class(self):
ner = NER(engine="thainer")
self.assertIsNotNone(ner.tag("แมวทำอะไรตอนห้าโมงเช้า"))
self.assertIsNotNone(ner.tag("แมวทำอะไรตอนห้าโมงเช้า", pos=False))
self.assertIsNotNone(ner.tag("แมวทำอะไรตอนห้าโมงเช้า", tag=True))
ner = NER(engine="wangchanberta")
self.assertIsNotNone(ner.tag("แมวทำอะไรตอนห้าโมงเช้า"))
self.assertIsNotNone(ner.tag("แมวทำอะไรตอนห้าโมงเช้า", pos=False))
self.assertIsNotNone(ner.tag("แมวทำอะไรตอนห้าโมงเช้า", tag=True))
ner = NER(engine="thainer-v2")
self.assertIsNotNone(ner.tag("แมวทำอะไรตอนห้าโมงเช้า"))
self.assertIsNotNone(ner.tag("แมวทำอะไรตอนห้าโมงเช้า", pos=False))
self.assertIsNotNone(ner.tag("แมวทำอะไรตอนห้าโมงเช้า", tag=True))
ner = NER(engine="tltk")
self.assertIsNotNone(ner.tag("แมวทำอะไรตอนห้าโมงเช้า"))
self.assertIsNotNone(ner.tag("แมวทำอะไรตอนห้าโมงเช้า", pos=False))
self.assertIsNotNone(ner.tag("แมวทำอะไรตอนห้าโมงเช้า", tag=True))
with self.assertRaises(ValueError):
NER(engine="thainer", corpus="cat")
def test_NNER_class(self):
nner = NNER()
self.assertIsNotNone(nner.tag("แมวทำอะไรตอนห้าโมงเช้า"))
| 13,447 | 35.643052 | 84 | py |
pythainlp-dev/tests/test_tokenize.py | pythainlp-dev/tests/test_tokenize.py | # -*- coding: utf-8 -*-
import unittest
from pythainlp.tokenize import (
DEFAULT_WORD_DICT_TRIE,
Tokenizer,
attacut,
deepcut,
etcc,
longest,
multi_cut,
nercut,
newmm,
pyicu,
sent_tokenize,
ssg,
subword_tokenize,
tcc,
tcc_p,
word_tokenize,
sefr_cut,
tltk,
oskut,
word_detokenize,
paragraph_tokenize,
)
from pythainlp.tokenize import clause_tokenize as sent_clause_tokenize
from pythainlp.util import dict_trie
class TestTokenizePackage(unittest.TestCase):
def setUp(self):
self.text_1 = "หมอนทองตากลมหูว์MBK39 :.ฉฺ๐๐๓-#™±"
self.text_2 = "ทดสอบ"
self.long_text = (
"ไต้หวัน (แป่ะเอ๋ยี้: Tâi-oân; ไต่อวัน) หรือ ไถวาน "
"(อักษรโรมัน: Taiwan; จีนตัวย่อ: 台湾; จีนตัวเต็ม: 臺灣/台灣; พินอิน: "
"Táiwān; ไถวาน) หรือชื่อทางการว่า สาธารณรัฐจีน (จีนตัวย่อ: 中华民国; "
"จีนตัวเต็ม: 中華民國; พินอิน: Zhōnghuá "
"Mínguó) เป็นรัฐในทวีปเอเชียตะวันออก[7][8][9] ปัจจุบันประกอบด้วย"
"เกาะใหญ่ 5 แห่ง คือ จินเหมิน (金門), ไต้หวัน, เผิงหู (澎湖), หมาจู่ "
"(馬祖), และอูชิว (烏坵) กับทั้งเกาะเล็กเกาะน้อยอีกจำนวนหนึ่ง "
'ท้องที่ดังกล่าวเรียกรวมกันว่า "พื้นที่ไต้หวัน" (臺灣地區)\n'
"ไต้หวันด้านตะวันตกติดกับจีนแผ่นดินใหญ่ ด้านตะวันออกและตะวันออก"
"เฉียงเหนือติดกับญี่ปุ่น และด้านใต้ติดกับฟิลิปปินส์ กรุงไทเปเป็น"
"เมืองหลวง ส่วนไทเปใหม่เป็นเขตปกครองที่จัดตั้งขึ้นใหม่ กินพื้นที่"
"กรุงไทเปและเป็นเขตซึ่งประชากรหนาแน่นที่สุดในเวลานี้\n"
"เกาะไต้หวันเดิมเป็นที่อยู่ของชนพื้นเมือง และมีชาวจีนจากแผ่นดิน"
"ใหญ่เข้ามาอาศัยร่วมด้วย จนกระทั่งชาววิลันดาและสเปนเดินทางเข้า"
"มาในยุคสำรวจเมื่อศตวรรษที่ 17 และมาตั้งบ้านเรือนกลายเป็นนิคม"
"ใหญ่โต ต่อมาปี 1662 ราชวงศ์หมิงในแผ่นดินใหญ่ถูกราชวงศ์ชิงแทนที่ "
"เจิ้ง เฉิงกง (鄭成功) ขุนศึกหมิง รวมกำลังหนีมาถึงเกาะไต้หวัน "
"และรุกไล่ฝรั่งออกไปได้อย่างราบคาบ เขาจึงตั้งราชอาณาจักรตงหนิง "
'(東寧) ขึ้นบนเกาะเพื่อ "โค่นชิงฟื้นหมิง" แต่ในปี 1683 ราชวงศ์'
"ชิงปราบปรามอาณาจักรตงหนิงและเข้าครอบครองไต้หวันเป็นผลสำเร็จ "
"ไต้หวันจึงกลายเป็นมณฑลหนึ่งของจีน อย่างไรก็ดี ความบาดหมางระหว่าง"
"จีนกับญี่ปุ่นเป็นเหตุให้ญี่ปุ่นได้ไต้หวันไปในปี 1895\n"
"ก่อนเสียไต้หวันคืนแก่จีนหลังสงครามโลกครั้งที่สอง ช่วงนั้น มีการ"
"เปลี่ยนแปลงการปกครองในจีน พรรคก๊กมินตั๋ง ได้เป็นใหญ่ "
"แต่ไม่นานก็เสียทีให้แก่พรรคคอมมิวนิสต์จีน พรรคก๊กมินตั๋งจึงหนี"
"มายังเกาะไต้หวันและสถาปนาสาธารณรัฐจีนขึ้นบนเกาะแยกต่างหาก "
"ส่วนฝ่ายคอมมิวนิสต์จีนที่เป็นฝ่ายได้รับชัยชนะได้สถาปนาสาธารณรัฐ"
"ประชาชนจีนบนแผ่นดินใหญ่ อย่างไรก็ดี จีนยังคงถือว่า ไต้หวันเป็น"
"มณฑลหนึ่งของตน และไต้หวันเองก็ยังมิได้รับการยอมรับจากนานาชาติ"
"ว่าเป็นประเทศเอกราชมาจนบัดนี้\n"
"ในช่วงทศวรรษ 1980 ถึงต้นทศวรรษ 1990 การเมืองการปกครอง"
"สาธารณรัฐจีน (ไต้หวัน) เจริญรุ่งเรืองจนเป็นประชาธิปไตยที่มีพรรค"
"การเมืองหลายพรรคและมีการเลือกตั้งทั่วหน้า ในช่วงกลางศตวรรษที่ "
"20 เศรษฐกิจไต้หวันงอกงามอย่างรวดเร็ว ไต้หวันจึงกลายเป็นประเทศ"
"พัฒนาแล้ว ได้ชื่อว่าเป็นหนึ่งในสี่เสือแห่งเอเชีย มีอุตสาหกรรม"
"ล้ำหน้า และมีเศรษฐกิจใหญ่โตเป็นอันดับที่ 19 ของโลก[11][12] "
"อุตสาหกรรมที่ใช้เทคโนโลยีชั้นสูงของไต้หวันยังมีบทบาทสำคัญมากใน"
"เศรษฐกิจโลก เป็นเหตุให้ไต้หวันได้เป็นสมาชิกองค์การการค้าโลกและ"
"ความร่วมมือทางเศรษฐกิจเอเชีย-แปซิฟิก เสรีภาพของสื่อมวลชน เสรี"
"ภาพทางเศรษฐกิจ การสาธารณสุข[13]การศึกษา และดัชนีการพัฒนามนุษย์ใน"
"ไต้หวันยังได้รับการจัดอยู่ในอันดับสูงด้วย[14][4][15]\n"
"สาธารณรัฐจีน มีลักษณะเป็นกลุ่มเกาะ ภูมิประเทศติดกับทะเล ไม่ติด"
"กับประเทศใดเลย ห่างจากเกาะทางทิศเหนือและทิศตะวันตกเป็นสาธารณรัฐ"
"ประชาชนจีน ทิศใต้เป็นประเทศฟิลิปปินส์และทะเลจีนใต้ ส่วนทิศ"
"ตะวันออกเป็นมหาสมุทรแปซิฟิก\n"
"ในปี ค.ศ. 1638 หลังการพ่ายแพ้ของหลานชายของเจิ้ง เฉิงกง "
"จากการบุกโจมตีทางทัพเรือของราชวงศ์ชิงแมนจูที่นำทัพโดยชื่อ หลาง"
"จากทางใต้ของมณฑลฝูเจี้ยน ทำให้ราชวงศ์ชิงผนวกยึดเกาะไต้หวันเป็น"
"ส่วนหนึ่งสำเร็จ และวางไว้ภายใต้เขตอำนาจของมณฑลฝูเจี้ยน ราชสำนัก"
"ราชวงศ์ชิงพยายามลดการละเมิดสิทธิ์และความไม่ลงรอยกันในพื้นที่โดย"
"ออกกฎหมายเพื่อจัดการตรวจคนเข้าเมืองและเคารพสิทธิในที่ดินของชน"
"พื้นเมืองไต้หวัน ผู้อพยพจากฝูเจี้ยนทางใต้ส่วนใหญ่ยังคงเดินทางไป"
"ไต้หวัน เขตแดนระหว่างดินแดนที่เสียภาษีและสิ่งที่ถูกพิจารณาว่า"
'เป็นดินแดน "เขตอันตราย" เปลี่ยนไปทางทิศตะวันออกโดยชาวพื้นเมือง'
"บางคนเข้ารีตรับวัฒนธรรมแบบจีน ในขณะที่คนอื่นถอยกลับเข้าในภูเขา "
"ในช่วงเวลานี้มีความขัดแย้งจำนวนมากระหว่างกลุ่มชาวฮั่นด้วยกันเอง"
"จากภูมิภาคต่าง ๆ ของฝูเจี้ยนทางใต้โดยเฉพาะอย่างยิ่งระหว่างเฉวียน"
"โจวกับฉางโจว และระหว่างฝูเจี้ยนตอนใต้และชาวพื้นเมืองไต้หวัน\n"
"พ.ศ. 2454 (ค.ศ. 1911) การจลาจลอู่ฮั่นในประเทศจีน เป็นจุดเริ่มต้น"
"การล่มสลายของราชวงศ์ชิง เมื่อพรรคคอมมิวนิสต์จีนเข้ามีอำนาจในจีน"
"แผ่นดินใหญ่เมื่อ พ.ศ. 2492 (1949) พรรคก๊กมินตั๋ง พรรคการเมือง"
"ชาตินิยมของจีนที่เป็นฝ่ายแพ้ก็พาผู้คนอพยพหนีออกจากแผ่นดินใหญ่มา"
"ตั้งหลักที่ไต้หวัน เพื่อวางแผนกลับไปครองอำนาจในจีนต่อไป\n"
"ชาวจีนมากกว่า 1 ล้าน 5 แสนคน อพยพตามมาอยู่ที่เกาะไต้หวันในยุคที่"
"เหมา เจ๋อตง มีอำนาจเต็มที่ในจีนแผ่นดินใหญ่ ผู้นำของประเทศทั้งสอง"
"จีนคือผู้นำพรรคคอมมิวนิสต์กับผู้นำสาธารณรัฐจีนบนเกาะไต้หวัน แย่ง"
"กันเป็นกระบอกเสียงของประชาชนจีนในเวทีโลก แต่เสียงของนานาประเทศ"
"ส่วนใหญ่เกรงอิทธิพลของจีนแผ่นดินใหญ่ จึงให้การยอมรับจีนแผ่นดิน"
"ใหญ่มากกว่า\n"
"ในปี พ.ศ. 2514 (ค.ศ. 1971) ก่อนที่นายพล เจียง ไคเช็ก"
"(ภาษาจีน: 蔣中正) จะถึงอสัญกรรมไม่กี่ปี สาธารณรัฐจีนซึ่งเป็น"
"ประเทศที่ร่วมก่อตั้งองค์การสหประชาชาติได้สูญเสียสมาชิกภาพใน"
"ฐานะตัวแทนชาวจีนให้กับสาธารณรัฐประชาชนจีน ในปี พ.ศ. 2521 (1978)"
"สหประชาชาติประกาศรับรองจีนเดียวคือจีนแผ่นดินใหญ่และตัดสัมพันธ์"
"ทางการเมืองกับสาธารณรัฐจีน ทั้งสหรัฐอเมริกาก็ได้ถอนการรับรองว่า"
"สาธารณรัฐจีนมีฐานะเป็นรัฐ ไต้หวันจึงกลายเป็นเพียงดินแดนที่จีน"
"อ้างว่าเป็นส่วนหนึ่งของสาธารณรัฐประชาชนจีนตั้งแต่นั้นเป็นต้นมา\n"
"เมื่อเจียง ไคเช็ก ถึงแก่อสัญกรรมในปี พ.ศ. 2518 (1975) ลูกชาย"
"ที่ชื่อ เจี่ยง จิงกั๋ว ได้เป็นผู้สืบทอดการปกครอง"
"ไต้หวันต่อและเริ่มกระบวนการ วางรากฐานไปสู่ประชาธิปไตย\n"
"หลังจากที่ประธานาธิบดี เจียง จิงกั๋ว เสียชีวิต ไต้หวันจึงได้เข้า"
"สู่ระบอบประชาธิปไตยเต็มรูปแบบ ประธานาธิบดีคนใหม่ ซึ่งเกิดใน"
"ไต้หวัน ชื่อ หลี่ เติงฮุย ขึ้นบริหารประเทศ โดยการสนับสนุนของ"
"เจี่ยง จิงกั๋ว ทั้งที่ หลี่ เติงฮุย นั้นเคลื่อนไหว"
"สนับสนุนเอกราชไต้หวัน นาย รัฐบาลจีนที่ปักกิ่งได้ตั้ง"
'ฉายาประธานาธิบดีไต้หวันคนใหม่ว่า "จิ้งจกปากหวาน" '
"ช่วงเวลาที่นายหลี่ เติงฮุย เป็นประธานาธิบดี การเมืองของไต้หวัน"
"เกิดการแตกแยกออกเป็น 3 ฝ่ายคือ 1) พวกก๊กมินตั๋ง ที่ต้องการกลับ"
"ไปรวมประเทศกับจีนแผ่นดินใหญ่ (รวมจีนแผ่นดินใหญ่ภายใต้การปกครอง"
"ของสาธารณรัฐจีน) 2) พวกที่ต้องการให้ไต้หวันเป็นประเทศอิสระไม่"
"เกี่ยวข้องกับจีนแผ่นดินใหญ่ และ 3) พวกที่ต้องการดำรงฐานะของ"
"ประเทศไว้ดังเดิมต่อไป\n"
"ไต้หวันกับจีนแผ่นดินใหญ่นัดเจรจาหาทางออกของข้อขัดแย้งทางการเมือง"
"ครั้งแรกที่สิงคโปร์เมื่อปี พ.ศ. 2536 (ค.ศ. 1993) แต่ปรากฏว่าจีน"
"แผ่นดินใหญ่ประวิงเวลาลงนามในสัญญาหลายฉบับที่เป็นข้อตกลงร่วมกัน "
"ทำให้ผลการเจรจาคราวนั้นไม่ก้าวหน้าไปถึงไหน ความสัมพันธ์ระหว่าง"
"สองจีนเลวร้ายลงทุกที เมื่อประธานาธิบดี หลี่ เติงฮุย เดินทางไป"
"เยือนสหรัฐอเมริกาและได้รับการยอมรับอย่างเอิกเกริก ทำให้จีนแผ่น"
"ดินใหญ่ไม่พอใจอย่างมาก จึงข่มขวัญไต้หวันกับประเทศที่ให้การสนับ"
"สนุนไต้หวัน ด้วยการทำการซ้อมรบขึ้นใกล้ ๆ เกาะไต้หวัน สหรัฐ"
"อเมริกาออกมาแสดงอาการปกป้องคุ้มครองไต้หวันด้วยการส่งกำลังกอง"
"เรือรบของสหรัฐฯ มาป้วนเปี้ยนอยู่ในน่านน้ำที่จีนซ้อมรบ\n"
"ขณะที่โลกกำลังล่อแหลมกับสถานการณ์ที่ตึงเครียดในน่านน้ำจีนมาก"
"ขึ้นทุกทีนั้น ไต้หวันก็จัดให้มีการเลือกตั้งครั้งใหม่ และในการ"
"เลือกตั้งครั้งใหม่นั้นเอง ไต้หวันก็ได้นายหลี่ เติงฮุย เป็น"
"ประธานาธิบดีอีกครั้ง\n"
"ไต้หวันเข้าสู่สภาวะวิกฤต เมื่อเกิดแผ่นดินไหวครั้งร้ายแรงที่สุดใน"
"ประวัติศาสตร์ในเดือนกันยายน พ.ศ. 2542 (ค.ศ. 1999) ทำให้ประชากร"
"ส่วนมากที่เป็นชาวพื้นเมืองเสียชีวิตไป 2,000 คน ทั้งเมืองมีแต่"
"เศษซากปรักหักพังจากภัยธรรมชาติ และช่วงนี้ไต้หวันต้องเผชิญความ"
"ยากลำบาก จีนแผ่นดินใหญ่ก็เพิ่มความกดดันไม่ให้นานาชาติ"
"เข้ามายุ่งเกี่ยวกับไต้หวันแม้ในยามคับขันเช่นนี้ โดยประกาศว่า "
"หากมีประเทศใดจะเข้าไปให้ความช่วยเหลือไต้หวัน จะต้องได้รับอนุญาต"
"จากจีนก่อน ซึ่งคำประกาศของจีนแผ่นดินใหญ่สวนทางกับเมตตาธรรมของ"
"ประเทศทั่วโลกที่ต้องการให้ความช่วยเหลือไต้หวัน\n"
"เดือนมีนาคม พ.ศ. 2543 (ค.ศ. 2000) มีการเลือกตั้งใหม่ในไต้หวัน "
"ชาวไต้หวันเลือกผู้แทนจากพรรคประชาธิปไตยก้าวหน้า คือ นายเฉิน สุย"
"เปี่ยน เป็นประธานาธิบดีคนใหม่ของไต้หวัน ผู้ประกาศนโยบายการเมือง"
"แข็งกร้าวว่าไต้หวันต้องการแยกตัวเป็นอิสระจากจีนแผ่นดินใหญ่ ยุติ"
"ยุคของพรรคชาตินิยมที่ยังฝักใฝ่แผ่นดินใหญ่อยู่ จีนแผ่นดินใหญ่จึง"
"ถือว่าเป็นกบฏต่อการปกครองของจีน เพราะแต่ไหนแต่ไร ไต้หวันไม่เคย"
"ประกาศอย่างเป็นทางการว่าเป็นประเทศอิสระแยกจากจีน และจีนพูดอยู่"
"เสมอว่าไต้หวันเป็นเด็กในปกครองที่ค่อนข้างจะหัวดื้อและเกเร หาก"
"ไต้หวันประกาศว่าเป็นอิสระจากจีนเมื่อใด จีนก็จะยกกำลังจัดการ"
"กับไต้หวันทันที\n"
"ในขณะที่ความสัมพันธ์ทางการเมืองระหว่างสองจีนในสายตาชาวโลก"
"เลวร้ายลง จีนทั้งสองกลับมีการติดต่อทางการค้ากันมากขึ้น มีการ"
"ผ่อนปรนอนุญาตให้ชาวไต้หวันเดินทางไปจีนแผ่นดินใหญ่เพื่อเยี่ยม"
"ญาติได้ เกิดปรากฏการณ์สำคัญคือนักธุรกิจไต้หวันหอบเงินทุนกว่า "
"20,000 ล้านดอลลาร์สหรัฐ ไปลงทุนดำเนินธุรกิจทางตอนใต้ของจีน"
"แผ่นดินใหญ่ จนกระทั่งขณะนี้ชาวไต้หวันกลายเป็นนักลงทุนรายใหญ่"
"เป็นลำดับ 2 ของจีน\n"
"วันที่ 24 พฤษภาคม 2560 ศาลรัฐธรรมนูญวินิจฉัยว่ากฎหมายสมรส"
"ปัจจุบันในเวลานั้น ละเมิดรัฐธรรมนูญ โดยปฏิเสธสิทธิสมรสของคู่รัก"
"เพศเดียวกันชาวไต้หวัน ศาลวินิจฉัยว่าหากสภานิติบัญญัติไม่ผ่าน"
"การแก้ไขกฎหมายที่เพียงพอต่อกฎหมายสมรสของไต้หวันภายในสองปี "
"การสมรสเพศเดียวกันจะชอบด้วยกฎหมายโดยอัตโนมัติในไต้หวัน[17] "
"วันที่ 17 พฤษภาคม 2562 สภานิติบัญญัติไต้หวันอนุมัติ"
"ร่างกฎหมายทำให้การสมรสเพศเดียวกันชอบด้วยกฎหมาย"
" ทำให้เป็นประเทศแรกในทวีปเอเชียที่ผ่านกฎหมายดังกล่าว[18][19]"
)
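        # The "danger" strings below are highly repetitive inputs meant to
        # stress worst-case behaviour of the tokenizers (see the newmm-safe
        # tests further down).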
self.danger_text1 = (
"ชิชิชิชิชิชิชิชิชิชิชิชิชิชิชิชิชิชิชิชิชิชิชิชิชิชิชิชิชิชิ"
"ชิชิชิชิชิชิชิชิชิชิชิชิชิชิชิชิชิชิชิชิชิชิชิชิชิชิชิชิชิชิ"
"ชิชิชิชิชิชิชิชิชิ"
)
self.danger_text2 = (
"ด้านหน้าด้านหน้าด้านหน้าด้านหน้าด้านหน้าด้านหน้าด้าน"
"หน้าด้านหน้าด้านหน้าด้านหน้าด้านหน้าด้านหน้าด้านหน้าด้าน"
)
self.danger_text3 = (
"ด้านหน้าด้านหน้าด้านหน้าด้านหน้าด้านหน้าด้านหน้าด้านหน้า"
"ด้านหน้าด้านหน้าด้านหน้าด้านหน้าด้านหน้าด้านหน้าด้านหน้า"
"ด้านหน้าด้านหน้าด้านหน้าด้านหน้าด้านหน้าด้านหน้าด้านหน้า"
"ด้านหน้าด้านหน้าด้านกกกกกก"
"กกกกกกกกกกกกกกกกกกกกกกกกกกกกกกกกกกกกกก"
)
def test_Tokenizer(self):
_tokenizer = Tokenizer(DEFAULT_WORD_DICT_TRIE)
self.assertEqual(_tokenizer.word_tokenize(""), [])
_tokenizer.set_tokenize_engine("longest")
self.assertEqual(_tokenizer.word_tokenize(None), [])
_tokenizer = Tokenizer()
self.assertEqual(_tokenizer.word_tokenize("ก"), ["ก"])
with self.assertRaises(NotImplementedError):
Tokenizer(engine="catcut")
def test_clause_tokenize(self):
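        # clause_tokenize() groups an already word-tokenized sentence into
        # clauses (a list of lists of words).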
self.assertIsNotNone(sent_clause_tokenize(["ฉัน", "ทดสอบ"]))
self.assertIsInstance(sent_clause_tokenize(["ฉัน", "ทดสอบ"]), list)
def test_sent_tokenize(self):
self.assertEqual(sent_tokenize(None), [])
self.assertEqual(sent_tokenize(""), [])
self.assertEqual(
sent_tokenize("รักน้ำ รักปลา ", engine="whitespace"),
["รักน้ำ", "รักปลา", ""],
)
self.assertEqual(
sent_tokenize("รักน้ำ รักปลา ", engine="whitespace+newline"),
["รักน้ำ", "รักปลา"],
)
sent_1 = "ฉันไปโรงเรียน เธอไปโรงพยาบาล"
sent_1_toks = ["ฉันไปโรงเรียน ", "เธอไปโรงพยาบาล"]
sent_2 = "วันนี้ฉันกินข้าว และโดดเรียน"
sent_2_toks = ["วันนี้ฉันกินข้าว และโดดเรียน"]
sent_3 = (
"(1) บทความนี้ผู้เขียนสังเคราะห์ขึ้นมา"
+ "จากผลงานวิจัยที่เคยทำมาในอดีต"
+ " มิได้ทำการศึกษาค้นคว้าใหม่อย่างกว้างขวางแต่อย่างใด"
+ " จึงใคร่ขออภัยในความบกพร่องทั้งปวงมา ณ ที่นี้"
)
sent_3_toks = [
"(1) บทความนี้ผู้เขียนสังเคราะห์ขึ้นมา"
+ "จากผลงานวิจัยที่เคยทำมาในอดีต ",
"มิได้ทำการศึกษาค้นคว้าใหม่อย่างกว้างขวางแต่อย่างใด ",
"จึงใคร่ขออภัยในความบกพร่องทั้งปวงมา ณ ที่นี้",
]
self.assertEqual(
sent_tokenize(sent_1, engine="crfcut"),
sent_1_toks,
)
self.assertEqual(
sent_tokenize(sent_2, engine="crfcut"),
sent_2_toks,
)
self.assertEqual(
sent_tokenize(sent_3, engine="crfcut"),
sent_3_toks,
)
self.assertEqual(
sent_tokenize(sent_1),
sent_1_toks,
)
self.assertEqual(
sent_tokenize(sent_2),
sent_2_toks,
)
self.assertEqual(
sent_tokenize(sent_3),
sent_3_toks,
)
self.assertIsNotNone(
sent_tokenize(
sent_1,
keep_whitespace=False,
engine="whitespace",
),
)
self.assertIsNotNone(
sent_tokenize(
sent_1,
engine="tltk",
),
)
self.assertIsNotNone(
sent_tokenize(
sent_2,
engine="tltk",
),
)
self.assertIsNotNone(
sent_tokenize(
sent_3,
engine="tltk",
),
)
self.assertIsNotNone(
sent_tokenize(
sent_1,
engine="thaisum",
),
)
self.assertIsNotNone(
sent_tokenize(
sent_2,
engine="thaisum",
),
)
self.assertIsNotNone(
sent_tokenize(
sent_3,
engine="thaisum",
),
)
self.assertIsNotNone(
sent_tokenize(
sent_3,
engine="wtp",
),
)
self.assertIsNotNone(
sent_tokenize(
sent_3,
engine="wtp-tiny",
),
)
# self.assertIsNotNone(
# sent_tokenize(
# sent_3,
# engine="wtp-base",
# ),
# )
# self.assertIsNotNone(
# sent_tokenize(
# sent_3,
# engine="wtp-large",
# ),
# )
self.assertFalse(
" "
in sent_tokenize(
sent_1,
engine="whitespace",
keep_whitespace=False,
)
)
with self.assertRaises(ValueError):
sent_tokenize("ฉันไป กิน", engine="XX") # engine does not exist
def test_paragraph_tokenize(self):
sent = (
"(1) บทความนี้ผู้เขียนสังเคราะห์ขึ้นมา"
+ "จากผลงานวิจัยที่เคยทำมาในอดีต"
+ " มิได้ทำการศึกษาค้นคว้าใหม่อย่างกว้างขวางแต่อย่างใด"
+ " จึงใคร่ขออภัยในความบกพร่องทั้งปวงมา ณ ที่นี้"
)
self.assertIsNotNone(paragraph_tokenize(sent))
with self.assertRaises(ValueError):
paragraph_tokenize(sent, engine="ai2+2thai")
def test_subword_tokenize(self):
self.assertEqual(subword_tokenize(None), [])
self.assertEqual(subword_tokenize(""), [])
self.assertIsInstance(
subword_tokenize("สวัสดีดาวอังคาร", engine="tcc"), list
)
self.assertFalse(
"า" in subword_tokenize("สวัสดีดาวอังคาร", engine="tcc")
)
self.assertIsInstance(
subword_tokenize("สวัสดีดาวอังคาร", engine="tcc_p"), list
)
self.assertFalse(
"า" in subword_tokenize("สวัสดีดาวอังคาร", engine="tcc_p")
)
self.assertEqual(subword_tokenize(None, engine="etcc"), [])
self.assertEqual(subword_tokenize("", engine="etcc"), [])
self.assertIsInstance(
subword_tokenize("สวัสดิีดาวอังคาร", engine="etcc"), list
)
self.assertFalse(
"า" in subword_tokenize("สวัสดีดาวอังคาร", engine="etcc")
)
self.assertIsInstance(subword_tokenize("โควิด19", engine="etcc"), list)
self.assertEqual(subword_tokenize(None, engine="wangchanberta"), [])
self.assertEqual(subword_tokenize("", engine="wangchanberta"), [])
self.assertIsInstance(
subword_tokenize("สวัสดิีดาวอังคาร", engine="wangchanberta"), list
)
self.assertFalse(
"า" in subword_tokenize("สวัสดีดาวอังคาร", engine="wangchanberta")
)
self.assertIsInstance(
subword_tokenize("โควิด19", engine="wangchanberta"), list
)
self.assertFalse(
" " in subword_tokenize("พันธมิตร ชา นม", keep_whitespace=False)
)
self.assertEqual(
subword_tokenize("สวัสดีชาวโลก", engine="dict"),
["สวัส", "ดี", "ชาว", "โลก"],
)
self.assertFalse(
"า" in subword_tokenize("สวัสดีชาวโลก", engine="dict")
)
self.assertEqual(subword_tokenize(None, engine="ssg"), [])
self.assertEqual(
subword_tokenize("แมวกินปลา", engine="ssg"), ["แมว", "กิน", "ปลา"]
)
self.assertTrue(
"ดาว" in subword_tokenize("สวัสดีดาวอังคาร", engine="ssg")
)
self.assertFalse(
"า" in subword_tokenize("สวัสดีดาวอังคาร", engine="ssg")
)
self.assertFalse(
" " in subword_tokenize("พันธมิตร ชา นม", keep_whitespace=False)
)
self.assertEqual(subword_tokenize(None, engine="tltk"), [])
self.assertEqual(subword_tokenize("", engine="tltk"), [])
self.assertIsInstance(
subword_tokenize("สวัสดิีดาวอังคาร", engine="tltk"), list
)
self.assertFalse(
"า" in subword_tokenize("สวัสดีดาวอังคาร", engine="tltk")
)
self.assertIsInstance(subword_tokenize("โควิด19", engine="tltk"), list)
with self.assertRaises(ValueError):
subword_tokenize("นกแก้ว", engine="XX") # engine does not exist
def test_word_tokenize(self):
self.assertEqual(word_tokenize(""), [])
self.assertEqual(
word_tokenize("ฉันรักภาษาไทยเพราะฉันเป็นคนไทย"),
["ฉัน", "รัก", "ภาษาไทย", "เพราะ", "ฉัน", "เป็น", "คนไทย"],
)
self.assertIsNotNone(word_tokenize(self.text_1, engine="nlpo3"))
self.assertIsNotNone(word_tokenize(self.text_1, engine="attacut"))
self.assertIsNotNone(word_tokenize(self.text_1, engine="deepcut"))
self.assertIsNotNone(word_tokenize(self.text_1, engine="icu"))
self.assertIsNotNone(word_tokenize(self.text_1, engine="longest"))
self.assertIsNotNone(word_tokenize(self.text_1, engine="mm"))
self.assertIsNotNone(word_tokenize(self.text_1, engine="nercut"))
self.assertIsNotNone(word_tokenize(self.text_1, engine="newmm"))
self.assertIsNotNone(word_tokenize(self.text_1, engine="sefr_cut"))
self.assertIsNotNone(word_tokenize(self.text_1, engine="tltk"))
self.assertIsNotNone(word_tokenize(self.text_1, engine="oskut"))
with self.assertRaises(ValueError):
word_tokenize("หมอนทอง", engine="XX") # engine does not exist
self.assertTrue(
"ไฟ" in word_tokenize("รถไฟฟ้า", custom_dict=dict_trie(["ไฟ"]))
)
def test_attacut(self):
self.assertEqual(attacut.segment(None), [])
self.assertEqual(attacut.segment(""), [])
self.assertEqual(
word_tokenize("ฉันรักภาษาไทยเพราะฉันเป็นคนไทย", engine="attacut"),
["ฉัน", "รัก", "ภาษา", "ไทย", "เพราะ", "ฉัน", "เป็น", "คน", "ไทย"],
)
self.assertEqual(
attacut.segment(
"ฉันรักภาษาไทยเพราะฉันเป็นคนไทย", model="attacut-sc"
),
["ฉัน", "รัก", "ภาษา", "ไทย", "เพราะ", "ฉัน", "เป็น", "คน", "ไทย"],
)
self.assertIsNotNone(
attacut.segment(
"ฉันรักภาษาไทยเพราะฉันเป็นคนไทย", model="attacut-c"
)
)
def test_deepcut(self):
self.assertEqual(deepcut.segment(None), [])
self.assertEqual(deepcut.segment(""), [])
self.assertIsNotNone(deepcut.segment("ทดสอบ", DEFAULT_WORD_DICT_TRIE))
self.assertIsNotNone(deepcut.segment("ทดสอบ", ["ทด", "สอบ"]))
self.assertIsNotNone(word_tokenize("ทดสอบ", engine="deepcut"))
self.assertIsNotNone(
word_tokenize(
"ทดสอบ", engine="deepcut", custom_dict=DEFAULT_WORD_DICT_TRIE
)
)
def test_etcc(self):
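        # ETCC = Enhanced Thai Character Cluster segmentation.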
self.assertEqual(etcc.segment(None), [])
self.assertEqual(etcc.segment(""), [])
self.assertIsInstance(etcc.segment("คืนความสุข"), list)
self.assertEqual(
etcc.segment("หาเงินเพื่อเรียน"),
["หา", "เงิน", "เพื่", "อ", "เรีย", "น"],
)
self.assertEqual(etcc.segment("หนังสือ"), ["ห", "นัง", "สือ"])
self.assertIsNotNone(
etcc.segment(
"หมูแมวเหล่านี้ด้วยเหตุผลเชื่อมโยงทางกรรมพันธุ์"
+ "สัตว์มีแขนขาหน้าหัวเราะเพราะแข็งขืน"
)
)
def test_icu(self):
self.assertEqual(pyicu.segment(None), [])
self.assertEqual(pyicu.segment(""), [])
self.assertEqual(
word_tokenize("ฉันรักภาษาไทยเพราะฉันเป็นคนไทย", engine="icu"),
["ฉัน", "รัก", "ภาษา", "ไทย", "เพราะ", "ฉัน", "เป็น", "คน", "ไทย"],
)
def test_tltk(self):
self.assertEqual(tltk.segment(None), [])
self.assertEqual(tltk.segment(""), [])
self.assertEqual(
tltk.syllable_tokenize("ฉันรักภาษาไทยเพราะฉันเป็นคนไทย"),
[
"ฉัน",
"รัก",
"ภา",
"ษา",
"ไทย",
"เพราะ",
"ฉัน",
"เป็น",
"คน",
"ไทย",
],
)
self.assertEqual(tltk.syllable_tokenize(None), [])
self.assertEqual(tltk.syllable_tokenize(""), [])
def test_longest(self):
self.assertEqual(longest.segment(None), [])
self.assertEqual(longest.segment(""), [])
self.assertIsInstance(
longest.segment("กรุงเทพฯมากๆเพราโพาง BKKฯ"), list
)
self.assertEqual(
word_tokenize("ฉันรักภาษาไทยเพราะฉันเป็นคนไทย", engine="longest"),
["ฉัน", "รัก", "ภาษาไทย", "เพราะ", "ฉัน", "เป็น", "คนไทย"],
)
longest_tokenizer = Tokenizer(["ปวด", "เฉียบ", "พลัน", "เฉียบพลัน"])
self.assertEqual(
longest_tokenizer.word_tokenize("ปวดเฉียบพลัน"),
["ปวด", "เฉียบพลัน"],
)
self.assertEqual(
longest_tokenizer.word_tokenize("เฉียบพลัน"),
["เฉียบพลัน"],
)
def test_mm(self):
self.assertEqual(multi_cut.segment(None), [])
self.assertEqual(multi_cut.segment(""), [])
self.assertIsNotNone(multi_cut.segment("ตัด", dict_trie([""])))
self.assertEqual(word_tokenize("", engine="mm"), [])
self.assertEqual(
word_tokenize("ฉันรักภาษาไทยเพราะฉันเป็นคนไทย", engine="mm"),
["ฉัน", "รัก", "ภาษาไทย", "เพราะ", "ฉัน", "เป็น", "คนไทย"],
)
self.assertEqual(
word_tokenize("19...", engine="mm"),
["19", "..."],
)
self.assertEqual(
word_tokenize("19.", engine="mm"),
["19", "."],
)
self.assertEqual(
word_tokenize("19.84", engine="mm"),
["19.84"],
)
self.assertEqual(
word_tokenize("127.0.0.1", engine="mm"),
["127.0.0.1"],
)
self.assertEqual(
word_tokenize("USD1,984.42", engine="mm"),
["USD", "1,984.42"],
)
self.assertIsNotNone(multi_cut.mmcut("ทดสอบ"))
self.assertIsNotNone(
multi_cut.find_all_segment("รถไฟฟ้ากรุงเทพมหานครBTS")
)
self.assertEqual(multi_cut.find_all_segment(None), [])
def test_newmm(self):
self.assertEqual(newmm.segment(None), [])
self.assertEqual(newmm.segment(""), [])
self.assertEqual(
word_tokenize("ฉันรักภาษาไทยเพราะฉันเป็นคนไทย", engine="newmm"),
["ฉัน", "รัก", "ภาษาไทย", "เพราะ", "ฉัน", "เป็น", "คนไทย"],
)
self.assertEqual(
word_tokenize("19...", engine="newmm"),
["19", "..."],
)
self.assertEqual(
word_tokenize("19.", engine="newmm"),
["19", "."],
)
self.assertEqual(
word_tokenize("19.84", engine="newmm"),
["19.84"],
)
self.assertEqual(
word_tokenize("127.0.0.1", engine="newmm"),
["127.0.0.1"],
)
self.assertEqual(
word_tokenize("USD1,984.42", engine="newmm"),
["USD", "1,984.42"],
)
self.assertEqual(
word_tokenize(
"สวัสดีครับ สบายดีไหมครับ",
engine="newmm",
keep_whitespace=True,
),
["สวัสดี", "ครับ", " ", "สบายดี", "ไหม", "ครับ"],
)
self.assertEqual(
word_tokenize("จุ๋มง่วงนอนยัง", engine="newmm"),
["จุ๋ม", "ง่วงนอน", "ยัง"],
)
self.assertEqual(
word_tokenize("จุ๋มง่วง", engine="newmm"), ["จุ๋ม", "ง่วง"]
)
self.assertEqual(
word_tokenize(
"จุ๋ม ง่วง", engine="newmm", keep_whitespace=False
),
["จุ๋ม", "ง่วง"],
)
self.assertFalse(
" "
in word_tokenize(
"จุ๋มง่วง",
keep_whitespace=False,
)
)
def test_newmm_longtext(self):
self.assertIsInstance(
word_tokenize(self.long_text, engine="newmm"), list
)
self.assertIsInstance(
word_tokenize(self.long_text, engine="newmm-safe"), list
)
def test_newmm_dangertext(self):
self.assertIsInstance(
word_tokenize(self.danger_text1, engine="newmm"), list
)
self.assertIsInstance(
word_tokenize(self.danger_text2, engine="newmm"), list
)
self.assertIsInstance(
word_tokenize(self.danger_text3, engine="newmm"), list
)
self.assertIsInstance(
word_tokenize(self.danger_text1, engine="newmm-safe"), list
)
self.assertIsInstance(
word_tokenize(self.danger_text2, engine="newmm-safe"), list
)
self.assertIsInstance(
word_tokenize(self.danger_text3, engine="newmm-safe"), list
)
def test_nercut(self):
self.assertEqual(nercut.segment(None), [])
self.assertEqual(nercut.segment(""), [])
self.assertIsNotNone(nercut.segment("ทดสอบ"))
self.assertEqual(nercut.segment("ทันแน่ๆ"), ['ทัน', 'แน่ๆ'])
self.assertEqual(nercut.segment("%1ครั้ง"), ['%', '1', 'ครั้ง'])
self.assertEqual(nercut.segment("ทุ๊กกโคนน"), ['ทุ๊กกโคนน'])
self.assertIsNotNone(
nercut.segment("อย่าลืมอัพการ์ดนะจ๊ะ")
)
self.assertIsNotNone(word_tokenize("ทดสอบ", engine="nercut"))
def test_ssg(self):
self.assertEqual(ssg.segment(None), [])
self.assertEqual(ssg.segment(""), [])
self.assertTrue(
"ดาว" in subword_tokenize("สวัสดีดาวอังคาร", engine="ssg")
)
def test_tcc(self):
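        # TCC = Thai Character Cluster, an indivisible run of Thai characters
        # smaller than a syllable.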
self.assertEqual(tcc.segment(None), [])
self.assertEqual(tcc.segment(""), [])
self.assertEqual(
tcc.segment("ประเทศไทย"), ["ป", "ระ", "เท", "ศ", "ไท", "ย"]
)
self.assertEqual(
tcc.segment("พิสูจน์ได้ค่ะ"), ['พิ', 'สูจน์', 'ได้', 'ค่ะ']
)
self.assertEqual(
tcc.segment("หอมรดกไทย"), ['ห', 'อ', 'ม', 'ร', 'ด', 'ก', 'ไท', 'ย']
)
self.assertEqual(
tcc.segment("เรือน้อยลอยอยู่"), ['เรื', 'อ', 'น้', 'อ', 'ย', 'ล', 'อ', 'ย', 'อ', 'ยู่']
)
self.assertEqual(
tcc.segment("ประสานงานกับลูกค้า"), ['ป', 'ระ', 'สา', 'น', 'งา', 'น', 'กั', 'บ', 'ลู', 'ก', 'ค้า']
)
self.assertEqual(
tcc.segment("ประกันภัยสัมพันธ์"), ['ป', 'ระ', 'กั', 'น', 'ภั', 'ย', 'สั', 'ม', 'พั','นธ์'] # It don't look like TCC in ETCC paper
)
self.assertEqual(
tcc.segment("ตากลม"), ['ตา', 'ก', 'ล', 'ม']
)
self.assertEqual(
tcc.segment("เครื่องมือสื่อสารมีหลายชนิด"),
[
'เค',
'รื่อ',
'ง',
'มือ',
'สื่อ',
'สา',
'ร',
'มี',
'ห',
'ลา',
'ย',
'ช',
'นิ',
'ด'
]
)
self.assertEqual(
tcc.segment("ประชาชน"), ['ป', 'ระ', 'ชา', 'ช', 'น']
)
self.assertEqual(
tcc.segment("ไหมไทย"), ['ไห', 'ม', 'ไท', 'ย']
)
self.assertEqual(
tcc.segment("ยินดี"), ['ยิ', 'น', 'ดี']
)
self.assertEqual(
tcc.segment("ขุดหลุม"), ['ขุ', 'ด', 'ห', 'ลุ', 'ม']
)
self.assertEqual(list(tcc.tcc("")), [])
self.assertEqual(tcc.tcc_pos(""), set())
def test_tcc_p(self):
self.assertEqual(tcc_p.segment(None), [])
self.assertEqual(tcc_p.segment(""), [])
self.assertEqual(
tcc_p.segment("ประเทศไทย"), ["ป", "ระ", "เท", "ศ", "ไท", "ย"]
)
self.assertEqual(
tcc_p.segment("พิสูจน์ได้ค่ะ"), ['พิ', 'สูจน์', 'ได้', 'ค่ะ']
)
self.assertEqual(
tcc_p.segment("หอมรดกไทย"), ['ห', 'อ', 'ม', 'ร', 'ด', 'ก', 'ไท', 'ย']
)
self.assertEqual(
tcc_p.segment("เรือน้อยลอยอยู่"), ['เรือ', 'น้', 'อ', 'ย', 'ล', 'อ', 'ย', 'อ', 'ยู่']
)
        # Not implemented yet
# self.assertEqual(
# tcc.segment("ประสานงานกับลูกค้า"), ['ป', 'ระ', 'สา', 'น', 'งา', 'น', 'กั', 'บ', 'ลู', 'ก', 'ค้า']
# )
# self.assertEqual(
# tcc.segment("ประกันภัยสัมพันธ์"), ['ป', 'ระ', 'กั', 'น', 'ภั', 'ย', 'สั', 'ม', 'พั','น','ธ์']
# )
# self.assertEqual(
# tcc.segment("ตากลม"), ['ตา', 'ก', 'ล', 'ม']
# )
self.assertEqual(list(tcc_p.tcc("")), [])
self.assertEqual(tcc_p.tcc_pos(""), set())
def test_sefr_cut(self):
self.assertEqual(sefr_cut.segment(None), [])
self.assertEqual(sefr_cut.segment(""), [])
self.assertIsNotNone(
sefr_cut.segment("ฉันรักภาษาไทยเพราะฉันเป็นคนไทย"),
)
self.assertIsNotNone(
sefr_cut.segment("ฉันรักภาษาไทยเพราะฉันเป็นคนไทย", engine="tnhc"),
)
def test_oskut(self):
self.assertEqual(oskut.segment(None), [])
self.assertEqual(oskut.segment(""), [])
self.assertIsNotNone(
oskut.segment("ฉันรักภาษาไทยเพราะฉันเป็นคนไทย"),
)
self.assertIsNotNone(
oskut.segment("ฉันรักภาษาไทยเพราะฉันเป็นคนไทย", engine="scads"),
)
def test_word_detokenize(self):
self.assertEqual(
word_detokenize(["ผม", "เลี้ยง", "5", "ตัว"]), "ผมเลี้ยง 5 ตัว"
)
self.assertEqual(
word_detokenize(["ผม", "เลี้ยง", " ", "5", "ตัว"], "list"),
[["ผม", "เลี้ยง", " ", "5", " ", "ตัว"]],
)
self.assertEqual(
word_detokenize(
["ผม", "เลี้ยง", "5", "10", "ตัว", "ๆ", "คน", "ดี"]
),
"ผมเลี้ยง 5 10 ตัว ๆ คนดี",
)
self.assertEqual(
word_detokenize(
["ผม", "เลี้ยง", "5", "ตัว", " ", "ๆ", "คน", "ดี"]
),
"ผมเลี้ยง 5 ตัว ๆ คนดี",
)
self.assertTrue(
isinstance(word_detokenize(["ผม", "เลี้ยง", "5", "ตัว"]), str)
)
self.assertEqual(
word_detokenize(["ม่ายย", " ", "ผม", "เลี้ยง", "5", "ตัว"]),
"ม่ายย ผมเลี้ยง 5 ตัว",
)
def test_numeric_data_format(self):
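        # join_broken_num (on by default) re-joins numeric tokens such as IP addresses, decimals, and thousand separators that tokenizers tend to split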
engines = ["attacut", "deepcut", "newmm", "sefr_cut"]
for engine in engines:
self.assertIn(
"127.0.0.1",
word_tokenize("ไอพีของคุณคือ 127.0.0.1 ครับ", engine=engine),
)
tokens = word_tokenize(
"เวลา 12:12pm มีโปรโมชั่น 11.11", engine=engine
)
self.assertTrue(
any([value in tokens for value in ["12:12pm", "12:12"]]),
msg=f"{engine}: {tokens}",
)
self.assertIn("11.11", tokens)
self.assertIn(
"1,234,567.89",
word_tokenize("รางวัลมูลค่า 1,234,567.89 บาท", engine=engine),
)
tokens = word_tokenize("อัตราส่วน 2.5:1 คือ 5:2", engine=engine)
self.assertIn("2.5:1", tokens)
self.assertIn("5:2", tokens)
# try turning off `join_broken_num`
engine = "attacut"
self.assertNotIn(
"127.0.0.1",
word_tokenize(
"ไอพีของคุณคือ 127.0.0.1 ครับ",
engine=engine,
join_broken_num=False,
),
)
self.assertNotIn(
"1,234,567.89",
word_tokenize(
"รางวัลมูลค่า 1,234,567.89 บาท",
engine=engine,
join_broken_num=False,
),
)
| 34,934 | 39.294118 | 142 | py |
pythainlp-dev/tests/test_tools.py | pythainlp-dev/tests/test_tools.py | # -*- coding: utf-8 -*-
import unittest
from pythainlp.tools import (
get_full_data_path,
get_pythainlp_data_path,
get_pythainlp_path,
)
class TestToolsPackage(unittest.TestCase):
def test_path(self):
data_filename = "ttc_freq.txt"
self.assertTrue(
get_full_data_path(data_filename).endswith(data_filename)
)
self.assertIsInstance(get_pythainlp_data_path(), str)
self.assertIsInstance(get_pythainlp_path(), str)
| 484 | 23.25 | 69 | py |
pythainlp-dev/tests/test_translate.py | pythainlp-dev/tests/test_translate.py | # -*- coding: utf-8 -*-
import unittest
from pythainlp.translate import (
ThZhTranslator,
ZhThTranslator,
Translate
)
from pythainlp.translate.en_th import (
EnThTranslator,
ThEnTranslator,
download_model_all
)
from pythainlp.corpus import remove
class TestTranslatePackage(unittest.TestCase):
def test_translate(self):
remove("scb_1m_th-en_spm")
self.assertIsNone(download_model_all())
self.th_en_translator = ThEnTranslator()
self.assertIsNotNone(
self.th_en_translator.translate(
"แมวกินปลา",
)
)
self.en_th_translator = EnThTranslator()
self.assertIsNotNone(
self.en_th_translator.translate(
"the cat eats fish.",
)
)
self.th_zh_translator = ThZhTranslator()
self.assertIsNotNone(
self.th_zh_translator.translate(
"ผมรักคุณ",
)
)
self.zh_th_translator = ZhThTranslator()
self.assertIsNotNone(
self.zh_th_translator.translate(
"我爱你",
)
)
self.th_en_translator = Translate('th', 'en')
self.assertIsNotNone(
self.th_en_translator.translate(
"แมวกินปลา",
)
)
self.en_th_translator = Translate('en', 'th')
self.assertIsNotNone(
self.en_th_translator.translate(
"the cat eats fish.",
)
)
self.th_zh_translator = Translate('th', 'zh')
self.assertIsNotNone(
self.th_zh_translator.translate(
"ผมรักคุณ",
)
)
self.zh_th_translator = Translate('zh', 'th')
self.assertIsNotNone(
self.zh_th_translator.translate(
"我爱你",
)
)
self.th_fr_translator = Translate('th', 'fr')
self.assertIsNotNone(
self.th_fr_translator.translate(
"ทดสอบระบบ",
)
)
self.th_fr_translator = Translate('th', 'fr', engine="small100")
self.assertIsNotNone(
self.th_fr_translator.translate(
"ทดสอบระบบ",
)
)
self.th_ja_translator = Translate('th', 'ja', engine="small100")
self.assertIsNotNone(
self.th_fr_translator.translate(
"ทดสอบระบบ",
)
)
with self.assertRaises(ValueError):
self.th_cat_translator = Translate('th', 'cat', engine="fkfj")
| 2,578 | 27.655556 | 74 | py |
pythainlp-dev/tests/test_transliterate.py | pythainlp-dev/tests/test_transliterate.py | # -*- coding: utf-8 -*-
import unittest
import torch
from pythainlp.transliterate import romanize, transliterate, pronunciate, puan
from pythainlp.transliterate.ipa import trans_list, xsampa_list
from pythainlp.transliterate.thai2rom import ThaiTransliterator
from pythainlp.transliterate.thai2rom_onnx import ThaiTransliterator_ONNX
from pythainlp.transliterate.wunsen import WunsenTransliterate
from pythainlp.corpus import remove
_BASIC_TESTS = {
None: "",
"": "",
"abc": "abc",
"หมอก": "mok",
"หาย": "hai",
"แมว": "maeo",
"เดือน": "duean",
"ดำ": "dam",
"ดู": "du",
"บัว": "bua",
"กก": "kok",
"พร": "phon",
"กร": "kon",
"กรร": "kan",
"กรรม": "kam",
# "กรม": "krom", # failed
"ฝ้าย": "fai",
"นพพร": "nopphon",
"อัก": "ak",
# "ทีปกร": "thipakon", # failed
# "ธรรพ์": "than", # failed
# "ธรรม": "tham", # failed
# "มหา": "maha", # failed
# "หยาก": "yak", # failed
# "อยาก": "yak", # failed
# "ยมก": "yamok", # failed
# "กลัว": "klua", # failed
# "บ้านไร่": "banrai", # failed
# "ชารินทร์": "charin", # failed
}
# these are set of two-syllable words,
# to test if the transliteration/romanization is consistent, say
# romanize(1+2) = romanize(1) + romanize(2)
_CONSISTENCY_TESTS = [
# ("กระจก", "กระ", "จก"), # failed
# ("ระเบิด", "ระ", "เบิด"), # failed
# ("หยากไย่", "หยาก", "ไย่"), # failed
("ตากใบ", "ตาก", "ใบ"),
# ("จัดสรร", "จัด", "สรร"), # failed
]
class TestTransliteratePackage(unittest.TestCase):
def test_romanize(self):
self.assertEqual(romanize(None), "")
self.assertEqual(romanize(""), "")
self.assertEqual(romanize("แมว"), "maeo")
self.assertEqual(romanize("แมว", engine="tltk"), "maeo")
def test_romanize_royin_basic(self):
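        # "royin" implements the Royal Thai General System of Transcription (RTGS) published by the Royal Institute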
for word in _BASIC_TESTS:
expect = _BASIC_TESTS[word]
self.assertEqual(romanize(word, engine="royin"), expect)
def test_romanize_royin_consistency(self):
for word, part1, part2 in _CONSISTENCY_TESTS:
self.assertEqual(
romanize(word, engine="royin"),
(
romanize(part1, engine="royin")
+ romanize(part2, engine="royin")
),
)
def test_romanize_thai2rom(self):
self.assertEqual(romanize("แมว", engine="thai2rom"), "maeo")
self.assertEqual(romanize("บ้านไร่", engine="thai2rom"), "banrai")
self.assertEqual(romanize("สุนัข", engine="thai2rom"), "sunak")
self.assertEqual(romanize("นก", engine="thai2rom"), "nok")
self.assertEqual(romanize("ความอิ่ม", engine="thai2rom"), "khwam-im")
self.assertEqual(
romanize("กานต์ ณรงค์", engine="thai2rom"), "kan narong"
)
self.assertEqual(romanize("สกุนต์", engine="thai2rom"), "sakun")
self.assertEqual(romanize("ชารินทร์", engine="thai2rom"), "charin")
def test_romanize_thai2rom_onnx(self):
self.assertEqual(romanize("แมว", engine="thai2rom_onnx"), "maeo")
self.assertEqual(romanize("บ้านไร่", engine="thai2rom_onnx"), "banrai")
self.assertEqual(romanize("สุนัข", engine="thai2rom_onnx"), "sunak")
self.assertEqual(romanize("นก", engine="thai2rom_onnx"), "nok")
self.assertEqual(
romanize("ความอิ่ม", engine="thai2rom_onnx"), "khwam-im"
)
self.assertEqual(
romanize("กานต์ ณรงค์", engine="thai2rom_onnx"), "kan narong"
)
self.assertEqual(romanize("สกุนต์", engine="thai2rom_onnx"), "sakun")
self.assertEqual(
romanize("ชารินทร์", engine="thai2rom_onnx"), "charin"
)
def test_romanize_lookup(self):
# found in v1.4
self.assertEqual(romanize("บอล", engine="lookup"), "ball")
self.assertEqual(romanize("บอยแบนด์", engine="lookup"), "boyband")
self.assertEqual(romanize("กาแล็กซี", engine="lookup"), "galaxy")
self.assertEqual(romanize("กีย์เซอไรต์", engine="lookup"), "geyserite")
self.assertEqual(romanize("พลีโอนาสต์", engine="lookup"), "pleonaste")
self.assertEqual(
romanize("คาราเมล คาปูชิโน่", engine="lookup"),
"caramel cappuccino",
)
## found individually, but needs tokenization
self.assertEqual(
romanize("คาราเมลคาปูชิโน่", engine="lookup"), "khanamenkhapuchino"
)
# not found in v1.4
## default fallback
self.assertEqual(romanize("ภาพยนตร์", engine="lookup"), "phapn")
self.assertEqual(romanize("แมว", engine="lookup"), "maeo")
## fallback = 'thai2rom'
self.assertEqual(
romanize("ความอิ่ม", engine="lookup", fallback_engine="thai2rom"),
"khwam-im",
)
self.assertEqual(
romanize("สามารถ", engine="lookup", fallback_engine="thai2rom"),
"samat",
)
def test_thai2rom_prepare_sequence(self):
transliterater = ThaiTransliterator()
UNK_TOKEN = 1 # UNK_TOKEN or <UNK> is represented by 1
END_TOKEN = 3 # END_TOKEN or <end> is represented by 3
self.assertListEqual(
transliterater._prepare_sequence_in("A")
.cpu()
.detach()
.numpy()
.tolist(),
torch.tensor([UNK_TOKEN, END_TOKEN], dtype=torch.long)
.cpu()
.detach()
.numpy()
.tolist(),
)
self.assertListEqual(
transliterater._prepare_sequence_in("♥")
.cpu()
.detach()
.numpy()
.tolist(),
torch.tensor([UNK_TOKEN, END_TOKEN], dtype=torch.long)
.cpu()
.detach()
.numpy()
.tolist(),
)
self.assertNotEqual(
transliterater._prepare_sequence_in("ก")
.cpu()
.detach()
.numpy()
.tolist(),
torch.tensor([UNK_TOKEN, END_TOKEN], dtype=torch.long)
.cpu()
.detach()
.numpy()
.tolist(),
)
def test_thai2rom_onnx_prepare_sequence(self):
transliterater = ThaiTransliterator_ONNX()
UNK_TOKEN = 1 # UNK_TOKEN or <UNK> is represented by 1
END_TOKEN = 3 # END_TOKEN or <end> is represented by 3
self.assertListEqual(
transliterater._prepare_sequence_in("A").tolist(),
torch.tensor([UNK_TOKEN, END_TOKEN], dtype=torch.long)
.cpu()
.detach()
.numpy()
.tolist(),
)
self.assertListEqual(
transliterater._prepare_sequence_in("♥").tolist(),
torch.tensor([UNK_TOKEN, END_TOKEN], dtype=torch.long)
.cpu()
.detach()
.numpy()
.tolist(),
)
self.assertNotEqual(
transliterater._prepare_sequence_in("ก").tolist(),
torch.tensor([UNK_TOKEN, END_TOKEN], dtype=torch.long)
.cpu()
.detach()
.numpy()
.tolist(),
)
def test_transliterate(self):
self.assertEqual(transliterate(""), "")
self.assertEqual(transliterate("แมว", "pyicu"), "mæw")
self.assertEqual(transliterate("คน", engine="ipa"), "kʰon")
self.assertIsNotNone(transliterate("คน", engine="thaig2p"))
self.assertIsNotNone(transliterate("แมว", engine="thaig2p"))
self.assertIsNotNone(transliterate("คน", engine="tltk_g2p"))
self.assertIsNotNone(transliterate("แมว", engine="tltk_g2p"))
self.assertIsNotNone(transliterate("คน", engine="tltk_ipa"))
self.assertIsNotNone(transliterate("แมว", engine="tltk_ipa"))
self.assertIsNotNone(transliterate("คน", engine="iso_11940"))
self.assertIsNotNone(transliterate("แมว", engine="iso_11940"))
self.assertIsNotNone(trans_list("คน"))
self.assertIsNotNone(xsampa_list("คน"))
def test_transliterate_iso11940(self):
self.assertEqual(
transliterate("เชียงใหม่", engine="iso_11940"), "echīyngıh̄m̀"
)
self.assertEqual(
transliterate("ภาษาไทย", engine="iso_11940"), "p̣hās̛̄āịthy"
)
def test_transliterate_wunsen(self):
wt = WunsenTransliterate()
self.assertEqual(wt.transliterate("ohayō", lang="jp"), "โอฮาโย")
self.assertEqual(
wt.transliterate(
"ohayou", lang="jp", jp_input="Hepburn-no diacritic"
),
"โอฮาโย",
)
self.assertEqual(
wt.transliterate("ohayō", lang="jp", system="RI35"), "โอะฮะโย"
)
self.assertEqual(
wt.transliterate("annyeonghaseyo", lang="ko"), "อันนย็องฮาเซโย"
)
self.assertEqual(wt.transliterate("xin chào", lang="vi"), "ซีน จ่าว")
self.assertEqual(wt.transliterate("ni3 hao3", lang="zh"), "หนี เห่า")
self.assertEqual(
wt.transliterate("ni3 hao3", lang="zh", zh_sandhi=False),
"หนี่ เห่า",
)
self.assertEqual(
wt.transliterate("ni3 hao3", lang="zh", system="RI49"), "หนี ห่าว"
)
with self.assertRaises(NotImplementedError):
wt.transliterate("xin chào", lang="vii")
def test_pronunciate(self):
self.assertEqual(pronunciate(""), "")
remove("thai_w2p")
self.assertIsNotNone(pronunciate("คน", engine="w2p"))
self.assertIsNotNone(pronunciate("แมว", engine="w2p"))
self.assertIsNotNone(pronunciate("มข.", engine="w2p"))
self.assertIsNotNone(pronunciate("มช.", engine="w2p"))
self.assertIsNotNone(pronunciate("jks", engine="w2p"))
def test_puan(self):
self.assertEqual(puan("แมว"), "แมว")
self.assertEqual(puan("นาริน"), "นิน-รา")
self.assertEqual(puan("นาริน", show_pronunciation=False), "นินรา")
self.assertEqual(
puan("การทำความดี", show_pronunciation=False), "ดานทำความกี"
)
| 10,073 | 34.978571 | 79 | py |
pythainlp-dev/tests/test_ulmfit.py | pythainlp-dev/tests/test_ulmfit.py | # -*- coding: utf-8 -*-
import unittest
from pythainlp.tokenize import THAI2FIT_TOKENIZER
from pythainlp.ulmfit import (
THWIKI_LSTM,
ThaiTokenizer,
document_vector,
merge_wgts,
post_rules_th,
post_rules_th_sparse,
pre_rules_th,
pre_rules_th_sparse,
process_thai,
)
from pythainlp.ulmfit.preprocess import (
fix_html,
lowercase_all,
remove_space,
replace_rep_after,
replace_rep_nonum,
replace_url,
replace_wrep_post,
replace_wrep_post_nonum,
rm_brackets,
rm_useless_newlines,
rm_useless_spaces,
spec_add_spaces,
ungroup_emoji,
)
from pythainlp.ulmfit.tokenizer import BaseTokenizer as base_tokenizer
import pandas as pd
import pickle
import torch
# fastai
import fastai
from fastai.text import *
# pythainlp
from pythainlp.ulmfit import *
class TestUlmfitPackage(unittest.TestCase):
def test_ThaiTokenizer(self):
self.thai = ThaiTokenizer()
self.assertIsNotNone(self.thai.tokenizer("ทดสอบการตัดคำ"))
self.assertIsNone(self.thai.add_special_cases(["แมว"]))
def test_BaseTokenizer(self):
self.base = base_tokenizer(lang="th")
self.assertIsNotNone(self.base.tokenizer("ทดสอบ การ ตัด คำ"))
self.assertIsNone(self.base.add_special_cases(["แมว"]))
def test_load_pretrained(self):
self.assertIsNotNone(THWIKI_LSTM)
def test_pre_rules_th(self):
self.assertIsNotNone(pre_rules_th)
def test_post_rules_th(self):
self.assertIsNotNone(post_rules_th)
def test_pre_rules_th_sparse(self):
self.assertIsNotNone(pre_rules_th_sparse)
def test_post_rules_th_sparse(self):
self.assertIsNotNone(post_rules_th_sparse)
def test_fix_html(self):
self.assertEqual(
fix_html("Some HTML text<br />"), "Some HTML& text\n"
)
def test_rm_useless_spaces(self):
self.assertEqual(
rm_useless_spaces("Inconsistent use of spaces."),
"Inconsistent use of spaces.",
)
def test_spec_add_spaces(self):
self.assertEqual(
spec_add_spaces("I #like to #put #hashtags #everywhere!"),
"I # like to # put # hashtags # everywhere!",
)
def test_replace_rep_after(self):
self.assertEqual(replace_rep_after("น้อยยยยยยยย"), "น้อยxxrep8 ")
def test_replace_rep_nonum(self):
self.assertEqual(replace_rep_nonum("น้อยยยยยยยย"), "น้อย xxrep ")
def test_replace_wrep_post(self):
self.assertEqual(
replace_wrep_post(["น้อย", "น้อย"]), ["xxwrep", "1", "น้อย"]
)
self.assertEqual(
replace_wrep_post(["นก", "กา", "กา", "กา"]),
["นก", "xxwrep", "2", "กา"],
)
def test_replace_wrep_post_nonum(self):
self.assertEqual(
replace_wrep_post_nonum(["น้อย", "น้อย"]), ["xxwrep", "น้อย"]
)
self.assertEqual(
replace_wrep_post_nonum(["นก", "กา", "กา", "กา"]),
["นก", "xxwrep", "กา"],
)
def test_remove_space(self):
self.assertEqual(remove_space([" ", "น้อย", " ", "."]), ["น้อย", "."])
def test_replace_url(self):
self.assertEqual(replace_url("https://thainlp.org web"), "xxurl web")
def test_rm_useless_newlines(self):
self.assertEqual(rm_useless_newlines("text\n\n"), "text ")
def test_rm_brackets(self):
self.assertEqual(rm_brackets("()()(ข้อความ)"), "(ข้อความ)")
self.assertEqual(rm_brackets("[][][ข้อความ]"), "[ข้อความ]")
self.assertEqual(rm_brackets("{}{}{ข้อความ}"), "{ข้อความ}")
def test_ungroup_emoji(self):
self.assertEqual(ungroup_emoji("👍👍👍"), ["👍", "👍", "👍"])
def test_lowercase_all(self):
self.assertEqual(
lowercase_all("HeLlO ."), ["h", "e", "l", "l", "o", " ", "."]
)
def test_process_thai_sparse(self):
text = "👍👍👍 #AnA มากกกก น้อยน้อย ().1146"
actual = process_thai(text)
# after pre_rules_th_sparse
# >>> "👍👍👍 # Ana มาก xxrep น้้อยน้อย .1146"
#
# after tokenize with word_tokenize(engine="newmm")
# >>> ["👍👍👍", " ", "#", " ","Ana", " ", "มาก", "xxrep",
# " ", "น้อย", "น้อย", " ", ".", "1146"]
#
# after post_rules_th
# - remove whitespace token (" ")
# >>> ["xxwrep, "👍", "#", "ana", "มาก",
# "xxrep", "xxwrep", "น้อย", ".", "1146"]
expect = [
"xxwrep",
"👍",
"#",
"ana",
"มาก",
"xxrep",
"xxwrep",
"น้อย",
".",
"1146",
]
self.assertEqual(actual, expect)
def test_process_thai_dense(self):
text = "👍👍👍 #AnA มากกกก น้อยน้อย ().1146"
actual = process_thai(
text,
pre_rules=pre_rules_th,
post_rules=post_rules_th,
tok_func=THAI2FIT_TOKENIZER.word_tokenize,
)
# after pre_rules_th
# >>> "👍👍👍 # Ana มากxxrep4 น้้อยน้อย .1146"
#
# after tokenize with word_tokenize(engine="newmm")
# >>> ["👍👍👍", " ", "#", "Ana", " ", "มาก", "xxrep", "4",
# " ", "น้อย", "น้อย", " ", ".", "1146"]
# after post_rules_th
# -- because it performs `replace_wrep_post` before `ungroup_emoji`,
# 3 repetitive emoji are not marked with special token "xxwrep num"
#
# >>> ["👍", "👍","👍", " ", "#", "ana", " ", "มาก",
# "xxrep", "4", " ", "xxwrep", "1", "น้อย", " ",
# ".", "1146"]
expect = [
"👍",
"👍",
"👍",
" ",
"#",
" ",
"ana",
" ",
"มาก",
"xxrep",
"4",
" ",
"xxwrep",
"1",
"น้อย",
" ",
".",
"1146",
]
self.assertEqual(actual, expect)
def test_document_vector(self):
imdb = untar_data(URLs.IMDB_SAMPLE)
dummy_df = pd.read_csv(imdb/'texts.csv')
thwiki = THWIKI_LSTM
thwiki_itos = pickle.load(open(thwiki['itos_fname'], 'rb'))
thwiki_vocab = fastai.text.transform.Vocab(thwiki_itos)
tt = Tokenizer(
tok_func=ThaiTokenizer,
lang='th',
pre_rules=pre_rules_th,
post_rules=post_rules_th
)
processor = [
TokenizeProcessor(
tokenizer=tt, chunksize=10000, mark_fields=False
),
NumericalizeProcessor(
vocab=thwiki_vocab, max_vocab=60000, min_freq=3
)
]
data_lm = (
TextList.from_df(
dummy_df,
imdb,
cols=['text'],
processor=processor
)
.split_by_rand_pct(0.2)
.label_for_lm()
.databunch(bs=64)
)
data_lm.sanity_check()
config = dict(
emb_sz=400,
n_hid=1550,
n_layers=4,
pad_token=1,
qrnn=False,
tie_weights=True,
out_bias=True,
output_p=0.25,
hidden_p=0.1,
input_p=0.2,
embed_p=0.02,
weight_p=0.15
)
trn_args = dict(drop_mult=0.9, clip=0.12, alpha=2, beta=1)
learn = language_model_learner(
data_lm,
AWD_LSTM,
config=config,
pretrained=False,
**trn_args
)
learn.load_pretrained(**thwiki)
self.assertIsNotNone(
document_vector('วันนี้วันดีปีใหม่', learn, data_lm)
)
self.assertIsNotNone(
document_vector('วันนี้วันดีปีใหม่', learn, data_lm, agg="sum")
)
with self.assertRaises(ValueError):
document_vector('วันนี้วันดีปีใหม่', learn, data_lm, agg='abc')
def test_merge_wgts(self):
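        # merge_wgts adapts pretrained embedding weights (with vocab itos_pre) to a new vocabulary (itos_new)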
wgts = {'0.encoder.weight': torch.randn(5,3)}
itos_pre = ["แมว", "คน", "หนู"]
itos_new = ["ปลา", "เต่า", "นก"]
em_sz = 3
self.assertIsNotNone(merge_wgts(em_sz, wgts, itos_pre, itos_new))
| 8,261 | 28.091549 | 78 | py |
pythainlp-dev/tests/test_util.py | pythainlp-dev/tests/test_util.py | # -*- coding: utf-8 -*-
"""
Unit tests for pythainlp.util module.
"""
import os
import unittest
from collections import Counter
from datetime import datetime, time, timedelta, timezone
from pythainlp.corpus import _CORPUS_PATH, thai_words
from pythainlp.corpus.common import _THAI_WORDS_FILENAME
from pythainlp.util import (
Trie,
arabic_digit_to_thai_digit,
bahttext,
collate,
countthai,
count_thai_chars,
dict_trie,
display_thai_char,
digit_to_text,
emoji_to_thai,
eng_to_thai,
find_keyword,
is_native_thai,
isthai,
isthaichar,
normalize,
now_reign_year,
num_to_thaiword,
maiyamok,
rank,
reign_year_to_ad,
remove_dangling,
remove_dup_spaces,
remove_tonemark,
remove_zw,
text_to_arabic_digit,
text_to_thai_digit,
thaiword_to_date,
thai_digit_to_arabic_digit,
thai_strftime,
thaiword_to_time,
time_to_thaiword,
thai_to_eng,
thaiword_to_num,
thai_keyboard_dist,
text_to_num,
words_to_num,
sound_syllable,
syllable_length,
syllable_open_close_detector,
tone_detector,
thai_word_tone_detector,
convert_years,
thai_strptime,
nectec_to_ipa,
ipa_to_rtgs,
remove_tone_ipa,
tis620_to_utf8,
)
from pythainlp.util.spell_words import spell_word
class TestUtilPackage(unittest.TestCase):
# ### pythainlp.util.collate
def test_collate(self):
self.assertEqual(collate(["ไก่", "กก"]), ["กก", "ไก่"])
self.assertEqual(
collate(["ไก่", "เป็ด", "หมู", "วัว"]),
["ไก่", "เป็ด", "วัว", "หมู"],
)
# ### pythainlp.util.numtoword
def test_number(self):
self.assertEqual(
bahttext(5611116.50),
"ห้าล้านหกแสนหนึ่งหมื่นหนึ่งพันหนึ่งร้อยสิบหกบาทห้าสิบสตางค์",
)
self.assertEqual(bahttext(116), "หนึ่งร้อยสิบหกบาทถ้วน")
self.assertEqual(bahttext(0), "ศูนย์บาทถ้วน")
self.assertEqual(bahttext(None), "")
self.assertEqual(num_to_thaiword(None), "")
self.assertEqual(num_to_thaiword(0), "ศูนย์")
self.assertEqual(num_to_thaiword(112), "หนึ่งร้อยสิบสอง")
self.assertEqual(num_to_thaiword(-273), "ลบสองร้อยเจ็ดสิบสาม")
self.assertEqual(thaiword_to_num("ศูนย์"), 0)
self.assertEqual(thaiword_to_num("แปด"), 8)
self.assertEqual(thaiword_to_num("ยี่สิบ"), 20)
self.assertEqual(thaiword_to_num("ร้อยสิบสอง"), 112)
self.assertEqual(
thaiword_to_num("หกล้านหกแสนหกหมื่นหกพันหกร้อยหกสิบหก"), 6666666
)
self.assertEqual(thaiword_to_num("สองล้านสามแสนหกร้อยสิบสอง"), 2300612)
self.assertEqual(thaiword_to_num("หนึ่งร้อยสิบล้าน"), 110000000)
self.assertEqual(
thaiword_to_num("สิบห้าล้านล้านเจ็ดสิบสอง"), 15000000000072
)
self.assertEqual(thaiword_to_num("หนึ่งล้านล้าน"), 1000000000000)
self.assertEqual(
thaiword_to_num("สองแสนสี่หมื่นสามสิบล้านสี่พันล้าน"),
240030004000000000,
)
self.assertEqual(thaiword_to_num("ร้อยสิบล้านแปดแสนห้าพัน"), 110805000)
self.assertEqual(thaiword_to_num("ลบหนึ่ง"), -1)
text = "ลบหนึ่งร้อยล้านสี่แสนห้าพันยี่สิบเอ็ด"
self.assertEqual(num_to_thaiword(thaiword_to_num(text)), text)
with self.assertRaises(ValueError):
thaiword_to_num("ศูนย์อะไรนะ")
with self.assertRaises(ValueError):
thaiword_to_num("")
with self.assertRaises(ValueError):
thaiword_to_num("ห้าพันสี่หมื่น")
with self.assertRaises(TypeError):
thaiword_to_num(None)
with self.assertRaises(TypeError):
thaiword_to_num(["หนึ่ง"])
self.assertEqual(words_to_num("ศูนย์"), 0)
self.assertEqual(words_to_num("แปด"), 8)
self.assertEqual(words_to_num("ยี่สิบ"), 20)
self.assertEqual(words_to_num("ร้อยสิบสอง"), 112)
self.assertEqual(words_to_num("ลบแปด"), -8)
self.assertEqual(words_to_num("ลบยี่สิบ"), -20)
self.assertEqual(words_to_num("ลบร้อยสิบสอง"), -112)
self.assertEqual(
words_to_num("หกล้านหกแสนหกหมื่นหกพันหกร้อยหกสิบหก"), 6666666
)
self.assertEqual(words_to_num("สองล้านสามแสนหกร้อยสิบสอง"), 2300612)
self.assertEqual(words_to_num("หนึ่งร้อยสิบล้าน"), 110000000)
self.assertEqual(
words_to_num("สิบห้าล้านล้านเจ็ดสิบสอง"), 15000000000072
)
self.assertEqual(words_to_num("หนึ่งล้านล้าน"), 1000000000000)
self.assertEqual(
words_to_num("สองแสนสี่หมื่นสามสิบล้านสี่พันล้าน"),
240030004000000000,
)
self.assertEqual(words_to_num("ร้อยสิบล้านแปดแสนห้าพัน"), 110805000)
self.assertEqual(words_to_num("ลบหนึ่ง"), -1)
text = "ลบหนึ่งร้อยล้านสี่แสนห้าพันยี่สิบเอ็ด"
self.assertEqual(num_to_thaiword(words_to_num(text)), text)
self.assertIsNotNone(
text_to_num("เก้าร้อยแปดสิบจุดเก้าห้าบาทนี่คือจำนวนทั้งหมด")
)
self.assertIsNotNone(
text_to_num("สิบล้านสองหมื่นหนึ่งพันแปดร้อยแปดสิบเก้าบาท")
)
self.assertIsNotNone(
text_to_num("สิบล้านสองหมื่นหนึ่งพันแปดร้อยแปดสิบเก้า")
)
self.assertEqual(
arabic_digit_to_thai_digit("ไทยแลนด์ 4.0"), "ไทยแลนด์ ๔.๐"
)
self.assertEqual(arabic_digit_to_thai_digit(""), "")
self.assertEqual(arabic_digit_to_thai_digit(None), "")
self.assertEqual(
thai_digit_to_arabic_digit("๔๐๔ Not Found"), "404 Not Found"
)
self.assertEqual(thai_digit_to_arabic_digit(""), "")
self.assertEqual(thai_digit_to_arabic_digit(None), "")
self.assertEqual(digit_to_text("RFC 7258"), "RFC เจ็ดสองห้าแปด")
self.assertEqual(digit_to_text(""), "")
self.assertEqual(digit_to_text(None), "")
self.assertEqual(text_to_arabic_digit("เจ็ด"), "7")
self.assertEqual(text_to_arabic_digit(""), "")
self.assertEqual(text_to_arabic_digit(None), "")
self.assertEqual(text_to_thai_digit("เก้า"), "๙")
self.assertEqual(text_to_thai_digit(""), "")
self.assertEqual(text_to_thai_digit(None), "")
# ### pythainlp.util.keyboard
def test_keyboard(self):
self.assertEqual(eng_to_thai("l;ylfu8iy["), "สวัสดีครับ")
self.assertEqual(
eng_to_thai("Tok8kicsj'xitgmLwmp"), "ธนาคารแห่งประเทศไทย"
)
self.assertEqual(thai_to_eng("สวัสดีครับ"), "l;ylfu8iy[")
self.assertEqual(thai_to_eng("่นีพืฟสรหท"), "journalism")
self.assertEqual(thai_to_eng("๋นีพืฟสรหท"), "Journalism")
# ### pythainlp.util.keywords
def test_find_keywords(self):
word_list = ["แมว", "กิน", "ปลา", "อร่อย", "แมว", "เป็น", "แมว"]
self.assertEqual(find_keyword(word_list), {"แมว": 3})
def test_rank(self):
self.assertEqual(rank([]), None)
self.assertEqual(
rank(["แมว", "คน", "แมว"]), Counter({"แมว": 2, "คน": 1})
)
self.assertIsNotNone(
rank(["แมว", "คน", "แมว"], exclude_stopwords=True)
)
# ### pythainlp.util.keyboard
def test_thai_keyboard_dist(self):
self.assertEqual(thai_keyboard_dist("ฟ", "ฤ"), 0.0)
self.assertEqual(thai_keyboard_dist("ฟ", "ห"), 1.0)
self.assertEqual(thai_keyboard_dist("ฟ", "ก"), 2.0)
self.assertEqual(thai_keyboard_dist("ฟ", "ฤ", 0.5), 0.5)
self.assertNotEqual(
thai_keyboard_dist("๘", "๙"), thai_keyboard_dist("๙", "๐")
)
with self.assertRaises(ValueError):
thai_keyboard_dist("ພ", "พ")
# ### pythainlp.util.date
def test_date(self):
self.assertIsNotNone(now_reign_year())
self.assertEqual(reign_year_to_ad(2, 10), 2017)
self.assertIsNotNone(reign_year_to_ad(2, 9))
self.assertIsNotNone(reign_year_to_ad(2, 8))
self.assertIsNotNone(reign_year_to_ad(2, 7))
# ### pythainlp.util.strftime
def test_thai_strftime(self):
date = datetime(1976, 10, 6, 1, 40, tzinfo=timezone.utc)
self.assertEqual(thai_strftime(date, "%d"), "06")
self.assertEqual(thai_strftime(date, "%-d"), "6") # no padding
self.assertEqual(thai_strftime(date, "%_d"), " 6") # space padding
self.assertEqual(thai_strftime(date, "%0d"), "06") # zero padding
self.assertEqual(thai_strftime(date, "%H"), "01")
self.assertEqual(thai_strftime(date, "%-H"), "1") # no padding
self.assertEqual(thai_strftime(date, "%_M"), "40") # space padding
self.assertEqual(thai_strftime(date, "%0M"), "40") # zero padding
self.assertEqual(thai_strftime(date, "%e"), " 6")
self.assertEqual(thai_strftime(date, "%-e"), "6") # no padding
self.assertEqual(thai_strftime(date, "%_e"), " 6") # space padding
self.assertEqual(thai_strftime(date, "%0e"), "06") # zero padding
self.assertEqual(thai_strftime(date, "%Ed"), "06") # locale's alt rep
self.assertEqual(thai_strftime(date, "%Od"), "๐๖") # locale's numeric
self.assertEqual(
thai_strftime(date, "%d", thaidigit=True), "๐๖"
) # Thai digit
self.assertEqual(thai_strftime(date, "%%"), "%") # % escape
self.assertEqual(thai_strftime(date, "%"), "%") # one %
self.assertEqual(thai_strftime(date, "%-"), "-") # lone dash
self.assertEqual(thai_strftime(date, "%c"), "พ 6 ต.ค. 01:40:00 2519")
self.assertEqual(
thai_strftime(date, "%0c"), "พ 6 ต.ค. 01:40:00 2519"
)
self.assertEqual(
thai_strftime(date, "%c", True), "พ ๖ ต.ค. ๐๑:๔๐:๐๐ ๒๕๑๙"
)
self.assertEqual(
thai_strftime(
date, "%Aที่ %d %B พ.ศ. %Y เวลา %H:%Mน. (%a %d-%b-%y) %% %"
),
"วันพุธที่ 06 ตุลาคม พ.ศ. 2519 เวลา 01:40น. (พ 06-ต.ค.-19) % %",
)
self.assertEqual(thai_strftime(date, "%Q"), "Q") # not support
self.assertIsNotNone(
thai_strftime(date, "%A%a%B%b%C%c%D%F%G%g%v%X%x%Y%y%+%%")
)
self.assertEqual(
thai_strftime(date, "%p").upper(), thai_strftime(date, "%^p")
) # '^' extension for upper case
self.assertEqual(
thai_strftime(date, "%Z").swapcase(), thai_strftime(date, "%#Z")
) # '#' extension for swap case
date = datetime(1, 2, 3)
self.assertEqual(thai_strftime(date, "%Y"), "0544")
self.assertEqual(thai_strftime(date, "%y"), "44")
self.assertEqual(len(thai_strftime(date, "%G")), 4)
self.assertEqual(len(thai_strftime(date, "%g")), 2)
# ### pythainlp.util.time
def test_time_to_thaiword(self):
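        # fmt options: "24h" = formal 24-hour style, "6h" = traditional Thai six-hour clock, "m6h" = modified six-hour style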
self.assertEqual(time_to_thaiword("8:17"), time_to_thaiword("08:17"))
self.assertEqual(time_to_thaiword("8:17"), "แปดนาฬิกาสิบเจ็ดนาที")
self.assertEqual(
time_to_thaiword("8:17", "6h"), "สองโมงเช้าสิบเจ็ดนาที"
)
self.assertEqual(time_to_thaiword("8:17", "m6h"), "แปดโมงสิบเจ็ดนาที")
self.assertEqual(
time_to_thaiword("13:30:01", "6h", "m"), "บ่ายโมงครึ่ง"
)
self.assertEqual(
time_to_thaiword(time(12, 3, 0)), "สิบสองนาฬิกาสามนาที"
)
self.assertEqual(
time_to_thaiword(time(12, 3, 1)),
"สิบสองนาฬิกาสามนาทีหนึ่งวินาที",
)
self.assertEqual(
time_to_thaiword(datetime(2014, 5, 22, 12, 3, 0), precision="s"),
"สิบสองนาฬิกาสามนาทีศูนย์วินาที",
)
self.assertEqual(
time_to_thaiword(datetime(2014, 5, 22, 12, 3, 1), precision="m"),
"สิบสองนาฬิกาสามนาที",
)
self.assertEqual(
time_to_thaiword(datetime(1976, 10, 6, 12, 30, 1), "6h", "m"),
"เที่ยงครึ่ง",
)
self.assertEqual(time_to_thaiword("18:30"), "สิบแปดนาฬิกาสามสิบนาที")
self.assertEqual(
time_to_thaiword("18:30:00"), "สิบแปดนาฬิกาสามสิบนาที"
)
self.assertEqual(
time_to_thaiword("18:30:01"), "สิบแปดนาฬิกาสามสิบนาทีหนึ่งวินาที"
)
self.assertEqual(
time_to_thaiword("18:30:01", precision="m"),
"สิบแปดนาฬิกาสามสิบนาที",
)
self.assertEqual(
time_to_thaiword("18:30:01", precision="s"),
"สิบแปดนาฬิกาสามสิบนาทีหนึ่งวินาที",
)
self.assertEqual(
time_to_thaiword("18:30:01", fmt="m6h", precision="m"),
"หกโมงครึ่ง",
)
self.assertEqual(
time_to_thaiword("18:30:01", fmt="m6h"),
"หกโมงสามสิบนาทีหนึ่งวินาที",
)
self.assertEqual(
time_to_thaiword("18:30:01", fmt="m6h", precision="m"),
"หกโมงครึ่ง",
)
self.assertIsNotNone(time_to_thaiword("0:30"))
self.assertIsNotNone(time_to_thaiword("0:30", "6h"))
self.assertIsNotNone(time_to_thaiword("0:30", "m6h"))
self.assertIsNotNone(time_to_thaiword("4:30"))
self.assertIsNotNone(time_to_thaiword("4:30", "6h"))
self.assertIsNotNone(time_to_thaiword("4:30", "m6h"))
self.assertIsNotNone(time_to_thaiword("12:30"))
self.assertIsNotNone(time_to_thaiword("12:30", "6h"))
self.assertIsNotNone(time_to_thaiword("12:30", "m6h"))
self.assertIsNotNone(time_to_thaiword("13:30"))
self.assertIsNotNone(time_to_thaiword("13:30", "6h"))
self.assertIsNotNone(time_to_thaiword("13:30", "m6h"))
self.assertIsNotNone(time_to_thaiword("15:30"))
self.assertIsNotNone(time_to_thaiword("15:30", "6h"))
self.assertIsNotNone(time_to_thaiword("15:30", "m6h"))
self.assertIsNotNone(time_to_thaiword("18:30"))
self.assertIsNotNone(time_to_thaiword("18:30", "6h"))
self.assertIsNotNone(time_to_thaiword("18:30", "m6h"))
self.assertIsNotNone(time_to_thaiword("19:30"))
self.assertIsNotNone(time_to_thaiword("19:30", "6h"))
self.assertIsNotNone(time_to_thaiword("19:30", "m6h"))
with self.assertRaises(NotImplementedError):
time_to_thaiword(
"8:17", fmt="xx"
) # format string is not supported
with self.assertRaises(TypeError):
time_to_thaiword(42) # input is not datetime/time/str
with self.assertRaises(ValueError):
time_to_thaiword("") # input is empty
with self.assertRaises(ValueError):
time_to_thaiword("13:73:23") # input is not in H:M:S format
with self.assertRaises(ValueError):
time_to_thaiword(
"24:00"
) # input is not in H:M:S format (over 23:59:59)
def test_thaiword_to_time(self):
self.assertEqual(thaiword_to_time("บ่ายโมงครึ่ง"), "13:30")
self.assertEqual(thaiword_to_time("บ่ายสามโมงสิบสองนาที"), "15:12")
self.assertEqual(thaiword_to_time("สิบโมงเช้าสิบสองนาที"), "10:12")
self.assertEqual(thaiword_to_time("บ่ายโมงสิบสามนาที"), "13:13")
self.assertEqual(thaiword_to_time("ศูนย์นาฬิกาสิบเอ็ดนาที"), "00:11")
self.assertEqual(
thaiword_to_time("บ่ายโมงเย็นสามสิบเอ็ดนาที"), "13:31"
)
self.assertEqual(thaiword_to_time("เที่ยงคืนหนึ่งนาที"), "00:01")
self.assertEqual(thaiword_to_time("เที่ยงครึ่ง"), "12:30")
self.assertEqual(thaiword_to_time("ห้าโมงเย็นสามสิบสี่นาที"), "17:34")
self.assertEqual(thaiword_to_time("หนึ่งทุ่มสามสิบแปดนาที"), "19:38")
self.assertEqual(thaiword_to_time("ทุ่มสามสิบแปด"), "19:38")
self.assertEqual(
thaiword_to_time("สองโมงเช้าสิบสองนาที", padding=False), "8:12"
)
self.assertEqual(thaiword_to_time("สิบโมงเช้า"), "10:00")
self.assertEqual(thaiword_to_time("ตีสามสิบห้า"), "03:15")
self.assertEqual(thaiword_to_time("ตีสามสิบห้านาที"), "03:15")
with self.assertRaises(ValueError):
thaiword_to_time("ไม่มีคำบอกเวลา")
with self.assertRaises(ValueError):
thaiword_to_time("นาฬิกา")
def test_thaiword_to_date(self):
now = datetime.now()
self.assertEqual(
now + timedelta(days=0), thaiword_to_date("วันนี้", now)
)
self.assertEqual(
now + timedelta(days=1),
thaiword_to_date("พรุ่งนี้", now),
)
self.assertEqual(
now + timedelta(days=2),
thaiword_to_date("มะรืนนี้", now),
)
self.assertEqual(
now + timedelta(days=-1),
thaiword_to_date("เมื่อวาน", now),
)
self.assertEqual(
now + timedelta(days=-2), thaiword_to_date("วานซืน", now)
)
self.assertIsNotNone(thaiword_to_date("วันนี้"))
        # Skipped: fails at month boundaries, where "พรุ่งนี้" (tomorrow) becomes day 1 rather than day 32.
# self.assertEqual(
# thaiword_to_date("วันนี้").day + 1,
# thaiword_to_date("พรุ่งนี้").day,
# )
self.assertIsNone(thaiword_to_date("วันไหน"))
# ### pythainlp.util.trie
def test_trie(self):
self.assertIsNotNone(Trie([]))
self.assertIsNotNone(Trie(["ทดสอบ", "ทด", "ทอด", "ทอผ้า"]))
self.assertIsNotNone(Trie({"ทอด", "ทอง", "ทาง"}))
self.assertIsNotNone(Trie(("ทอด", "ทอง", "ทาง")))
self.assertIsNotNone(Trie(Trie(["ทดสอบ", "ทดลอง"])))
trie = Trie(["ทด", "ทดสอบ", "ทดลอง"])
self.assertIn("ทด", trie)
trie.add("ทบ")
self.assertEqual(len(trie), 4)
self.assertEqual(len(trie.prefixes("ทดสอบ")), 2)
trie.remove("ทบ")
trie.remove("ทด")
self.assertEqual(len(trie), 2)
trie = Trie([])
self.assertEqual(len(trie), 0)
trie.remove("หมด")
self.assertEqual(len(trie), 0)
self.assertIsNotNone(dict_trie(Trie(["ลอง", "ลาก"])))
self.assertIsNotNone(dict_trie(("ลอง", "สร้าง", "Trie", "ลน")))
self.assertIsNotNone(dict_trie(["ลอง", "สร้าง", "Trie", "ลน"]))
self.assertIsNotNone(dict_trie({"ลอง", "สร้าง", "Trie", "ลน"}))
self.assertIsNotNone(dict_trie(thai_words()))
self.assertIsNotNone(
dict_trie(os.path.join(_CORPUS_PATH, _THAI_WORDS_FILENAME))
)
with self.assertRaises(TypeError):
dict_trie("")
with self.assertRaises(TypeError):
dict_trie(None)
with self.assertRaises(TypeError):
dict_trie(42)
# ### pythainlp.util.normalize
def test_normalize(self):
self.assertIsNotNone(normalize("พรรค์จันทร์ab์"))
# normalize sara e + sara e
self.assertEqual(normalize("เเปลก"), "แปลก")
# normalize consonant + nikhahit + sara aa
self.assertEqual(normalize("นํา"), "นำ")
self.assertEqual(normalize("\u0e01\u0e4d\u0e32"), "\u0e01\u0e33")
# normalize consonant + tone mark + nikhahit + sara aa
self.assertEqual(
normalize("\u0e01\u0e48\u0e4d\u0e32"), "\u0e01\u0e48\u0e33"
)
# reorder consonant + follow vowel + tone mark
self.assertEqual(normalize("\u0e01\u0e30\u0e48"), "\u0e01\u0e48\u0e30")
# reorder consonant + nikhahit + tone mark + sara aa
self.assertEqual(
normalize("\u0e01\u0e4d\u0e48\u0e32"), "\u0e01\u0e48\u0e33"
)
# reorder consonant + follow vowel + tone mark
self.assertEqual(normalize("\u0e01\u0e32\u0e48"), "\u0e01\u0e48\u0e32")
# normalize lakkhangyao to sara aa
self.assertEqual(normalize("นๅคา"), "นาคา")
self.assertEqual(normalize("ฤๅษี"), "ฤๅษี")
# remove repeating following vowels
self.assertEqual(normalize("กาา"), "กา")
self.assertEqual(normalize("กา า า า"), "กา")
self.assertEqual(normalize("กา าาะา"), "กาะา")
        # remove repeating tone marks
self.assertEqual(normalize("\u0e01\u0e48\u0e48"), "\u0e01\u0e48")
        # remove repeating different tone marks (keep the last one)
self.assertEqual(normalize("\u0e01\u0e48\u0e49"), "\u0e01\u0e49")
self.assertEqual(
normalize("\u0e01\u0e48\u0e49\u0e48\u0e49"), "\u0e01\u0e49"
)
# remove tone mark at the beginning of text
self.assertEqual(remove_dangling("\u0e48\u0e01"), "\u0e01")
self.assertEqual(remove_dangling("\u0e48\u0e48\u0e01"), "\u0e01")
self.assertEqual(remove_dangling("\u0e48\u0e49\u0e01"), "\u0e01")
self.assertEqual(remove_dangling("\u0e48\u0e01\u0e48"), "\u0e01\u0e48")
# remove duplicate spaces
self.assertEqual(remove_dup_spaces(" ab c d "), "ab c d")
self.assertEqual(remove_dup_spaces("\nab c \n d \n"), "ab c\nd")
# remove tone marks
self.assertEqual(remove_tonemark("จิ้น"), "จิน")
self.assertEqual(remove_tonemark("เก๋า"), "เกา")
# remove zero width chars
self.assertEqual(remove_zw("กา\u200b"), "กา")
self.assertEqual(remove_zw("ก\u200cา"), "กา")
self.assertEqual(remove_zw("\u200bกา"), "กา")
self.assertEqual(remove_zw("กา\u200b\u200c\u200b"), "กา")
# maiyamok
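        # ๆ (maiyamok) is the Thai repetition mark; maiyamok() expands it by repeating the preceding word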
self.assertEqual(
maiyamok("เด็กๆชอบไปโรงเรียน"),
['เด็ก', 'เด็ก', 'ชอบ', 'ไป', 'โรงเรียน']
)
self.assertEqual(
maiyamok([
"ทำไม",
"คน",
"ดี",
" ",
"ๆ",
"ๆ",
" ",
"ถึง",
"ทำ",
"ไม่ได้"
]),
["ทำไม", "คน", "ดี", "ดี", "ดี", " ", "ถึง", "ทำ", "ไม่ได้"]
)
self.assertEqual(
maiyamok([
"ทำไม",
"คน",
"ดี",
" ",
" ๆ",
"ๆ",
" ",
"ถึง",
"ทำ",
"ไม่ได้"
]),
["ทำไม", "คน", "ดี", "ดี", "ดี", " ", "ถึง", "ทำ", "ไม่ได้"]
)
self.assertEqual(
maiyamok([
"ทำไม",
"คน",
"ดีๆ",
" ",
"ๆ",
"ๆ",
" ",
"ถึง",
"ทำ",
"ไม่ได้"
]),
["ทำไม", "คน", "ดี", "ดี", "ดี", "ดี", " ", "ถึง", "ทำ", "ไม่ได้"]
)
# ### pythainlp.util.thai
def test_countthai(self):
self.assertEqual(countthai(""), 0.0)
self.assertEqual(countthai("123"), 0.0)
self.assertEqual(countthai("1 2 3"), 0.0)
self.assertEqual(countthai("ประเทศไทย"), 100.0)
self.assertEqual(countthai("โรค COVID-19"), 37.5)
self.assertEqual(countthai("(กกต.)", ".()"), 100.0)
self.assertEqual(countthai("(กกต.)", None), 50.0)
def test_count_thai_chars(self):
        self.assertEqual(
count_thai_chars("ทดสอบภาษาไทย"),
{
'vowels': 3,
'lead_vowels': 1,
'follow_vowels': 2,
'above_vowels': 0,
'below_vowels': 0,
'consonants': 9,
'tonemarks': 0,
'signs': 0,
'thai_digits': 0,
'punctuations': 0,
'non_thai': 0,
}
)
        self.assertEqual(
count_thai_chars("มี ๕ บาทไหม๏ เกมส์หรือเกมกันแน่ที่กรุเทพฯ ใช้"),
{
'vowels': 12,
'lead_vowels': 6,
'follow_vowels': 1,
'above_vowels': 4,
'below_vowels': 1,
'consonants': 22,
'tonemarks': 3,
'signs': 2,
'thai_digits': 1,
'punctuations': 1,
'non_thai': 4,
}
)
def test_isthaichar(self):
self.assertEqual(isthaichar("ก"), True)
self.assertEqual(isthaichar("a"), False)
self.assertEqual(isthaichar("0"), False)
def test_isthai(self):
self.assertEqual(isthai("ไทย"), True)
self.assertEqual(isthai("ไทย0"), False)
self.assertEqual(isthai("ต.ค."), True)
self.assertEqual(isthai("(ต.ค.)"), False)
self.assertEqual(isthai("ต.ค.", ignore_chars=None), False)
self.assertEqual(isthai("(ต.ค.)", ignore_chars=".()"), True)
def test_is_native_thai(self):
self.assertEqual(is_native_thai(None), False)
self.assertEqual(is_native_thai(""), False)
self.assertEqual(is_native_thai("116"), False)
self.assertEqual(is_native_thai("abc"), False)
self.assertEqual(is_native_thai("ตา"), True)
self.assertEqual(is_native_thai("ยา"), True)
self.assertEqual(is_native_thai("ฆ่า"), True)
self.assertEqual(is_native_thai("คน"), True)
self.assertEqual(is_native_thai("กะ"), True)
self.assertEqual(is_native_thai("มอ"), True)
self.assertEqual(is_native_thai("กะ"), True)
self.assertEqual(is_native_thai("กระ"), True)
self.assertEqual(is_native_thai("ประท้วง"), True)
self.assertEqual(is_native_thai("ศา"), False)
self.assertEqual(is_native_thai("ลักษ์"), False)
self.assertEqual(is_native_thai("มาร์ค"), False)
self.assertEqual(is_native_thai("เลข"), False)
self.assertEqual(is_native_thai("เทเวศน์"), False)
self.assertEqual(is_native_thai("เทเวศร์"), False)
def test_display_thai_char(self):
self.assertEqual(display_thai_char("้"), "_้")
self.assertEqual(display_thai_char("ป"), "ป")
self.assertEqual(display_thai_char("์"), "_์")
self.assertEqual(display_thai_char("ำ"), "_ำ")
self.assertEqual(display_thai_char("๎"), "_๎")
self.assertEqual(display_thai_char("ํ"), "_ํ")
# ### pythainlp.util.emojiconv
def test_emoji_to_thai(self):
self.assertEqual(
emoji_to_thai(
"จะมานั่งรถเมล์เหมือนผมก็ได้นะครับ ใกล้ชิดประชาชนดี 😀"
),
(
"จะมานั่งรถเมล์เหมือนผมก็ได้นะครับ "
"ใกล้ชิดประชาชนดี :หน้ายิ้มยิงฟัน:"
),
)
self.assertEqual(
emoji_to_thai("หิวข้าวอยากกินอาหารญี่ปุ่น 🍣"),
"หิวข้าวอยากกินอาหารญี่ปุ่น :ซูชิ:",
)
self.assertEqual(
emoji_to_thai("🇹🇭 นี่คือธงประเทศไทย"),
":ธง_ไทย: นี่คือธงประเทศไทย",
)
def test_sound_syllable(self):
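        # Live syllables (คำเป็น) end in a long vowel or a sonorant; dead syllables (คำตาย) end in a short vowel or a stop consonant.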
test = [
("มา", "live"),
("ดู", "live"),
("ปู", "live"),
("เวลา", "live"),
("ปี", "live"),
("จำ", "live"),
("น้ำ", "live"),
("ใช่", "live"),
("เผ่า", "live"),
("เสา", "live"),
("ไป", "live"),
("จริง", "live"),
("กิน", "live"),
("กำ", "live"),
("มา", "live"),
("สาว", "live"),
("ฉุย", "live"),
("ธุ", "dead"),
("ระ", "dead"),
("กะ", "dead"),
("ทิ", "dead"),
("เกะ", "dead"),
("กะ", "dead"),
("บท", "dead"),
("บาท", "dead"),
("ลาภ", "dead"),
("เมฆ", "dead"),
("เลข", "dead"),
("ธูป", "dead"),
("บ", "dead"),
("บ่", "dead"),
("ก็", "dead"),
("เพราะ", "dead"),
("เกาะ", "dead"),
("แคะ", "dead"),
]
for i, j in test:
self.assertEqual(sound_syllable(i), j)
def test_tone_detector(self):
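        # tone codes: l = low, m = mid, h = high, r = rising, f = falling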
data = [
("l", "กด"),
("l", "ต่อ"),
("l", "ฉาก"),
("l", "ใส่"),
("l", "อยาก"),
("l", "อยู่"),
("l", "หนวก"),
("l", "ใหม่"),
("m", "ควาย"),
("m", "ไป"),
("h", "คะ"),
("h", "วัด"),
("h", "ไม้"),
("h", "โต๊ะ"),
("r", "เขา"),
("r", "ก๋ง"),
("r", "หญิง"),
("f", "มาก"),
("f", "ใช่"),
("f", "ไหม้"),
("f", "ต้น"),
("f", "ผู้"),
]
for i, j in data:
self.assertEqual(tone_detector(j), i)
def test_syllable_length(self):
self.assertEqual(syllable_length("มาก"), "long")
self.assertEqual(syllable_length("คะ"), "short")
def test_syllable_open_close_detector(self):
self.assertEqual(syllable_open_close_detector("มาก"), "close")
self.assertEqual(syllable_open_close_detector("คะ"), "open")
def test_thai_word_tone_detector(self):
self.assertIsNotNone(thai_word_tone_detector("คนดี"))
self.assertEqual(
thai_word_tone_detector("ราคา"),
[('รา', 'm'), ('คา', 'm')]
)
def test_thai_strptime(self):
self.assertIsNotNone(
thai_strptime(
"05-7-65 09:00:01.10600",
"%d-%B-%Y %H:%M:%S.%f",
year="be"
)
)
self.assertIsNotNone(
thai_strptime(
"24-6-75 09:00:00",
"%d-%B-%Y %H:%M:%S",
year="be",
add_year="2400"
)
)
self.assertIsNotNone(
thai_strptime(
"05-7-22 09:00:01.10600",
"%d-%B-%Y %H:%M:%S.%f",
year="ad"
)
)
self.assertIsNotNone(
thai_strptime(
"05-7-99 09:00:01.10600",
"%d-%B-%Y %H:%M:%S.%f",
year="ad",
add_year="1900"
)
)
def test_convert_years(self):
self.assertEqual(convert_years("2566", src="be", target="ad"), "2023")
self.assertEqual(convert_years("2566", src="be", target="re"), "242")
self.assertEqual(convert_years("2566", src="be", target="ah"), "1444")
self.assertEqual(convert_years("2023", src="ad", target="be"), "2566")
self.assertEqual(convert_years("2023", src="ad", target="ah"), "1444")
self.assertEqual(convert_years("2023", src="ad", target="re"), "242")
self.assertEqual(convert_years("1444", src="ah", target="be"), "2566")
self.assertEqual(convert_years("1444", src="ah", target="ad"), "2023")
self.assertEqual(convert_years("1444", src="ah", target="re"), "242")
self.assertEqual(convert_years("242", src="re", target="be"), "2566")
self.assertEqual(convert_years("242", src="re", target="ad"), "2023")
self.assertEqual(convert_years("242", src="re", target="ah"), "1444")
with self.assertRaises(NotImplementedError):
self.assertIsNotNone(convert_years("2023", src="cat", target="dog"))
def test_nectec_to_ipa(self):
self.assertEqual(nectec_to_ipa("kl-uua-j^-2"), 'kl uua j ˥˩')
def test_ipa_to_rtgs(self):
self.assertEqual(ipa_to_rtgs("kluaj"), "kluai")
self.assertEqual(ipa_to_rtgs("waːw"), "wao")
self.assertEqual(ipa_to_rtgs("/naː˥˩/"), "/na/")
def test_remove_tone_ipa(self):
self.assertEqual(remove_tone_ipa("laː˦˥.sa˨˩.maj˩˩˦"), "laː.sa.maj")
def test_tis620_to_utf8(self):
self.assertEqual(tis620_to_utf8("¡ÃзÃÇ§ÍØµÊÒË¡ÃÃÁ"), "กระทรวงอุตสาหกรรม")
def test_spell_word(self):
self.assertEqual(spell_word("เสือ"),['สอ', 'เอือ', 'เสือ'])
self.assertEqual(spell_word("เสื้อ"),['สอ', 'เอือ', 'ไม้โท', 'เสื้อ'])
self.assertEqual(spell_word("คน"),['คอ', 'นอ', 'คน'])
self.assertEqual(spell_word("คนดี"),['คอ', 'นอ', 'คน', 'ดอ', 'อี', 'ดี', 'คนดี'])
| 31,702 | 36.122951 | 89 | py |
pythainlp-dev/tests/test_wangchanberta.py | pythainlp-dev/tests/test_wangchanberta.py | # -*- coding: utf-8 -*-
import unittest
from pythainlp.wangchanberta import ThaiNameTagger, segment
class TestWangchanberta(unittest.TestCase):
def test_thainer_wangchanberta(self):
ner = ThaiNameTagger()
self.assertIsNotNone(
ner.get_ner("I คิด therefore I am ผ็ฎ์")
)
ner = ThaiNameTagger()
self.assertIsNotNone(
ner.get_ner("I คิด therefore I am ผ็ฎ์", tag=True)
)
self.assertIsNotNone(
ner.get_ner(
"โรงเรียนสวนกุหลาบเป็นโรงเรียนที่ดี แต่ไม่มีสวนกุหลาบ",
tag=True
)
)
ner = ThaiNameTagger(grouped_entities=False)
self.assertIsNotNone(
ner.get_ner("I คิด therefore I am ผ็ฎ์", tag=True)
)
def test_segment_wangchanberta(self):
self.assertIsNotNone(
segment("I คิด therefore I am ผ็ฎ์")
)
self.assertIsNotNone(
segment([])
)
| 978 | 24.763158 | 71 | py |
pythainlp-dev/tests/test_word_vector.py | pythainlp-dev/tests/test_word_vector.py | # -*- coding: utf-8 -*-
import unittest
from pythainlp.word_vector import WordVector
class TestWordVectorPackage(unittest.TestCase):
def test_thai2vec(self):
_wv = WordVector("thai2fit_wv")
self.assertGreaterEqual(
_wv.similarity("แบคทีเรีย", "คน"), 0
)
self.assertIsNotNone(_wv.sentence_vectorizer(""))
self.assertIsNotNone(_wv.get_model())
self.assertIsNotNone(
_wv.sentence_vectorizer("เสรีภาพในการชุมนุม")
)
self.assertIsNotNone(
_wv.sentence_vectorizer(
"เสรีภาพในการรวมตัว\nสมาคม", use_mean=True
)
)
self.assertIsNotNone(
_wv.sentence_vectorizer("I คิด therefore I am ผ็ฎ์")
)
self.assertIsNotNone(
_wv.most_similar_cosmul(
["สหรัฐอเมริกา", "ประธานาธิบดี"], ["ประเทศไทย"]
)[0][0]
)
self.assertEqual(
_wv.doesnt_match(["ญี่ปุ่น", "พม่า", "ไอติม"]), "ไอติม"
)
def test_ltw2v(self):
_wv = WordVector("ltw2v")
self.assertGreaterEqual(
_wv.similarity("แบคทีเรีย", "คน"), 0
)
self.assertIsNotNone(_wv.sentence_vectorizer(""))
self.assertIsNotNone(_wv.get_model())
self.assertIsNotNone(
_wv.sentence_vectorizer("เสรีภาพในการชุมนุม")
)
self.assertIsNotNone(
_wv.sentence_vectorizer(
"เสรีภาพในการรวมตัว\nสมาคม", use_mean=True
)
)
self.assertIsNotNone(
_wv.sentence_vectorizer("I คิด therefore I am ผ็ฎ์")
)
self.assertIsNotNone(
_wv.most_similar_cosmul(
["สหรัฐอเมริกา", "ประธานาธิบดี"], ["ไทย"]
)[0][0]
)
self.assertEqual(
_wv.doesnt_match(["ญี่ปุ่น", "พม่า", "ไอติม"]), "ไอติม"
)
| 1,889 | 29.483871 | 67 | py |
pythainlp-dev/tests/test_wsd.py | pythainlp-dev/tests/test_wsd.py | # -*- coding: utf-8 -*-
import unittest
from pythainlp.wsd import get_sense
class TestWsdPackage(unittest.TestCase):
def test_get_sense(self):
self.assertIsNotNone(get_sense("เขากำลังอบขนมคุกกี้","คุกกี้"))
self.assertIsNotNone(get_sense("เว็บนี้ต้องการคุกกี้ในการทำงาน","คุกกี้"))
self.assertIsNone(get_sense("เว็บนี้ต้องการคุกกี้ในการทำงาน","คน"))
| 380 | 33.636364 | 82 | py |
pythainlp-dev/tests/data/eval-input.yml | pythainlp-dev/tests/data/eval-input.yml | char_level:fn: 1.0
char_level:fp: 0.0
char_level:precision: 1.0
char_level:recall: 0.8
char_level:tn: 9.0
char_level:tp: 4.0
word_level:correctly_tokenised_words: 3.0
word_level:precision: 0.75
word_level:recall: 0.6
word_level:total_words_in_ref_sample: 5.0
word_level:total_words_in_sample: 4.0
| 297 | 23.833333 | 41 | yml |
pythainlp-dev/tests/data/sentences.yml | pythainlp-dev/tests/data/sentences.yml | sentences:
-
expected: >-
ผม|ไม่|ชอบ|กิน|ผัก
actual: >-
ผม|ไม่|ชอบ|กิน|ผัก
-
expected: >-
ผม|ไม่|ชอบ|กิน|ผัก
actual: >-
ผม|ไม่|ชอบ|กินผัก
-
expected: >-
ผม|ไม่|ชอบ|กิน|ผัก|
actual: >-
ผม|ไม่|ชอบ|กินผัก|
-
expected: >-
ผม|ไม่|ชอบ|กินผัก|
actual: >-
ผม|ไม่|ชอบ|กิน|ผัก|
binary_sentences:
-
expected: "10001010"
actual: "10001010"
expected_count: 3
-
expected: "10001010"
actual: "10101010"
expected_count: 2
-
expected: "10101010"
actual: "10001010"
expected_count: 2
-
expected: "10001010"
actual: "10001000"
expected_count: 1
-
expected: "10001010"
actual: "10101000"
expected_count: 0
-
expected: "10101001000" # "ฝน|ตก|ที่|ทะเล
actual: "10001001010" # "ฝนตก|ที่|ทะ|เล"
expected_count: 1 | 1,040 | 21.630435 | 50 | yml |
null | ceph-main/.readthedocs.yml | ---
# Read the Docs configuration file
# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
version: 2
formats: []
build:
os: ubuntu-22.04
tools:
python: "3.8"
apt_packages:
- ditaa
- graphviz
python:
install:
- requirements: admin/doc-requirements.txt
- requirements: admin/doc-read-the-docs.txt
- requirements: admin/doc-pybind.txt
sphinx:
builder: dirhtml
configuration: doc/conf.py
| 446 | 19.318182 | 75 | yml |
null | ceph-main/README.md | # Ceph - a scalable distributed storage system
Please see https://ceph.com/ for current info.
## Contributing Code
Most of Ceph is dual licensed under the LGPL version 2.1 or 3.0. Some
miscellaneous code is under a BSD-style license or is public domain.
The documentation is licensed under Creative Commons
Attribution Share Alike 3.0 (CC-BY-SA-3.0). There are a handful of headers
included here that are licensed under the GPL. Please see the file
COPYING for a full inventory of licenses by file.
Code contributions must include a valid "Signed-off-by" acknowledging
the license for the modified or contributed file. Please see the file
SubmittingPatches.rst for details on what that means and on how to
generate and submit patches.
We do not require assignment of copyright to contribute code; code is
contributed under the terms of the applicable license.
## Checking out the source
You can clone from github with
git clone [email protected]:ceph/ceph
or, if you are not a github user,
git clone https://github.com/ceph/ceph.git
Ceph contains many git submodules that need to be checked out with
git submodule update --init --recursive
## Build Prerequisites
The list of Debian or RPM packages dependencies can be installed with:
./install-deps.sh
## Building Ceph
Note that these instructions are meant for developers who are
compiling the code for development and testing. To build binaries
suitable for installation we recommend you build deb or rpm packages
or refer to the `ceph.spec.in` or `debian/rules` to see which
configuration options are specified for production builds.
Build instructions:
./do_cmake.sh
cd build
ninja
(do_cmake.sh now defaults to creating a debug build of ceph that can
be up to 5x slower with some workloads. Please pass
"-DCMAKE_BUILD_TYPE=RelWithDebInfo" to do_cmake.sh to create a non-debug
release.
The number of jobs used by `ninja` is derived from the number of CPU cores of
the building host if unspecified. Use the `-j` option to limit the job number
if the build jobs are running out of memory. On average, each job takes around
2.5GiB memory.)
This assumes you make your build dir a subdirectory of the ceph.git
checkout. If you put it elsewhere, just point `CEPH_GIT_DIR` to the correct
path to the checkout. Any additional CMake args can be specified by setting ARGS
before invoking do_cmake. See [cmake options](#cmake-options)
for more details. Eg.
ARGS="-DCMAKE_C_COMPILER=gcc-7" ./do_cmake.sh
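The build directory name and the location of the source checkout can also be
overridden through the `BUILD_DIR` and `CEPH_GIT_DIR` environment variables
read by do_cmake.sh. For example (the paths below are placeholders; substitute
your own):
    BUILD_DIR=/path/to/ceph-build CEPH_GIT_DIR=/path/to/ceph ./do_cmake.sh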
To build only certain targets use:
ninja [target name]
To install:
ninja install
### CMake Options
If you run the `cmake` command by hand, there are many options you can
set with "-D". For example, the option to build the RADOS Gateway is
defaulted to ON. To build without the RADOS Gateway:
cmake -DWITH_RADOSGW=OFF [path to top-level ceph directory]
Another example below is building with debugging and alternate locations
for a couple of external dependencies:
cmake -DCMAKE_INSTALL_PREFIX=/opt/ceph -DCMAKE_C_FLAGS="-Og -g3 -gdwarf-4" \
..
Ceph has several bundled dependencies such as Boost, RocksDB and Arrow. By
default, cmake will build these bundled dependencies from source instead of
using libraries that are already installed on the system. You can opt-in to
using these system libraries, provided they meet the minimum version required
by Ceph, with cmake options like `WITH_SYSTEM_BOOST`:
cmake -DWITH_SYSTEM_BOOST=ON [...]
To view an exhaustive list of -D options, you can invoke `cmake` with:
cmake -LH
If you often pipe `ninja` to `less` and would like to maintain the
diagnostic colors for errors and warnings (and if your compiler
supports it), you can invoke `cmake` with:
cmake -DDIAGNOSTICS_COLOR=always ...
Then you'll get the diagnostic colors when you execute:
ninja | less -R
Other available values for 'DIAGNOSTICS_COLOR' are 'auto' (default) and
'never'.
## Building a source tarball
To build a complete source tarball with everything needed to build from
source and/or build a (deb or rpm) package, run
./make-dist
This will create a tarball like ceph-$version.tar.bz2 from git.
(Ensure that any changes you want to include in your working directory
are committed to git.)
## Running a test cluster
To run a functional test cluster,
cd build
ninja vstart # builds just enough to run vstart
../src/vstart.sh --debug --new -x --localhost --bluestore
./bin/ceph -s
Almost all of the usual commands are available in the bin/ directory.
For example,
./bin/rados -p rbd bench 30 write
./bin/rbd create foo --size 1000
To shut down the test cluster,
../src/stop.sh
To start or stop individual daemons, the sysvinit script can be used:
./bin/init-ceph restart osd.0
./bin/init-ceph stop
## Running unit tests
To build and run all tests (in parallel using all processors), use `ctest`:
cd build
ninja
ctest -j$(nproc)
(Note: Many targets built from src/test are not run using `ctest`.
Targets starting with "unittest" are run in `ninja check` and thus can
be run with `ctest`. Targets starting with "ceph_test" cannot, and should
be run by hand.)
When failures occur, look in build/Testing/Temporary for logs.
To build and run all tests and their dependencies without other
unnecessary targets in Ceph:
cd build
ninja check -j$(nproc)
To run an individual test manually, run `ctest` with -R (regex matching):
ctest -R [regex matching test name(s)]
(Note: `ctest` does not build the test it's running or the dependencies needed
to run it)
To run an individual test manually and see all the tests output, run
`ctest` with the -V (verbose) flag:
ctest -V -R [regex matching test name(s)]
To run tests manually and run the jobs in parallel, run `ctest` with
the `-j` flag:
ctest -j [number of jobs]
There are many other flags you can give `ctest` for better control
over manual test execution. To view these options run:
man ctest
## Building the Documentation
### Prerequisites
The list of package dependencies for building the documentation can be
found in `doc_deps.deb.txt`:
sudo apt-get install `cat doc_deps.deb.txt`
### Building the Documentation
To build the documentation, ensure that you are in the top-level
`/ceph` directory, and execute the build script. For example:
admin/build-doc
## Reporting Issues
To report an issue and view existing issues, please visit https://tracker.ceph.com/projects/ceph.
| 6,418 | 27.402655 | 97 | md |
null | ceph-main/SECURITY.md | # Security Policy
The information below, as well as information about past
vulnerabilities, can be found at
https://docs.ceph.com/en/latest/security/
## Supported Versions
A new major Ceph release is made every year, and security and bug fixes
are backported to the last two releases. For the current active
releases and the estimated end-of-life for each, please refer to
https://docs.ceph.com/en/latest/releases/
## Reporting a Vulnerability
To report a vulnerability, please send email to [email protected]
* Please do not file a public ceph tracker issue for a vulnerability.
* We urge reporters to provide as much information as is practical
(a reproducer, versions affected, fix if available, etc.), as this
can speed up the process considerably.
* Please let us know to whom credit should be given and with what
affiliations.
* If this issue is not yet disclosed publicly and you have any
disclosure date in mind, please share the same along with the
report.
Although you are not required to, you may encrypt your message using
the following GPG key:
**6EEF26FFD4093B99: Ceph Security Team ([email protected])**
**Download:** [MIT PGP Public Key Server](https://pgp.mit.edu/pks/lookup?op=vindex&search=0x6EEF26FFD4093B99)
**Fingerprint:** A527 D019 21F9 7178 C232 66C1 6EEF 26FF D409 3B99
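For example, one way to do this with GnuPG is to fetch the key from the
keyserver and then encrypt your report with it (the commands below are
illustrative; any OpenPGP-capable tool will work):
gpg --keyserver pgp.mit.edu --recv-keys 6EEF26FFD4093B99
gpg --armor --encrypt --recipient 6EEF26FFD4093B99 report.txt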
## Vulnerability Management Process
* The report will be acknowledged within three business days or less.
* The team will investigate and update the email thread with relevant
information and may ask for additional information or guidance
surrounding the reported issue.
* If the team does not confirm the report, no further action will be
taken and the issue will be closed.
* If the team confirms the report, a unique CVE identifier will be
assigned and shared with the reporter. The team will take action to
fix the issue.
* If a reporter has no disclosure date in mind, a Ceph security team
member will coordinate a release date (CRD) with the list members
and share the mutually agreed disclosure date with the reporter.
* The vulnerability disclosure / release date is chosen to avoid Fridays and
holiday periods.
* Embargoes are preferred for Critical and High impact
issues. Embargo should not be held for more than 90 days from the
date of vulnerability confirmation, except under unusual
circumstances. For Low and Moderate issues with limited impact and
an easy workaround, or where an issue is already public, a
standard patch release process will be followed to fix the
vulnerability once a CVE is assigned.
* Medium and Low severity issues will be released as part of the next
standard release cycle, with at least seven days' advance
notification to the list members prior to the release date. The CVE
fix details will be included in the release notes, which will be
linked in the public announcement.
* Commits will be handled in a private repository for review and
testing and a new patch version will be released from this private
repository.
* If a vulnerability is unintentionally already fixed in the public
repository, a few days are given to downstream stakeholders/vendors
to prepare for updating before the public disclosure.
* An announcement will be made disclosing the vulnerability. The
fastest place to receive security announcements is via the
[email protected] or [email protected] mailing
lists. (These lists are low-traffic).
If the report is considered embargoed, we ask you to not disclose the
vulnerability before it has been fixed and announced, unless you
received a response from the Ceph security team that you can do
so. This holds true until the public disclosure date that was agreed
upon by the list. Thank you for improving the security of Ceph and its
ecosystem. Your efforts and responsible disclosure are greatly
appreciated and will be acknowledged.
ceph-main/do_cmake.sh:
#!/usr/bin/env bash
set -ex
if [ -d .git ]; then
git submodule update --init --recursive
fi
: ${BUILD_DIR:=build}
: ${CEPH_GIT_DIR:=..}
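# Example (illustrative): override the build directory and request a
# release build instead of the default debug build:
#   BUILD_DIR=build-release ./do_cmake.sh -DCMAKE_BUILD_TYPE=RelWithDebInfo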
if [ -e $BUILD_DIR ]; then
echo "'$BUILD_DIR' dir already exists; either rm -rf '$BUILD_DIR' and re-run, or set BUILD_DIR env var to a different directory name"
exit 1
fi
PYBUILD="3"
ARGS="-GNinja"
if [ -r /etc/os-release ]; then
source /etc/os-release
case "$ID" in
fedora)
if [ "$VERSION_ID" -ge "37" ] ; then
PYBUILD="3.11"
elif [ "$VERSION_ID" -ge "35" ] ; then
PYBUILD="3.10"
elif [ "$VERSION_ID" -ge "33" ] ; then
PYBUILD="3.9"
elif [ "$VERSION_ID" -ge "32" ] ; then
PYBUILD="3.8"
else
PYBUILD="3.7"
fi
;;
rocky|rhel|centos)
MAJOR_VER=$(echo "$VERSION_ID" | sed -e 's/\..*$//')
if [ "$MAJOR_VER" -ge "9" ] ; then
PYBUILD="3.9"
elif [ "$MAJOR_VER" -ge "8" ] ; then
PYBUILD="3.6"
fi
;;
opensuse*|suse|sles)
PYBUILD="3"
ARGS+=" -DWITH_RADOSGW_AMQP_ENDPOINT=OFF"
ARGS+=" -DWITH_RADOSGW_KAFKA_ENDPOINT=OFF"
;;
ubuntu)
MAJOR_VER=$(echo "$VERSION_ID" | sed -e 's/\..*$//')
if [ "$MAJOR_VER" -ge "22" ] ; then
PYBUILD="3.10"
fi
;;
esac
elif [ "$(uname)" == FreeBSD ] ; then
PYBUILD="3"
ARGS+=" -DWITH_RADOSGW_AMQP_ENDPOINT=OFF"
ARGS+=" -DWITH_RADOSGW_KAFKA_ENDPOINT=OFF"
else
echo Unknown release
exit 1
fi
ARGS+=" -DWITH_PYTHON3=${PYBUILD}"
if type ccache > /dev/null 2>&1 ; then
echo "enabling ccache"
ARGS+=" -DWITH_CCACHE=ON"
fi
cxx_compiler="g++"
c_compiler="gcc"
# start the search at 20 to be more future-proof
for i in $(seq 20 -1 11); do
if type -t gcc-$i > /dev/null; then
cxx_compiler="g++-$i"
c_compiler="gcc-$i"
break
fi
done
ARGS+=" -DCMAKE_CXX_COMPILER=$cxx_compiler"
ARGS+=" -DCMAKE_C_COMPILER=$c_compiler"
mkdir $BUILD_DIR
cd $BUILD_DIR
if type cmake3 > /dev/null 2>&1 ; then
CMAKE=cmake3
else
CMAKE=cmake
fi
${CMAKE} $ARGS "$@" $CEPH_GIT_DIR || exit 1
set +x
# minimal config to find plugins
cat <<EOF > ceph.conf
[global]
plugin dir = lib
erasure code dir = lib
EOF
echo done.
if [[ ! "$ARGS $@" =~ "-DCMAKE_BUILD_TYPE" ]]; then
cat <<EOF
****
WARNING: do_cmake.sh now creates debug builds by default. Performance
may be severely affected. Please use -DCMAKE_BUILD_TYPE=RelWithDebInfo
if a performance sensitive build is required.
****
EOF
fi
ceph-main/do_freebsd.sh:
#!/bin/sh -xve
export NPROC=`sysctl -n hw.ncpu`
if [ x"$1"x = x"--deps"x ]; then
sudo ./install-deps.sh
fi
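# Usage sketch (assumed): run "./do_freebsd.sh --deps" to install the build
# dependencies first; running it without arguments goes straight to
# configure, build and test.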
if [ x"$CEPH_DEV"x != xx ]; then
BUILDOPTS="$BUILDOPTS V=1 VERBOSE=1"
CXX_FLAGS_DEBUG="-DCEPH_DEV"
C_FLAGS_DEBUG="-DCEPH_DEV"
fi
# To test with a new release Clang, use with cmake:
# -D CMAKE_CXX_COMPILER="/usr/local/bin/clang++-devel" \
# -D CMAKE_C_COMPILER="/usr/local/bin/clang-devel" \
COMPILE_FLAGS="-O0 -g"
COMPILE_FLAGS="${COMPILE_FLAGS} -fuse-ld=/usr/local/bin/ld -Wno-unused-command-line-argument"
CMAKE_CXX_FLAGS_DEBUG="$CXX_FLAGS_DEBUG $COMPILE_FLAGS"
CMAKE_C_FLAGS_DEBUG="$C_FLAGS_DEBUG $COMPILE_FLAGS"
#
# On FreeBSD we need to preinstall all the tools that are required for building
# dashboard, because versions fetched are not working on FreeBSD.
[ -z "$BUILD_DIR" ] && BUILD_DIR=build
echo Keeping the old build
if [ -d ${BUILD_DIR}.old ]; then
sudo mv ${BUILD_DIR}.old ${BUILD_DIR}.del
sudo rm -rf ${BUILD_DIR}.del &
fi
if [ -d ${BUILD_DIR} ]; then
sudo mv ${BUILD_DIR} ${BUILD_DIR}.old
fi
mkdir ${BUILD_DIR}
./do_cmake.sh "$*" \
-D WITH_CCACHE=ON \
-D CMAKE_BUILD_TYPE=Debug \
-D CMAKE_CXX_FLAGS_DEBUG="$CMAKE_CXX_FLAGS_DEBUG" \
-D CMAKE_C_FLAGS_DEBUG="$CMAKE_C_FLAGS_DEBUG" \
-D ENABLE_GIT_VERSION=OFF \
-D WITH_RADOSGW_AMQP_ENDPOINT=OFF \
-D WITH_RADOSGW_KAFKA_ENDPOINT=OFF \
-D WITH_SYSTEMD=OFF \
-D WITH_SYSTEM_BOOST=ON \
-D WITH_SYSTEM_NPM=ON \
-D WITH_LTTNG=OFF \
-D WITH_BABELTRACE=OFF \
-D WITH_SEASTAR=OFF \
-D WITH_FUSE=ON \
-D WITH_KRBD=OFF \
-D WITH_XFS=OFF \
-D WITH_KVS=ON \
-D CEPH_MAN_DIR=man \
-D WITH_LIBCEPHFS=OFF \
-D WITH_CEPHFS=OFF \
-D WITH_MGR=YES \
-D WITH_RDMA=OFF \
-D WITH_SPDK=OFF \
-D WITH_JAEGER=OFF \
2>&1 | tee cmake.log
echo -n "start building: "; date
printenv
cd ${BUILD_DIR}
gmake -j$NPROC V=1 VERBOSE=1
gmake tests
echo -n "start testing: "; date ;
RETEST=0
ctest -j $NPROC || RETEST=1
echo "Testing result, retest: = " $RETEST
if [ $RETEST -eq 1 ]; then
# make sure no leftovers are there
killall ceph-osd || true
killall ceph-mgr || true
killall ceph-mds || true
killall ceph-mon || true
# clean up after testing
rm -rf td/* /tmp/td src/test/td/* || true
rm -rf /tmp/ceph-asok.* || true
rm -rf /tmp/cores.* || true
rm -rf /tmp/*.core || true
ctest --output-on-failure --rerun-failed
fi
STATUS=$?
# cleanup after the fact
rm -rf /tmp/tmp* /tmp/foo /tmp/pip* /tmp/big* /tmp/pymp* $TMPDIR || true
echo -n "Ended: "; date
return $STATUS
ceph-main/install-deps.sh:
#!/usr/bin/env bash
#
# Ceph distributed storage system
#
# Copyright (C) 2014, 2015 Red Hat <[email protected]>
#
# Author: Loic Dachary <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
set -e
if ! [ "${_SOURCED_LIB_BUILD}" = 1 ]; then
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
CEPH_ROOT="${SCRIPT_DIR}"
. "${CEPH_ROOT}/src/script/lib-build.sh" || exit 2
fi
DIR=/tmp/install-deps.$$
trap "rm -fr $DIR" EXIT
mkdir -p $DIR
if test $(id -u) != 0 ; then
SUDO=sudo
fi
# enable UTF-8 encoding for programs like pip that expect to
# print more than just ascii chars
export LC_ALL=C.UTF-8
ARCH=$(uname -m)
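# Typical invocations (illustrative):
#   ./install-deps.sh                                   # install the build dependencies
#   FOR_MAKE_CHECK=1 ./install-deps.sh                  # also pull in what "make check" needs
#   INSTALL_EXTRA_PACKAGES="ccache" ./install-deps.sh   # add extra distro packages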
function munge_ceph_spec_in {
local with_seastar=$1
shift
local with_zbd=$1
shift
local for_make_check=$1
shift
local OUTFILE=$1
sed -e 's/@//g' < ceph.spec.in > $OUTFILE
# http://rpm.org/user_doc/conditional_builds.html
if $with_seastar; then
sed -i -e 's/%bcond_with seastar/%bcond_without seastar/g' $OUTFILE
fi
if $with_zbd; then
sed -i -e 's/%bcond_with zbd/%bcond_without zbd/g' $OUTFILE
fi
if $for_make_check; then
sed -i -e 's/%bcond_with make_check/%bcond_without make_check/g' $OUTFILE
fi
}
function munge_debian_control {
local version=$1
shift
local control=$1
case "$version" in
*squeeze*|*wheezy*)
control="/tmp/control.$$"
grep -v babeltrace debian/control > $control
;;
esac
echo $control
}
function ensure_decent_gcc_on_ubuntu {
ci_debug "Start ensure_decent_gcc_on_ubuntu() in install-deps.sh"
# point gcc to the one offered by g++-7 if the used one is not
# new enough
local old=$(gcc -dumpfullversion -dumpversion)
local new=$1
local codename=$2
if dpkg --compare-versions $old ge ${new}.0; then
return
fi
if [ ! -f /usr/bin/g++-${new} ]; then
$SUDO tee /etc/apt/sources.list.d/ubuntu-toolchain-r.list <<EOF
deb [lang=none] http://ppa.launchpad.net/ubuntu-toolchain-r/test/ubuntu $codename main
deb [arch=amd64 lang=none] http://mirror.nullivex.com/ppa/ubuntu-toolchain-r-test $codename main
EOF
# import PPA's signing key into APT's keyring
cat << ENDOFKEY | $SUDO apt-key add -
-----BEGIN PGP PUBLIC KEY BLOCK-----
Version: SKS 1.1.6
Comment: Hostname: keyserver.ubuntu.com
mI0ESuBvRwEEAMi4cDba7xlKaaoXjO1n1HX8RKrkW+HEIl79nSOSJyvzysajs7zUow/OzCQp
9NswqrDmNuH1+lPTTRNAGtK8r2ouq2rnXT1mTl23dpgHZ9spseR73s4ZBGw/ag4bpU5dNUSt
vfmHhIjVCuiSpNn7cyy1JSSvSs3N2mxteKjXLBf7ABEBAAG0GkxhdW5jaHBhZCBUb29sY2hh
aW4gYnVpbGRziLYEEwECACAFAkrgb0cCGwMGCwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRAe
k3eiup7yfzGKA/4xzUqNACSlB+k+DxFFHqkwKa/ziFiAlkLQyyhm+iqz80htRZr7Ls/ZRYZl
0aSU56/hLe0V+TviJ1s8qdN2lamkKdXIAFfavA04nOnTzyIBJ82EAUT3Nh45skMxo4z4iZMN
msyaQpNl/m/lNtOLhR64v5ZybofB2EWkMxUzX8D/FQ==
=LcUQ
-----END PGP PUBLIC KEY BLOCK-----
ENDOFKEY
$SUDO env DEBIAN_FRONTEND=noninteractive apt-get update -y || true
$SUDO env DEBIAN_FRONTEND=noninteractive apt-get install -y g++-${new}
fi
}
function ensure_python3_sphinx_on_ubuntu {
ci_debug "Running ensure_python3_sphinx_on_ubuntu() in install-deps.sh"
local sphinx_command=/usr/bin/sphinx-build
# python-sphinx points $sphinx_command to
# ../share/sphinx/scripts/python2/sphinx-build when it's installed
# let's "correct" this
if test -e $sphinx_command && head -n1 $sphinx_command | grep -q python$; then
$SUDO env DEBIAN_FRONTEND=noninteractive apt-get -y remove python-sphinx
fi
}
function install_pkg_on_ubuntu {
ci_debug "Running install_pkg_on_ubuntu() in install-deps.sh"
local project=$1
shift
local sha1=$1
shift
local codename=$1
shift
local force=$1
shift
local pkgs=$@
local missing_pkgs
if [ $force = "force" ]; then
missing_pkgs="$@"
else
for pkg in $pkgs; do
if ! apt -qq list $pkg 2>/dev/null | grep -q installed; then
missing_pkgs+=" $pkg"
ci_debug "missing_pkgs=$missing_pkgs"
fi
done
fi
if test -n "$missing_pkgs"; then
local shaman_url="https://shaman.ceph.com/api/repos/${project}/master/${sha1}/ubuntu/${codename}/repo"
in_jenkins && echo -n "CI_DEBUG: Downloading $shaman_url ... "
$SUDO curl --silent --fail --write-out "%{http_code}" --location $shaman_url --output /etc/apt/sources.list.d/$project.list
$SUDO env DEBIAN_FRONTEND=noninteractive apt-get update -y -o Acquire::Languages=none -o Acquire::Translation=none || true
$SUDO env DEBIAN_FRONTEND=noninteractive apt-get install --allow-unauthenticated -y $missing_pkgs
fi
}
function install_boost_on_ubuntu {
ci_debug "Running install_boost_on_ubuntu() in install-deps.sh"
local ver=1.82
local installed_ver=$(apt -qq list --installed ceph-libboost*-dev 2>/dev/null |
grep -e 'libboost[0-9].[0-9]\+-dev' |
cut -d' ' -f2 |
cut -d'.' -f1,2)
if test -n "$installed_ver"; then
if echo "$installed_ver" | grep -q "^$ver"; then
return
else
$SUDO env DEBIAN_FRONTEND=noninteractive apt-get -y remove "ceph-libboost.*${installed_ver}.*"
$SUDO rm -f /etc/apt/sources.list.d/ceph-libboost${installed_ver}.list
fi
fi
local codename=$1
local project=libboost
local sha1=2804368f5b807ba8334b0ccfeb8af191edeb996f
install_pkg_on_ubuntu \
$project \
$sha1 \
$codename \
check \
ceph-libboost-atomic$ver-dev \
ceph-libboost-chrono$ver-dev \
ceph-libboost-container$ver-dev \
ceph-libboost-context$ver-dev \
ceph-libboost-coroutine$ver-dev \
ceph-libboost-date-time$ver-dev \
ceph-libboost-filesystem$ver-dev \
ceph-libboost-iostreams$ver-dev \
ceph-libboost-program-options$ver-dev \
ceph-libboost-python$ver-dev \
ceph-libboost-random$ver-dev \
ceph-libboost-regex$ver-dev \
ceph-libboost-system$ver-dev \
ceph-libboost-test$ver-dev \
ceph-libboost-thread$ver-dev \
ceph-libboost-timer$ver-dev
}
function install_libzbd_on_ubuntu {
ci_debug "Running install_libzbd_on_ubuntu() in install-deps.sh"
local codename=$1
local project=libzbd
local sha1=1fadde94b08fab574b17637c2bebd2b1e7f9127b
install_pkg_on_ubuntu \
$project \
$sha1 \
$codename \
check \
libzbd-dev
}
motr_pkgs_url='https://github.com/Seagate/cortx-motr/releases/download/2.0.0-rgw'
function install_cortx_motr_on_ubuntu {
if dpkg -l cortx-motr-dev &> /dev/null; then
return
fi
if [ "$(lsb_release -sc)" = "jammy" ]; then
install_pkg_on_ubuntu \
cortx-motr \
39f89fa1c6945040433a913f2687c4b4e6cbeb3f \
jammy \
check \
cortx-motr \
cortx-motr-dev
else
local deb_arch=$(dpkg --print-architecture)
local motr_pkg="cortx-motr_2.0.0.git3252d623_$deb_arch.deb"
local motr_dev_pkg="cortx-motr-dev_2.0.0.git3252d623_$deb_arch.deb"
$SUDO curl -sL -o/var/cache/apt/archives/$motr_pkg $motr_pkgs_url/$motr_pkg
$SUDO curl -sL -o/var/cache/apt/archives/$motr_dev_pkg $motr_pkgs_url/$motr_dev_pkg
# For some reason libfabric pkg is not available in arm64 version
# of Ubuntu 20.04 (Focal Fossa), so we borrow it from more recent
# versions for now.
if [[ "$deb_arch" == 'arm64' ]]; then
local lf_pkg='libfabric1_1.11.0-2_arm64.deb'
$SUDO curl -sL -o/var/cache/apt/archives/$lf_pkg http://ports.ubuntu.com/pool/universe/libf/libfabric/$lf_pkg
$SUDO apt-get install -y /var/cache/apt/archives/$lf_pkg
fi
$SUDO apt-get install -y /var/cache/apt/archives/{$motr_pkg,$motr_dev_pkg}
$SUDO apt-get install -y libisal-dev
fi
}
function version_lt {
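# succeeds when $1 is a strictly lower version than $2,
# e.g. "version_lt 8.5 11" is true and "version_lt 11 11" is false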
test $1 != $(echo -e "$1\n$2" | sort -rV | head -n 1)
}
function ensure_decent_gcc_on_rh {
local old=$(gcc -dumpversion)
local dts_ver=$1
if version_lt $old $dts_ver; then
if test -t 1; then
# interactive shell
cat <<EOF
Your GCC is too old. Please run following command to add DTS to your environment:
scl enable gcc-toolset-$dts_ver bash
Or add the following line to the end of ~/.bashrc and run "source ~/.bashrc" to add it permanently:
source scl_source enable gcc-toolset-$dts_ver
EOF
else
# non-interactive shell
source /opt/rh/gcc-toolset-$dts_ver/enable
fi
fi
}
function populate_wheelhouse() {
ci_debug "Running populate_wheelhouse() in install-deps.sh"
local install=$1
shift
# although pip comes with virtualenv, having a recent version
# of pip matters when it comes to using wheel packages
PIP_OPTS="--timeout 300 --exists-action i"
pip $PIP_OPTS $install \
'setuptools >= 0.8' 'pip >= 21.0' 'wheel >= 0.24' 'tox >= 2.9.1' || return 1
if test $# != 0 ; then
# '--use-feature=fast-deps --use-deprecated=legacy-resolver' added per
# https://github.com/pypa/pip/issues/9818 These should be able to be
# removed at some point in the future.
pip --use-feature=fast-deps --use-deprecated=legacy-resolver $PIP_OPTS $install $@ || return 1
fi
}
function activate_virtualenv() {
ci_debug "Running activate_virtualenv() in install-deps.sh"
local top_srcdir=$1
local env_dir=$top_srcdir/install-deps-python3
if ! test -d $env_dir ; then
python3 -m venv ${env_dir}
. $env_dir/bin/activate
if ! populate_wheelhouse install ; then
rm -rf $env_dir
return 1
fi
fi
. $env_dir/bin/activate
}
function preload_wheels_for_tox() {
ci_debug "Running preload_wheels_for_tox() in install-deps.sh"
local ini=$1
shift
pushd . > /dev/null
cd $(dirname $ini)
local require_files=$(ls *requirements*.txt 2>/dev/null) || true
local constraint_files=$(ls *constraints*.txt 2>/dev/null) || true
local require=$(echo -n "$require_files" | sed -e 's/^/-r /')
local constraint=$(echo -n "$constraint_files" | sed -e 's/^/-c /')
local md5=wheelhouse/md5
if test "$require"; then
if ! test -f $md5 || ! md5sum -c $md5 > /dev/null; then
rm -rf wheelhouse
fi
fi
if test "$require" && ! test -d wheelhouse ; then
type python3 > /dev/null 2>&1 || continue
activate_virtualenv $top_srcdir || exit 1
python3 -m pip install --upgrade pip
populate_wheelhouse "wheel -w $wip_wheelhouse" $require $constraint || exit 1
mv $wip_wheelhouse wheelhouse
md5sum $require_files $constraint_files > $md5
fi
popd > /dev/null
}
for_make_check=false
if tty -s; then
# interactive
for_make_check=true
elif [ $FOR_MAKE_CHECK ]; then
for_make_check=true
else
for_make_check=false
fi
if [ x$(uname)x = xFreeBSDx ]; then
if [ "$INSTALL_EXTRA_PACKAGES" ]; then
echo "Installing extra packages not supported on FreeBSD" >&2
exit 1
fi
$SUDO pkg install -yq \
devel/babeltrace \
devel/binutils \
devel/git \
devel/gperf \
devel/gmake \
devel/cmake \
devel/nasm \
devel/boost-all \
devel/boost-python-libs \
devel/valgrind \
devel/pkgconf \
devel/libedit \
devel/libtool \
devel/google-perftools \
lang/cython \
net/openldap24-client \
archivers/snappy \
archivers/liblz4 \
ftp/curl \
misc/e2fsprogs-libuuid \
misc/getopt \
net/socat \
textproc/expat2 \
textproc/gsed \
lang/gawk \
textproc/libxml2 \
textproc/xmlstarlet \
textproc/jq \
textproc/py-sphinx \
emulators/fuse \
java/junit \
lang/python36 \
devel/py-pip \
devel/py-flake8 \
devel/py-tox \
devel/py-argparse \
devel/py-nose \
devel/py-prettytable \
devel/py-yaml \
www/py-routes \
www/py-flask \
www/node \
www/npm \
www/fcgi \
security/nss \
security/krb5 \
security/oath-toolkit \
sysutils/flock \
sysutils/fusefs-libs \
# Now use pip to install some extra python modules
pip install pecan
exit
else
[ $WITH_SEASTAR ] && with_seastar=true || with_seastar=false
[ $WITH_ZBD ] && with_zbd=true || with_zbd=false
[ $WITH_PMEM ] && with_pmem=true || with_pmem=false
[ $WITH_RADOSGW_MOTR ] && with_rgw_motr=true || with_rgw_motr=false
source /etc/os-release
case "$ID" in
debian|ubuntu|devuan|elementary|softiron)
echo "Using apt-get to install dependencies"
if [ "$INSTALL_EXTRA_PACKAGES" ]; then
if ! $SUDO apt-get install -y $INSTALL_EXTRA_PACKAGES ; then
# try again. ported over from run-make.sh (originally e278295)
# In the case that apt-get is interrupted, like when a jenkins
# job is cancelled, the package manager will be in an inconsistent
# state. Run the command again after `dpkg --configure -a` to
# bring package manager back into a clean state.
$SUDO dpkg --configure -a
ci_debug "trying to install $INSTALL_EXTRA_PACKAGES again"
$SUDO apt-get install -y $INSTALL_EXTRA_PACKAGES
fi
fi
$SUDO apt-get install -y devscripts equivs
$SUDO apt-get install -y dpkg-dev
ensure_python3_sphinx_on_ubuntu
case "$VERSION" in
*Bionic*)
ensure_decent_gcc_on_ubuntu 9 bionic
[ ! $NO_BOOST_PKGS ] && install_boost_on_ubuntu bionic
$with_zbd && install_libzbd_on_ubuntu bionic
;;
*Focal*)
ensure_decent_gcc_on_ubuntu 11 focal
[ ! $NO_BOOST_PKGS ] && install_boost_on_ubuntu focal
$with_zbd && install_libzbd_on_ubuntu focal
;;
*Jammy*)
[ ! $NO_BOOST_PKGS ] && install_boost_on_ubuntu jammy
$SUDO apt-get install -y gcc
;;
*)
$SUDO apt-get install -y gcc
;;
esac
if ! test -r debian/control ; then
echo debian/control is not a readable file
exit 1
fi
touch $DIR/status
ci_debug "Running munge_debian_control() in install-deps.sh"
backports=""
control=$(munge_debian_control "$VERSION" "debian/control")
case "$VERSION" in
*squeeze*|*wheezy*)
backports="-t $codename-backports"
;;
esac
# make a metapackage that expresses the build dependencies,
# install it, rm the .deb; then uninstall the package as its
# work is done
build_profiles=""
if $for_make_check; then
build_profiles+=",pkg.ceph.check"
fi
if $with_seastar; then
build_profiles+=",pkg.ceph.crimson"
fi
if $with_pmem; then
build_profiles+=",pkg.ceph.pmdk"
fi
ci_debug "for_make_check=$for_make_check"
ci_debug "with_seastar=$with_seastar"
ci_debug "with_jaeger=$with_jaeger"
ci_debug "build_profiles=$build_profiles"
ci_debug "Now running 'mk-build-deps' and installing ceph-build-deps package"
$SUDO env DEBIAN_FRONTEND=noninteractive mk-build-deps \
--build-profiles "${build_profiles#,}" \
--install --remove \
--tool="apt-get -y --no-install-recommends $backports" $control || exit 1
ci_debug "Removing ceph-build-deps"
$SUDO env DEBIAN_FRONTEND=noninteractive apt-get -y remove ceph-build-deps
if [ "$control" != "debian/control" ] ; then rm $control; fi
# for rgw motr backend build checks
if $with_rgw_motr; then
install_cortx_motr_on_ubuntu
fi
;;
rocky|centos|fedora|rhel|ol|virtuozzo)
builddepcmd="dnf -y builddep --allowerasing"
echo "Using dnf to install dependencies"
case "$ID" in
fedora)
$SUDO dnf install -y dnf-utils
;;
rocky|centos|rhel|ol|virtuozzo)
MAJOR_VERSION="$(echo $VERSION_ID | cut -d. -f1)"
$SUDO dnf install -y dnf-utils selinux-policy-targeted
rpm --quiet --query epel-release || \
$SUDO dnf -y install --nogpgcheck https://dl.fedoraproject.org/pub/epel/epel-release-latest-$MAJOR_VERSION.noarch.rpm
$SUDO rpm --import /etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-$MAJOR_VERSION
$SUDO rm -f /etc/yum.repos.d/dl.fedoraproject.org*
if test $ID = centos -a $MAJOR_VERSION = 8 ; then
# Enable 'powertools' or 'PowerTools' repo
$SUDO dnf config-manager --set-enabled $(dnf repolist --all 2>/dev/null|gawk 'tolower($0) ~ /^powertools\s/{print $1}')
dts_ver=11
# before EPEL8 and PowerTools provide all dependencies, we use sepia for the dependencies
$SUDO dnf config-manager --add-repo http://apt-mirror.front.sepia.ceph.com/lab-extras/8/
$SUDO dnf config-manager --setopt=apt-mirror.front.sepia.ceph.com_lab-extras_8_.gpgcheck=0 --save
$SUDO dnf -y module enable javapackages-tools
elif test $ID = rhel -a $MAJOR_VERSION = 8 ; then
dts_ver=11
$SUDO dnf config-manager --set-enabled "codeready-builder-for-rhel-8-${ARCH}-rpms"
$SUDO dnf config-manager --add-repo http://apt-mirror.front.sepia.ceph.com/lab-extras/8/
$SUDO dnf config-manager --setopt=apt-mirror.front.sepia.ceph.com_lab-extras_8_.gpgcheck=0 --save
$SUDO dnf -y module enable javapackages-tools
fi
;;
esac
if [ "$INSTALL_EXTRA_PACKAGES" ]; then
$SUDO dnf install -y $INSTALL_EXTRA_PACKAGES
fi
munge_ceph_spec_in $with_seastar $with_zbd $for_make_check $DIR/ceph.spec
# for python3_pkgversion macro defined by python-srpm-macros, which is required by python3-devel
$SUDO dnf install -y python3-devel
$SUDO $builddepcmd $DIR/ceph.spec 2>&1 | tee $DIR/yum-builddep.out
[ ${PIPESTATUS[0]} -ne 0 ] && exit 1
if [ -n "$dts_ver" ]; then
ensure_decent_gcc_on_rh $dts_ver
fi
IGNORE_YUM_BUILDEP_ERRORS="ValueError: SELinux policy is not managed or store cannot be accessed."
sed "/$IGNORE_YUM_BUILDEP_ERRORS/d" $DIR/yum-builddep.out | grep -i "error:" && exit 1
# for rgw motr backend build checks
if ! rpm --quiet -q cortx-motr-devel &&
{ [[ $FOR_MAKE_CHECK ]] || $with_rgw_motr; }; then
$SUDO dnf install -y \
"$motr_pkgs_url/isa-l-2.30.0-1.el7.${ARCH}.rpm" \
"$motr_pkgs_url/cortx-motr-2.0.0-1_git3252d623_any.el8.${ARCH}.rpm" \
"$motr_pkgs_url/cortx-motr-devel-2.0.0-1_git3252d623_any.el8.${ARCH}.rpm"
fi
;;
opensuse*|suse|sles)
echo "Using zypper to install dependencies"
zypp_install="zypper --gpg-auto-import-keys --non-interactive install --no-recommends"
$SUDO $zypp_install systemd-rpm-macros rpm-build || exit 1
if [ "$INSTALL_EXTRA_PACKAGES" ]; then
$SUDO $zypp_install $INSTALL_EXTRA_PACKAGES
fi
munge_ceph_spec_in $with_seastar false $for_make_check $DIR/ceph.spec
$SUDO $zypp_install $(rpmspec -q --buildrequires $DIR/ceph.spec) || exit 1
;;
*)
echo "$ID is unknown, dependencies will have to be installed manually."
exit 1
;;
esac
fi
# use pip cache if possible but do not store it outside of the source
# tree
# see https://pip.pypa.io/en/stable/reference/pip_install.html#caching
if $for_make_check; then
mkdir -p install-deps-cache
top_srcdir=$(pwd)
export XDG_CACHE_HOME=$top_srcdir/install-deps-cache
wip_wheelhouse=wheelhouse-wip
#
# preload python modules so that tox can run without network access
#
find . -name tox.ini | while read ini ; do
preload_wheels_for_tox $ini
done
rm -rf $top_srcdir/install-deps-python3
rm -rf $XDG_CACHE_HOME
type git > /dev/null || (echo "Dashboard uses git to pull dependencies." ; false)
fi
ci_debug "End install-deps.sh" || true
ceph-main/make-debs.sh:
#!/usr/bin/env bash
#
# Copyright (C) 2015 Red Hat <[email protected]>
#
# Author: Loic Dachary <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Public License for more details.
#
set -xe
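#
# Usage (assumed): ./make-debs.sh [release-dir]
# e.g. "./make-debs.sh /tmp/release"; the release directory defaults to
# /tmp/release when omitted.
#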
. /etc/os-release
base=${1:-/tmp/release}
releasedir=$base/$NAME/WORKDIR
rm -fr $(dirname $releasedir)
mkdir -p $releasedir
#
# remove all files not under git so they are not
# included in the distribution.
#
git clean -dxf
#
# git describe provides a version that is
# a) human readable
# b) is unique for each commit
# c) compares higher than any previous commit
# d) contains the short hash of the commit
#
vers=$(git describe --match "v*" | sed s/^v//)
./make-dist $vers
#
# rename the tarball to match Debian conventions and extract it
#
mv ceph-$vers.tar.bz2 $releasedir/ceph_$vers.orig.tar.bz2
tar -C $releasedir -jxf $releasedir/ceph_$vers.orig.tar.bz2
#
# copy the debian directory over and remove -dbg packages
# because they are large and take time to build
#
cp -a debian $releasedir/ceph-$vers/debian
cd $releasedir
perl -ni -e 'print if(!(/^Package: .*-dbg$/../^$/))' ceph-$vers/debian/control
perl -pi -e 's/--dbg-package.*//' ceph-$vers/debian/rules
#
# always set the debian version to 1 which is ok because the debian
# directory is included in the sources and the upstream version will
# change each time it is modified.
#
dvers="$vers-1"
#
# update the changelog to match the desired version
#
cd ceph-$vers
chvers=$(head -1 debian/changelog | perl -ne 's/.*\(//; s/\).*//; print')
if [ "$chvers" != "$dvers" ]; then
DEBEMAIL="[email protected]" dch -D $VERSION_CODENAME --force-distribution -b -v "$dvers" "new version"
fi
#
# create the packages
# a) with ccache to speed things up when building repeatedly
# b) do not sign the packages
# c) use half of the available processors
#
: ${NPROC:=$(($(nproc) / 2))}
if test $NPROC -gt 1 ; then
j=-j${NPROC}
fi
PATH=/usr/lib/ccache:$PATH dpkg-buildpackage $j -uc -us
cd ../..
mkdir -p $VERSION_CODENAME/conf
cat > $VERSION_CODENAME/conf/distributions <<EOF
Codename: $VERSION_CODENAME
Suite: stable
Components: main
Architectures: $(dpkg --print-architecture) source
EOF
if [ ! -e conf ]; then
ln -s $VERSION_CODENAME/conf conf
fi
reprepro --basedir $(pwd) include $VERSION_CODENAME WORKDIR/*.changes
#
# teuthology needs the version in the version file
#
echo $dvers > $VERSION_CODENAME/version
ceph-main/make-srpm.sh:
#!/bin/sh
#
# Create a SRPM which can be used to build Ceph
#
# ./make-srpm.sh <version>
# rpmbuild --rebuild /tmp/ceph/ceph-<version>-0.el7.centos.src.rpm
#
./make-dist $1
rpmbuild -D"_sourcedir `pwd`" -D"_specdir `pwd`" -D"_srcrpmdir `pwd`" -bs ceph.spec
ceph-main/mingw_conf.sh:
# MINGW Settings:
# Due to inconsistencies between distributions, the mingw versions, binaries,
# and directories must be determined (or defined) prior to building.
# This script expects the following variables:
# * OS - currently ubuntu, rhel, or suse. In the future we may attempt to
# detect the platform.
# * MINGW_CMAKE_FILE - if set, a cmake toolchain file will be created
# * MINGW_POSIX_FLAGS - if set, Mingw Posix compatibility mode will be
# enabled by defining the corresponding flags.
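# Example (illustrative): a caller such as win32_build.sh typically sets these
# variables and then sources this file, e.g.
#   OS=ubuntu MINGW_CMAKE_FILE="$BUILD_DIR/mingw32.cmake" MINGW_POSIX_FLAGS=1 source mingw_conf.sh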
# -Common mingw settings-
MINGW_PREFIX="x86_64-w64-mingw32-"
MINGW_BASE="x86_64-w64-mingw32"
MINGW_CPP="${MINGW_BASE}-c++"
MINGW_DLLTOOL="${MINGW_BASE}-dlltool"
MINGW_WINDRES="${MINGW_BASE}-windres"
MINGW_STRIP="${MINGW_BASE}-strip"
MINGW_OBJCOPY="${MINGW_BASE}-objcopy"
# -Distribution specific mingw settings-
case "$OS" in
ubuntu)
mingwPosix="-posix"
mingwLibDir="/usr/lib/gcc"
mingwVersion="$(${MINGW_CPP}${mingwPosix} -dumpversion)"
mingwTargetLibDir="${mingwLibDir}/${MINGW_BASE}/${mingwVersion}"
mingwLibpthreadDir="/usr/${MINGW_BASE}/lib"
PTW32Include=/usr/share/mingw-w64/include
PTW32Lib=/usr/x86_64-w64-mingw32/lib
;;
rhel)
mingwPosix=""
mingwLibDir="/usr/lib64/gcc"
mingwVersion="$(${MINGW_CPP}${mingwPosix} -dumpversion)"
mingwTargetLibDir="/usr/${MINGW_BASE}/sys-root/mingw/bin"
mingwLibpthreadDir="$mingwTargetLibDir"
PTW32Include=/usr/x86_64-w64-mingw32/sys-root/mingw/include
PTW32Lib=/usr/x86_64-w64-mingw32/sys-root/mingw/lib
;;
suse)
mingwPosix=""
mingwLibDir="/usr/lib64/gcc"
mingwVersion="$(${MINGW_CPP}${mingwPosix} -dumpversion)"
mingwTargetLibDir="/usr/${MINGW_BASE}/sys-root/mingw/bin"
mingwLibpthreadDir="$mingwTargetLibDir"
PTW32Include=/usr/x86_64-w64-mingw32/sys-root/mingw/include
PTW32Lib=/usr/x86_64-w64-mingw32/sys-root/mingw/lib
;;
*)
echo "$ID is unknown, automatic mingw configuration is not possible."
exit 1
;;
esac
# -Common mingw settings, dependent upon distribution specific settings-
MINGW_FIND_ROOT_LIB_PATH="${mingwLibDir}/\${TOOLCHAIN_PREFIX}/${mingwVersion}"
MINGW_CC="${MINGW_BASE}-gcc${mingwPosix}"
MINGW_CXX="${MINGW_BASE}-g++${mingwPosix}"
# End MINGW configuration
if [[ -n $MINGW_CMAKE_FILE ]]; then
cat > $MINGW_CMAKE_FILE <<EOL
set(CMAKE_SYSTEM_NAME Windows)
set(TOOLCHAIN_PREFIX ${MINGW_BASE})
set(CMAKE_SYSTEM_PROCESSOR x86_64)
# We'll need to use posix threads in order to use
# C++11 features, such as std::thread.
set(CMAKE_C_COMPILER \${TOOLCHAIN_PREFIX}-gcc${mingwPosix})
set(CMAKE_CXX_COMPILER \${TOOLCHAIN_PREFIX}-g++${mingwPosix})
set(CMAKE_RC_COMPILER \${TOOLCHAIN_PREFIX}-windres)
set(CMAKE_FIND_ROOT_PATH /usr/\${TOOLCHAIN_PREFIX} ${MINGW_FIND_ROOT_LIB_PATH})
set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
# TODO: consider switching this to "ONLY". The issue with
# that is that all our libs should then be under
# CMAKE_FIND_ROOT_PATH and CMAKE_PREFIX_PATH would be ignored.
set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY BOTH)
set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE BOTH)
EOL
if [[ -n $MINGW_POSIX_FLAGS ]]; then
cat >> $MINGW_CMAKE_FILE <<EOL
# Some functions (e.g. localtime_r) will not be available unless we set
# the following flag.
add_definitions(-D_POSIX=1)
add_definitions(-D_POSIX_C_SOURCE=1)
add_definitions(-D_POSIX_=1)
add_definitions(-D_POSIX_THREADS=1)
EOL
fi
fi
ceph-main/run-make-check.sh:
#!/usr/bin/env bash
#
# Ceph distributed storage system
#
# Copyright (C) 2014 Red Hat <[email protected]>
#
# Author: Loic Dachary <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
#
# To just look at what this script will do, run it like this:
#
# $ DRY_RUN=echo ./run-make-check.sh
#
source src/script/run-make.sh
set -e
function run() {
# to prevent OSD EMFILE death on tests, make sure ulimit >= 1024
$DRY_RUN ulimit -n $(ulimit -Hn)
if [ $(ulimit -n) -lt 1024 ];then
echo "***ulimit -n too small, better bigger than 1024 for test***"
return 1
fi
# increase the aio-max-nr, which is by default 65536. we could reach this
# limit while running seastar tests and bluestore tests.
local m=16
local procs="$(($(get_processors) * 2))"
if [ "${procs}" -gt $m ]; then
m="${procs}"
fi
local aiomax="$((65536 * procs))"
if [ "$(/sbin/sysctl -n fs.aio-max-nr )" -lt "${aiomax}" ]; then
$DRY_RUN sudo /sbin/sysctl -q -w fs.aio-max-nr="${aiomax}"
fi
CHECK_MAKEOPTS=${CHECK_MAKEOPTS:-$DEFAULT_MAKEOPTS}
if in_jenkins; then
if ! ctest $CHECK_MAKEOPTS --no-compress-output --output-on-failure --test-output-size-failed 1024000 -T Test; then
# do not return failure, as the jenkins publisher will take care of this
rm -fr ${TMPDIR:-/tmp}/ceph-asok.*
fi
else
if ! $DRY_RUN ctest $CHECK_MAKEOPTS --output-on-failure; then
rm -fr ${TMPDIR:-/tmp}/ceph-asok.*
return 1
fi
fi
}
function main() {
if [[ $EUID -eq 0 ]] ; then
echo "For best results, run this script as a normal user configured"
echo "with the ability to run commands as root via sudo."
fi
echo -n "Checking hostname sanity... "
if $DRY_RUN hostname --fqdn >/dev/null 2>&1 ; then
echo "OK"
else
echo "NOT OK"
echo "Please fix 'hostname --fqdn', otherwise 'make check' will fail"
return 1
fi
# uses run-make.sh to install-deps
FOR_MAKE_CHECK=1 prepare
configure "$@"
in_jenkins && echo "CI_DEBUG: Running 'build tests'"
build tests
echo "make check: successful build on $(git rev-parse HEAD)"
FOR_MAKE_CHECK=1 run
}
if [ "$0" = "$BASH_SOURCE" ]; then
main "$@"
fi
ceph-main/win32_build.sh:
#!/usr/bin/env bash
set -e
set -o pipefail
SCRIPT_DIR="$(dirname "$BASH_SOURCE")"
SCRIPT_DIR="$(realpath "$SCRIPT_DIR")"
num_vcpus=$(nproc)
CEPH_DIR="${CEPH_DIR:-$SCRIPT_DIR}"
BUILD_DIR="${BUILD_DIR:-${CEPH_DIR}/build}"
DEPS_DIR="${DEPS_DIR:-$CEPH_DIR/build.deps}"
ZIP_DEST="${ZIP_DEST:-$BUILD_DIR/ceph.zip}"
CLEAN_BUILD=${CLEAN_BUILD:-}
SKIP_BUILD=${SKIP_BUILD:-}
# Useful when packaging existing binaries.
SKIP_CMAKE=${SKIP_CMAKE:-}
SKIP_DLL_COPY=${SKIP_DLL_COPY:-}
SKIP_TESTS=${SKIP_TESTS:-}
SKIP_BINDIR_CLEAN=${SKIP_BINDIR_CLEAN:-}
# Use Ninja's default (number of vCPUs + 2), which can be useful on machines with few cores.
NUM_WORKERS_DEFAULT=$(( $num_vcpus + 2 ))
NUM_WORKERS=${NUM_WORKERS:-$NUM_WORKERS_DEFAULT}
DEV_BUILD=${DEV_BUILD:-}
# Unless SKIP_ZIP is set, we're preparing an archive that contains the Ceph
# binaries, debug symbols as well as the required DLLs.
SKIP_ZIP=${SKIP_ZIP:-}
# By default, we'll move the debug symbols to separate files located in the
# ".debug" directory. If "EMBEDDED_DBG_SYM" is set, the debug symbols will
# remain embedded in the binaries.
#
# Unfortunately we cannot use pdb symbols when cross compiling. cv2pdb as
# well as llvm rely on mspdb*.dll in order to support this proprietary format.
EMBEDDED_DBG_SYM=${EMBEDDED_DBG_SYM:-}
# Allow for OS specific customizations through the OS flag.
# Valid options are currently "ubuntu", "suse", and "rhel".
OS=${OS}
if [[ -z $OS ]]; then
source /etc/os-release
case "$ID" in
opensuse*|suse|sles)
OS="suse"
;;
rhel|centos)
OS="rhel"
;;
ubuntu)
OS="ubuntu"
;;
*)
echo "Unsupported Linux distro $ID."
echo "only SUSE, Ubuntu and RHEL are supported."
echo "Set the OS environment variable to override."
exit 1
;;
esac
fi
export OS="$OS"
# We'll have to be explicit here since auto-detecting doesn't work
# properly when cross compiling.
ALLOCATOR=${ALLOCATOR:-libc}
# Debug builds don't work with MINGW for the time being, failing with
# can't close <file>: File too big
# -Wa,-mbig-obj does not help.
CMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE:-}
if [[ -z $CMAKE_BUILD_TYPE ]]; then
# By default, we're building release binaries with minimal debug information.
export CFLAGS="$CFLAGS -g1"
export CXXFLAGS="$CXXFLAGS -g1"
CMAKE_BUILD_TYPE=Release
fi
# Some tests can't use shared libraries yet due to unspecified dependencies.
# We'll do a static build by default for now.
ENABLE_SHARED=${ENABLE_SHARED:-OFF}
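# Example invocations (illustrative):
#   ./win32_build.sh                              # full build with the defaults above
#   SKIP_TESTS=1 NUM_WORKERS=4 ./win32_build.sh   # skip test binaries, cap the workers
#   CLEAN_BUILD=1 ./win32_build.sh                # wipe the build and deps dirs first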
binDir="$BUILD_DIR/bin"
strippedBinDir="$BUILD_DIR/bin_stripped"
# GDB will look for this directory by default.
dbgDirname=".debug"
dbgSymbolDir="$strippedBinDir/${dbgDirname}"
depsSrcDir="$DEPS_DIR/src"
depsToolsetDir="$DEPS_DIR/mingw"
cmakeGenerator="Ninja"
lz4Dir="${depsToolsetDir}/lz4"
sslDir="${depsToolsetDir}/openssl"
boostDir="${depsToolsetDir}/boost"
zlibDir="${depsToolsetDir}/zlib"
backtraceDir="${depsToolsetDir}/libbacktrace"
snappyDir="${depsToolsetDir}/snappy"
winLibDir="${depsToolsetDir}/windows/lib"
wnbdSrcDir="${depsSrcDir}/wnbd"
wnbdLibDir="${depsToolsetDir}/wnbd/lib"
dokanSrcDir="${depsSrcDir}/dokany"
dokanLibDir="${depsToolsetDir}/dokany/lib"
depsDirs="$lz4Dir;$sslDir;$boostDir;$zlibDir;$backtraceDir;$snappyDir"
depsDirs+=";$winLibDir"
# Cmake recommends using CMAKE_PREFIX_PATH instead of link_directories.
# Still, some library dependencies may not include the full path (e.g. Boost
# sets the "-lz" flag through INTERFACE_LINK_LIBRARIES), which is why
# we have to ensure that the linker will be able to locate them.
linkDirs="$zlibDir/lib"
lz4Lib="${lz4Dir}/lib/dll/liblz4-1.dll"
lz4Include="${lz4Dir}/lib"
if [[ -n $CLEAN_BUILD ]]; then
echo "Cleaning up build dir: $BUILD_DIR"
rm -rf $BUILD_DIR
rm -rf $DEPS_DIR
fi
if [[ -z $SKIP_BINDIR_CLEAN ]]; then
echo "Cleaning up bin dir: $binDir"
rm -rf $binDir
fi
mkdir -p $BUILD_DIR
cd $BUILD_DIR
if [[ ! -f ${depsToolsetDir}/completed ]]; then
echo "Preparing dependencies: $DEPS_DIR. Log: ${BUILD_DIR}/build_deps.log"
NUM_WORKERS=$NUM_WORKERS DEPS_DIR=$DEPS_DIR OS="$OS"\
"$SCRIPT_DIR/win32_deps_build.sh" | tee "${BUILD_DIR}/build_deps.log"
fi
# Due to distribution specific mingw settings, the mingw.cmake file
# must be built prior to running cmake.
MINGW_CMAKE_FILE="$BUILD_DIR/mingw32.cmake"
MINGW_POSIX_FLAGS=1
source "$SCRIPT_DIR/mingw_conf.sh"
if [[ -z $SKIP_CMAKE ]]; then
# We'll need to cross compile Boost.Python before enabling
# "WITH_MGR".
echo "Generating solution. Log: ${BUILD_DIR}/cmake.log"
# This isn't propagated to some of the subprojects, we'll use an env variable
# for now.
export CMAKE_PREFIX_PATH=$depsDirs
if [[ -n $DEV_BUILD ]]; then
echo "Dev build enabled."
echo "Git versioning will be disabled."
ENABLE_GIT_VERSION="OFF"
WITH_CEPH_DEBUG_MUTEX="ON"
else
ENABLE_GIT_VERSION="ON"
WITH_CEPH_DEBUG_MUTEX="OFF"
fi
# As opposed to Linux, Windows shared libraries can't have unresolved
# symbols. Until we fix the dependencies (which are either unspecified
# or circular), we'll have to stick to static linking.
cmake -D CMAKE_PREFIX_PATH=$depsDirs \
-D MINGW_LINK_DIRECTORIES="$linkDirs" \
-D CMAKE_TOOLCHAIN_FILE="$MINGW_CMAKE_FILE" \
-D WITH_LIBCEPHSQLITE=OFF \
-D WITH_RDMA=OFF -D WITH_OPENLDAP=OFF \
-D WITH_GSSAPI=OFF -D WITH_XFS=OFF \
-D WITH_FUSE=OFF -D WITH_DOKAN=ON \
-D WITH_BLUESTORE=OFF -D WITH_LEVELDB=OFF \
-D WITH_LTTNG=OFF -D WITH_BABELTRACE=OFF -D WITH_JAEGER=OFF \
-D WITH_SYSTEM_BOOST=ON -D WITH_MGR=OFF -D WITH_KVS=OFF \
-D WITH_LIBCEPHFS=ON -D WITH_KRBD=OFF -D WITH_RADOSGW=OFF \
-D ENABLE_SHARED=$ENABLE_SHARED -D WITH_RBD=ON -D BUILD_GMOCK=ON \
-D WITH_CEPHFS=OFF -D WITH_MANPAGE=OFF \
-D WITH_MGR_DASHBOARD_FRONTEND=OFF -D WITH_SYSTEMD=OFF -D WITH_TESTS=ON \
-D LZ4_INCLUDE_DIR=$lz4Include -D LZ4_LIBRARY=$lz4Lib \
-D Backtrace_INCLUDE_DIR="$backtraceDir/include" \
-D Backtrace_LIBRARY="$backtraceDir/lib/libbacktrace.a" \
-D ENABLE_GIT_VERSION=$ENABLE_GIT_VERSION \
-D ALLOCATOR="$ALLOCATOR" -D CMAKE_BUILD_TYPE=$CMAKE_BUILD_TYPE \
-D WNBD_INCLUDE_DIRS="$wnbdSrcDir/include" \
-D WNBD_LIBRARIES="$wnbdLibDir/libwnbd.a" \
-D WITH_CEPH_DEBUG_MUTEX=$WITH_CEPH_DEBUG_MUTEX \
-D DOKAN_INCLUDE_DIRS="$dokanSrcDir/dokan" \
-D DOKAN_LIBRARIES="$dokanLibDir/libdokan.a" \
-G "$cmakeGenerator" \
$CEPH_DIR 2>&1 | tee "${BUILD_DIR}/cmake.log"
fi # [[ -z $SKIP_CMAKE ]]
if [[ -z $SKIP_BUILD ]]; then
echo "Building using $NUM_WORKERS workers. Log: ${BUILD_DIR}/build.log"
echo "" > "${BUILD_DIR}/build.log"
cd $BUILD_DIR
ninja_targets="rados rbd rbd-wnbd "
ninja_targets+=" ceph-conf ceph-immutable-object-cache"
ninja_targets+=" cephfs ceph-dokan"
# TODO: do we actually need the ceph compression libs?
ninja_targets+=" compressor ceph_lz4 ceph_snappy ceph_zlib ceph_zstd"
if [[ -z $SKIP_TESTS ]]; then
ninja_targets+=" tests ceph_radosacl ceph_scratchtool "
ninja_targets+=`ninja -t targets | grep ceph_test | cut -d ":" -f 1 | grep -v exe`
fi
ninja -v $ninja_targets 2>&1 | tee "${BUILD_DIR}/build.log"
fi
if [[ -z $SKIP_DLL_COPY ]]; then
# To adjust mingw paths, see 'mingw_conf.sh'.
required_dlls=(
$zlibDir/zlib1.dll
$lz4Dir/lib/dll/liblz4-1.dll
$sslDir/bin/libcrypto-1_1-x64.dll
$sslDir/bin/libssl-1_1-x64.dll
$mingwTargetLibDir/libstdc++-6.dll
$mingwTargetLibDir/libgcc_s_seh-1.dll
$mingwTargetLibDir/libssp*.dll
$mingwLibpthreadDir/libwinpthread-1.dll
$boostDir/lib/*.dll)
echo "Copying required dlls to $binDir."
cp ${required_dlls[@]} $binDir
fi
if [[ -z $SKIP_ZIP ]]; then
# Use a temp directory, in order to create a clean zip file
ZIP_TMPDIR=$(mktemp -d win_binaries.XXXXX)
if [[ -z $EMBEDDED_DBG_SYM ]]; then
echo "Extracting debug symbols from binaries."
rm -rf $strippedBinDir; mkdir $strippedBinDir
rm -rf $dbgSymbolDir; mkdir $dbgSymbolDir
# Strip files individually, to save time and space
for file in $binDir/*.exe $binDir/*.dll; do
dbgFilename=$(basename $file).debug
dbgFile="$dbgSymbolDir/$dbgFilename"
strippedFile="$strippedBinDir/$(basename $file)"
echo "Copying debug symbols: $dbgFile"
$MINGW_OBJCOPY --only-keep-debug $file $dbgFile
$MINGW_STRIP --strip-debug --strip-unneeded -o $strippedFile $file
$MINGW_OBJCOPY --remove-section .gnu_debuglink $strippedFile
$MINGW_OBJCOPY --add-gnu-debuglink=$dbgFile $strippedFile
done
# Copy any remaining files to the stripped directory
for file in $binDir/*; do
[[ ! -f $strippedBinDir/$(basename $file) ]] && \
cp $file $strippedBinDir
done
ln -s $strippedBinDir $ZIP_TMPDIR/ceph
else
ln -s $binDir $ZIP_TMPDIR/ceph
fi
echo "Building zip archive $ZIP_DEST."
# Include the README file in the archive
ln -s $CEPH_DIR/README.windows.rst $ZIP_TMPDIR/ceph/README.windows.rst
cd $ZIP_TMPDIR
[[ -f $ZIP_DEST ]] && rm $ZIP_DEST
zip -r $ZIP_DEST ceph
cd -
rm -rf $ZIP_TMPDIR/ceph/README.windows.rst $ZIP_TMPDIR
echo -e '\n WIN32 files zipped to: '$ZIP_DEST'\n'
fi
ceph-main/win32_deps_build.sh:
#!/usr/bin/env bash
set -e
SCRIPT_DIR="$(dirname "$BASH_SOURCE")"
SCRIPT_DIR="$(realpath "$SCRIPT_DIR")"
num_vcpus=$(nproc)
NUM_WORKERS=${NUM_WORKERS:-$num_vcpus}
DEPS_DIR="${DEPS_DIR:-$SCRIPT_DIR/build.deps}"
depsSrcDir="$DEPS_DIR/src"
depsToolsetDir="$DEPS_DIR/mingw"
lz4SrcDir="${depsSrcDir}/lz4"
lz4Dir="${depsToolsetDir}/lz4"
lz4Tag="v1.9.2"
sslTag="OpenSSL_1_1_1c"
sslDir="${depsToolsetDir}/openssl"
sslSrcDir="${depsSrcDir}/openssl"
# For now, we'll keep the version number within the file path when not using git.
boostUrl="https://boostorg.jfrog.io/artifactory/main/release/1.82.0/source/boost_1_82_0.tar.gz"
boostSrcDir="${depsSrcDir}/boost_1_82_0"
boostDir="${depsToolsetDir}/boost"
zlibDir="${depsToolsetDir}/zlib"
zlibSrcDir="${depsSrcDir}/zlib"
backtraceDir="${depsToolsetDir}/libbacktrace"
backtraceSrcDir="${depsSrcDir}/libbacktrace"
snappySrcDir="${depsSrcDir}/snappy"
snappyDir="${depsToolsetDir}/snappy"
snappyTag="1.1.9"
# Additional Windows libraries, which aren't provided by Mingw
winLibDir="${depsToolsetDir}/windows/lib"
wnbdUrl="https://github.com/cloudbase/wnbd"
wnbdTag="main"
wnbdSrcDir="${depsSrcDir}/wnbd"
wnbdLibDir="${depsToolsetDir}/wnbd/lib"
dokanUrl="https://github.com/dokan-dev/dokany"
dokanTag="v2.0.5.1000"
dokanSrcDir="${depsSrcDir}/dokany"
dokanLibDir="${depsToolsetDir}/dokany/lib"
# Allow for OS specific customizations through the OS flag (normally
# passed through from win32_build).
# Valid options are currently "ubuntu", "rhel", and "suse".
OS=${OS:-"ubuntu"}
function _make() {
make -j $NUM_WORKERS $@
}
if [[ -d $DEPS_DIR ]]; then
echo "Cleaning up dependency build dir: $DEPS_DIR"
rm -rf $DEPS_DIR
fi
mkdir -p $DEPS_DIR
mkdir -p $depsToolsetDir
mkdir -p $depsSrcDir
echo "Installing required packages."
case "$OS" in
rhel)
# pkgconf needs https://bugzilla.redhat.com/show_bug.cgi?id=1975416
sudo yum -y --setopt=skip_missing_names_on_install=False install \
mingw64-gcc-c++ \
cmake \
pkgconf \
python3-devel \
autoconf \
libtool \
ninja-build \
zip \
python3-PyYAML \
gcc \
diffutils \
patch \
wget \
perl \
git-core
;;
ubuntu)
sudo apt-get update
sudo env DEBIAN_FRONTEND=noninteractive apt-get -y install \
mingw-w64 g++ cmake pkg-config \
python3-dev python3-yaml \
autoconf libtool ninja-build wget zip \
git
;;
suse)
for PKG in mingw64-cross-gcc-c++ mingw64-libgcc_s_seh1 mingw64-libstdc++6 \
cmake pkgconf python3-devel autoconf libtool ninja zip \
python3-PyYAML \
gcc patch wget git; do
rpm -q $PKG >/dev/null || zypper -n install $PKG
done
;;
esac
MINGW_CMAKE_FILE="$DEPS_DIR/mingw.cmake"
source "$SCRIPT_DIR/mingw_conf.sh"
echo "Building zlib."
cd $depsSrcDir
if [[ ! -d $zlibSrcDir ]]; then
git clone --depth 1 https://github.com/madler/zlib
fi
cd $zlibSrcDir
# Apparently the configure script is broken...
sed -e s/"PREFIX = *$"/"PREFIX = ${MINGW_PREFIX}"/ -i win32/Makefile.gcc
_make -f win32/Makefile.gcc
_make BINARY_PATH=$zlibDir \
INCLUDE_PATH=$zlibDir/include \
LIBRARY_PATH=$zlibDir/lib \
SHARED_MODE=1 \
-f win32/Makefile.gcc install
echo "Building lz4."
cd $depsToolsetDir
if [[ ! -d $lz4Dir ]]; then
git clone --branch $lz4Tag --depth 1 https://github.com/lz4/lz4
cd $lz4Dir
fi
cd $lz4Dir
_make BUILD_STATIC=no CC=${MINGW_CC%-posix*} \
DLLTOOL=${MINGW_DLLTOOL} \
WINDRES=${MINGW_WINDRES} \
TARGET_OS=Windows_NT
echo "Building OpenSSL."
cd $depsSrcDir
if [[ ! -d $sslSrcDir ]]; then
git clone --branch $sslTag --depth 1 https://github.com/openssl/openssl
cd $sslSrcDir
fi
cd $sslSrcDir
mkdir -p $sslDir
CROSS_COMPILE="${MINGW_PREFIX}" ./Configure \
mingw64 shared --prefix=$sslDir --libdir="$sslDir/lib"
_make depend
_make
_make install_sw
echo "Building boost."
cd $depsSrcDir
if [[ ! -d $boostSrcDir ]]; then
echo "Downloading boost."
wget -qO- $boostUrl | tar xz
fi
cd $boostSrcDir
echo "using gcc : mingw32 : ${MINGW_CXX} ;" > user-config.jam
# Workaround for https://github.com/boostorg/thread/issues/156
# Older versions of mingw provided a different pthread lib.
sed -i 's/lib$(libname)GC2.a/lib$(libname).a/g' ./libs/thread/build/Jamfile.v2
sed -i 's/mthreads/pthreads/g' ./tools/build/src/tools/gcc.py
sed -i 's/mthreads/pthreads/g' ./tools/build/src/tools/gcc.jam
sed -i 's/pthreads/mthreads/g' ./tools/build/src/tools/gcc.py
sed -i 's/pthreads/mthreads/g' ./tools/build/src/tools/gcc.jam
export PTW32_INCLUDE=${PTW32Include}
export PTW32_LIB=${PTW32Lib}
echo "Patching boost."
# Fix getting Windows page size
# TODO: send this upstream and maybe use a fork until it merges.
# Meanwhile, we might consider moving those to ceph/cmake/modules/BuildBoost.cmake.
# This cmake module will first have to be updated to support Mingw though.
patch -N boost/thread/pthread/thread_data.hpp <<EOL
--- boost/thread/pthread/thread_data.hpp 2019-10-11 15:26:15.678703586 +0300
+++ boost/thread/pthread/thread_data.hpp.new 2019-10-11 15:26:07.321463698 +0300
@@ -32,6 +32,10 @@
# endif
#endif
+#if defined(_WIN32)
+#include <windows.h>
+#endif
+
#include <pthread.h>
#include <unistd.h>
@@ -54,6 +58,10 @@
if (size==0) return;
#ifdef BOOST_THREAD_USES_GETPAGESIZE
std::size_t page_size = getpagesize();
+#elif _WIN32
+ SYSTEM_INFO system_info;
+ ::GetSystemInfo (&system_info);
+ std::size_t page_size = system_info.dwPageSize;
#else
std::size_t page_size = ::sysconf( _SC_PAGESIZE);
#endif
EOL
./bootstrap.sh
./b2 install --user-config=user-config.jam toolset=gcc-mingw32 \
target-os=windows release \
link=static,shared \
threadapi=win32 --prefix=$boostDir \
address-model=64 architecture=x86 \
binary-format=pe abi=ms -j $NUM_WORKERS \
-sZLIB_INCLUDE=$zlibDir/include -sZLIB_LIBRARY_PATH=$zlibDir/lib \
--without-python --without-mpi --without-log --without-wave
echo "Building libbacktrace."
cd $depsSrcDir
if [[ ! -d $backtraceSrcDir ]]; then
git clone --depth 1 https://github.com/ianlancetaylor/libbacktrace
fi
mkdir -p $backtraceSrcDir/build
cd $backtraceSrcDir/build
../configure --prefix=$backtraceDir --exec-prefix=$backtraceDir \
--host ${MINGW_BASE} --enable-host-shared \
--libdir="$backtraceDir/lib"
_make LDFLAGS="-no-undefined"
_make install
echo "Building snappy."
cd $depsSrcDir
if [[ ! -d $snappySrcDir ]]; then
git clone --branch $snappyTag --depth 1 https://github.com/google/snappy
cd $snappySrcDir
fi
mkdir -p $snappySrcDir/build
cd $snappySrcDir/build
cmake -DCMAKE_INSTALL_PREFIX=$snappyDir \
-DCMAKE_BUILD_TYPE=Release \
-DBUILD_SHARED_LIBS=ON \
-DSNAPPY_BUILD_TESTS=OFF \
-DSNAPPY_BUILD_BENCHMARKS=OFF \
-DCMAKE_TOOLCHAIN_FILE=$MINGW_CMAKE_FILE \
../
_make
_make install
cmake -DCMAKE_INSTALL_PREFIX=$snappyDir \
-DCMAKE_BUILD_TYPE=Release \
-DBUILD_SHARED_LIBS=OFF \
-DSNAPPY_BUILD_TESTS=OFF \
-DCMAKE_TOOLCHAIN_FILE=$MINGW_CMAKE_FILE \
../
_make
_make install
echo "Generating mswsock.lib."
# mswsock.lib is not provided by mingw, so we'll have to generate
# it.
mkdir -p $winLibDir
cat > $winLibDir/mswsock.def <<EOF
LIBRARY MSWSOCK.DLL
EXPORTS
AcceptEx@32
EnumProtocolsA@12
EnumProtocolsW@12
GetAcceptExSockaddrs@32
GetAddressByNameA@40
GetAddressByNameW@40
GetNameByTypeA@12
GetNameByTypeW@12
GetServiceA@28
GetServiceW@28
GetTypeByNameA@8
GetTypeByNameW@8
MigrateWinsockConfiguration@12
NPLoadNameSpaces@12
SetServiceA@24
SetServiceW@24
TransmitFile@28
WSARecvEx@16
dn_expand@20
getnetbyname@4
inet_network@4
rcmd@24
rexec@24
rresvport@4
s_perror@8
sethostname@8
EOF
$MINGW_DLLTOOL -d $winLibDir/mswsock.def \
-l $winLibDir/libmswsock.a
echo "Fetching libwnbd."
cd $depsSrcDir
if [[ ! -d $wnbdSrcDir ]]; then
git clone --branch $wnbdTag --depth 1 $wnbdUrl
fi
cd $wnbdSrcDir
mkdir -p $wnbdLibDir
$MINGW_DLLTOOL -d $wnbdSrcDir/libwnbd/libwnbd.def \
-D libwnbd.dll \
-l $wnbdLibDir/libwnbd.a
echo "Fetching dokany."
cd $depsSrcDir
if [[ ! -d $dokanSrcDir ]]; then
git clone --branch $dokanTag --depth 1 $dokanUrl
fi
mkdir -p $dokanLibDir
$MINGW_DLLTOOL -d $dokanSrcDir/dokan/dokan.def \
-l $dokanLibDir/libdokan.a
# That's probably the easiest way to deal with the dokan imports.
# dokan.h is defined in both ./dokan and ./sys while both are using
# sys/public.h without the "sys" prefix.
cp $dokanSrcDir/sys/public.h $dokanSrcDir/dokan
echo "Finished building Ceph dependencies."
touch $depsToolsetDir/completed
ceph-main/.github/dependabot.yml:
---
version: 2
updates:
- package-ecosystem: "npm"
directory: "/src/pybind/mgr/dashboard/frontend"
schedule:
interval: "daily"
commit-message:
prefix: "mgr/dashboard:"
labels:
- "dashboard"
pull-request-branch-name:
separator: "-"
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "daily"
commit-message:
prefix: ".github/workflows:"
pull-request-branch-name:
separator: "-"
ceph-main/.github/labeler.yml:
api-change:
- src/pybind/mgr/dashboard/openapi.yaml
build/ops:
- "**/CMakeLists.txt"
- admin/**
- ceph.spec.in
- cmake/**
- debian/**
- do_cmake.sh
- do_freebsd.sh
- install-deps.sh
- keys/**
- make-debs.sh
- make-dist
- make-srpm.sh
- run-make-check.sh
- win32_build.sh
- win32_deps_build.sh
documentation:
- AUTHORS
- CONTRIBUTING.rst
- COPYING*
- CodingStyle
- PendingReleaseNotes
- README.*
- SubmittingPatches*
- doc/**
- doc_deps.deb.txt
- man/**
- "**/*.+(rst|md)"
libcephsqlite:
- doc/rados/api/libcephsqlite.rst
- qa/suites/rados/basic/tasks/libcephsqlite.yaml
- qa/workunits/rados/test_libcephsqlite.sh
- src/SimpleRADOSStriper.*
- src/include/libcephsqlite.h
- src/libcephsqlite.cc
- src/test/libcephsqlite/**
mon:
- doc/man/8/ceph-mon.rst
- doc/man/8/monmaptool.rst
- doc/mon/**
- qa/workunits/mon/**
- src/mon/**
- src/test/mon/**
mgr:
- doc/mgr/**
- src/mgr/**
- src/pybind/mgr/ceph_module.pyi
- src/pybind/mgr/mgr_module.py
- src/pybind/mgr/mgr_util.py
- src/pybind/mgr/object_format.py
- src/pybind/mgr/requirements.txt
- src/pybind/mgr/tox.ini
- src/test/mgr/**
pybind:
- src/pybind/cephfs/**
- src/pybind/mgr/**
- src/pybind/rados/**
- src/pybind/rbd/**
- src/pybind/rgw/**
- src/pybind/**
- src/python-common/**
common:
- src/common/**
- src/global/**
- src/log/**
cephadm:
- doc/cephadm/**
- doc/dev/cephadm/**
- doc/man/8/cephadm.rst
- qa/suites/orch/cephadm/**
- qa/tasks/cephadm.py
- qa/tasks/cephadm_cases
- qa/tasks/mgr/test_cephadm_orchestrator.py
- qa/tasks/mgr/test_orchestrator_cli.py
- qa/workunits/cephadm/**
- src/cephadm/**
- src/pybind/mgr/cephadm/**
- src/python-common/**
orchestrator:
- doc/mgr/orchestrator.rst
- doc/mgr/orchestrator_modules.rst
- src/pybind/mgr/orchestrator/**
- src/pybind/mgr/rook/**
- src/pybind/mgr/test_orchestrator/**
- qa/tasks/mgr/test_orchestrator_cli.py
rook:
- doc/mgr/rook.rst
- src/pybind/mgr/rook/**
- qa/tasks/rook.py
- qa/suites/orch/rook/smoke/**
bluestore:
- src/os/bluestore/**
core:
- doc/man/8/ceph-authtool.rst
- doc/man/8/ceph-conf.rst
- doc/man/8/ceph-create-keys.rst
- doc/man/8/ceph-kvstore-tool.rst
- doc/man/8/ceph-mon.rst
- doc/man/8/ceph-objectstore-tool.rst
- doc/man/8/ceph-osd.rst
- doc/man/8/ceph.rst
- doc/man/8/crushtool.rst
- doc/man/8/monmaptool.rst
- doc/man/8/rados.rst
- doc/rados/**
- qa/standalone/**
- qa/suites/rados/**
- qa/workunits/erasure-code/**
- qa/workunits/mgr/**
- qa/workunits/mon/**
- qa/workunits/objectstore/**
- qa/workunits/rados/**
- src/ceph.in
- src/ceph_osd.cc
- src/ceph_mon.cc
- src/blk/**
- src/crush/*
- src/erasure-code/**
- src/kv/**
- src/librados/**
- src/mgr/**
- src/mon/**
- src/msg/**
- src/os/**
- src/osd/**
- src/tools/ceph_dedup_tool.cc
- src/tools/ceph_kvstore_tool.cc
- src/tools/ceph_monstore_tool.cc
- src/tools/ceph_objectstore_tool.*
- src/tools/crushtool.cc
- src/tools/kvstore_tool.*
- src/tools/monmaptool.cc
- src/tools/osdmaptool.cc
- src/tools/rados/**
- src/test/librados/**
- src/test/osd/**
crimson:
- doc/dev/crimson/**
- src/crimson/**
- src/test/crimson/**
- qa/suites/crimson-rados/**
dashboard:
- src/pybind/mgr/dashboard/**
- qa/suites/rados/dashboard/**
- qa/tasks/mgr/test_dashboard.py
- qa/tasks/mgr/dashboard/**
- monitoring/**
- doc/mgr/dashboard.rst
- doc/dev/developer_guide/dash-devel.rst
cephfs:
- doc/cephfs/**
- doc/man/8/ceph-fuse.rst
- doc/man/8/ceph-mds.rst
- doc/man/8/ceph-syn.rst
- doc/man/8/mount.ceph.rst
- doc/man/8/mount.fuse.ceph.rst
- qa/suites/fs/**
- qa/suites/multimds/**
- qa/tasks/ceph_fuse.py
- qa/tasks/cephfs/**
- qa/tasks/cephfs_test_runner.py
- qa/tasks/fs.py
- qa/tasks/kclient.py
- qa/tasks/mds_creation_failure.py
- qa/tasks/mds_thrash.py
- src/ceph_fuse.cc
- src/ceph_mds.cc
- src/ceph_syn.cc
- src/client/**
- src/include/ceph_fs.h
- src/include/ceph_fuse.h
- src/include/cephfs/**
- src/include/filepath.h
- src/include/frag.h
- src/include/fs_types.h
- src/libcephfs.cc
- src/mds/**
- src/mon/MDSMonitor.*
- src/mon/FSCommands.*
- src/pybind/cephfs/**
- src/pybind/mgr/mds_autoscaler/**
- src/pybind/mgr/status/**
- src/pybind/mgr/volumes/**
- src/test/fs/**
- src/test/libcephfs/**
- src/tools/cephfs/**
- src/tools/cephfs_mirror/**
CI:
- .github/**
rbd:
- doc/man/8/rbd*
- doc/rbd/**
- qa/suites/rbd/**
- qa/workunits/rbd/**
- src/include/rbd/**
- src/librbd/**
- src/pybind/mgr/rbd_support/**
- src/pybind/rbd/**
- src/test/librbd/**
- src/test/rbd_mirror/**
- src/tools/rbd/**
- src/tools/rbd_ggate/**
- src/tools/rbd_mirror/**
- src/tools/rbd_nbd/**
- src/tools/rbd_wnbd/**
rgw:
- qa/suites/rgw/**
- qa/tasks/rgw*
- qa/tasks/s3*
- src/cls/cmpomap/**
- src/cls/fifo/**
- src/cls/otp/**
- src/cls/queue/**
- src/cls/rgw/**
- src/cls/rgw_gc/**
- src/cls/timeindex/**
- src/mrgw.sh
- src/rgw/**
- src/test/cls_rgw/**
- src/test/librgw_*
- src/test/rgw/**
- src/test/test_rgw*
ceph-volume:
- src/ceph-volume/**
- doc/ceph-volume/**
- src/python-common/ceph/deployment/drive_group.py
- src/python-common/ceph/deployment/drive_selection/**
tests:
- qa/tasks/**
- qa/workunits/**
- src/test/**
nfs:
- src/pybind/mgr/nfs/**
- src/pybind/mgr/cephadm/services/nfs.py
- src/pybind/mgr/cephadm/templates/services/nfs/ganesha.conf.j2
- src/pybind/mgr/dashboard/controllers/nfs.py
- src/pybind/mgr/dashboard/tests/test_nfs.py
- qa/tasks/cephfs/test_nfs.py
- doc/mgr/nfs.rst
- doc/cephfs/nfs.rst
- doc/cephadm/nfs.rst
- doc/radosgw/nfs.rst
- doc/dev/vstart-ganesha.rst
monitoring:
- doc/cephadm/monitoring.rst
- src/pybind/mgr/cephadm/services/monitoring.py
- src/pybind/mgr/cephadm/templates/services/alertmanager/**
- src/pybind/mgr/cephadm/templates/services/grafana/**
- src/pybind/mgr/cephadm/templates/services/prometheus/**
- src/pybind/mgr/dashboard/ci/check_grafana_dashboards.py
- src/pybind/mgr/prometheus/**
- monitoring/**
telemetry:
- doc/mgr/telemetry.rst
- qa/suites/upgrade/telemetry-upgrade/**
- qa/workunits/test_telemetry_pacific.sh
- qa/workunits/test_telemetry_pacific_x.sh
- qa/workunits/test_telemetry_quincy.sh
- qa/workunits/test_telemetry_quincy_x.sh
- src/pybind/mgr/telemetry/**
- src/telemetry/**
| 6,522 | 21.728223 | 65 | yml |
null | ceph-main/.github/milestone.yml | base-branch:
- "(luminous)"
- "(nautilus)"
- "(octopus)"
- "(pacific)"
- "(quincy)"
- "(reef)"
| 107 | 12.5 | 16 | yml |
null | ceph-main/.github/pull_request_template.md |
<!--
- Please give your pull request a title like
[component]: [short description]
- Please use this format for each git commit message:
[component]: [short description]
[A longer multiline description]
Fixes: [ticket URL on tracker.ceph.com, create one if necessary]
Signed-off-by: [Your Name] <[your email]>
For examples, use "git log".
-->
## Contribution Guidelines
- To sign and title your commits, please refer to [Submitting Patches to Ceph](https://github.com/ceph/ceph/blob/main/SubmittingPatches.rst).
- If you are submitting a fix for a stable branch (e.g. "pacific"), please refer to [Submitting Patches to Ceph - Backports](https://github.com/ceph/ceph/blob/master/SubmittingPatches-backports.rst) for the proper workflow.
## Checklist
- Tracker (select at least one)
- [ ] References tracker ticket
- [ ] Very recent bug; references commit where it was introduced
- [ ] New feature (ticket optional)
- [ ] Doc update (no ticket needed)
- [ ] Code cleanup (no ticket needed)
- Component impact
- [ ] Affects [Dashboard](https://tracker.ceph.com/projects/dashboard/issues/new), opened tracker ticket
- [ ] Affects [Orchestrator](https://tracker.ceph.com/projects/orchestrator/issues/new), opened tracker ticket
- [ ] No impact that needs to be tracked
- Documentation (select at least one)
- [ ] Updates relevant documentation
- [ ] No doc update is appropriate
- Tests (select at least one)
- [ ] Includes [unit test(s)](https://docs.ceph.com/en/latest/dev/developer_guide/tests-unit-tests/)
- [ ] Includes [integration test(s)](https://docs.ceph.com/en/latest/dev/developer_guide/testing_integration_tests/)
- [ ] Includes bug reproducer
- [ ] No tests
<details>
<summary>Show available Jenkins commands</summary>
- `jenkins retest this please`
- `jenkins test classic perf`
- `jenkins test crimson perf`
- `jenkins test signed`
- `jenkins test make check`
- `jenkins test make check arm64`
- `jenkins test submodules`
- `jenkins test dashboard`
- `jenkins test dashboard cephadm`
- `jenkins test api`
- `jenkins test docs`
- `jenkins render docs`
- `jenkins test ceph-volume all`
- `jenkins test ceph-volume tox`
- `jenkins test windows`
</details>
| 2,245 | 33.030303 | 223 | md |
null | ceph-main/.github/workflows/create-backport-trackers.yml | ---
name: Create backport trackers for trackers in "Pending Backport" state
on:
# To manually trigger this: https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#workflow_dispatch
workflow_dispatch:
inputs:
issues:
description: 'whitespace-separated list of issue numbers'
type: string
default: ''
debug:
description: '--debug: Show debug-level messages'
default: false
type: boolean
resolveParent:
description: '--resolve-parent: Resolve parent issue if all backports resolved/rejected'
default: false
type: boolean
force:
description: >
--force: When issue numbers provided, process them even if not in
'Pending Backport' status.
Otherwise, process all issues in 'Pending Backport' status even if
          already processed (tag 'backport_processed' added)
default: false
type: boolean
dryRun:
description: '--dry-run: Do not write anything to Redmine'
default: false
type: boolean
schedule:
# Every 5 minutes: https://crontab.guru/every-5-minutes
- cron: '*/5 * * * *'
jobs:
create-backports:
runs-on: ubuntu-latest
if: github.ref == 'refs/heads/main'
steps:
- uses: Bhacaz/checkout-files@e3e34e7daef91a5f237485bb88a260aee4be29dd
with:
files: src/script/backport-create-issue src/script/requirements.backport-create-issue.txt
- uses: actions/setup-python@v4
with:
python-version: '>=3.6'
cache: 'pip'
cache-dependency-path: src/script/requirements.backport-create-issue.txt
- run: pip install -r src/script/requirements.backport-create-issue.txt
- run: python3 src/script/backport-create-issue ${{ inputs.debug && '--debug' || '' }} ${{ inputs.resolveParent && '--resolve-parent' || '' }} ${{ inputs.force && '--force' || '' }} ${{ inputs.dryRun && '--dry-run' || '' }} ${{ inputs.issues }}
env:
REDMINE_API_KEY: ${{ secrets.REDMINE_API_KEY_BACKPORT_BOT }}
| 2,105 | 40.294118 | 250 | yml |
null | ceph-main/.github/workflows/needs-rebase.yml | ---
name: "Pull Request Needs Rebase?"
on:
pull_request_target:
types: [opened, synchronize, reopened]
jobs:
needs-rebase:
runs-on: ubuntu-latest
steps:
# eps1lon/[email protected]
# (NOTE: pinning the action to a given commit is a security best-practice:
# https://docs.github.com/en/free-pro-team@latest/actions/learn-github-actions/security-hardening-for-github-actions)
- name: Check if PR needs rebase
uses: eps1lon/actions-label-merge-conflict@b8bf8341285ec9a4567d4318ba474fee998a6919
with:
repoToken: "${{ secrets.GITHUB_TOKEN }}"
dirtyLabel: "needs-rebase"
commentOnDirty: "This pull request can no longer be automatically merged: a rebase is needed and changes have to be manually resolved"
| 804 | 41.368421 | 144 | yml |
null | ceph-main/.github/workflows/pr-check-deps.yml | name: Check PR dependencies
on: [pull_request_target]
jobs:
check_dependencies:
runs-on: ubuntu-latest
name: Check PR Dependencies
steps:
- uses: gregsdennis/dependencies-action@80b5ffec566913b1494d5a8577ab0d60e476271d
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
| 298 | 26.181818 | 84 | yml |
null | ceph-main/.github/workflows/pr-checklist.yml | ---
name: "Pull Request Checklist"
on:
pull_request:
types:
- edited
- opened
- reopened
jobs:
pr_checklist:
runs-on: ubuntu-latest
name: Verify
steps:
- name: Sleep for 30 seconds
run: sleep 30s
shell: bash
- name: Action
id: checklist
uses: ceph/ceph-pr-checklist-action@32e92d1a2a7c9991ed51de5fccb2296551373d60
| 396 | 18.85 | 84 | yml |
null | ceph-main/.github/workflows/pr-triage.yml | ---
name: "Pull Request Triage"
on: pull_request_target
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
jobs:
pr-triage:
runs-on: ubuntu-latest
steps:
- name: Assign labels based on modified files
# https://github.com/marketplace/actions/labeler?version=v4.0.2
uses: actions/labeler@5c7539237e04b714afd8ad9b4aed733815b9fab4
with:
sync-labels: ''
repo-token: "${{ secrets.GITHUB_TOKEN }}"
- name: Assign to Dashboard project
# https://github.com/marketplace/actions/add-to-github-projects?version=v0.4.1
uses: actions/add-to-project@4756e6330fe1e0a736690d3cfd9f11c9399c2ed4
with:
project-url: https://github.com/orgs/ceph/projects/2
github-token: ${{ secrets.GITHUB_TOKEN }}
labeled: dashboard
      - name: Assign milestone based on target branch name
# https://github.com/marketplace/actions/pull-request-milestone?version=v1.3.0
uses: iyu/actions-milestone@e93115c90ff7bcddee71086e9253f1b6a5f4b48a
with:
configuration-path: .github/milestone.yml
repo-token: "${{ secrets.GITHUB_TOKEN }}"
| 1,154 | 38.827586 | 86 | yml |
null | ceph-main/.github/workflows/stale.yml | # Configuration for stale action workflow - https://github.com/actions/stale
name: 'Close stale issues and PRs'
on:
schedule:
- cron: '0 * * * *'
jobs:
stale:
runs-on: ubuntu-latest
steps:
- uses: actions/stale@v5
with:
# PAT for GitHub API authentication
repo-token: "${{ secrets.GITHUB_TOKEN }}"
# Comment on the staled PRs
stale-pr-message: >
This pull request has been automatically marked as stale because it
has not had any activity for 60 days. It will be closed if no further
activity occurs for another 30 days.
If you are a maintainer or core committer, please follow-up on this
pull request to identify what steps should be taken by the author to move this
proposed change forward.
If you are the author of this pull request, thank you for your proposed
contribution. If you believe this change is still appropriate, please
ensure that any feedback has been addressed and ask for a code review.
# Comment on the staled PRs while closed
close-pr-message: >
This pull request has been automatically closed because there has
been no activity for 90 days. Please feel free to reopen this pull
request (or open a new one) if the proposed change is still
appropriate. Thank you for your contribution!
# Idle number of days before making PRs stale (exempts Issues)
days-before-pr-stale: 60
# Idle number of days before closing stale PRs (exempts Issues)
days-before-pr-close: 30
# Label to apply on staled PRs
stale-pr-label: 'stale'
# Labels on PRs exempted from stale
exempt-pr-labels: 'pinned,security'
# Exempt all PRs with milestones from stale (also exempts Issues)
exempt-all-pr-milestones: true
# Max number of operations per run
operations-per-run: 100
# Change the order used to fetch the issues and pull requests from GitHub
# So we now start with the oldest PRs and work our way backwards
ascending: true
| 2,417 | 39.983051 | 96 | yml |
null | ceph-main/bin/git-archive-all.sh | #!/usr/bin/env bash
#
# File: git-archive-all.sh
#
# Description: A utility script that builds an archive file(s) of all
# git repositories and submodules in the current path.
# Useful for creating a single tarfile of a git super-
# project that contains other submodules.
#
# Examples: Use git-archive-all.sh to create archive distributions
# from git repositories. To use, simply do:
#
# cd $GIT_DIR; git-archive-all.sh
#
# where $GIT_DIR is the root of your git superproject.
#
# License: GPL3
#
###############################################################################
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
###############################################################################
# DEBUGGING
set -e
set -C # noclobber
# TRAP SIGNALS
trap 'cleanup' QUIT EXIT
# For security reasons, explicitly set the internal field separator
# to newline, space, tab
OLD_IFS=$IFS
IFS='
'
function cleanup () {
rm -rf $TMPDIR
IFS="$OLD_IFS"
}
function usage () {
echo "Usage is as follows:"
echo
echo "$PROGRAM <--version>"
echo " Prints the program version number on a line by itself and exits."
echo
echo "$PROGRAM <--usage|--help|-?>"
echo " Prints this usage output and exits."
echo
echo "$PROGRAM [--format <fmt>] [--prefix <path>] [--verbose|-v] [--separate|-s]"
echo " [--tree-ish|-t <tree-ish>] [--ignore pattern] [output_file]"
echo " Creates an archive for the entire git superproject, and its submodules"
echo " using the passed parameters, described below."
echo
echo " If '--format' is specified, the archive is created with the named"
echo " git archiver backend. Obviously, this must be a backend that git archive"
echo " understands. The format defaults to 'tar' if not specified."
echo
echo " If '--prefix' is specified, the archive's superproject and all submodules"
echo " are created with the <path> prefix named. The default is to not use one."
echo
echo " If '--separate' or '-s' is specified, individual archives will be created"
echo " for each of the superproject itself and its submodules. The default is to"
echo " concatenate individual archives into one larger archive."
echo
echo " If '--tree-ish' is specified, the archive will be created based on whatever"
echo " you define the tree-ish to be. Branch names, commit hash, etc. are acceptable."
echo " Defaults to HEAD if not specified. See git archive's documentation for more"
echo " information on what a tree-ish is."
echo
echo " If '--ignore' is specified, we will filter out any submodules that"
echo " match the specified pattern."
echo
echo " If 'output_file' is specified, the resulting archive is created as the"
echo " file named. This parameter is essentially a path that must be writeable."
echo " When combined with '--separate' ('-s') this path must refer to a directory."
echo " Without this parameter or when combined with '--separate' the resulting"
echo " archive(s) are named with a dot-separated path of the archived directory and"
echo " a file extension equal to their format (e.g., 'superdir.submodule1dir.tar')."
echo
echo " If '--verbose' or '-v' is specified, progress will be printed."
}
function version () {
echo "$PROGRAM version $VERSION"
}
# Internal variables and initializations.
readonly PROGRAM=`basename "$0"`
readonly VERSION=0.2
OLD_PWD="`pwd`"
TMPDIR=`mktemp -d "${TMPDIR:-/tmp}/$PROGRAM.XXXXXX"`
TMPFILE=`mktemp "$TMPDIR/$PROGRAM.XXXXXX"` # Create a place to store our work's progress
TOARCHIVE=`mktemp "$TMPDIR/$PROGRAM.toarchive.XXXXXX"`
OUT_FILE=$OLD_PWD # assume "this directory" without a name change by default
SEPARATE=0
VERBOSE=0
TARCMD=tar
[[ $(uname) == "Darwin" ]] && TARCMD=gnutar
FORMAT=tar
PREFIX=
TREEISH=HEAD
IGNORE=
# RETURN VALUES/EXIT STATUS CODES
readonly E_BAD_OPTION=254
readonly E_UNKNOWN=255
# Process command-line arguments.
while test $# -gt 0; do
case $1 in
--format )
shift
FORMAT="$1"
shift
;;
--prefix )
shift
PREFIX="$1"
shift
;;
--separate | -s )
shift
SEPARATE=1
;;
--tree-ish | -t )
shift
TREEISH="$1"
shift
;;
--ignore )
shift
IGNORE="$1"
shift
;;
--version )
version
exit
;;
--verbose | -v )
shift
VERBOSE=1
;;
-? | --usage | --help )
usage
exit
;;
-* )
echo "Unrecognized option: $1" >&2
usage
exit $E_BAD_OPTION
;;
* )
break
;;
esac
done
if [ ! -z "$1" ]; then
OUT_FILE="$1"
shift
fi
# Validate parameters; error early, error often.
if [ $SEPARATE -eq 1 -a ! -d $OUT_FILE ]; then
echo "When creating multiple archives, your destination must be a directory."
echo "If it's not, you risk being surprised when your files are overwritten."
exit
elif [ `git config -l | grep -q '^core\.bare=false'; echo $?` -ne 0 ]; then
echo "$PROGRAM must be run from a git working copy (i.e., not a bare repository)."
exit
fi
# Create the superproject's git-archive
if [ $VERBOSE -eq 1 ]; then
echo -n "creating superproject archive..."
fi
git archive --format=$FORMAT --prefix="$PREFIX" $TREEISH > $TMPDIR/$(basename "$(pwd)").$FORMAT
if [ $VERBOSE -eq 1 ]; then
echo "done"
fi
echo $TMPDIR/$(basename "$(pwd)").$FORMAT >| $TMPFILE # clobber on purpose
superfile=`head -n 1 $TMPFILE`
if [ $VERBOSE -eq 1 ]; then
echo -n "looking for subprojects..."
fi
# find all '.git' dirs, these show us the remaining to-be-archived dirs
# we only want directories that are below the current directory
find . -mindepth 2 -name '.git' -type d -print | sed -e 's/^\.\///' -e 's/\.git$//' >> $TOARCHIVE
# as of version 1.7.8, git places the submodule .git directories under the superproject's .git dir
# the submodules get a .git file that points to their .git dir. we need to find all of these too
find . -mindepth 2 -name '.git' -type f -print | xargs grep -l "gitdir" | sed -e 's/^\.\///' -e 's/\.git$//' >> $TOARCHIVE
if [ -n "$IGNORE" ]; then
cat $TOARCHIVE | grep -v $IGNORE > $TOARCHIVE.new
mv $TOARCHIVE.new $TOARCHIVE
fi
if [ $VERBOSE -eq 1 ]; then
echo "done"
echo " found:"
cat $TOARCHIVE | while read arch
do
echo " $arch"
done
fi
if [ $VERBOSE -eq 1 ]; then
echo -n "archiving submodules..."
fi
while read path; do
TREEISH=$(git submodule | grep "^ .*${path%/} " | cut -d ' ' -f 2) # git submodule does not list trailing slashes in $path
cd "$path"
git archive --format=$FORMAT --prefix="${PREFIX}$path" ${TREEISH:-HEAD} > "$TMPDIR"/"$(echo "$path" | sed -e 's/\//./g')"$FORMAT
if [ $FORMAT == 'zip' ]; then
# delete the empty directory entry; zipped submodules won't unzip if we don't do this
zip -d "$(tail -n 1 $TMPFILE)" "${PREFIX}${path%/}" >/dev/null # remove trailing '/'
fi
echo "$TMPDIR"/"$(echo "$path" | sed -e 's/\//./g')"$FORMAT >> $TMPFILE
cd "$OLD_PWD"
done < $TOARCHIVE
if [ $VERBOSE -eq 1 ]; then
echo "done"
fi
if [ $VERBOSE -eq 1 ]; then
echo -n "concatenating archives into single archive..."
fi
# Concatenate archives into a super-archive.
if [ $SEPARATE -eq 0 ]; then
if [ $FORMAT == 'tar' ]; then
sed -e '1d' $TMPFILE | while read file; do
$TARCMD --concatenate -f "$superfile" "$file" && rm -f "$file"
done
elif [ $FORMAT == 'zip' ]; then
sed -e '1d' $TMPFILE | while read file; do
# zip incorrectly stores the full path, so cd and then grow
cd `dirname "$file"`
zip -g "$superfile" `basename "$file"` && rm -f "$file"
done
cd "$OLD_PWD"
fi
echo "$superfile" >| $TMPFILE # clobber on purpose
fi
if [ $VERBOSE -eq 1 ]; then
echo "done"
fi
if [ $VERBOSE -eq 1 ]; then
echo -n "moving archive to $OUT_FILE..."
fi
while read file; do
mv "$file" "$OUT_FILE"
done < $TMPFILE
if [ $VERBOSE -eq 1 ]; then
echo "done"
fi
| 9,140 | 31.073684 | 132 | sh |
null | ceph-main/ceph-menv/build_links.sh | #!/bin/bash
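# Create one symlink per binary found in $ROOT/build/bin, all pointing at mdo.sh,
# and record the Ceph source root in .menvroot for mdo.sh to pick up.
# Usage: ./build_links.sh [ceph-source-root]   (defaults to $HOME/ceph)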
DIR=`dirname $0`
ROOT=$1
[ "$ROOT" == "" ] && ROOT="$HOME/ceph"
mkdir -p $DIR/bin
echo $PWD
for f in `ls $ROOT/build/bin`; do
echo $f
ln -sf ../mdo.sh $DIR/bin/$f
done
echo "MRUN_CEPH_ROOT=$ROOT" > $DIR/.menvroot
| 246 | 13.529412 | 44 | sh |
null | ceph-main/ceph-menv/mdo.sh | #!/bin/bash
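# Dispatcher behind the symlinks created by build_links.sh: run the requested
# command straight from the build tree, or hand it to "mrun <cluster> <cmd>"
# when MRUN_CLUSTER is set (e.g. after sourcing mset.sh).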
cmd=`basename $0`
MENV_ROOT=`dirname $0`/..
if [ -f $MENV_ROOT/.menvroot ]; then
. $MENV_ROOT/.menvroot
fi
[ "$MRUN_CEPH_ROOT" == "" ] && MRUN_CEPH_ROOT=$HOME/ceph
if [ "$MRUN_CLUSTER" == "" ]; then
${MRUN_CEPH_ROOT}/build/bin/$cmd "$@"
exit $?
fi
${MRUN_CEPH_ROOT}/src/mrun $MRUN_CLUSTER $cmd "$@"
| 327 | 18.294118 | 56 | sh |
null | ceph-main/ceph-menv/mset.sh | get_color() {
s=$1
        sum=1 # start at 1 so that 'c1' isn't green, which wouldn't contrast with the rest of my prompt
for i in `seq 1 ${#s}`; do
c=${s:$((i-1)):1};
o=`printf '%d' "'$c"`
sum=$((sum+$o))
done
echo $sum
}
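# With no argument, clear MRUN_CLUSTER and the menv prompt; otherwise select
# the given cluster and derive a per-cluster prompt colour from its name.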
if [ "$1" == "" ]; then
unset MRUN_CLUSTER
unset MRUN_PROMPT
else
export MRUN_CLUSTER=$1
export MRUN_PROMPT='['${MRUN_CLUSTER}'] '
col=$(get_color $1)
MRUN_PROMPT_COLOR=$((col%7+31))
fi
| 533 | 23.272727 | 96 | sh |
null | ceph-main/cmake/modules/CephCheck_link.c | int main()
{}
| 14 | 4 | 10 | c |
null | ceph-main/cmake/modules/FindStdFilesystem_test.cc | #include <filesystem>
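// Minimal program that exercises std::filesystem; presumably compiled and
// linked by the FindStdFilesystem CMake check to confirm the feature works.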
namespace fs = std::filesystem;
int main() {
fs::create_directory("sandbox");
fs::remove_all("sandbox");
}
| 139 | 14.555556 | 36 | cc |
null | ceph-main/cmake/modules/patch-dpdk-conf.sh | #!/bin/sh
# -*- mode:sh; tab-width:4; indent-tabs-mode:nil -*
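# setconf <key> <val>: set key=val in the DPDK .config, replacing an existing
# assignment if present or appending a new one otherwise.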
setconf() {
local key=$1
local val=$2
if grep -q ^$key= ${conf}; then
sed -i -e "s:^$key=.*$:$key=$val:g" ${conf}
else
echo $key=$val >> ${conf}
fi
}
conf=$1/.config
shift
machine=$1
shift
arch=$1
shift
numa=$1
shift
setconf CONFIG_RTE_MACHINE "${machine}"
setconf CONFIG_RTE_ARCH "${arch}"
# Disable experimental features
setconf CONFIG_RTE_NEXT_ABI n
setconf CONFIG_RTE_LIBRTE_MBUF_OFFLOAD n
# Disable unmaintained features
setconf CONFIG_RTE_LIBRTE_POWER n
setconf CONFIG_RTE_EAL_IGB_UIO n
setconf CONFIG_RTE_LIBRTE_KNI n
setconf CONFIG_RTE_KNI_KMOD n
setconf CONFIG_RTE_KNI_PREEMPT_DEFAULT n
# no pdump
setconf CONFIG_RTE_LIBRTE_PDUMP n
# no vm support
setconf CONFIG_RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT n
setconf CONFIG_RTE_LIBRTE_VHOST n
setconf CONFIG_RTE_LIBRTE_VHOST_NUMA n
setconf CONFIG_RTE_LIBRTE_VMXNET3_PMD n
setconf CONFIG_RTE_LIBRTE_PMD_VHOST n
setconf CONFIG_RTE_APP_EVENTDEV n
setconf CONFIG_RTE_MAX_VFIO_GROUPS 64
# no test
setconf CONFIG_RTE_APP_TEST n
setconf CONFIG_RTE_TEST_PMD n
# async/dpdk does not like it
setconf CONFIG_RTE_MBUF_REFCNT_ATOMIC n
# balanced allocation of hugepages
setconf CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES "${numa}"
| 1,272 | 20.948276 | 54 | sh |
null | ceph-main/doc/README.md | 0 | 0 | 0 | md |
|
null | ceph-main/doc/conf.py | import fileinput
import glob
import logging
import os
import shutil
import sys
import yaml
import sphinx.util
top_level = \
os.path.dirname(
os.path.dirname(
os.path.abspath(__file__)))
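# Import the 'rgw' Python binding once and stash it under an alias;
# doc/_ext/ceph_commands.py restores it into sys.modules['rgw'] after loading
# the mgr modules.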
pybind_rgw_mod = __import__('rgw', globals(), locals(), [], 0)
sys.modules['pybind_rgw_mod'] = pybind_rgw_mod
def parse_ceph_release():
with open(os.path.join(top_level, 'src/ceph_release')) as f:
lines = f.readlines()
assert(len(lines) == 3)
# 16, pacific, dev
version, codename, status = [line.strip() for line in lines]
return version, codename, status
def latest_stable_release():
with open(os.path.join(top_level, 'doc/releases/releases.yml')) as input:
releases = yaml.safe_load(input)['releases']
# get the first release
return next(iter(releases.keys()))
def is_release_eol(codename):
with open(os.path.join(top_level, 'doc/releases/releases.yml')) as input:
releases = yaml.safe_load(input)['releases']
return 'actual_eol' in releases.get(codename, {})
# project information
project = 'Ceph'
copyright = ('2016, Ceph authors and contributors. '
'Licensed under Creative Commons Attribution Share Alike 3.0 '
'(CC-BY-SA-3.0)')
version, codename, release = parse_ceph_release()
pygments_style = 'sphinx'
# HTML output options
html_theme = 'ceph'
html_theme_options = {
'logo_only': True,
'display_version': False,
'prev_next_buttons_location': 'bottom',
'style_external_links': False,
'vcs_pageview_mode': 'edit',
'style_nav_header_background': '#eee',
# Toc options
'collapse_navigation': True,
'sticky_navigation': True,
'navigation_depth': 4,
'includehidden': True,
'titles_only': False
}
html_theme_path = ['_themes']
html_title = "Ceph Documentation"
html_logo = 'logo.png'
html_context = {'is_release_eol': is_release_eol(codename)}
html_favicon = 'favicon.ico'
html_show_sphinx = False
html_static_path = ["_static"]
html_sidebars = {
'**': ['smarttoc.html', 'searchbox.html']
}
html_css_files = ['css/custom.css']
# general configuration
templates_path = ['_templates']
source_suffix = '.rst'
exclude_patterns = ['**/.#*',
'**/*~',
'start/quick-common.rst',
'**/*.inc.rst']
if tags.has('man'): # noqa: F821
master_doc = 'man_index'
exclude_patterns += ['index.rst',
'architecture.rst',
'glossary.rst',
'release*.rst',
'api/*',
'cephadm/*',
'cephfs/*',
'dev/*',
'governance.rst',
'foundation.rst',
'install/*',
'mon/*',
'rados/*',
'mgr/*',
'ceph-volume/*',
'radosgw/*',
'rbd/*',
'start/*',
'releases/*']
else:
master_doc = 'index'
exclude_patterns += ['man_index.rst']
build_with_rtd = os.environ.get('READTHEDOCS') == 'True'
sys.path.insert(0, os.path.abspath('_ext'))
smartquotes_action = "qe"
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.graphviz',
'sphinx.ext.mathjax',
'sphinx.ext.todo',
'sphinx-prompt',
'sphinx_autodoc_typehints',
'sphinx_substitution_extensions',
'breathe',
'ceph_commands',
'ceph_releases',
'ceph_confval',
'sphinxcontrib.mermaid',
'sphinxcontrib.openapi',
'sphinxcontrib.seqdiag',
]
ditaa = shutil.which("ditaa")
if ditaa is not None:
# in case we don't have binfmt_misc enabled or jar is not registered
ditaa_args = ['-jar', ditaa]
ditaa = 'java'
extensions += ['sphinxcontrib.ditaa']
else:
extensions += ['plantweb.directive']
plantweb_defaults = {
'engine': 'ditaa'
}
if build_with_rtd:
extensions += ['sphinx_search.extension']
# sphinx.ext.todo options
todo_include_todos = True
# sphinx_substitution_extensions options
rst_prolog = f"""
.. |stable-release| replace:: {latest_stable_release()}
"""
# breath options
breathe_default_project = "Ceph"
# see $(top_srcdir)/Doxyfile
breathe_build_directory = os.path.join(top_level, "build-doc")
breathe_projects = {"Ceph": os.path.join(top_level, breathe_build_directory)}
breathe_projects_source = {
"Ceph": (os.path.join(top_level, "src/include/rados"),
["rados_types.h", "librados.h"])
}
breathe_domain_by_extension = {'py': 'py',
'c': 'c', 'h': 'c',
'cc': 'cxx', 'hpp': 'cxx'}
breathe_doxygen_config_options = {
'EXPAND_ONLY_PREDEF': 'YES',
'MACRO_EXPANSION': 'YES',
'PREDEFINED': 'CEPH_RADOS_API= ',
'WARN_IF_UNDOCUMENTED': 'NO',
}
# graphviz options
graphviz_output_format = 'svg'
def generate_state_diagram(input_paths, output_path):
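    # Return a 'builder-inited' callback that regenerates the peering state
    # diagram (Graphviz .dot) from the given C++ sources.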
sys.path.append(os.path.join(top_level, 'doc', 'scripts'))
from gen_state_diagram import do_filter, StateMachineRenderer
inputs = [os.path.join(top_level, fn) for fn in input_paths]
output = os.path.join(top_level, output_path)
def process(app):
with fileinput.input(files=inputs) as f:
input = do_filter(f)
render = StateMachineRenderer()
render.read_input(input)
with open(output, 'w') as dot_output:
render.emit_dot(dot_output)
return process
# mocking ceph_module offered by ceph-mgr. `ceph_module` is required by
# mgr.mgr_module
class Dummy(object):
def __getattr__(self, _):
return lambda *args, **kwargs: None
class Mock(object):
__all__ = []
def __init__(self, *args, **kwargs):
pass
def __call__(self, *args, **kwargs):
return Mock()
@classmethod
def __getattr__(cls, name):
mock = type(name, (Dummy,), {})
mock.__module__ = __name__
return mock
# autodoc options
sys.modules['ceph_module'] = Mock()
if build_with_rtd:
autodoc_mock_imports = ['ceph']
pybinds = ['pybind/mgr',
'python-common']
else:
pybinds = ['pybind',
'pybind/mgr',
'python-common']
for c in pybinds:
pybind = os.path.join(top_level, 'src', c)
if pybind not in sys.path:
sys.path.insert(0, pybind)
# openapi
openapi_logger = sphinx.util.logging.getLogger('sphinxcontrib.openapi.openapi30')
openapi_logger.setLevel(logging.WARNING)
# seqdiag
seqdiag_antialias = True
seqdiag_html_image_format = 'SVG'
# ceph_confval
ceph_confval_imports = glob.glob(os.path.join(top_level,
'src/common/options',
'*.yaml.in'))
ceph_confval_mgr_module_path = 'src/pybind/mgr'
ceph_confval_mgr_python_path = 'src/pybind'
# handles edit-on-github and old version warning display
def setup(app):
if ditaa is None:
# add "ditaa" as an alias of "diagram"
from plantweb.directive import DiagramDirective
app.add_directive('ditaa', DiagramDirective)
app.connect('builder-inited',
generate_state_diagram(['src/osd/PeeringState.h',
'src/osd/PeeringState.cc'],
'doc/dev/peering_graph.generated.dot'))
| 7,505 | 27.648855 | 81 | py |
null | ceph-main/doc/_ext/ceph_commands.py | import io
import os
import sys
import contextlib
from docutils.parsers.rst import directives
from docutils.parsers.rst import Directive
from jinja2 import Template
from pcpp.preprocessor import Preprocessor
from sphinx.util import logging
from sphinx.util.console import bold
from importlib import reload
logger = logging.getLogger(__name__)
class Flags:
NOFORWARD = (1 << 0)
OBSOLETE = (1 << 1)
DEPRECATED = (1 << 2)
MGR = (1 << 3)
POLL = (1 << 4)
HIDDEN = (1 << 5)
VALS = {
NOFORWARD: 'no_forward',
OBSOLETE: 'obsolete',
DEPRECATED: 'deprecated',
MGR: 'mgr',
POLL: 'poll',
HIDDEN: 'hidden',
}
def __init__(self, fs):
self.fs = fs
def __contains__(self, other):
return other in str(self)
def __str__(self):
keys = Flags.VALS.keys()
es = {Flags.VALS[k] for k in keys if self.fs & k == k}
return ', '.join(sorted(es))
def __bool__(self):
return bool(str(self))
class CmdParam(object):
t = {
'CephInt': 'int',
'CephString': 'str',
'CephChoices': 'str',
'CephPgid': 'str',
'CephOsdName': 'str',
'CephPoolname': 'str',
'CephObjectname': 'str',
'CephUUID': 'str',
'CephEntityAddr': 'str',
'CephIPAddr': 'str',
'CephName': 'str',
'CephBool': 'bool',
'CephFloat': 'float',
'CephFilepath': 'str',
}
bash_example = {
'CephInt': '1',
'CephString': 'string',
'CephChoices': 'choice',
'CephPgid': '0',
'CephOsdName': 'osd.0',
'CephPoolname': 'poolname',
'CephObjectname': 'objectname',
'CephUUID': 'uuid',
'CephEntityAddr': 'entityaddr',
'CephIPAddr': '0.0.0.0',
'CephName': 'name',
'CephBool': 'true',
'CephFloat': '0.0',
'CephFilepath': '/path/to/file',
}
def __init__(self, type, name,
who=None, n=None, req=True, range=None, strings=None,
goodchars=None, positional=True):
self.type = type
self.name = name
self.who = who
self.n = n == 'N'
self.req = req != 'false'
self.range = range.split('|') if range else []
self.strings = strings.split('|') if strings else []
self.goodchars = goodchars
self.positional = positional != 'false'
assert who == None
def help(self):
advanced = []
if self.type != 'CephString':
advanced.append(self.type + ' ')
if self.range:
advanced.append('range= ``{}`` '.format('..'.join(self.range)))
if self.strings:
advanced.append('strings=({}) '.format(' '.join(self.strings)))
if self.goodchars:
advanced.append('goodchars= ``{}`` '.format(self.goodchars))
if self.n:
advanced.append('(can be repeated)')
advanced = advanced or ["(string)"]
return ' '.join(advanced)
def mk_example_value(self):
if self.type == 'CephChoices' and self.strings:
return self.strings[0]
if self.range:
return self.range[0]
return CmdParam.bash_example[self.type]
def mk_bash_example(self, simple):
val = self.mk_example_value()
if self.type == 'CephBool':
return '--' + self.name
if simple:
if self.type == "CephChoices" and self.strings:
return val
elif self.type == "CephString" and self.name != 'who':
return 'my_' + self.name
else:
return CmdParam.bash_example[self.type]
else:
return '--{}={}'.format(self.name, val)
class CmdCommand(object):
def __init__(self, prefix, args, desc,
module=None, perm=None, flags=0, poll=None):
self.prefix = prefix
self.params = sorted([CmdParam(**arg) for arg in args],
key=lambda p: p.req, reverse=True)
self.help = desc
self.module = module
self.perm = perm
self.flags = Flags(flags)
self.needs_overload = False
def is_reasonably_simple(self):
if len(self.params) > 3:
return False
if any(p.n for p in self.params):
return False
return True
def mk_bash_example(self):
simple = self.is_reasonably_simple()
line = ' '.join(['ceph', self.prefix] + [p.mk_bash_example(simple) for p in self.params])
return line
class Sig:
@staticmethod
def _parse_arg_desc(desc):
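        # Turn a "name=foo,type=CephString,..." descriptor into a dict;
        # fall back to the raw string for bare keyword literals.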
try:
return dict(kv.split('=', 1) for kv in desc.split(',') if kv)
except ValueError:
return desc
@staticmethod
def parse_cmd(cmd):
parsed = [Sig._parse_arg_desc(s) or s for s in cmd.split()]
prefix = [s for s in parsed if isinstance(s, str)]
params = [s for s in parsed if not isinstance(s, str)]
return ' '.join(prefix), params
@staticmethod
def parse_args(args):
return [Sig._parse_arg_desc(arg) for arg in args.split()]
TEMPLATE = '''
{%- set punct_char = '-' -%}
{# add a header if we have multiple commands in this section #}
{% if commands | length > 1 %}
{{ section }}
{{ section | length * '-' }}
{# and demote the subsection #}
{% set punct_char = '^' %}
{% endif %}
{% for command in commands %}
{{ command.prefix }}
{{ command.prefix | length * punct_char }}
{{ command.help | wordwrap(70) }}
:Example command:
.. code-block:: bash
{{ command.mk_bash_example() | wordwrap(70) | indent(9) }}
{%- if command.params %}
:Parameters:{% for param in command.params -%}
{{" -" | indent(12, not loop.first) }} **{% if param.positional %}{{param.name}}{% else %}--{{param.name}}{% endif %}**: {{ param.help() }}
{% endfor %}
{% endif %}
:Ceph Module: {{ command.module }}
:Required Permissions: ``{{ command.perm }}``
{%- if command.flags %}
:Command Flags: ``{{ command.flags }}``
{% endif %}
{% endfor %}
'''
def group_by_prefix(commands):
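    # Batch consecutive commands whose prefixes share the same first word,
    # yielding (first_word, [commands]) pairs for section-wise rendering.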
last_prefix = None
grouped = []
for cmd in commands:
prefix = cmd.prefix.split(' ', 1)[0]
if prefix == last_prefix:
grouped.append(cmd)
elif last_prefix is None:
last_prefix = prefix
grouped = [cmd]
else:
yield last_prefix, grouped
last_prefix = prefix
grouped = [cmd]
assert grouped
yield last_prefix, grouped
def render_commands(commands):
rendered = io.StringIO()
for section, grouped in group_by_prefix(commands):
logger.debug('rendering commands: %s: %d', section, len(grouped))
rendered.write(Template(TEMPLATE).render(
section=section,
commands=grouped))
return rendered.getvalue().split('\n')
class CephMgrCommands(Directive):
"""
extracts commands from specified mgr modules
"""
has_content = True
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = False
option_spec = {'python_path': directives.unchanged}
def _normalize_path(self, dirname):
my_dir = os.path.dirname(os.path.realpath(__file__))
src_dir = os.path.abspath(os.path.join(my_dir, '../..'))
return os.path.join(src_dir, dirname)
def _is_mgr_module(self, dirname, name):
if not os.path.isdir(os.path.join(dirname, name)):
return False
if not os.path.isfile(os.path.join(dirname, name, '__init__.py')):
return False
return name not in ['tests']
@contextlib.contextmanager
def mocked_modules(self):
# src/pybind/mgr/tests
from tests import mock
mock_imports = ['rados',
'rbd',
'cephfs',
'dateutil',
'dateutil.parser']
# make dashboard happy
mock_imports += ['OpenSSL',
'jwt',
'bcrypt',
'jsonpatch',
'rook.rook_client',
'rook.rook_client.ceph',
'rook.rook_client._helper',
'cherrypy=3.2.3']
# make diskprediction_local happy
mock_imports += ['numpy',
'scipy']
# make restful happy
mock_imports += ['pecan',
'pecan.rest',
'pecan.hooks',
'werkzeug',
'werkzeug.serving']
for m in mock_imports:
args = {}
parts = m.split('=', 1)
mocked = parts[0]
if len(parts) > 1:
args['__version__'] = parts[1]
sys.modules[mocked] = mock.Mock(**args)
try:
yield
finally:
for m in mock_imports:
mocked = m.split('=', 1)[0]
sys.modules.pop(mocked)
def _collect_module_commands(self, name):
with self.mocked_modules():
logger.info(bold(f"loading mgr module '{name}'..."))
mgr_mod = __import__(name, globals(), locals(), [], 0)
reload(mgr_mod)
from tests import M
def subclass(x):
try:
return issubclass(x, M)
except TypeError:
return False
ms = [c for c in mgr_mod.__dict__.values()
if subclass(c) and 'Standby' not in c.__name__]
[m] = ms
assert isinstance(m.COMMANDS, list)
return m.COMMANDS
def _normalize_command(self, command):
if 'handler' in command:
del command['handler']
if 'cmd' in command:
command['prefix'], command['args'] = Sig.parse_cmd(command['cmd'])
del command['cmd']
else:
command['args'] = Sig.parse_args(command['args'])
command['flags'] = (1 << 3)
command['module'] = 'mgr'
return command
def _render_cmds(self, commands):
lines = render_commands(commands)
assert lines
lineno = self.lineno - self.state_machine.input_offset - 1
source = self.state_machine.input_lines.source(lineno)
self.state_machine.insert_input(lines, source)
def run(self):
module_path = self._normalize_path(self.arguments[0])
sys.path.insert(0, module_path)
for path in self.options.get('python_path', '').split(':'):
sys.path.insert(0, self._normalize_path(path))
os.environ['UNITTEST'] = 'true'
modules = [name for name in os.listdir(module_path)
if self._is_mgr_module(module_path, name)]
commands = sum([self._collect_module_commands(name) for name in modules], [])
cmds = [CmdCommand(**self._normalize_command(c)) for c in commands]
cmds = [cmd for cmd in cmds if 'hidden' not in cmd.flags]
cmds = sorted(cmds, key=lambda cmd: cmd.prefix)
self._render_cmds(cmds)
orig_rgw_mod = sys.modules['pybind_rgw_mod']
sys.modules['rgw'] = orig_rgw_mod
return []
class MyProcessor(Preprocessor):
def __init__(self):
super().__init__()
self.cmds = []
self.undef('__DATE__')
self.undef('__TIME__')
self.expand_linemacro = False
self.expand_filemacro = False
self.expand_countermacro = False
self.line_directive = '#line'
self.define("__PCPP_VERSION__ " + '')
self.define("__PCPP_ALWAYS_FALSE__ 0")
self.define("__PCPP_ALWAYS_TRUE__ 1")
def eval(self, src):
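        # Preprocess `src` and exec the expanded output; the local COMMAND()/
        # COMMAND_WITH_FLAG() helpers below collect every command definition
        # from the included headers into _cmds.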
_cmds = []
NONE = 0
NOFORWARD = (1 << 0)
OBSOLETE = (1 << 1)
DEPRECATED = (1 << 2)
MGR = (1 << 3)
POLL = (1 << 4)
HIDDEN = (1 << 5)
TELL = (1 << 6)
def FLAG(a):
return a
def COMMAND(cmd, desc, module, perm):
_cmds.append({
'cmd': cmd,
'desc': desc,
'module': module,
'perm': perm
})
def COMMAND_WITH_FLAG(cmd, desc, module, perm, flag):
_cmds.append({
'cmd': cmd,
'desc': desc,
'module': module,
'perm': perm,
'flags': flag
})
self.parse(src)
out = io.StringIO()
self.write(out)
out.seek(0)
s = out.read()
exec(s, globals(), locals())
return _cmds
class CephMonCommands(Directive):
"""
extracts commands from specified header file
"""
has_content = True
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
def _src_dir(self):
my_dir = os.path.dirname(os.path.realpath(__file__))
return os.path.abspath(os.path.join(my_dir, '../..'))
def _parse_headers(self, headers):
src_dir = self._src_dir()
src = '\n'.join(f'#include "{src_dir}/{header}"' for header in headers)
return MyProcessor().eval(src)
def _normalize_command(self, command):
if 'handler' in command:
del command['handler']
command['prefix'], command['args'] = Sig.parse_cmd(command['cmd'])
del command['cmd']
return command
def _render_cmds(self, commands):
lines = render_commands(commands)
assert lines
lineno = self.lineno - self.state_machine.input_offset - 1
source = self.state_machine.input_lines.source(lineno)
self.state_machine.insert_input(lines, source)
def run(self):
headers = self.arguments[0].split()
commands = self._parse_headers(headers)
cmds = [CmdCommand(**self._normalize_command(c)) for c in commands]
cmds = [cmd for cmd in cmds if 'hidden' not in cmd.flags]
cmds = sorted(cmds, key=lambda cmd: cmd.prefix)
self._render_cmds(cmds)
return []
def setup(app):
app.add_directive("ceph-mgr-commands", CephMgrCommands)
app.add_directive("ceph-mon-commands", CephMonCommands)
return {
'version': '0.1',
'parallel_read_safe': True,
'parallel_write_safe': True,
}
| 14,356 | 29.353066 | 139 | py |
null | ceph-main/doc/_ext/ceph_confval.py | import io
import contextlib
import os
import sys
from typing import Any, Dict, List, Union
from docutils.nodes import Node
from docutils.parsers.rst import directives
from docutils.statemachine import StringList
from sphinx import addnodes
from sphinx.directives import ObjectDescription
from sphinx.domains.python import PyField
from sphinx.environment import BuildEnvironment
from sphinx.locale import _
from sphinx.util import logging, status_iterator, ws_re
from sphinx.util.docutils import switch_source_input, SphinxDirective
from sphinx.util.docfields import Field
from sphinx.util.nodes import make_id
import jinja2
import jinja2.filters
import yaml
logger = logging.getLogger(__name__)
TEMPLATE = '''
{% if desc %}
{{ desc | wordwrap(70) | indent(3) }}
{% endif %}
:type: ``{{opt.type}}``
{%- if default is not none %}
{%- if opt.type == 'size' %}
:default: ``{{ default | eval_size | iec_size }}``
{%- elif opt.type == 'secs' %}
:default: ``{{ default | readable_duration(opt.type) }}``
{%- elif opt.type in ('uint', 'int', 'float') %}
:default: ``{{ default | readable_num(opt.type) }}``
{%- elif opt.type == 'millisecs' %}
:default: ``{{ default }}`` milliseconds
{%- elif opt.type == 'bool' %}
:default: ``{{ default | string | lower }}``
{%- else %}
:default: {{ default | literal }}
{%- endif -%}
{%- endif %}
{%- if opt.enum_values %}
:valid choices:{% for enum_value in opt.enum_values -%}
{{" -" | indent(18, not loop.first) }} {{ enum_value | literal }}
{% endfor %}
{%- endif %}
{%- if opt.min is defined and opt.max is defined %}
:allowed range: ``[{{ opt.min }}, {{ opt.max }}]``
{%- elif opt.min is defined %}
:min: ``{{ opt.min }}``
{%- elif opt.max is defined %}
:max: ``{{ opt.max }}``
{%- endif %}
{%- if opt.constraint %}
:constraint: {{ opt.constraint }}
{% endif %}
{%- if opt.policies %}
:policies: {{ opt.policies }}
{% endif %}
{%- if opt.example %}
:example: {{ opt.example }}
{%- endif %}
{%- if opt.see_also %}
:see also: {{ opt.see_also | map('ref_confval') | join(', ') }}
{%- endif %}
{% if opt.note %}
.. note::
{{ opt.note }}
{%- endif -%}
{%- if opt.warning %}
.. warning::
{{ opt.warning }}
{%- endif %}
'''
def eval_size(value) -> int:
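    # Parse a size given either as a plain integer or with a _K/_M/_G/_T
    # suffix, returning the value in bytes.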
try:
return int(value)
except ValueError:
times = dict(_K=1 << 10,
_M=1 << 20,
_G=1 << 30,
_T=1 << 40)
for unit, m in times.items():
if value.endswith(unit):
return int(value[:-len(unit)]) * m
raise ValueError(f'unknown value: {value}')
def readable_duration(value: str, typ: str) -> str:
try:
if typ == 'sec':
v = int(value)
postfix = 'second' if v == 1 else 'seconds'
return f'{v} {postfix}'
elif typ == 'float':
return str(float(value))
else:
return str(int(value))
except ValueError:
times = dict(_min=['minute', 'minutes'],
_hr=['hour', 'hours'],
_day=['day', 'days'])
for unit, readables in times.items():
if value.endswith(unit):
v = int(value[:-len(unit)])
postfix = readables[0 if v == 1 else 1]
return f'{v} {postfix}'
raise ValueError(f'unknown value: {value}')
def do_plain_num(value: str, typ: str) -> str:
if typ == 'float':
return str(float(value))
else:
return str(int(value))
def iec_size(value: int) -> str:
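    # Render a byte count using the largest IEC unit that divides it evenly.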
if value == 0:
return '0B'
units = dict(Ei=60,
Pi=50,
Ti=40,
Gi=30,
Mi=20,
Ki=10,
B=0)
for unit, bits in units.items():
m = 1 << bits
if value % m == 0:
value //= m
return f'{value}{unit}'
raise Exception(f'iec_size() failed to convert {value}')
def do_fileize_num(value: str, typ: str) -> str:
v = eval_size(value)
return iec_size(v)
def readable_num(value: str, typ: str) -> str:
e = ValueError()
for eval_func in [do_plain_num,
readable_duration,
do_fileize_num]:
try:
return eval_func(value, typ)
except ValueError as ex:
e = ex
raise e
def literal(name) -> str:
if name:
return f'``{name}``'
else:
return f'<empty string>'
def ref_confval(name) -> str:
return f':confval:`{name}`'
def jinja_template() -> jinja2.Template:
env = jinja2.Environment()
env.filters['eval_size'] = eval_size
env.filters['iec_size'] = iec_size
env.filters['readable_duration'] = readable_duration
env.filters['readable_num'] = readable_num
env.filters['literal'] = literal
env.filters['ref_confval'] = ref_confval
return env.from_string(TEMPLATE)
FieldValueT = Union[bool, float, int, str]
class CephModule(SphinxDirective):
"""
Directive to name the mgr module for which options are documented.
"""
has_content = False
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = False
def run(self) -> List[Node]:
module = self.arguments[0].strip()
if module == 'None':
self.env.ref_context.pop('ceph:module', None)
else:
self.env.ref_context['ceph:module'] = module
return []
class CephOption(ObjectDescription):
"""
emit option loaded from given command/options/<name>.yaml.in file
"""
has_content = True
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = False
option_spec = {
'module': directives.unchanged,
'default': directives.unchanged
}
doc_field_types = [
Field('default',
label=_('Default'),
has_arg=False,
names=('default',)),
Field('type',
label=_('Type'),
has_arg=False,
names=('type',),
bodyrolename='class'),
]
template = jinja_template()
opts: Dict[str, Dict[str, FieldValueT]] = {}
mgr_opts: Dict[str, # module name
Dict[str, # option name
Dict[str, # field_name
FieldValueT]]] = {}
def _load_yaml(self) -> Dict[str, Dict[str, FieldValueT]]:
if CephOption.opts:
return CephOption.opts
opts = []
for fn in status_iterator(self.config.ceph_confval_imports,
'loading options...', 'red',
len(self.config.ceph_confval_imports),
self.env.app.verbosity):
self.env.note_dependency(fn)
try:
with open(fn, 'r') as f:
yaml_in = io.StringIO()
for line in f:
if '@' not in line:
yaml_in.write(line)
yaml_in.seek(0)
opts += yaml.safe_load(yaml_in)['options']
except OSError as e:
message = f'Unable to open option file "{fn}": {e}'
raise self.error(message)
CephOption.opts = dict((opt['name'], opt) for opt in opts)
return CephOption.opts
def _normalize_path(self, dirname):
my_dir = os.path.dirname(os.path.realpath(__file__))
src_dir = os.path.abspath(os.path.join(my_dir, '../..'))
return os.path.join(src_dir, dirname)
def _is_mgr_module(self, dirname, name):
if not os.path.isdir(os.path.join(dirname, name)):
return False
if not os.path.isfile(os.path.join(dirname, name, '__init__.py')):
return False
return name not in ['tests']
@contextlib.contextmanager
def mocked_modules(self):
# src/pybind/mgr/tests
from tests import mock
mock_imports = ['rados',
'rbd',
'cephfs',
'dateutil',
'dateutil.parser']
# make dashboard happy
mock_imports += ['OpenSSL',
'jwt',
'bcrypt',
'jsonpatch',
'rook.rook_client',
'rook.rook_client.ceph',
'rook.rook_client._helper',
'cherrypy=3.2.3']
# make diskprediction_local happy
mock_imports += ['numpy',
'scipy']
# make restful happy
mock_imports += ['pecan',
'pecan.rest',
'pecan.hooks',
'werkzeug',
'werkzeug.serving']
for m in mock_imports:
args = {}
parts = m.split('=', 1)
mocked = parts[0]
if len(parts) > 1:
args['__version__'] = parts[1]
sys.modules[mocked] = mock.Mock(**args)
try:
yield
finally:
for m in mock_imports:
mocked = m.split('=', 1)[0]
sys.modules.pop(mocked)
def _collect_options_from_module(self, name):
with self.mocked_modules():
mgr_mod = __import__(name, globals(), locals(), [], 0)
# import 'M' from src/pybind/mgr/tests
from tests import M
def subclass(x):
try:
return issubclass(x, M)
except TypeError:
return False
ms = [c for c in mgr_mod.__dict__.values()
if subclass(c) and 'Standby' not in c.__name__]
[m] = ms
assert isinstance(m.MODULE_OPTIONS, list)
return m.MODULE_OPTIONS
def _load_module(self, module) -> Dict[str, Dict[str, FieldValueT]]:
mgr_opts = CephOption.mgr_opts.get(module)
if mgr_opts is not None:
return mgr_opts
python_path = self.config.ceph_confval_mgr_python_path
for path in python_path.split(':'):
sys.path.insert(0, self._normalize_path(path))
module_path = self.env.config.ceph_confval_mgr_module_path
module_path = self._normalize_path(module_path)
sys.path.insert(0, module_path)
if not self._is_mgr_module(module_path, module):
raise self.error(f'module "{module}" not found under {module_path}')
fn = os.path.join(module_path, module, 'module.py')
if os.path.exists(fn):
self.env.note_dependency(fn)
os.environ['UNITTEST'] = 'true'
opts = self._collect_options_from_module(module)
CephOption.mgr_opts[module] = dict((opt['name'], opt) for opt in opts)
return CephOption.mgr_opts[module]
def _current_module(self) -> str:
return self.options.get('module',
self.env.ref_context.get('ceph:module'))
def _render_option(self, name) -> str:
cur_module = self._current_module()
if cur_module:
try:
opt = self._load_module(cur_module).get(name)
except Exception as e:
message = f'Unable to load module "{cur_module}": {e}'
raise self.error(message)
else:
opt = self._load_yaml().get(name)
if opt is None:
raise self.error(f'Option "{name}" not found!')
if cur_module and 'type' not in opt:
# the type of module option defaults to 'str'
opt['type'] = 'str'
desc = opt.get('fmt_desc') or opt.get('long_desc') or opt.get('desc')
opt_default = opt.get('default')
default = self.options.get('default', opt_default)
try:
return self.template.render(opt=opt,
desc=desc,
default=default)
except Exception as e:
message = (f'Unable to render option "{name}": {e}. ',
f'opt={opt}, desc={desc}, default={default}')
raise self.error(message)
def handle_signature(self,
sig: str,
signode: addnodes.desc_signature) -> str:
signode.clear()
signode += addnodes.desc_name(sig, sig)
# normalize whitespace like XRefRole does
name = ws_re.sub(' ', sig)
cur_module = self._current_module()
if cur_module:
return '/'.join(['mgr', cur_module, name])
else:
return name
def transform_content(self, contentnode: addnodes.desc_content) -> None:
name = self.arguments[0]
source, lineno = self.get_source_info()
source = f'{source}:{lineno}:<confval>'
fields = StringList(self._render_option(name).splitlines() + [''],
source=source, parent_offset=lineno)
with switch_source_input(self.state, fields):
self.state.nested_parse(fields, 0, contentnode)
def add_target_and_index(self,
name: str,
sig: str,
signode: addnodes.desc_signature) -> None:
node_id = make_id(self.env, self.state.document, self.objtype, name)
signode['ids'].append(node_id)
self.state.document.note_explicit_target(signode)
entry = f'{name}; configuration option'
self.indexnode['entries'].append(('pair', entry, node_id, '', None))
std = self.env.get_domain('std')
std.note_object(self.objtype, name, node_id, location=signode)
def _reset_ref_context(app, env, docname):
env.ref_context.pop('ceph:module', None)
def setup(app) -> Dict[str, Any]:
app.add_config_value('ceph_confval_imports',
default=[],
rebuild='html',
types=[str])
app.add_config_value('ceph_confval_mgr_module_path',
default=[],
rebuild='html',
types=[str])
app.add_config_value('ceph_confval_mgr_python_path',
default=[],
rebuild='',
types=[str])
app.add_object_type(
'confsec',
'confsec',
objname='configuration section',
indextemplate='pair: %s; configuration section',
doc_field_types=[
Field(
'example',
label=_('Example'),
has_arg=False,
)]
)
app.add_object_type(
'confval',
'confval',
objname='configuration option',
)
app.add_directive_to_domain('std', 'mgr_module', CephModule)
app.add_directive_to_domain('std', 'confval', CephOption, override=True)
app.connect('env-purge-doc', _reset_ref_context)
return {
'version': 'builtin',
'parallel_read_safe': True,
'parallel_write_safe': True,
}
| 15,171 | 31.982609 | 80 | py |
null | ceph-main/doc/_ext/ceph_releases.py | # cobbled together from:
# https://github.com/sphinx-contrib/documentedlist/blob/master/sphinxcontrib/documentedlist.py
# https://github.com/sphinx-doc/sphinx/blob/v1.6.3/sphinx/ext/graphviz.py
# https://github.com/thewtex/sphinx-contrib/blob/master/exceltable/sphinxcontrib/exceltable.py
# https://bitbucket.org/prometheus/sphinxcontrib-htsql/src/331a542c29a102eec9f8cba44797e53a49de2a49/sphinxcontrib/htsql.py?at=default&fileviewer=file-view-default
# into the glory that follows:
import json
import yaml
import jinja2
import sphinx
import datetime
from docutils.parsers.rst import Directive
from docutils import nodes
from sphinx.util import logging
logger = logging.getLogger(__name__)
class CephReleases(Directive):
has_content = False
required_arguments = 2
optional_arguments = 0
option_spec = {}
def run(self):
filename = self.arguments[0]
current = self.arguments[1] == 'current'
document = self.state.document
env = document.settings.env
rel_filename, filename = env.relfn2path(filename)
env.note_dependency(filename)
try:
with open(filename, 'r') as fp:
releases = yaml.safe_load(fp)
releases = releases["releases"]
except Exception as e:
return [document.reporter.warning(
"Failed to open Ceph releases file {}: {}".format(filename, e),
line=self.lineno)]
table = nodes.table()
tgroup = nodes.tgroup(cols=3)
table += tgroup
tgroup.extend(
nodes.colspec(colwidth=30, colname='c'+str(idx))
for idx, _ in enumerate(range(4)))
thead = nodes.thead()
tgroup += thead
row_node = nodes.row()
thead += row_node
row_node.extend(
nodes.entry(h, nodes.paragraph(text=h))
for h in ["Name", "Initial release", "Latest",
"End of life (estimated)" if current else "End of life"])
releases = releases.items()
releases = sorted(releases, key=lambda t: t[0], reverse=True)
tbody = nodes.tbody()
tgroup += tbody
rows = []
for code_name, info in releases:
actual_eol = info.get("actual_eol", None)
if current:
if actual_eol and actual_eol <= datetime.datetime.now().date():
continue
else:
if not actual_eol:
continue
trow = nodes.row()
entry = nodes.entry()
para = nodes.paragraph(text=f"`{code_name.title()} <{code_name}>`_")
sphinx.util.nodes.nested_parse_with_titles(
self.state, para, entry)
#entry += para
trow += entry
sorted_releases = sorted(info["releases"],
key=lambda t: [t["released"]] + list(map(lambda v: int(v), t["version"].split("."))))
oldest_release = sorted_releases[0]
newest_release = sorted_releases[-1]
entry = nodes.entry()
para = nodes.paragraph(text="{}".format(
oldest_release["released"]))
entry += para
trow += entry
entry = nodes.entry()
if newest_release.get("skip_ref", False):
para = nodes.paragraph(text="{}".format(
newest_release["version"]))
else:
para = nodes.paragraph(text="`{}`_".format(
newest_release["version"]))
sphinx.util.nodes.nested_parse_with_titles(
self.state, para, entry)
#entry += para
trow += entry
entry = nodes.entry()
if current:
para = nodes.paragraph(text=info.get("target_eol", '--'))
else:
para = nodes.paragraph(text=info.get('actual_eol', '--'))
entry += para
trow += entry
rows.append(trow)
tbody.extend(rows)
return [table]
RELEASES_TEMPLATE = '''
.. mermaid::
gantt
dateFormat YYYY-MM-DD
axisFormat %Y
section Active Releases
{% for release in active_releases %}
{{ release.code_name }} (latest {{ release.last_version }}): done, {{ release.debute_date }},{{ release.lifetime }}d
{% endfor %}
section Archived Releases
{% for release in archived_releases %}
{{ release.code_name }} (latest {{ release.last_version }}): done, {{ release.debute_date }},{{ release.lifetime }}d
{% endfor %}
'''
class ReleasesGantt(Directive):
has_content = True
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = False
template = jinja2.Environment().from_string(RELEASES_TEMPLATE)
def _render_time_line(self, filename):
try:
with open(filename) as f:
releases = yaml.safe_load(f)['releases']
except Exception as e:
message = f'Unable read release file: "{filename}": {e}'
self.error(message)
active_releases = []
archived_releases = []
# just update `releases` with extracted info
for code_name, info in releases.items():
last_release = info['releases'][0]
first_release = info['releases'][-1]
last_version = last_release['version']
debute_date = first_release['released']
if 'actual_eol' in info:
lifetime = info['actual_eol'] - first_release['released']
else:
lifetime = info['target_eol'] - first_release['released']
release = dict(code_name=code_name,
last_version=last_version,
debute_date=debute_date,
lifetime=lifetime.days)
if 'actual_eol' in info:
archived_releases.append(release)
else:
active_releases.append(release)
rendered = self.template.render(active_releases=active_releases,
archived_releases=archived_releases)
return rendered.splitlines()
def run(self):
filename = self.arguments[0]
document = self.state.document
env = document.settings.env
rel_filename, filename = env.relfn2path(filename)
env.note_dependency(filename)
lines = self._render_time_line(filename)
lineno = self.lineno - self.state_machine.input_offset - 1
source = self.state_machine.input_lines.source(lineno)
self.state_machine.insert_input(lines, source)
return []
class CephTimeline(Directive):
has_content = False
required_arguments = 3
optional_arguments = 0
option_spec = {}
def run(self):
filename = self.arguments[0]
document = self.state.document
env = document.settings.env
rel_filename, filename = env.relfn2path(filename)
env.note_dependency(filename)
try:
with open(filename, 'r') as fp:
releases = yaml.safe_load(fp)
except Exception as e:
return [document.reporter.warning(
"Failed to open Ceph releases file {}: {}".format(filename, e),
line=self.lineno)]
display_releases = self.arguments[1:]
timeline = []
for code_name, info in releases["releases"].items():
if code_name in display_releases:
for release in info.get("releases", []):
released = release["released"]
timeline.append((released, code_name, release["version"],
release.get("skip_ref", False)))
assert "development" not in releases["releases"]
if "development" in display_releases:
for release in releases["development"]["releases"]:
released = release["released"]
timeline.append((released, "development", release["version"],
release.get("skip_ref", False)))
timeline = sorted(timeline, key=lambda t: t[0], reverse=True)
table = nodes.table()
tgroup = nodes.tgroup(cols=3)
table += tgroup
columns = ["Date"] + display_releases
tgroup.extend(
nodes.colspec(colwidth=30, colname='c'+str(idx))
for idx, _ in enumerate(range(len(columns))))
thead = nodes.thead()
tgroup += thead
row_node = nodes.row()
thead += row_node
for col in columns:
entry = nodes.entry()
if col.lower() in ["date", "development"]:
para = nodes.paragraph(text=col.title())
else:
                para = nodes.paragraph(text=f"`{col.title()} <{col}>`_")
sphinx.util.nodes.nested_parse_with_titles(
self.state, para, entry)
row_node += entry
tbody = nodes.tbody()
tgroup += tbody
rows = []
for row_info in timeline:
trow = nodes.row()
entry = nodes.entry()
para = nodes.paragraph(text=row_info[0])
entry += para
trow += entry
for release in display_releases:
entry = nodes.entry()
if row_info[1] == release:
if row_info[3]: # if skip ref
para = nodes.paragraph(text=row_info[2])
else:
para = nodes.paragraph(text="`{}`_".format(row_info[2]))
sphinx.util.nodes.nested_parse_with_titles(
self.state, para, entry)
else:
para = nodes.paragraph(text="--")
entry += para
trow += entry
rows.append(trow)
tbody.extend(rows)
return [table]
TIMELINE_TEMPLATE = '''
.. mermaid::
gantt
dateFormat YYYY-MM-DD
axisFormat %Y-%m
{% if title %}
title {{title}}
{% endif %}
{% for display_release in display_releases %}
section {{ display_release }}
{%if releases[display_release].actual_eol %}
End of life: crit, {{ releases[display_release].actual_eol }},4d
{% else %}
End of life (estimated): crit, {{ releases[display_release].target_eol }},4d
{% endif %}
{% for release in releases[display_release].releases | sort(attribute='released', reverse=True) %}
{{ release.version }}: milestone, done, {{ release.released }},0d
{% endfor %}
{% endfor %}
'''
class TimeLineGantt(Directive):
has_content = True
required_arguments = 2
optional_arguments = 0
final_argument_whitespace = True
template = jinja2.Environment().from_string(TIMELINE_TEMPLATE)
def _render_time_line(self, filename, display_releases):
try:
with open(filename) as f:
releases = yaml.safe_load(f)['releases']
except Exception as e:
            message = f'Unable to read release file: "{filename}": {e}'
self.error(message)
rendered = self.template.render(display_releases=display_releases,
releases=releases)
return rendered.splitlines()
def run(self):
filename = self.arguments[0]
display_releases = self.arguments[1].split()
document = self.state.document
env = document.settings.env
rel_filename, filename = env.relfn2path(filename)
env.note_dependency(filename)
lines = self._render_time_line(filename, display_releases)
lineno = self.lineno - self.state_machine.input_offset - 1
source = self.state_machine.input_lines.source(lineno)
self.state_machine.insert_input(lines, source)
return []
def setup(app):
app.add_directive('ceph_releases', CephReleases)
app.add_directive('ceph_releases_gantt', ReleasesGantt)
app.add_directive('ceph_timeline', CephTimeline)
app.add_directive('ceph_timeline_gantt', TimeLineGantt)
return {
'parallel_read_safe': True,
'parallel_write_safe': True
}
| 12,230 | 33.747159 | 164 | py |
null | ceph-main/doc/_static/css/custom.css | dt {
scroll-margin-top: 3em;
}
h2 {
scroll-margin-top: 4em;
}
h3 {
scroll-margin-top: 4em;
}
section {
scroll-margin-top: 4em;
}
span {
scroll-margin-top: 2em;
}
ul.simple > li > ul > li:last-child {
margin-block-end : 1em;
}
div.section > ul > li > p {
margin-block-start : 0.6em;
margin-block-end : 0.6em;
}
| 354 | 11.241379 | 37 | css |
null | ceph-main/doc/_templates/page.html | {% extends "!page.html" %}
{% block body %}
{%- if release == 'dev' %}
<div id="dev-warning" class="admonition note">
<p class="first admonition-title">Notice</p>
<p class="last">This document is for a development version of Ceph.</p>
</div>
{%- endif %}
{%- if is_release_eol %}
<div id="eol-warning" class="admonition warning">
<p class="first admonition-title">Warning</p>
<p class="last">This document is for an unsupported version of Ceph.</p>
</div>
{%- endif %}
{%- if not is_release_eol %}
<div id="docubetter" align="right" style="padding: 5px; font-weight: bold;">
<a href="https://pad.ceph.com/p/Report_Documentation_Bugs">Report a Documentation Bug</a>
</div>
{%- endif %}
{{ super() }}
{% endblock %}
| 736 | 27.346154 | 93 | html |
null | ceph-main/doc/_templates/smarttoc.html | {#
Sphinx sidebar template: smart table of contents.
Shows a sidebar ToC that gives you a more global view of the
documentation, and not the confusing cur/prev/next which is the
default sidebar.
The ToC will open and collapse automatically to show the part of the
hierarchy you are in. Top-level items will always be visible.
#}
<h3><a href="{{ pathto(master_doc) }}">{{ _('Table Of Contents') }}</a></h3>
{{ toctree(maxdepth=-1, includehidden=True) }}
<!-- ugly kludge to make genindex look like it's part of the toc -->
<ul style="margin-top: -10px"><li class="toctree-l1"><a class="reference internal" href="{{ pathto('genindex') }}">Index</a></li></ul>
| 685 | 39.352941 | 134 | html |
null | ceph-main/doc/_themes/ceph/layout.html | {# TEMPLATE VAR SETTINGS #}
{%- set url_root = pathto('', 1) %}
{%- if url_root == '#' %}{% set url_root = '' %}{% endif %}
{%- if not embedded and docstitle %}
{%- set titlesuffix = " — "|safe + docstitle|e %}
{%- else %}
{%- set titlesuffix = "" %}
{%- endif %}
{%- set lang_attr = 'en' if language == None else (language | replace('_', '-')) %}
{%- set sphinx_writer = 'writer-html5' if html5_doctype else 'writer-html4' %}
<!DOCTYPE html>
<html class="{{ sphinx_writer }}" lang="{{ lang_attr }}" >
<head>
<meta charset="utf-8" />
{{ metatags }}
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
{% block htmltitle %}
<title>{{ title|striptags|e }}{{ titlesuffix }}</title>
{% endblock %}
{# CSS #}
<link rel="stylesheet" href="{{ pathto('_static/' + style, 1) }}" type="text/css" />
<link rel="stylesheet" href="{{ pathto('_static/pygments.css', 1) }}" type="text/css" />
{%- for css in css_files %}
{%- if css|attr("rel") %}
<link rel="{{ css.rel }}" href="{{ pathto(css.filename, 1) }}" type="text/css"{% if css.title is not none %} title="{{ css.title }}"{% endif %} />
{%- else %}
<link rel="stylesheet" href="{{ pathto(css, 1) }}" type="text/css" />
{%- endif %}
{%- endfor %}
{%- for cssfile in extra_css_files %}
<link rel="stylesheet" href="{{ pathto(cssfile, 1) }}" type="text/css" />
{%- endfor %}
{# FAVICON #}
{% if favicon %}
<link rel="shortcut icon" href="{{ pathto('_static/' + favicon, 1) }}"/>
{% endif %}
{# CANONICAL URL (deprecated) #}
{% if theme_canonical_url and not pageurl %}
<link rel="canonical" href="{{ theme_canonical_url }}{{ pagename }}.html"/>
{% endif %}
{# CANONICAL URL #}
{%- if pageurl %}
<link rel="canonical" href="{{ pageurl|e }}" />
{%- endif %}
{# JAVASCRIPTS #}
{%- block scripts %}
<!--[if lt IE 9]>
<script src="{{ pathto('_static/js/html5shiv.min.js', 1) }}"></script>
<![endif]-->
{%- if not embedded %}
{# XXX Sphinx 1.8.0 made this an external js-file, quick fix until we refactor the template to inherit more blocks directly from sphinx #}
{% if sphinx_version >= "1.8.0" %}
<script type="text/javascript" id="documentation_options" data-url_root="{{ url_root }}" src="{{ pathto('_static/documentation_options.js', 1) }}"></script>
{%- for scriptfile in script_files %}
{{ js_tag(scriptfile) }}
{%- endfor %}
{% else %}
<script type="text/javascript">
var DOCUMENTATION_OPTIONS = {
URL_ROOT:'{{ url_root }}',
VERSION:'{{ release|e }}',
LANGUAGE:'{{ language }}',
COLLAPSE_INDEX:false,
FILE_SUFFIX:'{{ '' if no_search_suffix else file_suffix }}',
HAS_SOURCE: {{ has_source|lower }},
SOURCELINK_SUFFIX: '{{ sourcelink_suffix }}'
};
</script>
{%- for scriptfile in script_files %}
<script type="text/javascript" src="{{ pathto(scriptfile, 1) }}"></script>
{%- endfor %}
{% endif %}
<script type="text/javascript" src="{{ pathto('_static/js/theme.js', 1) }}"></script>
{# OPENSEARCH #}
{%- if use_opensearch %}
<link rel="search" type="application/opensearchdescription+xml"
title="{% trans docstitle=docstitle|e %}Search within {{ docstitle }}{% endtrans %}"
href="{{ pathto('_static/opensearch.xml', 1) }}"/>
{%- endif %}
{%- endif %}
{%- endblock %}
{%- block linktags %}
{%- if hasdoc('about') %}
<link rel="author" title="{{ _('About these documents') }}" href="{{ pathto('about') }}" />
{%- endif %}
{%- if hasdoc('genindex') %}
<link rel="index" title="{{ _('Index') }}" href="{{ pathto('genindex') }}" />
{%- endif %}
{%- if hasdoc('search') %}
<link rel="search" title="{{ _('Search') }}" href="{{ pathto('search') }}" />
{%- endif %}
{%- if hasdoc('copyright') %}
<link rel="copyright" title="{{ _('Copyright') }}" href="{{ pathto('copyright') }}" />
{%- endif %}
{%- if next %}
<link rel="next" title="{{ next.title|striptags|e }}" href="{{ next.link|e }}" />
{%- endif %}
{%- if prev %}
<link rel="prev" title="{{ prev.title|striptags|e }}" href="{{ prev.link|e }}" />
{%- endif %}
{%- endblock %}
{%- block extrahead %} {% endblock %}
</head>
<body class="wy-body-for-nav">
{% block extrabody %} {% endblock %}
<header class="top-bar">
{% include "breadcrumbs.html" %}
</header>
<div class="wy-grid-for-nav">
{# SIDE NAV, TOGGLES ON MOBILE #}
<nav data-toggle="wy-nav-shift" class="wy-nav-side">
<div class="wy-side-scroll">
<div class="wy-side-nav-search" {% if theme_style_nav_header_background %} style="background: {{theme_style_nav_header_background}}" {% endif %}>
{% block sidebartitle %}
{% if logo and theme_logo_only %}
<a href="{{ pathto(master_doc) }}">
{% else %}
<a href="{{ pathto(master_doc) }}" class="icon icon-home"> {{ project }}
{% endif %}
{% if logo %}
{# Not strictly valid HTML, but it's the only way to display/scale
it properly, without weird scripting or heaps of work
#}
<img src="{{ pathto('_static/' + logo, 1) }}" class="logo" alt="{{ _('Logo') }}"/>
{% endif %}
</a>
{% if theme_display_version %}
{%- set nav_version = version %}
{% if READTHEDOCS and current_version %}
{%- set nav_version = current_version %}
{% endif %}
{% if nav_version %}
<div class="version">
{{ nav_version }}
</div>
{% endif %}
{% endif %}
{% include "searchbox.html" %}
{% endblock %}
</div>
{% block navigation %}
<div class="wy-menu wy-menu-vertical" data-spy="affix" role="navigation" aria-label="main navigation">
{% block menu %}
{#
The singlehtml builder doesn't handle this toctree call when the
toctree is empty. Skip building this for now.
#}
{% if 'singlehtml' not in builder %}
{% set global_toc = toctree(maxdepth=theme_navigation_depth|int,
collapse=theme_collapse_navigation|tobool,
includehidden=theme_includehidden|tobool,
titles_only=theme_titles_only|tobool) %}
{% endif %}
{% if global_toc %}
{{ global_toc }}
{% else %}
<!-- Local TOC -->
<div class="local-toc">{{ toc }}</div>
{% endif %}
{% endblock %}
</div>
{% endblock %}
</div>
</nav>
<section data-toggle="wy-nav-shift" class="wy-nav-content-wrap">
      {# MOBILE NAV, TRIGGERS SIDE NAV ON TOGGLE #}
<nav class="wy-nav-top" aria-label="top navigation">
{% block mobile_nav %}
<i data-toggle="wy-nav-top" class="fa fa-bars"></i>
<a href="{{ pathto(master_doc) }}">{{ project }}</a>
{% endblock %}
</nav>
<div class="wy-nav-content">
{%- block content %}
{% if theme_style_external_links|tobool %}
<div class="rst-content style-external-links">
{% else %}
<div class="rst-content">
{% endif %}
<div role="main" class="document" itemscope="itemscope" itemtype="http://schema.org/Article">
{%- block document %}
<div itemprop="articleBody">
{% block body %}{% endblock %}
</div>
{% if self.comments()|trim %}
<div class="articleComments">
{% block comments %}{% endblock %}
</div>
{% endif%}
</div>
{%- endblock %}
{% include "footer.html" %}
</div>
{%- endblock %}
</div>
</section>
</div>
{% include "versions.html" %}
<script type="text/javascript">
jQuery(function () {
SphinxRtdTheme.Navigation.enable({{ 'true' if theme_sticky_navigation|tobool else 'false' }});
});
</script>
{# Do not conflict with RTD insertion of analytics script #}
{% if not READTHEDOCS %}
{% if theme_analytics_id %}
<!-- Theme Analytics -->
<script>
(function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
(i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
})(window,document,'script','https://www.google-analytics.com/analytics.js','ga');
ga('create', '{{ theme_analytics_id }}', 'auto');
{% if theme_analytics_anonymize_ip|tobool %}
ga('set', 'anonymizeIp', true);
{% endif %}
ga('send', 'pageview');
</script>
{% endif %}
{% endif %}
{%- block footer %} {% endblock %}
</body>
</html>
| 9,080 | 34.893281 | 162 | html |
null | ceph-main/doc/releases/releases.yml | #
# there are two sections
#
# releases: ... for named releases
# development: ... for dev releases
#
# by default a `version` is interpreted as a sphinx reference when rendered (see
# schedule.rst for the existing tags such as `_13.2.2`). If a version should not
# be treated as a reference, add `skip_ref: True` as a key/value pair sibling to
# the `version` key.
#
# If a version might represent an actual number (e.g. 0.80) quote it.
#
releases:
quincy:
target_eol: 2024-06-01
releases:
- version: 17.2.6
released: 2023-04-10
- version: 17.2.5
released: 2022-10-19
- version: 17.2.4
released: 2022-09-30
- version: 17.2.3
released: 2022-07-29
- version: 17.2.2
released: 2022-07-21
- version: 17.2.1
released: 2022-06-23
- version: 17.2.0
released: 2022-04-19
pacific:
target_eol: 2023-06-01
releases:
- version: 16.2.13
released: 2023-05-09
- version: 16.2.12
released: 2023-04-14
- version: 16.2.11
released: 2023-01-26
- version: 16.2.10
released: 2022-07-21
- version: 16.2.9
released: 2022-05-19
- version: 16.2.8
released: 2022-05-16
- version: 16.2.7
released: 2021-12-14
- version: 16.2.6
released: 2021-09-16
- version: 16.2.5
released: 2021-07-08
- version: 16.2.4
released: 2021-05-13
- version: 16.2.3
released: 2021-05-06
- version: 16.2.2
released: 2021-05-05
- version: 16.2.1
released: 2021-04-19
- version: 16.2.0
released: 2021-03-31
octopus:
target_eol: 2022-06-01
actual_eol: 2022-08-09
releases:
- version: 15.2.17
released: 2022-08-09
- version: 15.2.16
released: 2022-03-01
- version: 15.2.15
released: 2021-10-20
- version: 15.2.14
released: 2021-08-05
- version: 15.2.13
released: 2021-05-26
- version: 15.2.12
released: 2021-05-13
- version: 15.2.11
released: 2021-04-19
- version: 15.2.10
released: 2021-03-18
- version: 15.2.9
released: 2021-02-23
- version: 15.2.8
released: 2020-12-16
- version: 15.2.7
released: 2020-11-30
- version: 15.2.6
released: 2020-11-18
- version: 15.2.5
released: 2020-09-16
- version: 15.2.4
released: 2020-06-30
- version: 15.2.3
released: 2020-05-29
- version: 15.2.2
released: 2020-05-18
- version: 15.2.1
released: 2020-04-09
- version: 15.2.0
released: 2020-03-23
nautilus:
target_eol: 2021-06-01
actual_eol: 2021-06-30
releases:
- version: 14.2.22
released: 2021-06-30
- version: 14.2.21
released: 2021-05-13
- version: 14.2.20
released: 2021-04-19
- version: 14.2.19
released: 2021-03-30
- version: 14.2.18
released: 2021-03-15
- version: 14.2.17
released: 2021-03-11
- version: 14.2.16
released: 2020-12-16
- version: 14.2.15
released: 2020-11-23
- version: 14.2.14
released: 2020-11-18
- version: 14.2.13
released: 2020-11-02
- version: 14.2.12
released: 2020-09-21
- version: 14.2.11
released: 2020-08-11
- version: 14.2.10
released: 2020-06-26
- version: 14.2.9
released: 2020-04-15
- version: 14.2.8
released: 2020-03-03
- version: 14.2.7
released: 2020-01-31
- version: 14.2.6
released: 2020-01-09
- version: 14.2.5
released: 2019-12-10
- version: 14.2.4
released: 2019-09-17
- version: 14.2.3
released: 2019-09-04
- version: 14.2.2
released: 2019-07-17
- version: 14.2.1
released: 2019-04-29
- version: 14.2.0
released: 2019-03-19
mimic:
target_eol: 2020-06-01
actual_eol: 2020-07-22
releases:
- version: 13.2.10
released: 2020-04-23
- version: 13.2.9
released: 2020-04-16
- version: 13.2.8
released: 2019-12-13
- version: 13.2.7
released: 2019-11-25
- version: 13.2.6
released: 2019-06-04
- version: 13.2.5
released: 2019-03-13
- version: 13.2.4
released: 2019-01-07
- version: 13.2.3
released: 2019-01-07
- version: 13.2.2
released: 2018-09-01
- version: 13.2.1
released: 2018-07-01
- version: 13.2.0
released: 2018-06-01
luminous:
target_eol: 2019-06-01
actual_eol: 2020-03-01
releases:
- version: 12.2.13
released: 2020-01-31
- version: 12.2.12
released: 2019-04-12
- version: 12.2.11
released: 2019-01-31
- version: 12.2.10
released: 2018-11-27
- version: 12.2.9
released: 2018-11-01
- version: 12.2.8
released: 2018-09-01
- version: 12.2.7
released: 2018-07-01
- version: 12.2.6
released: 2018-07-01
- version: 12.2.5
released: 2018-04-01
- version: 12.2.4
released: 2018-02-01
- version: 12.2.3
released: 2018-02-01
- version: 12.2.2
released: 2017-12-01
- version: 12.2.1
released: 2017-09-01
- version: 12.2.0
released: 2017-08-01
kraken:
target_eol: 2017-08-01
actual_eol: 2017-08-01
releases:
- version: 11.2.1
released: 2017-08-01
- version: 11.2.0
released: 2017-01-01
jewel:
target_eol: 2018-06-01
actual_eol: 2018-07-01
releases:
- version: 10.2.11
released: 2018-07-11
- version: 10.2.10
released: 2017-10-01
- version: 10.2.9
released: 2017-07-01
- version: 10.2.8
released: 2017-07-01
- version: 10.2.7
released: 2017-04-01
- version: 10.2.6
released: 2017-03-01
- version: 10.2.5
released: 2016-12-01
- version: 10.2.4
released: 2016-12-01
- version: 10.2.3
released: 2016-09-01
- version: 10.2.2
released: 2016-06-01
- version: 10.2.1
released: 2016-05-01
- version: 10.2.0
released: 2016-04-01
infernalis:
actual_eol: 2016-04-01
releases:
- version: 9.2.1
released: 2016-02-01
- version: 9.2.0
released: 2015-11-01
hammer:
target_eol: 2017-05-01
actual_eol: 2017-08-01
releases:
- version: 0.94.10
released: 2017-02-01
- version: 0.94.9
released: 2016-08-01
- version: 0.94.8
released: 2016-08-01
- version: 0.94.7
released: 2016-05-01
- version: 0.94.6
released: 2016-02-01
- version: 0.94.5
released: 2015-10-01
- version: 0.94.4
released: 2015-10-01
- version: 0.94.3
released: 2015-08-01
- version: 0.94.2
released: 2015-06-01
- version: 0.94.1
released: 2015-04-01
- version: '0.94'
released: 2015-04-01
giant:
actual_eol: 2015-04-01
releases:
- version: 0.87.2
released: 2015-04-01
- version: 0.87.1
released: 2015-02-01
- version: '0.87'
released: 2014-10-01
firefly:
target_eol: 2016-01-01
actual_eol: 2016-04-01
releases:
- version: 0.80.11
released: 2015-11-01
- version: 0.80.10
released: 2015-07-01
- version: 0.80.9
released: 2015-03-01
- version: 0.80.8
released: 2015-01-01
- version: 0.80.7
released: 2014-10-01
- version: 0.80.6
released: 2014-10-01
- version: 0.80.5
released: 2014-07-01
- version: 0.80.4
released: 2014-07-01
- version: 0.80.3
released: 2014-07-01
- version: 0.80.2
released: 2014-07-01
- version: 0.80.1
released: 2014-05-01
- version: '0.80'
released: 2014-05-01
emperor:
actual_eol: 2014-05-01
releases:
- version: 0.72.2
released: 2013-12-01
- version: 0.72.1
released: 2013-11-01
- version: '0.72'
released: 2013-11-01
dumpling:
target_eol: 2015-03-01
actual_eol: 2015-05-01
releases:
- version: 0.67.11
released: 2014-09-01
- version: 0.67.10
released: 2014-08-01
- version: 0.67.9
released: 2014-05-01
- version: 0.67.8
released: 2014-05-01
- version: 0.67.7
released: 2014-02-01
- version: 0.67.6
released: 2014-02-01
- version: 0.67.5
released: 2013-12-01
- version: 0.67.4
released: 2013-10-01
- version: 0.67.3
released: 2013-09-01
- version: 0.67.2
released: 2013-08-01
- version: 0.67.1
released: 2013-08-01
- version: '0.67'
released: 2013-08-01
development:
releases:
- version: 15.1.1
released: 2020-03-13
skip_ref: true
- version: 15.1.0
released: 2020-01-29
skip_ref: true
- version: 15.0.0
released: 2019-04-03
skip_ref: true
- version: 14.1.1
released: 2019-03-11
skip_ref: true
- version: 14.1.0
released: 2019-02-22
skip_ref: true
- version: 14.0.1
released: 2018-11-05
skip_ref: true
- version: 14.0.0
released: 2018-05-06
skip_ref: true
- version: 13.1.0
released: 2018-05-01
skip_ref: true
- version: 13.0.2
released: 2018-04-01
skip_ref: true
- version: 13.0.1
released: 2018-02-01
skip_ref: true
- version: 13.0.0
released: 2017-08-01
skip_ref: true
- version: 12.1.4
released: 2017-08-01
skip_ref: true
- version: 12.1.3
released: 2017-08-01
skip_ref: true
- version: 12.1.2
released: 2017-08-01
skip_ref: true
- version: 12.1.1
released: 2017-07-01
skip_ref: true
- version: 12.1.0
released: 2017-06-01
skip_ref: true
- version: 12.0.3
released: 2017-05-01
skip_ref: true
- version: 12.0.2
released: 2017-04-01
skip_ref: true
- version: 12.0.1
released: 2017-03-01
skip_ref: true
- version: 12.0.0
released: 2017-02-01
skip_ref: true
- version: 11.1.1
released: 2017-01-01
skip_ref: true
- version: 11.1.0
released: 2016-12-01
skip_ref: true
- version: 11.0.2
released: 2016-10-01
- version: 11.0.1
released: 2016-10-01
skip_ref: true
- version: 11.0.0
released: 2016-06-01
skip_ref: true
- version: 10.1.2
released: 2016-04-01
- version: 10.1.1
released: 2016-04-01
- version: 10.1.0
released: 2016-03-01
- version: 10.0.5
released: 2016-03-01
- version: 10.0.4
released: 2016-03-01
skip_ref: true
- version: 10.0.3
released: 2016-02-01
- version: 10.0.2
released: 2016-01-01
- version: 10.0.1
released: 2015-12-01
- version: 10.0.0
released: 2015-11-01
- version: 9.1.0
released: 2015-10-01
- version: 9.0.3
released: 2015-08-01
- version: 9.0.2
released: 2015-07-01
- version: 9.0.1
released: 2015-06-01
- version: 9.0.0
released: 2015-05-01
- version: '0.93'
released: 2015-02-01
- version: '0.92'
released: 2015-02-01
- version: '0.91'
released: 2015-01-01
- version: '0.90'
released: 2014-12-01
- version: '0.89'
released: 2014-12-01
- version: '0.88'
released: 2014-11-01
- version: '0.86'
released: 2014-10-01
- version: '0.85'
released: 2014-09-01
- version: '0.84'
released: 2014-08-01
- version: '0.83'
released: 2014-07-01
- version: '0.82'
released: 2014-06-01
- version: '0.81'
released: 2014-06-01
- version: '0.79'
released: 2014-04-01
- version: '0.78'
released: 2014-03-01
- version: '0.77'
released: 2014-02-01
- version: '0.76'
released: 2014-01-01
- version: '0.75'
released: 2014-01-01
- version: '0.74'
released: 2013-12-01
- version: '0.73'
released: 2013-12-01
| 12,490 | 23.444227 | 80 | yml |
null | ceph-main/doc/scripts/README.md | Script Usage
============
Peering State Model: gen_state_diagram.py
------------------------------------------
$ git clone https://github.com/ceph/ceph.git
$ cd ceph
$ cat src/osd/PeeringState.h src/osd/PeeringState.cc | doc/scripts/gen_state_diagram.py > doc/dev/peering_graph.generated.dot
$ sed -i 's/7,7/1080,1080/' doc/dev/peering_graph.generated.dot
$ dot -Tsvg doc/dev/peering_graph.generated.dot > doc/dev/peering_graph.generated.svg
| 464 | 37.75 | 129 | md |
null | ceph-main/doc/scripts/gen_state_diagram.py | #!/usr/bin/python3
import itertools
import re
import sys
def do_filter(generator):
return acc_lines(remove_multiline_comments(to_char(remove_single_line_comments(generator))))
def acc_lines(generator):
current = ""
for i in generator:
current += i
if i == ';' or \
i == '{' or \
i == '}':
yield current.lstrip("\n")
current = ""
def to_char(generator):
for line in generator:
for char in line:
if char != '\n':
yield char
else:
yield ' '
def remove_single_line_comments(generator):
for i in generator:
if len(i) and i[0] == '#':
continue
yield re.sub(r'//.*', '', i)
def remove_multiline_comments(generator):
saw = ""
in_comment = False
for char in generator:
if in_comment:
if saw == "*":
if char == "/":
in_comment = False
saw = ""
if char == "*":
saw = "*"
continue
if saw == "/":
if char == '*':
in_comment = True
saw = ""
continue
else:
yield saw
saw = ""
if char == '/':
saw = "/"
continue
yield char
class StateMachineRenderer(object):
def __init__(self):
self.states = {} # state -> parent
self.machines = {} # state-> initial
self.edges = {} # event -> [(state, state)]
self.context = [] # [(context, depth_encountered)]
self.context_depth = 0
self.state_contents = {}
self.subgraphnum = 0
self.clusterlabel = {}
self.color_palette = itertools.cycle([
"#000000", # black
"#1e90ff", # dodgerblue
"#ff0000", # red
"#0000ff", # blue
"#ffa500", # orange
"#40e0d0", # turquoise
"#c71585", # mediumvioletred
])
def __str__(self):
return f'''-------------------
states: {self.states}
machines: {self.machines}
edges: {self.edges}
context: {self.context}
state_contents: {self.state_contents}
--------------------'''
def read_input(self, input_lines):
previous_line = None
for line in input_lines:
self.get_state(line)
self.get_event(line)
# pass two lines at a time to get the context so that regexes can
# match on split signatures
self.get_context(line, previous_line)
previous_line = line
def get_context(self, line, previous_line):
match = re.search(r"(\w+::)*::(?P<tag>\w+)::\w+\(const (?P<event>\w+)", line)
if match is None and previous_line is not None:
# it is possible that we need to match on the previous line as well, so join
# them to make them one line and try and get this matching
joined_line = ' '.join([previous_line, line])
match = re.search(r"(\w+::)*::(?P<tag>\w+)::\w+\(\s*const (?P<event>\w+)", joined_line)
if match is not None:
self.context.append((match.group('tag'), self.context_depth, match.group('event')))
if '{' in line:
self.context_depth += 1
if '}' in line:
self.context_depth -= 1
while len(self.context) and self.context[-1][1] == self.context_depth:
self.context.pop()
def get_state(self, line):
if "boost::statechart::state_machine" in line:
tokens = re.search(
r"boost::statechart::state_machine<\s*(\w*),\s*(\w*)\s*>",
line)
if tokens is None:
raise Exception("Error: malformed state_machine line: " + line)
self.machines[tokens.group(1)] = tokens.group(2)
self.context.append((tokens.group(1), self.context_depth, ""))
return
if "boost::statechart::state" in line:
tokens = re.search(
r"boost::statechart::state<\s*(\w*),\s*(\w*)\s*,?\s*(\w*)\s*>",
line)
if tokens is None:
raise Exception("Error: malformed state line: " + line)
self.states[tokens.group(1)] = tokens.group(2)
if tokens.group(2) not in self.state_contents.keys():
self.state_contents[tokens.group(2)] = []
self.state_contents[tokens.group(2)].append(tokens.group(1))
if tokens.group(3):
self.machines[tokens.group(1)] = tokens.group(3)
self.context.append((tokens.group(1), self.context_depth, ""))
return
def get_event(self, line):
if "boost::statechart::transition" in line:
for i in re.finditer(r'boost::statechart::transition<\s*([\w:]*)\s*,\s*(\w*)\s*>',
line):
if i.group(1) not in self.edges.keys():
self.edges[i.group(1)] = []
if not self.context:
raise Exception("no context at line: " + line)
self.edges[i.group(1)].append((self.context[-1][0], i.group(2)))
        i = re.search(r"return\s+transit<\s*(\w*)\s*>()", line)
if i is not None:
if not self.context:
raise Exception("no context at line: " + line)
if not self.context[-1][2]:
raise Exception("no event in context at line: " + line)
if self.context[-1][2] not in self.edges.keys():
self.edges[self.context[-1][2]] = []
self.edges[self.context[-1][2]].append((self.context[-1][0], i.group(1)))
def emit_dot(self, output):
top_level = []
for state in self.machines.keys():
if state not in self.states.keys():
top_level.append(state)
print('Top Level States: ', top_level, file=sys.stderr)
print('digraph G {', file=output)
print('\tsize="7,7"', file=output)
print('\tcompound=true;', file=output)
for i in self.emit_state(top_level[0]):
print('\t' + i, file=output)
for i in self.edges.keys():
for j in self.emit_event(i):
print(j, file=output)
print('}', file=output)
def emit_state(self, state):
if state in self.state_contents.keys():
self.clusterlabel[state] = "cluster%s" % (str(self.subgraphnum),)
yield "subgraph cluster%s {" % (str(self.subgraphnum),)
self.subgraphnum += 1
yield """\tlabel = "%s";""" % (state,)
yield """\tcolor = "black";"""
if state in self.machines.values():
yield """\tstyle = "filled";"""
yield """\tfillcolor = "lightgrey";"""
for j in self.state_contents[state]:
for i in self.emit_state(j):
yield "\t"+i
yield "}"
else:
found = False
for (k, v) in self.machines.items():
if v == state:
yield state+"[shape=Mdiamond style=filled fillcolor=lightgrey];"
found = True
break
if not found:
yield state+";"
def emit_event(self, event):
def append(app):
retval = "["
for i in app:
retval += (i + ",")
retval += "]"
return retval
for (fro, to) in self.edges[event]:
color = next(self.color_palette)
appendix = ['label="%s"' % (event,),
'color="%s"' % (color,),
'fontcolor="%s"' % (color,)]
if fro in self.machines.keys():
appendix.append("ltail=%s" % (self.clusterlabel[fro],))
while fro in self.machines.keys():
fro = self.machines[fro]
if to in self.machines.keys():
appendix.append("lhead=%s" % (self.clusterlabel[to],))
while to in self.machines.keys():
to = self.machines[to]
yield("%s -> %s %s;" % (fro, to, append(appendix)))
if __name__ == '__main__':
INPUT_GENERATOR = do_filter(line for line in sys.stdin)
RENDERER = StateMachineRenderer()
RENDERER.read_input(INPUT_GENERATOR)
RENDERER.emit_dot(output=sys.stdout)
| 8,474 | 33.876543 | 99 | py |
null | ceph-main/examples/librados/hello_radosstriper.cc | #include "rados/librados.hpp"
#include "radosstriper/libradosstriper.hpp"
#include <iostream>
#include <string>
int main(int argc, char* argv[])
{
if(argc != 6)
{
std::cout <<"Please put in correct params\n"<<
"Stripe Count:\n"<<
"Object Size:\n" <<
"File Name:\n" <<
"Object Name:\n"
"Pool Name:"<< std::endl;
return EXIT_FAILURE;
}
uint32_t strip_count = std::stoi(argv[1]);
uint32_t obj_size = std::stoi(argv[2]);
std::string fname = argv[3];
std::string obj_name = argv[4];
std::string pool_name = argv[5];
int ret = 0;
librados::IoCtx io_ctx;
librados::Rados cluster;
libradosstriper::RadosStriper* rs = new libradosstriper::RadosStriper;
// make sure the keyring file is in /etc/ceph/ and is world readable
ret = cluster.init2("client.admin","ceph",0);
if( ret < 0)
{
std::cerr << "Couldn't init cluster "<< ret << std::endl;
}
// make sure ceph.conf is in /etc/ceph/ and is world readable
ret = cluster.conf_read_file("ceph.conf");
if( ret < 0)
{
std::cerr << "Couldn't read conf file "<< ret << std::endl;
}
ret = cluster.connect();
if(ret < 0)
{
std::cerr << "Couldn't connect to cluster "<< ret << std::endl;
}
else
{
std::cout << "Connected to Cluster"<< std::endl;
}
ret = cluster.ioctx_create(pool_name.c_str(), io_ctx);
if(ret < 0)
{
std::cerr << "Couldn't Create IO_CTX"<< ret << std::endl;
}
ret = libradosstriper::RadosStriper::striper_create(io_ctx,rs);
if(ret < 0)
{
std::cerr << "Couldn't Create RadosStriper"<< ret << std::endl;
    delete rs;
    // rs must not be used after a failed striper_create, so clean up and bail out
    io_ctx.close();
    cluster.shutdown();
    return EXIT_FAILURE;
  }
uint64_t alignment = 0;
ret = io_ctx.pool_required_alignment2(&alignment);
if(ret < 0)
{
std::cerr << "IO_CTX didn't give alignment "<< ret
<< "\n Is this an erasure coded pool? "<< std::endl;
delete rs;
io_ctx.close();
cluster.shutdown();
return EXIT_FAILURE;
}
std::cout << "Pool alignment: "<< alignment << std::endl;
rs->set_object_layout_stripe_unit(alignment);
// how many objects are we striping across?
rs->set_object_layout_stripe_count(strip_count);
// how big should each object be?
rs->set_object_layout_object_size(obj_size);
std::string err = "no_err";
librados::bufferlist bl;
bl.read_file(fname.c_str(),&err);
if(err != "no_err")
{
std::cout << "Error reading file into bufferlist: "<< err << std::endl;
delete rs;
io_ctx.close();
cluster.shutdown();
return EXIT_FAILURE;
}
std::cout << "Writing: " << fname << "\nas: "<< obj_name << std::endl;
rs->write_full(obj_name,bl);
std::cout << "done with: " << fname << std::endl;
delete rs;
io_ctx.close();
cluster.shutdown();
}
| 2,756 | 25.76699 | 77 | cc |
null | ceph-main/examples/librados/hello_world.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
* Copyright 2013 Inktank
*/
// install the librados-dev package to get this
#include <rados/librados.hpp>
#include <iostream>
#include <string>
int main(int argc, const char **argv)
{
int ret = 0;
// we will use all of these below
const char *pool_name = "hello_world_pool";
std::string hello("hello world!");
std::string object_name("hello_object");
librados::IoCtx io_ctx;
// first, we create a Rados object and initialize it
librados::Rados rados;
{
ret = rados.init("admin"); // just use the client.admin keyring
if (ret < 0) { // let's handle any error that might have come back
std::cerr << "couldn't initialize rados! error " << ret << std::endl;
ret = EXIT_FAILURE;
goto out;
}
std::cout << "we just set up a rados cluster object" << std::endl;
}
/*
* Now we need to get the rados object its config info. It can
* parse argv for us to find the id, monitors, etc, so let's just
* use that.
*/
{
ret = rados.conf_parse_argv(argc, argv);
if (ret < 0) {
// This really can't happen, but we need to check to be a good citizen.
std::cerr << "failed to parse config options! error " << ret << std::endl;
ret = EXIT_FAILURE;
goto out;
}
std::cout << "we just parsed our config options" << std::endl;
// We also want to apply the config file if the user specified
// one, and conf_parse_argv won't do that for us.
for (int i = 0; i < argc; ++i) {
if ((strcmp(argv[i], "-c") == 0) || (strcmp(argv[i], "--conf") == 0)) {
ret = rados.conf_read_file(argv[i+1]);
if (ret < 0) {
// This could fail if the config file is malformed, but it'd be hard.
std::cerr << "failed to parse config file " << argv[i+1]
<< "! error" << ret << std::endl;
ret = EXIT_FAILURE;
goto out;
}
break;
}
}
}
/*
* next, we actually connect to the cluster
*/
{
ret = rados.connect();
if (ret < 0) {
std::cerr << "couldn't connect to cluster! error " << ret << std::endl;
ret = EXIT_FAILURE;
goto out;
}
std::cout << "we just connected to the rados cluster" << std::endl;
}
/*
* let's create our own pool instead of scribbling over real data.
* Note that this command creates pools with default PG counts specified
* by the monitors, which may not be appropriate for real use -- it's fine
* for testing, though.
*/
{
ret = rados.pool_create(pool_name);
if (ret < 0) {
std::cerr << "couldn't create pool! error " << ret << std::endl;
return EXIT_FAILURE;
}
std::cout << "we just created a new pool named " << pool_name << std::endl;
}
/*
* create an "IoCtx" which is used to do IO to a pool
*/
{
ret = rados.ioctx_create(pool_name, io_ctx);
if (ret < 0) {
std::cerr << "couldn't set up ioctx! error " << ret << std::endl;
ret = EXIT_FAILURE;
goto out;
}
std::cout << "we just created an ioctx for our pool" << std::endl;
}
/*
* now let's do some IO to the pool! We'll write "hello world!" to a
* new object.
*/
{
/*
* "bufferlist"s are Ceph's native transfer type, and are carefully
* designed to be efficient about copying. You can fill them
* up from a lot of different data types, but strings or c strings
* are often convenient. Just make sure not to deallocate the memory
* until the bufferlist goes out of scope and any requests using it
* have been finished!
*/
librados::bufferlist bl;
bl.append(hello);
/*
* now that we have the data to write, let's send it to an object.
* We'll use the synchronous interface for simplicity.
*/
ret = io_ctx.write_full(object_name, bl);
if (ret < 0) {
std::cerr << "couldn't write object! error " << ret << std::endl;
ret = EXIT_FAILURE;
goto out;
}
std::cout << "we just wrote new object " << object_name
<< ", with contents\n" << hello << std::endl;
}
/*
* now let's read that object back! Just for fun, we'll do it using
* async IO instead of synchronous. (This would be more useful if we
* wanted to send off multiple reads at once; see
* http://docs.ceph.com/docs/master/rados/api/librados/#asychronous-io )
*/
{
librados::bufferlist read_buf;
int read_len = 4194304; // this is way more than we need
// allocate the completion from librados
librados::AioCompletion *read_completion = librados::Rados::aio_create_completion();
// send off the request.
ret = io_ctx.aio_read(object_name, read_completion, &read_buf, read_len, 0);
if (ret < 0) {
std::cerr << "couldn't start read object! error " << ret << std::endl;
ret = EXIT_FAILURE;
read_completion->release();
goto out;
}
// wait for the request to complete, and check that it succeeded.
read_completion->wait_for_complete();
ret = read_completion->get_return_value();
if (ret < 0) {
std::cerr << "couldn't read object! error " << ret << std::endl;
ret = EXIT_FAILURE;
read_completion->release();
goto out;
}
std::cout << "we read our object " << object_name
<< ", and got back " << ret << " bytes with contents\n";
std::string read_string;
read_buf.begin().copy(ret, read_string);
std::cout << read_string << std::endl;
read_completion->release();
}
/*
* We can also use xattrs that go alongside the object.
*/
{
librados::bufferlist version_bl;
version_bl.append('1');
ret = io_ctx.setxattr(object_name, "version", version_bl);
if (ret < 0) {
std::cerr << "failed to set xattr version entry! error "
<< ret << std::endl;
ret = EXIT_FAILURE;
goto out;
}
std::cout << "we set the xattr 'version' on our object!" << std::endl;
}
/*
* And if we want to be really cool, we can do multiple things in a single
* atomic operation. For instance, we can update the contents of our object
* and set the version at the same time.
*/
{
librados::bufferlist bl;
bl.append(hello);
bl.append("v2");
librados::ObjectWriteOperation write_op;
write_op.write_full(bl);
librados::bufferlist version_bl;
version_bl.append('2');
write_op.setxattr("version", version_bl);
ret = io_ctx.operate(object_name, &write_op);
if (ret < 0) {
std::cerr << "failed to do compound write! error " << ret << std::endl;
ret = EXIT_FAILURE;
goto out;
}
std::cout << "we overwrote our object " << object_name
<< " with contents\n" << bl.c_str() << std::endl;
}
/*
* And to be even cooler, we can make sure that the object looks the
* way we expect before doing the write! Notice how this attempt fails
* because the xattr differs.
*/
{
librados::ObjectWriteOperation failed_write_op;
librados::bufferlist bl;
bl.append(hello);
bl.append("v2");
librados::ObjectWriteOperation write_op;
write_op.write_full(bl);
librados::bufferlist version_bl;
version_bl.append('2');
librados::bufferlist old_version_bl;
old_version_bl.append('1');
failed_write_op.cmpxattr("version", LIBRADOS_CMPXATTR_OP_EQ, old_version_bl);
failed_write_op.write_full(bl);
failed_write_op.setxattr("version", version_bl);
ret = io_ctx.operate(object_name, &failed_write_op);
if (ret < 0) {
std::cout << "we just failed a write because the xattr wasn't as specified"
<< std::endl;
} else {
std::cerr << "we succeeded on writing despite an xattr comparison mismatch!"
<< std::endl;
ret = EXIT_FAILURE;
goto out;
}
/*
* Now let's do the update with the correct xattr values so it
* actually goes through
*/
bl.clear();
bl.append(hello);
bl.append("v3");
old_version_bl.clear();
old_version_bl.append('2');
version_bl.clear();
version_bl.append('3');
librados::ObjectWriteOperation update_op;
update_op.cmpxattr("version", LIBRADOS_CMPXATTR_OP_EQ, old_version_bl);
update_op.write_full(bl);
update_op.setxattr("version", version_bl);
ret = io_ctx.operate(object_name, &update_op);
if (ret < 0) {
std::cerr << "failed to do a compound write update! error " << ret
<< std::endl;
ret = EXIT_FAILURE;
goto out;
}
std::cout << "we overwrote our object " << object_name
<< " following an xattr test with contents\n" << bl.c_str()
<< std::endl;
}
ret = EXIT_SUCCESS;
out:
/*
* And now we're done, so let's remove our pool and then
* shut down the connection gracefully.
*/
int delete_ret = rados.pool_delete(pool_name);
if (delete_ret < 0) {
// be careful not to
std::cerr << "We failed to delete our test pool!" << std::endl;
ret = EXIT_FAILURE;
}
rados.shutdown();
return ret;
}
| 9,293 | 30.720137 | 88 | cc |
null | ceph-main/examples/librados/hello_world_c.c | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
* Copyright 2013 Inktank
*/
// install the librados-dev package to get this
#include <rados/librados.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
int main(int argc, const char **argv)
{
int ret = 0;
// we will use all of these below
const char *pool_name = "hello_world_pool";
const char* hello = "hello world!";
const char* object_name = "hello_object";
rados_ioctx_t io_ctx = NULL;
int pool_created = 0;
// first, we create a Rados object and initialize it
rados_t rados = NULL;
{
ret = rados_create(&rados, "admin"); // just use the client.admin keyring
if (ret < 0) { // let's handle any error that might have come back
printf("couldn't initialize rados! error %d\n", ret);
ret = EXIT_FAILURE;
goto out;
}
printf("we just set up a rados cluster object\n");
}
/*
* Now we need to get the rados object its config info. It can
* parse argv for us to find the id, monitors, etc, so let's just
* use that.
*/
{
ret = rados_conf_parse_argv(rados, argc, argv);
if (ret < 0) {
// This really can't happen, but we need to check to be a good citizen.
printf("failed to parse config options! error %d\n", ret);
ret = EXIT_FAILURE;
goto out;
}
printf("we just parsed our config options\n");
// We also want to apply the config file if the user specified
// one, and conf_parse_argv won't do that for us.
int i;
for (i = 0; i < argc; ++i) {
if ((strcmp(argv[i], "-c") == 0) || (strcmp(argv[i], "--conf") == 0)) {
ret = rados_conf_read_file(rados, argv[i+1]);
if (ret < 0) {
// This could fail if the config file is malformed, but it'd be hard.
printf("failed to parse config file %s! error %d\n", argv[i+1], ret);
ret = EXIT_FAILURE;
goto out;
}
break;
}
}
}
/*
* next, we actually connect to the cluster
*/
{
ret = rados_connect(rados);
if (ret < 0) {
printf("couldn't connect to cluster! error %d\n", ret);
ret = EXIT_FAILURE;
goto out;
}
printf("we just connected to the rados cluster\n");
}
/*
* let's create our own pool instead of scribbling over real data.
* Note that this command creates pools with default PG counts specified
* by the monitors, which may not be appropriate for real use -- it's fine
* for testing, though.
*/
{
ret = rados_pool_create(rados, pool_name);
if (ret < 0) {
printf("couldn't create pool! error %d\n", ret);
return EXIT_FAILURE;
}
printf("we just created a new pool named %s\n", pool_name);
pool_created = 1;
}
/*
* create an "IoCtx" which is used to do IO to a pool
*/
{
ret = rados_ioctx_create(rados, pool_name, &io_ctx);
if (ret < 0) {
printf("couldn't set up ioctx! error %d\n", ret);
ret = EXIT_FAILURE;
goto out;
}
printf("we just created an ioctx for our pool\n");
}
/*
* now let's do some IO to the pool! We'll write "hello world!" to a
* new object.
*/
{
/*
* now that we have the data to write, let's send it to an object.
* We'll use the synchronous interface for simplicity.
*/
ret = rados_write_full(io_ctx, object_name, hello, strlen(hello));
if (ret < 0) {
printf("couldn't write object! error %d\n", ret);
ret = EXIT_FAILURE;
goto out;
}
printf("we just wrote new object %s, with contents '%s'\n", object_name, hello);
}
/*
* now let's read that object back! Just for fun, we'll do it using
* async IO instead of synchronous. (This would be more useful if we
* wanted to send off multiple reads at once; see
* http://docs.ceph.com/docs/master/rados/api/librados/#asychronous-io )
*/
{
int read_len = 4194304; // this is way more than we need
char* read_buf = malloc(read_len + 1); // add one for the terminating 0 we'll add later
if (!read_buf) {
printf("couldn't allocate read buffer\n");
ret = EXIT_FAILURE;
goto out;
}
// allocate the completion from librados
rados_completion_t read_completion;
ret = rados_aio_create_completion2(NULL, NULL, &read_completion);
if (ret < 0) {
printf("couldn't create completion! error %d\n", ret);
ret = EXIT_FAILURE;
free(read_buf);
goto out;
}
printf("we just created a new completion\n");
// send off the request.
ret = rados_aio_read(io_ctx, object_name, read_completion, read_buf, read_len, 0);
if (ret < 0) {
printf("couldn't start read object! error %d\n", ret);
ret = EXIT_FAILURE;
free(read_buf);
rados_aio_release(read_completion);
goto out;
}
// wait for the request to complete, and check that it succeeded.
rados_aio_wait_for_complete(read_completion);
ret = rados_aio_get_return_value(read_completion);
if (ret < 0) {
printf("couldn't read object! error %d\n", ret);
ret = EXIT_FAILURE;
free(read_buf);
rados_aio_release(read_completion);
goto out;
}
read_buf[ret] = 0; // null-terminate the string
printf("we read our object %s, and got back %d bytes with contents\n%s\n", object_name, ret, read_buf);
free(read_buf);
rados_aio_release(read_completion);
}
/*
* We can also use xattrs that go alongside the object.
*/
{
const char* version = "1";
ret = rados_setxattr(io_ctx, object_name, "version", version, strlen(version));
if (ret < 0) {
printf("failed to set xattr version entry! error %d\n", ret);
ret = EXIT_FAILURE;
goto out;
}
printf("we set the xattr 'version' on our object!\n");
}
/*
* And if we want to be really cool, we can do multiple things in a single
* atomic operation. For instance, we can update the contents of our object
* and set the version at the same time.
*/
{
const char* content = "v2";
rados_write_op_t write_op = rados_create_write_op();
if (!write_op) {
printf("failed to allocate write op\n");
ret = EXIT_FAILURE;
goto out;
}
rados_write_op_write_full(write_op, content, strlen(content));
const char* version = "2";
rados_write_op_setxattr(write_op, "version", version, strlen(version));
ret = rados_write_op_operate(write_op, io_ctx, object_name, NULL, 0);
if (ret < 0) {
printf("failed to do compound write! error %d\n", ret);
ret = EXIT_FAILURE;
rados_release_write_op(write_op);
goto out;
}
printf("we overwrote our object %s with contents\n%s\n", object_name, content);
rados_release_write_op(write_op);
}
/*
* And to be even cooler, we can make sure that the object looks the
* way we expect before doing the write! Notice how this attempt fails
* because the xattr differs.
*/
{
rados_write_op_t failed_write_op = rados_create_write_op();
if (!failed_write_op) {
printf("failed to allocate write op\n");
ret = EXIT_FAILURE;
goto out;
}
const char* content = "v2";
const char* version = "2";
const char* old_version = "1";
rados_write_op_cmpxattr(failed_write_op, "version", LIBRADOS_CMPXATTR_OP_EQ, old_version, strlen(old_version));
rados_write_op_write_full(failed_write_op, content, strlen(content));
rados_write_op_setxattr(failed_write_op, "version", version, strlen(version));
ret = rados_write_op_operate(failed_write_op, io_ctx, object_name, NULL, 0);
if (ret < 0) {
printf("we just failed a write because the xattr wasn't as specified\n");
} else {
printf("we succeeded on writing despite an xattr comparison mismatch!\n");
ret = EXIT_FAILURE;
rados_release_write_op(failed_write_op);
goto out;
}
rados_release_write_op(failed_write_op);
/*
* Now let's do the update with the correct xattr values so it
* actually goes through
*/
content = "v3";
old_version = "2";
version = "3";
rados_write_op_t update_op = rados_create_write_op();
    if (!update_op) {
printf("failed to allocate write op\n");
ret = EXIT_FAILURE;
goto out;
}
rados_write_op_cmpxattr(update_op, "version", LIBRADOS_CMPXATTR_OP_EQ, old_version, strlen(old_version));
rados_write_op_write_full(update_op, content, strlen(content));
rados_write_op_setxattr(update_op, "version", version, strlen(version));
ret = rados_write_op_operate(update_op, io_ctx, object_name, NULL, 0);
if (ret < 0) {
printf("failed to do a compound write update! error %d\n", ret);
ret = EXIT_FAILURE;
rados_release_write_op(update_op);
goto out;
}
printf("we overwrote our object %s following an xattr test with contents\n%s\n", object_name, content);
rados_release_write_op(update_op);
}
ret = EXIT_SUCCESS;
out:
if (io_ctx) {
rados_ioctx_destroy(io_ctx);
}
if (pool_created) {
/*
* And now we're done, so let's remove our pool and then
* shut down the connection gracefully.
*/
int delete_ret = rados_pool_delete(rados, pool_name);
if (delete_ret < 0) {
// be careful not to
printf("We failed to delete our test pool!\n");
ret = EXIT_FAILURE;
}
}
rados_shutdown(rados);
return ret;
}
| 9,667 | 30.698361 | 115 | c |
null | ceph-main/examples/librbd/hello_world.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*/
// install the librados-dev and librbd package to get this
#include <rados/librados.hpp>
#include <rbd/librbd.hpp>
#include <iostream>
#include <string>
#include <sstream>
int main(int argc, const char **argv)
{
int ret = 0;
// we will use all of these below
const char *pool_name = "hello_world_pool";
std::string hello("hello world!");
std::string object_name("hello_object");
librados::IoCtx io_ctx;
// first, we create a Rados object and initialize it
librados::Rados rados;
{
ret = rados.init("admin"); // just use the client.admin keyring
if (ret < 0) { // let's handle any error that might have come back
std::cerr << "couldn't initialize rados! error " << ret << std::endl;
ret = EXIT_FAILURE;
goto out;
} else {
std::cout << "we just set up a rados cluster object" << std::endl;
}
}
/*
* Now we need to get the rados object its config info. It can
* parse argv for us to find the id, monitors, etc, so let's just
* use that.
*/
{
ret = rados.conf_parse_argv(argc, argv);
if (ret < 0) {
// This really can't happen, but we need to check to be a good citizen.
std::cerr << "failed to parse config options! error " << ret << std::endl;
ret = EXIT_FAILURE;
goto out;
} else {
std::cout << "we just parsed our config options" << std::endl;
// We also want to apply the config file if the user specified
// one, and conf_parse_argv won't do that for us.
for (int i = 0; i < argc; ++i) {
if ((strcmp(argv[i], "-c") == 0) || (strcmp(argv[i], "--conf") == 0)) {
ret = rados.conf_read_file(argv[i+1]);
if (ret < 0) {
// This could fail if the config file is malformed, but it'd be hard.
std::cerr << "failed to parse config file " << argv[i+1]
<< "! error" << ret << std::endl;
ret = EXIT_FAILURE;
goto out;
}
break;
}
}
}
}
/*
* next, we actually connect to the cluster
*/
{
ret = rados.connect();
if (ret < 0) {
std::cerr << "couldn't connect to cluster! error " << ret << std::endl;
ret = EXIT_FAILURE;
goto out;
} else {
std::cout << "we just connected to the rados cluster" << std::endl;
}
}
/*
* let's create our own pool instead of scribbling over real data.
* Note that this command creates pools with default PG counts specified
* by the monitors, which may not be appropriate for real use -- it's fine
* for testing, though.
*/
{
ret = rados.pool_create(pool_name);
if (ret < 0) {
std::cerr << "couldn't create pool! error " << ret << std::endl;
return EXIT_FAILURE;
} else {
std::cout << "we just created a new pool named " << pool_name << std::endl;
}
}
/*
* create an "IoCtx" which is used to do IO to a pool
*/
{
ret = rados.ioctx_create(pool_name, io_ctx);
if (ret < 0) {
std::cerr << "couldn't set up ioctx! error " << ret << std::endl;
ret = EXIT_FAILURE;
goto out;
} else {
std::cout << "we just created an ioctx for our pool" << std::endl;
}
}
/*
* create an rbd image and write data to it
*/
{
std::string name = "librbd_test";
uint64_t size = 2 << 20;
int order = 0;
librbd::RBD rbd;
librbd::Image image;
ret = rbd.create(io_ctx, name.c_str(), size, &order);
if (ret < 0) {
std::cerr << "couldn't create an rbd image! error " << ret << std::endl;
ret = EXIT_FAILURE;
goto out;
} else {
std::cout << "we just created an rbd image" << std::endl;
}
ret = rbd.open(io_ctx, image, name.c_str(), NULL);
if (ret < 0) {
std::cerr << "couldn't open the rbd image! error " << ret << std::endl;
ret = EXIT_FAILURE;
goto out;
} else {
std::cout << "we just opened the rbd image" << std::endl;
}
int TEST_IO_SIZE = 512;
char test_data[TEST_IO_SIZE + 1];
int i;
for (i = 0; i < TEST_IO_SIZE; ++i) {
test_data[i] = (char) (rand() % (126 - 33) + 33);
}
test_data[TEST_IO_SIZE] = '\0';
size_t len = strlen(test_data);
ceph::bufferlist bl;
bl.append(test_data, len);
ret = image.write(0, len, bl);
if (ret < 0) {
std::cerr << "couldn't write to the rbd image! error " << ret << std::endl;
ret = EXIT_FAILURE;
goto out;
} else {
std::cout << "we just wrote data to our rbd image " << std::endl;
}
/*
* let's read the image and compare it to the data we wrote
*/
ceph::bufferlist bl_r;
int read;
read = image.read(0, TEST_IO_SIZE, bl_r);
if (read < 0) {
std::cerr << "we couldn't read data from the image! error" << std::endl;
ret = EXIT_FAILURE;
goto out;
}
std::string bl_res(bl_r.c_str(), read);
int res = memcmp(bl_res.c_str(), test_data, TEST_IO_SIZE);
if (res != 0) {
std::cerr << "what we read didn't match expected! error" << std::endl;
} else {
std::cout << "we read our data on the image successfully" << std::endl;
}
image.close();
/*
*let's now delete the image
*/
ret = rbd.remove(io_ctx, name.c_str());
if (ret < 0) {
std::cerr << "failed to delete rbd image! error " << ret << std::endl;
ret = EXIT_FAILURE;
goto out;
} else {
std::cout << "we just deleted our rbd image " << std::endl;
}
}
ret = EXIT_SUCCESS;
out:
/*
* And now we're done, so let's remove our pool and then
* shut down the connection gracefully.
*/
int delete_ret = rados.pool_delete(pool_name);
if (delete_ret < 0) {
// be careful not to
std::cerr << "We failed to delete our test pool!" << std::endl;
ret = EXIT_FAILURE;
}
rados.shutdown();
return ret;
}
| 6,157 | 26.864253 | 81 | cc |
null | ceph-main/examples/rgw/rgw_admin_curl.sh | #!/usr/bin/env bash
show_help()
{
echo "Usage: `basename $0` -a <access-key> -s <secret-key>" \
"-e <rgw-endpoint> -r <http-request>" \
"-p <admin-resource> -q \"<http-query-string>\""
echo " -a Access key of rgw user"
echo " -s Secret key of rgw user"
echo " -e RGW endpoint in <ipaddr:port> format"
echo " -r HTTP request type GET/PUT/DELETE"
echo " -p RGW admin resource e.g user, bucket etc"
echo " -q HTTP query string"
echo " -j (Optional) Print output in pretty JSON format"
echo " Examples :"
echo " - To create rgw user"
echo " # `basename $0` -a ABCD1234EFGH5678IJ90" \
"-s klmnopqrstuvwxyz12345ABCD987654321efghij" \
"-e 10.0.0.1:8080 -r PUT -p user" \
"-q \"uid=admin&display-name=Administrator\""
echo " - To get rgw user info"
echo " # `basename $0` -a ABCD1234EFGH5678IJ90" \
"-s klmnopqrstuvwxyz12345ABCD987654321efghij" \
"-e 10.0.0.1:8080 -r GET -p user -q \"uid=admin\""
echo " - To list buckets"
echo " (List all buckets)"
echo " # `basename $0` -a ABCD1234EFGH5678IJ90" \
"-s klmnopqrstuvwxyz12345ABCD987654321efghij" \
"-e 10.0.0.1:8080 -r GET -p bucket"
echo " (For specific rgw user)"
echo " # `basename $0` -a ABCD1234EFGH5678IJ90" \
"-s klmnopqrstuvwxyz12345ABCD987654321efghij" \
"-e 10.0.0.1:8080 -r GET -p bucket -q \"uid=admin\""
echo " - To delete bucket"
echo " # `basename $0` -a ABCD1234EFGH5678IJ90" \
"-s klmnopqrstuvwxyz12345ABCD987654321efghij" \
"-e 10.0.0.1:8080 -r DELETE -p bucket -q \"bucket=foo\""
echo " - To delete rgw user"
echo " # `basename $0` -a ABCD1234EFGH5678IJ90" \
"-s klmnopqrstuvwxyz12345ABCD987654321efghij" \
"-e 10.0.0.1:8080 -r DELETE -p user -q \"uid=admin\""
exit 1
}
access_key=""
secret_key=""
rgw_endpoint=""
http_request=""
admin_resource=""
http_query=""
use_jq=false
while getopts "a:s:e:r:p:q:j" opt; do
case "$opt" in
a)
access_key=${OPTARG}
;;
s) secret_key=${OPTARG}
;;
e) rgw_endpoint=${OPTARG}
;;
r) http_request=${OPTARG}
;;
p) admin_resource=${OPTARG}
;;
q) http_query=${OPTARG}
;;
j) use_jq=true
;;
*)
show_help
exit 1
;;
esac
done
shift $((OPTIND-1))
if [ -z "${access_key}" ] || [ -z "${secret_key}" ] || \
[ -z "${rgw_endpoint}" ] || [ -z "${http_request}" ] || \
[ -z "${admin_resource}" ] || [ -z "${http_query}" ]; then
if [ "${http_request}" = "GET" ] && [ "${admin_resource}" = "bucket" ] && \
[ -z "${http_query}" ]; then
:
else
show_help
fi
fi
resource="/admin/${admin_resource}"
contentType="application/x-compressed-tar"
dateTime=`date -R -u`
# the empty line below stands in for the (unused) Content-MD5 field of the string to sign
headerToSign="${http_request}

${contentType}
${dateTime}
${resource}"
signature=`echo -en "$headerToSign" | \
openssl sha1 -hmac ${secret_key} -binary | base64`
if "$use_jq";
then
curl -X ${http_request} -H "Content-Type: ${contentType}" -H "Date: ${dateTime}" \
-H "Authorization: AWS ${access_key}:${signature}" -H "Host: ${rgw_endpoint}" \
"http://${rgw_endpoint}${resource}?${http_query}" 2> /dev/null|jq "."
else
curl -X ${http_request} -H "Content-Type: ${contentType}" -H "Date: ${dateTime}" \
-H "Authorization: AWS ${access_key}:${signature}" -H "Host: ${rgw_endpoint}" \
"http://${rgw_endpoint}${resource}?${http_query}"
fi
echo ""
| 3,813 | 32.752212 | 88 | sh |
null | ceph-main/examples/rgw/boto3/README.md | # Introduction
This directory contains examples of how to use the AWS CLI and boto3 to exercise the RadosGW extensions to the S3 API.
This is an extension to the [AWS SDK](https://github.com/boto/botocore/blob/develop/botocore/data/s3/2006-03-01/service-2.json).
# Users
For the standard client to support these extensions, the ``service-2.sdk-extras.json`` file should be placed under the ``~/.aws/models/s3/2006-03-01/`` directory.
For more information see [here](https://github.com/boto/botocore/blob/develop/botocore/loaders.py#L33).
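For example, assuming the commands are run from this directory, the file can be put in place with:
```
mkdir -p ~/.aws/models/s3/2006-03-01/
cp service-2.sdk-extras.json ~/.aws/models/s3/2006-03-01/
```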
## Python
The [boto3 client](https://boto3.amazonaws.com/v1/documentation/api/latest/index.html) can be used with these extensions; code samples exist in this directory.
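As a minimal sketch (assuming the extras file is installed as described above, that a bucket named ``mybucket`` already exists, and reusing the vstart endpoint and credentials from the other samples in this directory), an unordered listing could look like this:
```
import boto3
# endpoint and credentials taken from the vstart-based samples in this directory
client = boto3.client('s3',
                      endpoint_url='http://127.0.0.1:8000',
                      aws_access_key_id='0555b35654ad1656d804',
                      aws_secret_access_key='h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q==')
# 'AllowUnordered' is one of the extension parameters added by the extras file;
# a stock service model (without the extras) would reject it
response = client.list_objects(Bucket='mybucket', AllowUnordered=True)
for obj in response.get('Contents', []):
    print(obj['Key'])
```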
## AWS CLI
The standard [AWS CLI](https://docs.aws.amazon.com/cli/latest/) may also be used with these extensions. For example:
- Unordered listing:
```
aws --endpoint-url http://localhost:8000 s3api list-objects --bucket=mybucket --allow-unordered
```
- Unordered listing (version 2):
```
aws --endpoint-url http://localhost:8000 s3api list-objects-v2 --bucket=mybucket --allow-unordered
```
- Topic creation with endpoint:
```
aws --endpoint-url http://localhost:8000 sns create-topic --name=mytopic --attributes='{"push-endpoint": "amqp://localhost:5672", "amqp-exchange": "ex1", "amqp-ack-level": "broker"}'
```
Expected output:
```
{
"TopicArn": "arn:aws:sns:default::mytopic"
}
```
- Get topic attributes:
```
aws --endpoint-url http://localhost:8000 sns get-topic-attributes --topic-arn="arn:aws:sns:default::mytopic"
```
Expected output:
```
{
"Attributes": {
"User": "",
"Name": "mytopic",
"EndPoint": "{\"EndpointAddress\":\"amqp://localhost:5672\",\"EndpointArgs\":\"Attributes.entry.1.key=push-endpoint&Attributes.entry.1.value=amqp://localhost:5672&Attributes.entry.2.key=amqp-exchange&Attributes.entry.2.value=ex1&Attributes.entry.3.key=amqp-ack-level&Attributes.entry.3.value=broker&Version=2010-03-31&amqp-ack-level=broker&amqp-exchange=ex1&push-endpoint=amqp://localhost:5672\",\"EndpointTopic\":\"mytopic\",\"HasStoredSecret\":\"false\",\"Persistent\":\"false\"}",
"TopicArn": "arn:aws:sns:default::mytopic",
"OpaqueData": ""
}
}
```
- Bucket notifications with filtering extensions (bucket must exist before calling this command):
```
aws --region=default --endpoint-url http://localhost:8000 s3api put-bucket-notification-configuration --bucket=mybucket --notification-configuration='{"TopicConfigurations": [{"Id": "notif1", "TopicArn": "arn:aws:sns:default::mytopic", "Events": ["s3:ObjectCreated:*", "s3:ObjectRemoved:*"], "Filter": {"Metadata": {"FilterRules": [{"Name": "x-amz-meta-foo", "Value": "bar"}, {"Name": "x-amz-meta-hello", "Value": "world"}]}, "Key": {"FilterRules": [{"Name": "regex", "Value": "([a-z]+)"}]}}}]}'
```
- Get configuration of a specific notification of a bucket:
```
aws --endpoint-url http://localhost:8000 s3api get-bucket-notification-configuration --bucket=mybucket --notification=notif1
```
Expected output:
```
{
"TopicConfigurations": [
{
"Id": "notif1",
"TopicArn": "arn:aws:sns:default::mytopic",
"Events": [
"s3:ObjectCreated:*",
"s3:ObjectRemoved:*"
],
"Filter": {
"Key": {
"FilterRules": [
{
"Name": "regex",
"Value": "([a-z]+)"
}
]
},
"Metadata": {
"FilterRules": [
{
"Name": "x-amz-meta-foo",
"Value": "bar"
},
{
"Name": "x-amz-meta-hello",
"Value": "world"
}
]
}
}
}
]
}
```
# Developers
If you are developing an extension to the S3 API supported by AWS, please add it to ``service-2.sdk-extras.json`` (all extensions should go into the same file), so that boto3 can be used to test the new API.
In addition, Python files with code samples demonstrating the use of the new API should be added to this directory.
When testing your changes, please:
- make sure that the modified file is in the boto3 path as explained above
- make sure that the standard S3 test suite is not broken, even with the extensions file in the path
| 4,208 | 39.471154 | 495 | md |
null | ceph-main/examples/rgw/boto3/append_object.py | #!/usr/bin/python
from __future__ import print_function
import boto3
import sys
import json
def js_print(arg):
print(json.dumps(arg, indent=2))
if len(sys.argv) != 3:
print('Usage: ' + sys.argv[0] + ' <bucket> <key>')
sys.exit(1)
# bucket name as first argument
bucketname = sys.argv[1]
keyname = sys.argv[2]
# endpoint and keys from vstart
endpoint = 'http://127.0.0.1:8000'
access_key='0555b35654ad1656d804'
secret_key='h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q=='
client = boto3.client('s3',
endpoint_url=endpoint,
aws_access_key_id=access_key,
aws_secret_access_key=secret_key)
print('deleting object first')
js_print(client.delete_object(Bucket=bucketname, Key=keyname))
print('appending at position 0')
resp = client.put_object(Bucket=bucketname, Key=keyname,
Append=True,
AppendPosition=0,
Body='8letters')
js_print(resp)
append_pos = resp['AppendPosition']
print('appending at position %d' % append_pos)
js_print(client.put_object(Bucket=bucketname, Key=keyname,
Append=True,
AppendPosition=append_pos,
Body='8letters'))
| 1,241 | 27.883721 | 69 | py |
null | ceph-main/examples/rgw/boto3/delete_notification.py | #!/usr/bin/python
import boto3
import sys
if len(sys.argv) == 3:
# bucket name as first argument
bucketname = sys.argv[1]
# notification name as second argument
notification_name = sys.argv[2]
elif len(sys.argv) == 2:
# bucket name as first argument
bucketname = sys.argv[1]
notification_name = ""
else:
print('Usage: ' + sys.argv[0] + ' <bucket> [notification]')
sys.exit(1)
# endpoint and keys from vstart
endpoint = 'http://127.0.0.1:8000'
access_key='0555b35654ad1656d804'
secret_key='h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q=='
client = boto3.client('s3',
endpoint_url=endpoint,
aws_access_key_id=access_key,
aws_secret_access_key=secret_key)
# deleting a specific notification configuration from a bucket (when NotificationId is provided) or
# deleting all notification configurations on a bucket (without deleting the bucket itself) are extension to AWS S3 API
if notification_name == "":
print(client.delete_bucket_notification_configuration(Bucket=bucketname))
else:
print(client.delete_bucket_notification_configuration(Bucket=bucketname,
Notification=notification_name))
| 1,229 | 32.243243 | 119 | py |
null | ceph-main/examples/rgw/boto3/get_notification.py | #!/usr/bin/python
import boto3
import sys
if len(sys.argv) != 3:
print('Usage: ' + sys.argv[0] + ' <bucket> <notification>')
sys.exit(1)
# bucket name as first argument
bucketname = sys.argv[1]
# notification name as second argument
notification_name = sys.argv[2]
# endpoint and keys from vstart
endpoint = 'http://127.0.0.1:8000'
access_key='0555b35654ad1656d804'
secret_key='h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q=='
client = boto3.client('s3',
endpoint_url=endpoint,
aws_access_key_id=access_key,
aws_secret_access_key=secret_key)
# getting a specific notification configuration is an extension to AWS S3 API
print(client.get_bucket_notification_configuration(Bucket=bucketname,
Notification=notification_name))
| 826 | 27.517241 | 83 | py |
null | ceph-main/examples/rgw/boto3/get_usage_stats.py | #!/usr/bin/python
from __future__ import print_function
import boto3
import json
# endpoint and keys from vstart
endpoint = 'http://127.0.0.1:8000'
access_key='0555b35654ad1656d804'
secret_key='h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q=='
client = boto3.client('s3',
endpoint_url=endpoint,
aws_access_key_id=access_key,
aws_secret_access_key=secret_key)
print(json.dumps(client.get_usage_stats(), indent=2))
| 449 | 24 | 69 | py |
null | ceph-main/examples/rgw/boto3/list_unordered.py | #!/usr/bin/python
import boto3
import sys
if len(sys.argv) != 2:
print('Usage: ' + sys.argv[0] + ' <bucket>')
sys.exit(1)
# bucket name as first argument
bucketname = sys.argv[1]
# endpoint and keys from vstart
endpoint = 'http://127.0.0.1:8000'
access_key='0555b35654ad1656d804'
secret_key='h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q=='
client = boto3.client('s3',
endpoint_url=endpoint,
aws_access_key_id=access_key,
aws_secret_access_key=secret_key)
# getting an unordered list of objects is an extension to AWS S3 API
print(client.list_objects(Bucket=bucketname, AllowUnordered=True))
| 640 | 23.653846 | 69 | py |
null | ceph-main/examples/rgw/boto3/notification_filters.py | #!/usr/bin/python
import boto3
import sys
if len(sys.argv) != 4:
print('Usage: ' + sys.argv[0] + ' <bucket> <topic ARN> <notification Id>')
sys.exit(1)
# bucket name as first argument
bucketname = sys.argv[1]
# topic ARN as second argument
topic_arn = sys.argv[2]
# notification id as third argument
notification_id = sys.argv[3]
# endpoint and keys from vstart
endpoint = 'http://127.0.0.1:8000'
access_key='0555b35654ad1656d804'
secret_key='h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q=='
client = boto3.client('s3',
endpoint_url=endpoint,
aws_access_key_id=access_key,
aws_secret_access_key=secret_key)
# regex filter on the object name and metadata based filtering are extension to AWS S3 API
# bucket and topic should be created beforehand
topic_conf_list = [{'Id': notification_id,
'TopicArn': topic_arn,
'Events': ['s3:ObjectCreated:*', 's3:ObjectRemoved:*'],
'Filter': {
'Metadata': {
'FilterRules': [{'Name': 'x-amz-meta-foo', 'Value': 'bar'},
{'Name': 'x-amz-meta-hello', 'Value': 'world'}]
},
'Tags': {
'FilterRules': [{'Name': 'foo', 'Value': 'bar'},
{'Name': 'hello', 'Value': 'world'}]
},
'Key': {
'FilterRules': [{'Name': 'regex', 'Value': '([a-z]+)'}]
}
}}]
print(client.put_bucket_notification_configuration(Bucket=bucketname,
NotificationConfiguration={'TopicConfigurations': topic_conf_list}))
| 1,775 | 35.244898 | 119 | py |
null | ceph-main/examples/rgw/boto3/topic_attributes.py | import sys
import boto3
from pprint import pprint
if len(sys.argv) == 2:
# topic arn as first argument
topic_arn = sys.argv[1]
else:
print ('Usage: ' + sys.argv[0] + ' <topic arn>')
sys.exit(1)
# endpoint and keys from vstart
endpoint = 'http://127.0.0.1:8000'
access_key='0555b35654ad1656d804'
secret_key='h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q=='
# create an SNS client in order to fetch the attributes of a given topic
client = boto3.client('sns',
endpoint_url=endpoint,
aws_access_key_id=access_key,
aws_secret_access_key=secret_key)
# getting attributes of a specific topic is an extension to AWS sns
pprint(client.get_topic_attributes(TopicArn=topic_arn))
| 713 | 28.75 | 69 | py |
null | ceph-main/examples/rgw/boto3/topic_with_endpoint.py | #!/usr/bin/python
import boto3
from botocore.client import Config
import sys
if len(sys.argv) == 2:
# topic name as first argument
topic_name = sys.argv[1]
else:
print('Usage: ' + sys.argv[0] + ' <topic name> ')
sys.exit(1)
# endpoint and keys from vstart
endpoint = 'http://127.0.0.1:8000'
access_key='0555b35654ad1656d804'
secret_key='h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q=='
client = boto3.client('sns',
endpoint_url=endpoint,
aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
config=Config(signature_version='s3'))
# to see the list of available "regions" use:
# radosgw-admin realm zonegroup list
# this is standard AWS services call, using custom attributes to add AMQP endpoint information to the topic
attributes = {"push-endpoint": "amqp://localhost:5672", "amqp-exchange": "ex1", "amqp-ack-level": "broker"}
print(client.create_topic(Name=topic_name, Attributes=attributes))
| 970 | 30.322581 | 107 | py |
null | ceph-main/examples/rgw/golang/object-upload/README.md | # Introduction
This directory contains Golang code examples on how to upload an object to an S3 bucket running on a Ceph RGW cluster.
# Prerequisite
Linux machine running an RGW Ceph cluster. Preferably started with the ``OSD=1 MON=1 MDS=0 MGR=0 RGW=1 ../src/vstart.sh --debug --new`` command.
Go installed on the Linux machine.
## Workflow Procedure
1. Install AWS CLI version one on your Linux machine as explained [here](https://docs.aws.amazon.com/cli/v1/userguide/install-linux.html)
2. Create a bucket on the Ceph cluster with the command
```
aws --endpoint-url http://localhost:8000 s3 mb s3://sample-bucket
```
3. In your terminal, navigate through your file system to where the Golang example code exists.
4. Install the required Golang packages on the system
```
go mod init examples/object-upload/v2
go get github.com/aws/aws-sdk-go
go mod tidy
```
5. Run the Golang program as ``` go run object-upload.go -b sample-bucket -f fortuna.txt ``` in the terminal window to test out object upload to the Ceph RGW cluster.
| 1,025 | 45.636364 | 162 | md |
null | ceph-main/examples/rgw/golang/object-upload/object-upload.go | package main
import (
"flag"
"fmt"
"os"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/endpoints"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
)
func main() {
bucket := flag.String("b", "", "The name of the bucket")
filename := flag.String("f", "", "Complete file path to object to be uploaded")
flag.Parse()
if *bucket == "" {
fmt.Println("You must supply the name of the bucket")
fmt.Println("-b BUCKET")
return
}
if *filename == "" {
fmt.Println("You must supply the object to be uploaded")
fmt.Println("-f FILE/FILEPATH")
return
}
file, err := os.Open(*filename)
if err != nil {
exitErrorf("Unable to open file %q, %v", filename, err)
}
defer file.Close()
//Ceph RGW Cluster credentials
access_key := "0555b35654ad1656d804"
secret_key := "h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q=="
token_id := ""
url := "http://127.0.0.1:8000"
defaultResolver := endpoints.DefaultResolver()
s3CustResolverFn := func(service, region string, optFns ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) {
if service == "s3" {
return endpoints.ResolvedEndpoint{
URL: url,
}, nil
}
return defaultResolver.EndpointFor(service, region, optFns...)
}
sess := session.Must(session.NewSessionWithOptions(session.Options{
Config: aws.Config{
Region: aws.String("default"),
Credentials: credentials.NewStaticCredentials(access_key, secret_key, token_id),
S3ForcePathStyle: aws.Bool(true),
EndpointResolver: endpoints.ResolverFunc(s3CustResolverFn),
},
}))
uploader := s3manager.NewUploader(sess)
// Upload the file's body to S3 bucket as an object with the key being the
// same as the filename.
_, err = uploader.Upload(&s3manager.UploadInput{
Bucket: bucket,
Key: filename,
Body: file,
})
if err != nil {
exitErrorf("Unable to upload %q to %q, %v", *filename, *bucket, err)
}
fmt.Printf("Successfully uploaded %q to %q\n", *filename, *bucket)
}
func exitErrorf(msg string, args ...interface{}) {
fmt.Fprintf(os.Stderr, msg+"\n", args...)
os.Exit(1)
}
| 2,187 | 24.741176 | 123 | go |
null | ceph-main/examples/rgw/golang/put-bucket-notification-creation/README.md | # Introduction
This directory contains Golang code examples on how to create a bucket notification that publishes events from an S3 bucket on a Ceph RGW cluster to a topic.
# Prerequisite
Linux machine running an RGW Ceph cluster. Preferably started with the ``OSD=1 MON=1 MDS=0 MGR=0 RGW=1 ../src/vstart.sh --debug --new`` command.
Go installed on the Linux machine.
## Workflow Procedure
1. Install AWS CLI version one on your Linux machine as explained [here](https://docs.aws.amazon.com/cli/v1/userguide/install-linux.html)
2. Create a topic on the Ceph cluster with the command
```
aws --region default --endpoint-url http://localhost:8000 sns create-topic --name=sample-topic --attributes='{"push-endpoint": "http://localhost:10900"}'
```
3. Create a bucket to which the topic will be attached, with the command
```
aws --endpoint-url http://localhost:8000 s3 mb s3://sample-bucket
```
4. In your terminal, navigate through your file system to where the Golang example code exists.
5. Install the required Golang packages on the system.
```
go mod init examples/put-bucket-notification-creation/v2
go get github.com/aws/aws-sdk-go
go mod tidy
```
6. Run the Golang program as ``` go run put-bucket-notification-creation.go -b sample-bucket -t arn:aws:sns:default::sample-topic ``` on the terminal window to create the put bucket notification with the suffix filter rule.
7. Upload any jpg file you have to the bucket with the command
```
aws --endpoint-url http://localhost:8000 s3 cp your-jpg-file.jpg s3://sample-bucket
```
| 1,524 | 49.833333 | 223 | md |
null | ceph-main/examples/rgw/golang/put-bucket-notification-creation/put-bucket-notification-creation.go | package main
import (
"flag"
"fmt"
"os"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/endpoints"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
)
func main() {
bucket := flag.String("b", "", "Name of the bucket to add notification to")
topic := flag.String("t", "", "The topic onto which the notification is attached to")
flag.Parse()
if *bucket == "" {
fmt.Println("You must supply the name of the bucket")
fmt.Println("-b BUCKET")
return
}
if *topic == "" {
fmt.Println("You must supply the name of the topic ARN")
fmt.Println("-t TOPIC ARN")
return
}
//Ceph RGW Credentials
access_key := "0555b35654ad1656d804"
secret_key := "h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q=="
token_id := ""
url := "http://127.0.0.1:8000"
defaultResolver := endpoints.DefaultResolver()
CustResolverFn := func(service, region string, optFns ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) {
if service == "s3" {
return endpoints.ResolvedEndpoint{
URL: url,
}, nil
}
return defaultResolver.EndpointFor(service, region, optFns...)
}
sess := session.Must(session.NewSessionWithOptions(session.Options{
Config: aws.Config{
Region: aws.String("default"),
Credentials: credentials.NewStaticCredentials(access_key, secret_key, token_id),
S3ForcePathStyle: aws.Bool(true),
EndpointResolver: endpoints.ResolverFunc(CustResolverFn),
},
}))
svc := s3.New(sess)
suffixRule := []*s3.FilterRule{
{
Name: aws.String("suffix"),
Value: aws.String("jpg"),
},
}
input := &s3.PutBucketNotificationConfigurationInput{
Bucket: bucket,
NotificationConfiguration: &s3.NotificationConfiguration{
TopicConfigurations: []*s3.TopicConfiguration{
{
Events: []*string{aws.String("s3:ObjectCreated:*")},
Filter: &s3.NotificationConfigurationFilter{
Key: &s3.KeyFilter{
FilterRules: suffixRule,
},
},
Id: aws.String("notif1"), //Raises MalformedXML if absent
TopicArn: topic,
},
},
},
}
_, err := svc.PutBucketNotificationConfiguration(input)
if err != nil {
exitErrorf("Unable to create Put Bucket Notification because of %s", err)
}
fmt.Println("Put bucket notification added to ", *topic)
}
func exitErrorf(msg string, args ...interface{}) {
fmt.Fprintf(os.Stderr, msg+"\n", args...)
os.Exit(1)
}
| 2,465 | 24.42268 | 121 | go |
null | ceph-main/examples/rgw/golang/topic-creation/README.md | # Introduction
This directory contains a Golang code example on how to create an SNS topic on a Ceph RGW cluster.
# Prerequisite
Linux machine running an RGW Ceph cluster. Preferably started with the ``OSD=1 MON=1 MDS=0 MGR=0 RGW=1 ../src/vstart.sh --debug --new`` command.
Go installed on the Linux machine.
## Workflow Procedure
1. In your terminal, navigate through your file system to where the Golang example code exists.
2. Install the required Golang packages on the system.
```
go mod init examples/topic-creation/v2
go get github.com/aws/aws-sdk-go
go mod tidy
```
3. Run the Golang program as ``` go run topic-creation.go -t sample-topic-1 -a '{"push-endpoint": "http://127.0.0.1:10900"}' ``` in the terminal window to create an SNS topic with custom attributes.
| 771 | 44.411765 | 195 | md |
null | ceph-main/examples/rgw/golang/topic-creation/topic-creation.go | package main
import (
"encoding/json"
"flag"
"fmt"
"os"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/endpoints"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/sns"
)
func main() {
topic := flag.String("t", "", "The name of the topic")
attributes := flag.String("a", "", "Topic attributes needed")
flag.Parse()
attributesmap := map[string]*string{}
err := json.Unmarshal([]byte(*attributes), &attributesmap) // convert JSON string to Go map
if err != nil {
exitErrorf("Check your JSON String for any errors: %s : %s", err, *attributes)
}
if *topic == "" {
fmt.Println("You must supply the name of the topic")
fmt.Println("-t TOPIC")
return
}
if *attributes == "" {
fmt.Println("You must supply topic attributes")
fmt.Println("-a ATTRIBUTES")
return
}
//Ceph RGW Cluster credentials
access_key := "0555b35654ad1656d804"
secret_key := "h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q=="
token_id := ""
url := "http://127.0.0.1:8000"
defaultResolver := endpoints.DefaultResolver()
snsCustResolverFn := func(service, region string, optFns ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) {
if service == "sns" {
return endpoints.ResolvedEndpoint{
URL: url,
}, nil
}
return defaultResolver.EndpointFor(service, region, optFns...)
}
sess := session.Must(session.NewSessionWithOptions(session.Options{
Config: aws.Config{
Region: aws.String("default"),
Credentials: credentials.NewStaticCredentials(access_key, secret_key, token_id),
S3ForcePathStyle: aws.Bool(true),
EndpointResolver: endpoints.ResolverFunc(snsCustResolverFn),
},
}))
client := sns.New(sess)
results, err := client.CreateTopic(&sns.CreateTopicInput{
Attributes: attributesmap,
Name: topic,
})
if err != nil {
exitErrorf("Unable to create topic %s, %s", *topic, err)
}
fmt.Printf("Succesfully created %s \n", *results.TopicArn)
}
func exitErrorf(msg string, args ...interface{}) {
fmt.Fprintf(os.Stderr, msg+"\n", args...)
os.Exit(1)
}
| 2,133 | 25.02439 | 124 | go |
null | ceph-main/examples/rgw/java/ceph-s3-upload/README.md | # Introduction
This directory contains Java code examples on how to upload an object to an S3 bucket running on a Ceph RGW cluster.
# Prerequisites
Linux machine running an RGW Ceph cluster. Preferably started with the ``OSD=1 MON=1 MDS=0 MGR=0 RGW=1 ../src/vstart.sh --debug --new`` command.
Java and Maven installed on the Linux machine.
## Workflow Procedure
1. Install AWS CLI version 1 on your Linux machine as explained [here](https://docs.aws.amazon.com/cli/v1/userguide/install-linux.html)
2. Create a bucket on the Ceph cluster with the command
``
aws --endpoint-url http://localhost:8000 s3 mb s3://sample-bucket
``
3. Navigate through your file system into the ``ceph-s3-upload`` folder using your terminal. Please ensure you see the pom.xml file.
4. Run `` mvn clean package `` to install the required Java packages on the system.
5. Once successful, run `` java -jar target/ceph-s3-upload-1.0-SNAPSHOT-jar-with-dependencies.jar sample-bucket ceph-s3-upload.txt `` to test out Java S3 object upload on the Ceph RGW cluster.
| 1,041 | 60.294118 | 188 | md |
null | ceph-main/examples/rgw/java/ceph-s3-upload/src/main/java/org/example/cephs3upload/App.java | package org.example.cephs3upload;
import com.amazonaws.services.s3.model.AmazonS3Exception;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.client.builder.AwsClientBuilder;
import com.amazonaws.auth.AWSStaticCredentialsProvider;
import com.amazonaws.auth.BasicAWSCredentials;
import java.io.File;
import java.nio.file.Paths;
public class App
{
public static void main( String[] args )
{
final String USAGE = "\n" +
"To run this example, supply the name of an S3 bucket and a file to\n" +
"upload to it.\n" +
"\n" +
"Ex: java -jar target/ceph-s3-upload-1.0-SNAPSHOT-jar-with-dependencies.jar <bucketname> <filename>\n";
if (args.length < 2) {
System.out.println(USAGE);
System.exit(1);
}
String bucket_name = args[0];
String file_path = args[1];
String key_name = Paths.get(file_path).getFileName().toString();
System.out.format("Uploading %s to S3 bucket %s...\n", file_path, bucket_name);
// Put in the CEPH RGW access and secret keys here in that order "access key" "secret key"
// Must also be specified here
BasicAWSCredentials credentials = new BasicAWSCredentials("0555b35654ad1656d804","h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q==");
// Note That the AWSClient builder takes in the endpoint and the region
// This has to be specified in this file
final AmazonS3 s3 = AmazonS3ClientBuilder
.standard()
.withCredentials(new AWSStaticCredentialsProvider(credentials))
.withEndpointConfiguration(new AwsClientBuilder.EndpointConfiguration("http://127.0.0.1:8000", "default"))
.build();
try {
s3.putObject(bucket_name, key_name, new File(file_path));
} catch (AmazonS3Exception e) {
System.err.println(e.getMessage()); // raises more explicit error message than e.getErrorMessage() e.g when Bucket is not available
System.exit(1);
}
System.out.println("Object upload successful!");
}
}
| 2,294 | 43.134615 | 149 | java |