Dataset columns: repo (string, length 2–152) · file (string, length 15–239) · code (string, length 0–58.4M) · file_length (int64, 0–58.4M) · avg_line_length (float64, 0–1.81M) · max_line_length (int64, 0–12.7M) · extension_type (string, 364 classes)
spaCy · spaCy-master/spacy/lang/fr/punctuation.py

from ..char_classes import (
    ALPHA,
    ALPHA_LOWER,
    ALPHA_UPPER,
    CONCAT_QUOTES,
    CURRENCY,
    LIST_ELLIPSES,
    LIST_PUNCT,
    LIST_QUOTES,
    UNITS,
    merge_chars,
)
from ..punctuation import TOKENIZER_INFIXES, TOKENIZER_PREFIXES

ELISION = "' ’".replace(" ", "")
HYPHENS = r"- – — ‐ ‑".replace(" ", "")

_prefixes_elision = "d l n"
_prefixes_elision += " " + _prefixes_elision.upper()

_hyphen_suffixes = "ce clés elle en il ils je là moi nous on t vous"
_hyphen_suffixes += " " + _hyphen_suffixes.upper()

_prefixes = TOKENIZER_PREFIXES + [
    r"(?:({pe})[{el}])(?=[{a}])".format(
        a=ALPHA, el=ELISION, pe=merge_chars(_prefixes_elision)
    )
]

_suffixes = (
    LIST_PUNCT
    + LIST_ELLIPSES
    + LIST_QUOTES
    + [
        r"(?<=[0-9])\+",
        r"(?<=°[FfCcKk])\.",  # °C. -> ["°C", "."]
        r"(?<=[0-9])%",  # 4% -> ["4", "%"]
        r"(?<=[0-9])(?:{c})".format(c=CURRENCY),
        r"(?<=[0-9])(?:{u})".format(u=UNITS),
        r"(?<=[0-9{al}{e}(?:{q})])\.".format(
            al=ALPHA_LOWER, e=r"%²\-\+", q=CONCAT_QUOTES
        ),
        r"(?<=[{au}][{au}])\.".format(au=ALPHA_UPPER),
        r"(?<=[{a}])[{h}]({hs})".format(
            a=ALPHA, h=HYPHENS, hs=merge_chars(_hyphen_suffixes)
        ),
    ]
)

_infixes = TOKENIZER_INFIXES + [
    r"(?<=[{a}][{el}])(?=[{a}])".format(a=ALPHA, el=ELISION)
]

TOKENIZER_PREFIXES = _prefixes
TOKENIZER_SUFFIXES = _suffixes
TOKENIZER_INFIXES = _infixes

file_length: 1,452 · avg_line_length: 24.491228 · max_line_length: 68 · extension_type: py
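Annotation (not part of the dataset row above): spaCy's language defaults compile these lists into regexes via helpers in spacy.util. A minimal sketch, assuming a spaCy install where this module is importable, probing the elision infix that splits forms like "l'homme":

# Sketch: compile the French infix rules and locate the elision split point.
from spacy.util import compile_infix_regex
from spacy.lang.fr.punctuation import TOKENIZER_INFIXES

infix_re = compile_infix_regex(TOKENIZER_INFIXES)
# Zero-width match between "l'" and "homme"; the tokenizer splits here.
print([(m.start(), m.end()) for m in infix_re.finditer("l'homme")])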
spaCy · spaCy-master/spacy/lang/fr/stop_words.py

STOP_WORDS = set(
    """
a à â abord afin ah ai aie ainsi ait allaient allons alors anterieur anterieure anterieures
antérieur antérieure antérieures apres après as assez attendu au aupres auquel aura auraient
aurait auront aussi autre autrement autres autrui aux auxquelles auxquels avaient avais avait
avant avec avoir avons ayant

bas basee bat

c' c’ ça car ce ceci cela celle celle-ci celle-la celle-là celles celles-ci celles-la celles-là
celui celui-ci celui-la celui-là cent cependant certain certaine certaines certains certes ces
cet cette ceux ceux-ci ceux-là chacun chacune chaque chez ci cinq cinquantaine cinquante
cinquantième cinquième combien comme comment compris concernant

d' d’ da dans de debout dedans dehors deja dejà delà depuis derriere derrière des desormais
desquelles desquels dessous dessus deux deuxième deuxièmement devant devers devra different
differente differentes differents différent différente différentes différents dire directe
directement dit dite dits divers diverse diverses dix dix-huit dix-neuf dix-sept dixième doit
doivent donc dont douze douzième du duquel durant dès déja déjà désormais

effet egalement eh elle elle-meme elle-même elles elles-memes elles-mêmes en encore enfin entre
envers environ es ès est et etaient étaient etais étais etait était etant étant etc etre être
eu eux eux-mêmes exactement excepté également

fais faisaient faisant fait facon façon feront font

gens

ha hem hep hi ho hormis hors hou houp hue hui huit huitième hé

i il ils importe

j' j’ je jusqu jusque juste

l' l’ la laisser laquelle le lequel les lesquelles lesquels leur leurs longtemps lors lorsque
lui lui-meme lui-même là lès

m' m’ ma maint maintenant mais malgre malgré me meme memes merci mes mien mienne miennes miens
mille moi moi-meme moi-même moindres moins mon même mêmes

n' n’ na ne neanmoins neuvième ni nombreuses nombreux nos notamment notre nous nous-mêmes
nouveau nul néanmoins nôtre nôtres

o ô on ont onze onzième or ou ouias ouste outre ouvert ouverte ouverts où

par parce parfois parle parlent parler parmi partant pas pendant pense permet personne peu peut
peuvent peux plus plusieurs plutot plutôt possible possibles pour pourquoi pourrais pourrait
pouvait prealable precisement premier première premièrement pres procedant proche près
préalable précisement pu puis puisque

qu' qu’ quand quant quant-à-soi quarante quatorze quatre quatre-vingt quatrième quatrièmement
que quel quelconque quelle quelles quelqu'un quelque quelques quels qui quiconque quinze quoi
quoique

relative relativement rend rendre restant reste restent retour revoici revoila revoilà

s' s’ sa sait sans sauf se seize selon semblable semblaient semble semblent sent sept septième
sera seraient serait seront ses seul seule seulement seuls seules si sien sienne siennes siens
sinon six sixième soi soi-meme soi-même soit soixante son sont sous souvent specifique
specifiques spécifique spécifiques stop suffisant suffisante suffit suis suit suivant suivante
suivantes suivants suivre sur surtout

t' t’ ta tant te tel telle tellement telles tels tenant tend tenir tente tes tien tienne
tiennes tiens toi toi-meme toi-même ton touchant toujours tous tout toute toutes treize trente
tres trois troisième troisièmement très tu té

un une unes uns

va vais vas vers via vingt voici voila voilà vont vos votre votres vous vous-mêmes vu vé vôtre
vôtres

y
""".split()
)

file_length: 3,403 · avg_line_length: 39.047059 · max_line_length: 96 · extension_type: py
spaCy · spaCy-master/spacy/lang/fr/syntax_iterators.py

from typing import Iterator, Tuple, Union

from ...errors import Errors
from ...symbols import NOUN, PRON, PROPN
from ...tokens import Doc, Span


def noun_chunks(doclike: Union[Doc, Span]) -> Iterator[Tuple[int, int, int]]:
    """
    Detect base noun phrases from a dependency parse. Works on both Doc and Span.
    """
    labels = [
        "nsubj",
        "nsubj:pass",
        "obj",
        "obl",
        "obl:agent",
        "obl:arg",
        "obl:mod",
        "nmod",
        "pcomp",
        "appos",
        "ROOT",
    ]
    post_modifiers = ["flat", "flat:name", "flat:foreign", "fixed", "compound"]
    doc = doclike.doc  # Ensure works on both Doc and Span.
    if not doc.has_annotation("DEP"):
        raise ValueError(Errors.E029)
    np_deps = {doc.vocab.strings.add(label) for label in labels}
    np_modifs = {doc.vocab.strings.add(modifier) for modifier in post_modifiers}
    np_label = doc.vocab.strings.add("NP")
    adj_label = doc.vocab.strings.add("amod")
    det_label = doc.vocab.strings.add("det")
    det_pos = doc.vocab.strings.add("DET")
    adp_pos = doc.vocab.strings.add("ADP")
    conj_label = doc.vocab.strings.add("conj")
    conj_pos = doc.vocab.strings.add("CCONJ")
    prev_end = -1
    for i, word in enumerate(doclike):
        if word.pos not in (NOUN, PROPN, PRON):
            continue
        # Prevent nested chunks from being produced
        if word.left_edge.i <= prev_end:
            continue
        if word.dep in np_deps:
            right_childs = list(word.rights)
            right_child = right_childs[0] if right_childs else None
            if right_child:
                if (
                    right_child.dep == adj_label
                ):  # allow chain of adjectives by expanding to right
                    right_end = right_child.right_edge
                elif (
                    right_child.dep == det_label and right_child.pos == det_pos
                ):  # cut relative pronouns here
                    right_end = right_child
                elif right_child.dep in np_modifs:  # Check if we can expand to right
                    right_end = word.right_edge
                else:
                    right_end = word
            else:
                right_end = word
            prev_end = right_end.i
            left_index = word.left_edge.i
            left_index = left_index + 1 if word.left_edge.pos == adp_pos else left_index
            yield left_index, right_end.i + 1, np_label
        elif word.dep == conj_label:
            head = word.head
            while head.dep == conj_label and head.head.i < head.i:
                head = head.head
            # If the head is an NP, and we're coordinated to it, we're an NP
            if head.dep in np_deps:
                prev_end = word.i
                left_index = word.left_edge.i  # eliminate left attached conjunction
                left_index = (
                    left_index + 1 if word.left_edge.pos == conj_pos else left_index
                )
                yield left_index, word.i + 1, np_label


SYNTAX_ITERATORS = {"noun_chunks": noun_chunks}

file_length: 3,124 · avg_line_length: 35.337209 · max_line_length: 88 · extension_type: py
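Annotation: the iterator above is what backs doc.noun_chunks for French. A hedged usage sketch, assuming the fr_core_news_sm model has been downloaded (chunk boundaries depend on the parser's output):

import spacy

nlp = spacy.load("fr_core_news_sm")  # assumed installed via `spacy download`
doc = nlp("Le petit chat noir dort sur le canapé.")
for chunk in doc.noun_chunks:  # served by the noun_chunks iterator above
    print(chunk.text)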
spaCy · spaCy-master/spacy/lang/fr/tokenizer_exceptions.py

import re

from ...symbols import ORTH
from ...util import update_exc
from ..char_classes import ALPHA, ALPHA_LOWER
from ..tokenizer_exceptions import BASE_EXCEPTIONS
from .punctuation import ELISION, HYPHENS

# not using the large _tokenizer_exceptions_list by default as it slows down the tokenizer
# from ._tokenizer_exceptions_list import FR_BASE_EXCEPTIONS
FR_BASE_EXCEPTIONS = ["aujourd'hui", "Aujourd'hui"]


def upper_first_letter(text):
    if len(text) == 0:
        return text
    if len(text) == 1:
        return text.upper()
    return text[0].upper() + text[1:]


def lower_first_letter(text):
    if len(text) == 0:
        return text
    if len(text) == 1:
        return text.lower()
    return text[0].lower() + text[1:]


_exc = {"J.-C.": [{ORTH: "J."}, {ORTH: "-C."}]}

for exc_data in [
    {ORTH: "av."}, {ORTH: "janv."}, {ORTH: "févr."}, {ORTH: "avr."}, {ORTH: "juill."},
    {ORTH: "sept."}, {ORTH: "oct."}, {ORTH: "nov."}, {ORTH: "déc."}, {ORTH: "apr."},
    {ORTH: "Dr."}, {ORTH: "M."}, {ORTH: "Mr."}, {ORTH: "Mme."}, {ORTH: "Mlle."},
    {ORTH: "n°"}, {ORTH: "d°"}, {ORTH: "St."}, {ORTH: "Ste."},
]:
    _exc[exc_data[ORTH]] = [exc_data]

for orth in [
    "après-midi", "au-delà", "au-dessus", "celle-ci", "celles-ci", "celui-ci", "cf.",
    "ci-dessous", "elle-même", "en-dessous", "etc.", "jusque-là", "lui-même", "MM.",
    "No.", "peut-être", "pp.", "quelques-uns", "rendez-vous", "Vol.",
]:
    _exc[orth] = [{ORTH: orth}]

for verb in ["a", "est", "semble", "indique", "moque", "passe"]:
    for orth in [verb, verb.title()]:
        for pronoun in ["elle", "il", "on"]:
            token = f"{orth}-t-{pronoun}"
            _exc[token] = [{ORTH: orth}, {ORTH: "-t"}, {ORTH: "-" + pronoun}]

for verb in ["est"]:
    for orth in [verb, verb.title()]:
        _exc[f"{orth}-ce"] = [{ORTH: orth}, {ORTH: "-ce"}]

for pre in ["qu'", "n'"]:
    for orth in [pre, pre.title()]:
        _exc[f"{orth}est-ce"] = [{ORTH: orth}, {ORTH: "est"}, {ORTH: "-ce"}]

for verb, pronoun in [("est", "il"), ("EST", "IL")]:
    _exc[f"{verb}-{pronoun}"] = [{ORTH: verb}, {ORTH: "-" + pronoun}]

for s, verb, pronoun in [("s", "est", "il"), ("S", "EST", "IL")]:
    _exc[f"{s}'{verb}-{pronoun}"] = [
        {ORTH: s + "'"},
        {ORTH: verb},
        {ORTH: "-" + pronoun},
    ]

_infixes_exc = []  # type: ignore[var-annotated]
orig_elision = "'"
orig_hyphen = "-"

# loop through the elision and hyphen characters, and try to substitute the ones
# that weren't used in the original list
for infix in FR_BASE_EXCEPTIONS:
    variants_infix = {infix}
    for elision_char in [x for x in ELISION if x != orig_elision]:
        variants_infix.update(
            [word.replace(orig_elision, elision_char) for word in variants_infix]
        )
    for hyphen_char in [x for x in ["-", "‐"] if x != orig_hyphen]:
        variants_infix.update(
            [word.replace(orig_hyphen, hyphen_char) for word in variants_infix]
        )
    variants_infix.update([upper_first_letter(word) for word in variants_infix])
    _infixes_exc.extend(variants_infix)

for orth in _infixes_exc:
    _exc[orth] = [{ORTH: orth}]

_hyphen_prefix = [
    "a[ée]ro", "abat", "a[fg]ro", "after", "aigues?", "am[ée]ricano", "anglo", "anti",
    "apr[èe]s", "arabo", "arcs?", "archi", "arrières?", "audio", "avant", "avion",
    "auto", "banc", "bas(?:ses?)?", "bateaux?", "bec?", "belles?", "beau", "best",
    "bio?", "bien", "blanc", "bo[îi]te", "bonn?e?s?", "bois", "bou(?:c|rg)", "b[êe]ta",
    "cache", "cap(?:ello)?", "casse", "castel", "champ", "chapelle",
    "ch[âa]teau(?:neuf)?", "chasse", "cha(?:ud|t)e?s?", "chauffe", "chou", "chromo",
    "claire?s?", "co(?:de|ca)?", "compte", "contre", "cordon", "coupe?", "courte?s?",
    "couvre", "crash", "crise", "croche", "cross", "cyber", "côte", "demi",
    "di(?:sney)?", "dix", "d[ée]s?", "dys", "ex?", "émirato", "entre", "est", "ethno",
    "ex", "extra", "extrême", "[ée]co", "faux", "fil", "fort", "franco?s?", "gallo",
    "gardes?", "gastro", "grande?", "gratte", "gr[ée]co", "gros", "g[ée]o", "haute?s?",
    "homm?es?", "hors", "hyper", "indo", "infra", "inter", "intra", "islamo", "italo",
    "jean", "labio", "latino", "live", "lot", "louis", "m[ai]cro", "mal", "médio",
    "mesnil", "mi(?:ni)?", "mono", "mont?s?", "moyen", "multi", "m[ée]cano",
    "m[ée]dico", "m[ée]do", "m[ée]ta", "mots?", "neuro", "noix", "non", "nord",
    "notre", "n[ée]o", "ouest", "outre", "ouvre", "passe", "perce", "pharmaco",
    "ph[oy]to", "pieds?", "pique", "poissons?", "ponce", "pont", "po[rs]t", "pousse",
    "primo", "pro(?:cès|to)?", "pare", "petite?s?", "plessis", "porte", "pré",
    "prêchi", "protège", "pseudo", "pêle", "péri", "puy", "quasi", "quatre", "radio",
    "recourt", "rythmo", "(?:re)?doubles?", "r[ée]", "r[ée]tro", "requin", "sans?",
    "sa?inte?s?", "semi", "serre", "sino", "socio", "sociale?s?", "soixante", "sous",
    "su[bdrs]", "super", "taille", "tire", "thermo", "tiers", "tourne", "toute?s?",
    "tra[iî]ne?", "trans", "trente", "trois", "trousse", "tr(?:i|ou)", "t[ée]l[ée]",
    "utéro", "vaso", "vi[cd]e", "vid[ée]o", "vie(?:ux|i?lles?|i?l)",
    "vill(?:e|eneuve|ers|ette|iers|y)", "vingt", "voitures?", "wagons?", "ultra", "à",
    "[ée]lectro", "[ée]qui", "Fontaine", "La Chapelle", "Marie", "Le Mesnil",
    "Neuville", "Pierre", "Val", "Vaux",
]

_regular_exp = [
    "^a[{hyphen}]sexualis[{al}]+$".format(hyphen=HYPHENS, al=ALPHA_LOWER),
    "^arginine[{hyphen}]méthyl[{al}]+$".format(hyphen=HYPHENS, al=ALPHA_LOWER),
    "^binge[{hyphen}]watch[{al}]+$".format(hyphen=HYPHENS, al=ALPHA_LOWER),
    "^black[{hyphen}]out[{al}]*$".format(hyphen=HYPHENS, al=ALPHA_LOWER),
    "^bouche[{hyphen}]por[{al}]+$".format(hyphen=HYPHENS, al=ALPHA_LOWER),
    "^burn[{hyphen}]out[{al}]*$".format(hyphen=HYPHENS, al=ALPHA_LOWER),
    "^by[{hyphen}]pass[{al}]*$".format(hyphen=HYPHENS, al=ALPHA_LOWER),
    "^ch[{elision}]tiis[{al}]+$".format(elision=ELISION, al=ALPHA_LOWER),
    "^chape[{hyphen}]chut[{al}]+$".format(hyphen=HYPHENS, al=ALPHA_LOWER),
    "^down[{hyphen}]load[{al}]*$".format(hyphen=HYPHENS, al=ALPHA_LOWER),
    "^[ée]tats[{hyphen}]uni[{al}]*$".format(hyphen=HYPHENS, al=ALPHA_LOWER),
    "^droits?[{hyphen}]de[{hyphen}]l'homm[{al}]+$".format(
        hyphen=HYPHENS, al=ALPHA_LOWER
    ),
    "^fac[{hyphen}]simil[{al}]*$".format(hyphen=HYPHENS, al=ALPHA_LOWER),
    "^fleur[{hyphen}]bleuis[{al}]+$".format(hyphen=HYPHENS, al=ALPHA_LOWER),
    "^flic[{hyphen}]flaqu[{al}]+$".format(hyphen=HYPHENS, al=ALPHA_LOWER),
    "^fox[{hyphen}]trott[{al}]+$".format(hyphen=HYPHENS, al=ALPHA_LOWER),
    "^google[{hyphen}]is[{al}]+$".format(hyphen=HYPHENS, al=ALPHA_LOWER),
    "^hard[{hyphen}]discount[{al}]*$".format(hyphen=HYPHENS, al=ALPHA_LOWER),
    "^hip[{hyphen}]hop[{al}]*$".format(hyphen=HYPHENS, al=ALPHA_LOWER),
    "^jet[{hyphen}]set[{al}]*$".format(hyphen=HYPHENS, al=ALPHA_LOWER),
    "^knock[{hyphen}]out[{al}]*$".format(hyphen=HYPHENS, al=ALPHA_LOWER),
    "^lèche[{hyphen}]bott[{al}]+$".format(hyphen=HYPHENS, al=ALPHA_LOWER),
    "^litho[{hyphen}]typographi[{al}]+$".format(hyphen=HYPHENS, al=ALPHA_LOWER),
    "^lock[{hyphen}]out[{al}]*$".format(hyphen=HYPHENS, al=ALPHA_LOWER),
    "^lombri[{hyphen}]compost[{al}]+$".format(hyphen=HYPHENS, al=ALPHA_LOWER),
    "^mac[{hyphen}]adamis[{al}]+$".format(hyphen=HYPHENS, al=ALPHA_LOWER),
    "^marque[{hyphen}]pag[{al}]+$".format(hyphen=HYPHENS, al=ALPHA_LOWER),
    "^mouton[{hyphen}]noiris[{al}]+$".format(hyphen=HYPHENS, al=ALPHA_LOWER),
    "^new[{hyphen}]york[{al}]*$".format(hyphen=HYPHENS, al=ALPHA_LOWER),
    "^pair[{hyphen}]programm[{al}]+$".format(hyphen=HYPHENS, al=ALPHA_LOWER),
    "^people[{hyphen}]is[{al}]+$".format(hyphen=HYPHENS, al=ALPHA_LOWER),
    "^plan[{hyphen}]socialis[{al}]+$".format(hyphen=HYPHENS, al=ALPHA_LOWER),
    "^premier[{hyphen}]ministr[{al}]*$".format(hyphen=HYPHENS, al=ALPHA_LOWER),
    "^prud[{elision}]hom[{al}]+$".format(elision=ELISION, al=ALPHA_LOWER),
    "^réarc[{hyphen}]bout[{al}]+$".format(hyphen=HYPHENS, al=ALPHA_LOWER),
    "^refox[{hyphen}]trott[{al}]+$".format(hyphen=HYPHENS, al=ALPHA_LOWER),
    "^remicro[{hyphen}]ond[{al}]+$".format(hyphen=HYPHENS, al=ALPHA_LOWER),
    "^repique[{hyphen}]niqu[{al}]+$".format(hyphen=HYPHENS, al=ALPHA_LOWER),
    "^repetit[{hyphen}]déjeun[{al}]+$".format(hyphen=HYPHENS, al=ALPHA_LOWER),
    "^rick[{hyphen}]roll[{al}]*$".format(hyphen=HYPHENS, al=ALPHA_LOWER),
    "^rond[{hyphen}]ponn[{al}]+$".format(hyphen=HYPHENS, al=ALPHA_LOWER),
    "^shift[{hyphen}]cliqu[{al}]+$".format(hyphen=HYPHENS, al=ALPHA_LOWER),
    "^soudo[{hyphen}]bras[{al}]+$".format(hyphen=HYPHENS, al=ALPHA_LOWER),
    "^stabilo[{hyphen}]boss[{al}]+$".format(hyphen=HYPHENS, al=ALPHA_LOWER),
    "^strip[{hyphen}]teas[{al}]+$".format(hyphen=HYPHENS, al=ALPHA_LOWER),
    "^terra[{hyphen}]form[{al}]*$".format(hyphen=HYPHENS, al=ALPHA_LOWER),
    "^teuf[{hyphen}]teuf[{al}]*$".format(hyphen=HYPHENS, al=ALPHA_LOWER),
    "^yo[{hyphen}]yo[{al}]+$".format(hyphen=HYPHENS, al=ALPHA_LOWER),
    "^zig[{hyphen}]zag[{al}]*$".format(hyphen=HYPHENS, al=ALPHA_LOWER),
    "^z[{elision}]yeut[{al}]+$".format(elision=ELISION, al=ALPHA_LOWER),
]

# catching cases like faux-vampire
_regular_exp += [
    "^{prefix}[{hyphen}][{al}][{hyphen}{al}{elision}]*$".format(
        prefix=p,
        hyphen=HYPHENS,  # putting the - first in the [] range avoids having to use a backslash
        elision=ELISION,
        al=ALPHA_LOWER,
    )
    for p in _hyphen_prefix
]

# catching cases like entr'abat
_elision_prefix = ["r?é?entr", "grande?s?", "r"]
_regular_exp += [
    "^{prefix}[{elision}][{al}][{hyphen}{al}{elision}]*$".format(
        prefix=p, elision=ELISION, hyphen=HYPHENS, al=ALPHA_LOWER
    )
    for p in _elision_prefix
]

# catching cases like saut-de-ski, pet-en-l'air
_hyphen_combination = [
    "l[èe]s?", "la", "en", "des?", "d[eu]", "sur", "sous", "aux?", "à", "et", "près",
    "saint",
]
_regular_exp += [
    "^[{a}]+[{hyphen}]{hyphen_combo}[{hyphen}](?:l[{elision}])?[{a}]+$".format(
        hyphen_combo=hc, elision=ELISION, hyphen=HYPHENS, a=ALPHA
    )
    for hc in _hyphen_combination
]

TOKENIZER_EXCEPTIONS = update_exc(BASE_EXCEPTIONS, _exc)
TOKEN_MATCH = re.compile(
    "(?iu)" + "|".join("(?:{})".format(m) for m in _regular_exp)
).match

file_length: 11,174 · avg_line_length: 24.168919 · max_line_length: 118 · extension_type: py
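Annotation: TOKEN_MATCH is the compiled union of _regular_exp and keeps whole hyphenated or elided forms as single tokens. A quick sketch, assuming the module is importable from an installed spaCy:

from spacy.lang.fr.tokenizer_exceptions import TOKEN_MATCH

print(bool(TOKEN_MATCH("grande-synthe")))  # hyphen-prefix pattern "grande?"
print(bool(TOKEN_MATCH("entr'ouvert")))    # elision-prefix pattern "r?é?entr"
print(bool(TOKEN_MATCH("new-yorkais")))    # explicit new-york rule above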
spaCy · spaCy-master/spacy/lang/ga/__init__.py

from typing import Optional

from thinc.api import Model

from ...language import BaseDefaults, Language
from .lemmatizer import IrishLemmatizer
from .stop_words import STOP_WORDS
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS


class IrishDefaults(BaseDefaults):
    tokenizer_exceptions = TOKENIZER_EXCEPTIONS
    stop_words = STOP_WORDS


class Irish(Language):
    lang = "ga"
    Defaults = IrishDefaults


@Irish.factory(
    "lemmatizer",
    assigns=["token.lemma"],
    default_config={"model": None, "mode": "pos_lookup", "overwrite": False},
    default_score_weights={"lemma_acc": 1.0},
)
def make_lemmatizer(
    nlp: Language, model: Optional[Model], name: str, mode: str, overwrite: bool
):
    return IrishLemmatizer(nlp.vocab, model, name, mode=mode, overwrite=overwrite)


__all__ = ["Irish"]

file_length: 819 · avg_line_length: 23.117647 · max_line_length: 82 · extension_type: py
spaCy · spaCy-master/spacy/lang/ga/lemmatizer.py

from typing import Dict, List, Tuple

from ...pipeline import Lemmatizer
from ...tokens import Token


class IrishLemmatizer(Lemmatizer):
    # This is a lookup-based lemmatiser using data extracted from
    # BuNaMo (https://github.com/michmech/BuNaMo)

    @classmethod
    def get_lookups_config(cls, mode: str) -> Tuple[List[str], List[str]]:
        if mode == "pos_lookup":
            # fmt: off
            required = [
                "lemma_lookup_adj", "lemma_lookup_adp",
                "lemma_lookup_noun", "lemma_lookup_verb"
            ]
            # fmt: on
            return (required, [])
        else:
            return super().get_lookups_config(mode)

    def pos_lookup_lemmatize(self, token: Token) -> List[str]:
        univ_pos = token.pos_
        string = unponc(token.text)
        if univ_pos not in ["PROPN", "ADP", "ADJ", "NOUN", "VERB"]:
            return [string.lower()]
        demutated = demutate(string)
        secondary = ""
        if string[0:1].lower() == "h" and string[1:2].lower() in "aáeéiíoóuú":
            secondary = string[1:]
        lookup_pos = univ_pos.lower()
        if univ_pos == "PROPN":
            lookup_pos = "noun"
        if token.has_morph():
            # TODO: lookup is actually required for the genitive forms, but
            # this is not in BuNaMo, and would not be of use with IDT.
            if univ_pos == "NOUN" and (
                "VerbForm=Vnoun" in token.morph or "VerbForm=Inf" in token.morph
            ):
                hpref = "Form=HPref" in token.morph
                return [demutate(string, hpref).lower()]
            elif univ_pos == "ADJ" and "VerbForm=Part" in token.morph:
                return [demutate(string).lower()]
        lookup_table = self.lookups.get_table("lemma_lookup_" + lookup_pos, {})

        def to_list(value):
            if value is None:
                value = []
            elif not isinstance(value, list):
                value = [value]
            return value

        if univ_pos == "ADP":
            return to_list(lookup_table.get(string, string.lower()))
        ret = []
        if univ_pos == "PROPN":
            ret.extend(to_list(lookup_table.get(demutated)))
            ret.extend(to_list(lookup_table.get(secondary)))
        else:
            ret.extend(to_list(lookup_table.get(demutated.lower())))
            ret.extend(to_list(lookup_table.get(secondary.lower())))
        if len(ret) == 0:
            ret = [string.lower()]
        return ret


def demutate(word: str, is_hpref: bool = False) -> str:
    UVOWELS = "AÁEÉIÍOÓUÚ"
    LVOWELS = "aáeéiíoóuú"
    lc = word.lower()
    # remove eclipsis
    if lc.startswith("bhf"):
        word = word[2:]
    elif lc.startswith("mb"):
        word = word[1:]
    elif lc.startswith("gc"):
        word = word[1:]
    elif lc.startswith("nd"):
        word = word[1:]
    elif lc.startswith("ng"):
        word = word[1:]
    elif lc.startswith("bp"):
        word = word[1:]
    elif lc.startswith("dt"):
        word = word[1:]
    elif word[0:1] == "n" and word[1:2] in UVOWELS:
        word = word[1:]
    elif lc.startswith("n-") and word[2:3] in LVOWELS:
        word = word[2:]
    # non-standard eclipsis
    elif lc.startswith("bh-f"):
        word = word[3:]
    elif lc.startswith("m-b"):
        word = word[2:]
    elif lc.startswith("g-c"):
        word = word[2:]
    elif lc.startswith("n-d"):
        word = word[2:]
    elif lc.startswith("n-g"):
        word = word[2:]
    elif lc.startswith("b-p"):
        word = word[2:]
    elif lc.startswith("d-t"):
        word = word[2:]
    # t-prothesis
    elif lc.startswith("ts"):
        word = word[1:]
    elif lc.startswith("t-s"):
        word = word[2:]
    # h-prothesis, if known to be present
    elif is_hpref and word[0:1] == "h":
        word = word[1:]
    # h-prothesis, simple case
    # words can also begin with 'h', but unlike eclipsis,
    # a hyphen is not used, so that needs to be handled
    # elsewhere
    elif word[0:1] == "h" and word[1:2] in UVOWELS:
        word = word[1:]
    # lenition
    # this breaks the previous if, to handle super-non-standard
    # text where both eclipsis and lenition were used.
    if lc[0:1] in "bcdfgmpst" and lc[1:2] == "h":
        word = word[0:1] + word[2:]
    return word


def unponc(word: str) -> str:
    # fmt: off
    PONC = {
        "ḃ": "bh", "ċ": "ch", "ḋ": "dh", "ḟ": "fh", "ġ": "gh",
        "ṁ": "mh", "ṗ": "ph", "ṡ": "sh", "ṫ": "th",
        "Ḃ": "BH", "Ċ": "CH", "Ḋ": "DH", "Ḟ": "FH", "Ġ": "GH",
        "Ṁ": "MH", "Ṗ": "PH", "Ṡ": "SH", "Ṫ": "TH"
    }
    # fmt: on
    buf = []
    for ch in word:
        if ch in PONC:
            buf.append(PONC[ch])
        else:
            buf.append(ch)
    return "".join(buf)

file_length: 4,889 · avg_line_length: 29 · max_line_length: 80 · extension_type: py
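Annotation: demutate and unponc are plain module-level functions, so they can be exercised directly. A quick illustration, assuming the module is importable:

from spacy.lang.ga.lemmatizer import demutate, unponc

print(demutate("bhfuil"))                # eclipsed "bhf" -> "fuil"
print(demutate("gcarr"))                 # eclipsed "gc" -> "carr"
print(demutate("hataí", is_hpref=True))  # h-prothesis stripped -> "ataí"
print(unponc("ṡeaċtain"))                # dotted consonants -> "sheachtain"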
spaCy · spaCy-master/spacy/lang/ga/stop_words.py

STOP_WORDS = set(
    """
a ach ag agus an aon ar arna as ba beirt bhúr caoga ceathair ceathrar chomh chuig chun
cois céad cúig cúigear daichead dar de deich deichniúr den dhá do don dtí dá dár dó faoi
faoin faoina faoinár fara fiche gach gan go gur haon hocht i iad idir in ina ins inár is
le leis lena lenár mar mo muid mé na nach naoi naonúr ná ní níor nó nócha ocht ochtar
ochtó os roimh sa seacht seachtar seachtó seasca seisear siad sibh sinn sna sé sí tar
thar thú triúr trí trína trínár tríocha tú um ár é éis í ó ón óna ónár
""".split()
)

file_length: 567 · avg_line_length: 11.909091 · max_line_length: 66 · extension_type: py
spaCy · spaCy-master/spacy/lang/ga/tokenizer_exceptions.py

from ...symbols import NORM, ORTH
from ...util import update_exc
from ..tokenizer_exceptions import BASE_EXCEPTIONS

_exc = {
    "'acha'n": [{ORTH: "'ach", NORM: "gach"}, {ORTH: "a'n", NORM: "aon"}],
    "dem'": [{ORTH: "de", NORM: "de"}, {ORTH: "m'", NORM: "mo"}],
    "ded'": [{ORTH: "de", NORM: "de"}, {ORTH: "d'", NORM: "do"}],
    "lem'": [{ORTH: "le", NORM: "le"}, {ORTH: "m'", NORM: "mo"}],
    "led'": [{ORTH: "le", NORM: "le"}, {ORTH: "d'", NORM: "do"}],
    "théis": [{ORTH: "th", NORM: "tar"}, {ORTH: "éis", NORM: "éis"}],
    "tréis": [{ORTH: "tr", NORM: "tar"}, {ORTH: "éis", NORM: "éis"}],
}

for exc_data in [
    {ORTH: "'gus", NORM: "agus"},
    {ORTH: "'ach", NORM: "gach"},
    {ORTH: "ao'", NORM: "aon"},
    {ORTH: "'niar", NORM: "aniar"},
    {ORTH: "'níos", NORM: "aníos"},
    {ORTH: "'ndiu", NORM: "inniu"},
    {ORTH: "'nocht", NORM: "anocht"},
    {ORTH: "m'"},
    {ORTH: "Aib."}, {ORTH: "Ath."}, {ORTH: "Beal."}, {ORTH: "a.C.n."}, {ORTH: "m.sh."},
    {ORTH: "M.F."}, {ORTH: "M.Fómh."}, {ORTH: "D.F."}, {ORTH: "D.Fómh."}, {ORTH: "r.C."},
    {ORTH: "R.C."}, {ORTH: "r.Ch."}, {ORTH: "r.Chr."}, {ORTH: "R.Ch."}, {ORTH: "R.Chr."},
    {ORTH: "⁊rl."}, {ORTH: "srl."}, {ORTH: "Co."}, {ORTH: "Ean."}, {ORTH: "Feab."},
    {ORTH: "gCo."}, {ORTH: ".i."}, {ORTH: "B'"}, {ORTH: "b'"}, {ORTH: "lch."},
    {ORTH: "Lch."}, {ORTH: "lgh."}, {ORTH: "Lgh."}, {ORTH: "Lún."}, {ORTH: "Már."},
    {ORTH: "Meith."}, {ORTH: "Noll."}, {ORTH: "Samh."}, {ORTH: "tAth."}, {ORTH: "tUas."},
    {ORTH: "teo."}, {ORTH: "Teo."}, {ORTH: "Uas."}, {ORTH: "uimh."}, {ORTH: "Uimh."},
]:
    _exc[exc_data[ORTH]] = [exc_data]

for orth in ["d'", "D'"]:
    _exc[orth] = [{ORTH: orth}]

TOKENIZER_EXCEPTIONS = update_exc(BASE_EXCEPTIONS, _exc)

file_length: 1,868 · avg_line_length: 24.958333 · max_line_length: 74 · extension_type: py
spaCy · spaCy-master/spacy/lang/grc/__init__.py

from ...language import BaseDefaults, Language
from .lex_attrs import LEX_ATTRS
from .punctuation import TOKENIZER_INFIXES, TOKENIZER_PREFIXES, TOKENIZER_SUFFIXES
from .stop_words import STOP_WORDS
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS


class AncientGreekDefaults(BaseDefaults):
    tokenizer_exceptions = TOKENIZER_EXCEPTIONS
    prefixes = TOKENIZER_PREFIXES
    suffixes = TOKENIZER_SUFFIXES
    infixes = TOKENIZER_INFIXES
    lex_attr_getters = LEX_ATTRS
    stop_words = STOP_WORDS


class AncientGreek(Language):
    lang = "grc"
    Defaults = AncientGreekDefaults


__all__ = ["AncientGreek"]

file_length: 620 · avg_line_length: 26 · max_line_length: 82 · extension_type: py
spaCy · spaCy-master/spacy/lang/grc/examples.py

"""
Example sentences to test spaCy and its language models.

>>> from spacy.lang.grc.examples import sentences
>>> docs = nlp.pipe(sentences)
"""


sentences = [
    "ἐρᾷ μὲν ἁγνὸς οὐρανὸς τρῶσαι χθόνα, ἔρως δὲ γαῖαν λαμβάνει γάμου τυχεῖν·",
    "εὐδαίμων Χαρίτων καὶ Μελάνιππος ἔφυ, θείας ἁγητῆρες ἐφαμερίοις φιλότατος.",
    "ὃ μὲν δὴ ἀπόστολος ἐς τὴν Μίλητον ἦν.",
    "Θρασύβουλος δὲ σαφέως προπεπυσμένος πάντα λόγον καὶ εἰδὼς τὰ Ἀλυάττης μέλλοι ποιήσειν μηχανᾶται τοιάδε.",
    "φιλόπαις δ' ἦν ἐκμανῶς καὶ Ἀλέξανδρος ὁ βασιλεύς.",
    "Ἀντίγονος ὁ βασιλεὺς ἐπεκώμαζε τῷ Ζήνωνι",
    "αὐτὰρ ὃ δεύτατος ἦλθεν ἄναξ ἀνδρῶν Ἀγαμέμνων ἕλκος ἔχων",
]

file_length: 650 · avg_line_length: 35.166667 · max_line_length: 110 · extension_type: py
spaCy · spaCy-master/spacy/lang/grc/lex_attrs.py

from ...attrs import LIKE_NUM

_num_words = [
    # CARDINALS
    "εἷς", "ἑνός", "ἑνί", "ἕνα", "μία", "μιᾶς", "μιᾷ", "μίαν", "ἕν", "δύο", "δυοῖν",
    "τρεῖς", "τριῶν", "τρισί", "τρία", "τέτταρες", "τεττάρων", "τέτταρσι", "τέτταρα",
    "τέτταρας", "πέντε", "ἕξ", "ἑπτά", "ὀκτώ", "ἐννέα", "δέκα", "ἕνδεκα", "δώδεκα",
    "πεντεκαίδεκα", "ἑκκαίδεκα", "ἑπτακαίδεκα", "ὀκτωκαίδεκα", "ἐννεακαίδεκα",
    "εἴκοσι", "τριάκοντα", "τετταράκοντα", "πεντήκοντα", "ἑξήκοντα", "ἑβδομήκοντα",
    "ὀγδοήκοντα", "ἐνενήκοντα", "ἑκατόν",
    "διακόσιοι", "διακοσίων", "διακοσιᾶν", "διακοσίους", "διακοσίοις", "διακόσια",
    "διακόσιαι", "διακοσίαις", "διακοσίαισι", "διηκόσιοι", "διηκοσίων", "διηκοσιέων",
    "διακοσίας", "διηκόσια", "διηκόσιαι", "διηκοσίας",
    "τριακόσιοι", "τριακοσίων", "τριακοσιᾶν", "τριακοσίους", "τριακοσίοις",
    "τριακόσια", "τριακόσιαι", "τριακοσίαις", "τριακοσίαισι", "τριακοσιέων",
    "τριακοσίας", "τριηκόσια", "τριηκοσίας", "τριηκόσιοι", "τριηκοσίοισιν",
    "τριηκοσίους", "τριηκοσίων",
    "τετρακόσιοι", "τετρακοσίων", "τετρακοσιᾶν", "τετρακοσίους", "τετρακοσίοις",
    "τετρακόσια", "τετρακόσιαι", "τετρακοσίαις", "τετρακοσίαισι", "τετρακοσιέων",
    "τετρακοσίας",
    "πεντακόσιοι", "πεντακοσίων", "πεντακοσιᾶν", "πεντακοσίους", "πεντακοσίοις",
    "πεντακόσια", "πεντακόσιαι", "πεντακοσίαις", "πεντακοσίαισι", "πεντακοσιέων",
    "πεντακοσίας",
    "ἑξακόσιοι", "ἑξακοσίων", "ἑξακοσιᾶν", "ἑξακοσίους", "ἑξακοσίοις", "ἑξακόσια",
    "ἑξακόσιαι", "ἑξακοσίαις", "ἑξακοσίαισι", "ἑξακοσιέων", "ἑξακοσίας",
    "ἑπτακόσιοι", "ἑπτακοσίων", "ἑπτακοσιᾶν", "ἑπτακοσίους", "ἑπτακοσίοις",
    "ἑπτακόσια", "ἑπτακόσιαι", "ἑπτακοσίαις", "ἑπτακοσίαισι", "ἑπτακοσιέων",
    "ἑπτακοσίας",
    "ὀκτακόσιοι", "ὀκτακοσίων", "ὀκτακοσιᾶν", "ὀκτακοσίους", "ὀκτακοσίοις",
    "ὀκτακόσια", "ὀκτακόσιαι", "ὀκτακοσίαις", "ὀκτακοσίαισι", "ὀκτακοσιέων",
    "ὀκτακοσίας",
    "ἐνακόσιοι", "ἐνακοσίων", "ἐνακοσιᾶν", "ἐνακοσίους", "ἐνακοσίοις", "ἐνακόσια",
    "ἐνακόσιαι", "ἐνακοσίαις", "ἐνακοσίαισι", "ἐνακοσιέων", "ἐνακοσίας",
    "χίλιοι", "χιλίων", "χιλιῶν", "χιλίους", "χιλίοις", "χίλιαι", "χιλίας",
    "χιλίαις", "χίλια", "χίλι",
    "δισχίλιοι", "δισχιλίων", "δισχιλιῶν", "δισχιλίους", "δισχιλίοις", "δισχίλιαι",
    "δισχιλίας", "δισχιλίαις", "δισχίλια", "δισχίλι",
    "τρισχίλιοι", "τρισχιλίων", "τρισχιλιῶν", "τρισχιλίους", "τρισχιλίοις",
    "τρισχίλιαι", "τρισχιλίας", "τρισχιλίαις", "τρισχίλια", "τρισχίλι",
    "μύριοι", "μύριοί", "μυρίων", "μυρίοις", "μυρίους", "μύριαι", "μυρίαις",
    "μυρίας", "μύρια",
    "δισμύριοι", "δισμύριοί", "δισμυρίων", "δισμυρίοις", "δισμυρίους", "δισμύριαι",
    "δισμυρίαις", "δισμυρίας", "δισμύρια",
    "δεκακισμύριοι", "δεκακισμύριοί", "δεκακισμυρίων", "δεκακισμυρίοις",
    "δεκακισμυρίους", "δεκακισμύριαι", "δεκακισμυρίαις", "δεκακισμυρίας",
    "δεκακισμύρια",
    # ANCIENT GREEK NUMBERS (1-100)
    "α", "β", "γ", "δ", "ε", "ϛ", "ζ", "η", "θ", "ι",
    "ια", "ιβ", "ιγ", "ιδ", "ιε", "ιϛ", "ιζ", "ιη", "ιθ", "κ",
    "κα", "κβ", "κγ", "κδ", "κε", "κϛ", "κζ", "κη", "κθ", "λ",
    "λα", "λβ", "λγ", "λδ", "λε", "λϛ", "λζ", "λη", "λθ", "μ",
    "μα", "μβ", "μγ", "μδ", "με", "μϛ", "μζ", "μη", "μθ", "ν",
    "να", "νβ", "νγ", "νδ", "νε", "νϛ", "νζ", "νη", "νθ", "ξ",
    "ξα", "ξβ", "ξγ", "ξδ", "ξε", "ξϛ", "ξζ", "ξη", "ξθ", "ο",
    "οα", "οβ", "ογ", "οδ", "οε", "οϛ", "οζ", "οη", "οθ", "π",
    "πα", "πβ", "πγ", "πδ", "πε", "πϛ", "πζ", "πη", "πθ", "ϟ",
    "ϟα", "ϟβ", "ϟγ", "ϟδ", "ϟε", "ϟϛ", "ϟζ", "ϟη", "ϟθ", "ρ",
]


def like_num(text):
    if text.lower() in _num_words:
        return True
    return False


LEX_ATTRS = {LIKE_NUM: like_num}

file_length: 4,587 · avg_line_length: 13.611465 · max_line_length: 36 · extension_type: py
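Annotation: like_num accepts both spelled-out cardinals and the alphabetic Greek numerals listed above. For instance, assuming the module is importable:

from spacy.lang.grc.lex_attrs import like_num

print(like_num("δύο"))    # True: spelled-out cardinal
print(like_num("ιβ"))     # True: alphabetic numeral for 12
print(like_num("λόγος"))  # False: ordinary noun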
spaCy · spaCy-master/spacy/lang/grc/punctuation.py

from ..char_classes import (
    ALPHA,
    ALPHA_LOWER,
    ALPHA_UPPER,
    CONCAT_QUOTES,
    HYPHENS,
    LIST_CURRENCY,
    LIST_ELLIPSES,
    LIST_ICONS,
    LIST_PUNCT,
    LIST_QUOTES,
)

_prefixes = (
    [
        "†",
        "⸏",
    ]
    + LIST_PUNCT
    + LIST_ELLIPSES
    + LIST_QUOTES
    + LIST_CURRENCY
    + LIST_ICONS
)

_suffixes = (
    LIST_PUNCT
    + LIST_ELLIPSES
    + LIST_QUOTES
    + LIST_ICONS
    + [
        "†",
        "⸎",
        r"(?<=[\u1F00-\u1FFF\u0370-\u03FF])[\-\.⸏]",
    ]
)

_infixes = (
    LIST_ELLIPSES
    + LIST_ICONS
    + [
        r"(?<=[0-9])[+\-\*^](?=[0-9-])",
        r"(?<=[{al}{q}])\.(?=[{au}{q}])".format(
            al=ALPHA_LOWER, au=ALPHA_UPPER, q=CONCAT_QUOTES
        ),
        r"(?<=[{a}]),(?=[{a}])".format(a=ALPHA),
        r"(?<=[{a}0-9])(?:{h})(?=[{a}])".format(a=ALPHA, h=HYPHENS),
        r"(?<=[{a}0-9])[:<>=/](?=[{a}])".format(a=ALPHA),
        r"(?<=[\u1F00-\u1FFF\u0370-\u03FF])—",
    ]
)

TOKENIZER_PREFIXES = _prefixes
TOKENIZER_SUFFIXES = _suffixes
TOKENIZER_INFIXES = _infixes

file_length: 1,063 · avg_line_length: 18 · max_line_length: 68 · extension_type: py
spaCy · spaCy-master/spacy/lang/grc/stop_words.py

STOP_WORDS = set(
    """
αὐτῷ αὐτοῦ αὐτῆς αὐτόν αὐτὸν αὐτῶν αὐτὸς αὐτὸ αὐτό αὐτός αὐτὴν αὐτοῖς αὐτοὺς αὔτ' αὐτὰ αὐτῇ
αὐτὴ αὐτὼ αὑταὶ καὐτὸς αὐτά αὑτός αὐτοῖσι αὐτοῖσιν αὑτὸς αὐτήν αὐτοῖσί αὐτοί αὐτοὶ αὐτοῖο
αὐτάων αὐτὰς αὐτέων αὐτώ αὐτάς αὐτούς αὐτή αὐταί αὐταὶ αὐτῇσιν τὠυτῷ τὠυτὸ ταὐτὰ ταύτῃ αὐτῇσι
αὐτῇς αὐταῖς αὐτᾶς αὐτὰν ταὐτὸν

γε γ' γέ

γὰρ γάρ

δαῖτα δαιτὸς δαιτὶ δαὶ δαιτί δαῖτ' δαΐδας δαΐδων

δἰ διὰ διά

δὲ δ' δέ

δὴ δή

εἰ εἴ κεἰ κεἴ αἴ αἲ εἲ αἰ

ἐστί ἐστιν ὢν ἦν ἐστὶν ὦσιν εἶναι ὄντι εἰσιν ἐστι ὄντα οὖσαν ἦσαν ἔστι ὄντας ἐστὲ εἰσὶ εἶ ὤν ἦ
οὖσαι ἔσται ἐσμὲν ἐστ' ἐστίν ἔστ' ὦ ἔσει ἦμεν εἰμι εἰσὶν ἦσθ' ἐστὶ ᾖ οὖσ' ἔστιν εἰμὶ εἴμ' ἐσθ'
ᾖς στί εἴην εἶναί οὖσα κἄστ' εἴη ἦσθα εἰμ' ἔστω ὄντ' ἔσθ' ἔμμεναι ἔω ἐὼν ἐσσι ἔσσεται ἐστὸν
ἔσαν ἔστων ἐόντα ἦεν ἐοῦσαν ἔην ἔσσομαι εἰσί ἐστόν ἔσκεν ἐόντ' ἐών ἔσσεσθ' εἰσ' ἐόντες ἐόντε
ἐσσεῖται εἰμεν ἔασιν ἔσκε ἔμεναι ἔσεσθαι ἔῃ εἰμὲν εἰσι ἐόντας ἔστε εἰς ἦτε εἰμί ἔσσεαι ἔμμεν
ἐοῦσα ἔμεν ᾖσιν ἐστε ἐόντι εἶεν ἔσσονται ἔησθα ἔσεσθε ἐσσί ἐοῦσ' ἔασι ἔα ἦα ἐόν ἔσσεσθαι
ἔσομαι ἔσκον εἴης ἔωσιν εἴησαν ἐὸν ἐουσέων ἔσσῃ ἐούσης ἔσονται ἐούσας ἐόντων ἐόντος ἐσομένην
ἔστωσαν ἔωσι ἔας ἐοῦσαι ἣν εἰσίν ἤστην ὄντες ὄντων οὔσας οὔσαις ὄντος οὖσι οὔσης ἔσῃ ὂν ἐσμεν
ἐσμέν οὖσιν ἐσομένους ἐσσόμεσθα

ἒς ἐς ἔς ἐν κεἰς εἲς κἀν ἔν

κατὰ κατ' καθ' κατά κάτα κὰπ κὰκ κὰδ κὰρ κάρ κὰγ κὰμ

καὶ καί

μετὰ μεθ' μετ' μέτα μετά μέθ' μέτ'

μὲν μέν

μὴ μή μη

οὐκ οὒ οὐ οὐχ οὐχὶ κοὐ κοὐχ οὔ κοὐκ οὐχί οὐκὶ

οὐδὲν οὐδεὶς οὐδέν κοὐδεὶς κοὐδὲν οὐδένα οὐδενὸς οὐδέν' οὐδενός οὐδενὶ οὐδεμία οὐδείς οὐδεμίαν

οὐδὲ οὐδ' κοὐδ' οὐδέ

οὔτε οὔθ' οὔτέ τε οὔτ'

οὕτως οὕτω οὕτῶ χοὔτως

οὖν ὦν ὧν

τοῦτο τοῦθ' τοῦτον τούτῳ τούτοις ταύτας αὕτη ταῦτα οὗτος ταύτης ταύτην τούτων ταῦτ' τοῦτ'
τούτου αὗται τούτους τοῦτό ταῦτά τούτοισι χαὔτη ταῦθ' χοὖτοι τούτοισιν οὗτός οὗτοι τούτω
τουτέων τοῦτὸν οὗτοί τοῦτου οὗτοὶ ταύτῃσι ταύταις ταυτὶ

παρὰ παρ' πάρα παρά πὰρ παραὶ πάρ'

περὶ πέρι περί

πρὸς πρός ποτ' ποτὶ προτὶ προτί πότι

σὸς σήν σὴν σὸν σόν σὰ σῶν σοῖσιν σός σῆς σῷ σαῖς σῇ σοῖς σοῦ σ' σὰν σά σὴ σὰς σᾷ σοὺς σούς
σοῖσι σῇς σῇσι σή σῇσιν

σοὶ σου ὑμεῖς σὲ σύ σοι ὑμᾶς ὑμῶν ὑμῖν σε σέ σὺ σέθεν σοί ὑμὶν σφῷν ὑμίν τοι τοὶ σφὼ ὔμμ'
σφῶϊ σεῖο τ' σφῶϊν ὔμμιν σέο σευ σεῦ ὔμμι ὑμέων τύνη ὑμείων τοί ὔμμες σεο τέ τεοῖο ὑμέας

σὺν ξὺν σύν

θ'

τί τι τις τινες τινα τινος τινὸς τινὶ τινῶν τίς τίνες τινὰς τιν' τῳ του τίνα τοῦ τῷ τινί τινά
τίνος τινι τινας τινὰ τινων τίν' τευ τέο τινές τεο τινὲς τεῷ τέῳ τινός τεῳ τισὶ

τοιαῦτα τοιοῦτον τοιοῦθ' τοιοῦτος τοιαύτην τοιαῦτ' τοιούτου τοιαῦθ' τοιαύτῃ τοιούτοις τοιαῦται
τοιαῦτά τοιαύτη τοιοῦτοι τοιούτων τοιούτοισι τοιοῦτο τοιούτους τοιούτῳ τοιαύτης τοιαύταις
τοιαύτας τοιοῦτός

τίνι τοῖσι τίνων τέων τέοισί

τὰ τῇ τώ τὼ

ἀλλὰ ἀλλ' ἀλλά

ἀπ' ἀπὸ κἀπ' ἀφ' τἀπὸ κἀφ' ἄπο ἀπό τὠπὸ τἀπ'

ἄλλων ἄλλῳ ἄλλη ἄλλης ἄλλους ἄλλοις ἄλλον ἄλλο ἄλλου τἄλλα ἄλλα ἄλλᾳ ἄλλοισιν τἄλλ' ἄλλ'
ἄλλος ἄλλοισι κἄλλ' ἄλλοι ἄλλῃσι ἄλλόν ἄλλην ἄλλά ἄλλαι ἄλλοισίν ὧλλοι ἄλλῃ ἄλλας ἀλλέων
τἆλλα ἄλλως ἀλλάων ἄλλαις τἆλλ'

ἂν ἄν κἂν τἂν ἃν κεν κ' κέν κέ κε χ'

ἄρα τἄρα ἄρ' τἄρ' ἄρ ῥα ῥά ῥ τὰρ ἄρά ἂρ

ἡμᾶς με ἐγὼ ἐμὲ μοι κἀγὼ ἡμῶν ἡμεῖς ἐμοὶ ἔγωγ' ἁμοὶ ἡμῖν μ' ἔγωγέ ἐγώ ἐμοί ἐμοῦ κἀμοῦ ἔμ'
κἀμὲ ἡμὶν μου ἐμέ ἔγωγε νῷν νὼ χἠμεῖς ἁμὲ κἀγώ κἀμοὶ χἠμᾶς ἁγὼ ἡμίν κἄμ' ἔμοιγ' μοί τοὐμὲ
ἄμμε ἐγὼν ἐμεῦ ἐμεῖο μευ ἔμοιγε ἄμμι μέ ἡμέας νῶϊ ἄμμιν ἧμιν ἐγών νῶΐ ἐμέθεν ἥμιν ἄμμες νῶι
ἡμείων ἄμμ' ἡμέων ἐμέο

ἐκ ἔκ ἐξ κἀκ κ ἃκ κἀξ ἔξ εξ Ἐκ

τἀμὰ ἐμοῖς τοὐμόν ἐμᾶς τοὐμὸν ἐμῶν ἐμὸς ἐμῆς ἐμῷ τὠμῷ ἐμὸν τἄμ' ἐμὴ ἐμὰς ἐμαῖς ἐμὴν ἐμόν
ἐμὰ ἐμός ἐμοὺς ἐμῇ ἐμᾷ οὑμὸς ἐμοῖν οὑμός κἀμὸν ἐμαὶ ἐμή ἐμάς ἐμοῖσι ἐμοῖσιν ἐμῇσιν ἐμῇσι
ἐμῇς ἐμήν

ἔνι ἐνὶ εἰνὶ εἰν ἐμ

ἐπὶ ἐπ' ἔπι ἐφ' κἀπὶ τἀπὶ ἐπί ἔφ' ἔπ'

ἐὰν ἢν ἐάν ἤν ἄνπερ

αὑτοῖς αὑτὸν αὑτῷ ἑαυτοῦ αὑτόν αὑτῆς αὑτῶν αὑτοῦ αὑτὴν αὑτοῖν χαὐτοῦ αὑταῖς ἑωυτοῦ ἑωυτῇ
ἑωυτὸν ἐωυτῷ ἑωυτῆς ἑωυτόν ἑωυτῷ ἑωυτάς ἑωυτῶν ἑωυτοὺς ἑωυτοῖσι ἑαυτῇ ἑαυτούς αὑτοὺς ἑαυτῶν
ἑαυτοὺς ἑαυτὸν ἑαυτῷ ἑαυτοῖς ἑαυτὴν ἑαυτῆς

ἔτι ἔτ' ἔθ' κἄτι

ἢ ἤ ἠέ ἠὲ ἦε ἦέ

ἡ τοὺς τὴν τὸ τῶν τὸν ὁ ἁ οἱ τοῖς ταῖς τῆς τὰς αἱ τό τὰν τᾶς τοῖσιν αἳ χὠ τήν τά τοῖν τάς ὅ
χοἰ ἣ ἥ χἠ τάν τᾶν ὃ οἳ οἵ τοῖο τόν τοῖιν τούς τάων ταὶ τῇς τῇσι τῇσιν αἵ τοῖό τοῖσίν ὅττί
ταί Τὴν τῆ τῶ

τάδε ὅδε τοῦδε τόδε τόνδ' τάδ' τῆσδε τῷδε ὅδ' τῶνδ' τῇδ' τοῦδέ τῶνδε τόνδε τόδ' τοῦδ' τάσδε
τήνδε τάσδ' τήνδ' ταῖσδέ τῇδε τῆσδ' τάνδ' τῷδ' τάνδε ἅδε τοῖσδ' ἥδ' τᾷδέ τοῖσδε τούσδ' ἥδε
τούσδε τώδ' ἅδ' οἵδ' τῶνδέ οἵδε τᾷδε τοῖσδεσσι τώδε τῇδέ τοῖσιδε αἵδε τοῦδὲ τῆδ' αἵδ'
τοῖσδεσι

ὃν ἃ ὃς ᾧ οὗ ἅπερ οὓς ἧς οἷς ἅσπερ ᾗ ἅ χὦνπερ ὣ αἷς ᾇ ὅς ἥπερ ἃς ὅσπερ ὅνπερ ὧνπερ ᾧπερ ὅν
αἷν οἷσι ἇς ἅς ὥ οὕς ἥν οἷσιν ἕης ὅου ᾗς οἷσί οἷσίν τοῖσί ᾗσιν οἵπερ αἷσπερ

ὅστις ἥτις ὅτου ὅτοισι ἥντιν' ὅτῳ ὅντιν' ὅττι ἅσσά ὅτεῳ ὅτις ὅτιν' ὅτευ ἥντινα αἵτινές
ὅντινα ἅσσα ᾧτινι οἵτινες ὅτι ἅτις ὅτ'

ὑμὴ ὑμήν ὑμὸν

ὑπὲρ ὕπερ ὑπέρτερον ὑπεὶρ ὑπέρτατος

ὑπὸ ὑπ' ὑφ' ὕπο ὑπαὶ ὑπό ὕπ' ὕφ'

ὣς ὡς ὥς ὧς

ὥστ' ὥστε ὥσθ'

ὤ ὢ
""".split()
)

file_length: 4,757 · avg_line_length: 75.741935 · max_line_length: 157 · extension_type: py
spaCy · spaCy-master/spacy/lang/grc/tokenizer_exceptions.py

from ...symbols import NORM, ORTH
from ...util import update_exc
from ..tokenizer_exceptions import BASE_EXCEPTIONS

_exc = {}

for token in ["᾽Απ'", "᾽ΑΠ'", "ἀφ'", "᾽Αφ", "ἀπὸ"]:
    _exc[token] = [{ORTH: token, NORM: "από"}]

for token in ["᾽Αλλ'", "ἀλλ'", "ἀλλὰ"]:
    _exc[token] = [{ORTH: token, NORM: "ἀλλά"}]

for token in ["παρ'", "Παρ'", "παρὰ", "παρ"]:
    _exc[token] = [{ORTH: token, NORM: "παρά"}]

for token in ["καθ'", "Καθ'", "κατ'", "Κατ'", "κατὰ"]:
    _exc[token] = [{ORTH: token, NORM: "κατά"}]

for token in ["Ἐπ'", "ἐπ'", "ἐπὶ", "Εφ'", "εφ'"]:
    _exc[token] = [{ORTH: token, NORM: "επί"}]

for token in ["Δι'", "δι'", "διὰ"]:
    _exc[token] = [{ORTH: token, NORM: "διά"}]

for token in ["Ὑπ'", "ὑπ'", "ὑφ'"]:
    _exc[token] = [{ORTH: token, NORM: "ὑπό"}]

for token in ["Μετ'", "μετ'", "μεθ'", "μετὰ"]:
    _exc[token] = [{ORTH: token, NORM: "μετά"}]

for token in ["Μ'", "μ'", "μέ", "μὲ"]:
    _exc[token] = [{ORTH: token, NORM: "με"}]

for token in ["Σ'", "σ'", "σέ", "σὲ"]:
    _exc[token] = [{ORTH: token, NORM: "σε"}]

for token in ["Τ'", "τ'", "τέ", "τὲ"]:
    _exc[token] = [{ORTH: token, NORM: "τε"}]

for token in ["Δ'", "δ'", "δὲ"]:
    _exc[token] = [{ORTH: token, NORM: "δέ"}]

_other_exc = {
    "μὲν": [{ORTH: "μὲν", NORM: "μέν"}],
    "μὴν": [{ORTH: "μὴν", NORM: "μήν"}],
    "τὴν": [{ORTH: "τὴν", NORM: "τήν"}],
    "τὸν": [{ORTH: "τὸν", NORM: "τόν"}],
    "καὶ": [{ORTH: "καὶ", NORM: "καί"}],
    "καὐτός": [{ORTH: "κ", NORM: "καί"}, {ORTH: "αὐτός"}],
    "καὐτὸς": [{ORTH: "κ", NORM: "καί"}, {ORTH: "αὐτὸς", NORM: "αὐτός"}],
    "κοὐ": [{ORTH: "κ", NORM: "καί"}, {ORTH: "οὐ"}],
    "χἡ": [{ORTH: "χ", NORM: "καί"}, {ORTH: "ἡ"}],
    "χοἱ": [{ORTH: "χ", NORM: "καί"}, {ORTH: "οἱ"}],
    "χἱκετεύετε": [{ORTH: "χ", NORM: "καί"}, {ORTH: "ἱκετεύετε"}],
    "κἀν": [{ORTH: "κ", NORM: "καί"}, {ORTH: "ἀν", NORM: "ἐν"}],
    "κἀγὼ": [{ORTH: "κἀ", NORM: "καί"}, {ORTH: "γὼ", NORM: "ἐγώ"}],
    "κἀγώ": [{ORTH: "κἀ", NORM: "καί"}, {ORTH: "γώ", NORM: "ἐγώ"}],
    "ἁγώ": [{ORTH: "ἁ", NORM: "ἃ"}, {ORTH: "γώ", NORM: "ἐγώ"}],
    "ἁγὼ": [{ORTH: "ἁ", NORM: "ἃ"}, {ORTH: "γὼ", NORM: "ἐγώ"}],
    "ἐγᾦδα": [{ORTH: "ἐγ", NORM: "ἐγώ"}, {ORTH: "ᾦδα", NORM: "οἶδα"}],
    "ἐγᾦμαι": [{ORTH: "ἐγ", NORM: "ἐγώ"}, {ORTH: "ᾦμαι", NORM: "οἶμαι"}],
    "κἀς": [{ORTH: "κ", NORM: "καί"}, {ORTH: "ἀς", NORM: "ἐς"}],
    "κᾆτα": [{ORTH: "κ", NORM: "καί"}, {ORTH: "ᾆτα", NORM: "εἶτα"}],
    "κεἰ": [{ORTH: "κ", NORM: "καί"}, {ORTH: "εἰ"}],
    "κεἰς": [{ORTH: "κ", NORM: "καί"}, {ORTH: "εἰς"}],
    "χὤτε": [{ORTH: "χ", NORM: "καί"}, {ORTH: "ὤτε", NORM: "ὅτε"}],
    "χὤπως": [{ORTH: "χ", NORM: "καί"}, {ORTH: "ὤπως", NORM: "ὅπως"}],
    "χὤτι": [{ORTH: "χ", NORM: "καί"}, {ORTH: "ὤτι", NORM: "ὅτι"}],
    "χὤταν": [{ORTH: "χ", NORM: "καί"}, {ORTH: "ὤταν", NORM: "ὅταν"}],
    "οὑμός": [{ORTH: "οὑ", NORM: "ὁ"}, {ORTH: "μός", NORM: "ἐμός"}],
    "οὑμὸς": [{ORTH: "οὑ", NORM: "ὁ"}, {ORTH: "μὸς", NORM: "ἐμός"}],
    "οὑμοί": [{ORTH: "οὑ", NORM: "οἱ"}, {ORTH: "μοί", NORM: "ἐμoί"}],
    "οὑμοὶ": [{ORTH: "οὑ", NORM: "οἱ"}, {ORTH: "μοὶ", NORM: "ἐμoί"}],
    "σοὔστι": [{ORTH: "σοὔ", NORM: "σοί"}, {ORTH: "στι", NORM: "ἐστι"}],
    "σοὐστί": [{ORTH: "σοὐ", NORM: "σοί"}, {ORTH: "στί", NORM: "ἐστί"}],
    "σοὐστὶ": [{ORTH: "σοὐ", NORM: "σοί"}, {ORTH: "στὶ", NORM: "ἐστί"}],
    "μοὖστι": [{ORTH: "μοὖ", NORM: "μοί"}, {ORTH: "στι", NORM: "ἐστι"}],
    "μοὔστι": [{ORTH: "μοὔ", NORM: "μοί"}, {ORTH: "στι", NORM: "ἐστι"}],
    "τοὔνομα": [{ORTH: "τοὔ", NORM: "τό"}, {ORTH: "νομα", NORM: "ὄνομα"}],
    "οὑν": [{ORTH: "οὑ", NORM: "ὁ"}, {ORTH: "ν", NORM: "ἐν"}],
    "ὦνερ": [{ORTH: "ὦ", NORM: "ὦ"}, {ORTH: "νερ", NORM: "ἄνερ"}],
    "ὦνδρες": [{ORTH: "ὦ", NORM: "ὦ"}, {ORTH: "νδρες", NORM: "ἄνδρες"}],
    "προὔχων": [{ORTH: "προὔ", NORM: "πρό"}, {ORTH: "χων", NORM: "ἔχων"}],
    "προὔχοντα": [{ORTH: "προὔ", NORM: "πρό"}, {ORTH: "χοντα", NORM: "ἔχοντα"}],
    "ὥνεκα": [{ORTH: "ὥ", NORM: "οὗ"}, {ORTH: "νεκα", NORM: "ἕνεκα"}],
    "θοἰμάτιον": [{ORTH: "θο", NORM: "τό"}, {ORTH: "ἰμάτιον"}],
    "τὠληθές": [{ORTH: "τὠ", NORM: "τὸ"}, {ORTH: "ληθές", NORM: "ἀληθές"}],
    "θἡμέρᾳ": [{ORTH: "θ", NORM: "τῇ"}, {ORTH: "ἡμέρᾳ"}],
    "ἅνθρωπος": [{ORTH: "ἅ", NORM: "ὁ"}, {ORTH: "νθρωπος", NORM: "ἄνθρωπος"}],
    "τἄλλα": [{ORTH: "τ", NORM: "τὰ"}, {ORTH: "ἄλλα"}],
    "τἆλλα": [{ORTH: "τἆ", NORM: "τὰ"}, {ORTH: "λλα", NORM: "ἄλλα"}],
    "ἁνήρ": [{ORTH: "ἁ", NORM: "ὁ"}, {ORTH: "νήρ", NORM: "ἀνήρ"}],
    "ἁνὴρ": [{ORTH: "ἁ", NORM: "ὁ"}, {ORTH: "νὴρ", NORM: "ἀνήρ"}],
    "ἅνδρες": [{ORTH: "ἅ", NORM: "οἱ"}, {ORTH: "νδρες", NORM: "ἄνδρες"}],
    "ἁγαθαί": [{ORTH: "ἁ", NORM: "αἱ"}, {ORTH: "γαθαί", NORM: "ἀγαθαί"}],
    "ἁγαθαὶ": [{ORTH: "ἁ", NORM: "αἱ"}, {ORTH: "γαθαὶ", NORM: "ἀγαθαί"}],
    "ἁλήθεια": [{ORTH: "ἁ", NORM: "ἡ"}, {ORTH: "λήθεια", NORM: "ἀλήθεια"}],
    "τἀνδρός": [{ORTH: "τ", NORM: "τοῦ"}, {ORTH: "ἀνδρός"}],
    "τἀνδρὸς": [{ORTH: "τ", NORM: "τοῦ"}, {ORTH: "ἀνδρὸς", NORM: "ἀνδρός"}],
    "τἀνδρί": [{ORTH: "τ", NORM: "τῷ"}, {ORTH: "ἀνδρί"}],
    "τἀνδρὶ": [{ORTH: "τ", NORM: "τῷ"}, {ORTH: "ἀνδρὶ", NORM: "ἀνδρί"}],
    "αὑτός": [{ORTH: "αὑ", NORM: "ὁ"}, {ORTH: "τός", NORM: "αὐτός"}],
    "αὑτὸς": [{ORTH: "αὑ", NORM: "ὁ"}, {ORTH: "τὸς", NORM: "αὐτός"}],
    "ταὐτοῦ": [{ORTH: "τ", NORM: "τοῦ"}, {ORTH: "αὐτοῦ"}],
}

_exc.update(_other_exc)

TOKENIZER_EXCEPTIONS = update_exc(BASE_EXCEPTIONS, _exc)

file_length: 5,395 · avg_line_length: 47.178571 · max_line_length: 80 · extension_type: py
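Annotation: a sketch of the effect on tokenization, assuming a spaCy version that ships this grc module; crasis forms split into their parts, with NORM carrying the uncontracted word:

import spacy

nlp = spacy.blank("grc")  # tokenizer-only pipeline using the exceptions above
doc = nlp("κἀγὼ ταῦτα λέγω")
print([(t.text, t.norm_) for t in doc])
# expected: "κἀγὼ" -> ("κἀ", "καί") and ("γὼ", "ἐγώ") per the table above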
spaCy · spaCy-master/spacy/lang/gu/__init__.py

from ...language import BaseDefaults, Language
from .stop_words import STOP_WORDS


class GujaratiDefaults(BaseDefaults):
    stop_words = STOP_WORDS


class Gujarati(Language):
    lang = "gu"
    Defaults = GujaratiDefaults


__all__ = ["Gujarati"]

file_length: 251 · avg_line_length: 15.8 · max_line_length: 46 · extension_type: py
spaCy · spaCy-master/spacy/lang/gu/examples.py

"""
Example sentences to test spaCy and its language models.

>>> from spacy.lang.gu.examples import sentences
>>> docs = nlp.pipe(sentences)
"""


sentences = [
    "લોકશાહી એ સરકારનું એક એવું તંત્ર છે જ્યાં નાગરિકો મત દ્વારા સત્તાનો ઉપયોગ કરે છે.",
    "તે ગુજરાત રાજ્યના ધરમપુર શહેરમાં આવેલું હતું",
    "કર્ણદેવ પહેલો સોલંકી વંશનો રાજા હતો",
    "તેજપાળને બે પત્ની હતી",
    "ગુજરાતમાં ભારતીય જનતા પક્ષનો ઉદય આ સમયગાળા દરમિયાન થયો",
    "આંદોલનકારીઓએ ચીમનભાઇ પટેલના રાજીનામાની માંગણી કરી.",
    "અહિયાં શું જોડાય છે?",
    "મંદિરનો પૂર્વાભિમુખ ભાગ નાના મંડપ સાથે થોડો લંબચોરસ આકારનો છે.",
]

file_length: 595 · avg_line_length: 30.368421 · max_line_length: 88 · extension_type: py
spaCy · spaCy-master/spacy/lang/gu/stop_words.py

STOP_WORDS = set(
    """
એમ આ એ રહી છે છો હતા હતું હતી હોય હતો શકે તે તેના તેનું તેને તેની તેઓ તેમને તેમના તેમણે તેમનું
તેમાં અને અહીં થી થઈ થાય જે ને કે ના ની નો ને નું શું માં પણ પર જેવા જેવું જાય જેમ જેથી માત્ર
માટે પરથી આવ્યું એવી આવી રીતે સુધી થાય થઈ સાથે લાગે હોવા છતાં રહેલા કરી કરે કેટલા કોઈ કેમ
કર્યો કર્યુ કરે સૌથી ત્યારબાદ તથા દ્વારા જુઓ જાઓ જ્યારે ત્યારે શકો નથી હવે અથવા થતો દર એટલો
પરંતુ
""".split()
)

file_length: 418 · avg_line_length: 3.707865 · max_line_length: 17 · extension_type: py
spaCy · spaCy-master/spacy/lang/he/__init__.py

from ...language import BaseDefaults, Language
from .lex_attrs import LEX_ATTRS
from .stop_words import STOP_WORDS


class HebrewDefaults(BaseDefaults):
    stop_words = STOP_WORDS
    lex_attr_getters = LEX_ATTRS
    writing_system = {"direction": "rtl", "has_case": False, "has_letters": True}


class Hebrew(Language):
    lang = "he"
    Defaults = HebrewDefaults


__all__ = ["Hebrew"]

file_length: 391 · avg_line_length: 20.777778 · max_line_length: 81 · extension_type: py
spaCy · spaCy-master/spacy/lang/he/examples.py

"""
Example sentences to test spaCy and its language models.

>>> from spacy.lang.he.examples import sentences
>>> docs = nlp.pipe(sentences)
"""


sentences = [
    "סין מקימה קרן של 440 מיליון דולר להשקעה בהייטק בישראל",
    'רה"מ הודיע כי יחרים טקס בחסותו',
    "הכנסת צפויה לאשר איכון אוטומטי של שיחות למוקד 100",
    "תוכנית לאומית תהפוך את ישראל למעצמה דיגיטלית",
    "סע לשלום, המפתחות בפנים.",
    "מלצר, פעמיים טורקי!",
    "ואהבת לרעך כמוך.",
    "היום נעשה משהו בלתי נשכח.",
    "איפה הילד?",
    "מיהו נשיא צרפת?",
    "מהי בירת ארצות הברית?",
    "איך קוראים בעברית לצ'ופצ'יק של הקומקום?",
    "מה הייתה הדקה?",
    "מי אומר שלום ראשון, זה שעולה או זה שיורד?",
]

file_length: 676 · avg_line_length: 26.08 · max_line_length: 60 · extension_type: py
spaCy · spaCy-master/spacy/lang/he/lex_attrs.py

from ...attrs import LIKE_NUM

_num_words = [
    "אפס", "אחד", "אחת", "שתיים", "שתים", "שניים", "שנים", "שלוש", "שלושה", "ארבע",
    "ארבעה", "חמש", "חמישה", "שש", "שישה", "שבע", "שבעה", "שמונה", "תשע", "תשעה",
    "עשר", "עשרה", "אחד עשר", "אחת עשרה", "שנים עשר", "שתים עשרה", "שלושה עשר",
    "שלוש עשרה", "ארבעה עשר", "ארבע עשרה", "חמישה עשר", "חמש עשרה", "ששה עשר",
    "שש עשרה", "שבעה עשר", "שבע עשרה", "שמונה עשר", "שמונה עשרה", "תשעה עשר",
    "תשע עשרה", "עשרים", "שלושים", "ארבעים", "חמישים", "שישים", "שבעים", "שמונים",
    "תשעים", "מאה", "אלף", "מליון", "מליארד", "טריליון",
]

_ordinal_words = [
    "ראשון", "שני", "שלישי", "רביעי", "חמישי", "שישי", "שביעי", "שמיני", "תשיעי",
    "עשירי",
]


def like_num(text):
    if text.startswith(("+", "-", "±", "~")):
        text = text[1:]
    text = text.replace(",", "").replace(".", "")
    if text.isdigit():
        return True
    if text.count("/") == 1:
        num, denom = text.split("/")
        if num.isdigit() and denom.isdigit():
            return True
    if text in _num_words:
        return True
    # Check ordinal number
    if text in _ordinal_words:
        return True
    return False


LEX_ATTRS = {LIKE_NUM: like_num}

file_length: 1,426 · avg_line_length: 13.864583 · max_line_length: 49 · extension_type: py
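Annotation: the Hebrew like_num mirrors the other languages: digits, simple fractions, cardinal words, ordinal words. A quick check, assuming the module is importable:

from spacy.lang.he.lex_attrs import like_num

print(like_num("3/4"))    # True: digit/digit fraction
print(like_num("שלוש"))   # True: cardinal word
print(like_num("שלישי"))  # True: ordinal word
print(like_num("ספר"))    # False: ordinary noun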
spaCy · spaCy-master/spacy/lang/he/stop_words.py

STOP_WORDS = set(
    """
אני את אתה אנחנו אתן אתם הם הן היא הוא שלי שלו שלך שלה שלנו שלכם שלכן שלהם שלהן
לי לו לה לנו לכם לכן להם להן אותה אותו זה זאת אלה אלו תחת מתחת מעל בין עם עד על
אל מול של אצל כמו אחר אותו בלי לפני אחרי מאחורי עלי עליו עליה עליך עלינו עליכם
עליכן עליהם עליהן כל כולם כולן כך ככה כזה כזאת זה אותי אותה אותם אותך אותו אותן
אותנו ואת את אתכם אתכן איתי איתו איתך איתה איתם איתן איתנו איתכם איתכן יהיה
תהיה הייתי היתה היה להיות עצמי עצמו עצמה עצמם עצמן עצמנו מי מה איפה היכן במקום
שבו אם לאן למקום שבו מקום בו איזה מהיכן איך כיצד באיזו מידה מתי בשעה ש כאשר כש
למרות לפני אחרי מאיזו סיבה הסיבה שבגללה למה מדוע לאיזו תכלית כי יש אין אך מנין
מאין מאיפה יכל יכלה יכלו יכול יכולה יכולים יכולות יוכלו יוכל מסוגל לא רק אולי
אין לאו אי כלל בעד נגד אם עם אל אלה אלו אף על מעל מתחת מצד בשביל לבין באמצע
בתוך דרך מבעד באמצעות למעלה למטה מחוץ מן לעבר מכאן כאן הנה הרי פה שם אך ברם
שוב אבל מבלי בלי מלבד רק בגלל מכיוון עד אשר ואילו למרות כמו כפי אז אחרי כן לכן
לפיכך עז מאוד מעט מעטים במידה שוב יותר מדי גם כן נו אחר אחרת אחרים אחרות אשר או
""".split()
)

file_length: 1,061 · avg_line_length: 3.762332 · max_line_length: 17 · extension_type: py
spaCy · spaCy-master/spacy/lang/hi/__init__.py

from ...language import BaseDefaults, Language
from .lex_attrs import LEX_ATTRS
from .stop_words import STOP_WORDS


class HindiDefaults(BaseDefaults):
    stop_words = STOP_WORDS
    lex_attr_getters = LEX_ATTRS


class Hindi(Language):
    lang = "hi"
    Defaults = HindiDefaults


__all__ = ["Hindi"]

file_length: 305 · avg_line_length: 17 · max_line_length: 46 · extension_type: py
spaCy · spaCy-master/spacy/lang/hi/examples.py

"""
Example sentences to test spaCy and its language models.

>>> from spacy.lang.hi.examples import sentences
>>> docs = nlp.pipe(sentences)
"""


sentences = [
    "एप्पल 1 अरब डॉलर के लिए यू.के. स्टार्टअप खरीदने पर विचार कर रहा है।",
    "स्वायत्त कारें निर्माताओं की ओर बीमा दायित्व रखतीं हैं।",
    "सैन फ्रांसिस्को फुटपाथ वितरण रोबोटों पर प्रतिबंध लगाने का विचार कर रहा है।",
    "लंदन यूनाइटेड किंगडम का विशाल शहर है।",
    "आप कहाँ हो?",
    "फ्रांस के राष्ट्रपति कौन हैं?",
    "संयुक्त राज्यों की राजधानी क्या है?",
    "बराक ओबामा का जन्म कब हुआ था?",
    "जवाहरलाल नेहरू भारत के पहले प्रधानमंत्री हैं।",
    "राजेंद्र प्रसाद, भारत के पहले राष्ट्रपति, दो कार्यकाल के लिए कार्यालय रखने वाले एकमात्र व्यक्ति हैं।",
]

file_length: 726 · avg_line_length: 33.619048 · max_line_length: 107 · extension_type: py
spaCy · spaCy-master/spacy/lang/hi/lex_attrs.py

from ...attrs import LIKE_NUM, NORM
from ..norm_exceptions import BASE_NORMS

# fmt: off
_stem_suffixes = [
    ["ो", "े", "ू", "ु", "ी", "ि", "ा"],
    ["कर", "ाओ", "िए", "ाई", "ाए", "ने", "नी", "ना", "ते", "ीं", "ती", "ता", "ाँ", "ां", "ों", "ें"],
    ["ाकर", "ाइए", "ाईं", "ाया", "ेगी", "ेगा", "ोगी", "ोगे", "ाने", "ाना", "ाते", "ाती", "ाता", "तीं", "ाओं", "ाएं", "ुओं", "ुएं", "ुआं"],
    ["ाएगी", "ाएगा", "ाओगी", "ाओगे", "एंगी", "ेंगी", "एंगे", "ेंगे", "ूंगी", "ूंगा", "ातीं", "नाओं", "नाएं", "ताओं", "ताएं", "ियाँ", "ियों", "ियां"],
    ["ाएंगी", "ाएंगे", "ाऊंगी", "ाऊंगा", "ाइयाँ", "ाइयों", "ाइयां"],
]

# reference 1: https://en.wikipedia.org/wiki/Indian_numbering_system
# reference 2: https://blogs.transparent.com/hindi/hindi-numbers-1-100/
# reference 3: https://www.mindurhindi.com/basic-words-and-phrases-in-hindi/

_one_to_ten = [
    "शून्य", "एक", "दो", "तीन", "चार", "पांच", "पाँच", "छह", "सात", "आठ", "नौ", "दस",
]

_eleven_to_beyond = [
    "ग्यारह", "बारह", "तेरह", "चौदह", "पंद्रह", "सोलह", "सत्रह", "अठारह", "उन्नीस", "बीस",
    "इकीस", "इक्कीस", "बाईस", "तेइस", "चौबीस", "पच्चीस", "छब्बीस", "सताइस", "सत्ताइस",
    "अट्ठाइस", "उनतीस", "तीस", "इकतीस", "इकत्तीस", "बतीस", "बत्तीस", "तैंतीस", "चौंतीस",
    "पैंतीस", "छतीस", "छत्तीस", "सैंतीस", "अड़तीस", "उनतालीस", "उनत्तीस", "चालीस", "इकतालीस",
    "बयालीस", "तैतालीस", "चवालीस", "पैंतालीस", "छयालिस", "सैंतालीस", "अड़तालीस", "उनचास",
    "पचास", "इक्यावन", "बावन", "तिरपन", "तिरेपन", "चौवन", "चउवन", "पचपन", "छप्पन",
    "सतावन", "सत्तावन", "अठावन", "उनसठ", "साठ", "इकसठ", "बासठ", "तिरसठ", "तिरेसठ",
    "चौंसठ", "पैंसठ", "छियासठ", "सड़सठ", "अड़सठ", "उनहत्तर", "सत्तर", "इकहत्तर", "बहत्तर",
    "तिहत्तर", "चौहत्तर", "पचहत्तर", "छिहत्तर", "सतहत्तर", "अठहत्तर", "उन्नासी", "उन्यासी",
    "अस्सी", "इक्यासी", "बयासी", "तिरासी", "चौरासी", "पचासी", "छियासी", "सतासी",
    "अट्ठासी", "नवासी", "नब्बे", "इक्यानवे", "बानवे", "तिरानवे", "चौरानवे", "पचानवे",
    "छियानवे", "सतानवे", "अट्ठानवे", "निन्यानवे", "सौ", "हज़ार", "लाख", "करोड़", "अरब", "खरब",
]

_num_words = _one_to_ten + _eleven_to_beyond

_ordinal_words_one_to_ten = [
    "प्रथम", "पहला", "द्वितीय", "दूसरा", "तृतीय", "तीसरा", "चौथा", "पांचवाँ", "छठा",
    "सातवाँ", "आठवाँ", "नौवाँ", "दसवाँ",
]
_ordinal_suffix = "वाँ"
# fmt: on


def norm(string):
    # normalise base exceptions, e.g. punctuation or currency symbols
    if string in BASE_NORMS:
        return BASE_NORMS[string]
    # set stem word as norm, if available, adapted from:
    # http://computing.open.ac.uk/Sites/EACLSouthAsia/Papers/p6-Ramanathan.pdf
    # http://research.variancia.com/hindi_stemmer/
    # https://github.com/taranjeet/hindi-tokenizer/blob/master/HindiTokenizer.py#L142
    for suffix_group in reversed(_stem_suffixes):
        length = len(suffix_group[0])
        if len(string) <= length:
            continue
        for suffix in suffix_group:
            if string.endswith(suffix):
                return string[:-length]
    return string


def like_num(text):
    if text.startswith(("+", "-", "±", "~")):
        text = text[1:]
    text = text.replace(",", "").replace(".", "")
    if text.isdigit():
        return True
    if text.count("/") == 1:
        num, denom = text.split("/")
        if num.isdigit() and denom.isdigit():
            return True
    if text.lower() in _num_words:
        return True
    # check ordinal numbers
    # reference: http://www.englishkitab.com/Vocabulary/Numbers.html
    if text in _ordinal_words_one_to_ten:
        return True
    if text.endswith(_ordinal_suffix):
        if text[: -len(_ordinal_suffix)] in _eleven_to_beyond:
            return True
    return False


LEX_ATTRS = {NORM: norm, LIKE_NUM: like_num}

file_length: 4,095 · avg_line_length: 20.671958 · max_line_length: 149 · extension_type: py
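Annotation: norm strips the longest matching suffix group (groups are checked longest-first) and like_num recognises ordinals built as cardinal + "वाँ". A short sketch, assuming the module is importable:

from spacy.lang.hi.lex_attrs import like_num, norm

print(norm("करता"))        # two-char suffix "ता" stripped -> "कर"
print(like_num("पाँच"))     # True: cardinal
print(like_num("बारहवाँ"))  # True: "बारह" + ordinal suffix "वाँ"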
spaCy · spaCy-master/spacy/lang/hi/stop_words.py

# Source: https://github.com/taranjeet/hindi-tokenizer/blob/master/stopwords.txt, https://data.mendeley.com/datasets/bsr3frvvjc/1#file-a21d5092-99d7-45d8-b044-3ae9edd391c6

STOP_WORDS = set(
    """
अंदर अत अदि अप अपना अपनि अपनी अपने अभि अभी अंदर आदि आप अगर इंहिं इंहें इंहों इतयादि इत्यादि इन
इनका इन्हीं इन्हें इन्हों इस इसका इसकि इसकी इसके इसमें इसि इसी इसे उंहिं उंहें उंहों उन उनका
उनकि उनकी उनके उनको उन्हीं उन्हें उन्हों उस उसके उसि उसी उसे एक एवं एस एसे ऐसे ओर और कइ कई कर
करता करते करना करने करें कहते कहा का काफि काफ़ी कि किंहें किंहों कितना किन्हें किन्हों किया किर
किस किसि किसी किसे की कुछ कुल के को कोइ कोई कोन कोनसा कौन कौनसा गया घर जब जहाँ जहां जा जिंहें
जिंहों जितना जिधर जिन जिन्हें जिन्हों जिस जिसे जीधर जेसा जेसे जैसा जैसे जो तक तब तरह तिंहें
तिंहों तिन तिन्हें तिन्हों तिस तिसे तो था थि थी थे दबारा दवारा दिया दुसरा दुसरे दूसरे दो
द्वारा न नहिं नहीं ना निचे निहायत नीचे ने पर पहले पुरा पूरा पे फिर बनि बनी बहि बही बहुत बाद
बाला बिलकुल भि भितर भी भीतर मगर मानो मे में मैं मुझको मेरा यदि यह यहाँ यहां यहि यही या यिह ये
रखें रवासा रहा रहे ऱ्वासा लिए लिये लेकिन व वगेरह वग़ैरह वरग वर्ग वह वहाँ वहां वहिं वहीं वाले
वुह वे वग़ैरह संग सकता सकते सबसे सभि सभी साथ साबुत साभ सारा से सो संग हि ही हुअ हुआ हुइ हुई
हुए हे हें है हैं हो हूँ होता होति होती होते होना होने
""".split()
)

file_length: 1,289 · avg_line_length: 4.375 · max_line_length: 171 · extension_type: py
spaCy · spaCy-master/spacy/lang/hr/__init__.py

from ...language import BaseDefaults, Language
from .stop_words import STOP_WORDS


class CroatianDefaults(BaseDefaults):
    stop_words = STOP_WORDS


class Croatian(Language):
    lang = "hr"
    Defaults = CroatianDefaults


__all__ = ["Croatian"]

file_length: 251 · avg_line_length: 15.8 · max_line_length: 46 · extension_type: py
spaCy · spaCy-master/spacy/lang/hr/examples.py

"""
Example sentences to test spaCy and its language models.

>>> from spacy.lang.hr.examples import sentences
>>> docs = nlp.pipe(sentences)
"""


sentences = [
    "Ovo je rečenica.",
    "Kako se popravlja auto?",
    "Zagreb je udaljen od Ljubljane svega 150 km.",
    "Nećete vjerovati što se dogodilo na ovogodišnjem festivalu!",
    "Budućnost Apple je upitna nakon dugotrajnog pada vrijednosti dionica firme.",
    "Trgovina oružjem predstavlja prijetnju za globalni mir.",
]

file_length: 483 · avg_line_length: 29.25 · max_line_length: 82 · extension_type: py
spaCy · spaCy-master/spacy/lang/hr/stop_words.py

# Source: https://github.com/stopwords-iso/stopwords-hr

STOP_WORDS = set(
    """
a ah aha aj ako al ali arh au avaj bar baš bez bi bih bijah bijahu bijaše bijasmo bijaste
bila bili bilo bio bismo biste biti brr buć budavši bude budimo budite budu budući bum bumo
će ćemo ćeš ćete čijem čijim čijima ću da daj dakle de deder dem djelomice djelomično do
doista dok dokle donekle dosad doskoro dotad dotle dovečer drugamo drugdje duž e eh ehe ej
eno eto evo ga gdjekakav gdjekoje gic god halo hej hm hoće hoćemo hoćeš hoćete hoću hop
htijahu htijasmo htijaste htio htjedoh htjedoše htjedoste htjela htjele htjeli hura i iako
ih iju ijuju ikada ikakav ikakva ikakve ikakvi ikakvih ikakvim ikakvima ikakvo ikakvog
ikakvoga ikakvoj ikakvom ikakvome ili im iz ja je jedna jedne jedni jedno jer jesam jesi
jesmo jest jeste jesu jim joj još ju kada kako kao koja koje koji kojima koju kroz lani li
me mene meni mi mimo moj moja moje moji moju mu na nad nakon nam nama nas naš naša naše
našeg naši ne neće nećemo nećeš nećete neću nego neka neke neki nekog neku nema nešto netko
ni nije nikoga nikoje nikoji nikoju nisam nisi nismo niste nisu njega njegov njegova
njegovo njemu njezin njezina njezino njih njihov njihova njihovo njim njima njoj nju no o
od odmah on ona one oni ono onu onoj onom onim onima ova ovaj ovim ovima ovoj pa pak pljus
po pod podalje poimence poizdalje ponekad pored postrance potajice potrbuške pouzdano prije
s sa sam samo sasvim sav se sebe sebi si šic smo ste što šta štogod štagod su sva sve svi
svi svog svoj svoja svoje svoju svom svu ta tada taj tako te tebe tebi ti tim tima to toj
tome tu tvoj tvoja tvoje tvoji tvoju u usprkos utaman uvijek uz uza uzagrapce uzalud uzduž
valjda vam vama vas vaš vaša vaše vašim vašima već vi vjerojatno vjerovatno vrh vrlo za
zaista zar zatim zato zbija zbog želeći željah željela željele željeli željelo željen
željena željene željeni željenu željeo zimus zum
""".split()
)

file_length: 1,936 · avg_line_length: 4.614493 · max_line_length: 55 · extension_type: py
spaCy · spaCy-master/spacy/lang/hsb/__init__.py

from ...language import BaseDefaults, Language
from .lex_attrs import LEX_ATTRS
from .stop_words import STOP_WORDS
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS


class UpperSorbianDefaults(BaseDefaults):
    lex_attr_getters = LEX_ATTRS
    stop_words = STOP_WORDS
    tokenizer_exceptions = TOKENIZER_EXCEPTIONS


class UpperSorbian(Language):
    lang = "hsb"
    Defaults = UpperSorbianDefaults


__all__ = ["UpperSorbian"]

file_length: 437 · avg_line_length: 22.052632 · max_line_length: 54 · extension_type: py
spaCy · spaCy-master/spacy/lang/hsb/examples.py

"""
Example sentences to test spaCy and its language models.

>>> from spacy.lang.hsb.examples import sentences
>>> docs = nlp.pipe(sentences)
"""


sentences = [
    "To běšo wjelgin raźone a jo se wót luźi derje pśiwzeło. Tak som dožywiła wjelgin",
    "Jogo pśewóźowarce stej groniłej, až how w serbskich stronach njama Santa Claus nic pytaś.",
    "A ten sobuźěłaśeŕ Statneje biblioteki w Barlinju jo pśimjeł drogotne knigły bźez rukajcowu z nagima rukoma!",
    "Take wobchadanje z našym kulturnym derbstwom zewšym njejźo.",
    "Wopśimjeśe drugich pśinoskow jo było na wusokem niwowje, ako pśecej.",
]

file_length: 608 · avg_line_length: 37.0625 · max_line_length: 114 · extension_type: py
spaCy · spaCy-master/spacy/lang/hsb/lex_attrs.py

from ...attrs import LIKE_NUM

_num_words = [
    "nul", "jedyn", "jedna", "jedne", "dwaj", "dwě", "tři", "třo", "štyri", "štyrjo",
    "pjeć", "šěsć", "sydom", "wosom", "dźewjeć", "dźesać", "jědnaće", "dwanaće",
    "třinaće", "štyrnaće", "pjatnaće", "šěsnaće", "sydomnaće", "wosomnaće",
    "dźewjatnaće", "dwaceći", "třiceći", "štyrceći", "pjećdźesat", "šěsćdźesat",
    "sydomdźesat", "wosomdźesat", "dźewjećdźesat", "sto", "tysac", "milion",
    "miliarda", "bilion", "biliarda", "trilion", "triliarda",
]

_ordinal_words = [
    "prěni", "prěnja", "prěnje", "druhi", "druha", "druhe", "třeći", "třeća", "třeće",
    "štwórty", "štwórta", "štwórte", "pjaty", "pjata", "pjate", "šěsty", "šěsta",
    "šěste", "sydmy", "sydma", "sydme", "wosmy", "wosma", "wosme", "dźewjaty",
    "dźewjata", "dźewjate", "dźesaty", "dźesata", "dźesate", "jědnaty", "jědnata",
    "jědnate", "dwanaty", "dwanata", "dwanate",
]


def like_num(text):
    if text.startswith(("+", "-", "±", "~")):
        text = text[1:]
    text = text.replace(",", "").replace(".", "")
    if text.isdigit():
        return True
    if text.count("/") == 1:
        num, denom = text.split("/")
        if num.isdigit() and denom.isdigit():
            return True
    text_lower = text.lower()
    if text_lower in _num_words:
        return True
    # Check ordinal number
    if text_lower in _ordinal_words:
        return True
    return False


LEX_ATTRS = {LIKE_NUM: like_num}

file_length: 1,716 · avg_line_length: 15.046729 · max_line_length: 49 · extension_type: py
spaCy · spaCy-master/spacy/lang/hsb/stop_words.py

STOP_WORDS = set(
    """
a abo ale ani dokelž hdyž jeli jelizo kaž pak potom tež tohodla zo zoby
""".split()
)

file_length: 119 · avg_line_length: 5 · max_line_length: 17 · extension_type: py
spaCy · spaCy-master/spacy/lang/hsb/tokenizer_exceptions.py

from ...symbols import NORM, ORTH
from ...util import update_exc
from ..tokenizer_exceptions import BASE_EXCEPTIONS

_exc = dict()

for exc_data in [
    {ORTH: "mil.", NORM: "milion"},
    {ORTH: "wob.", NORM: "wobydler"},
]:
    _exc[exc_data[ORTH]] = [exc_data]

for orth in [
    "resp.",
]:
    _exc[orth] = [{ORTH: orth}]

TOKENIZER_EXCEPTIONS = update_exc(BASE_EXCEPTIONS, _exc)

file_length: 386 · avg_line_length: 19.368421 · max_line_length: 56 · extension_type: py
spaCy · spaCy-master/spacy/lang/hu/__init__.py

from ...language import BaseDefaults, Language
from .punctuation import TOKENIZER_INFIXES, TOKENIZER_PREFIXES, TOKENIZER_SUFFIXES
from .stop_words import STOP_WORDS
from .tokenizer_exceptions import TOKEN_MATCH, TOKENIZER_EXCEPTIONS


class HungarianDefaults(BaseDefaults):
    tokenizer_exceptions = TOKENIZER_EXCEPTIONS
    prefixes = TOKENIZER_PREFIXES
    suffixes = TOKENIZER_SUFFIXES
    infixes = TOKENIZER_INFIXES
    token_match = TOKEN_MATCH
    stop_words = STOP_WORDS


class Hungarian(Language):
    lang = "hu"
    Defaults = HungarianDefaults


__all__ = ["Hungarian"]

file_length: 584 · avg_line_length: 25.590909 · max_line_length: 82 · extension_type: py
spaCy
spaCy-master/spacy/lang/hu/examples.py
""" Example sentences to test spaCy and its language models. >>> from spacy.lang.hu.examples import sentences >>> docs = nlp.pipe(sentences) """ sentences = [ "Az Apple egy brit startup vásárlását tervezi 1 milliárd dollár értékben.", "San Francisco vezetése mérlegeli a járdát használó szállító robotok betiltását.", "London az Egyesült Királyság egy nagy városa.", ]
384
26.5
86
py
spaCy
spaCy-master/spacy/lang/hu/punctuation.py
from ..char_classes import ( ALPHA, ALPHA_LOWER, ALPHA_UPPER, CONCAT_ICONS, CONCAT_QUOTES, LIST_ELLIPSES, LIST_PUNCT, LIST_QUOTES, UNITS, ) # removing ° from the special icons to keep e.g. 99° as one token _concat_icons = CONCAT_ICONS.replace("\u00B0", "") _currency = r"\$¢£€¥฿" _quotes = CONCAT_QUOTES.replace("'", "") _units = UNITS.replace("%", "") _prefixes = ( LIST_PUNCT + LIST_ELLIPSES + LIST_QUOTES + [_concat_icons] + [r"[,.:](?=[{a}])".format(a=ALPHA)] ) _suffixes = ( [r"\+"] + LIST_PUNCT + LIST_ELLIPSES + LIST_QUOTES + [_concat_icons] + [ r"(?<=[0-9])\+", r"(?<=°[FfCcKk])\.", r"(?<=[0-9])(?:[{c}])".format(c=_currency), r"(?<=[0-9])(?:{u})".format(u=_units), r"(?<=[{al}{e}{q}(?:{c})])\.".format( al=ALPHA_LOWER, e=r"%²\-\+", q=CONCAT_QUOTES, c=_currency ), r"(?<=[{al})])-e".format(al=ALPHA_LOWER), ] ) _infixes = ( LIST_ELLIPSES + [_concat_icons] + [ r"(?<=[{al}])\.(?=[{au}])".format(al=ALPHA_LOWER, au=ALPHA_UPPER), r"(?<=[{a}])[,!?](?=[{a}])".format(a=ALPHA), r"(?<=[{a}])[:<>=](?=[{a}])".format(a=ALPHA), r"(?<=[{a}])--(?=[{a}])".format(a=ALPHA), r"(?<=[{a}]),(?=[{a}])".format(a=ALPHA), r"(?<=[{a}])([{q}\)\]\(\[])(?=[\-{a}])".format(a=ALPHA, q=_quotes), ] ) TOKENIZER_PREFIXES = _prefixes TOKENIZER_SUFFIXES = _suffixes TOKENIZER_INFIXES = _infixes
1,494
23.112903
75
py
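These rule lists are meant to be compiled into the tokenizer; a small sketch using the public helpers in spacy.util to confirm, for example, that the Hungarian "-e" question clitic is split off as a suffix:

from spacy.util import compile_suffix_regex
from spacy.lang.hu.punctuation import TOKENIZER_SUFFIXES

suffix_re = compile_suffix_regex(TOKENIZER_SUFFIXES)
match = suffix_re.search("érted-e")
print(match.group() if match else None)  # "-e"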
spaCy
spaCy-master/spacy/lang/hu/stop_words.py
STOP_WORDS = set( """ a abban ahhoz ahogy ahol aki akik akkor akár alatt amely amelyek amelyekben amelyeket amelyet amelynek ami amikor amit amolyan amíg annak arra arról az azok azon azonban azt aztán azután azzal azért be belül benne bár cikk cikkek cikkeket csak de e ebben eddig egy egyes egyetlen egyik egyre egyéb egész ehhez ekkor el ellen elo eloször elott elso elég előtt emilyen ennek erre ez ezek ezen ezt ezzel ezért fel felé ha hanem hiszen hogy hogyan hát ide igen ill ill. illetve ilyen ilyenkor inkább is ismét ison itt jobban jó jól kell kellett keressünk keresztül ki kívül között közül le legalább legyen lehet lehetett lenne lenni lesz lett ma maga magát majd meg mellett mely melyek mert mi miatt mikor milyen minden mindenki mindent mindig mint mintha mit mivel miért mondta most már más másik még míg nagy nagyobb nagyon ne nekem neki nem nincs néha néhány nélkül o oda ok oket olyan ott pedig persze például rá s saját sem semmi sok sokat sokkal stb. szemben szerint szinte számára szét talán te tehát teljes ti tovább továbbá több túl ugyanis utolsó után utána vagy vagyis vagyok valaki valami valamint való van vannak vele vissza viszont volna volt voltak voltam voltunk által általában át én éppen és így ön össze úgy új újabb újra ő őket """.split() )
1,309
19.793651
77
py
spaCy
spaCy-master/spacy/lang/hu/tokenizer_exceptions.py
import re from ...symbols import ORTH from ...util import update_exc from ..punctuation import ALPHA_LOWER, CURRENCY from ..tokenizer_exceptions import BASE_EXCEPTIONS _exc = {} for orth in [ "-e", "A.", "AG.", "AkH.", "Aö.", "B.", "B.CS.", "B.S.", "B.Sc.", "B.ú.é.k.", "BE.", "BEK.", "BSC.", "BSc.", "BTK.", "Bat.", "Be.", "Bek.", "Bfok.", "Bk.", "Bp.", "Bros.", "Bt.", "Btk.", "Btke.", "Btét.", "C.", "CSC.", "Cal.", "Cg.", "Cgf.", "Cgt.", "Cia.", "Co.", "Colo.", "Comp.", "Copr.", "Corp.", "Cos.", "Cs.", "Csc.", "Csop.", "Cstv.", "Ctv.", "Ctvr.", "D.", "DR.", "Dipl.", "Dr.", "Dsz.", "Dzs.", "E.", "EK.", "EU.", "F.", "Fla.", "Folyt.", "Fpk.", "Főszerk.", "G.", "GK.", "GM.", "Gfv.", "Gmk.", "Gr.", "Group.", "Gt.", "Gy.", "H.", "HKsz.", "Hmvh.", "I.", "Ifj.", "Inc.", "Inform.", "Int.", "J.", "Jr.", "Jv.", "K.", "K.m.f.", "KB.", "KER.", "KFT.", "KRT.", "Kb.", "Ker.", "Kft.", "Kg.", "Kht.", "Kkt.", "Kong.", "Korm.", "Kr.", "Kr.e.", "Kr.u.", "Krt.", "L.", "LB.", "Llc.", "Ltd.", "M.", "M.A.", "M.S.", "M.SC.", "M.Sc.", "MA.", "MH.", "MSC.", "MSc.", "Mass.", "Max.", "Mlle.", "Mme.", "Mo.", "Mr.", "Mrs.", "Ms.", "Mt.", "N.", "N.N.", "NB.", "NBr.", "Nat.", "No.", "Nr.", "Ny.", "Nyh.", "Nyr.", "Nyrt.", "O.", "OJ.", "Op.", "P.", "P.H.", "P.S.", "PH.D.", "PHD.", "PROF.", "Pf.", "Ph.D", "PhD.", "Pk.", "Pl.", "Plc.", "Pp.", "Proc.", "Prof.", "Ptk.", "R.", "RT.", "Rer.", "Rt.", "S.", "S.B.", "SZOLG.", "Salg.", "Sch.", "Spa.", "St.", "Sz.", "SzRt.", "Szerk.", "Szfv.", "Szjt.", "Szolg.", "Szt.", "Sztv.", "Szvt.", "Számv.", "T.", "TEL.", "Tel.", "Ty.", "Tyr.", "U.", "Ui.", "Ut.", "V.", "VB.", "Vcs.", "Vhr.", "Vht.", "Várm.", "W.", "X.", "X.Y.", "Y.", "Z.", "Zrt.", "Zs.", "a.C.", "ac.", "adj.", "adm.", "ag.", "agit.", "alez.", "alk.", "all.", "altbgy.", "an.", "ang.", "arch.", "at.", "atc.", "aug.", "b.a.", "b.s.", "b.sc.", "bek.", "belker.", "berend.", "biz.", "bizt.", "bo.", "bp.", "br.", "bsc.", "bt.", "btk.", "ca.", "cc.", "cca.", "cf.", "cif.", "co.", "corp.", "cos.", "cs.", "csc.", "csüt.", "cső.", "ctv.", "dbj.", "dd.", "ddr.", "de.", "dec.", "dikt.", "dipl.", "dj.", "dk.", "dl.", "dny.", "dolg.", "dr.", "du.", "dzs.", "ea.", "ed.", "eff.", "egyh.", "ell.", "elv.", "elvt.", "em.", "eng.", "eny.", "et.", "etc.", "ev.", "ezr.", "eü.", "f.h.", "f.é.", "fam.", "fb.", "febr.", "fej.", "felv.", "felügy.", "ff.", "ffi.", "fhdgy.", "fil.", "fiz.", "fm.", "foglalk.", "ford.", "fp.", "fr.", "frsz.", "fszla.", "fszt.", "ft.", "fuv.", "főig.", "főisk.", "főtörm.", "főv.", "gazd.", "gimn.", "gk.", "gkv.", "gmk.", "gondn.", "gr.", "grav.", "gy.", "gyak.", "gyártm.", "gör.", "hads.", "hallg.", "hdm.", "hdp.", "hds.", "hg.", "hiv.", "hk.", "hm.", "ho.", "honv.", "hp.", "hr.", "hrsz.", "hsz.", "ht.", "htb.", "hv.", "hőm.", "i.e.", "i.sz.", "id.", "ie.", "ifj.", "ig.", "igh.", "ill.", "imp.", "inc.", "ind.", "inform.", "inic.", "int.", "io.", "ip.", "ir.", "irod.", "irod.", "isk.", "ism.", "izr.", "iá.", "jan.", "jav.", "jegyz.", "jgmk.", "jjv.", "jkv.", "jogh.", "jogt.", "jr.", "jvb.", "júl.", "jún.", "karb.", "kat.", "kath.", "kb.", "kcs.", "kd.", "ker.", "kf.", "kft.", "kht.", "kir.", "kirend.", "kisip.", "kiv.", "kk.", "kkt.", "klin.", "km.", "korm.", "kp.", "krt.", "kt.", "ktsg.", "kult.", "kv.", "kve.", "képv.", "kísérl.", "kóth.", "könyvt.", "körz.", "köv.", "közj.", "közl.", "közp.", "közt.", "kü.", "lat.", "ld.", "legs.", "lg.", "lgv.", "loc.", "lt.", "ltd.", "ltp.", "luth.", "m.a.", "m.s.", "m.sc.", "ma.", "mat.", "max.", "mb.", "med.", "megh.", "met.", "mf.", "mfszt.", "min.", "miss.", "mjr.", "mjv.", "mk.", 
"mlle.", "mme.", "mn.", "mozg.", "mr.", "mrs.", "ms.", "msc.", "má.", "máj.", "márc.", "mé.", "mélt.", "mü.", "műh.", "műsz.", "műv.", "művez.", "nagyker.", "nagys.", "nat.", "nb.", "neg.", "nk.", "no.", "nov.", "nu.", "ny.", "nyilv.", "nyrt.", "nyug.", "obj.", "okl.", "okt.", "old.", "olv.", "orsz.", "ort.", "ov.", "ovh.", "pf.", "pg.", "ph.d", "ph.d.", "phd.", "phil.", "pjt.", "pk.", "pl.", "plb.", "plc.", "pld.", "plur.", "pol.", "polg.", "poz.", "pp.", "proc.", "prof.", "prot.", "pság.", "ptk.", "pu.", "pü.", "r.k.", "rac.", "rad.", "red.", "ref.", "reg.", "rer.", "rev.", "rf.", "rkp.", "rkt.", "rt.", "rtg.", "röv.", "s.b.", "s.k.", "sa.", "sb.", "sel.", "sgt.", "sm.", "st.", "stat.", "stb.", "strat.", "stud.", "sz.", "szakm.", "szaksz.", "szakszerv.", "szd.", "szds.", "szept.", "szerk.", "szf.", "szimf.", "szjt.", "szkv.", "szla.", "szn.", "szolg.", "szt.", "szubj.", "szöv.", "szül.", "tanm.", "tb.", "tbk.", "tc.", "techn.", "tek.", "tel.", "tf.", "tgk.", "ti.", "tip.", "tisztv.", "titks.", "tk.", "tkp.", "tny.", "tp.", "tszf.", "tszk.", "tszkv.", "tv.", "tvr.", "ty.", "törv.", "tü.", "ua.", "ui.", "unit.", "uo.", "uv.", "vas.", "vb.", "vegy.", "vh.", "vhol.", "vhr.", "vill.", "vizsg.", "vk.", "vkf.", "vkny.", "vm.", "vol.", "vs.", "vsz.", "vv.", "vál.", "várm.", "vízv.", "vö.", "zrt.", "zs.", "Á.", "Áe.", "Áht.", "É.", "Épt.", "Ész.", "Új-Z.", "ÚjZ.", "Ún.", "á.", "ált.", "ápr.", "ásv.", "é.", "ék.", "ény.", "érk.", "évf.", "í.", "ó.", "össz.", "ötk.", "özv.", "ú.", "ú.n.", "úm.", "ún.", "út.", "üag.", "üd.", "üdv.", "üe.", "ümk.", "ütk.", "üv.", "ű.", "őrgy.", "őrpk.", "őrv.", ]: _exc[orth] = [{ORTH: orth}] _ord_num_or_date = r"([A-Z0-9]+[./-])*(\d+\.?)" _num = r"[+\-]?\d+([,.]\d+)*" _ops = r"[=<>+\-\*/^()÷%²]" _suffixes = r"-[{al}]+".format(al=ALPHA_LOWER) _numeric_exp = r"({n})(({o})({n}))*[%]?".format(n=_num, o=_ops) _time_exp = r"\d+(:\d+)*(\.\d+)?" _nums = r"(({ne})|({t})|({on})|({c}))({s})?".format( ne=_numeric_exp, t=_time_exp, on=_ord_num_or_date, c=CURRENCY, s=_suffixes ) for u in "cfkCFK": _exc[f"°{u}"] = [{ORTH: f"°{u}"}] _exc[f"°{u}."] = [{ORTH: f"°{u}"}, {ORTH: "."}] TOKENIZER_EXCEPTIONS = update_exc(BASE_EXCEPTIONS, _exc) TOKEN_MATCH = re.compile(r"^{n}$".format(n=_nums)).match
8,299
11.671756
78
py
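TOKEN_MATCH above protects number-like strings from further splitting; it can be probed directly, since it is just the compiled pattern's match method (nothing pipeline-specific):

from spacy.lang.hu.tokenizer_exceptions import TOKEN_MATCH

for text in ["4:30", "10.000,5", "50%", "3x2"]:
    print(text, bool(TOKEN_MATCH(text)))
# 4:30, 10.000,5 and 50% match; 3x2 does not ("x" is not in _ops)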
spaCy
spaCy-master/spacy/lang/hy/__init__.py
from ...language import BaseDefaults, Language from .lex_attrs import LEX_ATTRS from .stop_words import STOP_WORDS class ArmenianDefaults(BaseDefaults): lex_attr_getters = LEX_ATTRS stop_words = STOP_WORDS class Armenian(Language): lang = "hy" Defaults = ArmenianDefaults __all__ = ["Armenian"]
317
17.705882
46
py
spaCy
spaCy-master/spacy/lang/hy/examples.py
""" Example sentences to test spaCy and its language models. >>> from spacy.lang.hy.examples import sentences >>> docs = nlp.pipe(sentences) """ sentences = [ "Լոնդոնը Միացյալ Թագավորության մեծ քաղաք է։", "Ո՞վ է Ֆրանսիայի նախագահը։", "Ո՞րն է Միացյալ Նահանգների մայրաքաղաքը։", "Ե՞րբ է ծնվել Բարաք Օբաման։", ]
326
22.357143
56
py
spaCy
spaCy-master/spacy/lang/hy/lex_attrs.py
from ...attrs import LIKE_NUM _num_words = [ "զրո", "մեկ", "երկու", "երեք", "չորս", "հինգ", "վեց", "յոթ", "ութ", "ինը", "տասը", "տասնմեկ", "տասներկու", "տասներեք", "տասնչորս", "տասնհինգ", "տասնվեց", "տասնյոթ", "տասնութ", "տասնինը", "քսան", "երեսուն", "քառասուն", "հիսուն", "վաթսուն", "յոթանասուն", "ութսուն", "իննսուն", "հարյուր", "հազար", "միլիոն", "միլիարդ", "տրիլիոն", "քվինտիլիոն", ] def like_num(text): if text.startswith(("+", "-", "±", "~")): text = text[1:] text = text.replace(",", "").replace(".", "") if text.isdigit(): return True if text.count("/") == 1: num, denom = text.split("/") if num.isdigit() and denom.isdigit(): return True if text.lower() in _num_words: return True return False LEX_ATTRS = {LIKE_NUM: like_num}
953
15.736842
49
py
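The same getter is also reachable through the token API once a blank pipeline is built; a sketch assuming the installed spaCy registers "hy":

import spacy

nlp = spacy.blank("hy")
doc = nlp("տասը միլիոն")
print([(t.text, t.like_num) for t in doc])  # both True, via LEX_ATTRS above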
spaCy
spaCy-master/spacy/lang/hy/stop_words.py
STOP_WORDS = set( """ նա ողջը այստեղ ենք նա էիր որպես ուրիշ բոլորը այն այլ նույնչափ էի մի և ողջ ես ոմն հետ նրանք ամենքը ըստ ինչ-ինչ այսպես համայն մի նաև նույնքան դա ովևէ համար այնտեղ էին որոնք սույն ինչ-որ ամենը նույնպիսի ու իր որոշ միևնույն ի այնպիսի մենք ամեն ոք նույն երբևէ այն որևէ ին այդպես նրա որը վրա դու էինք այդպիսի էիք յուրաքանչյուրը եմ պիտի այդ ամբողջը հետո եք ամեն այլ կամ այսքան որ այնպես այսինչ բոլոր է մեկնումեկը այդչափ այնքան ամբողջ երբևիցե այնչափ ամենայն մյուս այնինչ իսկ այդտեղ այս սա են ամեն ինչ որևիցե ում մեկը այդ դուք այսչափ այդքան այսպիսի էր յուրաքանչյուր այս մեջ թ """.split() )
607
4.62963
17
py
spaCy
spaCy-master/spacy/lang/id/__init__.py
from ...language import BaseDefaults, Language from .lex_attrs import LEX_ATTRS from .punctuation import TOKENIZER_INFIXES, TOKENIZER_PREFIXES, TOKENIZER_SUFFIXES from .stop_words import STOP_WORDS from .syntax_iterators import SYNTAX_ITERATORS from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS class IndonesianDefaults(BaseDefaults): tokenizer_exceptions = TOKENIZER_EXCEPTIONS prefixes = TOKENIZER_PREFIXES suffixes = TOKENIZER_SUFFIXES infixes = TOKENIZER_INFIXES syntax_iterators = SYNTAX_ITERATORS lex_attr_getters = LEX_ATTRS stop_words = STOP_WORDS class Indonesian(Language): lang = "id" Defaults = IndonesianDefaults __all__ = ["Indonesian"]
698
26.96
82
py
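Indonesian leans heavily on tokenizer exceptions for hyphenated reduplications; assuming the ID_BASE_EXCEPTIONS list in the next file feeds the TOKENIZER_EXCEPTIONS imported above, forms such as "buku-buku" should stay single tokens:

import spacy

nlp = spacy.blank("id")
doc = nlp("buku-buku kadang-kadang tiba-tiba")
print([t.text for t in doc])  # expected: three tokens, hyphens intact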
spaCy
spaCy-master/spacy/lang/id/_tokenizer_exceptions_list.py
ID_BASE_EXCEPTIONS = set( """ aba-aba abah-abah abal-abal abang-abang abar-abar abong-abong abrit-abrit abrit-abritan abu-abu abuh-abuhan abuk-abuk abun-abun acak-acak acak-acakan acang-acang acap-acap aci-aci aci-acian aci-acinya aco-acoan ad-blocker ad-interim ada-ada ada-adanya ada-adanyakah adang-adang adap-adapan add-on add-ons adik-adik adik-beradik aduk-adukan after-sales agak-agak agak-agih agama-agama agar-agar age-related agut-agut air-air air-cooled air-to-air ajak-ajak ajar-ajar aji-aji akal-akal akal-akalan akan-akan akar-akar akar-akaran akhir-akhir akhir-akhirnya aki-aki aksi-aksi alah-mengalahi alai-belai alan-alan alang-alang alang-alangan alap-alap alat-alat ali-ali alif-alifan alih-alih aling-aling aling-alingan alip-alipan all-electric all-in-one all-out all-time alon-alon alt-right alt-text alu-alu alu-aluan alun-alun alur-alur alur-aluran always-on amai-amai amatir-amatiran ambah-ambah ambai-ambai ambil-mengambil ambreng-ambrengan ambring-ambringan ambu-ambu ambung-ambung amin-amin amit-amit ampai-ampai amprung-amprungan amung-amung anai-anai anak-anak anak-anakan anak-beranak anak-cucu anak-istri ancak-ancak ancang-ancang ancar-ancar andang-andang andeng-andeng aneh-aneh angan-angan anggar-anggar anggaran-red anggota-anggota anggung-anggip angin-angin angin-anginan angkal-angkal angkul-angkul angkup-angkup angkut-angkut ani-ani aning-aning anjang-anjang anjing-anjing anjung-anjung anjung-anjungan antah-berantah antar-antar antar-mengantar ante-mortem antek-antek anter-anter antihuru-hara anting-anting antung-antung anyam-menganyam anyang-anyang apa-apa apa-apaan apel-apel api-api apit-apit aplikasi-aplikasi apotek-apotek aprit-apritan apu-apu apung-apung arah-arah arak-arak arak-arakan aram-aram arek-arek arem-arem ari-ari artis-artis aru-aru arung-arungan asa-asaan asal-asalan asal-muasal asal-usul asam-asaman asas-asas aset-aset asmaul-husna asosiasi-asosiasi asuh-asuh asyik-asyiknya atas-mengatasi ati-ati atung-atung aturan-aturan audio-video audio-visual auto-brightness auto-complete auto-focus auto-play auto-update avant-garde awan-awan awan-berawan awang-awang awang-gemawang awar-awar awat-awat awik-awik awut-awutan ayah-anak ayak-ayak ayam-ayam ayam-ayaman ayang-ayang ayat-ayat ayeng-ayengan ayun-temayun ayut-ayutan ba-bi-bu back-to-back back-up badan-badan bade-bade badut-badut bagi-bagi bahan-bahan bahu-membahu baik-baik bail-out bajang-bajang baji-baji balai-balai balam-balam balas-berbalas balas-membalas bale-bale baling-baling ball-playing balon-balon balut-balut band-band bandara-bandara bangsa-bangsa bangun-bangun bangunan-bangunan bank-bank bantah-bantah bantahan-bantahan bantal-bantal banyak-banyak bapak-anak bapak-bapak bapak-ibu bapak-ibunya barang-barang barat-barat barat-daya barat-laut barau-barau bare-bare bareng-bareng bari-bari barik-barik baris-berbaris baru-baru baru-batu barung-barung basa-basi bata-bata batalyon-batalyon batang-batang batas-batas batir-batir batu-batu batuk-batuk batung-batung bau-bauan bawa-bawa bayan-bayan bayang-bayang bayi-bayi bea-cukai bedeng-bedeng bedil-bedal bedil-bedilan begana-begini bek-bek bekal-bekalan bekerdom-kerdom bekertak-kertak belang-belang belat-belit beliau-beliau belu-belai belum-belum benar-benar benda-benda bengang-bengut benggal-benggil bengkal-bengkil bengkang-bengkok bengkang-bengkong bengkang-bengkung benteng-benteng bentuk-bentuk benua-benua ber-selfie berabad-abad berabun-rabun beracah-acah berada-ada beradik-berkakak beragah-agah beragak-agak beragam-ragam beraja-raja berakit-rakit 
beraku-akuan beralu-aluan beralun-alun beramah-ramah beramah-ramahan beramah-tamah beramai-ramai berambai-ambai berambal-ambalan berambil-ambil beramuk-amuk beramuk-amukan berandai-andai berandai-randai beraneh-aneh berang-berang berangan-angan beranggap-anggapan berangguk-angguk berangin-angin berangka-angka berangka-angkaan berangkai-rangkai berangkap-rangkapan berani-berani beranja-anja berantai-rantai berapi-api berapung-apung berarak-arakan beras-beras berasak-asak berasak-asakan berasap-asap berasing-asingan beratus-ratus berawa-rawa berawas-awas berayal-ayalan berayun-ayun berbagai-bagai berbahas-bahasan berbahasa-bahasa berbaik-baikan berbait-bait berbala-bala berbalas-balasan berbalik-balik berbalun-balun berbanjar-banjar berbantah-bantah berbanyak-banyak berbarik-barik berbasa-basi berbasah-basah berbatu-batu berbayang-bayang berbecak-becak berbeda-beda berbedil-bedilan berbega-bega berbeka-beka berbelah-belah berbelakang-belakangan berbelang-belang berbelau-belauan berbeli-beli berbeli-belian berbelit-belit berbelok-belok berbenang-benang berbenar-benar berbencah-bencah berbencol-bencol berbenggil-benggil berbentol-bentol berbentong-bentong berberani-berani berbesar-besar berbidai-bidai berbiduk-biduk berbiku-biku berbilik-bilik berbinar-binar berbincang-bincang berbingkah-bingkah berbintang-bintang berbintik-bintik berbintil-bintil berbisik-bisik berbolak-balik berbolong-bolong berbondong-bondong berbongkah-bongkah berbuai-buai berbual-bual berbudak-budak berbukit-bukit berbulan-bulan berbunga-bunga berbuntut-buntut berbunuh-bunuhan berburu-buru berburuk-buruk berbutir-butir bercabang-cabang bercaci-cacian bercakap-cakap bercakar-cakaran bercamping-camping bercantik-cantik bercari-cari bercari-carian bercarik-carik bercarut-carut bercebar-cebur bercepat-cepat bercerai-berai bercerai-cerai bercetai-cetai berciap-ciap bercikun-cikun bercinta-cintaan bercita-cita berciut-ciut bercompang-camping berconteng-conteng bercoreng-coreng bercoreng-moreng bercuang-caing bercuit-cuit bercumbu-cumbu bercumbu-cumbuan bercura-bura bercura-cura berdada-dadaan berdahulu-dahuluan berdalam-dalam berdalih-dalih berdampung-dampung berdebar-debar berdecak-decak berdecap-decap berdecup-decup berdecut-decut berdedai-dedai berdegap-degap berdegar-degar berdeham-deham berdekah-dekah berdekak-dekak berdekap-dekapan berdekat-dekat berdelat-delat berdembai-dembai berdembun-dembun berdempang-dempang berdempet-dempet berdencing-dencing berdendam-dendaman berdengkang-dengkang berdengut-dengut berdentang-dentang berdentum-dentum berdentung-dentung berdenyar-denyar berdenyut-denyut berdepak-depak berdepan-depan berderai-derai berderak-derak berderam-deram berderau-derau berderik-derik berdering-dering berderung-derung berderus-derus berdesak-desakan berdesik-desik berdesing-desing berdesus-desus berdikit-dikit berdingkit-dingkit berdua-dua berduri-duri berduru-duru berduyun-duyun berebut-rebut berebut-rebutan beregang-regang berek-berek berembut-rembut berempat-empat berenak-enak berencel-encel bereng-bereng berenggan-enggan berenteng-renteng beresa-esaan beresah-resah berfoya-foya bergagah-gagahan bergagap-gagap bergagau-gagau bergalur-galur berganda-ganda berganjur-ganjur berganti-ganti bergarah-garah bergaruk-garuk bergaya-gaya bergegas-gegas bergelang-gelang bergelap-gelap bergelas-gelasan bergeleng-geleng bergemal-gemal bergembar-gembor bergembut-gembut bergepok-gepok bergerek-gerek bergesa-gesa bergilir-gilir bergolak-golak bergolek-golek bergolong-golong bergores-gores bergotong-royong bergoyang-goyang 
bergugus-gugus bergulung-gulung bergulut-gulut bergumpal-gumpal bergunduk-gunduk bergunung-gunung berhadap-hadapan berhamun-hamun berhandai-handai berhanyut-hanyut berhari-hari berhati-hati berhati-hatilah berhektare-hektare berhilau-hilau berhormat-hormat berhujan-hujan berhura-hura beri-beri beri-memberi beria-ia beria-ria beriak-riak beriba-iba beribu-ribu berigi-rigi berimpit-impit berindap-indap bering-bering beringat-ingat beringgit-ringgit berintik-rintik beriring-iring beriring-iringan berita-berita berjabir-jabir berjaga-jaga berjagung-jagung berjalan-jalan berjalar-jalar berjalin-jalin berjalur-jalur berjam-jam berjari-jari berjauh-jauhan berjegal-jegalan berjejal-jejal berjela-jela berjengkek-jengkek berjenis-jenis berjenjang-jenjang berjilid-jilid berjinak-jinak berjingkat-jingkat berjingkik-jingkik berjingkrak-jingkrak berjongkok-jongkok berjubel-jubel berjujut-jujutan berjulai-julai berjumbai-jumbai berjumbul-jumbul berjuntai-juntai berjurai-jurai berjurus-jurus berjuta-juta berka-li-kali berkabu-kabu berkaca-kaca berkaing-kaing berkait-kaitan berkala-kala berkali-kali berkamit-kamit berkanjar-kanjar berkaok-kaok berkarung-karung berkasak-kusuk berkasih-kasihan berkata-kata berkatak-katak berkecai-kecai berkecek-kecek berkecil-kecil berkecil-kecilan berkedip-kedip berkejang-kejang berkejap-kejap berkejar-kejaran berkelar-kelar berkelepai-kelepai berkelip-kelip berkelit-kelit berkelok-kelok berkelompok-kelompok berkelun-kelun berkembur-kembur berkempul-kempul berkena-kenaan berkenal-kenalan berkendur-kendur berkeok-keok berkepak-kepak berkepal-kepal berkeping-keping berkepul-kepul berkeras-kerasan berkering-kering berkeritik-keritik berkeruit-keruit berkerut-kerut berketai-ketai berketak-ketak berketak-ketik berketap-ketap berketap-ketip berketar-ketar berketi-keti berketil-ketil berketuk-ketak berketul-ketul berkial-kial berkian-kian berkias-kias berkias-kiasan berkibar-kibar berkilah-kilah berkilap-kilap berkilat-kilat berkilau-kilauan berkilo-kilo berkimbang-kimbang berkinja-kinja berkipas-kipas berkira-kira berkirim-kiriman berkisar-kisar berkoak-koak berkoar-koar berkobar-kobar berkobok-kobok berkocak-kocak berkodi-kodi berkolek-kolek berkomat-kamit berkopah-kopah berkoper-koper berkotak-kotak berkuat-kuat berkuat-kuatan berkumur-kumur berkunang-kunang berkunar-kunar berkunjung-kunjungan berkurik-kurik berkurun-kurun berkusau-kusau berkusu-kusu berkusut-kusut berkuting-kuting berkutu-kutuan berlabun-labun berlain-lainan berlaju-laju berlalai-lalai berlama-lama berlambai-lambai berlambak-lambak berlampang-lampang berlanggar-langgar berlapang-lapang berlapis-lapis berlapuk-lapuk berlarah-larah berlarat-larat berlari-lari berlari-larian berlarih-larih berlarik-larik berlarut-larut berlawak-lawak berlayap-layapan berlebih-lebih berlebih-lebihan berleha-leha berlekas-lekas berlekas-lekasan berlekat-lekat berlekuk-lekuk berlempar-lemparan berlena-lena berlengah-lengah berlenggak-lenggok berlenggek-lenggek berlenggok-lenggok berleret-leret berletih-letih berliang-liuk berlibat-libat berligar-ligar berliku-liku berlikur-likur berlimbak-limbak berlimpah-limpah berlimpap-limpap berlimpit-limpit berlinang-linang berlindak-lindak berlipat-lipat berlomba-lomba berlompok-lompok berloncat-loncatan berlopak-lopak berlubang-lubang berlusin-lusin bermaaf-maafan bermabuk-mabukan bermacam-macam bermain-main bermalam-malam bermalas-malas bermalas-malasan bermanik-manik bermanis-manis bermanja-manja bermasak-masak bermati-mati bermegah-megah bermemek-memek bermenung-menung bermesra-mesraan 
bermewah-mewah bermewah-mewahan berminggu-minggu berminta-minta berminyak-minyak bermuda-muda bermudah-mudah bermuka-muka bermula-mula bermuluk-muluk bermulut-mulut bernafsi-nafsi bernaka-naka bernala-nala bernanti-nanti berniat-niat bernyala-nyala berogak-ogak beroleng-oleng berolok-olok beromong-omong beroncet-roncet beronggok-onggok berorang-orang beroyal-royal berpada-pada berpadu-padu berpahit-pahit berpair-pair berpal-pal berpalu-palu berpalu-paluan berpalun-palun berpanas-panas berpandai-pandai berpandang-pandangan berpangkat-pangkat berpanjang-panjang berpantun-pantun berpasang-pasang berpasang-pasangan berpasuk-pasuk berpayah-payah berpeluh-peluh berpeluk-pelukan berpenat-penat berpencar-pencar berpendar-pendar berpenggal-penggal berperai-perai berperang-perangan berpesai-pesai berpesta-pesta berpesuk-pesuk berpetak-petak berpeti-peti berpihak-pihak berpijar-pijar berpikir-pikir berpikul-pikul berpilih-pilih berpilin-pilin berpindah-pindah berpintal-pintal berpirau-pirau berpisah-pisah berpolah-polah berpolok-polok berpongah-pongah berpontang-panting berporah-porah berpotong-potong berpotong-potongan berpuak-puak berpual-pual berpugak-pugak berpuing-puing berpukas-pukas berpuluh-puluh berpulun-pulun berpuntal-puntal berpura-pura berpusar-pusar berpusing-pusing berpusu-pusu berputar-putar berrumpun-rumpun bersaf-saf bersahut-sahutan bersakit-sakit bersalah-salahan bersalam-salaman bersalin-salin bersalip-salipan bersama-sama bersambar-sambaran bersambut-sambutan bersampan-sampan bersantai-santai bersapa-sapaan bersarang-sarang bersedan-sedan bersedia-sedia bersedu-sedu bersejuk-sejuk bersekat-sekat berselang-selang berselang-seli berselang-seling berselang-tenggang berselit-selit berseluk-beluk bersembunyi-sembunyi bersembunyi-sembunyian bersembur-semburan bersempit-sempit bersenang-senang bersenang-senangkan bersenda-senda bersendi-sendi bersenggang-senggang bersenggau-senggau bersepah-sepah bersepak-sepakan bersepi-sepi berserak-serak berseri-seri berseru-seru bersesak-sesak bersetai-setai bersia-sia bersiap-siap bersiar-siar bersih-bersih bersikut-sikutan bersilir-silir bersimbur-simburan bersinau-sinau bersopan-sopan bersorak-sorai bersuap-suapan bersudah-sudah bersuka-suka bersuka-sukaan bersuku-suku bersulang-sulang bersumpah-sumpahan bersungguh-sungguh bersungut-sungut bersunyi-sunyi bersuruk-surukan bersusah-susah bersusuk-susuk bersusuk-susukan bersutan-sutan bertabur-tabur bertahan-tahan bertahu-tahu bertahun-tahun bertajuk-tajuk bertakik-takik bertala-tala bertalah-talah bertali-tali bertalu-talu bertalun-talun bertambah-tambah bertanda-tandaan bertangis-tangisan bertangkil-tangkil bertanya-tanya bertarik-tarikan bertatai-tatai bertatap-tatapan bertatih-tatih bertawan-tawan bertawar-tawaran bertebu-tebu bertebu-tebukan berteguh-teguh berteguh-teguhan berteka-teki bertelang-telang bertelau-telau bertele-tele bertembuk-tembuk bertempat-tempat bertempuh-tempuh bertenang-tenang bertenggang-tenggangan bertentu-tentu bertepek-tepek berterang-terang berterang-terangan berteriak-teriak bertikam-tikaman bertimbal-timbalan bertimbun-timbun bertimpa-timpa bertimpas-timpas bertingkah-tingkah bertingkat-tingkat bertinjau-tinjauan bertiras-tiras bertitar-titar bertitik-titik bertoboh-toboh bertolak-tolak bertolak-tolakan bertolong-tolongan bertonjol-tonjol bertruk-truk bertua-tua bertua-tuaan bertual-tual bertubi-tubi bertukar-tukar bertukar-tukaran bertukas-tukas bertumpak-tumpak bertumpang-tindih bertumpuk-tumpuk bertunda-tunda bertunjuk-tunjukan bertura-tura berturut-turut 
bertutur-tutur beruas-ruas berubah-ubah berulang-alik berulang-ulang berumbai-rumbai berundak-undak berundan-undan berundung-undung berunggas-runggas berunggun-unggun berunggut-unggut berungkur-ungkuran beruntai-untai beruntun-runtun beruntung-untung berunyai-unyai berupa-rupa berura-ura beruris-uris berurut-urutan berwarna-warna berwarna-warni berwindu-windu berwiru-wiru beryang-yang besar-besar besar-besaran betak-betak beti-beti betik-betik betul-betul biang-biang biar-biar biaya-biaya bicu-bicu bidadari-bidadari bidang-bidang bijak-bijaklah biji-bijian bila-bila bilang-bilang bincang-bincang bincang-bincut bingkah-bingkah bini-binian bintang-bintang bintik-bintik bio-oil biri-biri biru-biru biru-hitam biru-kuning bisik-bisik biti-biti blak-blakan blok-blok bocah-bocah bohong-bohong bohong-bohongan bola-bola bolak-balik bolang-baling boleh-boleh bom-bom bomber-bomber bonek-bonek bongkar-bangkir bongkar-membongkar bongkar-pasang boro-boro bos-bos bottom-up box-to-box boyo-boyo buah-buahan buang-buang buat-buatan buaya-buaya bubun-bubun bugi-bugi build-up built-in built-up buka-buka buka-bukaan buka-tutup bukan-bukan bukti-bukti buku-buku bulan-bulan bulan-bulanan bulang-baling bulang-bulang bulat-bulat buli-buli bulu-bulu buluh-buluh bulus-bulus bunga-bunga bunga-bungaan bunuh-membunuh bunyi-bunyian bupati-bupati bupati-wakil buru-buru burung-burung burung-burungan bus-bus business-to-business busur-busur butir-butir by-pass bye-bye cabang-cabang cabik-cabik cabik-mencabik cabup-cawabup caci-maki cagub-cawagub caing-caing cakar-mencakar cakup-mencakup calak-calak calar-balar caleg-caleg calo-calo calon-calon campang-camping campur-campur capres-cawapres cara-cara cari-cari cari-carian carut-marut catch-up cawali-cawawali cawe-cawe cawi-cawi cebar-cebur celah-celah celam-celum celangak-celinguk celas-celus celedang-celedok celengkak-celengkok celingak-celinguk celung-celung cemas-cemas cenal-cenil cengar-cengir cengir-cengir cengis-cengis cengking-mengking centang-perenang cepat-cepat ceplas-ceplos cerai-berai cerita-cerita ceruk-menceruk ceruk-meruk cetak-biru cetak-mencetak cetar-ceter check-in check-ins check-up chit-chat choki-choki cingak-cinguk cipika-cipiki ciri-ciri ciri-cirinya cirit-birit cita-cita cita-citaku close-up closed-circuit coba-coba cobak-cabik cobar-cabir cola-cala colang-caling comat-comot comot-comot compang-camping computer-aided computer-generated condong-mondong congak-cangit conggah-canggih congkah-cangkih congkah-mangkih copak-capik copy-paste corak-carik corat-coret coreng-moreng coret-coret crat-crit cross-border cross-dressing crypto-ransomware cuang-caing cublak-cublak cubung-cubung culik-culik cuma-cuma cumi-cumi cungap-cangip cupu-cupu dabu-dabu daerah-daerah dag-dag dag-dig-dug daging-dagingan dahulu-mendahului dalam-dalam dali-dali dam-dam danau-danau dansa-dansi dapil-dapil dapur-dapur dari-dari daru-daru dasar-dasar datang-datang datang-mendatangi daun-daun daun-daunan dawai-dawai dayang-dayang dayung-mayung debak-debuk debu-debu deca-core decision-making deep-lying deg-degan degap-degap dekak-dekak dekat-dekat dengar-dengaran dengking-mendengking departemen-departemen depo-depo deputi-deputi desa-desa desa-kota desas-desus detik-detik dewa-dewa dewa-dewi dewan-dewan dewi-dewi dial-up diam-diam dibayang-bayangi dibuat-buat diiming-imingi dilebih-lebihkan dimana-mana dimata-matai dinas-dinas dinul-Islam diobok-obok diolok-olok direksi-direksi direktorat-direktorat dirjen-dirjen dirut-dirut ditunggu-tunggu divisi-divisi do-it-yourself doa-doa dog-dog 
doggy-style dokok-dokok dolak-dalik dor-doran dorong-mendorong dosa-dosa dress-up drive-in dua-dua dua-duaan dua-duanya dubes-dubes duduk-duduk dugaan-dugaan dulang-dulang duri-duri duta-duta dwi-kewarganegaraan e-arena e-billing e-budgeting e-cctv e-class e-commerce e-counting e-elektronik e-entertainment e-evolution e-faktur e-filing e-fin e-form e-government e-govt e-hakcipta e-id e-info e-katalog e-ktp e-leadership e-lhkpn e-library e-loket e-m1 e-money e-news e-nisn e-npwp e-paspor e-paten e-pay e-perda e-perizinan e-planning e-polisi e-power e-punten e-retribusi e-samsat e-sport e-store e-tax e-ticketing e-tilang e-toll e-visa e-voting e-wallet e-warong ecek-ecek eco-friendly eco-park edan-edanan editor-editor editor-in-chief efek-efek ekonomi-ekonomi eksekutif-legislatif ekspor-impor elang-elang elemen-elemen emak-emak embuh-embuhan empat-empat empek-empek empet-empetan empok-empok empot-empotan enak-enak encal-encal end-to-end end-user endap-endap endut-endut endut-endutan engah-engah engap-engap enggan-enggan engkah-engkah engket-engket entah-berentah enten-enten entry-level equity-linked erang-erot erat-erat erek-erek ereng-ereng erong-erong esek-esek ex-officio exchange-traded exercise-induced extra-time face-down face-to-face fair-play fakta-fakta faktor-faktor fakultas-fakultas fase-fase fast-food feed-in fifty-fifty file-file first-leg first-team fitur-fitur fitur-fiturnya fixed-income flip-flop flip-plop fly-in follow-up foto-foto foya-foya fraksi-fraksi free-to-play front-end fungsi-fungsi gaba-gaba gabai-gabai gada-gada gading-gading gadis-gadis gado-gado gail-gail gajah-gajah gajah-gajahan gala-gala galeri-galeri gali-gali gali-galian galing-galing galu-galu gamak-gamak gambar-gambar gambar-menggambar gamit-gamitan gampang-gampangan gana-gini ganal-ganal ganda-berganda ganjal-mengganjal ganjil-genap ganteng-ganteng gantung-gantung gapah-gopoh gara-gara garah-garah garis-garis gasak-gasakan gatal-gatal gaun-gaun gawar-gawar gaya-gayanya gayang-gayang ge-er gebyah-uyah gebyar-gebyar gedana-gedini gedebak-gedebuk gedebar-gedebur gedung-gedung gelang-gelang gelap-gelapan gelar-gelar gelas-gelas gelembung-gelembungan geleng-geleng geli-geli geliang-geliut geliat-geliut gembar-gembor gembrang-gembreng gempul-gempul gempur-menggempur gendang-gendang gengsi-gengsian genjang-genjot genjot-genjotan genjrang-genjreng genome-wide geo-politik gerabak-gerubuk gerak-gerik gerak-geriknya gerakan-gerakan gerbas-gerbus gereja-gereja gereng-gereng geriak-geriuk gerit-gerit gerot-gerot geruh-gerah getak-getuk getem-getem geti-geti gial-gial gial-giul gila-gila gila-gilaan gilang-gemilang gilap-gemilap gili-gili giling-giling gilir-bergilir ginang-ginang girap-girap girik-girik giring-giring go-auto go-bills go-bluebird go-box go-car go-clean go-food go-glam go-jek go-kart go-mart go-massage go-med go-points go-pulsa go-ride go-send go-shop go-tix go-to-market goak-goak goal-line gol-gol golak-galik gondas-gandes gonjang-ganjing gonjlang-ganjling gonta-ganti gontok-gontokan gorap-gorap gorong-gorong gotong-royong gresek-gresek gua-gua gual-gail gubernur-gubernur gudu-gudu gula-gula gulang-gulang gulung-menggulung guna-ganah guna-guna gundala-gundala guntang-guntang gunung-ganang gunung-gemunung gunung-gunungan guru-guru habis-habis habis-habisan hak-hak hak-hal hakim-hakim hal-hal halai-balai half-time hama-hama hampir-hampir hancur-hancuran hancur-menghancurkan hands-free hands-on hang-out hantu-hantu happy-happy harap-harap harap-harapan hard-disk harga-harga hari-hari harimau-harimau 
harum-haruman hasil-hasil hasta-wara hat-trick hati-hati hati-hatilah head-mounted head-to-head head-up heads-up heavy-duty hebat-hebatan hewan-hewan hexa-core hidup-hidup hidup-mati hila-hila hilang-hilang hina-menghinakan hip-hop hiru-biru hiru-hara hiruk-pikuk hitam-putih hitung-hitung hitung-hitungan hormat-menghormati hot-swappable hotel-hotel how-to hubar-habir hubaya-hubaya hukum-red hukuman-hukuman hula-hoop hula-hula hulu-hilir humas-humas hura-hura huru-hara ibar-ibar ibu-anak ibu-ibu icak-icak icip-icip idam-idam ide-ide igau-igauan ikan-ikan ikut-ikut ikut-ikutan ilam-ilam ilat-ilatan ilmu-ilmu imbang-imbangan iming-iming imut-imut inang-inang inca-binca incang-incut industri-industri ingar-bingar ingar-ingar ingat-ingat ingat-ingatan ingau-ingauan inggang-inggung injak-injak input-output instansi-instansi instant-on instrumen-instrumen inter-governmental ira-ira irah-irahan iras-iras iring-iringan iris-irisan isak-isak isat-bb iseng-iseng istana-istana istri-istri isu-isu iya-iya jabatan-jabatan jadi-jadian jagoan-jagoan jaja-jajaan jaksa-jaksa jala-jala jalan-jalan jali-jali jalin-berjalin jalin-menjalin jam-jam jamah-jamahan jambak-jambakan jambu-jambu jampi-jampi janda-janda jangan-jangan janji-janji jarang-jarang jari-jari jaring-jaring jarum-jarum jasa-jasa jatuh-bangun jauh-dekat jauh-jauh jawi-jawi jebar-jebur jebat-jebatan jegal-jegalan jejak-jejak jelang-menjelang jelas-jelas jelur-jelir jembatan-jembatan jenazah-jenazah jendal-jendul jenderal-jenderal jenggar-jenggur jenis-jenis jenis-jenisnya jentik-jentik jerah-jerih jinak-jinak jiwa-jiwa joli-joli jolong-jolong jongkang-jangking jongkar-jangkir jongkat-jangkit jor-joran jotos-jotosan juak-juak jual-beli juang-juang julo-julo julung-julung julur-julur jumbai-jumbai jungkang-jungkit jungkat-jungkit jurai-jurai kabang-kabang kabar-kabari kabir-kabiran kabruk-kabrukan kabu-kabu kabupaten-kabupaten kabupaten-kota kaca-kaca kacang-kacang kacang-kacangan kacau-balau kadang-kadang kader-kader kades-kades kadis-kadis kail-kail kain-kain kait-kait kakak-adik kakak-beradik kakak-kakak kakek-kakek kakek-nenek kaki-kaki kala-kala kalau-kalau kaleng-kalengan kali-kalian kalimat-kalimat kalung-kalung kalut-malut kambing-kambing kamit-kamit kampung-kampung kampus-kampus kanak-kanak kanak-kanan kanan-kanak kanan-kiri kangen-kangenan kanwil-kanwil kapa-kapa kapal-kapal kapan-kapan kapolda-kapolda kapolres-kapolres kapolsek-kapolsek kapu-kapu karang-karangan karang-mengarang kareseh-peseh karut-marut karya-karya kasak-kusuk kasus-kasus kata-kata katang-katang kava-kava kawa-kawa kawan-kawan kawin-cerai kawin-mawin kayu-kayu kayu-kayuan ke-Allah-an keabu-abuan kearab-araban keasyik-asyikan kebarat-baratan kebasah-basahan kebat-kebit kebata-bataan kebayi-bayian kebelanda-belandaan keberlarut-larutan kebesar-hatian kebiasaan-kebiasaan kebijakan-kebijakan kebiru-biruan kebudak-budakan kebun-kebun kebut-kebutan kecamatan-kecamatan kecentang-perenangan kecil-kecil kecil-kecilan kecil-mengecil kecokelat-cokelatan kecomak-kecimik kecuh-kecah kedek-kedek kedekak-kedekik kedesa-desaan kedubes-kedubes kedutaan-kedutaan keempat-empatnya kegadis-gadisan kegelap-gelapan kegiatan-kegiatan kegila-gilaan kegirang-girangan kehati-hatian keheran-heranan kehijau-hijauan kehitam-hitaman keinggris-inggrisan kejaga-jagaan kejahatan-kejahatan kejang-kejang kejar-kejar kejar-kejaran kejar-mengejar kejingga-jinggaan kejut-kejut kejutan-kejutan kekabur-kaburan kekanak-kanakan kekoboi-koboian kekota-kotaan kekuasaan-kekuasaan kekuning-kuningan kelak-kelik 
kelak-keluk kelaki-lakian kelang-kelok kelap-kelip kelasah-kelusuh kelek-kelek kelek-kelekan kelemak-kelemek kelik-kelik kelip-kelip kelompok-kelompok kelontang-kelantung keluar-masuk kelurahan-kelurahan kelusuh-kelasah kelut-melut kemak-kemik kemalu-maluan kemana-mana kemanja-manjaan kemarah-marahan kemasam-masaman kemati-matian kembang-kembang kemenpan-rb kementerian-kementerian kemerah-merahan kempang-kempis kempas-kempis kemuda-mudaan kena-mengena kenal-mengenal kenang-kenangan kencang-kencung kencing-mengencingi kencrang-kencring kendang-kendang kendang-kendangan keningrat-ningratan kentung-kentung kenyat-kenyit kepala-kepala kepala-kepalaan kepandir-pandiran kepang-kepot keperak-perakan kepetah-lidahan kepilu-piluan keping-keping kepucat-pucatan kepuh-kepuh kepura-puraan keputih-putihan kerah-kerahan kerancak-rancakan kerang-kerangan kerang-keroh kerang-kerot kerang-keruk kerang-kerung kerap-kerap keras-mengerasi kercap-kercip kercap-kercup keriang-keriut kerja-kerja kernyat-kernyut kerobak-kerabit kerobak-kerobek kerobak-kerobik kerobat-kerabit kerong-kerong keropas-kerapis kertak-kertuk kertap-kertap keruntang-pungkang kesalahan-kesalahan kesap-kesip kesemena-menaan kesenak-senakan kesewenang-wenangan kesia-siaan kesik-kesik kesipu-sipuan kesu-kesi kesuh-kesih kesuk-kesik ketakar-keteker ketakutan-ketakutan ketap-ketap ketap-ketip ketar-ketir ketentuan-ketentuan ketergesa-gesaan keti-keti ketidur-tiduran ketiga-tiganya ketir-ketir ketua-ketua ketua-tuaan ketuan-tuanan keungu-unguan kewangi-wangian ki-ka kia-kia kiai-kiai kiak-kiak kial-kial kiang-kiut kiat-kiat kibang-kibut kicang-kecoh kicang-kicu kick-off kida-kida kijang-kijang kilau-mengilau kili-kili kilik-kilik kincir-kincir kios-kios kira-kira kira-kiraan kiri-kanan kirim-berkirim kisah-kisah kisi-kisi kitab-kitab kitang-kitang kiu-kiu klaim-klaim klik-klikan klip-klip klub-klub kluntang-klantung knock-knock knock-on knock-out ko-as ko-pilot koak-koak koboi-koboian kocah-kacih kocar-kacir kodam-kodam kode-kode kodim-kodim kodok-kodok kolang-kaling kole-kole koleh-koleh kolong-kolong koma-koma komat-kamit komisaris-komisaris komisi-komisi komite-komite komoditas-komoditas kongko-kongko konsulat-konsulat konsultan-konsultan kontal-kantil kontang-kanting kontra-terorisme kontrak-kontrak konvensi-konvensi kopat-kapit koperasi-koperasi kopi-kopi koran-koran koreng-koreng kos-kosan kosak-kasik kota-kota kota-wakil kotak-katik kotak-kotak koyak-koyak kuas-kuas kuat-kuat kubu-kubuan kucar-kacir kucing-kucing kucing-kucingan kuda-kuda kuda-kudaan kudap-kudap kue-kue kulah-kulah kulak-kulak kulik-kulik kulum-kulum kumat-kamit kumpul-kumpul kunang-kunang kunar-kunar kung-fu kuning-hitam kupat-kapit kupu-kupu kura-kura kurang-kurang kusat-mesat kutat-kutet kuti-kuti kuwung-kuwung kyai-kyai laba-laba labi-labi labu-labu laga-laga lagi-lagi lagu-lagu laguh-lagah lain-lain laki-laki lalu-lalang lalu-lintas lama-kelamaan lama-lama lamat-lamat lambat-lambat lampion-lampion lampu-lampu lancang-lancang lancar-lancar langak-longok langgar-melanggar langit-langit langkah-langka langkah-langkah lanja-lanjaan lapas-lapas lapat-lapat laporan-laporan laptop-tablet large-scale lari-lari lari-larian laskar-laskar lauk-pauk laun-laun laut-timur lawah-lawah lawak-lawak lawan-lawan lawi-lawi layang-layang layu-layuan lebih-lebih lecet-lecet legak-legok legum-legum legup-legup leha-leha lekak-lekuk lekap-lekup lekas-lekas lekat-lekat lekuh-lekih lekum-lekum lekup-lekap lembaga-lembaga lempar-lemparan lenggak-lenggok lenggok-lenggok lenggut-lenggut 
lengket-lengket lentam-lentum lentang-lentok lentang-lentung lepa-lepa lerang-lerang lereng-lereng lese-majeste letah-letai lete-lete letuk-letuk letum-letum letup-letup leyeh-leyeh liang-liuk liang-liut liar-liar liat-liut lidah-lidah life-toxins liga-liga light-emitting lika-liku lil-alamin lilin-lilin line-up lintas-selat lipat-melipat liquid-cooled lithium-ion lithium-polymer liuk-liuk liung-liung lobi-lobi lock-up locked-in lokasi-lokasi long-term longak-longok lontang-lanting lontang-lantung lopak-lapik lopak-lopak low-cost low-density low-end low-light low-multi low-pass lucu-lucu luka-luka lukisan-lukisan lumba-lumba lumi-lumi luntang-lantung lupa-lupa lupa-lupaan lurah-camat maaf-memaafkan mabuk-mabukan mabul-mabul macam-macam macan-macanan machine-to-machine mafia-mafia mahasiswa-mahasiswi mahasiswa/i mahi-mahi main-main main-mainan main-mainlah majelis-majelis maju-mundur makam-makam makan-makan makan-makanan makanan-red make-up maki-maki maki-makian mal-mal malai-malai malam-malam malar-malar malas-malasan mali-mali malu-malu mama-mama man-in-the-middle mana-mana manajer-manajer manik-manik manis-manis manis-manisan marah-marah mark-up mas-mas masa-masa masak-masak masalah-masalah mash-up masing-masing masjid-masjid masuk-keluar mat-matan mata-mata match-fixing mati-mati mati-matian maya-maya mayat-mayat mayday-mayday media-media mega-bintang mega-tsunami megal-megol megap-megap meger-meger megrek-megrek melak-melak melambai-lambai melambai-lambaikan melambat-lambatkan melaun-laun melawak-lawak melayang-layang melayap-layap melayap-layapkan melebih-lebihi melebih-lebihkan melejang-lejangkan melek-melekan meleleh-leleh melengah-lengah melihat-lihat melimpah-limpah melincah-lincah meliuk-liuk melolong-lolong melompat-lompat meloncat-loncat melonco-lonco melongak-longok melonjak-lonjak memacak-macak memada-madai memadan-madan memaki-maki memaksa-maksa memanas-manasi memancit-mancitkan memandai-mandai memanggil-manggil memanis-manis memanjut-manjut memantas-mantas memasak-masak memata-matai mematah-matah mematuk-matuk mematut-matut memau-mau memayah-mayahkan membaca-baca membacah-bacah membagi-bagikan membalik-balik membangkit-bangkit membarut-barut membawa-bawa membayang-bayangi membayang-bayangkan membeda-bedakan membelai-belai membeli-beli membelit-belitkan membelu-belai membenar-benar membenar-benari memberai-beraikan membesar-besar membesar-besarkan membikin-bikin membilah-bilah membolak-balikkan membongkar-bangkir membongkar-bongkar membuang-buang membuat-buat membulan-bulani membunga-bungai membungkuk-bungkuk memburu-buru memburu-burukan memburuk-burukkan memelintir-melintir memencak-mencak memencar-mencar memercik-mercik memetak-metak memetang-metangkan memetir-metir memijar-mijar memikir-mikir memikir-mikirkan memilih-milih memilin-milin meminang-minang meminta-minta memisah-misahkan memontang-mantingkan memorak-perandakan memorak-porandakan memotong-motong memperamat-amat memperamat-amatkan memperbagai-bagaikan memperganda-gandakan memperganduh-ganduhkan memperimpit-impitkan memperkuda-kudakan memperlengah-lengah memperlengah-lengahkan mempermacam-macamkan memperolok-olok memperolok-olokkan mempersama-samakan mempertubi-tubi mempertubi-tubikan memperturut-turutkan memuja-muja memukang-mukang memulun-mulun memundi-mundi memundi-mundikan memutar-mutar memuyu-muyu men-tweet menagak-nagak menakut-nakuti menang-kalah menanjur-nanjur menanti-nanti menari-nari mencabik-cabik mencabik-cabikkan mencacah-cacah mencaing-caing mencak-mencak mencakup-cakup mencapak-capak mencari-cari 
mencarik-carik mencarik-carikkan mencarut-carut mencengis-cengis mencepak-cepak mencepuk-cepuk mencerai-beraikan mencetai-cetai menciak-ciak menciap-ciap menciar-ciar mencita-citakan mencium-cium menciut-ciut mencla-mencle mencoang-coang mencoba-coba mencocok-cocok mencolek-colek menconteng-conteng mencubit-cubit mencucuh-cucuh mencucuh-cucuhkan mencuri-curi mendecap-decap mendegam-degam mendengar-dengar mendengking-dengking mendengus-dengus mendengut-dengut menderai-deraikan menderak-derakkan menderau-derau menderu-deru mendesas-desuskan mendesus-desus mendetap-detap mendewa-dewakan mendudu-dudu menduga-duga menebu-nebu menegur-neguri menepak-nepak menepak-nepakkan mengabung-ngabung mengaci-acikan mengacu-acu mengada-ada mengada-ngada mengadang-adangi mengaduk-aduk mengagak-agak mengagak-agihkan mengagut-agut mengais-ngais mengalang-alangi mengali-ali mengalur-alur mengamang-amang mengamat-amati mengambai-ambaikan mengambang-ambang mengambung-ambung mengambung-ambungkan mengamit-ngamitkan mengancai-ancaikan mengancak-ancak mengancar-ancar mengangan-angan mengangan-angankan mengangguk-angguk menganggut-anggut mengangin-anginkan mengangkat-angkat menganjung-anjung menganjung-anjungkan mengap-mengap mengapa-apai mengapi-apikan mengarah-arahi mengarang-ngarang mengata-ngatai mengatup-ngatupkan mengaum-aum mengaum-aumkan mengejan-ejan mengejar-ngejar mengejut-ngejuti mengelai-ngelai mengelepik-ngelepik mengelip-ngelip mengelu-elukan mengelus-elus mengembut-embut mengempas-empaskan mengenap-enapkan mengendap-endap mengenjak-enjak mengentak-entak mengentak-entakkan mengepak-ngepak mengepak-ngepakkan mengepal-ngepalkan mengerjap-ngerjap mengerling-ngerling mengertak-ngertakkan mengesot-esot menggaba-gabai menggali-gali menggalur-galur menggamak-gamak menggamit-gamitkan menggapai-gapai menggapai-gapaikan menggaruk-garuk menggebu-gebu menggebyah-uyah menggeleng-gelengkan menggelepar-gelepar menggelepar-geleparkan menggeliang-geliutkan menggelinding-gelinding menggemak-gemak menggembar-gemborkan menggerak-gerakkan menggerecak-gerecak menggesa-gesakan menggili-gili menggodot-godot menggolak-galikkan menggorek-gorek menggoreng-goreng menggosok-gosok menggoyang-goyangkan mengguit-guit menghalai-balaikan menghalang-halangi menghambur-hamburkan menghinap-hinap menghitam-memutihkan menghitung-hitung menghubung-hubungkan menghujan-hujankan mengiang-ngiang mengibar-ngibarkan mengibas-ngibas mengibas-ngibaskan mengidam-idamkan mengilah-ngilahkan mengilai-ilai mengilat-ngilatkan mengilik-ngilik mengimak-imak mengimbak-imbak mengiming-iming mengincrit-incrit mengingat-ingat menginjak-injak mengipas-ngipas mengira-ngira mengira-ngirakan mengiras-iras mengiras-irasi mengiris-iris mengitar-ngitar mengitik-ngitik mengodol-odol mengogok-ogok mengolak-alik mengolak-alikkan mengolang-aling mengolang-alingkan mengoleng-oleng mengolok-olok mengombang-ambing mengombang-ambingkan mengongkang-ongkang mengongkok-ongkok mengonyah-anyih mengopak-apik mengorak-arik mengorat-oret mengorek-ngorek mengoret-oret mengorok-orok mengotak-atik mengotak-ngatikkan mengotak-ngotakkan mengoyak-ngoyak mengoyak-ngoyakkan mengoyak-oyak menguar-nguarkan menguar-uarkan mengubah-ubah mengubek-ubek menguber-uber mengubit-ubit mengubrak-abrik mengucar-ngacirkan mengucek-ngucek mengucek-ucek menguik-uik menguis-uis mengulang-ulang mengulas-ulas mengulit-ulit mengulum-ngulum mengulur-ulur menguman-uman mengumbang-ambingkan mengumpak-umpak mengungkat-ungkat mengungkit-ungkit mengupa-upa mengurik-urik mengusil-usil mengusil-usilkan mengutak-atik 
mengutak-ngatikkan mengutik-ngutik mengutik-utik menika-nika menimang-nimang menimbang-nimbang menimbun-nimbun menimpang-nimpangkan meningkat-ningkat meniru-niru menit-menit menitar-nitarkan meniup-niup menjadi-jadi menjadi-jadikan menjedot-jedotkan menjelek-jelekkan menjengek-jengek menjengit-jengit menjerit-jerit menjilat-jilat menjungkat-jungkit menko-menko menlu-menlu menonjol-nonjolkan mentah-mentah mentang-mentang menteri-menteri mentul-mentul menuding-nuding menumpah-numpahkan menunda-nunda menunduk-nunduk menusuk-nusuk menyala-nyala menyama-nyama menyama-nyamai menyambar-nyambar menyangkut-nyangkutkan menyanjung-nyanjung menyanjung-nyanjungkan menyapu-nyapu menyarat-nyarat menyayat-nyayat menyedang-nyedang menyedang-nyedangkan menyelang-nyelangkan menyelang-nyeling menyelang-nyelingkan menyenak-nyenak menyendi-nyendi menyentak-nyentak menyentuh-nyentuh menyepak-nyepakkan menyerak-nyerakkan menyeret-nyeret menyeru-nyerukan menyetel-nyetel menyia-nyiakan menyibak-nyibak menyobek-nyobek menyorong-nyorongkan menyungguh-nyungguhi menyuruk-nyuruk meraba-raba merah-hitam merah-merah merambang-rambang merangkak-rangkak merasa-rasai merata-ratakan meraung-raung meraung-raungkan merayau-rayau merayu-rayu mercak-mercik mercedes-benz merek-merek mereka-mereka mereka-reka merelap-relap merem-merem meremah-remah meremas-remas meremeh-temehkan merempah-rempah merempah-rempahi merengek-rengek merengeng-rengeng merenik-renik merenta-renta merenyai-renyai meresek-resek merintang-rintang merintik-rintik merobek-robek meronta-ronta meruap-ruap merubu-rubu merungus-rungus merungut-rungut meta-analysis metode-metode mewanti-wanti mewarna-warnikan meyakin-yakini mid-range mid-size miju-miju mikro-kecil mimpi-mimpi minggu-minggu minta-minta minuman-minuman mixed-use mobil-mobil mobile-first mobile-friendly moga-moga mola-mola momen-momen mondar-mandir monyet-monyet morak-marik morat-marit move-on muda-muda muda-mudi muda/i mudah-mudahan muka-muka mula-mula multiple-output muluk-muluk mulut-mulutan mumi-mumi mundur-mundur muntah-muntah murid-muridnya musda-musda museum-museum muslim-muslimah musuh-musuh musuh-musuhnya nabi-nabi nada-nadanya naga-naga naga-naganya naik-naik naik-turun nakal-nakalan nama-nama nanti-nantian nanya-nanya nasi-nasi nasib-nasiban near-field negara-negara negera-negara negeri-negeri negeri-red neka-neka nekat-nekat neko-neko nenek-nenek neo-liberalisme next-gen next-generation ngeang-ngeang ngeri-ngeri nggak-nggak ngobrol-ngobrol ngumpul-ngumpul nilai-nilai nine-dash nipa-nipa nong-nong norma-norma novel-novel nyai-nyai nyolong-nyolong nyut-nyutan ob-gyn obat-obat obat-obatan objek-objek obok-obok obrak-abrik octa-core odong-odong oedipus-kompleks off-road ogah-agih ogah-ogah ogah-ogahan ogak-agik ogak-ogak ogoh-ogoh olak-alik olak-olak olang-aling olang-alingan ole-ole oleh-oleh olok-olok olok-olokan olong-olong om-om ombang-ambing omni-channel on-board on-demand on-fire on-line on-off on-premises on-roll on-screen on-the-go onde-onde ondel-ondel ondos-ondos one-click one-to-one one-touch one-two oneng-oneng ongkang-ongkang ongol-ongol online-to-offline ontran-ontran onyah-anyih onyak-anyik opak-apik opsi-opsi opt-in orak-arik orang-aring orang-orang orang-orangan orat-oret organisasi-organisasi ormas-ormas orok-orok orong-orong oseng-oseng otak-atik otak-otak otak-otakan over-heating over-the-air over-the-top pa-pa pabrik-pabrik padi-padian pagi-pagi pagi-sore pajak-pajak paket-paket palas-palas palato-alveolar paling-paling palu-arit palu-memalu panas-dingin panas-panas 
pandai-pandai pandang-memandang panel-panel pangeran-pangeran panggung-panggung pangkalan-pangkalan panja-panja panji-panji pansus-pansus pantai-pantai pao-pao para-para parang-parang parpol-parpol partai-partai paru-paru pas-pasan pasal-pasal pasang-memasang pasang-surut pasar-pasar pasu-pasu paus-paus paut-memaut pay-per-click paya-paya pdi-p pecah-pecah pecat-pecatan peer-to-peer pejabat-pejabat pekak-pekak pekik-pekuk pelabuhan-pelabuhan pelacur-pelacur pelajar-pelajar pelan-pelan pelangi-pelangi pem-bully pemain-pemain pemata-mataan pemda-pemda pemeluk-pemeluknya pemerintah-pemerintah pemerintah-red pemerintah-swasta pemetang-metangan pemilu-pemilu pemimpin-pemimpin peminta-minta pemuda-pemuda pemuda-pemudi penanggung-jawab pengali-ali pengaturan-pengaturan penggembar-gemboran pengorak-arik pengotak-ngotakan pengundang-undang pengusaha-pengusaha pentung-pentungan penyakit-penyakit perak-perak perang-perangan peras-perus peraturan-peraturan perda-perda perempat-final perempuan-perempuan pergi-pergi pergi-pulang perintang-rintang perkereta-apian perlahan-lahan perlip-perlipan permen-permen pernak-pernik pernik-pernik pertama-tama pertandingan-pertandingan pertimbangan-pertimbangan perudang-undangan perundang-undangan perundangan-undangan perusahaan-perusahaan perusahaan-perusahan perwakilan-perwakilan pesan-pesan pesawat-pesawat peta-jalan petang-petang petantang-petenteng petatang-peteteng pete-pete piala-piala piat-piut pick-up picture-in-picture pihak-pihak pijak-pijak pijar-pijar pijat-pijat pikir-pikir pil-pil pilah-pilih pilih-pilih pilihan-pilihan pilin-memilin pilkada-pilkada pina-pina pindah-pindah ping-pong pinjam-meminjam pintar-pintarlah pisang-pisang pistol-pistolan piting-memiting planet-planet play-off plin-plan plintat-plintut plonga-plongo plug-in plus-minus plus-plus poco-poco pohon-pohonan poin-poin point-of-sale point-of-sales pokemon-pokemon pokja-pokja pokok-pokok pokrol-pokrolan polang-paling polda-polda poleng-poleng polong-polongan polres-polres polsek-polsek polwan-polwan poma-poma pondok-pondok ponpes-ponpes pontang-panting pop-up porak-parik porak-peranda porak-poranda pos-pos posko-posko potong-memotong praktek-praktek praktik-praktik produk-produk program-program promosi-degradasi provinsi-provinsi proyek-proyek puing-puing puisi-puisi puji-pujian pukang-pukang pukul-memukul pulang-pergi pulau-pulai pulau-pulau pull-up pulut-pulut pundi-pundi pungak-pinguk punggung-memunggung pura-pura puruk-parak pusar-pusar pusat-pusat push-to-talk push-up push-ups pusing-pusing puskesmas-puskesmas putar-putar putera-puteri putih-hitam putih-putih putra-putra putra-putri putra/i putri-putri putus-putus putusan-putusan puvi-puvi quad-core raba-rabaan raba-rubu rada-rada radio-frequency ragu-ragu rahasia-rahasiaan raja-raja rama-rama ramai-ramai ramalan-ramalan rambeh-rambeh rambu-rambu rame-rame ramu-ramuan randa-rondo rangkul-merangkul rango-rango rap-rap rasa-rasanya rata-rata raun-raun read-only real-life real-time rebah-rebah rebah-rebahan rebas-rebas red-eye redam-redam redep-redup rehab-rekon reja-reja reka-reka reka-rekaan rekan-rekan rekan-rekannya rekor-rekor relief-relief remah-remah remang-remang rembah-rembah rembah-rembih remeh-cemeh remeh-temeh rempah-rempah rencana-rencana renyai-renyai rep-repan repot-repot repuh-repuh restoran-restoran retak-retak riang-riang ribu-ribu ribut-ribut rica-rica ride-sharing rigi-rigi rinai-rinai rintik-rintik ritual-ritual robak-rabik robat-rabit robot-robot role-play role-playing roll-on rombang-rambing romol-romol 
rompang-romping rondah-rondih ropak-rapik royal-royalan royo-royo ruak-ruak ruba-ruba rudal-rudal ruji-ruji ruku-ruku rumah-rumah rumah-rumahan rumbai-rumbai rumput-rumputan runding-merunding rundu-rundu runggu-rangga runner-up runtang-runtung rupa-rupa rupa-rupanya rusun-rusun rute-rute saat-saat saban-saban sabu-sabu sabung-menyabung sah-sah sahabat-sahabat saham-saham sahut-menyahut saing-menyaing saji-sajian sakit-sakitan saksi-saksi saku-saku salah-salah sama-sama samar-samar sambar-menyambar sambung-bersambung sambung-menyambung sambut-menyambut samo-samo sampah-sampah sampai-sampai samping-menyamping sana-sini sandar-menyandar sandi-sandi sangat-sangat sangkut-menyangkut sapa-menyapa sapai-sapai sapi-sapi sapu-sapu saran-saran sarana-prasarana sari-sari sarit-sarit satu-dua satu-satu satu-satunya satuan-satuan saudara-saudara sauk-menyauk sauk-sauk sayang-sayang sayap-sayap sayup-menyayup sayup-sayup sayur-mayur sayur-sayuran sci-fi seagak-agak seakal-akal seakan-akan sealak-alak seari-arian sebaik-baiknya sebelah-menyebelah sebentar-sebentar seberang-menyeberang seberuntung-beruntungnya sebesar-besarnya seboleh-bolehnya sedalam-dalamnya sedam-sedam sedang-menyedang sedang-sedang sedap-sedapan sedapat-dapatnya sedikit-dikitnya sedikit-sedikit sedikit-sedikitnya sedini-dininya seelok-eloknya segala-galanya segan-menyegan segan-menyegani segan-segan sehabis-habisnya sehari-hari sehari-harian sehari-harinya sejadi-jadinya sekali-kali sekali-sekali sekenyang-kenyangnya sekira-kira sekolah-sekolah sekonyong-konyong sekosong-kosongnya sektor-sektor sekuasa-kuasanya sekuat-kuatnya sekurang-kurangnya sel-sel sela-menyela sela-sela selak-seluk selama-lamanya selambat-lambatnya selang-seli selang-seling selar-belar selat-latnya selatan-tenggara selekas-lekasnya selentang-selenting selepas-lepas self-driving self-esteem self-healing self-help selir-menyelir seloyong-seloyong seluk-beluk seluk-semeluk sema-sema semah-semah semak-semak semaksimal-maksimalnya semalam-malaman semang-semang semanis-manisnya semasa-masa semata-mata semau-maunya sembunyi-sembunyi sembunyi-sembunyian sembur-sembur semena-mena semenda-menyemenda semengga-mengga semenggah-menggah sementang-mentang semerdeka-merdekanya semi-final semi-permanen sempat-sempatnya semu-semu semua-muanya semujur-mujurnya semut-semutan sen-senan sendiri-sendiri sengal-sengal sengar-sengir sengau-sengauan senggak-sengguk senggang-tenggang senggol-menyenggol senior-junior senjata-senjata senyum-senyum seolah-olah sepala-pala sepandai-pandai sepetang-petangan sepoi-sepoi sepraktis-praktisnya sepuas-puasnya serak-serak serak-serik serang-menyerang serang-serangan serangan-serangan seraya-menyeraya serba-serbi serbah-serbih serembah-serembih serigala-serigala sering-sering serobot-serobotan serong-menyerong serta-menyertai serta-merta serta-serta seru-seruan service-oriented sesak-menyesak sesal-menyesali sesayup-sayup sesi-sesi sesuang-suang sesudah-sudah sesudah-sudahnya sesuka-suka sesuka-sukanya set-piece setempat-setempat setengah-setengah setidak-tidaknya setinggi-tingginya seupaya-upaya seupaya-upayanya sewa-menyewa sewaktu-waktu sewenang-wenang sewot-sewotan shabu-shabu short-term short-throw sia-sia siang-siang siap-siap siapa-siapa sibar-sibar sibur-sibur sida-sida side-by-side sign-in siku-siku sikut-sikutan silah-silah silang-menyilang silir-semilir simbol-simbol simpan-pinjam sinar-menyinar sinar-seminar sinar-suminar sindir-menyindir singa-singa singgah-menyinggah single-core sipil-militer sir-siran sirat-sirat sisa-sisa sisi-sisi 
siswa-siswa siswa-siswi siswa/i siswi-siswi situ-situ situs-situs six-core six-speed slintat-slintut slo-mo slow-motion snap-on sobek-sobekan sodok-sodokan sok-sokan solek-menyolek solid-state sorak-sorai sorak-sorak sore-sore sosio-ekonomi soya-soya spill-resistant split-screen sponsor-sponsor sponsor-sponsoran srikandi-srikandi staf-staf stand-by stand-up start-up stasiun-stasiun state-owned striker-striker studi-studi suam-suam suami-isteri suami-istri suami-suami suang-suang suara-suara sudin-sudin sudu-sudu sudung-sudung sugi-sugi suka-suka suku-suku sulang-menyulang sulat-sulit sulur-suluran sum-sum sumber-sumber sumpah-sumpah sumpit-sumpit sundut-bersundut sungai-sungai sungguh-sungguh sungut-sungut sunting-menyunting super-damai super-rahasia super-sub supply-demand supply-side suram-suram surat-menyurat surat-surat suruh-suruhan suruk-surukan susul-menyusul suwir-suwir syarat-syarat system-on-chip t-shirt t-shirts tabar-tabar tabir-mabir tabrak-tubruk tabuh-tabuhan tabun-menabun tahu-menahu tahu-tahu tahun-tahun takah-takahnya takang-takik take-off takut-takut takut-takutan tali-bertali tali-tali talun-temalun taman-taman tampak-tampak tanak-tanakan tanam-menanam tanam-tanaman tanda-tanda tangan-menangan tangan-tangan tangga-tangga tanggal-tanggal tanggul-tanggul tanggung-menanggung tanggung-tanggung tank-tank tante-tante tanya-jawab tapa-tapa tapak-tapak tari-menari tari-tarian tarik-menarik tarik-ulur tata-tertib tatah-tatah tau-tau tawa-tawa tawak-tawak tawang-tawang tawar-menawar tawar-tawar tayum-temayum tebak-tebakan tebu-tebu tedong-tedong tegak-tegak tegerbang-gerbang teh-tehan tek-tek teka-teki teknik-teknik teman-teman teman-temanku temas-temas tembak-menembak temeh-temeh tempa-menempa tempat-tempat tempo-tempo temut-temut tenang-tenang tengah-tengah tenggang-menenggang tengok-menengok teori-teori teraba-raba teralang-alang terambang-ambang terambung-ambung terang-terang terang-terangan teranggar-anggar terangguk-angguk teranggul-anggul terangin-angin terangkup-angkup teranja-anja terapung-apung terayan-rayan terayap-rayap terbada-bada terbahak-bahak terbang-terbang terbata-bata terbatuk-batuk terbayang-bayang terbeda-bedakan terbengkil-bengkil terbengong-bengong terbirit-birit terbuai-buai terbuang-buang terbungkuk-bungkuk terburu-buru tercangak-cangak tercengang-cengang tercilap-cilap tercongget-congget tercoreng-moreng tercungap-cungap terdangka-dangka terdengih-dengih terduga-duga terekeh-ekeh terembut-embut terembut-rembut terempas-empas terengah-engah teresak-esak tergagap-gagap tergagau-gagau tergaguk-gaguk tergapai-gapai tergegap-gegap tergegas-gegas tergelak-gelak tergelang-gelang tergeleng-geleng tergelung-gelung tergerai-gerai tergerenyeng-gerenyeng tergesa-gesa tergila-gila tergolek-golek tergontai-gontai tergudik-gudik tergugu-gugu terguling-guling tergulut-gulut terhambat-hambat terharak-harak terharap-harap terhengit-hengit terheran-heran terhinggut-hinggut terigau-igau terimpi-impi terincut-incut teringa-inga teringat-ingat terinjak-injak terisak-isak terjembak-jembak terjerit-jerit terkadang-kadang terkagum-kagum terkaing-kaing terkakah-kakah terkakak-kakak terkampul-kampul terkanjar-kanjar terkantuk-kantuk terkapah-kapah terkapai-kapai terkapung-kapung terkatah-katah terkatung-katung terkecap-kecap terkedek-kedek terkedip-kedip terkejar-kejar terkekau-kekau terkekeh-kekeh terkekek-kekek terkelinjat-kelinjat terkelip-kelip terkempul-kempul terkemut-kemut terkencar-kencar terkencing-kencing terkentut-kentut terkepak-kepak terkesot-kesot terkesut-kesut 
terkial-kial terkijai-kijai terkikih-kikih terkikik-kikik terkincak-kincak terkindap-kindap terkinja-kinja terkirai-kirai terkitar-kitar terkocoh-kocoh terkojol-kojol terkokol-kokol terkosel-kosel terkotak-kotak terkoteng-koteng terkuai-kuai terkumpal-kumpal terlara-lara terlayang-layang terlebih-lebih terlincah-lincah terliuk-liuk terlolong-lolong terlongong-longong terlunta-lunta termangu-mangu termanja-manja termata-mata termengah-mengah termenung-menung termimpi-mimpi termonyong-monyong ternanti-nanti terngiang-ngiang teroleng-oleng terombang-ambing terpalit-palit terpandang-pandang terpecah-pecah terpekik-pekik terpencar-pencar terpereh-pereh terpijak-pijak terpikau-pikau terpilah-pilah terpinga-pinga terpingkal-pingkal terpingkau-pingkau terpontang-panting terpusing-pusing terputus-putus tersanga-sanga tersaruk-saruk tersedan-sedan tersedih-sedih tersedu-sedu terseduh-seduh tersendat-sendat tersendeng-sendeng tersengal-sengal tersengguk-sengguk tersengut-sengut terseok-seok tersera-sera terserak-serak tersetai-setai tersia-sia tersipu-sipu tersoja-soja tersungkuk-sungkuk tersuruk-suruk tertagak-tagak tertahan-tahan tertatih-tatih tertegun-tegun tertekan-tekan terteleng-teleng tertendang-tendang tertimpang-timpang tertitar-titar terumbang-ambing terumbang-umbang terungkap-ungkap terus-menerus terus-terusan tete-a-tete text-to-speech think-tank think-thank third-party third-person three-axis three-point tiap-tiap tiba-tiba tidak-tidak tidur-tidur tidur-tiduran tie-dye tie-in tiga-tiganya tikam-menikam tiki-taka tikus-tikus tilik-menilik tim-tim timah-timah timang-timangan timbang-menimbang time-lapse timpa-menimpa timu-timu timun-timunan timur-barat timur-laut timur-tenggara tindih-bertindih tindih-menindih tinjau-meninjau tinju-meninju tip-off tipu-tipu tiru-tiruan titik-titik titik-titiknya tiup-tiup to-do tokak-takik toko-toko tokoh-tokoh tokok-menokok tolak-menolak tolong-menolong tong-tong top-level top-up totol-totol touch-screen trade-in training-camp trans-nasional treble-winner tri-band trik-trik triple-core truk-truk tua-tua tuan-tuan tuang-tuang tuban-tuban tubuh-tubuh tujuan-tujuan tuk-tuk tukang-menukang tukar-menukar tulang-belulang tulang-tulangan tuli-tuli tulis-menulis tumbuh-tumbuhan tumpang-tindih tune-up tunggang-tunggik tunggang-tungging tunggang-tunggit tunggul-tunggul tunjuk-menunjuk tupai-tupai tupai-tupaian turi-turian turn-based turnamen-turnamen turun-temurun turut-menurut turut-turutan tuyuk-tuyuk twin-cam twin-turbocharged two-state two-step two-tone u-shape uang-uangan uar-uar ubek-ubekan ubel-ubel ubrak-abrik ubun-ubun ubur-ubur uci-uci udang-undang udap-udapan ugal-ugalan uget-uget uir-uir ujar-ujar uji-coba ujung-ujung ujung-ujungnya uka-uka ukir-mengukir ukir-ukiran ula-ula ulak-ulak ulam-ulam ulang-alik ulang-aling ulang-ulang ulap-ulap ular-ular ular-ularan ulek-ulek ulu-ulu ulung-ulung umang-umang umbang-ambing umbi-umbian umbul-umbul umbut-umbut uncang-uncit undak-undakan undang-undang undang-undangnya unduk-unduk undung-undung undur-undur unek-unek ungah-angih unggang-anggit unggat-unggit unggul-mengungguli ungkit-ungkit unit-unit universitas-universitas unsur-unsur untang-anting unting-unting untung-untung untung-untungan upah-mengupah upih-upih upside-down ura-ura uran-uran urat-urat uring-uringan urup-urup urup-urupan urus-urus usaha-usaha user-user user-useran utak-atik utang-piutang utang-utang utar-utar utara-jauh utara-selatan uter-uter utusan-utusan v-belt v-neck value-added very-very video-video visi-misi visi-misinya voa-islam voice-over 
volt-ampere wajah-wajah wajar-wajar wake-up wakil-wakil walk-in walk-out wangi-wangian wanita-wanita wanti-wanti wara-wara wara-wiri warna-warna warna-warni was-was water-cooled web-based wide-angle wilayah-wilayah win-win wira-wiri wora-wari work-life world-class yang-yang yayasan-yayasan year-on-year yel-yel yo-yo zam-zam zig-zag """.split() )
53,599
12.733026
25
py
spaCy
spaCy-master/spacy/lang/id/examples.py
""" Example sentences to test spaCy and its language models. >>> from spacy.lang.id.examples import sentences >>> docs = nlp.pipe(sentences) """ sentences = [ "Indonesia merupakan negara kepulauan yang kaya akan budaya.", "Berapa banyak warga yang dibutuhkan saat kerja bakti?", "Penyaluran pupuk berasal dari lima lokasi yakni Bontang, Kalimantan Timur, Surabaya, Banyuwangi, Semarang, dan Makassar.", "PT Pupuk Kaltim telah menyalurkan 274.707 ton pupuk bersubsidi ke wilayah penyaluran di 14 provinsi.", "Jakarta adalah kota besar yang nyaris tidak pernah tidur." "Kamu ada di mana semalam?", "Siapa yang membeli makanan ringan tersebut?", "Siapa presiden pertama Republik Indonesia?", ]
726
37.263158
127
py
spaCy
spaCy-master/spacy/lang/id/lex_attrs.py
import unicodedata from ...attrs import IS_CURRENCY, LIKE_NUM from .punctuation import LIST_CURRENCY _num_words = [ "nol", "satu", "dua", "tiga", "empat", "lima", "enam", "tujuh", "delapan", "sembilan", "sepuluh", "sebelas", "belas", "puluh", "ratus", "ribu", "juta", "miliar", "biliun", "triliun", "kuadriliun", "kuintiliun", "sekstiliun", "septiliun", "oktiliun", "noniliun", "desiliun", ] def like_num(text): if text.startswith(("+", "-", "±", "~")): text = text[1:] text = text.replace(",", "").replace(".", "") if text.isdigit(): return True if text.count("/") == 1: num, denom = text.split("/") if num.isdigit() and denom.isdigit(): return True if text.lower() in _num_words: return True if text.count("-") == 1: _, num = text.split("-") if num.isdigit() or num in _num_words: return True return False def is_currency(text): if text in LIST_CURRENCY: return True for char in text: if unicodedata.category(char) != "Sc": return False return True LEX_ATTRS = {IS_CURRENCY: is_currency, LIKE_NUM: like_num}
1,275
18.044776
58
py
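A minimal sketch of how the `like_num` predicate above behaves, assuming only this module is importable (the demo itself is not part of the spaCy source):

from spacy.lang.id.lex_attrs import like_num

# Digits with separators, fractions, Indonesian number words, and
# hyphenated forms with a numeric tail all count as number-like.
assert like_num("1.000,5")    # "." and "," are stripped before isdigit()
assert like_num("3/4")        # numerator and denominator are both digits
assert like_num("sembilan")   # listed number word ("nine")
assert like_num("ke-3")       # the part after "-" is a digit
assert not like_num("rumah")  # ordinary noun ("house")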
spaCy
spaCy-master/spacy/lang/id/punctuation.py
from ..char_classes import ALPHA, _currency, _units, merge_chars, split_chars from ..punctuation import TOKENIZER_INFIXES, TOKENIZER_PREFIXES, TOKENIZER_SUFFIXES _units = ( _units + "s bit Gbps Mbps mbps Kbps kbps ƒ ppi px " "Hz kHz MHz GHz mAh " "ratus rb ribu ribuan " "juta jt jutaan mill?iar million bil[l]?iun bilyun billion " ) _currency = _currency + r" USD Rp IDR RMB SGD S\$" _months = ( "Januari Februari Maret April Mei Juni Juli Agustus September " "Oktober November Desember January February March May June " "July August October December Jan Feb Mar Jun Jul Aug Sept " "Oct Okt Nov Des " ) UNITS = merge_chars(_units) CURRENCY = merge_chars(_currency) HTML_PREFIX = r"<(b|strong|i|em|p|span|div|br)\s?/>|<a([^>]+)>" HTML_SUFFIX = r"</(b|strong|i|em|p|span|div|a)>" MONTHS = merge_chars(_months) LIST_CURRENCY = split_chars(_currency) _prefixes = list(TOKENIZER_PREFIXES) _prefixes.remove("#") # hashtag _prefixes = _prefixes + LIST_CURRENCY + [HTML_PREFIX] + ["/", "—"] _suffixes = ( TOKENIZER_SUFFIXES + [r"\-[Nn]ya", "-[KkMm]u", "[—-]"] + [ # disabled: variable width currency variable # r"(?<={c})(?:[0-9]+)".format(c=CURRENCY), r"(?<=[0-9])(?:{u})".format(u=UNITS), r"(?<=[0-9])%", # disabled: variable width HTML_SUFFIX variable # r"(?<=[0-9{a}]{h})(?:[\.,:-])".format(a=ALPHA, h=HTML_SUFFIX), r"(?<=[0-9{a}])(?:{h})".format(a=ALPHA, h=HTML_SUFFIX), ] ) _infixes = TOKENIZER_INFIXES + [ r"(?<=[0-9])[\\/](?=[0-9%-])", r"(?<=[0-9])%(?=[{a}0-9/])".format(a=ALPHA), # disabled: variable width units variable # r"(?<={u})[\/-](?=[0-9])".format(u=UNITS), # disabled: variable width months variable # r"(?<={m})[\/-](?=[0-9])".format(m=MONTHS), r'(?<=[0-9)][.,])"(?=[0-9])', r'(?<=[{a})][.,\'])["—](?=[{a}])'.format(a=ALPHA), r"(?<=[{a}])-(?=[0-9])".format(a=ALPHA), r"(?<=[0-9])-(?=[{a}])".format(a=ALPHA), r"(?<=[{a}])[\/-](?={c}|[{a}])".format(a=ALPHA, c=CURRENCY), ] TOKENIZER_PREFIXES = _prefixes TOKENIZER_SUFFIXES = _suffixes TOKENIZER_INFIXES = _infixes
2,131
33.95082
83
py
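The affix rules above can be exercised without building a full pipeline by compiling them with spaCy's helper functions; a small sketch (the sample tokens are illustrative):

from spacy.lang.id.punctuation import TOKENIZER_SUFFIXES
from spacy.util import compile_suffix_regex

suffix_search = compile_suffix_regex(TOKENIZER_SUFFIXES).search

# The "-nya" clitic, units after digits, and "%" are all split off as suffixes.
for token in ["buku-nya", "100kg", "25%"]:
    match = suffix_search(token)
    print(token, "->", match.group() if match else None)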
spaCy
spaCy-master/spacy/lang/id/stop_words.py
STOP_WORDS = set( """ ada adalah adanya adapun agak agaknya agar akan akankah akhir akhiri akhirnya aku akulah amat amatlah anda andalah antar antara antaranya apa apaan apabila apakah apalagi apatah artinya asal asalkan atas atau ataukah ataupun awal awalnya bagai bagaikan bagaimana bagaimanakah bagaimanapun bagi bagian bahkan bahwa bahwasanya baik bakal bakalan balik banyak bapak baru bawah beberapa begini beginian beginikah beginilah begitu begitukah begitulah begitupun bekerja belakang belakangan belum belumlah benar benarkah benarlah berada berakhir berakhirlah berakhirnya berapa berapakah berapalah berapapun berarti berawal berbagai berdatangan beri berikan berikut berikutnya berjumlah berkali-kali berkata berkehendak berkeinginan berkenaan berlainan berlalu berlangsung berlebihan bermacam bermacam-macam bermaksud bermula bersama bersama-sama bersiap bersiap-siap bertanya bertanya-tanya berturut berturut-turut bertutur berujar berupa besar betul betulkah biasa biasanya bila bilakah bisa bisakah boleh bolehkah bolehlah buat bukan bukankah bukanlah bukannya bulan bung cara caranya cukup cukupkah cukuplah cuma dahulu dalam dan dapat dari daripada datang dekat demi demikian demikianlah dengan depan di dia diakhiri diakhirinya dialah diantara diantaranya diberi diberikan diberikannya dibuat dibuatnya didapat didatangkan digunakan diibaratkan diibaratkannya diingat diingatkan diinginkan dijawab dijelaskan dijelaskannya dikarenakan dikatakan dikatakannya dikerjakan diketahui diketahuinya dikira dilakukan dilalui dilihat dimaksud dimaksudkan dimaksudkannya dimaksudnya diminta dimintai dimisalkan dimulai dimulailah dimulainya dimungkinkan dini dipastikan diperbuat diperbuatnya dipergunakan diperkirakan diperlihatkan diperlukan diperlukannya dipersoalkan dipertanyakan dipunyai diri dirinya disampaikan disebut disebutkan disebutkannya disini disinilah ditambahkan ditandaskan ditanya ditanyai ditanyakan ditegaskan ditujukan ditunjuk ditunjuki ditunjukkan ditunjukkannya ditunjuknya dituturkan dituturkannya diucapkan diucapkannya diungkapkan dong dua dulu empat enggak enggaknya entah entahlah guna gunakan hal hampir hanya hanyalah hari harus haruslah harusnya hendak hendaklah hendaknya hingga ia ialah ibarat ibaratkan ibaratnya ibu ikut ingat ingat-ingat ingin inginkah inginkan ini inikah inilah itu itukah itulah jadi jadilah jadinya jangan jangankan janganlah jauh jawab jawaban jawabnya jelas jelaskan jelaslah jelasnya jika jikalau juga jumlah jumlahnya justru kala kalau kalaulah kalaupun kalian kami kamilah kamu kamulah kan kapan kapankah kapanpun karena karenanya kasus kata katakan katakanlah katanya ke keadaan kebetulan kecil kedua keduanya keinginan kelamaan kelihatan kelihatannya kelima keluar kembali kemudian kemungkinan kemungkinannya kenapa kepada kepadanya kesampaian keseluruhan keseluruhannya keterlaluan ketika khususnya kini kinilah kira kira-kira kiranya kita kitalah kok kurang lagi lagian lah lain lainnya lalu lama lamanya lanjut lanjutnya lebih lewat lima luar macam maka makanya makin malah malahan mampu mampukah mana manakala manalagi masa masalah masalahnya masih masihkah masing masing-masing mau maupun melainkan melakukan melalui melihat melihatnya memang memastikan memberi memberikan membuat memerlukan memihak meminta memintakan memisalkan memperbuat mempergunakan memperkirakan memperlihatkan mempersiapkan mempersoalkan mempertanyakan mempunyai memulai memungkinkan menaiki menambahkan menandaskan menanti menanti-nanti menantikan menanya menanyai menanyakan mendapat mendapatkan 
mendatang mendatangi mendatangkan menegaskan mengakhiri mengapa mengatakan mengatakannya mengenai mengerjakan mengetahui menggunakan menghendaki mengibaratkan mengibaratkannya mengingat mengingatkan menginginkan mengira mengucapkan mengucapkannya mengungkapkan menjadi menjawab menjelaskan menuju menunjuk menunjuki menunjukkan menunjuknya menurut menuturkan menyampaikan menyangkut menyatakan menyebutkan menyeluruh menyiapkan merasa mereka merekalah merupakan meski meskipun meyakini meyakinkan minta mirip misal misalkan misalnya mula mulai mulailah mulanya mungkin mungkinkah nah naik namun nanti nantinya nyaris nyatanya oleh olehnya pada padahal padanya pak paling panjang pantas para pasti pastilah penting pentingnya per percuma perlu perlukah perlunya pernah persoalan pertama pertama-tama pertanyaan pertanyakan pihak pihaknya pukul pula pun punya rasa rasanya rata rupanya saat saatnya saja sajalah saling sama sama-sama sambil sampai sampai-sampai sampaikan sana sangat sangatlah satu saya sayalah se sebab sebabnya sebagai sebagaimana sebagainya sebagian sebaik sebaik-baiknya sebaiknya sebaliknya sebanyak sebegini sebegitu sebelum sebelumnya sebenarnya seberapa sebesar sebetulnya sebisanya sebuah sebut sebutlah sebutnya secara secukupnya sedang sedangkan sedemikian sedikit sedikitnya seenaknya segala segalanya segera seharusnya sehingga seingat sejak sejauh sejenak sejumlah sekadar sekadarnya sekali sekali-kali sekalian sekaligus sekalipun sekarang sekarang sekecil seketika sekiranya sekitar sekitarnya sekurang-kurangnya sekurangnya sela selain selaku selalu selama selama-lamanya selamanya selanjutnya seluruh seluruhnya semacam semakin semampu semampunya semasa semasih semata semata-mata semaunya sementara semisal semisalnya sempat semua semuanya semula sendiri sendirian sendirinya seolah seolah-olah seorang sepanjang sepantasnya sepantasnyalah seperlunya seperti sepertinya sepihak sering seringnya serta serupa sesaat sesama sesampai sesegera sesekali seseorang sesuatu sesuatunya sesudah sesudahnya setelah setempat setengah seterusnya setiap setiba setibanya setidak-tidaknya setidaknya setinggi seusai sewaktu siap siapa siapakah siapapun sini sinilah soal soalnya suatu sudah sudahkah sudahlah supaya tadi tadinya tahu tahun tak tambah tambahnya tampak tampaknya tandas tandasnya tanpa tanya tanyakan tanyanya tapi tegas tegasnya telah tempat tengah tentang tentu tentulah tentunya tepat terakhir terasa terbanyak terdahulu terdapat terdiri terhadap terhadapnya teringat teringat-ingat terjadi terjadilah terjadinya terkira terlalu terlebih terlihat termasuk ternyata tersampaikan tersebut tersebutlah tertentu tertuju terus terutama tetap tetapi tiap tiba tiba-tiba tidak tidakkah tidaklah tiga tinggi toh tunjuk turut tutur tuturnya ucap ucapnya ujar ujarnya umum umumnya ungkap ungkapnya untuk usah usai waduh wah wahai waktu waktunya walau walaupun wong yaitu yakin yakni yang """.split() )
6,507
53.689076
79
py
spaCy
spaCy-master/spacy/lang/id/syntax_iterators.py
from typing import Iterator, Tuple, Union from ...errors import Errors from ...symbols import NOUN, PRON, PROPN from ...tokens import Doc, Span def noun_chunks(doclike: Union[Doc, Span]) -> Iterator[Tuple[int, int, int]]: """ Detect base noun phrases from a dependency parse. Works on both Doc and Span. """ # fmt: off labels = ["nsubj", "nsubj:pass", "obj", "iobj", "ROOT", "appos", "nmod", "nmod:poss"] # fmt: on doc = doclike.doc # Ensure works on both Doc and Span. if not doc.has_annotation("DEP"): raise ValueError(Errors.E029) np_deps = [doc.vocab.strings[label] for label in labels] conj = doc.vocab.strings.add("conj") np_label = doc.vocab.strings.add("NP") prev_end = -1 for i, word in enumerate(doclike): if word.pos not in (NOUN, PROPN, PRON): continue # Prevent nested chunks from being produced if word.left_edge.i <= prev_end: continue if word.dep in np_deps: prev_end = word.right_edge.i yield word.left_edge.i, word.right_edge.i + 1, np_label elif word.dep == conj: head = word.head while head.dep == conj and head.head.i < head.i: head = head.head # If the head is an NP, and we're coordinated to it, we're an NP if head.dep in np_deps: prev_end = word.right_edge.i yield word.left_edge.i, word.right_edge.i + 1, np_label SYNTAX_ITERATORS = {"noun_chunks": noun_chunks}
1,538
35.642857
89
py
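Because `noun_chunks` reads dependency labels, it only fires on a parsed `Doc`; a sketch of typical use, where the pipeline name is hypothetical (spaCy ships no official Indonesian parser, so any `id` pipeline that sets `DEP` annotations would do):

import spacy

nlp = spacy.load("id_pipeline_with_parser")  # hypothetical pipeline name
doc = nlp("Pemerintah mengumumkan kebijakan baru minggu ini.")

# doc.noun_chunks dispatches to the noun_chunks() iterator defined above.
for chunk in doc.noun_chunks:
    print(chunk.text, "| root:", chunk.root.text, "| dep:", chunk.root.dep_)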
spaCy
spaCy-master/spacy/lang/id/tokenizer_exceptions.py
from ...symbols import NORM, ORTH
from ...util import update_exc
from ..tokenizer_exceptions import BASE_EXCEPTIONS
from ._tokenizer_exceptions_list import ID_BASE_EXCEPTIONS

# List of Indonesian abbreviations and acronyms from:
# https://id.wiktionary.org/wiki/Wiktionary:Daftar_singkatan_dan_akronim_bahasa_Indonesia#A
_exc = {}

for orth in ID_BASE_EXCEPTIONS:
    _exc[orth] = [{ORTH: orth}]

    orth_title = orth.title()
    _exc[orth_title] = [{ORTH: orth_title}]

    orth_caps = orth.upper()
    _exc[orth_caps] = [{ORTH: orth_caps}]

    orth_lower = orth.lower()
    _exc[orth_lower] = [{ORTH: orth_lower}]

    orth_first_upper = orth[0].upper() + orth[1:]
    _exc[orth_first_upper] = [{ORTH: orth_first_upper}]

    if "-" in orth:
        orth_title = "-".join([part.title() for part in orth.split("-")])
        _exc[orth_title] = [{ORTH: orth_title}]

        orth_caps = "-".join([part.upper() for part in orth.split("-")])
        _exc[orth_caps] = [{ORTH: orth_caps}]

for exc_data in [
    {ORTH: "Jan.", NORM: "Januari"},
    {ORTH: "Feb.", NORM: "Februari"},
    {ORTH: "Mar.", NORM: "Maret"},
    {ORTH: "Apr.", NORM: "April"},
    {ORTH: "Jun.", NORM: "Juni"},
    {ORTH: "Jul.", NORM: "Juli"},
    {ORTH: "Agu.", NORM: "Agustus"},
    {ORTH: "Ags.", NORM: "Agustus"},
    {ORTH: "Sep.", NORM: "September"},
    {ORTH: "Okt.", NORM: "Oktober"},
    {ORTH: "Nov.", NORM: "November"},
    {ORTH: "Des.", NORM: "Desember"},
]:
    _exc[exc_data[ORTH]] = [exc_data]

_other_exc = {
    "do'a": [{ORTH: "do'a", NORM: "doa"}],
    "jum'at": [{ORTH: "jum'at", NORM: "Jumat"}],
    "Jum'at": [{ORTH: "Jum'at", NORM: "Jumat"}],
    "la'nat": [{ORTH: "la'nat", NORM: "laknat"}],
    "ma'af": [{ORTH: "ma'af", NORM: "maaf"}],
    "mu'jizat": [{ORTH: "mu'jizat", NORM: "mukjizat"}],
    "Mu'jizat": [{ORTH: "Mu'jizat", NORM: "mukjizat"}],
    "ni'mat": [{ORTH: "ni'mat", NORM: "nikmat"}],
    "raka'at": [{ORTH: "raka'at", NORM: "rakaat"}],
    "ta'at": [{ORTH: "ta'at", NORM: "taat"}],
}

_exc.update(_other_exc)

for orth in [
    "A.AB.", "A.Ma.", "A.Md.", "A.Md.Keb.", "A.Md.Kep.", "A.P.",
    "B.A.", "B.Ch.E.", "B.Sc.", "Dr.", "Dra.", "Drs.", "Hj.", "Ka.", "Kp.",
    "M.AB", "M.Ag.", "M.AP", "M.Arl", "M.A.R.S", "M.Hum.", "M.I.Kom.",
    "M.Kes.", "M.Kom.", "M.M.", "M.P.", "M.Pd.", "M.Psi.", "M.Psi.T.",
    "M.Sc.", "M.SArl", "M.Si.", "M.Sn.", "M.T.", "M.Th.", "No.", "Pjs.",
    "Plt.", "R.A.", "S.AB", "S.AP", "S.Adm", "S.Ag.", "S.Agr", "S.Ant",
    "S.Arl", "S.Ars", "S.A.R.S", "S.Ds", "S.E.", "S.E.I.", "S.Farm",
    "S.Gz.", "S.H.", "S.Han", "S.H.Int", "S.Hum", "S.Hut.", "S.In.",
    "S.IK.", "S.I.Kom.", "S.I.P", "S.IP", "S.P.", "S.Pt", "S.Psi",
    "S.Ptk", "S.Keb", "S.Ked", "S.Kep", "S.KG", "S.KH", "S.Kel",
    "S.K.M.", "S.Kedg.", "S.Kedh.", "S.Kom.", "S.KPM", "S.Mb", "S.Mat",
    "S.Par", "S.Pd.", "S.Pd.I.", "S.Pd.SD", "S.Pol.", "S.Psi.", "S.S.",
    "S.SArl.", "S.Sn", "S.Si.", "S.Si.Teol.", "S.SI.", "S.ST.",
    "S.ST.Han", "S.STP", "S.Sos.", "S.Sy.", "S.T.", "S.T.Han", "S.Th.",
    "S.Th.I", "S.TI.", "S.T.P.", "S.TrK", "S.Tekp.", "S.Th.", "Prof.",
    "drg.", "KH.", "Ust.", "Lc", "Pdt.", "S.H.H.", "Rm.", "Ps.", "St.",
    "M.A.", "M.B.A", "M.Eng.", "M.Eng.Sc.", "M.Pharm.", "Dr. med",
    "Dr.-Ing", "Dr. rer. nat.", "Dr. phil.", "Dr. iur.", "Dr. rer. oec",
    "Dr. rer. pol.", "R.Ng.", "R.", "R.M.", "R.B.", "R.P.", "R.Ay.",
    "Rr.", "R.Ngt.", "a.l.", "a.n.", "a.s.", "b.d.", "d.a.", "d.l.",
    "d/h", "dkk.", "dll.", "dr.", "drh.", "ds.", "dsb.", "dst.",
    "faks.", "fax.", "hlm.", "i/o", "n.b.", "p.p.", "pjs.", "s.d.",
    "tel.", "u.p.",
]:
    _exc[orth] = [{ORTH: orth}]

TOKENIZER_EXCEPTIONS = update_exc(BASE_EXCEPTIONS, _exc)
4,204
18.027149
91
py
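A quick check of the exceptions above with a blank, rule-only pipeline (expected output shown in the comments, assuming the default Indonesian rules):

import spacy

nlp = spacy.blank("id")  # rule-based tokenizer, no trained model needed

# "Jan." is a listed exception, so its period is not split off as a suffix.
print([t.text for t in nlp("Jan. 2021")])  # ['Jan.', '2021']

# Apostrophe forms keep their surface text but receive a normalized form.
print([(t.text, t.norm_) for t in nlp("do'a dan ma'af")])
# [("do'a", 'doa'), ('dan', 'dan'), ("ma'af", 'maaf')]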
spaCy
spaCy-master/spacy/lang/is/__init__.py
from ...language import BaseDefaults, Language from .stop_words import STOP_WORDS class IcelandicDefaults(BaseDefaults): stop_words = STOP_WORDS class Icelandic(Language): lang = "is" Defaults = IcelandicDefaults __all__ = ["Icelandic"]
255
16.066667
46
py
spaCy
spaCy-master/spacy/lang/is/stop_words.py
# Source: https://github.com/Xangis/extra-stopwords STOP_WORDS = set( """ afhverju aftan aftur afþví aldrei allir allt alveg annað annars bara dag eða eftir eiga einhver einhverjir einhvers eins einu eitthvað ekkert ekki ennþá eru fara fer finna fjöldi fólk framan frá frekar fyrir gegnum geta getur gmg gott hann hafa hef hefur heyra hér hérna hjá hún hvað hvar hver hverjir hverjum hvernig hvor hvort hægt img inn kannski koma líka lol maður mátt mér með mega meira mig mikið minna minni missa mjög nei niður núna oft okkar okkur póst póstur rofl saman sem sér sig sinni síðan sjá smá smátt spurja spyrja staðar stórt svo svona sælir sæll taka takk til tilvitnun titlar upp var vel velkomin velkominn vera verður verið vel við vil vilja vill vita væri yfir ykkar það þakka þakkir þannig það þar þarf þau þeim þeir þeirra þeirra þegar þess þessa þessi þessu þessum þetta þér þið þinn þitt þín þráð þráður því þær ætti """.split() )
938
4.90566
51
py
spaCy
spaCy-master/spacy/lang/it/__init__.py
from typing import Callable, Optional from thinc.api import Model from ...language import BaseDefaults, Language from .lemmatizer import ItalianLemmatizer from .punctuation import TOKENIZER_INFIXES, TOKENIZER_PREFIXES from .stop_words import STOP_WORDS from .syntax_iterators import SYNTAX_ITERATORS from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS class ItalianDefaults(BaseDefaults): tokenizer_exceptions = TOKENIZER_EXCEPTIONS prefixes = TOKENIZER_PREFIXES infixes = TOKENIZER_INFIXES stop_words = STOP_WORDS syntax_iterators = SYNTAX_ITERATORS class Italian(Language): lang = "it" Defaults = ItalianDefaults @Italian.factory( "lemmatizer", assigns=["token.lemma"], default_config={ "model": None, "mode": "pos_lookup", "overwrite": False, "scorer": {"@scorers": "spacy.lemmatizer_scorer.v1"}, }, default_score_weights={"lemma_acc": 1.0}, ) def make_lemmatizer( nlp: Language, model: Optional[Model], name: str, mode: str, overwrite: bool, scorer: Optional[Callable], ): return ItalianLemmatizer( nlp.vocab, model, name, mode=mode, overwrite=overwrite, scorer=scorer ) __all__ = ["Italian"]
1,230
23.137255
77
py
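A minimal sketch of wiring up the factory registered above; it assumes the `spacy-lookups-data` package is installed, since `pos_lookup` mode pulls its `lemma_lookup_*` tables from there:

import spacy

nlp = spacy.blank("it")
nlp.add_pipe("lemmatizer", config={"mode": "pos_lookup"})
nlp.initialize()  # loads the lookup tables for the chosen mode

At inference time `pos_lookup` keys its tables off `token.pos_`, so a tagger or morphologizer has to run earlier in the pipeline for this mode to do anything useful.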
spaCy
spaCy-master/spacy/lang/it/examples.py
""" Example sentences to test spaCy and its language models. >>> from spacy.lang.it.examples import sentences >>> docs = nlp.pipe(sentences) """ sentences = [ "Apple vuole comprare una startup del Regno Unito per un miliardo di dollari", "Le automobili a guida autonoma spostano la responsabilità assicurativa verso i produttori", "San Francisco prevede di bandire i robot di consegna porta a porta", "Londra è una grande città del Regno Unito.", ]
468
30.266667
96
py
spaCy
spaCy-master/spacy/lang/it/lemmatizer.py
from typing import Dict, List, Tuple from ...pipeline import Lemmatizer from ...tokens import Token class ItalianLemmatizer(Lemmatizer): """This lemmatizer was adapted from the Polish one (version of April 2021). It implements lookup lemmatization based on the morphological lexicon morph-it (Baroni and Zanchetta). The table lemma_lookup with non-POS-aware entries is used as a backup for words that aren't handled by morph-it.""" @classmethod def get_lookups_config(cls, mode: str) -> Tuple[List[str], List[str]]: if mode == "pos_lookup": required = [ "lemma_lookup_num", "lemma_lookup_det", "lemma_lookup_adp", "lemma_lookup_adj", "lemma_lookup_noun", "lemma_lookup_pron", "lemma_lookup_verb", "lemma_lookup_aux", "lemma_lookup_adv", "lemma_lookup_other", "lemma_lookup", ] return (required, []) else: return super().get_lookups_config(mode) def pos_lookup_lemmatize(self, token: Token) -> List[str]: string = token.text univ_pos = token.pos_ morphology = token.morph.to_dict() lookup_pos = univ_pos.lower() if univ_pos == "PROPN": lookup_pos = "noun" elif univ_pos == "PART": lookup_pos = "pron" lookup_table = self.lookups.get_table("lemma_lookup_" + lookup_pos, {}) if univ_pos == "NOUN": return self.lemmatize_noun(string, morphology, lookup_table) else: if univ_pos != "PROPN": string = string.lower() if univ_pos == "DET": return self.lemmatize_det(string, morphology, lookup_table) elif univ_pos == "PRON": return self.lemmatize_pron(string, morphology, lookup_table) elif univ_pos == "ADP": return self.lemmatize_adp(string, morphology, lookup_table) elif univ_pos == "ADJ": return self.lemmatize_adj(string, morphology, lookup_table) else: lemma = lookup_table.get(string, "") if not lemma: lookup_table = self.lookups.get_table("lemma_lookup_other") lemma = lookup_table.get(string, "") if not lemma: lookup_table = self.lookups.get_table( "lemma_lookup" ) # "legacy" lookup table lemma = lookup_table.get(string, string.lower()) return [lemma] def lemmatize_det( self, string: str, morphology: dict, lookup_table: Dict[str, str] ) -> List[str]: if string in [ "l'", "lo", "la", "i", "gli", "le", ]: return ["il"] if string in ["un'", "un", "una"]: return ["uno"] return [lookup_table.get(string, string)] def lemmatize_pron( self, string: str, morphology: dict, lookup_table: Dict[str, str] ) -> List[str]: if string in [ "l'", "li", "la", "gli", "le", ]: return ["lo"] if string in ["un'", "un", "una"]: return ["uno"] lemma = lookup_table.get(string, string) if lemma == "alcun": lemma = "alcuno" elif lemma == "qualcun": lemma = "qualcuno" return [lemma] def lemmatize_adp( self, string: str, morphology: dict, lookup_table: Dict[str, str] ) -> List[str]: if string == "d'": return ["di"] return [lookup_table.get(string, string)] def lemmatize_adj( self, string: str, morphology: dict, lookup_table: Dict[str, str] ) -> List[str]: lemma = lookup_table.get(string, string) if lemma == "alcun": lemma = "alcuno" elif lemma == "qualcun": lemma = "qualcuno" return [lemma] def lemmatize_noun( self, string: str, morphology: dict, lookup_table: Dict[str, str] ) -> List[str]: # this method is case-sensitive, in order to work # for incorrectly tagged proper names if string != string.lower(): if string.lower() in lookup_table: return [lookup_table[string.lower()]] elif string in lookup_table: return [lookup_table[string]] return [string.lower()] return [lookup_table.get(string, string)]
4,615
33.706767
79
py
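The hard-coded determiner and adposition branches above only need a string, a morphology dict, and a lookup table, so they can be poked at directly; an illustrative sketch with empty tables, which is not how the component is normally used:

from spacy.lang.it.lemmatizer import ItalianLemmatizer
from spacy.vocab import Vocab

# Constructed outside a pipeline purely to exercise the rule branches.
lemmatizer = ItalianLemmatizer(Vocab(), None, "lemmatizer", mode="pos_lookup")

print(lemmatizer.lemmatize_det("gli", {}, {}))  # ['il']
print(lemmatizer.lemmatize_det("una", {}, {}))  # ['uno']
print(lemmatizer.lemmatize_adp("d'", {}, {}))   # ['di']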
spaCy
spaCy-master/spacy/lang/it/punctuation.py
from ..char_classes import ( ALPHA, ALPHA_LOWER, ALPHA_UPPER, CONCAT_QUOTES, HYPHENS, LIST_ELLIPSES, LIST_ICONS, ) from ..punctuation import TOKENIZER_PREFIXES as BASE_TOKENIZER_PREFIXES ELISION = "'’" _prefixes = [r"'[0-9][0-9]", r"[0-9]+°"] + BASE_TOKENIZER_PREFIXES _infixes = ( LIST_ELLIPSES + LIST_ICONS + [ r"(?<=[0-9])[+\-\*^](?=[0-9-])", r"(?<=[{al}{q}])\.(?=[{au}{q}])".format( al=ALPHA_LOWER, au=ALPHA_UPPER, q=CONCAT_QUOTES ), r"(?<=[{a}]),(?=[{a}])".format(a=ALPHA), r"(?<=[{a}])(?:{h})(?=[{al}])".format(a=ALPHA, h=HYPHENS, al=ALPHA_LOWER), r"(?<=[{a}0-9])[:<>=\/](?=[{a}])".format(a=ALPHA), r"(?<=[{a}][{el}])(?=[{a}0-9\"])".format(a=ALPHA, el=ELISION), ] ) TOKENIZER_PREFIXES = _prefixes TOKENIZER_INFIXES = _infixes
850
23.314286
82
py
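The elision infix above makes a zero-width split after l'/dell'/un' and similar forms; a short sketch with a blank Italian pipeline:

import spacy

nlp = spacy.blank("it")

# The infix matches between the apostrophe and the following letter, so the
# elided article stays attached to its apostrophe. The "'[0-9][0-9]" prefix
# works the other way and keeps abbreviated years like '99 in one piece.
print([t.text for t in nlp("L'arte dell'Ottocento")])
# ["L'", 'arte', "dell'", 'Ottocento']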
spaCy
spaCy-master/spacy/lang/it/stop_words.py
STOP_WORDS = set( """ a abbastanza abbia abbiamo abbiano abbiate accidenti ad adesso affinche agl agli ahime ahimè ai al alcuna alcuni alcuno all alla alle allo allora altri altrimenti altro altrove altrui anche ancora anni anno ansa anticipo assai attesa attraverso avanti avemmo avendo avente aver avere averlo avesse avessero avessi avessimo aveste avesti avete aveva avevamo avevano avevate avevi avevo avrai avranno avrebbe avrebbero avrei avremmo avremo avreste avresti avrete avrà avrò avuta avute avuti avuto basta bene benissimo brava bravo casa caso cento certa certe certi certo che chi chicchessia chiunque ci c' ciascuna ciascuno cima cio cioe circa citta città co codesta codesti codesto cogli coi col colei coll coloro colui come cominci comunque con concernente conciliarsi conclusione consiglio contro cortesia cos cosa cosi così cui d' da dagl dagli dai dal dall dall' dalla dalle dallo dappertutto davanti degl degli dei del dell dell' della delle dello dentro detto deve di dice dietro dire dirimpetto diventa diventare diventato dopo dov dove dovra dovrà dovunque due dunque durante e ebbe ebbero ebbi ecc ecco ed effettivamente egli ella entrambi eppure era erano eravamo eravate eri ero esempio esse essendo esser essere essi ex è fa faccia facciamo facciano facciate faccio facemmo facendo facesse facessero facessi facessimo faceste facesti faceva facevamo facevano facevate facevi facevo fai fanno farai faranno fare farebbe farebbero farei faremmo faremo fareste faresti farete farà farò fatto favore fece fecero feci fin finalmente finche fine fino forse forza fosse fossero fossi fossimo foste fosti fra frattempo fu fui fummo fuori furono futuro generale gia già giacche giorni giorno gli gl' gliela gliele glieli glielo gliene governo grande grazie gruppo ha haha hai hanno ho ieri il improvviso in inc infatti inoltre insieme intanto intorno invece io l' la là lasciato lato lavoro le lei li lo lontano loro lui lungo luogo m' ma macche magari maggior mai male malgrado malissimo mancanza marche me medesimo mediante meglio meno mentre mesi mezzo mi mia mie miei mila miliardi milioni minimi ministro mio modo molti moltissimo molto momento mondo mosto nazionale ne negl negli nei nel nell nella nelle nello nemmeno neppure nessun nessun' nessuna nessuno nient' niente no noi non nondimeno nonostante nonsia nostra nostre nostri nostro novanta nove nulla nuovo od oggi ogni ognuna ognuno oltre oppure ora ore osi ossia ottanta otto paese parecchi parecchie parecchio parte partendo peccato peggio per perche perché percio perciò perfino pero persino persone però piedi pieno piglia piu piuttosto più po pochissimo poco poi poiche possa possedere posteriore posto potrebbe preferibilmente presa press prima primo principalmente probabilmente proprio puo può pure purtroppo qualche qualcosa qualcuna qualcuno quale quali qualunque quando quanta quante quanti quanto quantunque quasi quattro quel quel' quella quelle quelli quello quest quest' questa queste questi questo qui quindi realmente recente recentemente registrazione relativo riecco salvo s' sara sarà sarai saranno sarebbe sarebbero sarei saremmo saremo sareste saresti sarete saro sarò scola scopo scorso se secondo seguente seguito sei sembra sembrare sembrato sembri sempre senza sette si sia siamo siano siate siete sig solito solo soltanto sono sopra sotto spesso srl sta stai stando stanno starai staranno starebbe starebbero starei staremmo staremo stareste staresti starete starà starò stata state stati stato stava stavamo stavano stavate stavi stavo 
stemmo stessa stesse stessero stessi stessimo stesso steste stesti stette stettero stetti stia stiamo stiano stiate sto su sua subito successivamente successivo sue sugl sugli sui sul sull sulla sulle sullo suo suoi t' tale tali talvolta tanto te tempo ti titolo tra tranne tre trenta troppo trovato tu tua tue tuo tuoi tutta tuttavia tutte tutti tutto uguali ulteriore ultimo un un' una uno uomo v' va vale vari varia varie vario verso vi via vicino visto vita voi volta volte vostra vostre vostri vostro """.split() )
4,094
47.75
90
py
spaCy
spaCy-master/spacy/lang/it/syntax_iterators.py
from typing import Iterator, Tuple, Union from ...errors import Errors from ...symbols import NOUN, PRON, PROPN from ...tokens import Doc, Span def noun_chunks(doclike: Union[Doc, Span]) -> Iterator[Tuple[int, int, int]]: """ Detect base noun phrases from a dependency parse. Works on both Doc and Span. """ labels = [ "nsubj", "nsubj:pass", "obj", "obl", "obl:agent", "nmod", "pcomp", "appos", "ROOT", ] post_modifiers = ["flat", "flat:name", "fixed", "compound"] dets = ["det", "det:poss"] doc = doclike.doc # Ensure works on both Doc and Span. if not doc.has_annotation("DEP"): raise ValueError(Errors.E029) np_deps = {doc.vocab.strings.add(label) for label in labels} np_modifs = {doc.vocab.strings.add(modifier) for modifier in post_modifiers} np_label = doc.vocab.strings.add("NP") adj_label = doc.vocab.strings.add("amod") det_labels = {doc.vocab.strings.add(det) for det in dets} det_pos = doc.vocab.strings.add("DET") adp_label = doc.vocab.strings.add("ADP") conj = doc.vocab.strings.add("conj") conj_pos = doc.vocab.strings.add("CCONJ") prev_end = -1 for i, word in enumerate(doclike): if word.pos not in (NOUN, PROPN, PRON): continue # Prevent nested chunks from being produced if word.left_edge.i <= prev_end: continue if word.dep in np_deps: right_childs = list(word.rights) right_child = right_childs[0] if right_childs else None if right_child: if ( right_child.dep == adj_label ): # allow chain of adjectives by expanding to right right_end = right_child.right_edge elif ( right_child.dep in det_labels and right_child.pos == det_pos ): # cut relative pronouns here right_end = right_child elif right_child.dep in np_modifs: # Check if we can expand to right right_end = word.right_edge else: right_end = word else: right_end = word prev_end = right_end.i left_index = word.left_edge.i left_index = ( left_index + 1 if word.left_edge.pos == adp_label else left_index ) yield left_index, right_end.i + 1, np_label elif word.dep == conj: head = word.head while head.dep == conj and head.head.i < head.i: head = head.head # If the head is an NP, and we're coordinated to it, we're an NP if head.dep in np_deps: prev_end = word.i left_index = word.left_edge.i # eliminate left attached conjunction left_index = ( left_index + 1 if word.left_edge.pos == conj_pos else left_index ) yield left_index, word.i + 1, np_label SYNTAX_ITERATORS = {"noun_chunks": noun_chunks}
3,137
35.068966
85
py
spaCy
spaCy-master/spacy/lang/it/tokenizer_exceptions.py
from ...symbols import ORTH from ...util import update_exc from ..tokenizer_exceptions import BASE_EXCEPTIONS _exc = { "all'art.": [{ORTH: "all'"}, {ORTH: "art."}], "dall'art.": [{ORTH: "dall'"}, {ORTH: "art."}], "dell'art.": [{ORTH: "dell'"}, {ORTH: "art."}], "L'art.": [{ORTH: "L'"}, {ORTH: "art."}], "l'art.": [{ORTH: "l'"}, {ORTH: "art."}], "nell'art.": [{ORTH: "nell'"}, {ORTH: "art."}], "po'": [{ORTH: "po'"}], "sett..": [{ORTH: "sett."}, {ORTH: "."}], } for orth in [ "..", "....", "a.C.", "al.", "all-path", "art.", "Art.", "artt.", "att.", "avv.", "Avv.", "by-pass", "c.d.", "c/c", "C.so", "centro-sinistra", "check-up", "Civ.", "cm.", "Cod.", "col.", "Cost.", "d.C.", 'de"', "distr.", "E'", "ecc.", "e-mail", "e/o", "etc.", "Jr.", "n°", "nord-est", "pag.", "Proc.", "prof.", "sett.", "s.p.a.", "s.n.c", "s.r.l", "ss.", "St.", "tel.", "week-end", ]: _exc[orth] = [{ORTH: orth}] TOKENIZER_EXCEPTIONS = update_exc(BASE_EXCEPTIONS, _exc)
1,159
16.846154
56
py
spaCy
spaCy-master/spacy/lang/ja/__init__.py
import re from collections import namedtuple from pathlib import Path from typing import Any, Callable, Dict, Optional, Union import srsly from thinc.api import Model from ... import util from ...errors import Errors from ...language import BaseDefaults, Language from ...pipeline import Morphologizer from ...pipeline.morphologizer import DEFAULT_MORPH_MODEL from ...scorer import Scorer from ...symbols import POS from ...tokens import Doc, MorphAnalysis from ...training import validate_examples from ...util import DummyTokenizer, load_config_from_str, registry from ...vocab import Vocab from .stop_words import STOP_WORDS from .syntax_iterators import SYNTAX_ITERATORS from .tag_bigram_map import TAG_BIGRAM_MAP from .tag_map import TAG_MAP from .tag_orth_map import TAG_ORTH_MAP DEFAULT_CONFIG = """ [nlp] [nlp.tokenizer] @tokenizers = "spacy.ja.JapaneseTokenizer" split_mode = null """ @registry.tokenizers("spacy.ja.JapaneseTokenizer") def create_tokenizer(split_mode: Optional[str] = None): def japanese_tokenizer_factory(nlp): return JapaneseTokenizer(nlp.vocab, split_mode=split_mode) return japanese_tokenizer_factory class JapaneseTokenizer(DummyTokenizer): def __init__(self, vocab: Vocab, split_mode: Optional[str] = None) -> None: self.vocab = vocab self.split_mode = split_mode self.tokenizer = try_sudachi_import(self.split_mode) # if we're using split mode A we don't need subtokens self.need_subtokens = not (split_mode is None or split_mode == "A") def __reduce__(self): return JapaneseTokenizer, (self.vocab, self.split_mode) def __call__(self, text: str) -> Doc: # convert sudachipy.morpheme.Morpheme to DetailedToken and merge continuous spaces sudachipy_tokens = self.tokenizer.tokenize(text) dtokens = self._get_dtokens(sudachipy_tokens) dtokens, spaces = get_dtokens_and_spaces(dtokens, text) # create Doc with tag bi-gram based part-of-speech identification rules words, tags, inflections, lemmas, norms, readings, sub_tokens_list = ( zip(*dtokens) if dtokens else [[]] * 7 ) sub_tokens_list = list(sub_tokens_list) doc = Doc(self.vocab, words=words, spaces=spaces) next_pos = None # for bi-gram rules for idx, (token, dtoken) in enumerate(zip(doc, dtokens)): token.tag_ = dtoken.tag if next_pos: # already identified in previous iteration token.pos = next_pos next_pos = None else: token.pos, next_pos = resolve_pos( token.orth_, dtoken.tag, tags[idx + 1] if idx + 1 < len(tags) else None, ) # if there's no lemma info (it's an unk) just use the surface token.lemma_ = dtoken.lemma if dtoken.lemma else dtoken.surface morph = {} if dtoken.inf: # it's normal for this to be empty for non-inflecting types morph["Inflection"] = dtoken.inf token.norm_ = dtoken.norm if dtoken.reading: # punctuation is its own reading, but we don't want values like # "=" here morph["Reading"] = re.sub("[=|]", "_", dtoken.reading) token.morph = MorphAnalysis(self.vocab, morph) if self.need_subtokens: doc.user_data["sub_tokens"] = sub_tokens_list return doc def _get_dtokens(self, sudachipy_tokens, need_sub_tokens: bool = True): sub_tokens_list = ( self._get_sub_tokens(sudachipy_tokens) if need_sub_tokens else None ) dtokens = [ DetailedToken( token.surface(), # orth "-".join([xx for xx in token.part_of_speech()[:4] if xx != "*"]), # tag ";".join([xx for xx in token.part_of_speech()[4:] if xx != "*"]), # inf token.dictionary_form(), # lemma token.normalized_form(), token.reading_form(), sub_tokens_list[idx] if sub_tokens_list else None, # user_data['sub_tokens'] ) for idx, token in enumerate(sudachipy_tokens) if 
len(token.surface()) > 0 # remove empty tokens which can be produced with characters like … that ] # Sudachi normalizes internally and outputs each space char as a token. # This is the preparation for get_dtokens_and_spaces() to merge the continuous space tokens return [ t for idx, t in enumerate(dtokens) if idx == 0 or not t.surface.isspace() or t.tag != "空白" or not dtokens[idx - 1].surface.isspace() or dtokens[idx - 1].tag != "空白" ] def _get_sub_tokens(self, sudachipy_tokens): # do nothing for default split mode if not self.need_subtokens: return None sub_tokens_list = [] # list of (list of list of DetailedToken | None) for token in sudachipy_tokens: sub_a = token.split(self.tokenizer.SplitMode.A) if len(sub_a) == 1: # no sub tokens sub_tokens_list.append(None) elif self.split_mode == "B": sub_tokens_list.append([self._get_dtokens(sub_a, False)]) else: # "C" sub_b = token.split(self.tokenizer.SplitMode.B) if len(sub_a) == len(sub_b): dtokens = self._get_dtokens(sub_a, False) sub_tokens_list.append([dtokens, dtokens]) else: sub_tokens_list.append( [ self._get_dtokens(sub_a, False), self._get_dtokens(sub_b, False), ] ) return sub_tokens_list def score(self, examples): validate_examples(examples, "JapaneseTokenizer.score") return Scorer.score_tokenization(examples) def _get_config(self) -> Dict[str, Any]: return {"split_mode": self.split_mode} def _set_config(self, config: Dict[str, Any] = {}) -> None: self.split_mode = config.get("split_mode", None) def to_bytes(self, **kwargs) -> bytes: serializers = {"cfg": lambda: srsly.json_dumps(self._get_config())} return util.to_bytes(serializers, []) def from_bytes(self, data: bytes, **kwargs) -> "JapaneseTokenizer": deserializers = {"cfg": lambda b: self._set_config(srsly.json_loads(b))} util.from_bytes(data, deserializers, []) self.tokenizer = try_sudachi_import(self.split_mode) return self def to_disk(self, path: Union[str, Path], **kwargs) -> None: path = util.ensure_path(path) serializers = {"cfg": lambda p: srsly.write_json(p, self._get_config())} util.to_disk(path, serializers, []) def from_disk(self, path: Union[str, Path], **kwargs) -> "JapaneseTokenizer": path = util.ensure_path(path) serializers = {"cfg": lambda p: self._set_config(srsly.read_json(p))} util.from_disk(path, serializers, []) self.tokenizer = try_sudachi_import(self.split_mode) return self class JapaneseDefaults(BaseDefaults): config = load_config_from_str(DEFAULT_CONFIG) stop_words = STOP_WORDS syntax_iterators = SYNTAX_ITERATORS writing_system = {"direction": "ltr", "has_case": False, "has_letters": False} class Japanese(Language): lang = "ja" Defaults = JapaneseDefaults @Japanese.factory( "morphologizer", assigns=["token.morph", "token.pos"], default_config={ "model": DEFAULT_MORPH_MODEL, "overwrite": True, "extend": True, "scorer": {"@scorers": "spacy.morphologizer_scorer.v1"}, }, default_score_weights={ "pos_acc": 0.5, "morph_micro_f": 0.5, "morph_per_feat": None, }, ) def make_morphologizer( nlp: Language, model: Model, name: str, overwrite: bool, extend: bool, scorer: Optional[Callable], ): return Morphologizer( nlp.vocab, model, name, overwrite=overwrite, extend=extend, scorer=scorer ) # Hold the attributes we need with convenient names DetailedToken = namedtuple( "DetailedToken", ["surface", "tag", "inf", "lemma", "norm", "reading", "sub_tokens"] ) def try_sudachi_import(split_mode="A"): """SudachiPy is required for Japanese support, so check for it. It it's not available blow up and explain how to fix it. 
split_mode should be one of these values: "A", "B", "C", None->"A".""" try: from sudachipy import dictionary, tokenizer split_mode = { None: tokenizer.Tokenizer.SplitMode.A, "A": tokenizer.Tokenizer.SplitMode.A, "B": tokenizer.Tokenizer.SplitMode.B, "C": tokenizer.Tokenizer.SplitMode.C, }[split_mode] tok = dictionary.Dictionary().create(mode=split_mode) return tok except ImportError: raise ImportError( "Japanese support requires SudachiPy and SudachiDict-core " "(https://github.com/WorksApplications/SudachiPy). " "Install with `pip install sudachipy sudachidict_core` or " "install spaCy with `pip install spacy[ja]`." ) from None def resolve_pos(orth, tag, next_tag): """If necessary, add a field to the POS tag for UD mapping. Under Universal Dependencies, sometimes the same Unidic POS tag can be mapped differently depending on the literal token or its context in the sentence. This function returns resolved POSs for both token and next_token by tuple. """ # Some tokens have their UD tag decided based on the POS of the following # token. # apply orth based mapping if tag in TAG_ORTH_MAP: orth_map = TAG_ORTH_MAP[tag] if orth in orth_map: return orth_map[orth], None # current_pos, next_pos # apply tag bi-gram mapping if next_tag: tag_bigram = tag, next_tag if tag_bigram in TAG_BIGRAM_MAP: current_pos, next_pos = TAG_BIGRAM_MAP[tag_bigram] if current_pos is None: # apply tag uni-gram mapping for current_pos return ( TAG_MAP[tag][POS], next_pos, ) # only next_pos is identified by tag bi-gram mapping else: return current_pos, next_pos # apply tag uni-gram mapping return TAG_MAP[tag][POS], None def get_dtokens_and_spaces(dtokens, text, gap_tag="空白"): # Compare the content of tokens and text, first words = [x.surface for x in dtokens] if "".join("".join(words).split()) != "".join(text.split()): raise ValueError(Errors.E194.format(text=text, words=words)) text_dtokens = [] text_spaces = [] text_pos = 0 # handle empty and whitespace-only texts if len(words) == 0: return text_dtokens, text_spaces elif len([word for word in words if not word.isspace()]) == 0: assert text.isspace() text_dtokens = [DetailedToken(text, gap_tag, "", text, text, None, None)] text_spaces = [False] return text_dtokens, text_spaces # align words and dtokens by referring text, and insert gap tokens for the space char spans for i, (word, dtoken) in enumerate(zip(words, dtokens)): # skip all space tokens if word.isspace(): continue try: word_start = text[text_pos:].index(word) except ValueError: raise ValueError(Errors.E194.format(text=text, words=words)) from None # space token if word_start > 0: w = text[text_pos : text_pos + word_start] text_dtokens.append(DetailedToken(w, gap_tag, "", w, w, None, None)) text_spaces.append(False) text_pos += word_start # content word text_dtokens.append(dtoken) text_spaces.append(False) text_pos += len(word) # poll a space char after the word if i + 1 < len(dtokens) and dtokens[i + 1].surface == " ": text_spaces[-1] = True text_pos += 1 # trailing space token if text_pos < len(text): w = text[text_pos:] text_dtokens.append(DetailedToken(w, gap_tag, "", w, w, None, None)) text_spaces.append(False) return text_dtokens, text_spaces __all__ = ["Japanese"]
12,609
35.763848
99
py
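End to end, the tokenizer above is selected through the `[nlp.tokenizer]` block in `DEFAULT_CONFIG`; a minimal sketch, assuming SudachiPy and `sudachidict_core` are installed:

import spacy

# Default split_mode is null, i.e. Sudachi mode A (shortest units).
nlp = spacy.blank("ja")
doc = nlp("日本語を解析する")
print([(t.text, t.tag_, t.lemma_, str(t.morph)) for t in doc])

# Mode C keeps longer units and stores the A/B sub-token analyses
# in doc.user_data["sub_tokens"], as implemented above.
nlp_c = spacy.blank("ja", config={"nlp": {"tokenizer": {"split_mode": "C"}}})
doc_c = nlp_c("選挙管理委員会")
print(doc_c.user_data["sub_tokens"])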
spaCy
spaCy-master/spacy/lang/ja/examples.py
""" Example sentences to test spaCy and its language models. >>> from spacy.lang.ja.examples import sentences >>> docs = nlp.pipe(sentences) """ sentences = [ "アップルがイギリスの新興企業を10億ドルで購入を検討", "自動運転車の損害賠償責任、自動車メーカーに一定の負担を求める", "歩道を走る自動配達ロボ、サンフランシスコ市が走行禁止を検討", "ロンドンはイギリスの大都市です。", ]
297
18.866667
56
py
spaCy
spaCy-master/spacy/lang/ja/stop_words.py
# This list was created by taking the top 2000 words from a Wikipedia dump and # filtering out everything that wasn't hiragana. ー (one) was also added. # Considered keeping some non-hiragana words but too many place names were # present. STOP_WORDS = set( """ あ あっ あまり あり ある あるいは あれ い いい いう いく いずれ いっ いつ いる いわ うち え お おい おけ および おら おり か かけ かつ かつて かなり から が き きっかけ くる くん こ こう ここ こと この これ ご ごと さ さらに さん し しか しかし しまう しまっ しよう す すぐ すべて する ず せ せい せる そう そこ そして その それ それぞれ た たい ただし たち ため たら たり だ だけ だっ ち ちゃん つ つい つけ つつ て で でき できる です と とき ところ とっ とも どう な ない なお なかっ ながら なく なけれ なし なっ など なら なり なる に にて ぬ ね の のち のみ は はじめ ば ひと ぶり へ べき ほか ほとんど ほど ほぼ ま ます また まで まま み も もう もっ もと もの や やっ よ よう よく よっ より よる よれ ら らしい られ られる る れ れる を ん 一 """.split() )
730
13.918367
78
py
spaCy
spaCy-master/spacy/lang/ja/syntax_iterators.py
from typing import Iterator, Set, Tuple, Union

from ...symbols import NOUN, PRON, PROPN, VERB
from ...tokens import Doc, Span

# TODO: this can probably be pruned a bit
# fmt: off
labels = ["nsubj", "nmod", "dobj", "nsubjpass", "pcomp", "pobj", "obj", "obl", "dative", "appos", "attr", "ROOT"]
# fmt: on


def noun_chunks(doclike: Union[Doc, Span]) -> Iterator[Tuple[int, int, int]]:
    """Detect base noun phrases from a dependency parse. Works on Doc and Span."""
    doc = doclike.doc  # Ensure works on both Doc and Span.
    np_deps = [doc.vocab.strings.add(label) for label in labels]
    doc.vocab.strings.add("conj")
    np_label = doc.vocab.strings.add("NP")
    seen: Set[int] = set()
    for i, word in enumerate(doclike):
        if word.pos not in (NOUN, PROPN, PRON):
            continue
        # Prevent nested chunks from being produced
        if word.i in seen:
            continue
        if word.dep in np_deps:
            unseen = [w.i for w in word.subtree if w.i not in seen]
            if not unseen:
                continue
            # this takes care of particles etc.
            seen.update(j.i for j in word.subtree)
            # This avoids duplicating embedded clauses
            seen.update(range(word.i + 1))
            # if the head of this is a verb, mark that and rights seen
            # Don't do the subtree as that can hide other phrases
            if word.head.pos == VERB:
                seen.add(word.head.i)
                seen.update(w.i for w in word.head.rights)
            yield unseen[0], word.i + 1, np_label


SYNTAX_ITERATORS = {"noun_chunks": noun_chunks}
1,638
38.02381
125
py
spaCy
spaCy-master/spacy/lang/ja/tag_bigram_map.py
from ...symbols import ADJ, AUX, NOUN, PART, VERB # mapping from tag bi-gram to pos of previous token TAG_BIGRAM_MAP = { # This covers only small part of AUX. ("形容詞-非自立可能", "助詞-終助詞"): (AUX, None), ("名詞-普通名詞-形状詞可能", "助動詞"): (ADJ, None), # ("副詞", "名詞-普通名詞-形状詞可能"): (None, ADJ), # This covers acl, advcl, obl and root, but has side effect for compound. ("名詞-普通名詞-サ変可能", "動詞-非自立可能"): (VERB, AUX), # This covers almost all of the deps ("名詞-普通名詞-サ変形状詞可能", "動詞-非自立可能"): (VERB, AUX), ("名詞-普通名詞-副詞可能", "動詞-非自立可能"): (None, VERB), ("副詞", "動詞-非自立可能"): (None, VERB), ("形容詞-一般", "動詞-非自立可能"): (None, VERB), ("形容詞-非自立可能", "動詞-非自立可能"): (None, VERB), ("接頭辞", "動詞-非自立可能"): (None, VERB), ("助詞-係助詞", "動詞-非自立可能"): (None, VERB), ("助詞-副助詞", "動詞-非自立可能"): (None, VERB), ("助詞-格助詞", "動詞-非自立可能"): (None, VERB), ("補助記号-読点", "動詞-非自立可能"): (None, VERB), ("形容詞-一般", "接尾辞-名詞的-一般"): (None, PART), ("助詞-格助詞", "形状詞-助動詞語幹"): (None, NOUN), ("連体詞", "形状詞-助動詞語幹"): (None, NOUN), ("動詞-一般", "助詞-副助詞"): (None, PART), ("動詞-非自立可能", "助詞-副助詞"): (None, PART), ("助動詞", "助詞-副助詞"): (None, PART), }
1,137
38.241379
77
py
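A small sketch of how `resolve_pos()` in the `ja` `__init__` consumes this table: the bigram lookup returns a POS for the current token and, optionally, one to carry over to the next token:

from spacy.lang.ja.tag_bigram_map import TAG_BIGRAM_MAP
from spacy.symbols import AUX, VERB

# A sahen noun followed by a non-independent verb is re-tagged VERB, and
# the following token is pre-assigned AUX.
current_pos, next_pos = TAG_BIGRAM_MAP[("名詞-普通名詞-サ変可能", "動詞-非自立可能")]
assert (current_pos, next_pos) == (VERB, AUX)

# None in the first slot means "fall back to the unigram TAG_MAP".
current_pos, next_pos = TAG_BIGRAM_MAP[("副詞", "動詞-非自立可能")]
assert current_pos is None and next_pos == VERB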
spaCy
spaCy-master/spacy/lang/ja/tag_map.py
from ...symbols import ( ADJ, ADP, ADV, AUX, CCONJ, DET, INTJ, NOUN, NUM, PART, POS, PRON, PROPN, PUNCT, SCONJ, SPACE, SYM, VERB, ) TAG_MAP = { # Explanation of Unidic tags: # https://www.gavo.t.u-tokyo.ac.jp/~mine/japanese/nlp+slp/UNIDIC_manual.pdf # Universal Dependencies Mapping: (Some of the entries in this mapping are updated to v2.6 in the list below) # http://universaldependencies.org/ja/overview/morphology.html # http://universaldependencies.org/ja/pos/all.html "記号-一般": {POS: NOUN}, # this includes characters used to represent sounds like ドレミ "記号-文字": { POS: NOUN }, # this is for Greek and Latin characters having some meanings, or used as symbols, as in math "感動詞-フィラー": {POS: INTJ}, "感動詞-一般": {POS: INTJ}, "空白": {POS: SPACE}, "形状詞-一般": {POS: ADJ}, "形状詞-タリ": {POS: ADJ}, "形状詞-助動詞語幹": {POS: AUX}, "形容詞-一般": {POS: ADJ}, "形容詞-非自立可能": {POS: ADJ}, # XXX ADJ if alone, AUX otherwise "助詞-格助詞": {POS: ADP}, "助詞-係助詞": {POS: ADP}, "助詞-終助詞": {POS: PART}, "助詞-準体助詞": {POS: SCONJ}, # の as in 走るのが速い "助詞-接続助詞": {POS: SCONJ}, # verb ending て0 "助詞-副助詞": {POS: ADP}, # ばかり, つつ after a verb "助動詞": {POS: AUX}, "接続詞": {POS: CCONJ}, # XXX: might need refinement "接頭辞": {POS: NOUN}, "接尾辞-形状詞的": {POS: PART}, # がち, チック "接尾辞-形容詞的": {POS: AUX}, # -らしい "接尾辞-動詞的": {POS: PART}, # -じみ "接尾辞-名詞的-サ変可能": {POS: NOUN}, # XXX see 名詞,普通名詞,サ変可能,* "接尾辞-名詞的-一般": {POS: NOUN}, "接尾辞-名詞的-助数詞": {POS: NOUN}, "接尾辞-名詞的-副詞可能": {POS: NOUN}, # -後, -過ぎ "代名詞": {POS: PRON}, "動詞-一般": {POS: VERB}, "動詞-非自立可能": {POS: AUX}, # XXX VERB if alone, AUX otherwise "副詞": {POS: ADV}, "補助記号-AA-一般": {POS: SYM}, # text art "補助記号-AA-顔文字": {POS: PUNCT}, # kaomoji "補助記号-一般": {POS: SYM}, "補助記号-括弧開": {POS: PUNCT}, # open bracket "補助記号-括弧閉": {POS: PUNCT}, # close bracket "補助記号-句点": {POS: PUNCT}, # period or other EOS marker "補助記号-読点": {POS: PUNCT}, # comma "名詞-固有名詞-一般": {POS: PROPN}, # general proper noun "名詞-固有名詞-人名-一般": {POS: PROPN}, # person's name "名詞-固有名詞-人名-姓": {POS: PROPN}, # surname "名詞-固有名詞-人名-名": {POS: PROPN}, # first name "名詞-固有名詞-地名-一般": {POS: PROPN}, # place name "名詞-固有名詞-地名-国": {POS: PROPN}, # country name "名詞-助動詞語幹": {POS: AUX}, "名詞-数詞": {POS: NUM}, # includes Chinese numerals "名詞-普通名詞-サ変可能": {POS: NOUN}, # XXX: sometimes VERB in UDv2; suru-verb noun "名詞-普通名詞-サ変形状詞可能": {POS: NOUN}, "名詞-普通名詞-一般": {POS: NOUN}, "名詞-普通名詞-形状詞可能": {POS: NOUN}, # XXX: sometimes ADJ in UDv2 "名詞-普通名詞-助数詞可能": {POS: NOUN}, # counter / unit "名詞-普通名詞-副詞可能": {POS: NOUN}, "連体詞": {POS: DET}, # XXX this has exceptions based on literal token # GSD tags. These aren't in Unidic, but we need them for the GSD data. "外国語": {POS: PROPN}, # Foreign words "絵文字・記号等": {POS: SYM}, # emoji / kaomoji ^^; }
3,001
33.906977
113
py
spaCy
spaCy-master/spacy/lang/ja/tag_orth_map.py
from ...symbols import DET, PART, PRON, SPACE, X # mapping from tag bi-gram to pos of previous token TAG_ORTH_MAP = { "空白": {" ": SPACE, " ": X}, "助詞-副助詞": {"たり": PART}, "連体詞": { "あの": DET, "かの": DET, "この": DET, "その": DET, "どの": DET, "彼の": DET, "此の": DET, "其の": DET, "ある": PRON, "こんな": PRON, "そんな": PRON, "どんな": PRON, "あらゆる": PRON, }, }
458
18.956522
51
py
spaCy
spaCy-master/spacy/lang/kn/__init__.py
from ...language import BaseDefaults, Language from .stop_words import STOP_WORDS class KannadaDefaults(BaseDefaults): stop_words = STOP_WORDS class Kannada(Language): lang = "kn" Defaults = KannadaDefaults __all__ = ["Kannada"]
247
15.533333
46
py
spaCy
spaCy-master/spacy/lang/kn/examples.py
""" Example sentences to test spaCy and its language models. >>> from spacy.lang.en.examples import sentences >>> docs = nlp.pipe(sentences) """ sentences = [ "ಆಪಲ್ ಒಂದು ಯು.ಕೆ. ಸ್ಟಾರ್ಟ್ಅಪ್ ಅನ್ನು ೧ ಶತಕೋಟಿ ಡಾಲರ್ಗಳಿಗೆ ಖರೀದಿಸಲು ನೋಡುತ್ತಿದೆ.", "ಸ್ವಾಯತ್ತ ಕಾರುಗಳು ವಿಮಾ ಹೊಣೆಗಾರಿಕೆಯನ್ನು ತಯಾರಕರ ಕಡೆಗೆ ಬದಲಾಯಿಸುತ್ತವೆ.", "ಕಾಲುದಾರಿ ವಿತರಣಾ ರೋಬೋಟ್‌ಗಳನ್ನು ನಿಷೇಧಿಸುವುದನ್ನು ಸ್ಯಾನ್ ಫ್ರಾನ್ಸಿಸ್ಕೊ ​​ಪರಿಗಣಿಸುತ್ತದೆ.", "ಲಂಡನ್ ಯುನೈಟೆಡ್ ಕಿಂಗ್‌ಡಂನ ದೊಡ್ಡ ನಗರ.", "ನೀನು ಎಲ್ಲಿದಿಯಾ?", "ಫ್ರಾನ್ಸಾದ ಅಧ್ಯಕ್ಷರು ಯಾರು?", "ಯುನೈಟೆಡ್ ಸ್ಟೇಟ್ಸ್ನ ರಾಜಧಾನಿ ಯಾವುದು?", "ಬರಾಕ್ ಒಬಾಮ ಯಾವಾಗ ಜನಿಸಿದರು?", ]
585
29.842105
89
py
spaCy
spaCy-master/spacy/lang/kn/stop_words.py
STOP_WORDS = set( """ ಹಲವು ಮೂಲಕ ಹಾಗೂ ಅದು ನೀಡಿದ್ದಾರೆ ಯಾವ ಎಂದರು ಅವರು ಈಗ ಎಂಬ ಹಾಗಾಗಿ ಅಷ್ಟೇ ನಾವು ಇದೇ ಹೇಳಿ ತಮ್ಮ ಹೀಗೆ ನಮ್ಮ ಬೇರೆ ನೀಡಿದರು ಮತ್ತೆ ಇದು ಈ ನೀವು ನಾನು ಇತ್ತು ಎಲ್ಲಾ ಯಾವುದೇ ನಡೆದ ಅದನ್ನು ಎಂದರೆ ನೀಡಿದೆ ಹೀಗಾಗಿ ಜೊತೆಗೆ ಇದರಿಂದ ನನಗೆ ಅಲ್ಲದೆ ಎಷ್ಟು ಇದರ ಇಲ್ಲ ಕಳೆದ ತುಂಬಾ ಈಗಾಗಲೇ ಮಾಡಿ ಅದಕ್ಕೆ ಬಗ್ಗೆ ಅವರ ಇದನ್ನು ಆ ಇದೆ ಹೆಚ್ಚು ಇನ್ನು ಎಲ್ಲ ಇರುವ ಅವರಿಗೆ ನಿಮ್ಮ ಏನು ಕೂಡ ಇಲ್ಲಿ ನನ್ನನ್ನು ಕೆಲವು ಮಾತ್ರ ಬಳಿಕ ಅಂತ ತನ್ನ ಆಗ ಅಥವಾ ಅಲ್ಲ ಕೇವಲ ಆದರೆ ಮತ್ತು ಇನ್ನೂ ಅದೇ ಆಗಿ ಅವರನ್ನು ಹೇಳಿದ್ದಾರೆ ನಡೆದಿದೆ ಇದಕ್ಕೆ ಎಂಬುದು ಎಂದು ನನ್ನ ಮೇಲೆ """.split() )
499
4.747126
17
py
spaCy
spaCy-master/spacy/lang/ko/__init__.py
from typing import Any, Dict, Iterator from ...language import BaseDefaults, Language from ...scorer import Scorer from ...symbols import POS, X from ...tokens import Doc from ...training import validate_examples from ...util import DummyTokenizer, load_config_from_str, registry from ...vocab import Vocab from .lex_attrs import LEX_ATTRS from .punctuation import TOKENIZER_INFIXES from .stop_words import STOP_WORDS from .tag_map import TAG_MAP DEFAULT_CONFIG = """ [nlp] [nlp.tokenizer] @tokenizers = "spacy.ko.KoreanTokenizer" """ @registry.tokenizers("spacy.ko.KoreanTokenizer") def create_tokenizer(): def korean_tokenizer_factory(nlp): return KoreanTokenizer(nlp.vocab) return korean_tokenizer_factory class KoreanTokenizer(DummyTokenizer): def __init__(self, vocab: Vocab): self.vocab = vocab self._mecab = try_mecab_import() # type: ignore[func-returns-value] self._mecab_tokenizer = None @property def mecab_tokenizer(self): # This is a property so that initializing a pipeline with blank:ko is # possible without actually requiring mecab-ko, e.g. to run # `spacy init vectors ko` for a pipeline that will have a different # tokenizer in the end. The languages need to match for the vectors # to be imported and there's no way to pass a custom config to # `init vectors`. if self._mecab_tokenizer is None: self._mecab_tokenizer = self._mecab("-F%f[0],%f[7]") return self._mecab_tokenizer def __reduce__(self): return KoreanTokenizer, (self.vocab,) def __call__(self, text: str) -> Doc: dtokens = list(self.detailed_tokens(text)) surfaces = [dt["surface"] for dt in dtokens] doc = Doc(self.vocab, words=surfaces, spaces=list(check_spaces(text, surfaces))) for token, dtoken in zip(doc, dtokens): first_tag, sep, eomi_tags = dtoken["tag"].partition("+") token.tag_ = first_tag # stem(어간) or pre-final(선어말 어미) if token.tag_ in TAG_MAP: token.pos = TAG_MAP[token.tag_][POS] else: token.pos = X token.lemma_ = dtoken["lemma"] doc.user_data["full_tags"] = [dt["tag"] for dt in dtokens] return doc def detailed_tokens(self, text: str) -> Iterator[Dict[str, Any]]: # 품사 태그(POS)[0], 의미 부류(semantic class)[1], 종성 유무(jongseong)[2], 읽기(reading)[3], # 타입(type)[4], 첫번째 품사(start pos)[5], 마지막 품사(end pos)[6], 표현(expression)[7], * for node in self.mecab_tokenizer.parse(text, as_nodes=True): if node.is_eos(): break surface = node.surface feature = node.feature tag, _, expr = feature.partition(",") lemma, _, remainder = expr.partition("/") if lemma == "*": lemma = surface yield {"surface": surface, "lemma": lemma, "tag": tag} def score(self, examples): validate_examples(examples, "KoreanTokenizer.score") return Scorer.score_tokenization(examples) class KoreanDefaults(BaseDefaults): config = load_config_from_str(DEFAULT_CONFIG) lex_attr_getters = LEX_ATTRS stop_words = STOP_WORDS writing_system = {"direction": "ltr", "has_case": False, "has_letters": False} infixes = TOKENIZER_INFIXES class Korean(Language): lang = "ko" Defaults = KoreanDefaults def try_mecab_import() -> None: try: from natto import MeCab return MeCab except ImportError: raise ImportError( 'The Korean tokenizer ("spacy.ko.KoreanTokenizer") requires ' "[mecab-ko](https://bitbucket.org/eunjeon/mecab-ko/src/master/README.md), " "[mecab-ko-dic](https://bitbucket.org/eunjeon/mecab-ko-dic), " "and [natto-py](https://github.com/buruzaemon/natto-py)" ) from None def check_spaces(text, tokens): prev_end = -1 start = 0 for token in tokens: idx = text.find(token, start) if prev_end > 0: yield prev_end != idx prev_end = idx + len(token) start = prev_end if start > 0: yield False 
__all__ = ["Korean"]
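
A minimal usage sketch for the Korean tokenizer defined above (not part of the repository). It assumes spaCy plus mecab-ko, mecab-ko-dic, and natto-py are installed locally; the sample sentence is illustrative.

import spacy

# blank:ko wires up KoreanTokenizer via the factory registered above
nlp = spacy.blank("ko")
doc = nlp("런던은 영국의 수도이다")
for token in doc:
    # tag_ holds the first mecab-ko tag; the full per-token tag sequence
    # is preserved in doc.user_data["full_tags"]
    print(token.text, token.tag_, token.pos_, token.lemma_)
print(doc.user_data["full_tags"])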
spaCy-master/spacy/lang/ko/examples.py
""" Example sentences to test spaCy and its language models. >>> from spacy.lang.ko.examples import sentences >>> docs = nlp.pipe(sentences) """ sentences = [ "애플이 영국의 스타트업을 10억 달러에 인수하는 것을 알아보고 있다.", "자율주행 자동차의 손해 배상 책임이 제조 업체로 옮겨 가다", "샌프란시스코 시가 자동 배달 로봇의 보도 주행 금지를 검토 중이라고 합니다.", "런던은 영국의 수도이자 가장 큰 도시입니다.", ]
spaCy-master/spacy/lang/ko/lex_attrs.py
from ...attrs import LIKE_NUM

_num_words = [
    "영",
    "공",
    # Native Korean number system
    "하나",
    "둘",
    "셋",
    "넷",
    "다섯",
    "여섯",
    "일곱",
    "여덟",
    "아홉",
    "열",
    "스물",
    "서른",
    "마흔",
    "쉰",
    "예순",
    "일흔",
    "여든",
    "아흔",
    # Sino-Korean number system
    "일",
    "이",
    "삼",
    "사",
    "오",
    "육",
    "칠",
    "팔",
    "구",
    "십",
    "백",
    "천",
    "만",
    "십만",
    "백만",
    "천만",
    "일억",
    "십억",
    "백억",
]


def like_num(text):
    if text.startswith(("+", "-", "±", "~")):
        text = text[1:]
    text = text.replace(",", "").replace(".", "")
    if text.isdigit():
        return True
    if text.count("/") == 1:
        num, denom = text.split("/")
        if num.isdigit() and denom.isdigit():
            return True
    # Whole-word match first: multi-syllable numerals like "하나" would not
    # pass the per-character check below.
    if text in _num_words:
        return True
    if any(char.lower() in _num_words for char in text):
        return True
    return False


LEX_ATTRS = {LIKE_NUM: like_num}
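
A quick sketch of what `like_num` accepts, based only on the logic above; the expected values in the comments are what the code implies.

from spacy.lang.ko.lex_attrs import like_num

print(like_num("10,000"))  # True: digits once commas are stripped
print(like_num("3/4"))     # True: digit numerator and denominator
print(like_num("하나"))    # True: whole-word match in _num_words
print(like_num("십만"))    # True: every character is a Sino-Korean numeral
print(like_num("나무"))    # False: no numeral characters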
spaCy-master/spacy/lang/ko/punctuation.py
from ..char_classes import LIST_QUOTES
from ..punctuation import TOKENIZER_INFIXES as BASE_TOKENIZER_INFIXES

_infixes = (
    ["·", "ㆍ", r"\(", r"\)"]
    + [r"(?<=[0-9])~(?=[0-9-])"]
    + LIST_QUOTES
    + BASE_TOKENIZER_INFIXES
)

TOKENIZER_INFIXES = _infixes
spaCy-master/spacy/lang/ko/stop_words.py
STOP_WORDS = set(
    """
이 있 하 것 들 그 되 수 이 보 않 없 나 주 아니 등 같 때 년 가 한 지 오 말 일 그렇 위하
때문 그것 두 말하 알 그러나 받 못하 일 그런 또 더 많 그리고 좋 크 시키 그러 하나 살
데 안 어떤 번 나 다른 어떻 들 이렇 점 싶 말 좀 원 잘 놓
""".split()
)
spaCy-master/spacy/lang/ko/tag_map.py
from ...symbols import (
    ADJ,
    ADP,
    ADV,
    AUX,
    CONJ,
    DET,
    INTJ,
    NOUN,
    NUM,
    POS,
    PRON,
    PROPN,
    PUNCT,
    SYM,
    VERB,
    X,
)

# Maps the POS tags of mecab-ko-dic onto Universal POS tags.
# https://docs.google.com/spreadsheets/d/1-9blXKjtjeKZqsf4NzHeYJCrr49-nXeRF6D80udfcwY/edit#gid=589544265
# https://universaldependencies.org/u/pos/
TAG_MAP = {
    # J.{1,2} particles (josa)
    "JKS": {POS: ADP},
    "JKC": {POS: ADP},
    "JKG": {POS: ADP},
    "JKO": {POS: ADP},
    "JKB": {POS: ADP},
    "JKV": {POS: ADP},
    "JKQ": {POS: ADP},
    "JX": {POS: ADP},  # auxiliary particle
    "JC": {POS: CONJ},  # conjunctive particle
    "MAJ": {POS: CONJ},  # conjunctive adverb
    "MAG": {POS: ADV},  # general adverb
    "MM": {POS: DET},  # determiner
    "XPN": {POS: X},  # prefix
    # XS. suffixes
    "XSN": {POS: X},
    "XSV": {POS: X},
    "XSA": {POS: X},
    "XR": {POS: X},  # root
    # E.{1,2} endings (eomi)
    "EP": {POS: X},
    "EF": {POS: X},
    "EC": {POS: X},
    "ETN": {POS: X},
    "ETM": {POS: X},
    "IC": {POS: INTJ},  # interjection
    "VV": {POS: VERB},  # verb
    "VA": {POS: ADJ},  # adjective
    "VX": {POS: AUX},  # auxiliary predicate
    "VCP": {POS: ADP},  # positive copula (ida, "to be")
    "VCN": {POS: ADJ},  # negative copula (anida, "to not be")
    "NNG": {POS: NOUN},  # general noun
    "NNB": {POS: NOUN},  # bound noun
    "NNBC": {POS: NOUN},  # bound noun (unit)
    "NNP": {POS: PROPN},  # proper noun
    "NP": {POS: PRON},  # pronoun
    "NR": {POS: NUM},  # numeral
    "SN": {POS: NUM},  # digits
    # S.{1,2} symbols and punctuation
    "SF": {POS: PUNCT},  # period or other EOS marker
    "SE": {POS: PUNCT},
    "SC": {POS: PUNCT},  # comma, etc.
    "SSO": {POS: PUNCT},  # open bracket
    "SSC": {POS: PUNCT},  # close bracket
    "SY": {POS: SYM},  # other symbols
    "SL": {POS: X},  # foreign-language material
    "SH": {POS: X},  # Chinese characters (hanja)
}
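
The map is consulted by KoreanTokenizer.__call__ using the first mecab-ko tag; a direct lookup sketch:

from spacy.symbols import NOUN, POS
from spacy.lang.ko.tag_map import TAG_MAP

# "NNG" (general noun) maps onto the Universal POS tag NOUN
assert TAG_MAP["NNG"][POS] == NOUN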
spaCy-master/spacy/lang/ky/__init__.py
from ...language import BaseDefaults, Language
from .lex_attrs import LEX_ATTRS
from .punctuation import TOKENIZER_INFIXES
from .stop_words import STOP_WORDS
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS


class KyrgyzDefaults(BaseDefaults):
    tokenizer_exceptions = TOKENIZER_EXCEPTIONS
    infixes = TOKENIZER_INFIXES
    lex_attr_getters = LEX_ATTRS
    stop_words = STOP_WORDS


class Kyrgyz(Language):
    lang = "ky"
    Defaults = KyrgyzDefaults


__all__ = ["Kyrgyz"]
spaCy-master/spacy/lang/ky/examples.py
""" Example sentences to test spaCy and its language models. >>> from spacy.lang.ky.examples import sentences >>> docs = nlp.pipe(sentences) """ sentences = [ "Apple Улуу Британия стартабын $1 миллиардга сатып алууну көздөөдө.", "Автоном автомобилдерди камсыздоо жоопкерчилиги өндүрүүчүлөргө артылды.", "Сан-Франциско тротуар менен жүрүүчү робот-курьерлерге тыю салууну караштырууда.", "Лондон - Улуу Британияда жайгашкан ири шаар.", "Кайдасың?", "Франциянын президенти ким?", "Америка Кошмо Штаттарынын борбор калаасы кайсы шаар?", "Барак Обама качан төрөлгөн?", ]
spaCy-master/spacy/lang/ky/lex_attrs.py
from ...attrs import LIKE_NUM

_num_words = [
    "нөл",
    "ноль",
    "бир",
    "эки",
    "үч",
    "төрт",
    "беш",
    "алты",
    "жети",
    "сегиз",
    "тогуз",
    "он",
    "жыйырма",
    "отуз",
    "кырк",
    "элүү",
    "алтымыш",
    "жетмиш",
    "сексен",
    "токсон",
    "жүз",
    "миң",
    "миллион",
    "миллиард",
    "триллион",
    "триллиард",
]


def like_num(text):
    if text.startswith(("+", "-", "±", "~")):
        text = text[1:]
    text = text.replace(",", "").replace(".", "")
    if text.isdigit():
        return True
    if text.count("/") == 1:
        num, denom = text.split("/")
        if num.isdigit() and denom.isdigit():
            return True
    if text in _num_words:
        return True
    return False


LEX_ATTRS = {LIKE_NUM: like_num}
spaCy-master/spacy/lang/ky/punctuation.py
from ..char_classes import (
    ALPHA,
    ALPHA_LOWER,
    ALPHA_UPPER,
    CONCAT_QUOTES,
    HYPHENS,
    LIST_ELLIPSES,
    LIST_ICONS,
)

_hyphens_no_dash = HYPHENS.replace("-", "").strip("|").replace("||", "")
_infixes = (
    LIST_ELLIPSES
    + LIST_ICONS
    + [
        r"(?<=[{al}])\.(?=[{au}])".format(al=ALPHA_LOWER, au=ALPHA_UPPER),
        r"(?<=[{a}])[,!?/()]+(?=[{a}])".format(a=ALPHA),
        r"(?<=[{a}{q}])[:<>=](?=[{a}])".format(a=ALPHA, q=CONCAT_QUOTES),
        r"(?<=[{a}])--(?=[{a}])".format(a=ALPHA),
        r"(?<=[{a}]),(?=[{a}])".format(a=ALPHA),
        r"(?<=[{a}])([{q}\)\]\(\[])(?=[\-{a}])".format(a=ALPHA, q=CONCAT_QUOTES),
        r"(?<=[{a}])(?:{h})(?=[{a}])".format(a=ALPHA, h=_hyphens_no_dash),
        r"(?<=[0-9])-(?=[{a}])".format(a=ALPHA),
        r"(?<=[0-9])-(?=[0-9])",
    ]
)

TOKENIZER_INFIXES = _infixes
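
A sketch of the digit infix rules above with a blank Kyrgyz pipeline; the input string is illustrative.

import spacy

nlp = spacy.blank("ky")
# year ranges and digit-to-word hyphens are split by the last two infixes
print([t.text for t in nlp("1991-2021-жылдар")])
# expected: ['1991', '-', '2021', '-', 'жылдар']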
spaCy-master/spacy/lang/ky/stop_words.py
STOP_WORDS = set(
    """
ага адам айтты айтымында айтып ал алар алардын алган алуу алып анда андан аны анын ар
бар басма баш башка башкы башчысы берген биз билдирген билдирди бир биринчи бирок бишкек
болгон болот болсо болуп боюнча буга бул гана да дагы деген деди деп жана жатат жаткан
жаңы же жогорку жок жол жолу кабыл калган кандай карата каршы катары келген керек кийин
кол кылмыш кыргыз күнү көп маалымат мамлекеттик мен менен миң мурдагы мыйзам мындай
мүмкүн ошол ошондой сүрөт сөз тарабынан турган тууралуу укук учурда чейин чек экенин эки
эл эле эмес эми эч үч үчүн өз
""".split()
)
spaCy-master/spacy/lang/ky/tokenizer_exceptions.py
from ...symbols import NORM, ORTH
from ...util import update_exc
from ..tokenizer_exceptions import BASE_EXCEPTIONS

_exc = {}

_abbrev_exc = [
    # Weekdays abbreviations
    {ORTH: "дүй", NORM: "дүйшөмбү"},
    {ORTH: "шей", NORM: "шейшемби"},
    {ORTH: "шар", NORM: "шаршемби"},
    {ORTH: "бей", NORM: "бейшемби"},
    {ORTH: "жум", NORM: "жума"},
    {ORTH: "ишм", NORM: "ишемби"},
    {ORTH: "жек", NORM: "жекшемби"},
    # Months abbreviations
    {ORTH: "янв", NORM: "январь"},
    {ORTH: "фев", NORM: "февраль"},
    {ORTH: "мар", NORM: "март"},
    {ORTH: "апр", NORM: "апрель"},
    {ORTH: "июн", NORM: "июнь"},
    {ORTH: "июл", NORM: "июль"},
    {ORTH: "авг", NORM: "август"},
    {ORTH: "сен", NORM: "сентябрь"},
    {ORTH: "окт", NORM: "октябрь"},
    {ORTH: "ноя", NORM: "ноябрь"},
    {ORTH: "дек", NORM: "декабрь"},
    # Number abbreviations
    {ORTH: "млрд", NORM: "миллиард"},
    {ORTH: "млн", NORM: "миллион"},
]

for abbr in _abbrev_exc:
    for orth in (abbr[ORTH], abbr[ORTH].capitalize(), abbr[ORTH].upper()):
        _exc[orth] = [{ORTH: orth, NORM: abbr[NORM]}]
        _exc[orth + "."] = [{ORTH: orth + ".", NORM: abbr[NORM]}]

for exc_data in [  # "etc." abbreviations
    {ORTH: "ж.б.у.с.", NORM: "жана башка ушул сыяктуу"},
    {ORTH: "ж.б.", NORM: "жана башка"},
    {ORTH: "ж.", NORM: "жыл"},
    {ORTH: "б.з.ч.", NORM: "биздин заманга чейин"},
    {ORTH: "б.з.", NORM: "биздин заман"},
    {ORTH: "кк.", NORM: "кылымдар"},
    {ORTH: "жж.", NORM: "жылдар"},
    {ORTH: "к.", NORM: "кылым"},
    {ORTH: "көч.", NORM: "көчөсү"},
    {ORTH: "м-н", NORM: "менен"},
    {ORTH: "б-ча", NORM: "боюнча"},
]:
    _exc[exc_data[ORTH]] = [exc_data]

TOKENIZER_EXCEPTIONS = update_exc(BASE_EXCEPTIONS, _exc)
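
Each abbreviation above is registered in six surface forms: lower-case, capitalized, and upper-case, each with and without a trailing period. A sketch that checks the generated entries:

from spacy.lang.ky.tokenizer_exceptions import TOKENIZER_EXCEPTIONS

# "янв" (January) yields янв, Янв, ЯНВ, янв., Янв., ЯНВ.
for orth in ("янв", "Янв", "ЯНВ", "янв.", "Янв.", "ЯНВ."):
    assert orth in TOKENIZER_EXCEPTIONS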
spaCy-master/spacy/lang/la/__init__.py
from ...language import BaseDefaults, Language
from .lex_attrs import LEX_ATTRS
from .stop_words import STOP_WORDS
from .syntax_iterators import SYNTAX_ITERATORS
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS


class LatinDefaults(BaseDefaults):
    tokenizer_exceptions = TOKENIZER_EXCEPTIONS
    stop_words = STOP_WORDS
    lex_attr_getters = LEX_ATTRS
    syntax_iterators = SYNTAX_ITERATORS


class Latin(Language):
    lang = "la"
    Defaults = LatinDefaults


__all__ = ["Latin"]
spaCy-master/spacy/lang/la/examples.py
""" Example sentences to test spaCy and its language models. >>> from spacy.lang.la.examples import sentences >>> docs = nlp.pipe(sentences) """ # > Caes. BG 1.1 # > Cic. De Amic. 1 # > V. Georg. 1.1-5 # > Gen. 1:1 # > Galileo, Sid. Nunc. # > van Schurman, Opusc. arg. 1 sentences = [ "Gallia est omnis divisa in partes tres, quarum unam incolunt Belgae, aliam Aquitani, tertiam qui ipsorum lingua Celtae, nostra Galli appellantur.", "Q. Mucius augur multa narrare de C. Laelio socero suo memoriter et iucunde solebat nec dubitare illum in omni sermone appellare sapientem.", "Quid faciat laetas segetes, quo sidere terram uertere, Maecenas, ulmisque adiungere uitis conueniat, quae cura boum, qui cultus habendo sit pecori, apibus quanta experientia parcis, hinc canere incipiam", "In principio creavit Deus caelum et terram.", "Quo sumpto, intelligatur lunaris globus, cuius maximus circulus CAF, centrum vero E, dimetiens CF, qui ad Terre diametrum est ut duo ad septem.", "Cuicunque natura indita sunt principia, seu potentiae principiorum omnium artium, ac scientiarum, ei conveniunt omnes artes ac scientiae.", ]
spaCy-master/spacy/lang/la/lex_attrs.py
import re

from ...attrs import LIKE_NUM

# cf. Goyvaerts/Levithan 2009; case-insensitive, allows up to four repeats
# of the same numeral (e.g. IIII)
roman_numerals_compile = re.compile(
    r"(?i)^(?=[MDCLXVI])M*(C[MD]|D?C{0,4})(X[CL]|L?X{0,4})(I[XV]|V?I{0,4})$"
)

_num_words = """unus una unum duo duae tres tria quattuor quinque sex septem octo novem
decem undecim duodecim tredecim quattuordecim quindecim sedecim septendecim duodeviginti
undeviginti viginti triginta quadraginta quinquaginta sexaginta septuaginta octoginta
nonaginta centum ducenti ducentae ducenta trecenti trecentae trecenta quadringenti
quadringentae quadringenta quingenti quingentae quingenta sescenti sescentae sescenta
septingenti septingentae septingenta octingenti octingentae octingenta nongenti
nongentae nongenta mille
""".split()

_num_words += [item.replace("v", "u") for item in _num_words]
_num_words = set(_num_words)

_ordinal_words = """primus prima primum secundus secunda secundum tertius tertia tertium
quartus quarta quartum quintus quinta quintum sextus sexta sextum septimus septima
septimum octavus octava octavum nonus nona nonum decimus decima decimum undecimus
undecima undecimum duodecimus duodecima duodecimum duodevicesimus duodevicesima
duodevicesimum undevicesimus undevicesima undevicesimum vicesimus vicesima vicesimum
tricesimus tricesima tricesimum quadragesimus quadragesima quadragesimum quinquagesimus
quinquagesima quinquagesimum sexagesimus sexagesima sexagesimum septuagesimus
septuagesima septuagesimum octogesimus octogesima octogesimum nonagesimus nonagesima
nonagesimum centesimus centesima centesimum ducentesimus ducentesima ducentesimum
trecentesimus trecentesima trecentesimum quadringentesimus quadringentesima
quadringentesimum quingentesimus quingentesima quingentesimum sescentesimus sescentesima
sescentesimum septingentesimus septingentesima septingentesimum octingentesimus
octingentesima octingentesimum nongentesimus nongentesima nongentesimum millesimus
millesima millesimum""".split()

_ordinal_words += [item.replace("v", "u") for item in _ordinal_words]
_ordinal_words = set(_ordinal_words)


def like_num(text):
    if text.isdigit():
        return True
    if roman_numerals_compile.match(text):
        return True
    if text.lower() in _num_words:
        return True
    if text.lower() in _ordinal_words:
        return True
    return False


LEX_ATTRS = {LIKE_NUM: like_num}
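
A sketch exercising the Roman-numeral pattern and word lists above; the expected results in the comments follow directly from the regex.

from spacy.lang.la.lex_attrs import like_num

print(like_num("MMXXIV"))  # True: well-formed Roman numeral
print(like_num("mmxxiv"))  # True: the (?i) flag makes it case-insensitive
print(like_num("IIII"))    # True: up to four repeats are allowed
print(like_num("VV"))      # False: V may not repeat
print(like_num("tres"))    # True: listed cardinal
print(like_num("primus"))  # True: listed ordinal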
spaCy-master/spacy/lang/la/stop_words.py
# Corrected Perseus list, cf. https://wiki.digitalclassicist.org/Stopwords_for_Greek_and_Latin

STOP_WORDS = set(
    """
ab ac ad adhuc aliqui aliquis an ante apud at atque aut autem
cum cur
de deinde dum
ego enim ergo es est et etiam etsi ex
fio
haud hic
iam idem igitur ille in infra inter interim ipse is ita
magis modo mox
nam ne nec necque neque nisi non nos
o ob
per possum post pro
quae quam quare qui quia quicumque quidem quilibet quis quisnam quisquam quisque quisquis quo quoniam
sed si sic sive sub sui sum super suus
tam tamen trans tu tum
ubi uel uero
vel vero
""".split()
)
spaCy-master/spacy/lang/la/syntax_iterators.py
from typing import Iterator, Tuple, Union

from ...errors import Errors
from ...symbols import AUX, NOUN, PRON, PROPN, VERB
from ...tokens import Doc, Span

# NB: Modified from da on suggestion from
# https://github.com/explosion/spaCy/issues/7457#issuecomment-800349751 [PJB]


def noun_chunks(doclike: Union[Doc, Span]) -> Iterator[Tuple[int, int, int]]:
    def is_verb_token(tok):
        return tok.pos in [VERB, AUX]

    def get_left_bound(root):
        left_bound = root
        for tok in reversed(list(root.lefts)):
            if tok.dep in np_left_deps:
                left_bound = tok
        return left_bound

    def get_right_bound(doc, root):
        right_bound = root
        for tok in root.rights:
            if tok.dep in np_right_deps:
                right = get_right_bound(doc, tok)
                if list(
                    filter(
                        lambda t: is_verb_token(t) or t.dep in stop_deps,
                        doc[root.i : right.i],
                    )
                ):
                    break
                else:
                    right_bound = right
        return right_bound

    def get_bounds(doc, root):
        return get_left_bound(root), get_right_bound(doc, root)

    doc = doclike.doc  # Ensure works on both Doc and Span.

    if not doc.has_annotation("DEP"):
        raise ValueError(Errors.E029)

    if not len(doc):
        return

    left_labels = [
        "det",
        "fixed",
        "nmod:poss",
        "amod",
        "flat",
        "goeswith",
        "nummod",
        "appos",
    ]
    right_labels = [
        "fixed",
        "nmod:poss",
        "amod",
        "flat",
        "goeswith",
        "nummod",
        "appos",
        "nmod",
        "det",
    ]
    stop_labels = ["punct"]

    np_label = doc.vocab.strings.add("NP")
    np_left_deps = [doc.vocab.strings.add(label) for label in left_labels]
    np_right_deps = [doc.vocab.strings.add(label) for label in right_labels]
    stop_deps = [doc.vocab.strings.add(label) for label in stop_labels]

    prev_right = -1
    for token in doclike:
        if token.pos in [PROPN, NOUN, PRON]:
            left, right = get_bounds(doc, token)
            if left.i <= prev_right:
                continue
            yield left.i, right.i + 1, np_label
            prev_right = right.i


SYNTAX_ITERATORS = {"noun_chunks": noun_chunks}
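
A self-contained sketch of the iterator above on a hand-annotated phrase; the sentence and its dependency annotations are illustrative, not the output of a trained parser (spaCy ships no Latin parser).

import spacy
from spacy.tokens import Doc
from spacy.lang.la.syntax_iterators import noun_chunks

nlp = spacy.blank("la")
# "puella bona cantat" -- "the good girl sings", annotated by hand
doc = Doc(
    nlp.vocab,
    words=["puella", "bona", "cantat"],
    pos=["NOUN", "ADJ", "VERB"],
    heads=[2, 0, 2],
    deps=["nsubj", "amod", "ROOT"],
)
for start, end, label in noun_chunks(doc):
    print(doc[start:end].text)  # puella bona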
spaCy-master/spacy/lang/la/tokenizer_exceptions.py
from ...symbols import ORTH
from ...util import update_exc
from ..tokenizer_exceptions import BASE_EXCEPTIONS

## TODO: Look into systematically handling u/v

_exc = {
    "mecum": [{ORTH: "me"}, {ORTH: "cum"}],
    "tecum": [{ORTH: "te"}, {ORTH: "cum"}],
    "nobiscum": [{ORTH: "nobis"}, {ORTH: "cum"}],
    "vobiscum": [{ORTH: "vobis"}, {ORTH: "cum"}],
    "uobiscum": [{ORTH: "uobis"}, {ORTH: "cum"}],
}

_abbrev_exc = """A. A.D. Aa. Aaa. Acc. Agr. Ap. Apr. April. A.U.C. Aug. C. Caes. Caess.
Cc. Cn. Coll. Cons. Conss. Cos. Coss. D. D.N. Dat. Dd. Dec. Decemb. Decembr. F. Feb.
Febr. Februar. Ian. Id. Imp. Impp. Imppp. Iul. Iun. K. Kal. L. M'. M. Mai. Mam. Mar.
Mart. Med. N. Nn. Nob. Non. Nov. Novemb. Oct. Octob. Opet. Ord. P. Paul. Pf. Pl. Plur.
Post. Pp. Prid. Pro. Procos. Q. Quint. S. S.C. Scr. Sept. Septemb. Ser. Sert. Sex.
Sext. St. Sta. Suff. T. Ti. Trib. V. Vol. Vop. Vv.""".split()

_abbrev_exc += [item.lower() for item in _abbrev_exc]
_abbrev_exc += [item.upper() for item in _abbrev_exc]
_abbrev_exc += [item.replace("v", "u").replace("V", "U") for item in _abbrev_exc]
_abbrev_exc += ["d.N."]

for orth in set(_abbrev_exc):
    _exc[orth] = [{ORTH: orth}]

TOKENIZER_EXCEPTIONS = update_exc(BASE_EXCEPTIONS, _exc)
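
A sketch of the special cases above applied by a blank Latin pipeline:

import spacy

nlp = spacy.blank("la")
print([t.text for t in nlp("mecum")])      # ['me', 'cum']
print([t.text for t in nlp("Kal. Ian.")])  # ['Kal.', 'Ian.'] -- periods stay attached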
spaCy-master/spacy/lang/lb/__init__.py
from ...language import BaseDefaults, Language
from .lex_attrs import LEX_ATTRS
from .punctuation import TOKENIZER_INFIXES
from .stop_words import STOP_WORDS
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS


class LuxembourgishDefaults(BaseDefaults):
    tokenizer_exceptions = TOKENIZER_EXCEPTIONS
    infixes = TOKENIZER_INFIXES
    lex_attr_getters = LEX_ATTRS
    stop_words = STOP_WORDS


class Luxembourgish(Language):
    lang = "lb"
    Defaults = LuxembourgishDefaults


__all__ = ["Luxembourgish"]
spaCy-master/spacy/lang/lb/examples.py
""" Example sentences to test spaCy and its language models. >>> from spacy.lang.lb.examples import sentences >>> docs = nlp.pipe(sentences) """ sentences = [ "An der Zäit hunn sech den Nordwand an d’Sonn gestridden, wie vun hinnen zwee wuel méi staark wier, wéi e Wanderer, deen an ee waarme Mantel agepak war, iwwert de Wee koum.", "Si goufen sech eens, dass deejéinege fir de Stäerkste gëlle sollt, deen de Wanderer forcéiere géif, säi Mantel auszedoen.", "Den Nordwand huet mat aller Force geblosen, awer wat e méi geblosen huet, wat de Wanderer sech méi a säi Mantel agewéckelt huet.", "Um Enn huet den Nordwand säi Kampf opginn.", "Dunn huet d’Sonn d’Loft mat hire frëndleche Strale gewiermt, a schonn no kuerzer Zäit huet de Wanderer säi Mantel ausgedoen.", "Do huet den Nordwand missen zouginn, dass d’Sonn vun hinnen zwee de Stäerkste wier.", ]
spaCy-master/spacy/lang/lb/lex_attrs.py
from ...attrs import LIKE_NUM

_num_words = set(
    """
null eent zwee dräi véier fënnef sechs ziwen aacht néng zéng eelef zwielef dräizéng
véierzéng foffzéng siechzéng siwwenzéng uechtzeng uechzeng nonnzéng nongzéng zwanzeg
drësseg véierzeg foffzeg sechzeg siechzeg siwenzeg achtzeg achzeg uechtzeg uechzeg
nonnzeg honnert dausend millioun milliard billioun billiard trillioun triliard
""".split()
)

_ordinal_words = set(
    """
éischten zweeten drëtten véierten fënneften sechsten siwenten aachten néngten zéngten
eeleften zwieleften dräizéngten véierzéngten foffzéngten siechzéngten uechtzéngen
uechzéngten nonnzéngten nongzéngten zwanzegsten drëssegsten véierzegsten foffzegsten
siechzegsten siwenzegsten uechzegsten nonnzegsten honnertsten dausendsten milliounsten
milliardsten billiounsten billiardsten trilliounsten trilliardsten
""".split()
)


def like_num(text):
    """Check if text resembles a number."""
    text = text.replace(",", "").replace(".", "")
    if text.isdigit():
        return True
    if text.count("/") == 1:
        num, denom = text.split("/")
        if num.isdigit() and denom.isdigit():
            return True
    if text in _num_words:
        return True
    if text in _ordinal_words:
        return True
    return False


LEX_ATTRS = {LIKE_NUM: like_num}
spaCy-master/spacy/lang/lb/punctuation.py
from ..char_classes import ALPHA, ALPHA_LOWER, ALPHA_UPPER, LIST_ELLIPSES, LIST_ICONS

ELISION = " ' ’ ".strip().replace(" ", "")
abbrev = "dD"  # characters that can start an elision like d'/D'

_infixes = (
    LIST_ELLIPSES
    + LIST_ICONS
    + [
        r"(?<=^[{ab}][{el}])(?=[{a}])".format(ab=abbrev, a=ALPHA, el=ELISION),
        r"(?<=[{al}])\.(?=[{au}])".format(al=ALPHA_LOWER, au=ALPHA_UPPER),
        r"(?<=[{a}])[,!?](?=[{a}])".format(a=ALPHA),
        r"(?<=[{a}])[:<>=](?=[{a}])".format(a=ALPHA),
        r"(?<=[{a}]),(?=[{a}])".format(a=ALPHA),
        r"(?<=[{a}])--(?=[{a}])".format(a=ALPHA),
        r"(?<=[0-9])-(?=[0-9])",
    ]
)

TOKENIZER_INFIXES = _infixes
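
The first infix splits a leading d'/D' elision from the word that follows it. A sketch with a blank Luxembourgish pipeline:

import spacy

nlp = spacy.blank("lb")
print([t.text for t in nlp("d'Sonn")])  # ["d'", 'Sonn']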
spaCy-master/spacy/lang/lb/stop_words.py
STOP_WORDS = set(
    """
a à äis är ärt äert ären all allem alles alleguer als also am an anerefalls ass aus awer
bei beim bis bis d' dach datt däin där dat de dee den deel deem deen deene déi den deng
denger dem der dësem di dir do da dann domat dozou drop du duerch duerno
e ee em een eent ë en ënner ëm ech eis eise eisen eiser eises eisereen esou een eng
enger engem entweder et eréischt
falls fir
géint géif gëtt gët geet gi ginn gouf gouff goung
hat haten hatt hätt hei hu huet hun hunn hiren hien hin hier hir
jidderen jiddereen jiddwereen jiddereng jiddwerengen jo
ins iech iwwer
kann kee keen kënne kënnt kéng kéngen kéngem koum kuckt
mam mat ma mä mech méi mécht meng menger mer mir muss
nach nämmlech nämmelech näischt nawell nëmme nëmmen net nees nee no nu nom
och oder ons onsen onser onsereen onst om op ouni
säi säin schonn schonns si sid sie se sech seng senge sengem senger selwecht selwer sinn
sollten souguer sou soss sot
't tëscht u un um virdrun vu vum vun
wann war waren was wat wëllt weider wéi wéini wéinst wi wollt wou wouhin
zanter ze zu zum zwar
""".split()
)
spaCy-master/spacy/lang/lb/tokenizer_exceptions.py
from ...symbols import NORM, ORTH
from ...util import update_exc
from ..tokenizer_exceptions import BASE_EXCEPTIONS

# TODO
# treat other apostrophes within words as part of the word:
# [op d'mannst], [fir d'éischt] (= exceptions)

_exc = {}

# translate / delete what is not necessary
for exc_data in [
    {ORTH: "’t", NORM: "et"},
    {ORTH: "’T", NORM: "et"},
    {ORTH: "'t", NORM: "et"},
    {ORTH: "'T", NORM: "et"},
    {ORTH: "wgl.", NORM: "wannechgelift"},
    {ORTH: "M.", NORM: "Monsieur"},
    {ORTH: "Mme.", NORM: "Madame"},
    {ORTH: "Dr.", NORM: "Dokter"},
    {ORTH: "Tel.", NORM: "Telefon"},
    {ORTH: "asw.", NORM: "an sou weider"},
    {ORTH: "etc.", NORM: "et cetera"},
    {ORTH: "bzw.", NORM: "bezéiungsweis"},
    {ORTH: "Jan.", NORM: "Januar"},
]:
    _exc[exc_data[ORTH]] = [exc_data]

# to be extended
# NB: "Dr." and "etc." are handled above with their norms; listing them here
# too would overwrite those entries with plain ones.
for orth in [
    "z.B.",
    "Dipl.",
    "i.e.",
    "o.k.",
    "O.K.",
    "p.a.",
    "p.s.",
    "P.S.",
    "phil.",
    "q.e.d.",
    "R.I.P.",
    "rer.",
    "sen.",
    "ë.a.",
    "U.S.",
    "U.S.A.",
]:
    _exc[orth] = [{ORTH: orth}]

TOKENIZER_EXCEPTIONS = update_exc(BASE_EXCEPTIONS, _exc)
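
A sketch of the norms above, assuming a blank Luxembourgish pipeline; norm_ falls back to the lower-cased form for regular words.

import spacy

nlp = spacy.blank("lb")
doc = nlp("'t ass wgl. esou")
print([t.norm_ for t in doc])  # ['et', 'ass', 'wannechgelift', 'esou']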
spaCy-master/spacy/lang/lg/__init__.py
from ...language import BaseDefaults, Language
from .lex_attrs import LEX_ATTRS
from .punctuation import TOKENIZER_INFIXES
from .stop_words import STOP_WORDS


class LugandaDefaults(BaseDefaults):
    lex_attr_getters = LEX_ATTRS
    infixes = TOKENIZER_INFIXES
    stop_words = STOP_WORDS


class Luganda(Language):
    lang = "lg"
    Defaults = LugandaDefaults


__all__ = ["Luganda"]
spaCy-master/spacy/lang/lg/examples.py
""" Example sentences to test spaCy and its language models. >>> from spacy.lang.lg.examples import sentences >>> docs = nlp.pipe(sentences) """ sentences = [ "Mpa ebyafaayo ku byalo Nakatu ne Nkajja", "Okuyita Ttembo kitegeeza kugwa ddalu", "Ekifumu kino kyali kya mulimu ki?", "Ekkovu we liyise wayitibwa mukululo", "Akola mulimu ki oguvaamu ssente?", "Emisumaali egikomerera embaawo giyitibwa nninga", "Abooluganda ab’emmamba ababiri", "Ekisaawe ky'ebyenjigiriza kya mugaso nnyo", ]
spaCy-master/spacy/lang/lg/lex_attrs.py
from ...attrs import LIKE_NUM

_num_words = [
    "nnooti",  # zero
    "zeero",  # zero
    "emu",  # one
    "bbiri",  # two
    "ssatu",  # three
    "nnya",  # four
    "ttaano",  # five
    "mukaaga",  # six
    "musanvu",  # seven
    "munaana",  # eight
    "mwenda",  # nine
    "kkumi",  # ten
    "kkumi n'emu",  # eleven
    "kkumi na bbiri",  # twelve
    "kkumi na ssatu",  # thirteen
    "kkumi na nnya",  # fourteen
    "kkumi na ttaano",  # fifteen
    "kkumi na mukaaga",  # sixteen
    "kkumi na musanvu",  # seventeen
    "kkumi na munaana",  # eighteen
    "kkumi na mwenda",  # nineteen
    "amakumi abiri",  # twenty
    "amakumi asatu",  # thirty
    "amakumi ana",  # forty
    "amakumi ataano",  # fifty
    "nkaaga",  # sixty
    "nsanvu",  # seventy
    "kinaana",  # eighty
    "kyenda",  # ninety
    "kikumi",  # hundred
    "lukumi",  # thousand
    "kakadde",  # million
    "kawumbi",  # billion
    "kase",  # trillion
    "katabalika",  # quadrillion
    "keesedde",  # gajillion
    "kafukunya",  # bazillion
    "ekisooka",  # first
    "ekyokubiri",  # second
    "ekyokusatu",  # third
    "ekyokuna",  # fourth
    "ekyokutaano",  # fifth
    "ekyomukaaga",  # sixth
    "ekyomusanvu",  # seventh
    "eky'omunaana",  # eighth
    "ekyomwenda",  # ninth
    "ekyekkumi",  # tenth
    "ekyekkumi n'ekimu",  # eleventh
    "ekyekkumi n'ebibiri",  # twelfth
    "ekyekkumi n'ebisatu",  # thirteenth
    "ekyekkumi n'ebina",  # fourteenth
    "ekyekkumi n'ebitaano",  # fifteenth
    "ekyekkumi n'omukaaga",  # sixteenth
    "ekyekkumi n'omusanvu",  # seventeenth
    "ekyekkumi n'omunaana",  # eighteenth
    "ekyekkumi n'omwenda",  # nineteenth
    "ekyamakumi abiri",  # twentieth
    "ekyamakumi asatu",  # thirtieth
    "ekyamakumi ana",  # fortieth
    "ekyamakumi ataano",  # fiftieth
    "ekyenkaaga",  # sixtieth
    "ekyensanvu",  # seventieth
    "ekyekinaana",  # eightieth
    "ekyekyenda",  # ninetieth
    "ekyekikumi",  # hundredth
    "ekyolukumi",  # thousandth
    "ekyakakadde",  # millionth
    "ekyakawumbi",  # billionth
    "ekyakase",  # trillionth
    "ekyakatabalika",  # quadrillionth
    "ekyakeesedde",  # gajillionth
    "ekyakafukunya",  # bazillionth
]


def like_num(text):
    if text.startswith(("+", "-", "±", "~")):
        text = text[1:]
    text = text.replace(",", "").replace(".", "")
    if text.isdigit():
        return True
    if text.count("/") == 1:
        num, denom = text.split("/")
        if num.isdigit() and denom.isdigit():
            return True
    text_lower = text.lower()
    if text_lower in _num_words:
        return True
    return False


LEX_ATTRS = {LIKE_NUM: like_num}
spaCy-master/spacy/lang/lg/punctuation.py
from ..char_classes import (
    ALPHA,
    ALPHA_LOWER,
    ALPHA_UPPER,
    CONCAT_QUOTES,
    HYPHENS,
    LIST_ELLIPSES,
    LIST_ICONS,
)

_infixes = (
    LIST_ELLIPSES
    + LIST_ICONS
    + [
        r"(?<=[0-9])[+\-\*^](?=[0-9-])",
        r"(?<=[{al}{q}])\.(?=[{au}{q}])".format(
            al=ALPHA_LOWER, au=ALPHA_UPPER, q=CONCAT_QUOTES
        ),
        r"(?<=[{a}]),(?=[{a}])".format(a=ALPHA),
        r"(?<=[{a}0-9])(?:{h})(?=[{a}])".format(a=ALPHA, h=HYPHENS),
        r"(?<=[{a}0-9])[:<>=/](?=[{a}])".format(a=ALPHA),
    ]
)

TOKENIZER_INFIXES = _infixes
spaCy-master/spacy/lang/lg/stop_words.py
STOP_WORDS = set(
    """
abadde abalala abamu abangi abava ajja ali alina ani anti ateekeddwa atewamu atya awamu
aweebwa ayinza ba baali babadde babalina bajja bajjanewankubade bali balina bandi bangi
bano bateekeddwa baweebwa bayina bebombi beera bibye bimu bingi bino bo bokka bonna buli
bulijjo bulungi bwabwe bwaffe bwayo bwe bwonna bya byabwe byaffe byebimu byonna ddaa
ddala ddi e ebimu ebiri ebweruobulungi ebyo edda ejja ekirala ekyo endala engeri ennyo
era erimu erina ffe ffenna ga gujja gumu gunno guno gwa gwe kaseera kati kennyini ki
kiki kikino kikye kikyo kino kirungi kki ku kubangabyombi kubangaolwokuba kudda kuva
kuwa kwegamba kyaffe kye kyekimuoyo kyekyo kyonna leero liryo lwa lwaki lyabwezaabwe
lyaffe lyange mbadde mingi mpozzi mu mulinaoyina munda mwegyabwe nolwekyo nabadde nabo
nandiyagadde nandiye nanti naye ne nedda neera nga nnyingi nnyini nnyinza nnyo nti
nyinza nze oba ojja okudda okugenda okuggyako okutuusa okuva okuwa oli olina oluvannyuma
olwekyobuva omuli ono osobola otya oyina oyo seetaaga si sinakindi singa talina tayina
tebaali tebaalina tebayina terina tetulina tetuteekeddwa tewali teyalina teyayina tolina
tu tuyina tulina tuyina twafuna twetaaga wa wabula wabweru wadde waggulunnina wakati
waliwobangi waliyo wandi wange wano wansi weebwa yabadde yaffe ye yenna yennyini yina
yonna ziba zijja zonna
""".split()
)
spaCy-master/spacy/lang/lij/__init__.py
from ...language import BaseDefaults, Language
from .punctuation import TOKENIZER_INFIXES
from .stop_words import STOP_WORDS
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS


class LigurianDefaults(BaseDefaults):
    tokenizer_exceptions = TOKENIZER_EXCEPTIONS
    infixes = TOKENIZER_INFIXES
    stop_words = STOP_WORDS


class Ligurian(Language):
    lang = "lij"
    Defaults = LigurianDefaults


__all__ = ["Ligurian"]
spaCy-master/spacy/lang/lij/examples.py
""" Example sentences to test spaCy and its language models. >>> from spacy.lang.lij.examples import sentences >>> docs = nlp.pipe(sentences) """ sentences = [ "Sciusciâ e sciorbî no se peu.", "Graçie di çetroin, che me son arrivæ.", "Vegnime apreuvo, che ve fasso pescâ di òmmi.", "Bella pe sempre l'ægua inta conchetta quande unn'agoggia d'ægua a se â trapaña.", ]