repo (string, 2-152 chars, nullable) | file (string, 15-239 chars) | code (string, up to 58.4M chars) | file_length (int64, up to 58.4M) | avg_line_length (float64, up to 1.81M) | max_line_length (int64, up to 12.7M) | extension_type (string, 364 classes)
---|---|---|---|---|---|---
spaCy | spaCy-master/spacy/lang/sv/__init__.py | from typing import Callable, Optional
from thinc.api import Model
from ...language import BaseDefaults, Language
from ...pipeline import Lemmatizer
from .lex_attrs import LEX_ATTRS
from .punctuation import TOKENIZER_INFIXES, TOKENIZER_SUFFIXES
from .stop_words import STOP_WORDS
from .syntax_iterators import SYNTAX_ITERATORS
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
class SwedishDefaults(BaseDefaults):
tokenizer_exceptions = TOKENIZER_EXCEPTIONS
infixes = TOKENIZER_INFIXES
suffixes = TOKENIZER_SUFFIXES
lex_attr_getters = LEX_ATTRS
syntax_iterators = SYNTAX_ITERATORS
stop_words = STOP_WORDS
class Swedish(Language):
lang = "sv"
Defaults = SwedishDefaults
@Swedish.factory(
"lemmatizer",
assigns=["token.lemma"],
default_config={
"model": None,
"mode": "rule",
"overwrite": False,
"scorer": {"@scorers": "spacy.lemmatizer_scorer.v1"},
},
default_score_weights={"lemma_acc": 1.0},
)
def make_lemmatizer(
nlp: Language,
model: Optional[Model],
name: str,
mode: str,
overwrite: bool,
scorer: Optional[Callable],
):
return Lemmatizer(
nlp.vocab, model, name, mode=mode, overwrite=overwrite, scorer=scorer
)
__all__ = ["Swedish"]
| 1,276 | 23.09434 | 77 | py |
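The `@Swedish.factory` block above registers a rule-based lemmatizer component for Swedish. A minimal usage sketch, assuming spaCy and the `spacy-lookups-data` package (which provides the Swedish rule tables) are installed; the sample sentence is made up:

```python
# Sketch only: add the registered "lemmatizer" factory to a blank Swedish pipeline.
import spacy

nlp = spacy.blank("sv")
nlp.add_pipe("lemmatizer", config={"mode": "rule"})
nlp.initialize()  # loads the rule/lookup tables required by rule mode
doc = nlp("Hon köpte böckerna igår.")
print([(token.text, token.lemma_) for token in doc])
```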
spaCy | spaCy-master/spacy/lang/sv/examples.py | """
Example sentences to test spaCy and its language models.
>>> from spacy.lang.sv.examples import sentences
>>> docs = nlp.pipe(sentences)
"""
sentences = [
"Apple överväger att köpa brittisk startup för 1 miljard dollar.",
"Självkörande bilar förskjuter försäkringsansvar mot tillverkare.",
"San Fransisco överväger förbud mot leveransrobotar på trottoarer.",
"London är en storstad i Storbritannien.",
]
| 427 | 27.533333 | 72 | py |
spaCy | spaCy-master/spacy/lang/sv/lex_attrs.py | from ...attrs import LIKE_NUM
_num_words = [
"noll",
"en",
"ett",
"två",
"tre",
"fyra",
"fem",
"sex",
"sju",
"åtta",
"nio",
"tio",
"elva",
"tolv",
"tretton",
"fjorton",
"femton",
"sexton",
"sjutton",
"arton",
"nitton",
"tjugo",
"trettio",
"fyrtio",
"femtio",
"sextio",
"sjuttio",
"åttio",
"nittio",
"hundra",
"tusen",
"miljon",
"miljard",
"biljon",
"biljard",
"kvadriljon",
]
def like_num(text):
if text.startswith(("+", "-", "±", "~")):
text = text[1:]
text = text.replace(",", "").replace(".", "")
if text.isdigit():
return True
if text.count("/") == 1:
num, denom = text.split("/")
if num.isdigit() and denom.isdigit():
return True
if text.lower() in _num_words:
return True
return False
LEX_ATTRS = {LIKE_NUM: like_num}
| 949 | 15.101695 | 49 | py |
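The `like_num` helper above backs the `LIKE_NUM` lexical attribute, which spaCy exposes as `Token.like_num`. A quick sketch with a made-up sentence:

```python
# Sketch: LIKE_NUM is picked up automatically once the Swedish defaults are loaded.
import spacy

nlp = spacy.blank("sv")
doc = nlp("Jag har tjugo äpplen och 3,5 kilo mjöl.")
print([(token.text, token.like_num) for token in doc])
# "tjugo" matches the word list and "3,5" passes the digit check.
```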
spaCy | spaCy-master/spacy/lang/sv/punctuation.py | from ..char_classes import (
ALPHA,
ALPHA_LOWER,
ALPHA_UPPER,
CONCAT_QUOTES,
LIST_ELLIPSES,
LIST_ICONS,
)
from ..punctuation import TOKENIZER_SUFFIXES
_quotes = CONCAT_QUOTES.replace("'", "")
_infixes = (
LIST_ELLIPSES
+ LIST_ICONS
+ [
r"(?<=[{al}])\.(?=[{au}])".format(al=ALPHA_LOWER, au=ALPHA_UPPER),
r"(?<=[{a}])[,!?](?=[{a}])".format(a=ALPHA),
r"(?<=[{a}])[<>=](?=[{a}])".format(a=ALPHA),
r"(?<=[{a}]):(?=[{a}])".format(a=ALPHA_UPPER),
r"(?<=[{a}]),(?=[{a}])".format(a=ALPHA),
r"(?<=[{a}])([{q}\)\]\(\[])(?=[{a}])".format(a=ALPHA, q=_quotes),
r"(?<=[{a}])--(?=[{a}])".format(a=ALPHA),
r"(?<=[{a}0-9])[<>=/](?=[{a}])".format(a=ALPHA),
r"(?<=[{a}0-9]):(?=[{a}])".format(a=ALPHA_UPPER),
]
)
_suffixes = [
suffix
for suffix in TOKENIZER_SUFFIXES
if suffix not in ["'s", "'S", "’s", "’S", r"\'"]
]
_suffixes += [r"(?<=[^sSxXzZ])\'"]
TOKENIZER_INFIXES = _infixes
TOKENIZER_SUFFIXES = _suffixes
| 1,021 | 25.205128 | 74 | py |
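The suffix tweak above removes the English possessive suffixes and instead splits a trailing apostrophe only when it does not follow s, x or z, matching Swedish genitive conventions. A sketch of checking the compiled rules directly with spaCy's public regex utility; the sample strings are arbitrary:

```python
# Sketch: compile the Swedish suffix rules the same way the language defaults do internally.
from spacy.lang.sv.punctuation import TOKENIZER_SUFFIXES
from spacy.util import compile_suffix_regex

suffix_search = compile_suffix_regex(TOKENIZER_SUFFIXES).search
print(bool(suffix_search("Anna'")))  # apostrophe after a vowel -> treated as a suffix
print(bool(suffix_search("Lars'")))  # apostrophe after "s" -> kept on the token
```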
spaCy | spaCy-master/spacy/lang/sv/stop_words.py | STOP_WORDS = set(
"""
aderton adertonde adjö aldrig alla allas allt alltid alltså än andra andras
annan annat ännu artonde arton åtminstone att åtta åttio åttionde åttonde av
även
båda bådas bakom bara bäst bättre behöva behövas behövde behövt beslut beslutat
beslutit bland blev bli blir blivit bort borta bra
då dag dagar dagarna dagen där därför de del delen dem den deras dess det detta
dig din dina dit ditt dock du
efter eftersom elfte eller elva en enkel enkelt enkla enligt er era ert ett
ettusen
få fanns får fått fem femte femtio femtionde femton femtonde fick fin finnas
finns fjärde fjorton fjortonde fler flera flesta följande för före förlåt förra
första fram framför från fyra fyrtio fyrtionde
gå gälla gäller gällt går gärna gått genast genom gick gjorde gjort god goda
godare godast gör göra gott
ha hade haft han hans har här heller hellre helst helt henne hennes hit hög
höger högre högst hon honom hundra hundraen hundraett hur
i ibland idag igår igen imorgon in inför inga ingen ingenting inget innan inne
inom inte inuti
ja jag jämfört
kan kanske knappast kom komma kommer kommit kr kunde kunna kunnat kvar
länge längre långsam långsammare långsammast långsamt längst långt lätt lättare
lättast legat ligga ligger lika likställd likställda lilla lite liten litet
man många måste med mellan men mer mera mest mig min mina mindre minst mitt
mittemot möjlig möjligen möjligt möjligtvis mot mycket
någon någonting något några när nästa ned nederst nedersta nedre nej ner ni nio
nionde nittio nittionde nitton nittonde nödvändig nödvändiga nödvändigt
nödvändigtvis nog noll nr nu nummer
och också ofta oftast olika olikt om oss
över övermorgon överst övre
på
rakt rätt redan
så sade säga säger sagt samma sämre sämst sedan senare senast sent sex sextio
sextionde sexton sextonde sig sin sina sist sista siste sitt sjätte sju sjunde
sjuttio sjuttionde sjutton sjuttonde ska skall skulle slutligen små smått snart
som stor stora större störst stort
tack tidig tidigare tidigast tidigt till tills tillsammans tio tionde tjugo
tjugoen tjugoett tjugonde tjugotre tjugotvå tjungo tolfte tolv tre tredje
trettio trettionde tretton trettonde två tvåhundra
under upp ur ursäkt ut utan utanför ute
vad vänster vänstra var vår vara våra varför varifrån varit varken värre
varsågod vart vårt vem vems verkligen vi vid vidare viktig viktigare viktigast
viktigt vilka vilken vilket vill
""".split()
)
| 2,428 | 35.253731 | 79 | py |
spaCy | spaCy-master/spacy/lang/sv/syntax_iterators.py | from typing import Iterator, Tuple, Union
from ...errors import Errors
from ...symbols import NOUN, PRON, PROPN
from ...tokens import Doc, Span
def noun_chunks(doclike: Union[Doc, Span]) -> Iterator[Tuple[int, int, int]]:
"""Detect base noun phrases from a dependency parse. Works on Doc and Span."""
# fmt: off
labels = ["nsubj", "nsubj:pass", "dobj", "obj", "iobj", "ROOT", "appos", "nmod", "nmod:poss"]
# fmt: on
doc = doclike.doc # Ensure works on both Doc and Span.
if not doc.has_annotation("DEP"):
raise ValueError(Errors.E029)
np_deps = [doc.vocab.strings[label] for label in labels]
conj = doc.vocab.strings.add("conj")
np_label = doc.vocab.strings.add("NP")
prev_end = -1
for i, word in enumerate(doclike):
if word.pos not in (NOUN, PROPN, PRON):
continue
# Prevent nested chunks from being produced
if word.left_edge.i <= prev_end:
continue
if word.dep in np_deps:
prev_end = word.right_edge.i
yield word.left_edge.i, word.right_edge.i + 1, np_label
elif word.dep == conj:
head = word.head
while head.dep == conj and head.head.i < head.i:
head = head.head
# If the head is an NP, and we're coordinated to it, we're an NP
if head.dep in np_deps:
prev_end = word.right_edge.i
yield word.left_edge.i, word.right_edge.i + 1, np_label
SYNTAX_ITERATORS = {"noun_chunks": noun_chunks}
| 1,531 | 37.3 | 97 | py |
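The `noun_chunks` iterator above is what `Doc.noun_chunks` calls for Swedish. Since it requires dependency annotation, the sketch below assumes a trained Swedish pipeline such as `sv_core_news_sm` is installed; the sentence is arbitrary:

```python
# Sketch: iterate over base noun phrases produced by the Swedish syntax iterator.
import spacy

nlp = spacy.load("sv_core_news_sm")  # assumed to be installed
doc = nlp("Den gamla katten sover på den röda stolen.")
for chunk in doc.noun_chunks:
    print(chunk.text, chunk.root.dep_)
```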
spaCy | spaCy-master/spacy/lang/sv/tokenizer_exceptions.py | from ...symbols import NORM, ORTH
from ...util import update_exc
from ..tokenizer_exceptions import BASE_EXCEPTIONS
_exc = {}
# Verbs
for verb_data in [
{ORTH: "driver"},
{ORTH: "kör"},
{ORTH: "hörr"},
{ORTH: "fattar"},
{ORTH: "hajar"},
{ORTH: "lever"},
{ORTH: "serr"},
{ORTH: "fixar"},
]:
verb_data_tc = dict(verb_data)
verb_data_tc[ORTH] = verb_data_tc[ORTH].title()
for data in [verb_data, verb_data_tc]:
_exc[data[ORTH] + "u"] = [data, {ORTH: "u", NORM: "du"}]
# Abbreviations for weekdays "sön." (for "söndag" / "söner")
# are left out because they are ambiguous. The same is the case
# for abbreviations "jul." and "Jul." ("juli" / "jul").
for exc_data in [
{ORTH: "jan.", NORM: "januari"},
{ORTH: "febr.", NORM: "februari"},
{ORTH: "feb.", NORM: "februari"},
{ORTH: "apr.", NORM: "april"},
{ORTH: "jun.", NORM: "juni"},
{ORTH: "aug.", NORM: "augusti"},
{ORTH: "sept.", NORM: "september"},
{ORTH: "sep.", NORM: "september"},
{ORTH: "okt.", NORM: "oktober"},
{ORTH: "nov.", NORM: "november"},
{ORTH: "dec.", NORM: "december"},
{ORTH: "mån.", NORM: "måndag"},
{ORTH: "tis.", NORM: "tisdag"},
{ORTH: "ons.", NORM: "onsdag"},
{ORTH: "tors.", NORM: "torsdag"},
{ORTH: "fre.", NORM: "fredag"},
{ORTH: "lör.", NORM: "lördag"},
{ORTH: "Jan.", NORM: "Januari"},
{ORTH: "Febr.", NORM: "Februari"},
{ORTH: "Feb.", NORM: "Februari"},
{ORTH: "Apr.", NORM: "April"},
{ORTH: "Jun.", NORM: "Juni"},
{ORTH: "Aug.", NORM: "Augusti"},
{ORTH: "Sept.", NORM: "September"},
{ORTH: "Sep.", NORM: "September"},
{ORTH: "Okt.", NORM: "Oktober"},
{ORTH: "Nov.", NORM: "November"},
{ORTH: "Dec.", NORM: "December"},
{ORTH: "Mån.", NORM: "Måndag"},
{ORTH: "Tis.", NORM: "Tisdag"},
{ORTH: "Ons.", NORM: "Onsdag"},
{ORTH: "Tors.", NORM: "Torsdag"},
{ORTH: "Fre.", NORM: "Fredag"},
{ORTH: "Lör.", NORM: "Lördag"},
{ORTH: "sthlm", NORM: "Stockholm"},
{ORTH: "gbg", NORM: "Göteborg"},
]:
_exc[exc_data[ORTH]] = [exc_data]
# Specific case abbreviations only
for orth in ["AB", "Dr.", "H.M.", "H.K.H.", "m/s", "M/S", "Ph.d.", "S:t", "s:t"]:
_exc[orth] = [{ORTH: orth}]
ABBREVIATIONS = [
"ang",
"anm",
"bl.a",
"d.v.s",
"doc",
"dvs",
"e.d",
"e.kr",
"el.",
"eng",
"etc",
"exkl",
"ev",
"f.",
"f.d",
"f.kr",
"f.n",
"f.ö",
"fid",
"fig",
"forts",
"fr.o.m",
"förf",
"inkl",
"iofs",
"jur.",
"kap",
"kl",
"kor.",
"kr",
"kungl",
"lat",
"m.a.o",
"m.fl",
"m.m",
"max",
"milj",
"min.",
"mos",
"mt",
"mvh",
"o.d",
"o.s.v",
"obs",
"osv",
"p.g.a",
"proc",
"prof",
"ref",
"resp",
"s.a.s",
"s.k",
"s.t",
"sid",
"t.ex",
"t.h",
"t.o.m",
"t.v",
"tel",
"ung.",
"vol",
"v.",
"äv",
"övers",
]
# Also add a variant with a trailing period for each abbreviation. If the
# abbreviation already ends with a period, it is skipped. Iterate over a copy
# so the list is not mutated while it is being traversed.
for abbr in list(ABBREVIATIONS):
    if not abbr.endswith("."):
        ABBREVIATIONS.append(abbr + ".")
for orth in ABBREVIATIONS:
_exc[orth] = [{ORTH: orth}]
capitalized = orth.capitalize()
_exc[capitalized] = [{ORTH: capitalized}]
# Tokens like "i." at the end of a sentence (as in "... peka i.") and "m."
# (as in "... än 2000 m.") should be tokenized as two separate tokens.
for orth in ["i", "m"]:
_exc[orth + "."] = [{ORTH: orth, NORM: orth}, {ORTH: "."}]
TOKENIZER_EXCEPTIONS = update_exc(BASE_EXCEPTIONS, _exc)
| 3,636 | 22.165605 | 114 | py |
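The exceptions above split colloquial verb + "u" contractions into the verb plus a token normalized to "du", while keeping abbreviations such as "t.ex." intact. A small sketch with a made-up sentence:

```python
# Sketch: "Fattaru" splits into "Fattar" + "u" (NORM "du"); "t.ex." stays one token.
import spacy

nlp = spacy.blank("sv")
doc = nlp("Fattaru vad jag menar, t.ex. nu?")
print([token.text for token in doc])
```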
spaCy | spaCy-master/spacy/lang/ta/__init__.py | from ...language import BaseDefaults, Language
from .lex_attrs import LEX_ATTRS
from .stop_words import STOP_WORDS
class TamilDefaults(BaseDefaults):
lex_attr_getters = LEX_ATTRS
stop_words = STOP_WORDS
class Tamil(Language):
lang = "ta"
Defaults = TamilDefaults
__all__ = ["Tamil"]
| 305 | 17 | 46 | py |
spaCy | spaCy-master/spacy/lang/ta/examples.py | """
Example sentences to test spaCy and its language models.
>>> from spacy.lang.ta.examples import sentences
>>> docs = nlp.pipe(sentences)
"""
sentences = [
"கிறிஸ்துமஸ் மற்றும் இனிய புத்தாண்டு வாழ்த்துக்கள்",
"எனக்கு என் குழந்தைப் பருவம் நினைவிருக்கிறது",
"உங்கள் பெயர் என்ன?",
"ஏறத்தாழ இலங்கைத் தமிழரில் மூன்றிலொரு பங்கினர் இலங்கையை விட்டு வெளியேறிப் பிற நாடுகளில் வாழ்கின்றனர்",
"இந்த ஃபோனுடன் சுமார் ரூ.2,990 மதிப்புள்ள போட் ராக்கர்ஸ் நிறுவனத்தின் ஸ்போர்ட் புளூடூத் ஹெட்போன்ஸ் இலவசமாக வழங்கப்படவுள்ளது.",
"மட்டக்களப்பில் பல இடங்களில் வீட்டுத் திட்டங்களுக்கு இன்று அடிக்கல் நாட்டல்",
"ஐ போன்க்கு முகத்தை வைத்து அன்லாக் செய்யும் முறை மற்றும் விரலால் தொட்டு அன்லாக் செய்யும் முறையை வாட்ஸ் ஆப் நிறுவனம் இதற்கு முன் கண்டுபிடித்தது",
"இது ஒரு வாக்கியம்.",
"ஆப்பிள் நிறுவனம் யு.கே. தொடக்க நிறுவனத்தை ஒரு லட்சம் கோடிக்கு வாங்கப் பார்க்கிறது",
"தன்னாட்சி கார்கள் காப்பீட்டு பொறுப்பை உற்பத்தியாளரிடம் மாற்றுகின்றன",
"நடைபாதை விநியோக ரோபோக்களை தடை செய்வதை சான் பிரான்சிஸ்கோ கருதுகிறது",
"லண்டன் ஐக்கிய இராச்சியத்தில் ஒரு பெரிய நகரம்.",
"என்ன வேலை செய்கிறீர்கள்?",
"எந்த கல்லூரியில் படிக்கிறாய்?",
]
| 1,155 | 45.24 | 149 | py |
spaCy | spaCy-master/spacy/lang/ta/lex_attrs.py | from ...attrs import LIKE_NUM
_numeral_suffixes = {"பத்து": "பது", "ற்று": "று", "ரத்து": "ரம்", "சத்து": "சம்"}
_num_words = [
"பூச்சியம்",
"ஒரு",
"ஒன்று",
"இரண்டு",
"மூன்று",
"நான்கு",
"ஐந்து",
"ஆறு",
"ஏழு",
"எட்டு",
"ஒன்பது",
"பத்து",
"பதினொன்று",
"பன்னிரண்டு",
"பதின்மூன்று",
"பதினான்கு",
"பதினைந்து",
"பதினாறு",
"பதினேழு",
"பதினெட்டு",
"பத்தொன்பது",
"இருபது",
"முப்பது",
"நாற்பது",
"ஐம்பது",
"அறுபது",
"எழுபது",
"எண்பது",
"தொண்ணூறு",
"நூறு",
"இருநூறு",
"முன்னூறு",
"நாநூறு",
"ஐநூறு",
"அறுநூறு",
"எழுநூறு",
"எண்ணூறு",
"தொள்ளாயிரம்",
"ஆயிரம்",
"ஒராயிரம்",
"லட்சம்",
"மில்லியன்",
"கோடி",
"பில்லியன்",
"டிரில்லியன்",
]
# Numbers 20-89, 90-899, 900-99999 and above take different numeral suffixes
def suffix_filter(text):
    # Normalize a trailing numeral suffix to its base form so the word can be
    # matched against _num_words
for num_suffix in _numeral_suffixes.keys():
length = len(num_suffix)
if len(text) < length:
break
elif text.endswith(num_suffix):
return text[:-length] + _numeral_suffixes[num_suffix]
return text
def like_num(text):
text = text.replace(",", "").replace(".", "")
if text.isdigit():
return True
if text.count("/") == 1:
num, denom = text.split("/")
if num.isdigit() and denom.isdigit():
return True
if text.lower() in _num_words:
return True
elif suffix_filter(text) in _num_words:
return True
return False
LEX_ATTRS = {LIKE_NUM: like_num}
| 1,601 | 18.777778 | 82 | py |
spaCy | spaCy-master/spacy/lang/ta/stop_words.py | # Stop words
STOP_WORDS = set(
"""
ஒரு
என்று
மற்றும்
இந்த
இது
என்ற
கொண்டு
என்பது
பல
ஆகும்
அல்லது
அவர்
நான்
உள்ள
அந்த
இவர்
என
முதல்
என்ன
இருந்து
சில
என்
போன்ற
வேண்டும்
வந்து
இதன்
அது
அவன்
தான்
பலரும்
என்னும்
மேலும்
பின்னர்
கொண்ட
இருக்கும்
தனது
உள்ளது
போது
என்றும்
அதன்
தன்
பிறகு
அவர்கள்
வரை
அவள்
நீ
ஆகிய
இருந்தது
உள்ளன
வந்த
இருந்த
மிகவும்
இங்கு
மீது
ஓர்
இவை
இந்தக்
பற்றி
வரும்
வேறு
இரு
இதில்
போல்
இப்போது
அவரது
மட்டும்
இந்தப்
எனும்
மேல்
பின்
சேர்ந்த
ஆகியோர்
எனக்கு
இன்னும்
அந்தப்
அன்று
ஒரே
மிக
அங்கு
பல்வேறு
விட்டு
பெரும்
அதை
பற்றிய
உன்
அதிக
அந்தக்
பேர்
இதனால்
அவை
அதே
ஏன்
முறை
யார்
என்பதை
எல்லாம்
மட்டுமே
இங்கே
அங்கே
இடம்
இடத்தில்
அதில்
நாம்
அதற்கு
எனவே
பிற
சிறு
மற்ற
விட
எந்த
எனவும்
எனப்படும்
எனினும்
அடுத்த
இதனை
இதை
கொள்ள
இந்தத்
இதற்கு
அதனால்
தவிர
போல
வரையில்
சற்று
எனக்
""".split()
)
| 792 | 5.007576 | 17 | py |
spaCy | spaCy-master/spacy/lang/te/__init__.py | from ...language import BaseDefaults, Language
from .lex_attrs import LEX_ATTRS
from .stop_words import STOP_WORDS
class TeluguDefaults(BaseDefaults):
lex_attr_getters = LEX_ATTRS
stop_words = STOP_WORDS
class Telugu(Language):
lang = "te"
Defaults = TeluguDefaults
__all__ = ["Telugu"]
| 309 | 17.235294 | 46 | py |
spaCy | spaCy-master/spacy/lang/te/examples.py | """
Example sentences to test spaCy and its language models.
>>> from spacy.lang.te import Telugu
>>> nlp = Telugu()
>>> from spacy.lang.te.examples import sentences
>>> docs = nlp.pipe(sentences)
"""
sentences = [
"ఆపిల్ 1 బిలియన్ డాలర్స్ కి యూ.కె. స్టార్ట్అప్ ని కొనాలని అనుకుంటుంది.",
"ఆటోనోమోస్ కార్లు భీమా బాధ్యతను తయారీదారులపైకి మళ్లిస్తాయి.",
"సాన్ ఫ్రాన్సిస్కో కాలిబాట డెలివరీ రోబోట్లను నిషేధించడానికి ఆలోచిస్తుంది.",
"లండన్ యునైటెడ్ కింగ్డమ్ లో పెద్ద సిటీ.",
"నువ్వు ఎక్కడ ఉన్నావ్?",
"ఫ్రాన్స్ అధ్యక్షుడు ఎవరు?",
"యునైటెడ్ స్టేట్స్ యొక్క రాజధాని ఏంటి?",
"బరాక్ ఒబామా ఎప్పుడు జన్మించారు?",
]
| 635 | 29.285714 | 79 | py |
spaCy | spaCy-master/spacy/lang/te/lex_attrs.py | from ...attrs import LIKE_NUM
_num_words = [
"సున్నా",
"శూన్యం",
"ఒకటి",
"రెండు",
"మూడు",
"నాలుగు",
"ఐదు",
"ఆరు",
"ఏడు",
"ఎనిమిది",
"తొమ్మిది",
"పది",
"పదకొండు",
"పన్నెండు",
"పదమూడు",
"పద్నాలుగు",
"పదిహేను",
"పదహారు",
"పదిహేడు",
"పద్దెనిమిది",
"పందొమ్మిది",
"ఇరవై",
"ముప్పై",
"నలభై",
"యాభై",
"అరవై",
"డెబ్బై",
"ఎనభై",
"తొంబై",
"వంద",
"నూరు",
"వెయ్యి",
"లక్ష",
"కోటి",
]
def like_num(text):
text = text.replace(",", "").replace(".", "")
if text.isdigit():
return True
if text.count("/") == 1:
num, denom = text.split("/")
if num.isdigit() and denom.isdigit():
return True
if text.lower() in _num_words:
return True
return False
LEX_ATTRS = {LIKE_NUM: like_num}
| 873 | 14.890909 | 49 | py |
spaCy | spaCy-master/spacy/lang/te/stop_words.py | # Source: https://github.com/Xangis/extra-stopwords (MIT License)
STOP_WORDS = set(
"""
అందరూ
అందుబాటులో
అడగండి
అడగడం
అడ్డంగా
అనుగుణంగా
అనుమతించు
అనుమతిస్తుంది
అయితే
ఇప్పటికే
ఉన్నారు
ఎక్కడైనా
ఎప్పుడు
ఎవరైనా
ఎవరో ఒకరు
ఏ
ఏదైనా
ఏమైనప్పటికి
ఏమైనప్పటికి
ఒక
ఒక ప్రక్కన
కనిపిస్తాయి
కాదు
కాదు
కూడా
గా
గురించి
చుట్టూ
చేయగలిగింది
తగిన
తర్వాత
తర్వాత
దాదాపు
దూరంగా
నిజంగా
పై
ప్రకారం
మధ్య
మధ్య
మరియు
మరొక
మళ్ళీ
మాత్రమే
మెచ్చుకో
వద్ద
వద్ద
వెంట
వేరుగా
వ్యతిరేకంగా
సంబంధం
""".split()
)
| 475 | 7.350877 | 65 | py |
spaCy | spaCy-master/spacy/lang/th/__init__.py | from ...language import BaseDefaults, Language
from ...tokens import Doc
from ...util import DummyTokenizer, load_config_from_str, registry
from ...vocab import Vocab
from .lex_attrs import LEX_ATTRS
from .stop_words import STOP_WORDS
DEFAULT_CONFIG = """
[nlp]
[nlp.tokenizer]
@tokenizers = "spacy.th.ThaiTokenizer"
"""
@registry.tokenizers("spacy.th.ThaiTokenizer")
def create_thai_tokenizer():
def thai_tokenizer_factory(nlp):
return ThaiTokenizer(nlp.vocab)
return thai_tokenizer_factory
class ThaiTokenizer(DummyTokenizer):
def __init__(self, vocab: Vocab) -> None:
try:
from pythainlp.tokenize import word_tokenize
except ImportError:
raise ImportError(
"The Thai tokenizer requires the PyThaiNLP library: "
"https://github.com/PyThaiNLP/pythainlp"
) from None
self.word_tokenize = word_tokenize
self.vocab = vocab
def __call__(self, text: str) -> Doc:
words = list(self.word_tokenize(text))
spaces = [False] * len(words)
return Doc(self.vocab, words=words, spaces=spaces)
class ThaiDefaults(BaseDefaults):
config = load_config_from_str(DEFAULT_CONFIG)
lex_attr_getters = LEX_ATTRS
stop_words = STOP_WORDS
class Thai(Language):
lang = "th"
Defaults = ThaiDefaults
__all__ = ["Thai"]
| 1,371 | 24.407407 | 69 | py |
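The custom `ThaiTokenizer` above delegates word segmentation to PyThaiNLP, so the usual rule-based tokenizer settings do not apply. A usage sketch, assuming the `pythainlp` package is installed; the sentence is arbitrary:

```python
# Sketch: spacy.blank("th") picks up the tokenizer registered under "spacy.th.ThaiTokenizer".
import spacy

nlp = spacy.blank("th")  # raises ImportError if pythainlp is not installed
doc = nlp("ผมรักประเทศไทย")
print([token.text for token in doc])
```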
spaCy | spaCy-master/spacy/lang/th/lex_attrs.py | from ...attrs import LIKE_NUM
_num_words = [
"ศูนย์",
"หนึ่ง",
"สอง",
"สาม",
"สี่",
"ห้า",
"หก",
"เจ็ด",
"แปด",
"เก้า",
"สิบ",
"สิบเอ็ด",
"ยี่สิบ",
"ยี่สิบเอ็ด",
"สามสิบ",
"สามสิบเอ็ด",
"สี่สิบ",
"สี่สิบเอ็ด",
"ห้าสิบ",
"ห้าสิบเอ็ด",
"หกสิบเอ็ด",
"เจ็ดสิบ",
"เจ็ดสิบเอ็ด",
"แปดสิบ",
"แปดสิบเอ็ด",
"เก้าสิบ",
"เก้าสิบเอ็ด",
"ร้อย",
"พัน",
"ล้าน",
"พันล้าน",
"หมื่นล้าน",
"แสนล้าน",
"ล้านล้าน",
"ล้านล้านล้าน",
"ล้านล้านล้านล้าน",
]
def like_num(text):
if text.startswith(("+", "-", "±", "~")):
text = text[1:]
text = text.replace(",", "").replace(".", "")
if text.isdigit():
return True
if text.count("/") == 1:
num, denom = text.split("/")
if num.isdigit() and denom.isdigit():
return True
if text in _num_words:
return True
return False
LEX_ATTRS = {LIKE_NUM: like_num}
| 996 | 15.898305 | 49 | py |
spaCy | spaCy-master/spacy/lang/th/stop_words.py | STOP_WORDS = set(
"""
ทั้งนี้ ดัง ขอ รวม หลังจาก เป็น หลัง หรือ ๆ เกี่ยวกับ ซึ่งได้แก่ ด้วยเพราะ ด้วยว่า ด้วยเหตุเพราะ
ด้วยเหตุว่า สุดๆ เสร็จแล้ว เช่น เข้า ถ้า ถูก ถึง ต่างๆ ใคร เปิดเผย ครา รือ ตาม ใน ได้แก่ ได้แต่
ได้ที่ ตลอดถึง นอกจากว่า นอกนั้น จริง อย่างดี ส่วน เพียงเพื่อ เดียว จัด ทั้งที ทั้งคน ทั้งตัว ไกลๆ
ถึงเมื่อใด คงจะ ถูกๆ เป็นที นับแต่ที่ นับแต่นั้น รับรอง ด้าน เป็นต้นมา ทุก กระทั่ง กระทำ จวบ ซึ่งก็ จะ
ครบครัน นับแต่ เยอะๆ เพียงไหน เปลี่ยนแปลง ไป่ ผ่านๆ เพื่อที่ รวมๆ กว้างขวาง เสียยิ่ง เปลี่ยน ผ่าน
ทรง ทว่า กันเถอะ เกี่ยวๆ ใดๆ ครั้งที่ ครั้งนั้น ครั้งนี้ ครั้งละ ครั้งหลัง ครั้งหลังสุด ร่วมกัน ร่วมด้วย ก็ตามที
ที่สุด ผิดๆ ยืนยง เยอะ ครั้งๆ ใครๆ นั่นเอง เสมือนว่า เสร็จ ตลอดศก ทั้งที่ ยืนยัน ด้วยที่ บัดนี้
ด้วยประการฉะนี้ ซึ่งกัน ตลอดทั่วถึง ตลอดทั่วทั้ง ตลอดปี เป็นการ นั่นแหละ พร้อม เถิด ทั้ง สืบเนื่อง ตั้งแต่
กลับ กล่าวคือ กลุ่มก้อน กลุ่มๆ ครั้งครา ส่ง รวดเร็ว เสร็จสิ้น เสีย เสียก่อน เสียจน อดีต ตั้ง เกิด อาจ
อีก ตลอดเวลา ภายหน้า ภายหลัง มอง มันๆ มองว่า มัก มักจะ มัน หาก คงอยู่ เป็นที่ เป็นที่สุด
เป็นเพราะเป็นเพราะว่า เกี่ยวกัน เพียงไร เป็นแต่เพียง กล่าว จนบัดนี้ เป็นอัน จน จนเมื่อ จนแม้ ใกล้
ใหม่ๆ เป็นเพียง อย่างที่ ถูกต้อง ทั้งนั้น ทั้งนั้นด้วย กันดีกว่า กันดีไหม นั่นไง ตรงๆ แยะๆ เป็นต้น ใกล้ๆ
ซึ่งๆ ด้วยกัน ดังเคย เถอะ เสมือนกับ ไป คือ ขณะนี้ นอกจาก เพื่อที่จะ ขณะหนึ่ง ขวาง ครัน อยาก ไว้
แบบ นอกจากนี้ เนื่องจาก เดียวกัน คง ให้มา อนึ่ง ก็แล้วแต่ ต้อง ข้าง เพื่อว่า จนแม้น ครั้งหนึ่ง อะไร ซึ่ง
เกินๆ ด้วยเหตุนั้น กันและกัน รับ ระหว่าง ครั้งไหน เสร็จกัน ถึงอย่างไร ขาด ข้าฯ เข้าใจ ครบ ครั้งใด
ครบถ้วน ระยะ ไม่ เกือบ เกือบจะ เกือบๆ แก่ แก อย่างโน้น ดังกับว่า จริงจัง เยอะแยะ นั่น ด้วย ถึงแม้ว่า
มาก ตลอดกาลนาน ตลอดระยะเวลา ตลอดจน ตลอดไป เป็นอันๆ เป็นอาทิ ก็ต่อเมื่อ สู่ เมื่อ เพื่อ ก็ กับ
ด้วยเหมือนกัน ด้วยเหตุนี้ ครั้งคราว ราย ร่วม เป็นอันมาก สูง รวมกัน รวมทั้ง ร่วมมือ เป็นเพียงว่า รวมถึง
ต่อ นะ กว้าง มา ครับ ตลอดทั้ง การ นั้นๆ น่า เป็นอันว่า เพราะ วัน จนขณะนี้ จนตลอด จนถึง ข้า อย่างใด
ไหนๆ ก่อนหน้านี้ ก่อนๆ สูงกว่า สูงส่ง สูงสุด สูงๆ เสียด้วย เสียนั่น เสียนี่ เสียนี่กระไร เสียนั่นเอง สุด
สําหรับ ว่า ลง ภายใต้ เพื่อให้ ภายนอก ภายใน เฉพาะ ซึ่งกันและกัน ง่าย ง่ายๆ ไง ถึงแม้จะ ถึงเมื่อไร
เกิน ก็ได้ คราใด คราที่ ตลอดวัน นับ ดังเก่า ดั่งเก่า หลาย หนึ่ง ถือว่า ก่อนหน้า นับตั้งแต่ จรด จริงๆ
จวน จวนเจียน ตลอดมา กลุ่ม กระนั้น ข้างๆ ตรง ข้าพเจ้า กว่า เกี่ยวเนื่อง ขึ้น ให้ไป ผล แต่ เอง เห็น
จึง ได้ ให้ โดย จริงๆจังๆ ดั่งกับว่า ทั้งนั้นเพราะ นอก นอกเหนือ น่ะ กันนะ ขณะเดียวกัน แยะ
นอกเหนือจาก น้อย ก่อน จวนจะ ข้างเคียง ก็ตามแต่ จรดกับ น้อยกว่า นั่นเป็น นักๆ ครั้งกระนั้น เลย ไกล
สิ้นกาลนาน ครั้ง รือว่า เก็บ อย่างเช่น บาง ดั่ง ดังกล่าว ดังกับ รึ รึว่า ออก แรก จง ยืนนาน ได้มา ตน
ตนเอง ได้รับ ระยะๆ กระผม กันไหม กันเอง กำลังจะ กำหนด กู กำลัง ความ แล้ว และ ต่าง อย่างน้อย
อย่างนั้น อย่างนี้ ก็คือ ก็แค่ ด้วยเหตุที่ ใหญ่ๆ ให้ดี ยัง เป็นเพื่อ ก็ตาม ผู้ ต่อกัน ถือ ซึ่งก็คือ ภายภาค
ภายภาคหน้า ก็ดี ก็จะ อยู่ เสียยิ่งนัก ใหม่ ขณะ เริ่ม เรา ขวางๆ เสียแล้ว ใคร่ ใคร่จะ ตนฯ ของ แห่ง
รวด ดั่งกับ ถึงเมื่อ น้อยๆ นับจากนั้น ตลอด ตลอดกาล เสร็จสมบูรณ์ เขียน กว้างๆ ยืนยาว ถึงแก่ ขณะใด
ขณะใดๆ ขณะที่ ขณะนั้น จนทั่ว ภาคฯ ภาย เป็นแต่ อย่าง พบ ภาค ให้แด่ เสียจนกระทั่ง เสียจนถึง
จนกระทั่ง จนกว่า ตลอดทั่ว เป็นๆ นอกจากนั้น ผิด ครั้งก่อน แก้ไข ขั้น กัน ช่วง จาก รวมด้วย เขา
ด้วยเช่นกัน นอกจากที่ เป็นต้นไป ข้างต้น ข้างบน ข้างล่าง ถึงจะ ถึงบัดนั้น ถึงแม้ มี ทาง เคย นับจากนี้
อย่างเดียว เกี่ยวข้อง นี้ นํา นั้น ที่ ทําให้ ทํา ครานั้น ครานี้ คราหนึ่ง คราไหน คราว คราวก่อน คราวใด
คราวที่ คราวนั้น คราวนี้ คราวโน้น คราวละ คราวหน้า คราวหนึ่ง คราวหลัง คราวไหน คราวๆ คล้าย
คล้ายกัน คล้ายกันกับ คล้ายกับ คล้ายกับว่า คล้ายว่า ควร ค่อน ค่อนข้าง ค่อนข้างจะ ค่อยไปทาง ค่อนมาทาง ค
่อย ค่อยๆ คะ ค่ะ คำ คิด คิดว่า คุณ คุณๆ เคยๆ แค่ แค่จะ แค่นั้น แค่นี้ แค่เพียง แค่ว่า แค่ไหน จังๆ
จวบกับ จวบจน จ้ะ จ๊ะ จะได้ จัง จัดการ จัดงาน จัดแจง จัดตั้ง จัดทำ จัดหา จัดให้ จับ จ้า จ๋า จากนั้น
จากนี้ จากนี้ไป จำ จำเป็น จำพวก จึงจะ จึงเป็น จู่ๆ ฉะนั้น ฉะนี้ ฉัน เฉกเช่น เฉย เฉยๆ ไฉน ช่วงก่อน ช
่วงต่อไป ช่วงถัดไป ช่วงท้าย ช่วงที่ ช่วงนั้น ช่วงนี้ ช่วงระหว่าง ช่วงแรก ช่วงหน้า ช่วงหลัง ช่วงๆ ช่วย ช้า
ช้านาน ชาว ช้าๆ เช่นก่อน เช่นกัน เช่นเคย เช่นดัง เช่นดังก่อน เช่นดังเก่า เช่นดังที่ เช่นดังว่า
เช่นเดียวกัน เช่นเดียวกับ เช่นใด เช่นที่ เช่นที่เคย เช่นที่ว่า เช่นนั้น เช่นนั้นเอง เช่นนี้ เช่นเมื่อ เช่นไร
เชื่อ เชื่อถือ เชื่อมั่น เชื่อว่า ใช่ ใช้ ซะ ซะก่อน ซะจน ซะจนกระทั่ง ซะจนถึง ดั่งเคย ต่างก็ ต่างหาก
ตามด้วย ตามแต่ ตามที่ ตามๆ เต็มไปด้วย เต็มไปหมด เต็มๆ แต่ก็ แต่ก่อน แต่จะ แต่เดิม แต่ต้อง แต่ถ้า
แต่ทว่า แต่ที่ แต่นั้น แต่เพียง แต่เมื่อ แต่ไร แต่ละ แต่ว่า แต่ไหน แต่อย่างใด โต โตๆ ใต้ ถ้าจะ ถ้าหาก
ทั้งปวง ทั้งเป็น ทั้งมวล ทั้งสิ้น ทั้งหมด ทั้งหลาย ทั้งๆ ทัน ทันใดนั้น ทันที ทันทีทันใด ทั่ว ทำให้ ทำๆ ที ที่จริง
ที่ซึ่ง ทีเดียว ทีใด ที่ใด ที่ได้ ทีเถอะ ที่แท้ ที่แท้จริง ที่นั้น ที่นี้ ทีไร ทีละ ที่ละ ที่แล้ว ที่ว่า ที่แห่งนั้น ทีๆ ที่ๆ
ทุกคน ทุกครั้ง ทุกครา ทุกคราว ทุกชิ้น ทุกตัว ทุกทาง ทุกที ทุกที่ ทุกเมื่อ ทุกวัน ทุกวันนี้ ทุกสิ่ง ทุกหน ทุกแห่ง
ทุกอย่าง ทุกอัน ทุกๆ เท่า เท่ากัน เท่ากับ เท่าใด เท่าที่ เท่านั้น เท่านี้ แท้ แท้จริง เธอ นั้นไว นับแต่นี้
นาง นางสาว น่าจะ นาน นานๆ นาย นำ นำพา นำมา นิด นิดหน่อย นิดๆ นี่ นี่ไง นี่นา นี่แน่ะ นี่แหละ นี้แหล่
นี่เอง นี้เอง นู่น นู้น เน้น เนี่ย เนี่ยเอง ในช่วง ในที่ ในเมื่อ ในระหว่าง บน บอก บอกแล้ว บอกว่า บ่อย
บ่อยกว่า บ่อยครั้ง บ่อยๆ บัดดล บัดเดี๋ยวนี้ บัดนั้น บ้าง บางกว่า บางขณะ บางครั้ง บางครา บางคราว
บางที บางที่ บางแห่ง บางๆ ปฏิบัติ ประกอบ ประการ ประการฉะนี้ ประการใด ประการหนึ่ง ประมาณ
ประสบ ปรับ ปรากฏ ปรากฏว่า ปัจจุบัน ปิด เป็นด้วย เป็นดัง ผู้ใด เผื่อ เผื่อจะ เผื่อที่ เผื่อว่า ฝ่าย ฝ่ายใด
พบว่า พยายาม พร้อมกัน พร้อมกับ พร้อมด้วย พร้อมทั้ง พร้อมที่ พร้อมเพียง พวก พวกกัน พวกกู พวกแก
พวกเขา พวกคุณ พวกฉัน พวกท่าน พวกที่ พวกเธอ พวกนั้น พวกนี้ พวกนู้น พวกโน้น พวกมัน พวกมึง พอ พอกัน
พอควร พอจะ พอดี พอตัว พอที พอที่ พอเพียง พอแล้ว พอสม พอสมควร พอเหมาะ พอๆ พา พึง พึ่ง พื้นๆ พูด
เพราะฉะนั้น เพราะว่า เพิ่ง เพิ่งจะ เพิ่ม เพิ่มเติม เพียง เพียงแค่ เพียงใด เพียงแต่ เพียงพอ เพียงเพราะ
มากกว่า มากมาย มิ มิฉะนั้น มิใช่ มิได้ มีแต่ มึง มุ่ง มุ่งเน้น มุ่งหมาย เมื่อก่อน เมื่อครั้ง เมื่อครั้งก่อน
เมื่อคราวก่อน เมื่อคราวที่ เมื่อคราว เมื่อคืน เมื่อเช้า เมื่อใด เมื่อนั้น เมื่อนี้ เมื่อเย็น เมื่อวันวาน เมื่อวาน
แม้ แม้กระทั่ง แม้แต่ แม้นว่า แม้ว่า ไม่ค่อย ไม่ค่อยจะ ไม่ค่อยเป็น ไม่ใช่ ไม่เป็นไร ไม่ว่า ยก ยกให้ ยอม
ยอมรับ ย่อม ย่อย ยังคง ยังงั้น ยังงี้ ยังโง้น ยังไง ยังจะ ยังแต่ ยาก ยาว ยาวนาน ยิ่ง ยิ่งกว่า ยิ่งขึ้น
ยิ่งขึ้นไป ยิ่งจน ยิ่งจะ ยิ่งนัก ยิ่งเมื่อ ยิ่งแล้ว ยิ่งใหญ่ เร็ว เร็วๆ เราๆ เรียก เรียบ เรื่อย เรื่อยๆ ล้วน
ล้วนจน ล้วนแต่ ละ ล่าสุด เล็ก เล็กน้อย เล็กๆ เล่าว่า แล้วกัน แล้วแต่ แล้วเสร็จ วันใด วันนั้น วันนี้ วันไหน
สบาย สมัย สมัยก่อน สมัยนั้น สมัยนี้ สมัยโน้น ส่วนเกิน ส่วนด้อย ส่วนดี ส่วนใด ส่วนที่ ส่วนน้อย ส่วนนั้น ส
่วนมาก ส่วนใหญ่ สั้น สั้นๆ สามารถ สำคัญ สิ่ง สิ่งใด สิ่งนั้น สิ่งนี้ สิ่งไหน สิ้น แสดง แสดงว่า หน หนอ หนอย
หน่อย หมด หมดกัน หมดสิ้น หากแม้ หากแม้น หากแม้นว่า หากว่า หาความ หาใช่ หารือ เหตุ เหตุผล เหตุนั้น
เหตุนี้ เหตุไร เห็นแก่ เห็นควร เห็นจะ เห็นว่า เหลือ เหลือเกิน เหล่า เหล่านั้น เหล่านี้ แห่งใด แห่งนั้น
แห่งนี้ แห่งโน้น แห่งไหน แหละ ให้แก่ ใหญ่ ใหญ่โต อย่างมาก อย่างยิ่ง อย่างไรก็ อย่างไรก็ได้ อย่างไรเสีย
อย่างละ อย่างหนึ่ง อย่างๆ อัน อันจะ อันได้แก่ อันที่ อันที่จริง อันที่จะ อันเนื่องมาจาก อันละ อันๆ อาจจะ
อาจเป็น อาจเป็นด้วย อื่น อื่นๆ เอ็ง เอา ฯ ฯล ฯลฯ 555 กำ ขอโทษ เยี่ยม นี่คือ
""".split()
)
| 7,187 | 93.578947 | 125 | py |
spaCy | spaCy-master/spacy/lang/th/tokenizer_exceptions.py | from ...symbols import ORTH
_exc = {
# หน่วยงานรัฐ / government agency
"กกต.": [{ORTH: "กกต."}],
"กทท.": [{ORTH: "กทท."}],
"กทพ.": [{ORTH: "กทพ."}],
"กบข.": [{ORTH: "กบข."}],
"กบว.": [{ORTH: "กบว."}],
"กปน.": [{ORTH: "กปน."}],
"กปภ.": [{ORTH: "กปภ."}],
"กปส.": [{ORTH: "กปส."}],
"กผม.": [{ORTH: "กผม."}],
"กฟน.": [{ORTH: "กฟน."}],
"กฟผ.": [{ORTH: "กฟผ."}],
"กฟภ.": [{ORTH: "กฟภ."}],
"ก.ช.น.": [{ORTH: "ก.ช.น."}],
"กยศ.": [{ORTH: "กยศ."}],
"ก.ล.ต.": [{ORTH: "ก.ล.ต."}],
"กศ.บ.": [{ORTH: "กศ.บ."}],
"กศน.": [{ORTH: "กศน."}],
"กสท.": [{ORTH: "กสท."}],
"กอ.รมน.": [{ORTH: "กอ.รมน."}],
"กร.": [{ORTH: "กร."}],
"ขสมก.": [{ORTH: "ขสมก."}],
"คตง.": [{ORTH: "คตง."}],
"ครม.": [{ORTH: "ครม."}],
"คมช.": [{ORTH: "คมช."}],
"ตชด.": [{ORTH: "ตชด."}],
"ตม.": [{ORTH: "ตม."}],
"ตร.": [{ORTH: "ตร."}],
"ททท.": [{ORTH: "ททท."}],
"ททบ.": [{ORTH: "ททบ."}],
"ทบ.": [{ORTH: "ทบ."}],
"ทร.": [{ORTH: "ทร."}],
"ทอ.": [{ORTH: "ทอ."}],
"ทอท.": [{ORTH: "ทอท."}],
"ธ.ก.ส.": [{ORTH: "ธ.ก.ส."}],
"ธปท.": [{ORTH: "ธปท."}],
"ธอส.": [{ORTH: "ธอส."}],
"นย.": [{ORTH: "นย."}],
"ปตท.": [{ORTH: "ปตท."}],
"ป.ป.ช.": [{ORTH: "ป.ป.ช."}],
"ป.ป.ส.": [{ORTH: "ป.ป.ส."}],
"บพร.": [{ORTH: "บพร."}],
"บย.": [{ORTH: "บย."}],
"พสวท.": [{ORTH: "พสวท."}],
"มอก.": [{ORTH: "มอก."}],
"ยธ.": [{ORTH: "ยธ."}],
"รพช.": [{ORTH: "รพช."}],
"รฟท.": [{ORTH: "รฟท."}],
"รฟม.": [{ORTH: "รฟม."}],
"ศธ.": [{ORTH: "ศธ."}],
"ศนธ.": [{ORTH: "ศนธ."}],
"สกจ.": [{ORTH: "สกจ."}],
"สกท.": [{ORTH: "สกท."}],
"สกว.": [{ORTH: "สกว."}],
"สคบ.": [{ORTH: "สคบ."}],
"สจร.": [{ORTH: "สจร."}],
"สตง.": [{ORTH: "สตง."}],
"สทท.": [{ORTH: "สทท."}],
"สทร.": [{ORTH: "สทร."}],
"สธ": [{ORTH: "สธ"}],
"สนช.": [{ORTH: "สนช."}],
"สนนท.": [{ORTH: "สนนท."}],
"สปก.": [{ORTH: "สปก."}],
"สปช.": [{ORTH: "สปช."}],
"สปอ.": [{ORTH: "สปอ."}],
"สพช.": [{ORTH: "สพช."}],
"สยช.": [{ORTH: "สยช."}],
"สวช.": [{ORTH: "สวช."}],
"สวท.": [{ORTH: "สวท."}],
"สวทช.": [{ORTH: "สวทช."}],
"สคช.": [{ORTH: "สคช."}],
"สสว.": [{ORTH: "สสว."}],
"สสส.": [{ORTH: "สสส."}],
"สสวท.": [{ORTH: "สสวท."}],
"อตก.": [{ORTH: "อตก."}],
"อบจ.": [{ORTH: "อบจ."}],
"อบต.": [{ORTH: "อบต."}],
"อปพร.": [{ORTH: "อปพร."}],
"อย.": [{ORTH: "อย."}],
"อ.ส.ม.ท.": [{ORTH: "อ.ส.ม.ท."}],
# มหาวิทยาลัย / สถานศึกษา / university / college
"มทส.": [{ORTH: "มทส."}],
"มธ.": [{ORTH: "มธ."}],
"ม.อ.": [{ORTH: "ม.อ."}],
"มทร.": [{ORTH: "มทร."}],
"มมส.": [{ORTH: "มมส."}],
"วท.": [{ORTH: "วท."}],
"สตม.": [{ORTH: "สตม."}],
# ยศ / rank
"ดร.": [{ORTH: "ดร."}],
"ด.ต.": [{ORTH: "ด.ต."}],
"จ.ต.": [{ORTH: "จ.ต."}],
"จ.ท.": [{ORTH: "จ.ท."}],
"จ.ส.ต.": [{ORTH: "จ.ส.ต."}],
"จสต.": [{ORTH: "จสต."}],
"จ.ส.ท.": [{ORTH: "จ.ส.ท."}],
"จ.ส.อ.": [{ORTH: "จ.ส.อ."}],
"จ.อ.": [{ORTH: "จ.อ."}],
"ทพญ.": [{ORTH: "ทพญ."}],
"ทนพ.": [{ORTH: "ทนพ."}],
"นจอ.": [{ORTH: "นจอ."}],
"น.ช.": [{ORTH: "น.ช."}],
"น.ญ.": [{ORTH: "น.ญ."}],
"น.ต.": [{ORTH: "น.ต."}],
"น.ท.": [{ORTH: "น.ท."}],
"นตท.": [{ORTH: "นตท."}],
"นนส.": [{ORTH: "นนส."}],
"นนร.": [{ORTH: "นนร."}],
"นนอ.": [{ORTH: "นนอ."}],
"นพ.": [{ORTH: "นพ."}],
"นพท.": [{ORTH: "นพท."}],
"นรจ.": [{ORTH: "นรจ."}],
"นรต.": [{ORTH: "นรต."}],
"นศพ.": [{ORTH: "นศพ."}],
"นศท.": [{ORTH: "นศท."}],
"น.สพ.": [{ORTH: "น.สพ."}],
"น.อ.": [{ORTH: "น.อ."}],
"บช.ก.": [{ORTH: "บช.ก."}],
"บช.น.": [{ORTH: "บช.น."}],
"ผกก.": [{ORTH: "ผกก."}],
"ผกก.ภ.": [{ORTH: "ผกก.ภ."}],
"ผจก.": [{ORTH: "ผจก."}],
"ผช.": [{ORTH: "ผช."}],
"ผชก.": [{ORTH: "ผชก."}],
"ผช.ผอ.": [{ORTH: "ผช.ผอ."}],
"ผญบ.": [{ORTH: "ผญบ."}],
"ผบ.": [{ORTH: "ผบ."}],
"ผบก.": [{ORTH: "ผบก."}],
"ผบก.น.": [{ORTH: "ผบก.น."}],
"ผบก.ป.": [{ORTH: "ผบก.ป."}],
"ผบก.ปค.": [{ORTH: "ผบก.ปค."}],
"ผบก.ปม.": [{ORTH: "ผบก.ปม."}],
"ผบก.ภ.": [{ORTH: "ผบก.ภ."}],
"ผบช.": [{ORTH: "ผบช."}],
"ผบช.ก.": [{ORTH: "ผบช.ก."}],
"ผบช.ตชด.": [{ORTH: "ผบช.ตชด."}],
"ผบช.น.": [{ORTH: "ผบช.น."}],
"ผบช.ภ.": [{ORTH: "ผบช.ภ."}],
"ผบ.ทบ.": [{ORTH: "ผบ.ทบ."}],
"ผบ.ตร.": [{ORTH: "ผบ.ตร."}],
"ผบ.ทร.": [{ORTH: "ผบ.ทร."}],
"ผบ.ทอ.": [{ORTH: "ผบ.ทอ."}],
"ผบ.ทสส.": [{ORTH: "ผบ.ทสส."}],
"ผวจ.": [{ORTH: "ผวจ."}],
"ผู้ว่าฯ": [{ORTH: "ผู้ว่าฯ"}],
"พ.จ.ต.": [{ORTH: "พ.จ.ต."}],
"พ.จ.ท.": [{ORTH: "พ.จ.ท."}],
"พ.จ.อ.": [{ORTH: "พ.จ.อ."}],
"พญ.": [{ORTH: "พญ."}],
"ฯพณฯ": [{ORTH: "ฯพณฯ"}],
"พ.ต.": [{ORTH: "พ.ต."}],
"พ.ท.": [{ORTH: "พ.ท."}],
"พ.อ.": [{ORTH: "พ.อ."}],
"พ.ต.อ.พิเศษ": [{ORTH: "พ.ต.อ.พิเศษ"}],
"พลฯ": [{ORTH: "พลฯ"}],
"พล.๑ รอ.": [{ORTH: "พล.๑ รอ."}],
"พล.ต.": [{ORTH: "พล.ต."}],
"พล.ต.ต.": [{ORTH: "พล.ต.ต."}],
"พล.ต.ท.": [{ORTH: "พล.ต.ท."}],
"พล.ต.อ.": [{ORTH: "พล.ต.อ."}],
"พล.ท.": [{ORTH: "พล.ท."}],
"พล.ปตอ.": [{ORTH: "พล.ปตอ."}],
"พล.ม.": [{ORTH: "พล.ม."}],
"พล.ม.๒": [{ORTH: "พล.ม.๒"}],
"พล.ร.ต.": [{ORTH: "พล.ร.ต."}],
"พล.ร.ท.": [{ORTH: "พล.ร.ท."}],
"พล.ร.อ.": [{ORTH: "พล.ร.อ."}],
"พล.อ.": [{ORTH: "พล.อ."}],
"พล.อ.ต.": [{ORTH: "พล.อ.ต."}],
"พล.อ.ท.": [{ORTH: "พล.อ.ท."}],
"พล.อ.อ.": [{ORTH: "พล.อ.อ."}],
"พ.อ.พิเศษ": [{ORTH: "พ.อ.พิเศษ"}],
"พ.อ.ต.": [{ORTH: "พ.อ.ต."}],
"พ.อ.ท.": [{ORTH: "พ.อ.ท."}],
"พ.อ.อ.": [{ORTH: "พ.อ.อ."}],
"ภกญ.": [{ORTH: "ภกญ."}],
"ม.จ.": [{ORTH: "ม.จ."}],
"มท1": [{ORTH: "มท1"}],
"ม.ร.ว.": [{ORTH: "ม.ร.ว."}],
"มล.": [{ORTH: "มล."}],
"ร.ต.": [{ORTH: "ร.ต."}],
"ร.ต.ต.": [{ORTH: "ร.ต.ต."}],
"ร.ต.ท.": [{ORTH: "ร.ต.ท."}],
"ร.ต.อ.": [{ORTH: "ร.ต.อ."}],
"ร.ท.": [{ORTH: "ร.ท."}],
"รมช.": [{ORTH: "รมช."}],
"รมต.": [{ORTH: "รมต."}],
"รมว.": [{ORTH: "รมว."}],
"รศ.": [{ORTH: "รศ."}],
"ร.อ.": [{ORTH: "ร.อ."}],
"ศ.": [{ORTH: "ศ."}],
"ส.ต.": [{ORTH: "ส.ต."}],
"ส.ต.ต.": [{ORTH: "ส.ต.ต."}],
"ส.ต.ท.": [{ORTH: "ส.ต.ท."}],
"ส.ต.อ.": [{ORTH: "ส.ต.อ."}],
"ส.ท.": [{ORTH: "ส.ท."}],
"สพ.": [{ORTH: "สพ."}],
"สพ.ญ.": [{ORTH: "สพ.ญ."}],
"สพ.ช.": [{ORTH: "สพ.ช."}],
"ส.อ.": [{ORTH: "ส.อ."}],
"อจ.": [{ORTH: "อจ."}],
"อจญ.": [{ORTH: "อจญ."}],
# วุฒิ / bachelor degree
"ป.": [{ORTH: "ป."}],
"ป.กศ.": [{ORTH: "ป.กศ."}],
"ป.กศ.สูง": [{ORTH: "ป.กศ.สูง"}],
"ปวช.": [{ORTH: "ปวช."}],
"ปวท.": [{ORTH: "ปวท."}],
"ปวส.": [{ORTH: "ปวส."}],
"ปทส.": [{ORTH: "ปทส."}],
"กษ.บ.": [{ORTH: "กษ.บ."}],
"กษ.ม.": [{ORTH: "กษ.ม."}],
"กษ.ด.": [{ORTH: "กษ.ด."}],
"ค.บ.": [{ORTH: "ค.บ."}],
"คศ.บ.": [{ORTH: "คศ.บ."}],
"คศ.ม.": [{ORTH: "คศ.ม."}],
"คศ.ด.": [{ORTH: "คศ.ด."}],
"ค.อ.บ.": [{ORTH: "ค.อ.บ."}],
"ค.อ.ม.": [{ORTH: "ค.อ.ม."}],
"ค.อ.ด.": [{ORTH: "ค.อ.ด."}],
"ทก.บ.": [{ORTH: "ทก.บ."}],
"ทก.ม.": [{ORTH: "ทก.ม."}],
"ทก.ด.": [{ORTH: "ทก.ด."}],
"ท.บ.": [{ORTH: "ท.บ."}],
"ท.ม.": [{ORTH: "ท.ม."}],
"ท.ด.": [{ORTH: "ท.ด."}],
"น.บ.": [{ORTH: "น.บ."}],
"น.ม.": [{ORTH: "น.ม."}],
"น.ด.": [{ORTH: "น.ด."}],
"นศ.บ.": [{ORTH: "นศ.บ."}],
"นศ.ม.": [{ORTH: "นศ.ม."}],
"นศ.ด.": [{ORTH: "นศ.ด."}],
"บช.บ.": [{ORTH: "บช.บ."}],
"บช.ม.": [{ORTH: "บช.ม."}],
"บช.ด.": [{ORTH: "บช.ด."}],
"บธ.บ.": [{ORTH: "บธ.บ."}],
"บธ.ม.": [{ORTH: "บธ.ม."}],
"บธ.ด.": [{ORTH: "บธ.ด."}],
"พณ.บ.": [{ORTH: "พณ.บ."}],
"พณ.ม.": [{ORTH: "พณ.ม."}],
"พณ.ด.": [{ORTH: "พณ.ด."}],
"พ.บ.": [{ORTH: "พ.บ."}],
"พ.ม.": [{ORTH: "พ.ม."}],
"พ.ด.": [{ORTH: "พ.ด."}],
"พธ.บ.": [{ORTH: "พธ.บ."}],
"พธ.ม.": [{ORTH: "พธ.ม."}],
"พธ.ด.": [{ORTH: "พธ.ด."}],
"พบ.บ.": [{ORTH: "พบ.บ."}],
"พบ.ม.": [{ORTH: "พบ.ม."}],
"พบ.ด.": [{ORTH: "พบ.ด."}],
"พย.บ.": [{ORTH: "พย.บ."}],
"พย.ม.": [{ORTH: "พย.ม."}],
"พย.ด.": [{ORTH: "พย.ด."}],
"พศ.บ.": [{ORTH: "พศ.บ."}],
"พศ.ม.": [{ORTH: "พศ.ม."}],
"พศ.ด.": [{ORTH: "พศ.ด."}],
"ภ.บ.": [{ORTH: "ภ.บ."}],
"ภ.ม.": [{ORTH: "ภ.ม."}],
"ภ.ด.": [{ORTH: "ภ.ด."}],
"ภ.สถ.บ.": [{ORTH: "ภ.สถ.บ."}],
"รป.บ.": [{ORTH: "รป.บ."}],
"รป.ม.": [{ORTH: "รป.ม."}],
"วท.บ.": [{ORTH: "วท.บ."}],
"วท.ม.": [{ORTH: "วท.ม."}],
"วท.ด.": [{ORTH: "วท.ด."}],
"ศ.บ.": [{ORTH: "ศ.บ."}],
"ศศ.บ.": [{ORTH: "ศศ.บ."}],
"ศษ.บ.": [{ORTH: "ศษ.บ."}],
"ศส.บ.": [{ORTH: "ศส.บ."}],
"สถ.บ.": [{ORTH: "สถ.บ."}],
"สถ.ม.": [{ORTH: "สถ.ม."}],
"สถ.ด.": [{ORTH: "สถ.ด."}],
"สพ.บ.": [{ORTH: "สพ.บ."}],
"อ.บ.": [{ORTH: "อ.บ."}],
"อ.ม.": [{ORTH: "อ.ม."}],
"อ.ด.": [{ORTH: "อ.ด."}],
# ปี / เวลา / year / time
"ชม.": [{ORTH: "ชม."}],
"จ.ศ.": [{ORTH: "จ.ศ."}],
"ค.ศ.": [{ORTH: "ค.ศ."}],
"ฮ.ศ.": [{ORTH: "ฮ.ศ."}],
"ว.ด.ป.": [{ORTH: "ว.ด.ป."}],
# ระยะทาง / distance
"ฮม.": [{ORTH: "ฮม."}],
"ดคม.": [{ORTH: "ดคม."}],
"ดม.": [{ORTH: "ดม."}],
"มม.": [{ORTH: "มม."}],
"ซม.": [{ORTH: "ซม."}],
"กม.": [{ORTH: "กม."}],
# น้ำหนัก / weight
"น.น.": [{ORTH: "น.น."}],
"ฮก.": [{ORTH: "ฮก."}],
"ดคก.": [{ORTH: "ดคก."}],
"ดก.": [{ORTH: "ดก."}],
"ซก.": [{ORTH: "ซก."}],
"มก.": [{ORTH: "มก."}],
"ก.": [{ORTH: "ก."}],
"กก.": [{ORTH: "กก."}],
# ปริมาตร / volume
"ฮล.": [{ORTH: "ฮล."}],
"ดคล.": [{ORTH: "ดคล."}],
"ดล.": [{ORTH: "ดล."}],
"ซล.": [{ORTH: "ซล."}],
"ล.": [{ORTH: "ล."}],
"กล.": [{ORTH: "กล."}],
"ลบ.": [{ORTH: "ลบ."}],
# พื้นที่ / area
"ตร.ซม.": [{ORTH: "ตร.ซม."}],
"ตร.ม.": [{ORTH: "ตร.ม."}],
"ตร.ว.": [{ORTH: "ตร.ว."}],
"ตร.กม.": [{ORTH: "ตร.กม."}],
# เดือน / month
"ม.ค.": [{ORTH: "ม.ค."}],
"ก.พ.": [{ORTH: "ก.พ."}],
"มี.ค.": [{ORTH: "มี.ค."}],
"เม.ย.": [{ORTH: "เม.ย."}],
"พ.ค.": [{ORTH: "พ.ค."}],
"มิ.ย.": [{ORTH: "มิ.ย."}],
"ก.ค.": [{ORTH: "ก.ค."}],
"ส.ค.": [{ORTH: "ส.ค."}],
"ก.ย.": [{ORTH: "ก.ย."}],
"ต.ค.": [{ORTH: "ต.ค."}],
"พ.ย.": [{ORTH: "พ.ย."}],
"ธ.ค.": [{ORTH: "ธ.ค."}],
# เพศ / gender
"ช.": [{ORTH: "ช."}],
"ญ.": [{ORTH: "ญ."}],
"ด.ช.": [{ORTH: "ด.ช."}],
"ด.ญ.": [{ORTH: "ด.ญ."}],
# ที่อยู่ / address
"ถ.": [{ORTH: "ถ."}],
"ต.": [{ORTH: "ต."}],
"อ.": [{ORTH: "อ."}],
"จ.": [{ORTH: "จ."}],
# สรรพนาม / pronoun
"ข้าฯ": [{ORTH: "ข้าฯ"}],
"ทูลเกล้าฯ": [{ORTH: "ทูลเกล้าฯ"}],
"น้อมเกล้าฯ": [{ORTH: "น้อมเกล้าฯ"}],
"โปรดเกล้าฯ": [{ORTH: "โปรดเกล้าฯ"}],
# การเมือง / politic
"ขจก.": [{ORTH: "ขจก."}],
"ขบด.": [{ORTH: "ขบด."}],
"นปช.": [{ORTH: "นปช."}],
"ปชป.": [{ORTH: "ปชป."}],
"ผกค.": [{ORTH: "ผกค."}],
"พท.": [{ORTH: "พท."}],
"พ.ร.ก.": [{ORTH: "พ.ร.ก."}],
"พ.ร.ฎ.": [{ORTH: "พ.ร.ฎ."}],
"พ.ร.บ.": [{ORTH: "พ.ร.บ."}],
"รธน.": [{ORTH: "รธน."}],
"รบ.": [{ORTH: "รบ."}],
"รสช.": [{ORTH: "รสช."}],
"ส.ก.": [{ORTH: "ส.ก."}],
"สจ.": [{ORTH: "สจ."}],
"สว.": [{ORTH: "สว."}],
"ส.ส.": [{ORTH: "ส.ส."}],
# ทั่วไป / general
"ก.ข.ค.": [{ORTH: "ก.ข.ค."}],
"กทม.": [{ORTH: "กทม."}],
"กรุงเทพฯ": [{ORTH: "กรุงเทพฯ"}],
"ขรก.": [{ORTH: "ขรก."}],
"ขส": [{ORTH: "ขส."}],
"ค.ร.น.": [{ORTH: "ค.ร.น."}],
"ค.ร.ม.": [{ORTH: "ค.ร.ม."}],
"ง.ด.": [{ORTH: "ง.ด."}],
"งป.": [{ORTH: "งป."}],
"จก.": [{ORTH: "จก."}],
"จขกท.": [{ORTH: "จขกท."}],
"จนท.": [{ORTH: "จนท."}],
"จ.ป.ร.": [{ORTH: "จ.ป.ร."}],
"จ.ม.": [{ORTH: "จ.ม."}],
"จย.": [{ORTH: "จย."}],
"จยย.": [{ORTH: "จยย."}],
"ตจว.": [{ORTH: "ตจว."}],
"โทร.": [{ORTH: "โทร."}],
"ธ.": [{ORTH: "ธ."}],
"น.ร.": [{ORTH: "น.ร."}],
"น.ศ.": [{ORTH: "น.ศ."}],
"น.ส.": [{ORTH: "น.ส."}],
"น.ส.๓": [{ORTH: "น.ส.๓"}],
"น.ส.๓ ก.": [{ORTH: "น.ส.๓ ก"}],
"นสพ.": [{ORTH: "นสพ."}],
"บ.ก.": [{ORTH: "บ.ก."}],
"บจก.": [{ORTH: "บจก."}],
"บงล.": [{ORTH: "บงล."}],
"บบส.": [{ORTH: "บบส."}],
"บมจ.": [{ORTH: "บมจ."}],
"บลจ.": [{ORTH: "บลจ."}],
"บ/ช": [{ORTH: "บ/ช"}],
"บร.": [{ORTH: "บร."}],
"ปชช.": [{ORTH: "ปชช."}],
"ปณ.": [{ORTH: "ปณ."}],
"ปณก.": [{ORTH: "ปณก."}],
"ปณส.": [{ORTH: "ปณส."}],
"ปธ.": [{ORTH: "ปธ."}],
"ปธน.": [{ORTH: "ปธน."}],
"ปอ.": [{ORTH: "ปอ."}],
"ปอ.พ.": [{ORTH: "ปอ.พ."}],
"พ.ก.ง.": [{ORTH: "พ.ก.ง."}],
"พ.ก.ส.": [{ORTH: "พ.ก.ส."}],
"พขร.": [{ORTH: "พขร."}],
"ภ.ง.ด.": [{ORTH: "ภ.ง.ด."}],
"ภ.ง.ด.๙": [{ORTH: "ภ.ง.ด.๙"}],
"ภ.ป.ร.": [{ORTH: "ภ.ป.ร."}],
"ภ.พ.": [{ORTH: "ภ.พ."}],
"ร.": [{ORTH: "ร."}],
"ร.ง.": [{ORTH: "ร.ง."}],
"ร.ด.": [{ORTH: "ร.ด."}],
"รปภ.": [{ORTH: "รปภ."}],
"รพ.": [{ORTH: "รพ."}],
"ร.พ.": [{ORTH: "ร.พ."}],
"รร.": [{ORTH: "รร."}],
"รสก.": [{ORTH: "รสก."}],
"ส.ค.ส.": [{ORTH: "ส.ค.ส."}],
"สต.": [{ORTH: "สต."}],
"สน.": [{ORTH: "สน."}],
"สนข.": [{ORTH: "สนข."}],
"สนง.": [{ORTH: "สนง."}],
"สนญ.": [{ORTH: "สนญ."}],
"ส.ป.ช.": [{ORTH: "ส.ป.ช."}],
"สภ.": [{ORTH: "สภ."}],
"ส.ล.น.": [{ORTH: "ส.ล.น."}],
"สวญ.": [{ORTH: "สวญ."}],
"สวป.": [{ORTH: "สวป."}],
"สว.สส.": [{ORTH: "สว.สส."}],
"ส.ห.": [{ORTH: "ส.ห."}],
"สอ.": [{ORTH: "สอ."}],
"สอท.": [{ORTH: "สอท."}],
"เสธ.": [{ORTH: "เสธ."}],
"หจก.": [{ORTH: "หจก."}],
"ห.ร.ม.": [{ORTH: "ห.ร.ม."}],
}
TOKENIZER_EXCEPTIONS = _exc
| 13,343 | 29.396355 | 52 | py |
spaCy | spaCy-master/spacy/lang/ti/__init__.py | from ...attrs import LANG
from ...language import BaseDefaults, Language
from ...util import update_exc
from ..tokenizer_exceptions import BASE_EXCEPTIONS
from .lex_attrs import LEX_ATTRS
from .punctuation import TOKENIZER_SUFFIXES
from .stop_words import STOP_WORDS
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
class TigrinyaDefaults(BaseDefaults):
lex_attr_getters = dict(Language.Defaults.lex_attr_getters)
lex_attr_getters.update(LEX_ATTRS)
lex_attr_getters[LANG] = lambda text: "ti"
tokenizer_exceptions = update_exc(BASE_EXCEPTIONS, TOKENIZER_EXCEPTIONS)
stop_words = STOP_WORDS
suffixes = TOKENIZER_SUFFIXES
writing_system = {"direction": "ltr", "has_case": False, "has_letters": True}
class Tigrinya(Language):
lang = "ti"
Defaults = TigrinyaDefaults
__all__ = ["Tigrinya"]
| 834 | 29.925926 | 81 | py |
spaCy | spaCy-master/spacy/lang/ti/examples.py | """
Example sentences to test spaCy and its language models.
>>> from spacy.lang.ti.examples import sentences
>>> docs = nlp.pipe(sentences)
"""
sentences = [
"አፕል ብዩኬ ትርከብ ንግድ ብ1 ቢሊዮን ዶላር ንምግዛዕ ሐሲባ።",
"ፈላማይ ክታበት ኮቪድ 19 ተጀሚሩ፤ሓዱሽ ተስፋ ሂቡ ኣሎ",
"ቻንስለር ጀርመን ኣንገላ መርከል ዝርግሓ ቫይረስ ኮሮና ንምክልካል ጽኑዕ እገዳ ክግበር ጸዊዓ",
"ለንደን ብዓዲ እንግሊዝ ትርከብ ዓባይ ከተማ እያ።",
"ናበይ አለኻ፧",
"ናይ ፈረንሳይ ፕሬዝዳንት መን እዩ፧",
"ናይ አሜሪካ ዋና ከተማ እንታይ እያ፧",
"ኦባማ መዓስ ተወሊዱ፧",
]
| 457 | 23.105263 | 65 | py |
spaCy | spaCy-master/spacy/lang/ti/lex_attrs.py | from ...attrs import LIKE_NUM
_num_words = [
"ዜሮ",
"ሓደ",
"ክልተ",
"ሰለስተ",
"ኣርባዕተ",
"ሓሙሽተ",
"ሽድሽተ",
"ሸውዓተ",
"ሽሞንተ",
"ትሽዓተ",
"ዓሰርተ",
"ዕስራ",
"ሰላሳ",
"ኣርብዓ",
"ሓምሳ",
"ሱሳ",
"ሰብዓ",
"ሰማንያ",
"ቴስዓ",
"ሚእቲ",
"ሺሕ",
"ሚልዮን",
"ቢልዮን",
"ትሪልዮን",
"ኳድሪልዮን",
"ጋዚልዮን",
"ባዚልዮን",
]
# Tigrinya ordinals above 10 are the same as _num_words but start with "መበል "
_ordinal_words = [
"ቀዳማይ",
"ካልኣይ",
"ሳልሳይ",
"ራብዓይ",
"ሓምሻይ",
"ሻድሻይ",
"ሻውዓይ",
"ሻምናይ",
"ታሽዓይ",
"ዓስራይ",
]
def like_num(text):
if text.startswith(("+", "-", "±", "~")):
text = text[1:]
text = text.replace(",", "").replace(".", "")
if text.isdigit():
return True
if text.count("/") == 1:
num, denom = text.split("/")
if num.isdigit() and denom.isdigit():
return True
text_lower = text.lower()
if text_lower in _num_words:
return True
# Check ordinal number
if text_lower in _ordinal_words:
return True
if text_lower.endswith("ይ"):
if text_lower[:-2].isdigit():
return True
return False
LEX_ATTRS = {LIKE_NUM: like_num}
| 1,221 | 15.513514 | 77 | py |
spaCy | spaCy-master/spacy/lang/ti/punctuation.py | from ..char_classes import (
ALPHA_UPPER,
CURRENCY,
LIST_ELLIPSES,
LIST_PUNCT,
LIST_QUOTES,
UNITS,
)
_list_punct = LIST_PUNCT + "፡ ። ፣ ፤ ፥ ፦ ፧ ፠ ፨".strip().split()
_suffixes = (
_list_punct
+ LIST_ELLIPSES
+ LIST_QUOTES
+ [
r"(?<=[0-9])\+",
# Tigrinya is written from Left-To-Right
r"(?<=[0-9])(?:{c})".format(c=CURRENCY),
r"(?<=[0-9])(?:{u})".format(u=UNITS),
r"(?<=[{au}][{au}])\.".format(au=ALPHA_UPPER),
]
)
TOKENIZER_SUFFIXES = _suffixes
| 530 | 19.423077 | 62 | py |
spaCy | spaCy-master/spacy/lang/ti/stop_words.py | # Stop words from Tigrinya Wordcount: https://github.com/fgaim/Tigrinya-WordCount/blob/main/ti_stop_words.txt
# Stop words
STOP_WORDS = set(
"""
'ምበር 'ሞ 'ቲ 'ታ 'ኳ 'ውን 'ዚ 'የ 'ዩ 'ያ 'ዮም 'ዮን
ልዕሊ ሒዙ ሒዛ ሕጂ መበል መን መንጎ መጠን ማለት ምስ ምባል
ምእንቲ ምኽንያቱ ምኽንያት ምዃኑ ምዃንና ምዃኖም
ስለ ስለዚ ስለዝበላ ሽዑ ቅድሚ በለ በቲ በዚ ብምባል ብተወሳኺ ብኸመይ
ብዘይ ብዘይካ ብዙሕ ብዛዕባ ብፍላይ ተባሂሉ ነበረ ነቲ ነታ ነቶም
ነዚ ነይሩ ነገራት ነገር ናብ ናብቲ ናትኩም ናትኪ ናትካ ናትክን
ናይ ናይቲ ንሕና ንሱ ንሳ ንሳቶም ንስኺ ንስኻ ንስኻትኩም ንስኻትክን ንዓይ
ኢለ ኢሉ ኢላ ኢልካ ኢሎም ኢና ኢኻ ኢዩ ኣለኹ
ኣለዉ ኣለዎ ኣሎ ኣብ ኣብቲ ኣብታ ኣብኡ ኣብዚ ኣነ ኣዝዩ ኣይኮነን ኣይኰነን
እምበር እሞ እተን እቲ እታ እቶም እንተ እንተሎ
ኣላ እንተኾነ እንታይ እንከሎ እኳ እዋን እውን እዚ እዛ እዞም
እየ እየን እዩ እያ እዮም
ከሎ ከመይ ከም ከምቲ ከምኡ ከምዘሎ
ከምዚ ከኣ ኩሉ ካልእ ካልኦት ካብ ካብቲ ካብቶም ክሳብ ክሳዕ ክብል
ክንደይ ክንዲ ክኸውን ኮይኑ ኰይኑ ኵሉ ኸም ኸኣ ወይ
ዋላ ዘለና ዘለዉ ዘለዋ ዘለዎ ዘለዎም ዘላ ዘሎ ዘይብሉ
ዝርከብ ዝበሃል ዝበለ ዝብል ዝተባህለ ዝተኻየደ ዝተፈላለየ ዝተፈላለዩ
ዝነበረ ዝነበረት ዝነበሩ ዝካየድ ዝኸውን ዝኽእል ዝኾነ ዝዀነ
የለን ይቕረብ ይብል ይኸውን ይኹን ይኽእል ደኣ ድሕሪ ድማ
ገለ ገሊጹ ገና ገይሩ ግና ግን ጥራይ
""".split()
)
| 899 | 31.142857 | 109 | py |
spaCy | spaCy-master/spacy/lang/ti/tokenizer_exceptions.py | from ...symbols import NORM, ORTH
_exc = {}
for exc_data in [
{ORTH: "ት/ቤት"},
{ORTH: "ወ/ሮ", NORM: "ወይዘሮ"},
{ORTH: "ወ/ሪ", NORM: "ወይዘሪት"},
]:
_exc[exc_data[ORTH]] = [exc_data]
for orth in [
"ዓ.ም.",
"ኪ.ሜ.",
]:
_exc[orth] = [{ORTH: orth}]
TOKENIZER_EXCEPTIONS = _exc
| 298 | 12.590909 | 37 | py |
spaCy | spaCy-master/spacy/lang/tl/__init__.py | from ...language import BaseDefaults, Language
from .lex_attrs import LEX_ATTRS
from .stop_words import STOP_WORDS
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
class TagalogDefaults(BaseDefaults):
tokenizer_exceptions = TOKENIZER_EXCEPTIONS
lex_attr_getters = LEX_ATTRS
stop_words = STOP_WORDS
class Tagalog(Language):
lang = "tl"
Defaults = TagalogDefaults
__all__ = ["Tagalog"]
| 416 | 20.947368 | 54 | py |
spaCy | spaCy-master/spacy/lang/tl/lex_attrs.py | from ...attrs import LIKE_NUM
_num_words = [
"sero",
"isa",
"dalawa",
"tatlo",
"apat",
"lima",
"anim",
"pito",
"walo",
"siyam",
"sampu",
"labing-isa",
"labindalawa",
"labintatlo",
"labing-apat",
"labinlima",
"labing-anim",
"labimpito",
"labing-walo",
"labinsiyam",
"dalawampu",
"tatlumpu",
"apatnapu",
"limampu",
"animnapu",
"pitumpu",
"walumpu",
"siyamnapu",
"daan",
"libo",
"milyon",
"bilyon",
"trilyon",
"quadrilyon",
"gajilyon",
"bazilyon",
]
def like_num(text):
text = text.replace(",", "").replace(".", "")
if text.isdigit():
return True
if text.count("/") == 1:
num, denom = text.split("/")
if num.isdigit() and denom.isdigit():
return True
if text in _num_words:
return True
return False
LEX_ATTRS = {LIKE_NUM: like_num}
| 942 | 15.54386 | 49 | py |
spaCy | spaCy-master/spacy/lang/tl/stop_words.py | STOP_WORDS = set(
"""
akin
aking
ako
alin
am
amin
aming
ang
ano
anumang
apat
at
atin
ating
ay
bababa
bago
bakit
bawat
bilang
dahil
dalawa
dapat
din
dito
doon
gagawin
gayunman
ginagawa
ginawa
ginawang
gumawa
gusto
habang
hanggang
hindi
huwag
iba
ibaba
ibabaw
ibig
ikaw
ilagay
ilalim
ilan
inyong
isa
isang
itaas
ito
iyo
iyon
iyong
ka
kahit
kailangan
kailanman
kami
kanila
kanilang
kanino
kanya
kanyang
kapag
kapwa
karamihan
katiyakan
katulad
kaya
kaysa
ko
kong
kulang
kumuha
kung
laban
lahat
lamang
likod
lima
maaari
maaaring
maging
mahusay
makita
marami
marapat
masyado
may
mayroon
mga
minsan
mismo
mula
muli
na
nabanggit
naging
nagkaroon
nais
nakita
namin
napaka
narito
nasaan
ng
ngayon
ni
nila
nilang
nito
niya
niyang
noon
o
pa
paano
pababa
paggawa
pagitan
pagkakaroon
pagkatapos
palabas
pamamagitan
panahon
pangalawa
para
paraan
pareho
pataas
pero
pumunta
pumupunta
sa
saan
sabi
sabihin
sarili
sila
sino
siya
tatlo
tayo
tulad
tungkol
una
walang
""".split()
)
| 965 | 5.355263 | 17 | py |
spaCy | spaCy-master/spacy/lang/tl/tokenizer_exceptions.py | from ...symbols import NORM, ORTH
from ...util import update_exc
from ..tokenizer_exceptions import BASE_EXCEPTIONS
_exc = {
"tayo'y": [{ORTH: "tayo"}, {ORTH: "'y", NORM: "ay"}],
"isa'y": [{ORTH: "isa"}, {ORTH: "'y", NORM: "ay"}],
"baya'y": [{ORTH: "baya"}, {ORTH: "'y", NORM: "ay"}],
"sa'yo": [{ORTH: "sa"}, {ORTH: "'yo", NORM: "iyo"}],
"ano'ng": [{ORTH: "ano"}, {ORTH: "'ng", NORM: "ang"}],
"siya'y": [{ORTH: "siya"}, {ORTH: "'y", NORM: "ay"}],
"nawa'y": [{ORTH: "nawa"}, {ORTH: "'y", NORM: "ay"}],
"papa'no": [{ORTH: "papa'no", NORM: "papaano"}],
"'di": [{ORTH: "'di", NORM: "hindi"}],
}
TOKENIZER_EXCEPTIONS = update_exc(BASE_EXCEPTIONS, _exc)
| 687 | 35.210526 | 58 | py |
spaCy | spaCy-master/spacy/lang/tn/__init__.py | from ...language import BaseDefaults, Language
from .lex_attrs import LEX_ATTRS
from .punctuation import TOKENIZER_INFIXES
from .stop_words import STOP_WORDS
class SetswanaDefaults(BaseDefaults):
infixes = TOKENIZER_INFIXES
stop_words = STOP_WORDS
lex_attr_getters = LEX_ATTRS
class Setswana(Language):
lang = "tn"
Defaults = SetswanaDefaults
__all__ = ["Setswana"]
| 392 | 19.684211 | 46 | py |
spaCy | spaCy-master/spacy/lang/tn/examples.py | """
Example sentences to test spaCy and its language models.
>>> from spacy.lang.tn.examples import sentences
>>> docs = nlp.pipe(sentences)
"""
sentences = [
"Apple e nyaka go reka JSE ka tlhwatlhwa ta R1 billion",
"Johannesburg ke toropo e kgolo mo Afrika Borwa.",
"O ko kae?",
"ke mang presidente ya Afrika Borwa?",
"ke eng toropo kgolo ya Afrika Borwa?",
"Nelson Mandela o belegwe leng?",
]
| 421 | 25.375 | 60 | py |
spaCy | spaCy-master/spacy/lang/tn/lex_attrs.py | from ...attrs import LIKE_NUM
_num_words = [
"lefela",
"nngwe",
"pedi",
"tharo",
"nne",
"tlhano",
"thataro",
"supa",
"robedi",
"robongwe",
"lesome",
"lesomenngwe",
"lesomepedi",
"sometharo",
"somenne",
"sometlhano",
"somethataro",
"somesupa",
"somerobedi",
"somerobongwe",
"someamabedi",
"someamararo",
"someamane",
"someamatlhano",
"someamarataro",
"someamasupa",
"someamarobedi",
"someamarobongwe",
"lekgolo",
"sekete",
"milione",
"bilione",
"terilione",
"kwatirilione",
"gajillione",
"bazillione",
]
_ordinal_words = [
"ntlha",
"bobedi",
"boraro",
"bone",
"botlhano",
"borataro",
"bosupa",
"borobedi ",
"borobongwe",
"bolesome",
"bolesomengwe",
"bolesomepedi",
"bolesometharo",
"bolesomenne",
"bolesometlhano",
"bolesomethataro",
"bolesomesupa",
"bolesomerobedi",
"bolesomerobongwe",
"somamabedi",
"someamararo",
"someamane",
"someamatlhano",
"someamarataro",
"someamasupa",
"someamarobedi",
"someamarobongwe",
"lekgolo",
"sekete",
"milione",
"bilione",
"terilione",
"kwatirilione",
"gajillione",
"bazillione",
]
def like_num(text):
if text.startswith(("+", "-", "±", "~")):
text = text[1:]
text = text.replace(",", "").replace(".", "")
if text.isdigit():
return True
if text.count("/") == 1:
num, denom = text.split("/")
if num.isdigit() and denom.isdigit():
return True
text_lower = text.lower()
if text_lower in _num_words:
return True
    # Check ordinal number
if text_lower in _ordinal_words:
return True
if text_lower.endswith("th"):
if text_lower[:-2].isdigit():
return True
return False
LEX_ATTRS = {LIKE_NUM: like_num}
| 1,942 | 16.990741 | 49 | py |
spaCy | spaCy-master/spacy/lang/tn/punctuation.py | from ..char_classes import (
ALPHA,
ALPHA_LOWER,
ALPHA_UPPER,
CONCAT_QUOTES,
HYPHENS,
LIST_ELLIPSES,
LIST_ICONS,
)
_infixes = (
LIST_ELLIPSES
+ LIST_ICONS
+ [
r"(?<=[0-9])[+\-\*^](?=[0-9-])",
r"(?<=[{al}{q}])\.(?=[{au}{q}])".format(
al=ALPHA_LOWER, au=ALPHA_UPPER, q=CONCAT_QUOTES
),
r"(?<=[{a}]),(?=[{a}])".format(a=ALPHA),
r"(?<=[{a}0-9])(?:{h})(?=[{a}])".format(a=ALPHA, h=HYPHENS),
r"(?<=[{a}0-9])[:<>=/](?=[{a}])".format(a=ALPHA),
]
)
TOKENIZER_INFIXES = _infixes
| 576 | 20.37037 | 68 | py |
spaCy | spaCy-master/spacy/lang/tn/stop_words.py | # Stop words
STOP_WORDS = set(
"""
ke gareng ga selekanyo tlhwatlhwa yo mongwe se
sengwe fa go le jalo gongwe ba na mo tikologong
jaaka kwa morago nna gonne ka sa pele nako teng
tlase fela ntle magareng tsona feta bobedi kgabaganya
moo gape kgatlhanong botlhe tsotlhe bokana e esi
setseng mororo dinako golo kgolo nnye wena gago
o ntse ntle tla goreng gangwe mang yotlhe gore
eo yona tseraganyo eng ne sentle re rona thata
godimo fitlha pedi masomamabedi lesomepedi mmogo
tharo tseo boraro tseno yone jaanong bobona bona
lesome tsaya tsamaiso nngwe masomethataro thataro
tsa mmatota tota sale thoko supa dira tshwanetse di mmalwa masisi
bonala e tshwanang bogolo tsenya tsweetswee karolo
sepe tlhalosa dirwa robedi robongwe lesomenngwe gaisa
tlhano lesometlhano botlalo lekgolo
""".split()
)
| 796 | 36.952381 | 65 | py |
spaCy | spaCy-master/spacy/lang/tr/__init__.py | from ...language import BaseDefaults, Language
from .lex_attrs import LEX_ATTRS
from .stop_words import STOP_WORDS
from .syntax_iterators import SYNTAX_ITERATORS
from .tokenizer_exceptions import TOKEN_MATCH, TOKENIZER_EXCEPTIONS
class TurkishDefaults(BaseDefaults):
tokenizer_exceptions = TOKENIZER_EXCEPTIONS
lex_attr_getters = LEX_ATTRS
stop_words = STOP_WORDS
token_match = TOKEN_MATCH
syntax_iterators = SYNTAX_ITERATORS
class Turkish(Language):
lang = "tr"
Defaults = TurkishDefaults
__all__ = ["Turkish"]
| 546 | 23.863636 | 67 | py |
spaCy | spaCy-master/spacy/lang/tr/examples.py | """
Example sentences to test spaCy and its language models.
>>> from spacy.lang.tr.examples import sentences
>>> docs = nlp.pipe(sentences)
"""
sentences = [
"Neredesin?",
"Neredesiniz?",
"Bu bir cümledir.",
"Sürücüsüz araçlar sigorta yükümlülüğünü üreticilere kaydırıyor.",
"San Francisco kaldırımda kurye robotları yasaklayabilir."
"Londra İngiltere'nin başkentidir.",
"Türkiye'nin başkenti neresi?",
"Bakanlar Kurulu 180 günlük eylem planını açıkladı.",
"Merkez Bankası, beklentiler doğrultusunda faizlerde değişikliğe gitmedi.",
]
| 574 | 29.263158 | 79 | py |
spaCy | spaCy-master/spacy/lang/tr/lex_attrs.py | from ...attrs import LIKE_NUM
# Thirteen, fifteen etc. are written separate: on üç
_num_words = [
"bir",
"iki",
"üç",
"dört",
"beş",
"altı",
"yedi",
"sekiz",
"dokuz",
"on",
"yirmi",
"otuz",
"kırk",
"elli",
"altmış",
"yetmiş",
"seksen",
"doksan",
"yüz",
"bin",
"milyon",
"milyar",
"trilyon",
"katrilyon",
"kentilyon",
]
_ordinal_words = [
"birinci",
"ikinci",
"üçüncü",
"dördüncü",
"beşinci",
"altıncı",
"yedinci",
"sekizinci",
"dokuzuncu",
"onuncu",
"yirminci",
"otuzuncu",
"kırkıncı",
"ellinci",
"altmışıncı",
"yetmişinci",
"sekseninci",
"doksanıncı",
"yüzüncü",
"bininci",
"milyonuncu",
"milyarıncı",
"trilyonuncu",
"katrilyonuncu",
"kentilyonuncu",
]
_ordinal_endings = ("inci", "ıncı", "nci", "ncı", "uncu", "üncü")
def like_num(text):
if text.startswith(("+", "-", "±", "~")):
text = text[1:]
text = text.replace(",", "").replace(".", "")
if text.isdigit():
return True
if text.count("/") == 1:
num, denom = text.split("/")
if num.isdigit() and denom.isdigit():
return True
text_lower = text.lower()
# Check cardinal number
if text_lower in _num_words:
return True
# Check ordinal number
if text_lower in _ordinal_words:
return True
if text_lower.endswith(_ordinal_endings):
if text_lower[:-3].isdigit() or text_lower[:-4].isdigit():
return True
return False
LEX_ATTRS = {LIKE_NUM: like_num}
| 1,630 | 17.325843 | 66 | py |
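Besides the cardinal and ordinal word lists, `like_num` above also accepts digits followed by an ordinal ending such as "ncı". A tiny sketch calling the helper directly, outside a pipeline:

```python
# Sketch: exercising the Turkish like_num helper on a few hand-picked strings.
from spacy.lang.tr.lex_attrs import like_num

print(like_num("altı"))     # cardinal word -> True
print(like_num("altıncı"))  # ordinal word -> True
print(like_num("16ncı"))    # digits + ordinal ending -> True
print(like_num("elma"))     # ordinary noun -> False
```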
spaCy | spaCy-master/spacy/lang/tr/stop_words.py | # Source: https://github.com/stopwords-iso/stopwords-tr
STOP_WORDS = set(
"""
acaba
acep
adamakıllı
adeta
ait
ama
amma
anca
ancak
arada
artık
aslında
aynen
ayrıca
az
açıkça
açıkçası
bana
bari
bazen
bazı
bazısı
bazısına
bazısında
bazısından
bazısını
bazısının
başkası
başkasına
başkasında
başkasından
başkasını
başkasının
başka
belki
ben
bende
benden
beni
benim
beri
beriki
berikinin
berikiyi
berisi
bilcümle
bile
binaen
binaenaleyh
biraz
birazdan
birbiri
birbirine
birbirini
birbirinin
birbirinde
birbirinden
birden
birdenbire
biri
birine
birini
birinin
birinde
birinden
birice
birileri
birilerinde
birilerinden
birilerine
birilerini
birilerinin
birisi
birisine
birisini
birisinin
birisinde
birisinden
birkaç
birkaçı
birkaçına
birkaçını
birkaçının
birkaçında
birkaçından
birkez
birlikte
birçok
birçoğu
birçoğuna
birçoğunda
birçoğundan
birçoğunu
birçoğunun
birşey
birşeyi
bitevi
biteviye
bittabi
biz
bizatihi
bizce
bizcileyin
bizden
bize
bizi
bizim
bizimki
bizzat
boşuna
bu
buna
bunda
bundan
bunlar
bunları
bunların
bunu
bunun
buracıkta
burada
buradan
burası
burasına
burasını
burasının
burasında
burasından
böyle
böylece
böylecene
böylelikle
böylemesine
böylesine
büsbütün
bütün
cuk
cümlesi
cümlesine
cümlesini
cümlesinin
cümlesinden
cümlemize
cümlemizi
cümlemizden
çabuk
çabukça
çeşitli
çok
çokları
çoklarınca
çokluk
çoklukla
çokça
çoğu
çoğun
çoğunca
çoğunda
çoğundan
çoğunlukla
çoğunu
çoğunun
çünkü
da
daha
dahası
dahi
dahil
dahilen
daima
dair
dayanarak
de
defa
dek
demin
demincek
deminden
denli
derakap
derhal
derken
değil
değin
diye
diğer
diğeri
diğerine
diğerini
diğerinden
dolayı
dolayısıyla
doğru
edecek
eden
ederek
edilecek
ediliyor
edilmesi
ediyor
elbet
elbette
emme
en
enikonu
epey
epeyce
epeyi
esasen
esnasında
etmesi
etraflı
etraflıca
etti
ettiği
ettiğini
evleviyetle
evvel
evvela
evvelce
evvelden
evvelemirde
evveli
eğer
fakat
filanca
filancanın
gah
gayet
gayetle
gayri
gayrı
gelgelelim
gene
gerek
gerçi
geçende
geçenlerde
gibi
gibilerden
gibisinden
gine
göre
gırla
hakeza
halbuki
halen
halihazırda
haliyle
handiyse
hangi
hangisi
hangisine
hangisine
hangisinde
hangisinden
hani
hariç
hasebiyle
hasılı
hatta
hele
hem
henüz
hep
hepsi
hepsini
hepsinin
hepsinde
hepsinden
her
herhangi
herkes
herkesi
herkesin
herkesten
hiç
hiçbir
hiçbiri
hiçbirine
hiçbirini
hiçbirinin
hiçbirinde
hiçbirinden
hoş
hulasaten
iken
ila
ile
ilen
ilgili
ilk
illa
illaki
imdi
indinde
inen
insermi
ise
ister
itibaren
itibariyle
itibarıyla
iyi
iyice
iyicene
için
iş
işte
kadar
kaffesi
kah
kala
kanımca
karşın
kaynak
kaçı
kaçına
kaçında
kaçından
kaçını
kaçının
kelli
kendi
kendilerinde
kendilerinden
kendilerine
kendilerini
kendilerinin
kendini
kendisi
kendisinde
kendisinden
kendisine
kendisini
kendisinin
kere
kez
keza
kezalik
keşke
ki
kim
kimden
kime
kimi
kiminin
kimisi
kimisinde
kimisinden
kimisine
kimisinin
kimse
kimsecik
kimsecikler
külliyen
kısaca
kısacası
lakin
leh
lütfen
maada
madem
mademki
mamafih
mebni
međer
meğer
meğerki
meğerse
mu
mü
mı
mi
nasıl
nasılsa
nazaran
naşi
ne
neden
nedeniyle
nedenle
nedenler
nedenlerden
nedense
nerde
nerden
nerdeyse
nere
nerede
nereden
neredeyse
neresi
nereye
netekim
neye
neyi
neyse
nice
nihayet
nihayetinde
nitekim
niye
niçin
o
olan
olarak
oldu
olduklarını
oldukça
olduğu
olduğunu
olmak
olması
olsa
olsun
olup
olur
olursa
oluyor
ona
onca
onculayın
onda
ondan
onlar
onlara
onlardan
onları
onların
onu
onun
ora
oracık
oracıkta
orada
oradan
oranca
oranla
oraya
oysa
oysaki
öbür
öbürkü
öbürü
öbüründe
öbüründen
öbürüne
öbürünü
önce
önceden
önceleri
öncelikle
öteki
ötekisi
öyle
öylece
öylelikle
öylemesine
öz
pek
pekala
peki
pekçe
peyderpey
rağmen
sadece
sahi
sahiden
sana
sanki
sen
senden
seni
senin
siz
sizden
sizi
sizin
sonra
sonradan
sonraları
sonunda
şayet
şey
şeyden
şeyi
şeyler
şu
şuna
şuncacık
şunda
şundan
şunlar
şunları
şunların
şunu
şunun
şura
şuracık
şuracıkta
şurası
şöyle
şimdi
tabii
tam
tamam
tamamen
tamamıyla
tarafından
tek
tüm
üzere
var
vardı
vasıtasıyla
ve
velev
velhasıl
velhasılıkelam
veya
veyahut
ya
yahut
yakinen
yakında
yakından
yakınlarda
yalnız
yalnızca
yani
yapacak
yapmak
yaptı
yaptıkları
yaptığı
yaptığını
yapılan
yapılması
yapıyor
yeniden
yenilerde
yerine
yine
yok
yoksa
yoluyla
yüzünden
zarfında
zaten
zati
zira
""".split()
)
| 4,187 | 6.505376 | 55 | py |
spaCy | spaCy-master/spacy/lang/tr/syntax_iterators.py | from typing import Iterator, Tuple, Union
from ...errors import Errors
from ...symbols import NOUN, PRON, PROPN
from ...tokens import Doc, Span
def noun_chunks(doclike: Union[Doc, Span]) -> Iterator[Tuple[int, int, int]]:
"""
Detect base noun phrases from a dependency parse. Works on both Doc and Span.
"""
# Please see documentation for Turkish NP structure
labels = [
"nsubj",
"iobj",
"obj",
"obl",
"appos",
"orphan",
"dislocated",
"ROOT",
]
doc = doclike.doc # Ensure works on both Doc and Span.
if not doc.has_annotation("DEP"):
raise ValueError(Errors.E029)
np_deps = [doc.vocab.strings.add(label) for label in labels]
conj = doc.vocab.strings.add("conj")
flat = doc.vocab.strings.add("flat")
np_label = doc.vocab.strings.add("NP")
    def extend_right(w):  # Extend the chunk rightward over "flat" tokens (e.g. multiword proper names)
rindex = w.i + 1
for rdep in doc[w.i].rights: # Extend the span to right if there is a flat
if rdep.dep == flat and rdep.pos in (NOUN, PROPN):
rindex = rdep.i + 1
else:
break
return rindex
prev_end = len(doc) + 1
for i, word in reversed(list(enumerate(doclike))):
if word.pos not in (NOUN, PROPN, PRON):
continue
# Prevent nested chunks from being produced
if word.i >= prev_end:
continue
if word.dep in np_deps:
prev_end = word.left_edge.i
yield word.left_edge.i, extend_right(word), np_label
elif word.dep == conj:
cc_token = word.left_edge
prev_end = cc_token.i
# Shave off cc tokens from the NP
yield cc_token.right_edge.i + 1, extend_right(word), np_label
SYNTAX_ITERATORS = {"noun_chunks": noun_chunks}
| 1,854 | 30.440678 | 83 | py |
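
The iterator above is exposed through SYNTAX_ITERATORS, which is what Doc.noun_chunks calls once a Turkish Doc
carries POS tags and a dependency parse. A minimal usage sketch, assuming some trained Turkish pipeline with a
parser is available (the package name "tr_core_news_md" below is hypothetical):

import spacy

nlp = spacy.load("tr_core_news_md")  # hypothetical Turkish pipeline with tagger + parser
doc = nlp("Ahmet eski evini sattı.")
for chunk in doc.noun_chunks:  # Spans built from the (start, end, label) triples yielded above
    print(chunk.text, chunk.root.dep_)
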
spaCy | spaCy-master/spacy/lang/tr/tokenizer_exceptions.py | import re
from ...symbols import NORM, ORTH
from ..punctuation import ALPHA, ALPHA_LOWER
_exc = {}
_abbr_period_exc = [
{ORTH: "A.B.D.", NORM: "Amerika"},
{ORTH: "Alb.", NORM: "albay"},
{ORTH: "Ank.", NORM: "Ankara"},
{ORTH: "Ar.Gör."},
{ORTH: "Arş.Gör."},
{ORTH: "Asb.", NORM: "astsubay"},
{ORTH: "Astsb.", NORM: "astsubay"},
{ORTH: "As.İz."},
{ORTH: "as.iz."},
{ORTH: "Atğm", NORM: "asteğmen"},
{ORTH: "Av.", NORM: "avukat"},
{ORTH: "Apt.", NORM: "apartmanı"},
{ORTH: "apt.", NORM: "apartmanı"},
{ORTH: "Bçvş.", NORM: "başçavuş"},
{ORTH: "bçvş.", NORM: "başçavuş"},
{ORTH: "bk.", NORM: "bakınız"},
{ORTH: "bknz.", NORM: "bakınız"},
{ORTH: "Bnb.", NORM: "binbaşı"},
{ORTH: "bnb.", NORM: "binbaşı"},
{ORTH: "Böl.", NORM: "bölümü"},
{ORTH: "böl.", NORM: "bölümü"},
{ORTH: "Bşk.", NORM: "başkanlığı"},
{ORTH: "bşk.", NORM: "başkanlığı"},
{ORTH: "Bştbp.", NORM: "baştabip"},
{ORTH: "bştbp.", NORM: "baştabip"},
{ORTH: "Bul.", NORM: "bulvarı"},
{ORTH: "bul.", NORM: "bulvarı"},
{ORTH: "Cad.", NORM: "caddesi"},
{ORTH: "cad.", NORM: "caddesi"},
{ORTH: "çev.", NORM: "çeviren"},
{ORTH: "Çvş.", NORM: "çavuş"},
{ORTH: "çvş.", NORM: "çavuş"},
{ORTH: "dak.", NORM: "dakika"},
{ORTH: "dk.", NORM: "dakika"},
{ORTH: "Doç.", NORM: "doçent"},
{ORTH: "doğ."},
{ORTH: "Dr.", NORM: "doktor"},
{ORTH: "dr.", NORM: "doktor"},
{ORTH: "drl.", NORM: "derleyen"},
{ORTH: "Dz.", NORM: "deniz"},
{ORTH: "Dz.K.K.lığı"},
{ORTH: "Dz.Kuv."},
{ORTH: "Dz.Kuv.K."},
{ORTH: "dzl.", NORM: "düzenleyen"},
{ORTH: "Ecz.", NORM: "eczanesi"},
{ORTH: "ecz.", NORM: "eczanesi"},
{ORTH: "ekon.", NORM: "ekonomi"},
{ORTH: "Fak.", NORM: "fakültesi"},
{ORTH: "Gn.", NORM: "genel"},
{ORTH: "Gnkur.", NORM: "Genelkurmay"},
{ORTH: "Gn.Kur.", NORM: "Genelkurmay"},
{ORTH: "gr.", NORM: "gram"},
{ORTH: "Hst.", NORM: "hastanesi"},
{ORTH: "hst.", NORM: "hastanesi"},
{ORTH: "Hs.Uzm."},
{ORTH: "huk.", NORM: "hukuk"},
{ORTH: "Hv.", NORM: "hava"},
{ORTH: "Hv.K.K.lığı"},
{ORTH: "Hv.Kuv."},
{ORTH: "Hv.Kuv.K."},
{ORTH: "Hz.", NORM: "hazreti"},
{ORTH: "Hz.Öz."},
{ORTH: "İng.", NORM: "ingilizce"},
{ORTH: "İst.", NORM: "İstanbul"},
{ORTH: "Jeol.", NORM: "jeoloji"},
{ORTH: "jeol.", NORM: "jeoloji"},
{ORTH: "Korg.", NORM: "korgeneral"},
{ORTH: "Kur.", NORM: "kurmay"},
{ORTH: "Kur.Bşk."},
{ORTH: "Kuv.", NORM: "kuvvetleri"},
{ORTH: "Ltd.", NORM: "limited"},
{ORTH: "ltd.", NORM: "limited"},
{ORTH: "Mah.", NORM: "mahallesi"},
{ORTH: "mah.", NORM: "mahallesi"},
{ORTH: "max.", NORM: "maksimum"},
{ORTH: "min.", NORM: "minimum"},
{ORTH: "Müh.", NORM: "mühendisliği"},
{ORTH: "müh.", NORM: "mühendisliği"},
{ORTH: "M.Ö."},
{ORTH: "M.S."},
{ORTH: "Onb.", NORM: "onbaşı"},
{ORTH: "Ord.", NORM: "ordinaryüs"},
{ORTH: "Org.", NORM: "orgeneral"},
{ORTH: "Ped.", NORM: "pedagoji"},
{ORTH: "Prof.", NORM: "profesör"},
{ORTH: "prof.", NORM: "profesör"},
{ORTH: "Sb.", NORM: "subay"},
{ORTH: "Sn.", NORM: "sayın"},
{ORTH: "sn.", NORM: "saniye"},
{ORTH: "Sok.", NORM: "sokak"},
{ORTH: "sok.", NORM: "sokak"},
{ORTH: "Şb.", NORM: "şube"},
{ORTH: "şb.", NORM: "şube"},
{ORTH: "Şti.", NORM: "şirketi"},
{ORTH: "şti.", NORM: "şirketi"},
{ORTH: "Tbp.", NORM: "tabip"},
{ORTH: "tbp.", NORM: "tabip"},
{ORTH: "T.C."},
{ORTH: "Tel.", NORM: "telefon"},
{ORTH: "tel.", NORM: "telefon"},
{ORTH: "telg.", NORM: "telgraf"},
{ORTH: "Tğm.", NORM: "teğmen"},
{ORTH: "tğm.", NORM: "teğmen"},
{ORTH: "tic.", NORM: "ticaret"},
{ORTH: "Tug.", NORM: "tugay"},
{ORTH: "Tuğg.", NORM: "tuğgeneral"},
{ORTH: "Tümg.", NORM: "tümgeneral"},
{ORTH: "Uzm.", NORM: "uzman"},
{ORTH: "Üçvş.", NORM: "üstçavuş"},
{ORTH: "Üni.", NORM: "üniversitesi"},
{ORTH: "Ütğm.", NORM: "üsteğmen"},
{ORTH: "vb."},
{ORTH: "vs.", NORM: "vesaire"},
{ORTH: "Yard.", NORM: "yardımcı"},
{ORTH: "Yar.", NORM: "yardımcı"},
{ORTH: "Yd.Sb."},
{ORTH: "Yard.Doç."},
{ORTH: "Yar.Doç."},
{ORTH: "Yb.", NORM: "yarbay"},
{ORTH: "Yrd.", NORM: "yardımcı"},
{ORTH: "Yrd.Doç."},
{ORTH: "Y.Müh."},
{ORTH: "Y.Mim."},
{ORTH: "yy.", NORM: "yüzyıl"},
]
for abbr in _abbr_period_exc:
_exc[abbr[ORTH]] = [abbr]
_abbr_exc = [
{ORTH: "AB", NORM: "Avrupa Birliği"},
{ORTH: "ABD", NORM: "Amerika"},
{ORTH: "ABS", NORM: "fren"},
{ORTH: "AOÇ"},
{ORTH: "ASKİ"},
{ORTH: "Bağ-kur", NORM: "Bağkur"},
{ORTH: "BDDK"},
{ORTH: "BJK", NORM: "Beşiktaş"},
{ORTH: "ESA", NORM: "Avrupa uzay ajansı"},
{ORTH: "FB", NORM: "Fenerbahçe"},
{ORTH: "GATA"},
{ORTH: "GS", NORM: "Galatasaray"},
{ORTH: "İSKİ"},
{ORTH: "KBB"},
{ORTH: "RTÜK", NORM: "radyo ve televizyon üst kurulu"},
{ORTH: "TBMM"},
{ORTH: "TC"},
{ORTH: "TÜİK", NORM: "Türkiye istatistik kurumu"},
{ORTH: "YÖK"},
]
for abbr in _abbr_exc:
_exc[abbr[ORTH]] = [abbr]
_num = r"[+-]?\d+([,.]\d+)*"
_ord_num = r"(\d+\.)"
_date = r"(((\d{1,2}[./-]){2})?(\d{4})|(\d{1,2}[./]\d{1,2}(\.)?))"
_dash_num = r"(([{al}\d]+/\d+)|(\d+/[{al}]))".format(al=ALPHA)
_roman_num = "M{0,3}(?:C[MD]|D?C{0,3})(?:X[CL]|L?X{0,3})(?:I[XV]|V?I{0,3})"
_roman_ord = r"({rn})\.".format(rn=_roman_num)
_time_exp = r"\d+(:\d+)*"
_inflections = r"'[{al}]+".format(al=ALPHA_LOWER)
_abbrev_inflected = r"[{a}]+\.'[{al}]+".format(a=ALPHA, al=ALPHA_LOWER)
_nums = r"(({d})|({dn})|({te})|({on})|({n})|({ro})|({rn}))({inf})?".format(
d=_date,
dn=_dash_num,
te=_time_exp,
on=_ord_num,
n=_num,
ro=_roman_ord,
rn=_roman_num,
inf=_inflections,
)
TOKENIZER_EXCEPTIONS = _exc
TOKEN_MATCH = re.compile(
r"^({abbr})|({n})$".format(n=_nums, abbr=_abbrev_inflected)
).match
| 5,945 | 30.13089 | 75 | py |
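
A quick sketch of how these special cases behave at runtime, assuming the Turkish language defaults register
TOKENIZER_EXCEPTIONS and TOKEN_MATCH as usual (spaCy v3+):

import spacy

nlp = spacy.blank("tr")  # rule-based tokenizer only, no trained components
doc = nlp("Dr. Ayşe geldi.")
print([t.text for t in doc])  # "Dr." should survive as a single token
print(doc[0].norm_)  # expected "doktor", from the NORM entry above
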
spaCy | spaCy-master/spacy/lang/tt/__init__.py | from ...language import BaseDefaults, Language
from .lex_attrs import LEX_ATTRS
from .punctuation import TOKENIZER_INFIXES
from .stop_words import STOP_WORDS
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
class TatarDefaults(BaseDefaults):
tokenizer_exceptions = TOKENIZER_EXCEPTIONS
infixes = TOKENIZER_INFIXES
lex_attr_getters = LEX_ATTRS
stop_words = STOP_WORDS
class Tatar(Language):
lang = "tt"
Defaults = TatarDefaults
__all__ = ["Tatar"]
| 483 | 22.047619 | 54 | py |
spaCy | spaCy-master/spacy/lang/tt/examples.py | """
Example sentences to test spaCy and its language models.
>>> from spacy.lang.tt.examples import sentences
>>> docs = nlp.pipe(sentences)
"""
sentences = [
"Apple Бөекбритания стартабын $1 миллиард өчен сатып алыун исәпли.",
"Автоном автомобильләр иминият җаваплылыкны җитештерүчеләргә күчерә.",
"Сан-Франциско тротуар буенча йөри торган робот-курьерларны тыю мөмкинлеген карый.",
"Лондон - Бөекбританиядә урнашкан зур шәһәр.",
"Син кайда?",
"Францияда кем президент?",
"Америка Кушма Штатларының башкаласы нинди шәһәр?",
"Барак Обама кайчан туган?",
]
| 589 | 33.705882 | 88 | py |
spaCy | spaCy-master/spacy/lang/tt/lex_attrs.py | from ...attrs import LIKE_NUM
_num_words = [
"нуль",
"ноль",
"бер",
"ике",
"өч",
"дүрт",
"биш",
"алты",
"җиде",
"сигез",
"тугыз",
"ун",
"унбер",
"унике",
"унөч",
"ундүрт",
"унбиш",
"уналты",
"унҗиде",
"унсигез",
"унтугыз",
"егерме",
"утыз",
"кырык",
"илле",
"алтмыш",
"җитмеш",
"сиксән",
"туксан",
"йөз",
"мең",
"төмән",
"миллион",
"миллиард",
"триллион",
"триллиард",
]
def like_num(text):
if text.startswith(("+", "-", "±", "~")):
text = text[1:]
text = text.replace(",", "").replace(".", "")
if text.isdigit():
return True
if text.count("/") == 1:
num, denom = text.split("/")
if num.isdigit() and denom.isdigit():
return True
if text in _num_words:
return True
return False
LEX_ATTRS = {LIKE_NUM: like_num}
| 936 | 14.881356 | 49 | py |
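
The like_num hook above backs the LIKE_NUM lexical attribute, so it can be called directly or observed on the
tokens of a blank Tatar pipeline. A small sketch:

import spacy
from spacy.lang.tt.lex_attrs import like_num

print(like_num("ун"), like_num("7,5"), like_num("алма"))  # True True False
nlp = spacy.blank("tt")
print([t.like_num for t in nlp("ике алма")])  # expected [True, False]
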
spaCy | spaCy-master/spacy/lang/tt/punctuation.py | from ..char_classes import (
ALPHA,
ALPHA_LOWER,
ALPHA_UPPER,
CONCAT_QUOTES,
HYPHENS,
LIST_ELLIPSES,
LIST_ICONS,
)
_hyphens_no_dash = HYPHENS.replace("-", "").strip("|").replace("||", "")
_infixes = (
LIST_ELLIPSES
+ LIST_ICONS
+ [
r"(?<=[{al}])\.(?=[{au}])".format(al=ALPHA_LOWER, au=ALPHA_UPPER),
r"(?<=[{a}])[,!?/()]+(?=[{a}])".format(a=ALPHA),
r"(?<=[{a}{q}])[:<>=](?=[{a}])".format(a=ALPHA, q=CONCAT_QUOTES),
r"(?<=[{a}])--(?=[{a}])".format(a=ALPHA),
r"(?<=[{a}]),(?=[{a}])".format(a=ALPHA),
r"(?<=[{a}])([{q}\)\]\(\[])(?=[\-{a}])".format(a=ALPHA, q=CONCAT_QUOTES),
r"(?<=[{a}])(?:{h})(?=[{a}])".format(a=ALPHA, h=_hyphens_no_dash),
r"(?<=[0-9])-(?=[0-9])",
]
)
TOKENIZER_INFIXES = _infixes
| 806 | 27.821429 | 81 | py |
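
The extra infix rules above (note the final digit-hyphen-digit pattern) make the blank Tatar tokenizer split
numeric ranges. A quick sketch of the effect:

import spacy

nlp = spacy.blank("tt")
print([t.text for t in nlp("1997-2000 еллар")])  # expected ['1997', '-', '2000', 'еллар']
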
spaCy | spaCy-master/spacy/lang/tt/stop_words.py | # Tatar stopwords are from https://github.com/aliiae/stopwords-tt
STOP_WORDS = set(
"""алай алайса алар аларга аларда алардан аларны аларның аларча
алары аларын аларынга аларында аларыннан аларының алтмыш алтмышынчы алтмышынчыга
алтмышынчыда алтмышынчыдан алтмышынчылар алтмышынчыларга алтмышынчыларда
алтмышынчылардан алтмышынчыларны алтмышынчыларның алтмышынчыны алтмышынчының
алты алтылап алтынчы алтынчыга алтынчыда алтынчыдан алтынчылар алтынчыларга
алтынчыларда алтынчылардан алтынчыларны алтынчыларның алтынчыны алтынчының
алтышар анда андагы андай андый андыйга андыйда андыйдан андыйны андыйның аннан
ансы анча аны аныкы аныкын аныкынга аныкында аныкыннан аныкының анысы анысын
анысынга анысында анысыннан анысының аның аныңча аркылы ары аша аңа аңар аңарга
аңарда аңардагы аңардан
бар бара барлык барча барчасы барчасын барчасына барчасында барчасыннан
барчасының бары башка башкача белән без безгә бездә бездән безне безнең безнеңчә
белдерүенчә белән бер бергә беренче беренчегә беренчедә беренчедән беренчеләр
беренчеләргә беренчеләрдә беренчеләрдән беренчеләрне беренчеләрнең беренчене
беренченең беркайда беркайсы беркая беркаян беркем беркемгә беркемдә беркемне
беркемнең беркемнән берлән берни бернигә бернидә бернидән бернинди бернине
бернинең берничек берничә бернәрсә бернәрсәгә бернәрсәдә бернәрсәдән бернәрсәне
бернәрсәнең беррәттән берсе берсен берсенгә берсендә берсенең берсеннән берәр
берәрсе берәрсен берәрсендә берәрсенең берәрсеннән берәрсенә берәү бигрәк бик
бирле бит биш бишенче бишенчегә бишенчедә бишенчедән бишенчеләр бишенчеләргә
бишенчеләрдә бишенчеләрдән бишенчеләрне бишенчеләрнең бишенчене бишенченең
бишләп болай болар боларга боларда болардан боларны боларның болары боларын
боларынга боларында боларыннан боларының бу буе буена буенда буенча буйлап
буларак булачак булды булмый булса булып булыр булырга бусы бүтән бәлки бән
бәрабәренә бөтен бөтенесе бөтенесен бөтенесендә бөтенесенең бөтенесеннән
бөтенесенә
вә
гел генә гына гүя гүяки гәрчә
да ди дигән диде дип дистәләгән дистәләрчә дүрт дүртенче дүртенчегә дүртенчедә
дүртенчедән дүртенчеләр дүртенчеләргә дүртенчеләрдә дүртенчеләрдән дүртенчеләрне
дүртенчеләрнең дүртенчене дүртенченең дүртләп дә
егерме егерменче егерменчегә егерменчедә егерменчедән егерменчеләр
егерменчеләргә егерменчеләрдә егерменчеләрдән егерменчеләрне егерменчеләрнең
егерменчене егерменченең ел елда
иде идек идем ике икенче икенчегә икенчедә икенчедән икенчеләр икенчеләргә
икенчеләрдә икенчеләрдән икенчеләрне икенчеләрнең икенчене икенченең икешәр икән
илле илленче илленчегә илленчедә илленчедән илленчеләр илленчеләргә
илленчеләрдә илленчеләрдән илленчеләрне илленчеләрнең илленчене илленченең илә
илән инде исә итеп иткән итте итү итә итәргә иң
йөз йөзенче йөзенчегә йөзенчедә йөзенчедән йөзенчеләр йөзенчеләргә йөзенчеләрдә
йөзенчеләрдән йөзенчеләрне йөзенчеләрнең йөзенчене йөзенченең йөзләгән йөзләрчә
йөзәрләгән
кадәр кай кайбер кайберләре кайберсе кайберәү кайберәүгә кайберәүдә кайберәүдән
кайберәүне кайберәүнең кайдагы кайсы кайсыбер кайсын кайсына кайсында кайсыннан
кайсының кайчангы кайчандагы кайчаннан караганда карамастан карамый карата каршы
каршына каршында каршындагы кебек кем кемгә кемдә кемне кемнең кемнән кенә ки
килеп килә кирәк кына кырыгынчы кырыгынчыга кырыгынчыда кырыгынчыдан
кырыгынчылар кырыгынчыларга кырыгынчыларда кырыгынчылардан кырыгынчыларны
кырыгынчыларның кырыгынчыны кырыгынчының кырык күк күпләгән күпме күпмеләп
күпмешәр күпмешәрләп күптән күрә
ләкин
максатында менә мең меңенче меңенчегә меңенчедә меңенчедән меңенчеләр
меңенчеләргә меңенчеләрдә меңенчеләрдән меңенчеләрне меңенчеләрнең меңенчене
меңенченең меңләгән меңләп меңнәрчә меңәрләгән меңәрләп миллиард миллиардлаган
миллиардларча миллион миллионлаган миллионнарча миллионынчы миллионынчыга
миллионынчыда миллионынчыдан миллионынчылар миллионынчыларга миллионынчыларда
миллионынчылардан миллионынчыларны миллионынчыларның миллионынчыны
миллионынчының мин миндә мине минем минемчә миннән миңа монда мондагы мондые
мондыен мондыенгә мондыендә мондыеннән мондыеның мондый мондыйга мондыйда
мондыйдан мондыйлар мондыйларга мондыйларда мондыйлардан мондыйларны
мондыйларның мондыйлары мондыйларын мондыйларынга мондыйларында мондыйларыннан
мондыйларының мондыйны мондыйның моннан монсыз монча моны моныкы моныкын
моныкынга моныкында моныкыннан моныкының монысы монысын монысынга монысында
монысыннан монысының моның моңа моңар моңарга мәгълүматынча мәгәр мән мөмкин
ни нибарысы никадәре нинди ниндие ниндиен ниндиенгә ниндиендә ниндиенең
ниндиеннән ниндиләр ниндиләргә ниндиләрдә ниндиләрдән ниндиләрен ниндиләренн
ниндиләреннгә ниндиләренндә ниндиләреннең ниндиләренннән ниндиләрне ниндиләрнең
ниндирәк нихәтле ничаклы ничек ничәшәр ничәшәрләп нуль нче нчы нәрсә нәрсәгә
нәрсәдә нәрсәдән нәрсәне нәрсәнең
саен сез сезгә сездә сездән сезне сезнең сезнеңчә сигез сигезенче сигезенчегә
сигезенчедә сигезенчедән сигезенчеләр сигезенчеләргә сигезенчеләрдә
сигезенчеләрдән сигезенчеләрне сигезенчеләрнең сигезенчене сигезенченең
сиксән син синдә сине синең синеңчә синнән сиңа соң сыман сүзенчә сүзләренчә
та таба теге тегеләй тегеләр тегеләргә тегеләрдә тегеләрдән тегеләре тегеләрен
тегеләренгә тегеләрендә тегеләренең тегеләреннән тегеләрне тегеләрнең тегенди
тегендигә тегендидә тегендидән тегендине тегендинең тегендә тегендәге тегене
тегенеке тегенекен тегенекенгә тегенекендә тегенекенең тегенекеннән тегенең
тегеннән тегесе тегесен тегесенгә тегесендә тегесенең тегесеннән тегеңә тиеш тик
тикле тора триллиард триллион тугыз тугызлап тугызлашып тугызынчы тугызынчыга
тугызынчыда тугызынчыдан тугызынчылар тугызынчыларга тугызынчыларда
тугызынчылардан тугызынчыларны тугызынчыларның тугызынчыны тугызынчының туксан
туксанынчы туксанынчыга туксанынчыда туксанынчыдан туксанынчылар туксанынчыларга
туксанынчыларда туксанынчылардан туксанынчыларны туксанынчыларның туксанынчыны
туксанынчының турында тыш түгел тә тәгаенләнгән төмән
уенча уйлавынча ук ул ун уналты уналтынчы уналтынчыга уналтынчыда уналтынчыдан
уналтынчылар уналтынчыларга уналтынчыларда уналтынчылардан уналтынчыларны
уналтынчыларның уналтынчыны уналтынчының унарлаган унарлап унаула унаулап унбер
унберенче унберенчегә унберенчедә унберенчедән унберенчеләр унберенчеләргә
унберенчеләрдә унберенчеләрдән унберенчеләрне унберенчеләрнең унберенчене
унберенченең унбиш унбишенче унбишенчегә унбишенчедә унбишенчедән унбишенчеләр
унбишенчеләргә унбишенчеләрдә унбишенчеләрдән унбишенчеләрне унбишенчеләрнең
унбишенчене унбишенченең ундүрт ундүртенче ундүртенчегә ундүртенчедә
ундүртенчедән ундүртенчеләр ундүртенчеләргә ундүртенчеләрдә ундүртенчеләрдән
ундүртенчеләрне ундүртенчеләрнең ундүртенчене ундүртенченең унике уникенче
уникенчегә уникенчедә уникенчедән уникенчеләр уникенчеләргә уникенчеләрдә
уникенчеләрдән уникенчеләрне уникенчеләрнең уникенчене уникенченең унлаган
унлап уннарча унсигез унсигезенче унсигезенчегә унсигезенчедә унсигезенчедән
унсигезенчеләр унсигезенчеләргә унсигезенчеләрдә унсигезенчеләрдән
унсигезенчеләрне унсигезенчеләрнең унсигезенчене унсигезенченең унтугыз
унтугызынчы унтугызынчыга унтугызынчыда унтугызынчыдан унтугызынчылар
унтугызынчыларга унтугызынчыларда унтугызынчылардан унтугызынчыларны
унтугызынчыларның унтугызынчыны унтугызынчының унынчы унынчыга унынчыда
унынчыдан унынчылар унынчыларга унынчыларда унынчылардан унынчыларны
унынчыларның унынчыны унынчының унҗиде унҗиденче унҗиденчегә унҗиденчедә
унҗиденчедән унҗиденчеләр унҗиденчеләргә унҗиденчеләрдә унҗиденчеләрдән
унҗиденчеләрне унҗиденчеләрнең унҗиденчене унҗиденченең унөч унөченче унөченчегә
унөченчедә унөченчедән унөченчеләр унөченчеләргә унөченчеләрдә унөченчеләрдән
унөченчеләрне унөченчеләрнең унөченчене унөченченең утыз утызынчы утызынчыга
утызынчыда утызынчыдан утызынчылар утызынчыларга утызынчыларда утызынчылардан
утызынчыларны утызынчыларның утызынчыны утызынчының
фикеренчә фәкать
хакында хәбәр хәлбуки хәтле хәтта
чаклы чакта чөнки
шикелле шул шулай шулар шуларга шуларда шулардан шуларны шуларның шулары шуларын
шуларынга шуларында шуларыннан шуларының шулкадәр шултикле шултиклем шулхәтле
шулчаклы шунда шундагы шундый шундыйга шундыйда шундыйдан шундыйны шундыйның
шунлыктан шуннан шунсы шунча шуны шуныкы шуныкын шуныкынга шуныкында шуныкыннан
шуныкының шунысы шунысын шунысынга шунысында шунысыннан шунысының шуның шушы
шушында шушыннан шушыны шушының шушыңа шуңа шуңар шуңарга
элек
югыйсә юк юкса
я ягъни язуынча яисә яки яктан якын ярашлы яхут яшь яшьлек
җиде җиделәп җиденче җиденчегә җиденчедә җиденчедән җиденчеләр җиденчеләргә
җиденчеләрдә җиденчеләрдән җиденчеләрне җиденчеләрнең җиденчене җиденченең
җидешәр җитмеш җитмешенче җитмешенчегә җитмешенчедә җитмешенчедән җитмешенчеләр
җитмешенчеләргә җитмешенчеләрдә җитмешенчеләрдән җитмешенчеләрне
җитмешенчеләрнең җитмешенчене җитмешенченең җыенысы
үз үзе үзем үземдә үземне үземнең үземнән үземә үзен үзендә үзенең үзеннән үзенә
үк
һичбер һичбере һичберен һичберендә һичберенең һичбереннән һичберенә һичберсе
һичберсен һичберсендә һичберсенең һичберсеннән һичберсенә һичберәү һичберәүгә
һичберәүдә һичберәүдән һичберәүне һичберәүнең һичкайсы һичкайсыга һичкайсыда
һичкайсыдан һичкайсыны һичкайсының һичкем һичкемгә һичкемдә һичкемне һичкемнең
һичкемнән һични һичнигә һичнидә һичнидән һичнинди һичнине һичнинең һичнәрсә
һичнәрсәгә һичнәрсәдә һичнәрсәдән һичнәрсәне һичнәрсәнең һәм һәммә һәммәсе
һәммәсен һәммәсендә һәммәсенең һәммәсеннән һәммәсенә һәр һәрбер һәрбере һәрберсе
һәркайсы һәркайсыга һәркайсыда һәркайсыдан һәркайсыны һәркайсының һәркем
һәркемгә һәркемдә һәркемне һәркемнең һәркемнән һәрни һәрнәрсә һәрнәрсәгә
һәрнәрсәдә һәрнәрсәдән һәрнәрсәне һәрнәрсәнең һәртөрле
ә әгәр әйтүенчә әйтүләренчә әлбәттә әле әлеге әллә әмма әнә
өстәп өч өчен өченче өченчегә өченчедә өченчедән өченчеләр өченчеләргә
өченчеләрдә өченчеләрдән өченчеләрне өченчеләрнең өченчене өченченең өчләп
өчәрләп""".split()
)
| 9,852 | 55.626437 | 81 | py |
spaCy | spaCy-master/spacy/lang/tt/tokenizer_exceptions.py | from ...symbols import NORM, ORTH
from ...util import update_exc
from ..tokenizer_exceptions import BASE_EXCEPTIONS
_exc = {}
_abbrev_exc = [
# Weekdays abbreviations
{ORTH: "дш", NORM: "дүшәмбе"},
{ORTH: "сш", NORM: "сишәмбе"},
{ORTH: "чш", NORM: "чәршәмбе"},
{ORTH: "пш", NORM: "пәнҗешәмбе"},
{ORTH: "җм", NORM: "җомга"},
{ORTH: "шб", NORM: "шимбә"},
{ORTH: "яш", NORM: "якшәмбе"},
# Months abbreviations
{ORTH: "гый", NORM: "гыйнвар"},
{ORTH: "фев", NORM: "февраль"},
{ORTH: "мар", NORM: "март"},
{ORTH: "мар", NORM: "март"},
{ORTH: "апр", NORM: "апрель"},
{ORTH: "июн", NORM: "июнь"},
{ORTH: "июл", NORM: "июль"},
{ORTH: "авг", NORM: "август"},
{ORTH: "сен", NORM: "сентябрь"},
{ORTH: "окт", NORM: "октябрь"},
{ORTH: "ноя", NORM: "ноябрь"},
{ORTH: "дек", NORM: "декабрь"},
# Number abbreviations
{ORTH: "млрд", NORM: "миллиард"},
{ORTH: "млн", NORM: "миллион"},
]
for abbr in _abbrev_exc:
for orth in (abbr[ORTH], abbr[ORTH].capitalize(), abbr[ORTH].upper()):
_exc[orth] = [{ORTH: orth, NORM: abbr[NORM]}]
_exc[orth + "."] = [{ORTH: orth + ".", NORM: abbr[NORM]}]
for exc_data in [ # "etc." abbreviations
{ORTH: "һ.б.ш.", NORM: "һәм башка шундыйлар"},
{ORTH: "һ.б.", NORM: "һәм башка"},
{ORTH: "б.э.к.", NORM: "безнең эрага кадәр"},
{ORTH: "б.э.", NORM: "безнең эра"},
]:
_exc[exc_data[ORTH]] = [exc_data]
TOKENIZER_EXCEPTIONS = update_exc(BASE_EXCEPTIONS, _exc)
| 1,509 | 30.458333 | 74 | py |
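
Because the loop above registers lower-, title- and upper-case variants, each with and without a trailing
period, all spellings resolve to the same special case. A quick check against the exported table:

from spacy.lang.tt.tokenizer_exceptions import TOKENIZER_EXCEPTIONS

for key in ("окт", "Окт", "ОКТ", "окт."):
    print(key, key in TOKENIZER_EXCEPTIONS)  # all True
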
spaCy | spaCy-master/spacy/lang/uk/__init__.py | from typing import Callable, Optional
from thinc.api import Model
from ...language import BaseDefaults, Language
from ..punctuation import (
COMBINING_DIACRITICS_TOKENIZER_INFIXES,
COMBINING_DIACRITICS_TOKENIZER_SUFFIXES,
)
from .lemmatizer import UkrainianLemmatizer
from .lex_attrs import LEX_ATTRS
from .stop_words import STOP_WORDS
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
class UkrainianDefaults(BaseDefaults):
tokenizer_exceptions = TOKENIZER_EXCEPTIONS
lex_attr_getters = LEX_ATTRS
stop_words = STOP_WORDS
suffixes = COMBINING_DIACRITICS_TOKENIZER_SUFFIXES
infixes = COMBINING_DIACRITICS_TOKENIZER_INFIXES
class Ukrainian(Language):
lang = "uk"
Defaults = UkrainianDefaults
@Ukrainian.factory(
"lemmatizer",
assigns=["token.lemma"],
default_config={
"model": None,
"mode": "pymorphy3",
"overwrite": False,
"scorer": {"@scorers": "spacy.lemmatizer_scorer.v1"},
},
default_score_weights={"lemma_acc": 1.0},
)
def make_lemmatizer(
nlp: Language,
model: Optional[Model],
name: str,
mode: str,
overwrite: bool,
scorer: Optional[Callable],
):
return UkrainianLemmatizer(
nlp.vocab, model, name, mode=mode, overwrite=overwrite, scorer=scorer
)
__all__ = ["Ukrainian"]
| 1,320 | 23.462963 | 77 | py |
spaCy | spaCy-master/spacy/lang/uk/examples.py | """
Example sentences to test spaCy and its language models.
>>> from spacy.lang.uk.examples import sentences
>>> docs = nlp.pipe(sentences)
"""
sentences = [
"Ніч на середу буде морозною.",
"Чим кращі книги ти читав, тим гірше спиш.", # Serhiy Zhadan
"Найстаріші ґудзики, відомі людству, археологи знайшли в долині ріки Інд.",
"Слов'янське слово «Україна» вперше згадується у Київському літописному зводі за Іпатіївським списком під 1187 роком.", # wikipedia
"Де у Києві найсмачніша кава?",
"Від Нижнього озера довгими дерев’яними сходами, над якими синьо й біло горіли маленькі коробочки-ліхтарики, підіймалися до нього двоє стовусів: найкращий друг Вертутій і його дванадцятилітній онук Чублик.", # blyznets_viktor_semenovych/zemlia_svitliachkiv
"Китайський космічний зонд \"Чан'е-4\" вперше в історії здійснив м'яку посадку на зворотному боці Місяця.",
"Коли до губ твоїх лишається півподиху, коли до губ твоїх лишається півкроку – зіниці твої виткані із подиву, в очах у тебе синьо і широко.", # Hryhorij Czubaj
"Дорогу сестру збираю у дорогу, а брати вирішили не брати машину.", # homographs
]
| 1,144 | 56.25 | 261 | py |
spaCy | spaCy-master/spacy/lang/uk/lemmatizer.py | from typing import Callable, Optional
from thinc.api import Model
from ...pipeline.lemmatizer import lemmatizer_score
from ...vocab import Vocab
from ..ru.lemmatizer import RussianLemmatizer
class UkrainianLemmatizer(RussianLemmatizer):
def __init__(
self,
vocab: Vocab,
model: Optional[Model],
name: str = "lemmatizer",
*,
mode: str = "pymorphy3",
overwrite: bool = False,
scorer: Optional[Callable] = lemmatizer_score,
) -> None:
if mode in {"pymorphy2", "pymorphy2_lookup"}:
try:
from pymorphy2 import MorphAnalyzer
except ImportError:
raise ImportError(
"The Ukrainian lemmatizer mode 'pymorphy2' requires the "
"pymorphy2 library and dictionaries. Install them with: "
"pip install pymorphy2 pymorphy2-dicts-uk"
) from None
if getattr(self, "_morph", None) is None:
self._morph = MorphAnalyzer(lang="uk")
elif mode in {"pymorphy3", "pymorphy3_lookup"}:
try:
from pymorphy3 import MorphAnalyzer
except ImportError:
raise ImportError(
"The Ukrainian lemmatizer mode 'pymorphy3' requires the "
"pymorphy3 library and dictionaries. Install them with: "
"pip install pymorphy3 pymorphy3-dicts-uk"
) from None
if getattr(self, "_morph", None) is None:
self._morph = MorphAnalyzer(lang="uk")
super().__init__(
vocab, model, name, mode=mode, overwrite=overwrite, scorer=scorer
)
| 1,716 | 36.326087 | 77 | py |
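
A minimal sketch of plugging this lemmatizer into a pipeline. It assumes the optional dependencies are
installed (pip install pymorphy3 pymorphy3-dicts-uk); the lookup mode used here works without POS tags, while
the default "pymorphy3" mode gives better results with a tagger/morphologizer earlier in the pipeline:

import spacy

nlp = spacy.blank("uk")
nlp.add_pipe("lemmatizer", config={"mode": "pymorphy3_lookup"})  # factory registered in uk/__init__.py
doc = nlp("чорні коти")
print([t.lemma_ for t in doc])
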
spaCy | spaCy-master/spacy/lang/uk/lex_attrs.py | from ...attrs import LIKE_NUM
_num_words = [
"більйон",
"вісім",
"вісімдесят",
"вісімнадцять",
"вісімсот",
"восьмий",
"два",
"двадцять",
"дванадцять",
"двісті",
"дев'яносто",
"дев'ятнадцять",
"дев'ятсот",
"дев'ять",
"десять",
"децильйон",
"квадрильйон",
"квінтильйон",
"мільйон",
"мільярд",
"нонильйон",
"один",
"одинадцять",
"октильйон",
"п'ятий",
"п'ятисотий",
"п'ятнадцять",
"п'ятсот",
"п'ять",
"секстильйон",
"септильйон",
"сім",
"сімдесят",
"сімнадцять",
"сімсот",
"сорок",
"сто",
"тисяча",
"три",
"тридцять",
"трильйон",
"тринадцять",
"триста",
"чотири",
"чотириста",
"чотирнадцять",
"шістдесят",
"шістнадцять",
"шістсот",
"шість",
]
def like_num(text):
text = text.replace(",", "").replace(".", "")
if text.isdigit():
return True
if text.count("/") == 1:
num, denom = text.split("/")
if num.isdigit() and denom.isdigit():
return True
if text in _num_words:
return True
return False
LEX_ATTRS = {LIKE_NUM: like_num}
| 1,191 | 15.788732 | 49 | py |
spaCy | spaCy-master/spacy/lang/uk/stop_words.py | STOP_WORDS = set(
"""а
або
адже
аж
але
алло
б
багато
без
безперервно
би
більш
більше
біля
близько
бо
був
буває
буде
будемо
будете
будеш
буду
будуть
будь
була
були
було
бути
в
вам
вами
вас
ваш
ваша
ваше
вашим
вашими
ваших
ваші
вашій
вашого
вашої
вашому
вашою
вашу
вгорі
вгору
вдалині
весь
вже
ви
від
відсотків
він
вісім
вісімнадцятий
вісімнадцять
вниз
внизу
вона
вони
воно
восьмий
все
всею
всі
всім
всіх
всього
всьому
всю
вся
втім
г
геть
говорив
говорить
давно
далеко
далі
дарма
два
двадцятий
двадцять
дванадцятий
дванадцять
дві
двох
де
дев'ятий
дев'ятнадцятий
дев'ятнадцять
дев'ять
декілька
день
десятий
десять
дійсно
для
дня
до
добре
довго
доки
досить
другий
дуже
дякую
е
є
ж
же
з
за
завжди
зазвичай
занадто
зараз
зате
звичайно
звідси
звідусіль
здається
зі
значить
знову
зовсім
і
із
її
їй
їм
іноді
інша
інше
інший
інших
інші
їх
й
його
йому
каже
ким
кілька
кого
кожен
кожна
кожне
кожні
коли
кому
краще
крім
куди
ласка
ледве
лише
м
має
майже
мало
мати
мене
мені
менш
менше
ми
мимо
міг
між
мій
мільйонів
мною
мого
могти
моє
моєї
моєму
моєю
може
можна
можно
можуть
мої
моїй
моїм
моїми
моїх
мою
моя
на
навіть
навіщо
навколо
навкруги
нагорі
над
назад
найбільш
нам
нами
нарешті
нас
наш
наша
наше
нашим
нашими
наших
наші
нашій
нашого
нашої
нашому
нашою
нашу
не
небагато
небудь
недалеко
неї
немає
нерідко
нещодавно
нею
нибудь
нижче
низько
ним
ними
них
ні
ніби
ніж
ній
ніколи
нікуди
нім
нічого
ну
нього
ньому
о
обидва
обоє
один
одинадцятий
одинадцять
однак
однієї
одній
одного
означає
окрім
он
особливо
ось
п'ятий
п'ятнадцятий
п'ятнадцять
п'ять
перед
перший
під
пізніше
пір
після
по
повинно
подів
поки
пора
поруч
посеред
потім
потрібно
почала
початку
при
про
просто
проте
проти
раз
разу
раніше
рано
раптом
рік
роки
років
року
році
сам
сама
саме
самим
самими
самих
самі
самій
само
самого
самому
саму
свого
своє
своєї
свої
своїй
своїх
свою
себе
сих
сім
сімнадцятий
сімнадцять
сказав
сказала
сказати
скільки
скрізь
собі
собою
спасибі
спочатку
справ
став
суть
сьогодні
сьомий
т
та
так
така
таке
такий
такі
також
там
твій
твого
твоє
твоєї
твоєму
твоєю
твої
твоїй
твоїм
твоїми
твоїх
твою
твоя
те
тебе
теж
тепер
ти
тим
тими
тисяч
тих
ті
тієї
тією
тій
тільки
тім
то
тобі
тобою
того
тоді
той
тому
тою
треба
третій
три
тринадцятий
тринадцять
трохи
ту
туди
тут
у
увесь
уміти
усе
усі
усім
усіма
усіх
усього
усьому
усю
усюди
уся
хіба
хотіти
хоч
хоча
хочеш
хто
це
цей
цим
цими
цих
ці
цієї
цій
цього
цьому
цю
ця
час
частіше
часто
часу
через
четвертий
чи
чиє
чиєї
чиєму
чиї
чиїй
чиїм
чиїми
чиїх
чий
чийого
чийому
чим
численна
численне
численний
численні
чию
чия
чого
чому
чотири
чотирнадцятий
чотирнадцять
шістнадцятий
шістнадцять
шість
шостий
ще
що
щоб
щодо
щось
я
як
яка
який
яких
які
якій
якого
якої
якщо""".split()
)
| 2,700 | 4.746809 | 17 | py |
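
The list is attached to the Ukrainian language defaults, so membership surfaces as the is_stop flag.
A tiny sketch:

import spacy

nlp = spacy.blank("uk")
print(nlp.vocab["але"].is_stop, nlp.vocab["книга"].is_stop)  # expected: True False
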
spaCy | spaCy-master/spacy/lang/uk/tokenizer_exceptions.py | from ...symbols import NORM, ORTH
from ...util import update_exc
from ..tokenizer_exceptions import BASE_EXCEPTIONS
_exc = {}
for exc_data in [
{ORTH: "обл.", NORM: "область"},
{ORTH: "р-н.", NORM: "район"},
{ORTH: "р-н", NORM: "район"},
{ORTH: "м.", NORM: "місто"},
{ORTH: "вул.", NORM: "вулиця"},
{ORTH: "просп.", NORM: "проспект"},
{ORTH: "пр-кт", NORM: "проспект"},
{ORTH: "бул.", NORM: "бульвар"},
{ORTH: "пров.", NORM: "провулок"},
{ORTH: "пл.", NORM: "площа"},
{ORTH: "майд.", NORM: "майдан"},
{ORTH: "мкр.", NORM: "мікрорайон"},
{ORTH: "ст.", NORM: "станція"},
{ORTH: "ж/м", NORM: "житловий масив"},
{ORTH: "наб.", NORM: "набережна"},
{ORTH: "в/ч", NORM: "військова частина"},
{ORTH: "в/м", NORM: "військове містечко"},
{ORTH: "оз.", NORM: "озеро"},
{ORTH: "ім.", NORM: "імені"},
{ORTH: "г.", NORM: "гора"},
{ORTH: "п.", NORM: "пан"},
{ORTH: "проф.", NORM: "професор"},
{ORTH: "акад.", NORM: "академік"},
{ORTH: "доц.", NORM: "доцент"},
]:
_exc[exc_data[ORTH]] = [exc_data]
TOKENIZER_EXCEPTIONS = update_exc(BASE_EXCEPTIONS, _exc)
| 1,143 | 29.918919 | 56 | py |
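
These special cases keep each abbreviation together as one token and expose its expansion via norm_.
A quick sketch with a blank pipeline:

import spacy

nlp = spacy.blank("uk")
doc = nlp("м. Київ, вул. Хрещатик")
print([(t.text, t.norm_) for t in doc])  # expected: "м." -> "місто", "вул." -> "вулиця"
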
spaCy | spaCy-master/spacy/lang/ur/__init__.py | from ...language import BaseDefaults, Language
from .lex_attrs import LEX_ATTRS
from .punctuation import TOKENIZER_SUFFIXES
from .stop_words import STOP_WORDS
class UrduDefaults(BaseDefaults):
suffixes = TOKENIZER_SUFFIXES
lex_attr_getters = LEX_ATTRS
stop_words = STOP_WORDS
writing_system = {"direction": "rtl", "has_case": False, "has_letters": True}
class Urdu(Language):
lang = "ur"
Defaults = UrduDefaults
__all__ = ["Urdu"]
| 461 | 22.1 | 81 | py |
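
The writing_system metadata declared above is copied onto the shared vocab, where components such as displaCy
use it to handle right-to-left text. A quick sketch:

import spacy

nlp = spacy.blank("ur")
print(nlp.vocab.writing_system)  # {'direction': 'rtl', 'has_case': False, 'has_letters': True}
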
spaCy | spaCy-master/spacy/lang/ur/examples.py | """
Example sentences to test spaCy and its language models.
>>> from spacy.lang.ur.examples import sentences
>>> docs = nlp.pipe(sentences)
"""
sentences = [
"اردو ہے جس کا نام ہم جانتے ہیں داغ",
"سارے جہاں میں دھوم ہماری زباں کی ہے",
]
| 249 | 18.230769 | 56 | py |
spaCy | spaCy-master/spacy/lang/ur/lex_attrs.py | from ...attrs import LIKE_NUM
# Source https://quizlet.com/4271889/1-100-urdu-number-wordsurdu-numerals-flash-cards/
# http://www.urduword.com/lessons.php?lesson=numbers
# https://en.wikibooks.org/wiki/Urdu/Vocabulary/Numbers
# https://www.urdu-english.com/lessons/beginner/numbers
_num_words = """ایک دو تین چار پانچ چھ سات آٹھ نو دس گیارہ بارہ تیرہ چودہ پندرہ سولہ سترہ
اٹهارا انیس بیس اکیس بائیس تئیس چوبیس پچیس چھببیس
ستایس اٹھائس انتيس تیس اکتیس بتیس تینتیس چونتیس پینتیس
چھتیس سینتیس ارتیس انتالیس چالیس اکتالیس بیالیس تیتالیس
چوالیس پیتالیس چھیالیس سینتالیس اڑتالیس انچالیس پچاس اکاون باون
تریپن چون پچپن چھپن ستاون اٹھاون انسٹھ ساثھ
اکسٹھ باسٹھ تریسٹھ چوسٹھ پیسٹھ چھیاسٹھ سڑسٹھ اڑسٹھ
انھتر ستر اکھتر بھتتر تیھتر چوھتر تچھتر چھیتر ستتر
اٹھتر انیاسی اسی اکیاسی بیاسی تیراسی چوراسی پچیاسی چھیاسی
سٹیاسی اٹھیاسی نواسی نوے اکانوے بانوے ترانوے
چورانوے پچانوے چھیانوے ستانوے اٹھانوے ننانوے سو
""".split()
# source https://www.google.com/intl/ur/inputtools/try/
_ordinal_words = """پہلا دوسرا تیسرا چوتھا پانچواں چھٹا ساتواں آٹھواں نواں دسواں گیارہواں بارہواں تیرھواں چودھواں
پندرھواں سولہواں سترھواں اٹھارواں انیسواں بسیواں
""".split()
def like_num(text):
if text.startswith(("+", "-", "±", "~")):
text = text[1:]
text = text.replace(",", "").replace(".", "")
if text.isdigit():
return True
if text.count("/") == 1:
num, denom = text.split("/")
if num.isdigit() and denom.isdigit():
return True
if text in _num_words:
return True
if text in _ordinal_words:
return True
return False
LEX_ATTRS = {LIKE_NUM: like_num}
| 1,617 | 34.173913 | 113 | py |
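
Besides cardinals, the like_num hook above also accepts the ordinal forms listed in _ordinal_words.
A small direct check:

from spacy.lang.ur.lex_attrs import like_num

print(like_num("دو"), like_num("پہلا"), like_num("کتاب"))  # expected: True True False
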
spaCy | spaCy-master/spacy/lang/ur/punctuation.py | from ..punctuation import TOKENIZER_SUFFIXES
_suffixes = TOKENIZER_SUFFIXES
| 77 | 18.5 | 44 | py |
spaCy | spaCy-master/spacy/lang/ur/stop_words.py | # Source: collected from different resource on internet
STOP_WORDS = set(
"""
ثھی
خو
گی
اپٌے
گئے
ثہت
طرف
ہوبری
پبئے
اپٌب
دوضری
گیب
کت
گب
ثھی
ضے
ہر
پر
اش
دی
گے
لگیں
ہے
ثعذ
ضکتے
تھی
اى
دیب
لئے
والے
یہ
ثدبئے
ضکتی
تھب
اًذر
رریعے
لگی
ہوبرا
ہوًے
ثبہر
ضکتب
ًہیں
تو
اور
رہب
لگے
ہوضکتب
ہوں
کب
ہوبرے
توبم
کیب
ایطے
رہی
هگر
ہوضکتی
ہیں
کریں
ہو
تک
کی
ایک
رہے
هیں
ہوضکتے
کیطے
ہوًب
تت
کہ
ہوا
آئے
ضبت
تھے
کیوں
ہو
تب
کے
پھر
ثغیر
خبر
ہے
رکھ
کی
طب
کوئی
رریعے
ثبرے
خب
اضطرذ
ثلکہ
خجکہ
رکھ
تب
کی
طرف
ثراں
خبر
رریعہ
اضکب
ثٌذ
خص
کی
لئے
توہیں
دوضرے
کررہی
اضکی
ثیچ
خوکہ
رکھتی
کیوًکہ
دوًوں
کر
رہے
خبر
ہی
ثرآں
اضکے
پچھلا
خیطب
رکھتے
کے
ثعذ
تو
ہی
دورى
کر
یہبں
آش
تھوڑا
چکے
زکویہ
دوضروں
ضکب
اوًچب
ثٌب
پل
تھوڑی
چلا
خبهوظ
دیتب
ضکٌب
اخبزت
اوًچبئی
ثٌبرہب
پوچھب
تھوڑے
چلو
ختن
دیتی
ضکی
اچھب
اوًچی
ثٌبرہی
پوچھتب
تیي
چلیں
در
دیتے
ضکے
اچھی
اوًچے
ثٌبرہے
پوچھتی
خبًب
چلے
درخبت
دیر
ضلطلہ
اچھے
اٹھبًب
ثٌبًب
پوچھتے
خبًتب
چھوٹب
درخہ
دیکھٌب
ضوچ
اختتبم
اہن
ثٌذ
پوچھٌب
خبًتی
چھوٹوں
درخے
دیکھو
ضوچب
ادھر
آئی
ثٌذکرًب
پوچھو
خبًتے
چھوٹی
درزقیقت
دیکھی
ضوچتب
ارد
آئے
ثٌذکرو
پوچھوں
خبًٌب
چھوٹے
درضت
دیکھیں
ضوچتی
اردگرد
آج
ثٌذی
پوچھیں
خططرذ
چھہ
دش
دیٌب
ضوچتے
ارکبى
آخر
ثڑا
پورا
خگہ
چیسیں
دفعہ
دے
ضوچٌب
اضتعوبل
آخر
پہلا
خگہوں
زبصل
دکھبئیں
راضتوں
ضوچو
اضتعوبلات
آدهی
ثڑی
پہلی
خگہیں
زبضر
دکھبتب
راضتہ
ضوچی
اغیب
آًب
ثڑے
پہلےضی
خلذی
زبل
دکھبتی
راضتے
ضوچیں
اطراف
آٹھ
ثھر
خٌبة
زبل
دکھبتے
رکي
ضیذھب
افراد
آیب
ثھرا
پہلے
خواى
زبلات
دکھبًب
رکھب
ضیذھی
اکثر
ثب
ہوا
پیع
خوًہی
زبلیہ
دکھبو
رکھی
ضیذھے
اکٹھب
ثھرپور
تبزٍ
خیطبکہ
زصوں
رکھے
ضیکٌڈ
اکٹھی
ثبری
ثہتر
تر
چبر
زصہ
دلچطپ
زیبدٍ
غبیذ
اکٹھے
ثبلا
ثہتری
ترتیت
چبہب
زصے
دلچطپی
ضبت
غخص
اکیلا
ثبلترتیت
ثہتریي
تریي
چبہٌب
زقبئق
دلچطپیبں
ضبدٍ
غذ
اکیلی
ثرش
پبش
تعذاد
چبہے
زقیتیں
هٌبضت
ضبرا
غروع
اکیلے
ثغیر
پبًب
چکب
زقیقت
دو
ضبرے
غروعبت
اگرچہ
ثلٌذ
پبًچ
تن
چکی
زکن
دور
ضبل
غے
الگ
پراًب
تٌہب
چکیں
دوضرا
ضبلوں
صبف
صسیر
قجیلہ
کوًطے
لازهی
هطئلے
ًیب
طریق
کرتی
کہتے
صفر
قطن
کھولا
لگتب
هطبئل
وار
طریقوں
کرتے
کہٌب
صورت
کئی
کھولٌب
لگتی
هطتعول
وار
طریقہ
کرتے
ہو
کہٌب
صورتسبل
کئے
کھولو
لگتے
هػتول
ٹھیک
طریقے
کرًب
کہو
صورتوں
کبفی
هطلق
ڈھوًڈا
طور
کرو
کہوں
صورتیں
کبم
کھولیں
لگی
هعلوم
ڈھوًڈلیب
طورپر
کریں
کہی
ضرور
کجھی
کھولے
لگے
هکول
ڈھوًڈًب
ظبہر
کرے
کہیں
ضرورت
کرا
کہب
لوجب
هلا
ڈھوًڈو
عذد
کل
کہیں
کرتب
کہتب
لوجی
هوکي
ڈھوًڈی
عظین
کن
کہے
ضروری
کرتبہوں
کہتی
لوجے
هوکٌبت
ڈھوًڈیں
علاقوں
کوتر
کیے
لوسبت
هوکٌہ
ہن
لے
ًبپطٌذ
ہورہے
علاقہ
کورا
کے
رریعے
لوسہ
هڑا
ہوئی
هتعلق
ًبگسیر
ہوگئی
علاقے
کوروں
گئی
لو
هڑًب
ہوئے
هسترم
ًطجت
ہو
گئے
علاوٍ
کورٍ
گرد
لوگ
هڑے
ہوتی
هسترهہ
ًقطہ
ہوگیب
کورے
گروپ
لوگوں
هہرثبى
ہوتے
هسطوش
ًکبلٌب
ہوًی
عووهی
کوطي
گروٍ
لڑکپي
هیرا
ہوچکب
هختلف
ًکتہ
ہی
فرد
کوى
گروہوں
لی
هیری
ہوچکی
هسیذ
فی
کوًطب
گٌتی
لیب
هیرے
ہوچکے
هطئلہ
ًوخواى
یقیٌی
قجل
کوًطی
لیٌب
ًئی
ہورہب
لیں
ًئے
ہورہی
ثبعث
ضت
""".split()
)
| 2,665 | 4.18677 | 55 | py |
spaCy | spaCy-master/spacy/lang/vi/__init__.py | import re
import string
from pathlib import Path
from typing import Any, Dict, Union
import srsly
from ... import util
from ...language import BaseDefaults, Language
from ...tokens import Doc
from ...util import DummyTokenizer, load_config_from_str, registry
from ...vocab import Vocab
from .lex_attrs import LEX_ATTRS
from .stop_words import STOP_WORDS
DEFAULT_CONFIG = """
[nlp]
[nlp.tokenizer]
@tokenizers = "spacy.vi.VietnameseTokenizer"
use_pyvi = true
"""
@registry.tokenizers("spacy.vi.VietnameseTokenizer")
def create_vietnamese_tokenizer(use_pyvi: bool = True):
def vietnamese_tokenizer_factory(nlp):
return VietnameseTokenizer(nlp.vocab, use_pyvi=use_pyvi)
return vietnamese_tokenizer_factory
class VietnameseTokenizer(DummyTokenizer):
def __init__(self, vocab: Vocab, use_pyvi: bool = False):
self.vocab = vocab
self.use_pyvi = use_pyvi
if self.use_pyvi:
try:
from pyvi import ViTokenizer
self.ViTokenizer = ViTokenizer
except ImportError:
msg = (
"Pyvi not installed. Either set use_pyvi = False, "
"or install it https://pypi.python.org/pypi/pyvi"
)
raise ImportError(msg) from None
def __reduce__(self):
return VietnameseTokenizer, (self.vocab, self.use_pyvi)
def __call__(self, text: str) -> Doc:
if self.use_pyvi:
words = self.pyvi_tokenize(text)
words, spaces = util.get_words_and_spaces(words, text)
return Doc(self.vocab, words=words, spaces=spaces)
else:
words, spaces = util.get_words_and_spaces(text.split(), text)
return Doc(self.vocab, words=words, spaces=spaces)
# The methods pyvi_sylabelize_with_ws and pyvi_tokenize are adapted from
# pyvi v0.1, MIT License, Copyright (c) 2016 Viet-Trung Tran.
# See licenses/3rd_party_licenses.txt
def pyvi_sylabelize_with_ws(self, text):
"""Modified from pyvi to preserve whitespace and skip unicode
normalization."""
specials = [r"==>", r"->", r"\.\.\.", r">>"]
digit = r"\d+([\.,_]\d+)+"
email = r"([a-zA-Z0-9_.+-]+@([a-zA-Z0-9-]+\.)+[a-zA-Z0-9-]+)"
web = r"\w+://[^\s]+"
word = r"\w+"
non_word = r"[^\w\s]"
abbreviations = [
r"[A-ZĐ]+\.",
r"Tp\.",
r"Mr\.",
r"Mrs\.",
r"Ms\.",
r"Dr\.",
r"ThS\.",
]
patterns = []
patterns.extend(abbreviations)
patterns.extend(specials)
patterns.extend([web, email])
patterns.extend([digit, non_word, word])
patterns = r"(\s+|" + "|".join(patterns) + ")"
tokens = re.findall(patterns, text, re.UNICODE)
return [token[0] for token in tokens]
def pyvi_tokenize(self, text):
"""Modified from pyvi to preserve text and whitespace."""
if len(text) == 0:
return []
elif text.isspace():
return [text]
segs = self.pyvi_sylabelize_with_ws(text)
words = []
preceding_ws = []
for i, token in enumerate(segs):
if not token.isspace():
words.append(token)
preceding_ws.append(
"" if (i == 0 or not segs[i - 1].isspace()) else segs[i - 1]
)
labels = self.ViTokenizer.ViTokenizer.model.predict(
[self.ViTokenizer.ViTokenizer.sent2features(words, False)]
)
token = words[0]
tokens = []
for i in range(1, len(labels[0])):
if (
labels[0][i] == "I_W"
and words[i] not in string.punctuation
and words[i - 1] not in string.punctuation
and not words[i][0].isdigit()
and not words[i - 1][0].isdigit()
and not (words[i][0].istitle() and not words[i - 1][0].istitle())
):
token = token + preceding_ws[i] + words[i]
else:
tokens.append(token)
token = words[i]
tokens.append(token)
return tokens
def _get_config(self) -> Dict[str, Any]:
return {"use_pyvi": self.use_pyvi}
def _set_config(self, config: Dict[str, Any] = {}) -> None:
self.use_pyvi = config.get("use_pyvi", False)
def to_bytes(self, **kwargs) -> bytes:
serializers = {"cfg": lambda: srsly.json_dumps(self._get_config())}
return util.to_bytes(serializers, [])
def from_bytes(self, data: bytes, **kwargs) -> "VietnameseTokenizer":
deserializers = {"cfg": lambda b: self._set_config(srsly.json_loads(b))}
util.from_bytes(data, deserializers, [])
return self
def to_disk(self, path: Union[str, Path], **kwargs) -> None:
path = util.ensure_path(path)
serializers = {"cfg": lambda p: srsly.write_json(p, self._get_config())}
util.to_disk(path, serializers, [])
def from_disk(self, path: Union[str, Path], **kwargs) -> "VietnameseTokenizer":
path = util.ensure_path(path)
serializers = {"cfg": lambda p: self._set_config(srsly.read_json(p))}
util.from_disk(path, serializers, [])
return self
class VietnameseDefaults(BaseDefaults):
config = load_config_from_str(DEFAULT_CONFIG)
lex_attr_getters = LEX_ATTRS
stop_words = STOP_WORDS
class Vietnamese(Language):
lang = "vi"
Defaults = VietnameseDefaults
__all__ = ["Vietnamese"]
| 5,574 | 31.988166 | 83 | py |
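
The use_pyvi setting in the default config above can be overridden when the pipeline is created. A minimal
sketch: the default mode assumes the pyvi package is installed (pip install pyvi), while the override falls
back to plain whitespace splitting as implemented in __call__ above:

import spacy

nlp_pyvi = spacy.blank("vi")  # default: pyvi word segmentation
nlp_ws = spacy.blank("vi", config={"nlp": {"tokenizer": {"use_pyvi": False}}})  # whitespace splitting only

text = "Tôi thích học tiếng Việt."
print([t.text for t in nlp_pyvi(text)])
print([t.text for t in nlp_ws(text)])
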
spaCy | spaCy-master/spacy/lang/vi/examples.py | """
Example sentences to test spaCy and its language models.
>>> from spacy.lang.vi.examples import sentences
>>> docs = nlp.pipe(sentences)
"""
sentences = [
"Đây là đâu, tôi là ai?",
"Căn phòng có nhiều cửa sổ nên nó khá sáng",
"Đại dịch COVID vừa qua đã gây ảnh hưởng rất lớn tới nhiều doanh nghiệp lớn nhỏ.",
"Thành phố Hồ Chí Minh đã bị ảnh hưởng nặng nề trong thời gian vừa qua.",
"Ông bạn đang ở đâu thế?",
"Ai là người giải phóng đất nước Việt Nam khỏi ách đô hộ?",
"Vị tướng nào là người đã làm nên chiến thắng lịch sử Điện Biên Phủ?",
"Làm việc nhiều chán quá, đi chơi đâu đi?",
]
| 625 | 33.777778 | 86 | py |
spaCy | spaCy-master/spacy/lang/vi/lex_attrs.py | from ...attrs import LIKE_NUM
_num_words = [
"không", # Zero
"một", # One
"mốt", # Also one, irreplacable in niché cases for unit digit such as "51"="năm mươi mốt"
"hai", # Two
"ba", # Three
"bốn", # Four
"tư", # Also four, used in certain cases for unit digit such as "54"="năm mươi tư"
"năm", # Five
"lăm", # Also five, irreplacable in niché cases for unit digit such as "55"="năm mươi lăm"
"sáu", # Six
"bảy", # Seven
"bẩy", # Also seven, old fashioned
"tám", # Eight
"chín", # Nine
"mười", # Ten
"chục", # Also ten, used for counting in tens such as "20 eggs"="hai chục trứng"
"trăm", # Hundred
"nghìn", # Thousand
"ngàn", # Also thousand, used in the south
"vạn", # Ten thousand
"triệu", # Million
"tỷ", # Billion
"tỉ", # Also billion, used in combinatorics such as "tỉ_phú"="billionaire"
]
def like_num(text):
if text.startswith(("+", "-", "±", "~")):
text = text[1:]
text = text.replace(",", "").replace(".", "")
if text.isdigit():
return True
if text.count("/") == 1:
num, denom = text.split("/")
if num.isdigit() and denom.isdigit():
return True
if text.lower() in _num_words:
return True
return False
LEX_ATTRS = {LIKE_NUM: like_num}
| 1,340 | 28.152174 | 95 | py |
spaCy | spaCy-master/spacy/lang/vi/stop_words.py | # Source: https://github.com/stopwords/vietnamese-stopwords
STOP_WORDS = set(
"""
a_lô
a_ha
ai
ai_ai
ai_nấy
ai_đó
alô
amen
anh
anh_ấy
ba
ba_bau
ba_bản
ba_cùng
ba_họ
ba_ngày
ba_ngôi
ba_tăng
bao_giờ
bao_lâu
bao_nhiêu
bao_nả
bay_biến
biết
biết_bao
biết_bao_nhiêu
biết_chắc
biết_chừng_nào
biết_mình
biết_mấy
biết_thế
biết_trước
biết_việc
biết_đâu
biết_đâu_chừng
biết_đâu_đấy
biết_được
buổi
buổi_làm
buổi_mới
buổi_ngày
buổi_sớm
bà
bà_ấy
bài
bài_bác
bài_bỏ
bài_cái
bác
bán
bán_cấp
bán_dạ
bán_thế
bây_bẩy
bây_chừ
bây_giờ
bây_nhiêu
bèn
béng
bên
bên_bị
bên_có
bên_cạnh
bông
bước
bước_khỏi
bước_tới
bước_đi
bạn
bản
bản_bộ
bản_riêng
bản_thân
bản_ý
bất_chợt
bất_cứ
bất_giác
bất_kì
bất_kể
bất_kỳ
bất_luận
bất_ngờ
bất_nhược
bất_quá
bất_quá_chỉ
bất_thình_lình
bất_tử
bất_đồ
bấy
bấy_chầy
bấy_chừ
bấy_giờ
bấy_lâu
bấy_lâu_nay
bấy_nay
bấy_nhiêu
bập_bà_bập_bõm
bập_bõm
bắt_đầu
bắt_đầu_từ
bằng
bằng_cứ
bằng_không
bằng_người
bằng_nhau
bằng_như
bằng_nào
bằng_nấy
bằng_vào
bằng_được
bằng_ấy
bển
bệt
bị
bị_chú
bị_vì
bỏ
bỏ_bà
bỏ_cha
bỏ_cuộc
bỏ_không
bỏ_lại
bỏ_mình
bỏ_mất
bỏ_mẹ
bỏ_nhỏ
bỏ_quá
bỏ_ra
bỏ_riêng
bỏ_việc
bỏ_xa
bỗng
bỗng_chốc
bỗng_dưng
bỗng_không
bỗng_nhiên
bỗng_nhưng
bỗng_thấy
bỗng_đâu
bộ
bộ_thuộc
bộ_điều
bội_phần
bớ
bởi
bởi_ai
bởi_chưng
bởi_nhưng
bởi_sao
bởi_thế
bởi_thế_cho_nên
bởi_tại
bởi_vì
bởi_vậy
bởi_đâu
bức
cao
cao_lâu
cao_ráo
cao_răng
cao_sang
cao_số
cao_thấp
cao_thế
cao_xa
cha
cha_chả
chao_ôi
chia_sẻ
chiếc
cho
cho_biết
cho_chắc
cho_hay
cho_nhau
cho_nên
cho_rằng
cho_rồi
cho_thấy
cho_tin
cho_tới
cho_tới_khi
cho_về
cho_ăn
cho_đang
cho_được
cho_đến
cho_đến_khi
cho_đến_nỗi
choa
chu_cha
chui_cha
chung
chung_cho
chung_chung
chung_cuộc
chung_cục
chung_nhau
chung_qui
chung_quy
chung_quy_lại
chung_ái
chuyển
chuyển_tự
chuyển_đạt
chuyện
chuẩn_bị
chành_chạnh
chí_chết
chính
chính_bản
chính_giữa
chính_là
chính_thị
chính_điểm
chùn_chùn
chùn_chũn
chú
chú_dẫn
chú_khách
chú_mày
chú_mình
chúng
chúng_mình
chúng_ta
chúng_tôi
chúng_ông
chăn_chắn
chăng
chăng_chắc
chăng_nữa
chơi
chơi_họ
chưa
chưa_bao_giờ
chưa_chắc
chưa_có
chưa_cần
chưa_dùng
chưa_dễ
chưa_kể
chưa_tính
chưa_từng
chầm_chập
chậc
chắc
chắc_chắn
chắc_dạ
chắc_hẳn
chắc_lòng
chắc_người
chắc_vào
chắc_ăn
chẳng_lẽ
chẳng_những
chẳng_nữa
chẳng_phải
chết_nỗi
chết_thật
chết_tiệt
chỉ
chỉ_chính
chỉ_có
chỉ_là
chỉ_tên
chỉn
chị
chị_bộ
chị_ấy
chịu
chịu_chưa
chịu_lời
chịu_tốt
chịu_ăn
chọn
chọn_bên
chọn_ra
chốc_chốc
chớ
chớ_chi
chớ_gì
chớ_không
chớ_kể
chớ_như
chợt
chợt_nghe
chợt_nhìn
chủn
chứ
chứ_ai
chứ_còn
chứ_gì
chứ_không
chứ_không_phải
chứ_lại
chứ_lị
chứ_như
chứ_sao
coi_bộ
coi_mòi
con
con_con
con_dạ
con_nhà
con_tính
cu_cậu
cuối
cuối_cùng
cuối_điểm
cuốn
cuộc
càng
càng_càng
càng_hay
cá_nhân
các
các_cậu
cách
cách_bức
cách_không
cách_nhau
cách_đều
cái
cái_gì
cái_họ
cái_đã
cái_đó
cái_ấy
câu_hỏi
cây
cây_nước
còn
còn_như
còn_nữa
còn_thời_gian
còn_về
có
có_ai
có_chuyện
có_chăng
có_chăng_là
có_chứ
có_cơ
có_dễ
có_họ
có_khi
có_ngày
có_người
có_nhiều
có_nhà
có_phải
có_số
có_tháng
có_thế
có_thể
có_vẻ
có_ý
có_ăn
có_điều
có_điều_kiện
có_đáng
có_đâu
có_được
cóc_khô
cô
cô_mình
cô_quả
cô_tăng
cô_ấy
công_nhiên
cùng
cùng_chung
cùng_cực
cùng_nhau
cùng_tuổi
cùng_tột
cùng_với
cùng_ăn
căn
căn_cái
căn_cắt
căn_tính
cũng
cũng_như
cũng_nên
cũng_thế
cũng_vậy
cũng_vậy_thôi
cũng_được
cơ
cơ_chỉ
cơ_chừng
cơ_cùng
cơ_dẫn
cơ_hồ
cơ_hội
cơ_mà
cơn
cả
cả_nghe
cả_nghĩ
cả_ngày
cả_người
cả_nhà
cả_năm
cả_thảy
cả_thể
cả_tin
cả_ăn
cả_đến
cảm_thấy
cảm_ơn
cấp
cấp_số
cấp_trực_tiếp
cần
cần_cấp
cần_gì
cần_số
cật_lực
cật_sức
cậu
cổ_lai
cụ_thể
cụ_thể_là
cụ_thể_như
của
của_ngọt
của_tin
cứ
cứ_như
cứ_việc
cứ_điểm
cực_lực
do
do_vì
do_vậy
do_đó
duy
duy_chỉ
duy_có
dài
dài_lời
dài_ra
dành
dành_dành
dào
dì
dù
dù_cho
dù_dì
dù_gì
dù_rằng
dù_sao
dùng
dùng_cho
dùng_hết
dùng_làm
dùng_đến
dưới
dưới_nước
dạ
dạ_bán
dạ_con
dạ_dài
dạ_dạ
dạ_khách
dần_dà
dần_dần
dầu_sao
dẫn
dẫu
dẫu_mà
dẫu_rằng
dẫu_sao
dễ
dễ_dùng
dễ_gì
dễ_khiến
dễ_nghe
dễ_ngươi
dễ_như_chơi
dễ_sợ
dễ_sử_dụng
dễ_thường
dễ_thấy
dễ_ăn
dễ_đâu
dở_chừng
dữ
dữ_cách
em
em_em
giá_trị
giá_trị_thực_tế
giảm
giảm_chính
giảm_thấp
giảm_thế
giống
giống_người
giống_nhau
giống_như
giờ
giờ_lâu
giờ_này
giờ_đi
giờ_đây
giờ_đến
giữ
giữ_lấy
giữ_ý
giữa
giữa_lúc
gây
gây_cho
gây_giống
gây_ra
gây_thêm
gì
gì_gì
gì_đó
gần
gần_bên
gần_hết
gần_ngày
gần_như
gần_xa
gần_đây
gần_đến
gặp
gặp_khó_khăn
gặp_phải
gồm
hay
hay_biết
hay_hay
hay_không
hay_là
hay_làm
hay_nhỉ
hay_nói
hay_sao
hay_tin
hay_đâu
hiểu
hiện_nay
hiện_tại
hoàn_toàn
hoặc
hoặc_là
hãy
hãy_còn
hơn
hơn_cả
hơn_hết
hơn_là
hơn_nữa
hơn_trước
hầu_hết
hết
hết_chuyện
hết_cả
hết_của
hết_nói
hết_ráo
hết_rồi
hết_ý
họ
họ_gần
họ_xa
hỏi
hỏi_lại
hỏi_xem
hỏi_xin
hỗ_trợ
khi
khi_khác
khi_không
khi_nào
khi_nên
khi_trước
khiến
khoảng
khoảng_cách
khoảng_không
khá
khá_tốt
khác
khác_gì
khác_khác
khác_nhau
khác_nào
khác_thường
khác_xa
khách
khó
khó_biết
khó_chơi
khó_khăn
khó_làm
khó_mở
khó_nghe
khó_nghĩ
khó_nói
khó_thấy
khó_tránh
không
không_ai
không_bao_giờ
không_bao_lâu
không_biết
không_bán
không_chỉ
không_còn
không_có
không_có_gì
không_cùng
không_cần
không_cứ
không_dùng
không_gì
không_hay
không_khỏi
không_kể
không_ngoài
không_nhận
không_những
không_phải
không_phải_không
không_thể
không_tính
không_điều_kiện
không_được
không_đầy
không_để
khẳng_định
khỏi
khỏi_nói
kể
kể_cả
kể_như
kể_tới
kể_từ
liên_quan
loại
loại_từ
luôn
luôn_cả
luôn_luôn
luôn_tay
là
là_cùng
là_là
là_nhiều
là_phải
là_thế_nào
là_vì
là_ít
làm
làm_bằng
làm_cho
làm_dần_dần
làm_gì
làm_lòng
làm_lại
làm_lấy
làm_mất
làm_ngay
làm_như
làm_nên
làm_ra
làm_riêng
làm_sao
làm_theo
làm_thế_nào
làm_tin
làm_tôi
làm_tăng
làm_tại
làm_tắp_lự
làm_vì
làm_đúng
làm_được
lâu
lâu_các
lâu_lâu
lâu_nay
lâu_ngày
lên
lên_cao
lên_cơn
lên_mạnh
lên_ngôi
lên_nước
lên_số
lên_xuống
lên_đến
lòng
lòng_không
lúc
lúc_khác
lúc_lâu
lúc_nào
lúc_này
lúc_sáng
lúc_trước
lúc_đi
lúc_đó
lúc_đến
lúc_ấy
lý_do
lượng
lượng_cả
lượng_số
lượng_từ
lại
lại_bộ
lại_cái
lại_còn
lại_giống
lại_làm
lại_người
lại_nói
lại_nữa
lại_quả
lại_thôi
lại_ăn
lại_đây
lấy
lấy_có
lấy_cả
lấy_giống
lấy_làm
lấy_lý_do
lấy_lại
lấy_ra
lấy_ráo
lấy_sau
lấy_số
lấy_thêm
lấy_thế
lấy_vào
lấy_xuống
lấy_được
lấy_để
lần
lần_khác
lần_lần
lần_nào
lần_này
lần_sang
lần_sau
lần_theo
lần_trước
lần_tìm
lớn
lớn_lên
lớn_nhỏ
lời
lời_chú
lời_nói
mang
mang_lại
mang_mang
mang_nặng
mang_về
muốn
mà
mà_cả
mà_không
mà_lại
mà_thôi
mà_vẫn
mình
mạnh
mất
mất_còn
mọi
mọi_giờ
mọi_khi
mọi_lúc
mọi_người
mọi_nơi
mọi_sự
mọi_thứ
mọi_việc
mối
mỗi
mỗi_lúc
mỗi_lần
mỗi_một
mỗi_ngày
mỗi_người
một
một_cách
một_cơn
một_khi
một_lúc
một_số
một_vài
một_ít
mới
mới_hay
mới_rồi
mới_đây
mở
mở_mang
mở_nước
mở_ra
mợ
mức
nay
ngay
ngay_bây_giờ
ngay_cả
ngay_khi
ngay_khi_đến
ngay_lúc
ngay_lúc_này
ngay_lập_tức
ngay_thật
ngay_tức_khắc
ngay_tức_thì
ngay_từ
nghe
nghe_chừng
nghe_hiểu
nghe_không
nghe_lại
nghe_nhìn
nghe_như
nghe_nói
nghe_ra
nghe_rõ
nghe_thấy
nghe_tin
nghe_trực_tiếp
nghe_đâu
nghe_đâu_như
nghe_được
nghen
nghiễm_nhiên
nghĩ
nghĩ_lại
nghĩ_ra
nghĩ_tới
nghĩ_xa
nghĩ_đến
nghỉm
ngoài
ngoài_này
ngoài_ra
ngoài_xa
ngoải
nguồn
ngày
ngày_càng
ngày_cấp
ngày_giờ
ngày_ngày
ngày_nào
ngày_này
ngày_nọ
ngày_qua
ngày_rày
ngày_tháng
ngày_xưa
ngày_xửa
ngày_đến
ngày_ấy
ngôi
ngôi_nhà
ngôi_thứ
ngõ_hầu
ngăn_ngắt
ngươi
người
người_hỏi
người_khác
người_khách
người_mình
người_nghe
người_người
người_nhận
ngọn
ngọn_nguồn
ngọt
ngồi
ngồi_bệt
ngồi_không
ngồi_sau
ngồi_trệt
ngộ_nhỡ
nhanh
nhanh_lên
nhanh_tay
nhau
nhiên_hậu
nhiều
nhiều_ít
nhiệt_liệt
nhung_nhăng
nhà
nhà_chung
nhà_khó
nhà_làm
nhà_ngoài
nhà_ngươi
nhà_tôi
nhà_việc
nhân_dịp
nhân_tiện
nhé
nhìn
nhìn_chung
nhìn_lại
nhìn_nhận
nhìn_theo
nhìn_thấy
nhìn_xuống
nhóm
nhón_nhén
như
như_ai
như_chơi
như_không
như_là
như_nhau
như_quả
như_sau
như_thường
như_thế
như_thế_nào
như_thể
như_trên
như_trước
như_tuồng
như_vậy
như_ý
nhưng
nhưng_mà
nhược_bằng
nhất
nhất_loạt
nhất_luật
nhất_là
nhất_mực
nhất_nhất
nhất_quyết
nhất_sinh
nhất_thiết
nhất_thì
nhất_tâm
nhất_tề
nhất_đán
nhất_định
nhận
nhận_biết
nhận_họ
nhận_làm
nhận_nhau
nhận_ra
nhận_thấy
nhận_việc
nhận_được
nhằm
nhằm_khi
nhằm_lúc
nhằm_vào
nhằm_để
nhỉ
nhỏ
nhỏ_người
nhớ
nhớ_bập_bõm
nhớ_lại
nhớ_lấy
nhớ_ra
nhờ
nhờ_chuyển
nhờ_có
nhờ_nhờ
nhờ_đó
nhỡ_ra
những
những_ai
những_khi
những_là
những_lúc
những_muốn
những_như
nào
nào_cũng
nào_hay
nào_là
nào_phải
nào_đâu
nào_đó
này
này_nọ
nên
nên_chi
nên_chăng
nên_làm
nên_người
nên_tránh
nó
nóc
nói
nói_bông
nói_chung
nói_khó
nói_là
nói_lên
nói_lại
nói_nhỏ
nói_phải
nói_qua
nói_ra
nói_riêng
nói_rõ
nói_thêm
nói_thật
nói_toẹt
nói_trước
nói_tốt
nói_với
nói_xa
nói_ý
nói_đến
nói_đủ
năm
năm_tháng
nơi
nơi_nơi
nước
nước_bài
nước_cùng
nước_lên
nước_nặng
nước_quả
nước_xuống
nước_ăn
nước_đến
nấy
nặng
nặng_căn
nặng_mình
nặng_về
nếu
nếu_có
nếu_cần
nếu_không
nếu_mà
nếu_như
nếu_thế
nếu_vậy
nếu_được
nền
nọ
nớ
nức_nở
nữa
nữa_khi
nữa_là
nữa_rồi
oai_oái
oái
pho
phè
phè_phè
phía
phía_bên
phía_bạn
phía_dưới
phía_sau
phía_trong
phía_trên
phía_trước
phóc
phót
phù_hợp
phăn_phắt
phương_chi
phải
phải_biết
phải_chi
phải_chăng
phải_cách
phải_cái
phải_giờ
phải_khi
phải_không
phải_lại
phải_lời
phải_người
phải_như
phải_rồi
phải_tay
phần
phần_lớn
phần_nhiều
phần_nào
phần_sau
phần_việc
phắt
phỉ_phui
phỏng
phỏng_như
phỏng_nước
phỏng_theo
phỏng_tính
phốc
phụt
phứt
qua
qua_chuyện
qua_khỏi
qua_lại
qua_lần
qua_ngày
qua_tay
qua_thì
qua_đi
quan_trọng
quan_trọng_vấn_đề
quan_tâm
quay
quay_bước
quay_lại
quay_số
quay_đi
quá
quá_bán
quá_bộ
quá_giờ
quá_lời
quá_mức
quá_nhiều
quá_tay
quá_thì
quá_tin
quá_trình
quá_tuổi
quá_đáng
quá_ư
quả
quả_là
quả_thật
quả_thế
quả_vậy
quận
ra
ra_bài
ra_bộ
ra_chơi
ra_gì
ra_lại
ra_lời
ra_ngôi
ra_người
ra_sao
ra_tay
ra_vào
ra_ý
ra_điều
ra_đây
ren_rén
riu_ríu
riêng
riêng_từng
riệt
rày
ráo
ráo_cả
ráo_nước
ráo_trọi
rén
rén_bước
rích
rón_rén
rõ
rõ_là
rõ_thật
rút_cục
răng
răng_răng
rất
rất_lâu
rằng
rằng_là
rốt_cuộc
rốt_cục
rồi
rồi_nữa
rồi_ra
rồi_sao
rồi_sau
rồi_tay
rồi_thì
rồi_xem
rồi_đây
rứa
sa_sả
sang
sang_năm
sang_sáng
sang_tay
sao
sao_bản
sao_bằng
sao_cho
sao_vậy
sao_đang
sau
sau_chót
sau_cuối
sau_cùng
sau_hết
sau_này
sau_nữa
sau_sau
sau_đây
sau_đó
so
so_với
song_le
suýt
suýt_nữa
sáng
sáng_ngày
sáng_rõ
sáng_thế
sáng_ý
sì
sì_sì
sất
sắp
sắp_đặt
sẽ
sẽ_biết
sẽ_hay
số
số_cho_biết
số_cụ_thể
số_loại
số_là
số_người
số_phần
số_thiếu
sốt_sột
sớm
sớm_ngày
sở_dĩ
sử_dụng
sự
sự_thế
sự_việc
tanh
tanh_tanh
tay
tay_quay
tha_hồ
tha_hồ_chơi
tha_hồ_ăn
than_ôi
thanh
thanh_ba
thanh_chuyển
thanh_không
thanh_thanh
thanh_tính
thanh_điều_kiện
thanh_điểm
thay_đổi
thay_đổi_tình_trạng
theo
theo_bước
theo_như
theo_tin
thi_thoảng
thiếu
thiếu_gì
thiếu_điểm
thoạt
thoạt_nghe
thoạt_nhiên
thoắt
thuần
thuần_ái
thuộc
thuộc_bài
thuộc_cách
thuộc_lại
thuộc_từ
thà
thà_là
thà_rằng
thành_ra
thành_thử
thái_quá
tháng
tháng_ngày
tháng_năm
tháng_tháng
thêm
thêm_chuyện
thêm_giờ
thêm_vào
thì
thì_giờ
thì_là
thì_phải
thì_ra
thì_thôi
thình_lình
thích
thích_cứ
thích_thuộc
thích_tự
thích_ý
thím
thôi
thôi_việc
thúng_thắng
thương_ôi
thường
thường_bị
thường_hay
thường_khi
thường_số
thường_sự
thường_thôi
thường_thường
thường_tính
thường_tại
thường_xuất_hiện
thường_đến
thảo_hèn
thảo_nào
thấp
thấp_cơ
thấp_thỏm
thấp_xuống
thấy
thấy_tháng
thẩy
thậm
thậm_chí
thậm_cấp
thậm_từ
thật
thật_chắc
thật_là
thật_lực
thật_quả
thật_ra
thật_sự
thật_thà
thật_tốt
thật_vậy
thế
thế_chuẩn_bị
thế_là
thế_lại
thế_mà
thế_nào
thế_nên
thế_ra
thế_sự
thế_thì
thế_thôi
thế_thường
thế_thế
thế_à
thế_đó
thếch
thỉnh_thoảng
thỏm
thốc
thốc_tháo
thốt
thốt_nhiên
thốt_nói
thốt_thôi
thộc
thời_gian
thời_gian_sử_dụng
thời_gian_tính
thời_điểm
thục_mạng
thứ
thứ_bản
thứ_đến
thửa
thực_hiện
thực_hiện_đúng
thực_ra
thực_sự
thực_tế
thực_vậy
tin
tin_thêm
tin_vào
tiếp_theo
tiếp_tục
tiếp_đó
tiện_thể
toà
toé_khói
toẹt
trong
trong_khi
trong_lúc
trong_mình
trong_ngoài
trong_này
trong_số
trong_vùng
trong_đó
trong_ấy
tránh
tránh_khỏi
tránh_ra
tránh_tình_trạng
tránh_xa
trên
trên_bộ
trên_dưới
trước
trước_hết
trước_khi
trước_kia
trước_nay
trước_ngày
trước_nhất
trước_sau
trước_tiên
trước_tuổi
trước_đây
trước_đó
trả
trả_của
trả_lại
trả_ngay
trả_trước
trếu_tráo
trển
trệt
trệu_trạo
trỏng
trời_đất_ơi
trở_thành
trừ_phi
trực_tiếp
trực_tiếp_làm
tuy
tuy_có
tuy_là
tuy_nhiên
tuy_rằng
tuy_thế
tuy_vậy
tuy_đã
tuyệt_nhiên
tuần_tự
tuốt_luốt
tuốt_tuồn_tuột
tuốt_tuột
tuổi
tuổi_cả
tuổi_tôi
tà_tà
tên
tên_chính
tên_cái
tên_họ
tên_tự
tênh
tênh_tênh
tìm
tìm_bạn
tìm_cách
tìm_hiểu
tìm_ra
tìm_việc
tình_trạng
tính
tính_cách
tính_căn
tính_người
tính_phỏng
tính_từ
tít_mù
tò_te
tôi
tôi_con
tông_tốc
tù_tì
tăm_tắp
tăng
tăng_chúng
tăng_cấp
tăng_giảm
tăng_thêm
tăng_thế
tại
tại_lòng
tại_nơi
tại_sao
tại_tôi
tại_vì
tại_đâu
tại_đây
tại_đó
tạo
tạo_cơ_hội
tạo_nên
tạo_ra
tạo_ý
tạo_điều_kiện
tấm
tấm_bản
tấm_các
tấn
tấn_tới
tất_cả
tất_cả_bao_nhiêu
tất_thảy
tất_tần_tật
tất_tật
tập_trung
tắp
tắp_lự
tắp_tắp
tọt
tỏ_ra
tỏ_vẻ
tốc_tả
tối_ư
tốt
tốt_bạn
tốt_bộ
tốt_hơn
tốt_mối
tốt_ngày
tột
tột_cùng
tớ
tới
tới_gần
tới_mức
tới_nơi
tới_thì
tức_thì
tức_tốc
từ
từ_căn
từ_giờ
từ_khi
từ_loại
từ_nay
từ_thế
từ_tính
từ_tại
từ_từ
từ_ái
từ_điều
từ_đó
từ_ấy
từng
từng_cái
từng_giờ
từng_nhà
từng_phần
từng_thời_gian
từng_đơn_vị
từng_ấy
tự
tự_cao
tự_khi
tự_lượng
tự_tính
tự_tạo
tự_vì
tự_ý
tự_ăn
tựu_trung
veo
veo_veo
việc
việc_gì
vung_thiên_địa
vung_tàn_tán
vung_tán_tàn
và
vài
vài_ba
vài_người
vài_nhà
vài_nơi
vài_tên
vài_điều
vào
vào_gặp
vào_khoảng
vào_lúc
vào_vùng
vào_đến
vâng
vâng_chịu
vâng_dạ
vâng_vâng
vâng_ý
vèo
vèo_vèo
vì
vì_chưng
vì_rằng
vì_sao
vì_thế
vì_vậy
ví_bằng
ví_dù
ví_phỏng
ví_thử
vô_hình_trung
vô_kể
vô_luận
vô_vàn
vùng
vùng_lên
vùng_nước
văng_tê
vượt
vượt_khỏi
vượt_quá
vạn_nhất
vả_chăng
vả_lại
vấn_đề
vấn_đề_quan_trọng
vẫn
vẫn_thế
vậy
vậy_là
vậy_mà
vậy_nên
vậy_ra
vậy_thì
vậy_ư
về
về_không
về_nước
về_phần
về_sau
về_tay
vị_trí
vị_tất
vốn_dĩ
với
với_lại
với_nhau
vở
vụt
vừa
vừa_khi
vừa_lúc
vừa_mới
vừa_qua
vừa_rồi
vừa_vừa
xa
xa_cách
xa_gần
xa_nhà
xa_tanh
xa_tắp
xa_xa
xa_xả
xem
xem_lại
xem_ra
xem_số
xin
xin_gặp
xin_vâng
xiết_bao
xon_xón
xoành_xoạch
xoét
xoẳn
xoẹt
xuất_hiện
xuất_kì_bất_ý
xuất_kỳ_bất_ý
xuể
xuống
xăm_xúi
xăm_xăm
xăm_xắm
xảy_ra
xềnh_xệch
xệp
xử_lý
yêu_cầu
à
à_này
à_ơi
ào
ào_vào
ào_ào
á
á_à
ái
ái_chà
ái_dà
áng
áng_như
âu_là
ít
ít_biết
ít_có
ít_hơn
ít_khi
ít_lâu
ít_nhiều
ít_nhất
ít_nữa
ít_quá
ít_ra
ít_thôi
ít_thấy
ô_hay
ô_hô
ô_kê
ô_kìa
ôi_chao
ôi_thôi
ông
ông_nhỏ
ông_tạo
ông_từ
ông_ấy
ông_ổng
úi
úi_chà
úi_dào
ý
ý_chừng
ý_da
ý_hoặc
ăn
ăn_chung
ăn_chắc
ăn_chịu
ăn_cuộc
ăn_hết
ăn_hỏi
ăn_làm
ăn_người
ăn_ngồi
ăn_quá
ăn_riêng
ăn_sáng
ăn_tay
ăn_trên
ăn_về
đang
đang_tay
đang_thì
điều
điều_gì
điều_kiện
điểm
điểm_chính
điểm_gặp
điểm_đầu_tiên
đành_đạch
đáng
đáng_kể
đáng_lí
đáng_lý
đáng_lẽ
đáng_số
đánh_giá
đánh_đùng
đáo_để
đâu
đâu_có
đâu_cũng
đâu_như
đâu_nào
đâu_phải
đâu_đâu
đâu_đây
đâu_đó
đây
đây_này
đây_rồi
đây_đó
đã
đã_hay
đã_không
đã_là
đã_lâu
đã_thế
đã_vậy
đã_đủ
đó
đó_đây
đúng
đúng_ngày
đúng_ra
đúng_tuổi
đúng_với
đơn_vị
đưa
đưa_cho
đưa_chuyện
đưa_em
đưa_ra
đưa_tay
đưa_tin
đưa_tới
đưa_vào
đưa_về
đưa_xuống
đưa_đến
được
được_cái
được_lời
được_nước
được_tin
đại_loại
đại_nhân
đại_phàm
đại_để
đạt
đảm_bảo
đầu_tiên
đầy
đầy_năm
đầy_phè
đầy_tuổi
đặc_biệt
đặt
đặt_làm
đặt_mình
đặt_mức
đặt_ra
đặt_trước
đặt_để
đến
đến_bao_giờ
đến_cùng
đến_cùng_cực
đến_cả
đến_giờ
đến_gần
đến_hay
đến_khi
đến_lúc
đến_lời
đến_nay
đến_ngày
đến_nơi
đến_nỗi
đến_thì
đến_thế
đến_tuổi
đến_xem
đến_điều
đến_đâu
đều
đều_bước
đều_nhau
đều_đều
để
để_cho
để_giống
để_không
để_lòng
để_lại
để_mà
để_phần
để_được
để_đến_nỗi
đối_với
đồng_thời
đủ
đủ_dùng
đủ_nơi
đủ_số
đủ_điều
đủ_điểm
ơ
ơ_hay
ơ_kìa
ơi
ơi_là
ư
ạ
ạ_ơi
ấy
ấy_là
ầu_ơ
ắt
ắt_hẳn
ắt_là
ắt_phải
ắt_thật
ối_dào
ối_giời
ối_giời_ơi
ồ
ồ_ồ
ổng
ớ
ớ_này
ờ
ờ_ờ
ở
ở_lại
ở_như
ở_nhờ
ở_năm
ở_trên
ở_vào
ở_đây
ở_đó
ở_được
ủa
ứ_hự
ứ_ừ
ừ
ừ_nhé
ừ_thì
ừ_ào
ừ_ừ
ử
""".split(
"\n"
)
)
| 15,387 | 6.891282 | 59 | py |
spaCy | spaCy-master/spacy/lang/xx/__init__.py | from ...language import Language
class MultiLanguage(Language):
"""Language class to be used for models that support multiple languages.
This module allows models to specify their language ID as 'xx'.
"""
lang = "xx"
__all__ = ["MultiLanguage"]
| 266 | 19.538462 | 76 | py |
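
"xx" is the language code to use when a tokenizer or pipeline should not assume any particular language.
A minimal sketch:

import spacy

nlp = spacy.blank("xx")  # language-neutral defaults
doc = nlp("Berlin and Warszawa are capitals.")
print([t.text for t in doc])
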
spaCy | spaCy-master/spacy/lang/xx/examples.py | """
Example sentences to test spaCy and its language models.
>>> from spacy.lang.de.examples import sentences
>>> docs = nlp.pipe(sentences)
"""
# combined examples from de/en/es/fr/it/nl/pl/pt/ru
sentences = [
"Die ganze Stadt ist ein Startup: Shenzhen ist das Silicon Valley für Hardware-Firmen",
"Wie deutsche Startups die Technologie vorantreiben wollen: Künstliche Intelligenz",
"Trend zum Urlaub in Deutschland beschert Gastwirten mehr Umsatz",
"Bundesanwaltschaft erhebt Anklage gegen mutmaßlichen Schweizer Spion",
"San Francisco erwägt Verbot von Lieferrobotern",
"Autonome Fahrzeuge verlagern Haftpflicht auf Hersteller",
"Wo bist du?",
"Was ist die Hauptstadt von Deutschland?",
"Apple is looking at buying U.K. startup for $1 billion",
"Autonomous cars shift insurance liability toward manufacturers",
"San Francisco considers banning sidewalk delivery robots",
"London is a big city in the United Kingdom.",
"Where are you?",
"Who is the president of France?",
"What is the capital of the United States?",
"When was Barack Obama born?",
"Apple está buscando comprar una startup del Reino Unido por mil millones de dólares.",
"Los coches autónomos delegan la responsabilidad del seguro en sus fabricantes.",
"San Francisco analiza prohibir los robots delivery.",
"Londres es una gran ciudad del Reino Unido.",
"El gato come pescado.",
"Veo al hombre con el telescopio.",
"La araña come moscas.",
"El pingüino incuba en su nido.",
"Apple cherche à acheter une start-up anglaise pour 1 milliard de dollars",
"Les voitures autonomes déplacent la responsabilité de l'assurance vers les constructeurs",
"San Francisco envisage d'interdire les robots coursiers sur les trottoirs",
"Londres est une grande ville du Royaume-Uni",
"L’Italie choisit ArcelorMittal pour reprendre la plus grande aciérie d’Europe",
"Apple lance HomePod parce qu'il se sent menacé par l'Echo d'Amazon",
"La France ne devrait pas manquer d'électricité cet été, même en cas de canicule",
"Nouvelles attaques de Trump contre le maire de Londres",
"Où es-tu ?",
"Qui est le président de la France ?",
"Où est la capitale des États-Unis ?",
"Quand est né Barack Obama ?",
"Apple vuole comprare una startup del Regno Unito per un miliardo di dollari",
"Le automobili a guida autonoma spostano la responsabilità assicurativa verso i produttori",
"San Francisco prevede di bandire i robot di consegna porta a porta",
"Londra è una grande città del Regno Unito.",
"Apple overweegt om voor 1 miljard een U.K. startup te kopen",
"Autonome auto's verschuiven de verzekeringverantwoordelijkheid naar producenten",
"San Francisco overweegt robots op voetpaden te verbieden",
"Londen is een grote stad in het Verenigd Koninkrijk",
"Poczuł przyjemną woń mocnej kawy.",
"Istnieje wiele dróg oddziaływania substancji psychoaktywnej na układ nerwowy.",
"Powitał mnie biało-czarny kot, płosząc siedzące na płocie trzy dorodne dudki.",
"Nowy abonament pod lupą Komisji Europejskiej",
"Czy w ciągu ostatnich 48 godzin spożyłeś leki zawierające paracetamol?",
"Kto ma ochotę zapoznać się z innymi niż w książkach przygodami Muminków i ich przyjaciół, temu polecam komiks Tove Jansson „Muminki i morze”.",
"Apple está querendo comprar uma startup do Reino Unido por 100 milhões de dólares.",
"Carros autônomos empurram a responsabilidade do seguro para os fabricantes..",
"São Francisco considera banir os robôs de entrega que andam pelas calçadas.",
"Londres é a maior cidade do Reino Unido.",
# Translations from English:
"Apple рассматривает возможность покупки стартапа из Соединённого Королевства за $1 млрд",
"Беспилотные автомобили перекладывают страховую ответственность на производителя",
"В Сан-Франциско рассматривается возможность запрета роботов-курьеров, которые перемещаются по тротуару",
"Лондон — это большой город в Соединённом Королевстве",
# Native Russian sentences:
# Colloquial:
"Да, нет, наверное!", # Typical polite refusal
"Обратите внимание на необыкновенную красоту этого города-героя Москвы, столицы нашей Родины!", # From a tour guide speech
# Examples of Bookish Russian:
# Quote from "The Golden Calf"
"Рио-де-Жанейро — это моя мечта, и не смейте касаться её своими грязными лапами!",
# Quotes from "Ivan Vasilievich changes his occupation"
"Ты пошто боярыню обидел, смерд?!!",
"Оставь меня, старушка, я в печали!",
# Quotes from Dostoevsky:
"Уж коли я, такой же, как и ты, человек грешный, над тобой умилился и пожалел тебя, кольми паче бог",
"В мечтах я нередко, говорит, доходил до страстных помыслов о служении человечеству и может быть действительно пошел бы на крест за людей, если б это вдруг как-нибудь потребовалось, а между тем я двух дней не в состоянии прожить ни с кем в одной комнате, о чем знаю из опыта",
"Зато всегда так происходило, что чем более я ненавидел людей в частности, тем пламеннее становилась любовь моя к человечеству вообще",
# Quotes from Chekhov:
"Ненужные дела и разговоры всё об одном отхватывают на свою долю лучшую часть времени, лучшие силы, и в конце концов остается какая-то куцая, бескрылая жизнь, какая-то чепуха, и уйти и бежать нельзя, точно сидишь в сумасшедшем доме или в арестантских ротах!",
# Quotes from Turgenev:
"Нравится тебе женщина, старайся добиться толку; а нельзя — ну, не надо, отвернись — земля не клином сошлась",
"Узенькое местечко, которое я занимаю, до того крохотно в сравнении с остальным пространством, где меня нет и где дела до меня нет; и часть времени, которую мне удастся прожить, так ничтожна перед вечностью, где меня не было и не будет...",
# Quotes from newspapers:
# Komsomolskaya Pravda:
"На заседании президиума правительства Москвы принято решение присвоить статус инвестиционного приоритетного проекта города Москвы киностудии Союзмультфильм",
"Глава Минобороны Сергей Шойгу заявил, что обстановка на этом стратегическом направлении требует непрерывного совершенствования боевого состава войск",
# Argumenty i Facty:
"На реплику лже-Говина — дескать, он (Волков) будет лучшим революционером — Стамп с энтузиазмом ответил: Непременно!",
]
| 6,350 | 65.15625 | 280 | py |
spaCy | spaCy-master/spacy/lang/yo/__init__.py | from ...language import BaseDefaults, Language
from .lex_attrs import LEX_ATTRS
from .stop_words import STOP_WORDS
class YorubaDefaults(BaseDefaults):
lex_attr_getters = LEX_ATTRS
stop_words = STOP_WORDS
class Yoruba(Language):
lang = "yo"
Defaults = YorubaDefaults
__all__ = ["Yoruba"]
| 309 | 17.235294 | 46 | py |
spaCy | spaCy-master/spacy/lang/yo/examples.py | """
Example sentences to test spaCy and its language models.
>>> from spacy.lang.yo.examples import sentences
>>> docs = nlp.pipe(sentences)
"""
# 1. https://yo.wikipedia.org/wiki/Wikipedia:%C3%80y%E1%BB%8Dk%C3%A0_p%C3%A0t%C3%A0k%C3%AC
# 2.https://yo.wikipedia.org/wiki/Oj%C3%BAew%C3%A9_%C3%80k%E1%BB%8D%CC%81k%E1%BB%8D%CC%81
# 3. https://www.bbc.com/yoruba
sentences = [
"Ìjọba Tanzania fi Ajìjàgbara Ọmọ Orílẹ̀-èdèe Uganda sí àtìmọ́lé",
"Olúṣẹ́gun Ọbásanjọ́, tí ó jẹ́ Ààrẹ ìjọba ológun àná (láti ọdún 1976 sí 1979), tí ó sì tún ṣe Ààrẹ ìjọba alágbádá tí ìbò gbé wọlé (ní ọdún 1999 sí 2007), kúndùn láti máa bu ẹnu àtẹ́ lu àwọn "
"ètò ìjọba Ààrẹ orílẹ̀-èdè Nàìjíríà tí ó jẹ tẹ̀lé e.",
"Akin Alabi rán ẹnu mọ́ agbárá Adárí Òsìsẹ̀, àwọn ọmọ Nàìjíríà dẹnu bò ó",
"Ta ló leè dúró s'ẹ́gbẹ̀ẹ́ Okunnu láì rẹ́rìín?",
"Dídarapọ̀ mọ́n ìpolongo",
"Bi a se n so, omobinrin ni oruko ni ojo kejo bee naa ni omokunrin ni oruko ni ojo kesan.",
"Oríṣìíríṣìí nǹkan ló le yọrí sí orúkọ tí a sọ ọmọ",
"Gbogbo won ni won ni oriki ti won",
]
| 1,061 | 45.173913 | 196 | py |
spaCy | spaCy-master/spacy/lang/yo/lex_attrs.py | import unicodedata
from ...attrs import LIKE_NUM
_num_words = [
"ení",
"oókàn",
"ọ̀kanlá",
"ẹ́ẹdọ́gbọ̀n",
"àádọ́fà",
"ẹ̀walélúɡba",
"egbèje",
"ẹgbàárin",
"èjì",
"eéjì",
"èjìlá",
"ọgbọ̀n,",
"ọgọ́fà",
"ọ̀ọ́dúrún",
"ẹgbẹ̀jọ",
"ẹ̀ẹ́dẹ́ɡbàárùn",
"ẹ̀ta",
"ẹẹ́ta",
"ẹ̀talá",
"aárùndílogójì",
"àádóje",
"irinwó",
"ẹgbẹ̀sàn",
"ẹgbàárùn",
"ẹ̀rin",
"ẹẹ́rin",
"ẹ̀rinlá",
"ogójì",
"ogóje",
"ẹ̀ẹ́dẹ́gbẹ̀ta",
"ẹgbàá",
"ẹgbàájọ",
"àrún",
"aárùn",
"ẹ́ẹdógún",
"àádọ́ta",
"àádọ́jọ",
"ẹgbẹ̀ta",
"ẹgboókànlá",
"ẹgbàawǎ",
"ẹ̀fà",
"ẹẹ́fà",
"ẹẹ́rìndílógún",
"ọgọ́ta",
"ọgọ́jọ",
"ọ̀ọ́dẹ́gbẹ̀rin",
"ẹgbẹ́ẹdógún",
"ọkẹ́marun",
"èje",
"etàdílógún",
"àádọ́rin",
"àádọ́sán",
"ẹgbẹ̀rin",
"ẹgbàajì",
"ẹgbẹ̀ẹgbẹ̀rún",
"ẹ̀jọ",
"ẹẹ́jọ",
"eéjìdílógún",
"ọgọ́rin",
"ọgọsàn",
"ẹ̀ẹ́dẹ́gbẹ̀rún",
"ẹgbẹ́ẹdọ́gbọ̀n",
"ọgọ́rùn ọkẹ́",
"ẹ̀sán",
"ẹẹ́sàn",
"oókàndílógún",
"àádọ́rùn",
"ẹ̀wadilúɡba",
"ẹgbẹ̀rún",
"ẹgbàáta",
"ẹ̀wá",
"ẹẹ́wàá",
"ogún",
"ọgọ́rùn",
"igba",
"ẹgbẹ̀fà",
"ẹ̀ẹ́dẹ́ɡbarin",
]
def strip_accents_text(text):
"""
Converts the string to NFD, separates & returns only the base characters
    :param text: the input string
:return: input string without diacritic adornments on base characters
"""
return "".join(
c for c in unicodedata.normalize("NFD", text) if unicodedata.category(c) != "Mn"
)
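# Worked examples (illustrative only): NFD splits a character such as "ú"
# into "u" plus a combining acute accent (category Mn), which is dropped:
#     strip_accents_text("ogún")  -> "ogun"
#     strip_accents_text("àárùn") -> "aarun"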
def like_num(text):
text = text.replace(",", "").replace(".", "")
num_markers = ["dí", "dọ", "lé", "dín", "di", "din", "le", "do"]
if any(mark in text for mark in num_markers):
return True
text = strip_accents_text(text)
_num_words_stripped = [strip_accents_text(num) for num in _num_words]
if text.isdigit():
return True
if text in _num_words_stripped or text.lower() in _num_words_stripped:
return True
return False
LEX_ATTRS = {LIKE_NUM: like_num}
| 2,103 | 17.785714 | 88 | py |
spaCy | spaCy-master/spacy/lang/yo/stop_words.py | # stop words as whitespace-separated list.
# Source: https://raw.githubusercontent.com/dohliam/more-stoplists/master/yo/yo.txt
STOP_WORDS = set(
"a an b bá bí bẹ̀rẹ̀ d e f fún fẹ́ g gbogbo i inú j jù jẹ jẹ́ k kan kì kí kò "
"l láti lè lọ m mi mo máa mọ̀ n ni náà ní nígbà nítorí nǹkan o p padà pé "
"púpọ̀ pẹ̀lú r rẹ̀ s sì sí sínú t ti tí u w wà wá wọn wọ́n y yìí à àti àwọn á "
"è é ì í ò òun ó ù ú ń ńlá ǹ ̀ ́ ̣ ṣ ṣe ṣé ṣùgbọ́n ẹ ẹmọ́ ọ ọjọ́ ọ̀pọ̀lọpọ̀".split()
)
| 483 | 47.4 | 88 | py |
spaCy | spaCy-master/spacy/lang/zh/__init__.py | import tempfile
import warnings
from enum import Enum
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Optional
import srsly
from ... import util
from ...errors import Errors, Warnings
from ...language import BaseDefaults, Language
from ...scorer import Scorer
from ...tokens import Doc
from ...training import Example, validate_examples
from ...util import DummyTokenizer, load_config_from_str, registry
from ...vocab import Vocab
from .lex_attrs import LEX_ATTRS
from .stop_words import STOP_WORDS
# fmt: off
_PKUSEG_INSTALL_MSG = "install spacy-pkuseg with `pip install \"spacy-pkuseg>=0.0.27,<0.1.0\"` or `conda install -c conda-forge \"spacy-pkuseg>=0.0.27,<0.1.0\"`"
# fmt: on
DEFAULT_CONFIG = """
[nlp]
[nlp.tokenizer]
@tokenizers = "spacy.zh.ChineseTokenizer"
segmenter = "char"
[initialize]
[initialize.tokenizer]
pkuseg_model = null
pkuseg_user_dict = "default"
"""
class Segmenter(str, Enum):
char = "char"
jieba = "jieba"
pkuseg = "pkuseg"
@classmethod
def values(cls):
return list(cls.__members__.keys())
@registry.tokenizers("spacy.zh.ChineseTokenizer")
def create_chinese_tokenizer(segmenter: Segmenter = Segmenter.char):
def chinese_tokenizer_factory(nlp):
return ChineseTokenizer(nlp.vocab, segmenter=segmenter)
return chinese_tokenizer_factory
class ChineseTokenizer(DummyTokenizer):
def __init__(self, vocab: Vocab, segmenter: Segmenter = Segmenter.char):
self.vocab = vocab
self.segmenter = (
segmenter.value if isinstance(segmenter, Segmenter) else segmenter
)
self.pkuseg_seg = None
self.jieba_seg = None
if self.segmenter not in Segmenter.values():
warn_msg = Warnings.W103.format(
lang="Chinese",
segmenter=self.segmenter,
supported=", ".join(Segmenter.values()),
default="'char' (character segmentation)",
)
warnings.warn(warn_msg)
self.segmenter = Segmenter.char
if self.segmenter == Segmenter.jieba:
self.jieba_seg = try_jieba_import()
def initialize(
self,
get_examples: Optional[Callable[[], Iterable[Example]]] = None,
*,
nlp: Optional[Language] = None,
pkuseg_model: Optional[str] = None,
pkuseg_user_dict: Optional[str] = "default",
):
if self.segmenter == Segmenter.pkuseg:
if pkuseg_user_dict is None:
pkuseg_user_dict = pkuseg_model
self.pkuseg_seg = try_pkuseg_import(
pkuseg_model=pkuseg_model, pkuseg_user_dict=pkuseg_user_dict
)
def __call__(self, text: str) -> Doc:
if self.segmenter == Segmenter.jieba:
            words = [x for x in self.jieba_seg.cut(text, cut_all=False) if x]  # type: ignore[union-attr]
(words, spaces) = util.get_words_and_spaces(words, text)
return Doc(self.vocab, words=words, spaces=spaces)
elif self.segmenter == Segmenter.pkuseg:
if self.pkuseg_seg is None:
raise ValueError(Errors.E1000)
words = self.pkuseg_seg.cut(text)
(words, spaces) = util.get_words_and_spaces(words, text)
return Doc(self.vocab, words=words, spaces=spaces)
        # warn if the segmenter setting is anything other than the remaining "char" option
if self.segmenter != Segmenter.char:
warn_msg = Warnings.W103.format(
lang="Chinese",
segmenter=self.segmenter,
supported=", ".join(Segmenter.values()),
default="'char' (character segmentation)",
)
warnings.warn(warn_msg)
# split into individual characters
words = list(text)
(words, spaces) = util.get_words_and_spaces(words, text)
return Doc(self.vocab, words=words, spaces=spaces)
def pkuseg_update_user_dict(self, words: List[str], reset: bool = False):
if self.segmenter == Segmenter.pkuseg:
if reset:
try:
import spacy_pkuseg
self.pkuseg_seg.preprocesser = spacy_pkuseg.Preprocesser(None) # type: ignore[attr-defined]
except ImportError:
msg = (
"spacy_pkuseg not installed: unable to reset pkuseg "
"user dict. Please " + _PKUSEG_INSTALL_MSG
)
raise ImportError(msg) from None
for word in words:
self.pkuseg_seg.preprocesser.insert(word.strip(), "") # type: ignore[attr-defined]
else:
warn_msg = Warnings.W104.format(target="pkuseg", current=self.segmenter)
warnings.warn(warn_msg)
def score(self, examples):
validate_examples(examples, "ChineseTokenizer.score")
return Scorer.score_tokenization(examples)
def _get_config(self) -> Dict[str, Any]:
return {
"segmenter": self.segmenter,
}
def _set_config(self, config: Dict[str, Any] = {}) -> None:
self.segmenter = config.get("segmenter", Segmenter.char)
def to_bytes(self, **kwargs):
pkuseg_features_b = b""
pkuseg_weights_b = b""
pkuseg_processors_data = None
if self.pkuseg_seg:
with tempfile.TemporaryDirectory() as tempdir:
self.pkuseg_seg.feature_extractor.save(tempdir)
self.pkuseg_seg.model.save(tempdir)
tempdir = Path(tempdir)
with open(tempdir / "features.msgpack", "rb") as fileh:
pkuseg_features_b = fileh.read()
with open(tempdir / "weights.npz", "rb") as fileh:
pkuseg_weights_b = fileh.read()
pkuseg_processors_data = (
_get_pkuseg_trie_data(self.pkuseg_seg.preprocesser.trie),
self.pkuseg_seg.postprocesser.do_process,
sorted(list(self.pkuseg_seg.postprocesser.common_words)),
sorted(list(self.pkuseg_seg.postprocesser.other_words)),
)
serializers = {
"cfg": lambda: srsly.json_dumps(self._get_config()),
"pkuseg_features": lambda: pkuseg_features_b,
"pkuseg_weights": lambda: pkuseg_weights_b,
"pkuseg_processors": lambda: srsly.msgpack_dumps(pkuseg_processors_data),
}
return util.to_bytes(serializers, [])
def from_bytes(self, data, **kwargs):
pkuseg_data = {"features_b": b"", "weights_b": b"", "processors_data": None}
def deserialize_pkuseg_features(b):
pkuseg_data["features_b"] = b
def deserialize_pkuseg_weights(b):
pkuseg_data["weights_b"] = b
def deserialize_pkuseg_processors(b):
pkuseg_data["processors_data"] = srsly.msgpack_loads(b)
deserializers = {
"cfg": lambda b: self._set_config(srsly.json_loads(b)),
"pkuseg_features": deserialize_pkuseg_features,
"pkuseg_weights": deserialize_pkuseg_weights,
"pkuseg_processors": deserialize_pkuseg_processors,
}
util.from_bytes(data, deserializers, [])
if pkuseg_data["features_b"] and pkuseg_data["weights_b"]:
with tempfile.TemporaryDirectory() as tempdir:
tempdir = Path(tempdir)
with open(tempdir / "features.msgpack", "wb") as fileh:
fileh.write(pkuseg_data["features_b"])
with open(tempdir / "weights.npz", "wb") as fileh:
fileh.write(pkuseg_data["weights_b"])
try:
import spacy_pkuseg
except ImportError:
raise ImportError(
"spacy-pkuseg not installed. To use this model, "
+ _PKUSEG_INSTALL_MSG
) from None
self.pkuseg_seg = spacy_pkuseg.pkuseg(str(tempdir))
if pkuseg_data["processors_data"]:
processors_data = pkuseg_data["processors_data"]
(user_dict, do_process, common_words, other_words) = processors_data
self.pkuseg_seg.preprocesser = spacy_pkuseg.Preprocesser(user_dict)
self.pkuseg_seg.postprocesser.do_process = do_process
self.pkuseg_seg.postprocesser.common_words = set(common_words)
self.pkuseg_seg.postprocesser.other_words = set(other_words)
return self
def to_disk(self, path, **kwargs):
path = util.ensure_path(path)
def save_pkuseg_model(path):
if self.pkuseg_seg:
if not path.exists():
path.mkdir(parents=True)
self.pkuseg_seg.model.save(path)
self.pkuseg_seg.feature_extractor.save(path)
def save_pkuseg_processors(path):
if self.pkuseg_seg:
data = (
_get_pkuseg_trie_data(self.pkuseg_seg.preprocesser.trie),
self.pkuseg_seg.postprocesser.do_process,
sorted(list(self.pkuseg_seg.postprocesser.common_words)),
sorted(list(self.pkuseg_seg.postprocesser.other_words)),
)
srsly.write_msgpack(path, data)
serializers = {
"cfg": lambda p: srsly.write_json(p, self._get_config()),
"pkuseg_model": lambda p: save_pkuseg_model(p),
"pkuseg_processors": lambda p: save_pkuseg_processors(p),
}
return util.to_disk(path, serializers, [])
def from_disk(self, path, **kwargs):
path = util.ensure_path(path)
def load_pkuseg_model(path):
try:
import spacy_pkuseg
except ImportError:
if self.segmenter == Segmenter.pkuseg:
raise ImportError(
"spacy-pkuseg not installed. To use this model, "
+ _PKUSEG_INSTALL_MSG
) from None
if path.exists():
self.pkuseg_seg = spacy_pkuseg.pkuseg(path)
def load_pkuseg_processors(path):
try:
import spacy_pkuseg
except ImportError:
if self.segmenter == Segmenter.pkuseg:
                    raise ImportError(
                        "spacy-pkuseg not installed. To use this model, "
                        + _PKUSEG_INSTALL_MSG
                    ) from None
if self.segmenter == Segmenter.pkuseg:
data = srsly.read_msgpack(path)
(user_dict, do_process, common_words, other_words) = data
self.pkuseg_seg.preprocesser = spacy_pkuseg.Preprocesser(user_dict)
self.pkuseg_seg.postprocesser.do_process = do_process
self.pkuseg_seg.postprocesser.common_words = set(common_words)
self.pkuseg_seg.postprocesser.other_words = set(other_words)
serializers = {
"cfg": lambda p: self._set_config(srsly.read_json(p)),
"pkuseg_model": lambda p: load_pkuseg_model(p),
"pkuseg_processors": lambda p: load_pkuseg_processors(p),
}
util.from_disk(path, serializers, [])
class ChineseDefaults(BaseDefaults):
config = load_config_from_str(DEFAULT_CONFIG)
lex_attr_getters = LEX_ATTRS
stop_words = STOP_WORDS
writing_system = {"direction": "ltr", "has_case": False, "has_letters": False}
class Chinese(Language):
lang = "zh"
Defaults = ChineseDefaults
def try_jieba_import():
try:
import jieba
# segment a short text to have jieba initialize its cache in advance
list(jieba.cut("作为", cut_all=False))
return jieba
except ImportError:
msg = (
"Jieba not installed. To use jieba, install it with `pip "
" install jieba` or from https://github.com/fxsjy/jieba"
)
raise ImportError(msg) from None
def try_pkuseg_import(pkuseg_model: Optional[str], pkuseg_user_dict: Optional[str]):
try:
import spacy_pkuseg
except ImportError:
msg = "spacy-pkuseg not installed. To use pkuseg, " + _PKUSEG_INSTALL_MSG
raise ImportError(msg) from None
try:
return spacy_pkuseg.pkuseg(pkuseg_model, user_dict=pkuseg_user_dict)
except FileNotFoundError:
msg = "Unable to load pkuseg model from: " + str(pkuseg_model or "")
raise FileNotFoundError(msg) from None
def _get_pkuseg_trie_data(node, path=""):
data = []
for c, child_node in sorted(node.children.items()):
data.extend(_get_pkuseg_trie_data(child_node, path + c))
if node.isword:
data.append((path, node.usertag))
return data
__all__ = ["Chinese"]
| 12,734 | 36.677515 | 161 | py |
spaCy | spaCy-master/spacy/lang/zh/examples.py | """
Example sentences to test spaCy and its language models.
>>> from spacy.lang.zh.examples import sentences
>>> docs = nlp.pipe(sentences)
"""
# from https://zh.wikipedia.org/wiki/汉语
sentences = [
"作为语言而言,为世界使用人数最多的语言,目前世界有五分之一人口做为母语。",
"汉语有多种分支,当中官话最为流行,为中华人民共和国的国家通用语言(又称为普通话)、以及中华民国的国语。",
"此外,中文还是联合国正式语文,并被上海合作组织等国际组织采用为官方语言。",
"在中国大陆,汉语通称为“汉语”。",
"在联合国、台湾、香港及澳门,通称为“中文”。",
"在新加坡及马来西亚,通称为“华语”。",
]
| 430 | 24.352941 | 58 | py |
spaCy | spaCy-master/spacy/lang/zh/lex_attrs.py | import re
from ...attrs import LIKE_NUM
_single_num_words = [
"〇",
"一",
"二",
"三",
"四",
"五",
"六",
"七",
"八",
"九",
"十",
"十一",
"十二",
"十三",
"十四",
"十五",
"十六",
"十七",
"十八",
"十九",
"廿",
"卅",
"卌",
"皕",
"零",
"壹",
"贰",
"叁",
"肆",
"伍",
"陆",
"柒",
"捌",
"玖",
"拾",
"拾壹",
"拾贰",
"拾叁",
"拾肆",
"拾伍",
"拾陆",
"拾柒",
"拾捌",
"拾玖",
]
_count_num_words = [
"一",
"二",
"三",
"四",
"五",
"六",
"七",
"八",
"九",
"壹",
"贰",
"叁",
"肆",
"伍",
"陆",
"柒",
"捌",
"玖",
]
_base_num_words = ["十", "百", "千", "万", "亿", "兆", "拾", "佰", "仟"]
def like_num(text):
if text.startswith(("+", "-", "±", "~")):
text = text[1:]
text = text.replace(",", "").replace(".", "").replace(",", "").replace("。", "")
if text.isdigit():
return True
if text.count("/") == 1:
num, denom = text.split("/")
if num.isdigit() and denom.isdigit():
return True
if text in _single_num_words:
return True
# fmt: off
if re.match('^((' + '|'.join(_count_num_words) + '){1}'
+ '(' + '|'.join(_base_num_words) + '){1})+'
+ '(' + '|'.join(_count_num_words) + ')?$', text):
return True
# fmt: on
return False
LEX_ATTRS = {LIKE_NUM: like_num}
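# Worked examples for like_num (illustrative only):
#     like_num("三百")   -> True   (count word "三" followed by base word "百")
#     like_num("十三")   -> True   (listed in _single_num_words)
#     like_num("2,000")  -> True   (digits once the separators are stripped)
#     like_num("你好")   -> False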
| 1,429 | 13.591837 | 83 | py |
spaCy | spaCy-master/spacy/lang/zh/stop_words.py | # stop words as whitespace-separated list
# Chinese stop words, maybe not exhaustive
STOP_WORDS = set(
"""
!
"
#
$
%
&
'
(
)
*
+
,
-
--
.
..
...
......
...................
./
.一
.数
.日
/
//
0
1
2
3
4
5
6
7
8
9
:
://
::
;
<
=
>
>>
?
@
A
Lex
[
\
]
^
_
`
exp
sub
sup
|
}
~
~~~~
·
×
×××
Δ
Ψ
γ
μ
φ
φ.
В
—
——
———
‘
’
’‘
“
”
”,
…
……
…………………………………………………③
′∈
′|
℃
Ⅲ
↑
→
∈[
∪φ∈
≈
①
②
②c
③
③]
④
⑤
⑥
⑦
⑧
⑨
⑩
──
■
▲
、
。
〈
〉
《
》
》),
」
『
』
【
】
〔
〕
〕〔
㈧
一
一.
一一
一下
一个
一些
一何
一切
一则
一则通过
一天
一定
一方面
一旦
一时
一来
一样
一次
一片
一番
一直
一致
一般
一起
一转眼
一边
一面
七
万一
三
三天两头
三番两次
三番五次
上
上下
上升
上去
上来
上述
上面
下
下列
下去
下来
下面
不
不一
不下
不久
不了
不亦乐乎
不仅
不仅...而且
不仅仅
不仅仅是
不会
不但
不但...而且
不光
不免
不再
不力
不单
不变
不只
不可
不可开交
不可抗拒
不同
不外
不外乎
不够
不大
不如
不妨
不定
不对
不少
不尽
不尽然
不巧
不已
不常
不得
不得不
不得了
不得已
不必
不怎么
不怕
不惟
不成
不拘
不择手段
不敢
不料
不断
不日
不时
不是
不曾
不止
不止一次
不比
不消
不满
不然
不然的话
不特
不独
不由得
不知不觉
不管
不管怎样
不经意
不胜
不能
不能不
不至于
不若
不要
不论
不起
不足
不过
不迭
不问
不限
与
与其
与其说
与否
与此同时
专门
且
且不说
且说
两者
严格
严重
个
个人
个别
中小
中间
丰富
串行
临
临到
为
为主
为了
为什么
为什麽
为何
为止
为此
为着
主张
主要
举凡
举行
乃
乃至
乃至于
么
之
之一
之前
之后
之後
之所以
之类
乌乎
乎
乒
乘
乘势
乘机
乘胜
乘虚
乘隙
九
也
也好
也就是说
也是
也罢
了
了解
争取
二
二来
二话不说
二话没说
于
于是
于是乎
云云
云尔
互
互相
五
些
交口
亦
产生
亲口
亲手
亲眼
亲自
亲身
人
人人
人们
人家
人民
什么
什么样
什麽
仅
仅仅
今
今后
今天
今年
今後
介于
仍
仍旧
仍然
从
从不
从严
从中
从事
从今以后
从优
从古到今
从古至今
从头
从宽
从小
从新
从无到有
从早到晚
从未
从来
从此
从此以后
从而
从轻
从速
从重
他
他人
他们
他是
他的
代替
以
以上
以下
以为
以便
以免
以前
以及
以后
以外
以後
以故
以期
以来
以至
以至于
以致
们
任
任何
任凭
任务
企图
伙同
会
伟大
传
传说
传闻
似乎
似的
但
但凡
但愿
但是
何
何乐而不为
何以
何况
何处
何妨
何尝
何必
何时
何止
何苦
何须
余外
作为
你
你们
你是
你的
使
使得
使用
例如
依
依据
依照
依靠
便
便于
促进
保持
保管
保险
俺
俺们
倍加
倍感
倒不如
倒不如说
倒是
倘
倘使
倘或
倘然
倘若
借
借以
借此
假使
假如
假若
偏偏
做到
偶尔
偶而
傥然
像
儿
允许
元/吨
充其极
充其量
充分
先不先
先后
先後
先生
光
光是
全体
全力
全年
全然
全身心
全部
全都
全面
八
八成
公然
六
兮
共
共同
共总
关于
其
其一
其中
其二
其他
其余
其后
其它
其实
其次
具体
具体地说
具体来说
具体说来
具有
兼之
内
再
再其次
再则
再有
再次
再者
再者说
再说
冒
冲
决不
决定
决非
况且
准备
凑巧
凝神
几
几乎
几度
几时
几番
几经
凡
凡是
凭
凭借
出
出于
出去
出来
出现
分别
分头
分期
分期分批
切
切不可
切切
切勿
切莫
则
则甚
刚
刚好
刚巧
刚才
初
别
别人
别处
别是
别的
别管
别说
到
到了儿
到处
到头
到头来
到底
到目前为止
前后
前此
前者
前进
前面
加上
加之
加以
加入
加强
动不动
动辄
勃然
匆匆
十分
千
千万
千万千万
半
单
单单
单纯
即
即令
即使
即便
即刻
即如
即将
即或
即是说
即若
却
却不
历
原来
去
又
又及
及
及其
及时
及至
双方
反之
反之亦然
反之则
反倒
反倒是
反应
反手
反映
反而
反过来
反过来说
取得
取道
受到
变成
古来
另
另一个
另一方面
另外
另悉
另方面
另行
只
只当
只怕
只是
只有
只消
只要
只限
叫
叫做
召开
叮咚
叮当
可
可以
可好
可是
可能
可见
各
各个
各人
各位
各地
各式
各种
各级
各自
合理
同
同一
同时
同样
后
后来
后者
后面
向
向使
向着
吓
吗
否则
吧
吧哒
吱
呀
呃
呆呆地
呐
呕
呗
呜
呜呼
呢
周围
呵
呵呵
呸
呼哧
呼啦
咋
和
咚
咦
咧
咱
咱们
咳
哇
哈
哈哈
哉
哎
哎呀
哎哟
哗
哗啦
哟
哦
哩
哪
哪个
哪些
哪儿
哪天
哪年
哪怕
哪样
哪边
哪里
哼
哼唷
唉
唯有
啊
啊呀
啊哈
啊哟
啐
啥
啦
啪达
啷当
喀
喂
喏
喔唷
喽
嗡
嗡嗡
嗬
嗯
嗳
嘎
嘎嘎
嘎登
嘘
嘛
嘻
嘿
嘿嘿
四
因
因为
因了
因此
因着
因而
固
固然
在
在下
在于
地
均
坚决
坚持
基于
基本
基本上
处在
处处
处理
复杂
多
多么
多亏
多多
多多少少
多多益善
多少
多年前
多年来
多数
多次
够瞧的
大
大不了
大举
大事
大体
大体上
大凡
大力
大多
大多数
大大
大家
大张旗鼓
大批
大抵
大概
大略
大约
大致
大都
大量
大面儿上
失去
奇
奈
奋勇
她
她们
她是
她的
好
好在
好的
好象
如
如上
如上所述
如下
如今
如何
如其
如前所述
如同
如常
如是
如期
如果
如次
如此
如此等等
如若
始而
姑且
存在
存心
孰料
孰知
宁
宁可
宁愿
宁肯
它
它们
它们的
它是
它的
安全
完全
完成
定
实现
实际
宣布
容易
密切
对
对于
对应
对待
对方
对比
将
将才
将要
将近
小
少数
尔
尔后
尔尔
尔等
尚且
尤其
就
就地
就是
就是了
就是说
就此
就算
就要
尽
尽可能
尽如人意
尽心尽力
尽心竭力
尽快
尽早
尽然
尽管
尽管如此
尽量
局外
居然
届时
属于
屡
屡屡
屡次
屡次三番
岂
岂但
岂止
岂非
川流不息
左右
巨大
巩固
差一点
差不多
己
已
已矣
已经
巴
巴巴
带
帮助
常
常常
常言说
常言说得好
常言道
平素
年复一年
并
并不
并不是
并且
并排
并无
并没
并没有
并肩
并非
广大
广泛
应当
应用
应该
庶乎
庶几
开外
开始
开展
引起
弗
弹指之间
强烈
强调
归
归根到底
归根结底
归齐
当
当下
当中
当儿
当前
当即
当口儿
当地
当场
当头
当庭
当时
当然
当真
当着
形成
彻夜
彻底
彼
彼时
彼此
往
往往
待
待到
很
很多
很少
後来
後面
得
得了
得出
得到
得天独厚
得起
心里
必
必定
必将
必然
必要
必须
快
快要
忽地
忽然
怎
怎么
怎么办
怎么样
怎奈
怎样
怎麽
怕
急匆匆
怪
怪不得
总之
总是
总的来看
总的来说
总的说来
总结
总而言之
恍然
恐怕
恰似
恰好
恰如
恰巧
恰恰
恰恰相反
恰逢
您
您们
您是
惟其
惯常
意思
愤然
愿意
慢说
成为
成年
成年累月
成心
我
我们
我是
我的
或
或则
或多或少
或是
或曰
或者
或许
战斗
截然
截至
所
所以
所在
所幸
所有
所谓
才
才能
扑通
打
打从
打开天窗说亮话
扩大
把
抑或
抽冷子
拦腰
拿
按
按时
按期
按照
按理
按说
挨个
挨家挨户
挨次
挨着
挨门挨户
挨门逐户
换句话说
换言之
据
据实
据悉
据我所知
据此
据称
据说
掌握
接下来
接着
接著
接连不断
放量
故
故意
故此
故而
敞开儿
敢
敢于
敢情
数/
整个
断然
方
方便
方才
方能
方面
旁人
无
无宁
无法
无论
既
既...又
既往
既是
既然
日复一日
日渐
日益
日臻
日见
时候
昂然
明显
明确
是
是不是
是以
是否
是的
显然
显著
普通
普遍
暗中
暗地里
暗自
更
更为
更加
更进一步
曾
曾经
替
替代
最
最后
最大
最好
最後
最近
最高
有
有些
有关
有利
有力
有及
有所
有效
有时
有点
有的
有的是
有着
有著
望
朝
朝着
末##末
本
本人
本地
本着
本身
权时
来
来不及
来得及
来看
来着
来自
来讲
来说
极
极为
极了
极其
极力
极大
极度
极端
构成
果然
果真
某
某个
某些
某某
根据
根本
格外
梆
概
次第
欢迎
欤
正值
正在
正如
正巧
正常
正是
此
此中
此后
此地
此处
此外
此时
此次
此间
殆
毋宁
每
每个
每天
每年
每当
每时每刻
每每
每逢
比
比及
比如
比如说
比方
比照
比起
比较
毕竟
毫不
毫无
毫无例外
毫无保留地
汝
沙沙
没
没奈何
没有
沿
沿着
注意
活
深入
清楚
满
满足
漫说
焉
然
然则
然后
然後
然而
照
照着
牢牢
特别是
特殊
特点
犹且
犹自
独
独自
猛然
猛然间
率尔
率然
现代
现在
理应
理当
理该
瑟瑟
甚且
甚么
甚或
甚而
甚至
甚至于
用
用来
甫
甭
由
由于
由是
由此
由此可见
略
略为
略加
略微
白
白白
的
的确
的话
皆可
目前
直到
直接
相似
相信
相反
相同
相对
相对而言
相应
相当
相等
省得
看
看上去
看出
看到
看来
看样子
看看
看见
看起来
真是
真正
眨眼
着
着呢
矣
矣乎
矣哉
知道
砰
确定
碰巧
社会主义
离
种
积极
移动
究竟
穷年累月
突出
突然
窃
立
立刻
立即
立地
立时
立马
竟
竟然
竟而
第
第二
等
等到
等等
策略地
简直
简而言之
简言之
管
类如
粗
精光
紧接着
累年
累次
纯
纯粹
纵
纵令
纵使
纵然
练习
组成
经
经常
经过
结合
结果
给
绝
绝不
绝对
绝非
绝顶
继之
继后
继续
继而
维持
综上所述
缕缕
罢了
老
老大
老是
老老实实
考虑
者
而
而且
而况
而又
而后
而外
而已
而是
而言
而论
联系
联袂
背地里
背靠背
能
能否
能够
腾
自
自个儿
自从
自各儿
自后
自家
自己
自打
自身
臭
至
至于
至今
至若
致
般的
良好
若
若夫
若是
若果
若非
范围
莫
莫不
莫不然
莫如
莫若
莫非
获得
藉以
虽
虽则
虽然
虽说
蛮
行为
行动
表明
表示
被
要
要不
要不是
要不然
要么
要是
要求
见
规定
觉得
譬喻
譬如
认为
认真
认识
让
许多
论
论说
设使
设或
设若
诚如
诚然
话说
该
该当
说明
说来
说说
请勿
诸
诸位
诸如
谁
谁人
谁料
谁知
谨
豁然
贼死
赖以
赶
赶快
赶早不赶晚
起
起先
起初
起头
起来
起见
起首
趁
趁便
趁势
趁早
趁机
趁热
趁着
越是
距
跟
路经
转动
转变
转贴
轰然
较
较为
较之
较比
边
达到
达旦
迄
迅速
过
过于
过去
过来
运用
近
近几年来
近年来
近来
还
还是
还有
还要
这
这一来
这个
这么
这么些
这么样
这么点儿
这些
这会儿
这儿
这就是说
这时
这样
这次
这点
这种
这般
这边
这里
这麽
进入
进去
进来
进步
进而
进行
连
连同
连声
连日
连日来
连袂
连连
迟早
迫于
适应
适当
适用
逐步
逐渐
通常
通过
造成
逢
遇到
遭到
遵循
遵照
避免
那
那个
那么
那么些
那么样
那些
那会儿
那儿
那时
那末
那样
那般
那边
那里
那麽
部分
都
鄙人
采取
里面
重大
重新
重要
鉴于
针对
长期以来
长此下去
长线
长话短说
问题
间或
防止
阿
附近
陈年
限制
陡然
除
除了
除却
除去
除外
除开
除此
除此之外
除此以外
除此而外
除非
随
随后
随时
随着
随著
隔夜
隔日
难得
难怪
难说
难道
难道说
集中
零
需要
非但
非常
非徒
非得
非特
非独
靠
顶多
顷
顷刻
顷刻之间
顷刻间
顺
顺着
顿时
颇
风雨无阻
饱
首先
马上
高低
高兴
默然
默默地
齐
︿
!
#
$
%
&
'
(
)
)÷(1-
)、
*
+
+ξ
++
,
,也
-
-β
--
-[*]-
.
/
0
0:2
1
1.
12%
2
2.3%
3
4
5
5:0
6
7
8
9
:
;
<
<±
<Δ
<λ
<φ
<<
=
=″
=☆
=(
=-
=[
={
>
>λ
?
@
A
LI
R.L.
ZXFITL
[
[①①]
[①②]
[①③]
[①④]
[①⑤]
[①⑥]
[①⑦]
[①⑧]
[①⑨]
[①A]
[①B]
[①C]
[①D]
[①E]
[①]
[①a]
[①c]
[①d]
[①e]
[①f]
[①g]
[①h]
[①i]
[①o]
[②
[②①]
[②②]
[②③]
[②④
[②⑤]
[②⑥]
[②⑦]
[②⑧]
[②⑩]
[②B]
[②G]
[②]
[②a]
[②b]
[②c]
[②d]
[②e]
[②f]
[②g]
[②h]
[②i]
[②j]
[③①]
[③⑩]
[③F]
[③]
[③a]
[③b]
[③c]
[③d]
[③e]
[③g]
[③h]
[④]
[④a]
[④b]
[④c]
[④d]
[④e]
[⑤]
[⑤]]
[⑤a]
[⑤b]
[⑤d]
[⑤e]
[⑤f]
[⑥]
[⑦]
[⑧]
[⑨]
[⑩]
[*]
[-
[]
]
]∧′=[
][
_
a]
b]
c]
e]
f]
ng昉
{
{-
|
}
}>
~
~±
~+
¥
""".split()
)
| 5,897 | 2.104211 | 41 | py |
spaCy | spaCy-master/spacy/matcher/__init__.py | from .dependencymatcher import DependencyMatcher
from .levenshtein import levenshtein
from .matcher import Matcher
from .phrasematcher import PhraseMatcher
__all__ = ["Matcher", "PhraseMatcher", "DependencyMatcher", "levenshtein"]
| 232 | 32.285714 | 74 | py |
spaCy | spaCy-master/spacy/matcher/polyleven.c | /*
* Adapted from Polyleven (https://ceptord.net/)
*
* Source: https://github.com/fujimotos/polyleven/blob/c3f95a080626c5652f0151a2e449963288ccae84/polyleven.c
*
* Copyright (c) 2021 Fujimoto Seiji <[email protected]>
* Copyright (c) 2021 Max Bachmann <[email protected]>
* Copyright (c) 2022 Nick Mazuk
* Copyright (c) 2022 Michael Weiss <[email protected]>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <Python.h>
#include <stdint.h>
#define MIN(a,b) ((a) < (b) ? (a) : (b))
#define MAX(a,b) ((a) > (b) ? (a) : (b))
#define CDIV(a,b) ((a) / (b) + ((a) % (b) > 0))
#define BIT(i,n) (((i) >> (n)) & 1)
#define FLIP(i,n) ((i) ^ ((uint64_t) 1 << (n)))
#define ISASCII(kd) ((kd) == PyUnicode_1BYTE_KIND)
/*
* Bare bone of PyUnicode
*/
struct strbuf {
void *ptr;
int kind;
int64_t len;
};
static void strbuf_init(struct strbuf *s, PyObject *o)
{
s->ptr = PyUnicode_DATA(o);
s->kind = PyUnicode_KIND(o);
s->len = PyUnicode_GET_LENGTH(o);
}
#define strbuf_read(s, i) PyUnicode_READ((s)->kind, (s)->ptr, (i))
/*
* An encoded mbleven model table.
*
 * Each 8-bit integer represents an edit sequence, using two
 * bits for a single operation.
*
* 01 = DELETE, 10 = INSERT, 11 = REPLACE
*
* For example, 13 is '1101' in binary notation, so it means
* DELETE + REPLACE.
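 *
 * The pairs are consumed from the least-significant bits upward: in the
 * loops below, (m & 1) advances the index into s1 (a deletion), (m & 2)
 * advances the index into s2 (an insertion), and m is then shifted right
 * by two bits. So 9 ('1001') encodes DELETE followed by INSERT, and 63
 * ('111111') encodes three REPLACE operations.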
*/
static const uint8_t MBLEVEN_MATRIX[] = {
3, 0, 0, 0, 0, 0, 0, 0,
1, 0, 0, 0, 0, 0, 0, 0,
15, 9, 6, 0, 0, 0, 0, 0,
13, 7, 0, 0, 0, 0, 0, 0,
5, 0, 0, 0, 0, 0, 0, 0,
63, 39, 45, 57, 54, 30, 27, 0,
61, 55, 31, 37, 25, 22, 0, 0,
53, 29, 23, 0, 0, 0, 0, 0,
21, 0, 0, 0, 0, 0, 0, 0,
};
#define MBLEVEN_MATRIX_GET(k, d) ((((k) + (k) * (k)) / 2 - 1) + (d)) * 8
static int64_t mbleven_ascii(char *s1, int64_t len1,
char *s2, int64_t len2, int k)
{
int pos;
uint8_t m;
int64_t i, j, c, r;
pos = MBLEVEN_MATRIX_GET(k, len1 - len2);
r = k + 1;
while (MBLEVEN_MATRIX[pos]) {
m = MBLEVEN_MATRIX[pos++];
i = j = c = 0;
while (i < len1 && j < len2) {
if (s1[i] != s2[j]) {
c++;
if (!m) break;
if (m & 1) i++;
if (m & 2) j++;
m >>= 2;
} else {
i++;
j++;
}
}
c += (len1 - i) + (len2 - j);
r = MIN(r, c);
if (r < 2) {
return r;
}
}
return r;
}
static int64_t mbleven(PyObject *o1, PyObject *o2, int64_t k)
{
int pos;
uint8_t m;
int64_t i, j, c, r;
struct strbuf s1, s2;
strbuf_init(&s1, o1);
strbuf_init(&s2, o2);
if (s1.len < s2.len)
return mbleven(o2, o1, k);
if (k > 3)
return -1;
if (k < s1.len - s2.len)
return k + 1;
if (ISASCII(s1.kind) && ISASCII(s2.kind))
return mbleven_ascii(s1.ptr, s1.len, s2.ptr, s2.len, k);
pos = MBLEVEN_MATRIX_GET(k, s1.len - s2.len);
r = k + 1;
while (MBLEVEN_MATRIX[pos]) {
m = MBLEVEN_MATRIX[pos++];
i = j = c = 0;
while (i < s1.len && j < s2.len) {
if (strbuf_read(&s1, i) != strbuf_read(&s2, j)) {
c++;
if (!m) break;
if (m & 1) i++;
if (m & 2) j++;
m >>= 2;
} else {
i++;
j++;
}
}
c += (s1.len - i) + (s2.len - j);
r = MIN(r, c);
if (r < 2) {
return r;
}
}
return r;
}
/*
* Data structure to store Peq (equality bit-vector).
*/
struct blockmap_entry {
uint32_t key[128];
uint64_t val[128];
};
struct blockmap {
int64_t nr;
struct blockmap_entry *list;
};
#define blockmap_key(c) ((c) | 0x80000000U)
#define blockmap_hash(c) ((c) % 128)
static int blockmap_init(struct blockmap *map, struct strbuf *s)
{
int64_t i;
struct blockmap_entry *be;
uint32_t c, k;
uint8_t h;
map->nr = CDIV(s->len, 64);
map->list = calloc(1, map->nr * sizeof(struct blockmap_entry));
if (map->list == NULL) {
PyErr_NoMemory();
return -1;
}
for (i = 0; i < s->len; i++) {
be = &(map->list[i / 64]);
c = strbuf_read(s, i);
h = blockmap_hash(c);
k = blockmap_key(c);
while (be->key[h] && be->key[h] != k)
h = blockmap_hash(h + 1);
be->key[h] = k;
be->val[h] |= (uint64_t) 1 << (i % 64);
}
return 0;
}
static void blockmap_clear(struct blockmap *map)
{
if (map->list)
free(map->list);
map->list = NULL;
map->nr = 0;
}
static uint64_t blockmap_get(struct blockmap *map, int block, uint32_t c)
{
struct blockmap_entry *be;
uint8_t h;
uint32_t k;
h = blockmap_hash(c);
k = blockmap_key(c);
be = &(map->list[block]);
while (be->key[h] && be->key[h] != k)
h = blockmap_hash(h + 1);
return be->key[h] == k ? be->val[h] : 0;
}
/*
* Myers' bit-parallel algorithm
*
* See: G. Myers. "A fast bit-vector algorithm for approximate string
* matching based on dynamic programming." Journal of the ACM, 1999.
*/
static int64_t myers1999_block(struct strbuf *s1, struct strbuf *s2,
struct blockmap *map)
{
uint64_t Eq, Xv, Xh, Ph, Mh, Pv, Mv, Last;
uint64_t *Mhc, *Phc;
int64_t i, b, hsize, vsize, Score;
uint8_t Pb, Mb;
hsize = CDIV(s1->len, 64);
vsize = CDIV(s2->len, 64);
Score = s2->len;
Phc = malloc(hsize * 2 * sizeof(uint64_t));
if (Phc == NULL) {
PyErr_NoMemory();
return -1;
}
Mhc = Phc + hsize;
memset(Phc, -1, hsize * sizeof(uint64_t));
memset(Mhc, 0, hsize * sizeof(uint64_t));
Last = (uint64_t)1 << ((s2->len - 1) % 64);
for (b = 0; b < vsize; b++) {
Mv = 0;
Pv = (uint64_t) -1;
Score = s2->len;
for (i = 0; i < s1->len; i++) {
Eq = blockmap_get(map, b, strbuf_read(s1, i));
Pb = BIT(Phc[i / 64], i % 64);
Mb = BIT(Mhc[i / 64], i % 64);
Xv = Eq | Mv;
Xh = ((((Eq | Mb) & Pv) + Pv) ^ Pv) | Eq | Mb;
Ph = Mv | ~ (Xh | Pv);
Mh = Pv & Xh;
if (Ph & Last) Score++;
if (Mh & Last) Score--;
if ((Ph >> 63) ^ Pb)
Phc[i / 64] = FLIP(Phc[i / 64], i % 64);
if ((Mh >> 63) ^ Mb)
Mhc[i / 64] = FLIP(Mhc[i / 64], i % 64);
Ph = (Ph << 1) | Pb;
Mh = (Mh << 1) | Mb;
Pv = Mh | ~ (Xv | Ph);
Mv = Ph & Xv;
}
}
free(Phc);
return Score;
}
static int64_t myers1999_simple(uint8_t *s1, int64_t len1, uint8_t *s2, int64_t len2)
{
uint64_t Peq[256];
uint64_t Eq, Xv, Xh, Ph, Mh, Pv, Mv, Last;
int64_t i;
int64_t Score = len2;
memset(Peq, 0, sizeof(Peq));
for (i = 0; i < len2; i++)
Peq[s2[i]] |= (uint64_t) 1 << i;
Mv = 0;
Pv = (uint64_t) -1;
Last = (uint64_t) 1 << (len2 - 1);
for (i = 0; i < len1; i++) {
Eq = Peq[s1[i]];
Xv = Eq | Mv;
Xh = (((Eq & Pv) + Pv) ^ Pv) | Eq;
Ph = Mv | ~ (Xh | Pv);
Mh = Pv & Xh;
if (Ph & Last) Score++;
if (Mh & Last) Score--;
Ph = (Ph << 1) | 1;
Mh = (Mh << 1);
Pv = Mh | ~ (Xv | Ph);
Mv = Ph & Xv;
}
return Score;
}
static int64_t myers1999(PyObject *o1, PyObject *o2)
{
struct strbuf s1, s2;
struct blockmap map;
int64_t ret;
strbuf_init(&s1, o1);
strbuf_init(&s2, o2);
if (s1.len < s2.len)
return myers1999(o2, o1);
if (ISASCII(s1.kind) && ISASCII(s2.kind) && s2.len < 65)
return myers1999_simple(s1.ptr, s1.len, s2.ptr, s2.len);
if (blockmap_init(&map, &s2))
return -1;
ret = myers1999_block(&s1, &s2, &map);
blockmap_clear(&map);
return ret;
}
/*
* Interface functions
*/
static int64_t polyleven(PyObject *o1, PyObject *o2, int64_t k)
{
int64_t len1, len2;
len1 = PyUnicode_GET_LENGTH(o1);
len2 = PyUnicode_GET_LENGTH(o2);
if (len1 < len2)
return polyleven(o2, o1, k);
if (k == 0)
return PyUnicode_Compare(o1, o2) ? 1 : 0;
if (0 < k && k < len1 - len2)
return k + 1;
if (len2 == 0)
return len1;
if (0 < k && k < 4)
return mbleven(o1, o2, k);
return myers1999(o1, o2);
}
| 9,571 | 23.862338 | 107 | c |
spaCy | spaCy-master/spacy/ml/__init__.py | from .callbacks import create_models_with_nvtx_range # noqa: F401
from .models import * # noqa: F401, F403
| 109 | 35.666667 | 66 | py |
spaCy | spaCy-master/spacy/ml/_character_embed.py | from typing import List
from thinc.api import Model
from thinc.types import Floats2d
from ..tokens import Doc
from ..util import registry
@registry.layers("spacy.CharEmbed.v1")
def CharacterEmbed(nM: int, nC: int) -> Model[List[Doc], List[Floats2d]]:
# nM: Number of dimensions per character. nC: Number of characters.
return Model(
"charembed",
forward,
init=init,
dims={"nM": nM, "nC": nC, "nO": nM * nC, "nV": 256},
params={"E": None},
)
def init(model: Model, X=None, Y=None):
vectors_table = model.ops.alloc3f(
model.get_dim("nC"), model.get_dim("nV"), model.get_dim("nM")
)
model.set_param("E", vectors_table)
def forward(model: Model, docs: List[Doc], is_train: bool):
if docs is None:
return []
ids = []
output = []
E = model.get_param("E")
nC = model.get_dim("nC")
nM = model.get_dim("nM")
nO = model.get_dim("nO")
# This assists in indexing; it's like looping over this dimension.
    # Still consider this weird witchcraft... but thanks to Mark Neumann
# for the tip.
nCv = model.ops.xp.arange(nC)
for doc in docs:
doc_ids = model.ops.asarray(doc.to_utf8_array(nr_char=nC))
doc_vectors = model.ops.alloc3f(len(doc), nC, nM)
        # We have a 2d array of indices and a 3d table of data. The fancy
        # indexing below gives us:
        # output[i, j, k] == data[j, ids[i, j], k]
doc_vectors[:, nCv] = E[nCv, doc_ids[:, nCv]] # type: ignore[call-overload, index]
output.append(doc_vectors.reshape((len(doc), nO)))
ids.append(doc_ids)
def backprop(d_output):
dE = model.ops.alloc(E.shape, dtype=E.dtype)
for doc_ids, d_doc_vectors in zip(ids, d_output):
d_doc_vectors = d_doc_vectors.reshape((len(doc_ids), nC, nM))
dE[nCv, doc_ids[:, nCv]] += d_doc_vectors[:, nCv]
model.inc_grad("E", dE)
return []
return output, backprop
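# Minimal usage sketch (illustrative, not part of the library): each token is
# mapped to a vector of size nM * nC derived from its UTF-8 bytes.
if __name__ == "__main__":
    import spacy
    from spacy.ml._character_embed import CharacterEmbed

    model = CharacterEmbed(nM=16, nC=4)
    model.initialize()
    doc = spacy.blank("en")("character embedding demo")
    (output,) = model.predict([doc])
    print(output.shape)  # (3, 64), i.e. (len(doc), nM * nC)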
| 1,994 | 31.704918 | 91 | py |
spaCy | spaCy-master/spacy/ml/_precomputable_affine.py | from thinc.api import Model, normal_init
from ..util import registry
@registry.layers("spacy.PrecomputableAffine.v1")
def PrecomputableAffine(nO, nI, nF, nP, dropout=0.1):
model = Model(
"precomputable_affine",
forward,
init=init,
dims={"nO": nO, "nI": nI, "nF": nF, "nP": nP},
params={"W": None, "b": None, "pad": None},
attrs={"dropout_rate": dropout},
)
return model
def forward(model, X, is_train):
nF = model.get_dim("nF")
nO = model.get_dim("nO")
nP = model.get_dim("nP")
nI = model.get_dim("nI")
W = model.get_param("W")
# Preallocate array for layer output, including padding.
Yf = model.ops.alloc2f(X.shape[0] + 1, nF * nO * nP, zeros=False)
model.ops.gemm(X, W.reshape((nF * nO * nP, nI)), trans2=True, out=Yf[1:])
Yf = Yf.reshape((Yf.shape[0], nF, nO, nP))
# Set padding. Padding has shape (1, nF, nO, nP). Unfortunately, we cannot
# change its shape to (nF, nO, nP) without breaking existing models. So
# we'll squeeze the first dimension here.
Yf[0] = model.ops.xp.squeeze(model.get_param("pad"), 0)
def backward(dY_ids):
# This backprop is particularly tricky, because we get back a different
# thing from what we put out. We put out an array of shape:
# (nB, nF, nO, nP), and get back:
# (nB, nO, nP) and ids (nB, nF)
# The ids tell us the values of nF, so we would have:
#
# dYf = zeros((nB, nF, nO, nP))
# for b in range(nB):
# for f in range(nF):
# dYf[b, ids[b, f]] += dY[b]
#
# However, we avoid building that array for efficiency -- and just pass
# in the indices.
dY, ids = dY_ids
assert dY.ndim == 3
assert dY.shape[1] == nO, dY.shape
assert dY.shape[2] == nP, dY.shape
# nB = dY.shape[0]
model.inc_grad("pad", _backprop_precomputable_affine_padding(model, dY, ids))
Xf = X[ids]
Xf = Xf.reshape((Xf.shape[0], nF * nI))
model.inc_grad("b", dY.sum(axis=0))
dY = dY.reshape((dY.shape[0], nO * nP))
Wopfi = W.transpose((1, 2, 0, 3))
Wopfi = Wopfi.reshape((nO * nP, nF * nI))
dXf = model.ops.gemm(dY.reshape((dY.shape[0], nO * nP)), Wopfi)
dWopfi = model.ops.gemm(dY, Xf, trans1=True)
dWopfi = dWopfi.reshape((nO, nP, nF, nI))
# (o, p, f, i) --> (f, o, p, i)
dWopfi = dWopfi.transpose((2, 0, 1, 3))
model.inc_grad("W", dWopfi)
return dXf.reshape((dXf.shape[0], nF, nI))
return Yf, backward
def _backprop_precomputable_affine_padding(model, dY, ids):
nB = dY.shape[0]
nF = model.get_dim("nF")
nP = model.get_dim("nP")
nO = model.get_dim("nO")
# Backprop the "padding", used as a filler for missing values.
# Values that are missing are set to -1, and each state vector could
# have multiple missing values. The padding has different values for
# different missing features. The gradient of the padding vector is:
#
# for b in range(nB):
# for f in range(nF):
# if ids[b, f] < 0:
# d_pad[f] += dY[b]
#
# Which can be rewritten as:
#
# (ids < 0).T @ dY
mask = model.ops.asarray(ids < 0, dtype="f")
d_pad = model.ops.gemm(mask, dY.reshape(nB, nO * nP), trans1=True)
return d_pad.reshape((1, nF, nO, nP))
def init(model, X=None, Y=None):
"""This is like the 'layer sequential unit variance', but instead
of taking the actual inputs, we randomly generate whitened data.
Why's this all so complicated? We have a huge number of inputs,
and the maxout unit makes guessing the dynamics tricky. Instead
we set the maxout weights to values that empirically result in
whitened outputs given whitened inputs.
"""
if model.has_param("W") and model.get_param("W").any():
return
nF = model.get_dim("nF")
nO = model.get_dim("nO")
nP = model.get_dim("nP")
nI = model.get_dim("nI")
W = model.ops.alloc4f(nF, nO, nP, nI)
b = model.ops.alloc2f(nO, nP)
pad = model.ops.alloc4f(1, nF, nO, nP)
ops = model.ops
W = normal_init(ops, W.shape, mean=float(ops.xp.sqrt(1.0 / nF * nI)))
pad = normal_init(ops, pad.shape, mean=1.0)
model.set_param("W", W)
model.set_param("b", b)
model.set_param("pad", pad)
ids = ops.alloc((5000, nF), dtype="f")
ids += ops.xp.random.uniform(0, 1000, ids.shape)
ids = ops.asarray(ids, dtype="i")
tokvecs = ops.alloc((5000, nI), dtype="f")
tokvecs += ops.xp.random.normal(loc=0.0, scale=1.0, size=tokvecs.size).reshape(
tokvecs.shape
)
def predict(ids, tokvecs):
# nS ids. nW tokvecs. Exclude the padding array.
hiddens = model.predict(tokvecs[:-1]) # (nW, f, o, p)
vectors = model.ops.alloc((ids.shape[0], nO * nP), dtype="f")
# need nS vectors
hiddens = hiddens.reshape((hiddens.shape[0] * nF, nO * nP))
model.ops.scatter_add(vectors, ids.flatten(), hiddens)
vectors = vectors.reshape((vectors.shape[0], nO, nP))
vectors += b
vectors = model.ops.asarray(vectors)
if nP >= 2:
return model.ops.maxout(vectors)[0]
else:
return vectors * (vectors >= 0)
tol_var = 0.01
tol_mean = 0.01
t_max = 10
W = model.get_param("W").copy()
b = model.get_param("b").copy()
for t_i in range(t_max):
acts1 = predict(ids, tokvecs)
var = model.ops.xp.var(acts1)
mean = model.ops.xp.mean(acts1)
if abs(var - 1.0) >= tol_var:
W /= model.ops.xp.sqrt(var)
model.set_param("W", W)
elif abs(mean) >= tol_mean:
b -= mean
model.set_param("b", b)
else:
break
| 5,834 | 34.363636 | 85 | py |
spaCy | spaCy-master/spacy/ml/callbacks.py | import functools
import inspect
import types
import warnings
from typing import TYPE_CHECKING, Callable, Dict, List, Optional, Set, Type
from thinc.layers import with_nvtx_range
from thinc.model import Model, wrap_model_recursive
from thinc.util import use_nvtx_range
from ..errors import Warnings
from ..util import registry
if TYPE_CHECKING:
# This lets us add type hints for mypy etc. without causing circular imports
from ..language import Language # noqa: F401
DEFAULT_NVTX_ANNOTATABLE_PIPE_METHODS = [
"pipe",
"predict",
"set_annotations",
"update",
"rehearse",
"get_loss",
"initialize",
"begin_update",
"finish_update",
"update",
]
def models_with_nvtx_range(nlp, forward_color: int, backprop_color: int):
pipes = [
pipe
for _, pipe in nlp.components
if hasattr(pipe, "is_trainable") and pipe.is_trainable
]
seen_models: Set[int] = set()
for pipe in pipes:
for node in pipe.model.walk():
if id(node) in seen_models:
continue
seen_models.add(id(node))
with_nvtx_range(
node, forward_color=forward_color, backprop_color=backprop_color
)
return nlp
@registry.callbacks("spacy.models_with_nvtx_range.v1")
def create_models_with_nvtx_range(
forward_color: int = -1, backprop_color: int = -1
) -> Callable[["Language"], "Language"]:
return functools.partial(
models_with_nvtx_range,
forward_color=forward_color,
backprop_color=backprop_color,
)
def nvtx_range_wrapper_for_pipe_method(self, func, *args, **kwargs):
if isinstance(func, functools.partial):
return func(*args, **kwargs)
else:
with use_nvtx_range(f"{self.name} {func.__name__}"):
return func(*args, **kwargs)
def pipes_with_nvtx_range(
nlp, additional_pipe_functions: Optional[Dict[str, List[str]]]
):
for _, pipe in nlp.components:
if additional_pipe_functions:
extra_funcs = additional_pipe_functions.get(pipe.name, [])
else:
extra_funcs = []
for name in DEFAULT_NVTX_ANNOTATABLE_PIPE_METHODS + extra_funcs:
func = getattr(pipe, name, None)
if func is None:
if name in extra_funcs:
warnings.warn(Warnings.W121.format(method=name, pipe=pipe.name))
continue
wrapped_func = functools.partial(
types.MethodType(nvtx_range_wrapper_for_pipe_method, pipe), func
)
# We need to preserve the original function signature so that
# the original parameters are passed to pydantic for validation downstream.
try:
wrapped_func.__signature__ = inspect.signature(func) # type: ignore
except:
# Can fail for Cython methods that do not have bindings.
warnings.warn(Warnings.W122.format(method=name, pipe=pipe.name))
continue
try:
setattr(
pipe,
name,
wrapped_func,
)
except AttributeError:
warnings.warn(Warnings.W122.format(method=name, pipe=pipe.name))
return nlp
@registry.callbacks("spacy.models_and_pipes_with_nvtx_range.v1")
def create_models_and_pipes_with_nvtx_range(
forward_color: int = -1,
backprop_color: int = -1,
additional_pipe_functions: Optional[Dict[str, List[str]]] = None,
) -> Callable[["Language"], "Language"]:
def inner(nlp):
nlp = models_with_nvtx_range(nlp, forward_color, backprop_color)
nlp = pipes_with_nvtx_range(nlp, additional_pipe_functions)
return nlp
return inner
| 3,787 | 29.304 | 87 | py |
spaCy | spaCy-master/spacy/ml/extract_ngrams.py | from thinc.api import Model
from ..attrs import LOWER
from ..util import registry
@registry.layers("spacy.extract_ngrams.v1")
def extract_ngrams(ngram_size: int, attr: int = LOWER) -> Model:
model: Model = Model("extract_ngrams", forward)
model.attrs["ngram_size"] = ngram_size
model.attrs["attr"] = attr
return model
def forward(model: Model, docs, is_train: bool):
batch_keys = []
batch_vals = []
for doc in docs:
unigrams = model.ops.asarray(doc.to_array([model.attrs["attr"]]))
ngrams = [unigrams]
for n in range(2, model.attrs["ngram_size"] + 1):
ngrams.append(model.ops.ngrams(n, unigrams)) # type: ignore[arg-type]
keys = model.ops.xp.concatenate(ngrams)
keys, vals = model.ops.xp.unique(keys, return_counts=True)
batch_keys.append(keys)
batch_vals.append(vals)
lengths = model.ops.asarray([arr.shape[0] for arr in batch_keys], dtype="int32")
batch_keys = model.ops.xp.concatenate(batch_keys)
batch_vals = model.ops.asarray(model.ops.xp.concatenate(batch_vals), dtype="f")
def backprop(dY):
return []
return (batch_keys, batch_vals, lengths), backprop
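# Minimal usage sketch (illustrative, not part of the library): the layer
# returns hashed unigram/bigram keys, their counts and per-doc lengths.
if __name__ == "__main__":
    import spacy
    from spacy.ml.extract_ngrams import extract_ngrams

    nlp = spacy.blank("en")
    model = extract_ngrams(ngram_size=2)
    (keys, vals, lengths), _ = model([nlp("the cat sat on the mat")], is_train=False)
    print(len(keys), lengths)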
| 1,191 | 33.057143 | 84 | py |
spaCy | spaCy-master/spacy/ml/extract_spans.py | from typing import Callable, List, Tuple
from thinc.api import Model, to_numpy
from thinc.types import Ints1d, Ragged
from ..util import registry
@registry.layers("spacy.extract_spans.v1")
def extract_spans() -> Model[Tuple[Ragged, Ragged], Ragged]:
"""Extract spans from a sequence of source arrays, as specified by an array
of (start, end) indices. The output is a ragged array of the
extracted spans.
"""
return Model(
"extract_spans", forward, layers=[], refs={}, attrs={}, dims={}, init=init
)
def init(model, X=None, Y=None):
pass
def forward(
model: Model, source_spans: Tuple[Ragged, Ragged], is_train: bool
) -> Tuple[Ragged, Callable]:
"""Get subsequences from source vectors."""
ops = model.ops
X, spans = source_spans
assert spans.dataXd.ndim == 2
indices = _get_span_indices(ops, spans, X.lengths)
if len(indices) > 0:
Y = Ragged(X.dataXd[indices], spans.dataXd[:, 1] - spans.dataXd[:, 0]) # type: ignore[arg-type, index]
else:
Y = Ragged(
ops.xp.zeros(X.dataXd.shape, dtype=X.dataXd.dtype),
ops.xp.zeros((len(X.lengths),), dtype="i"),
)
x_shape = X.dataXd.shape
x_lengths = X.lengths
def backprop_windows(dY: Ragged) -> Tuple[Ragged, Ragged]:
dX = Ragged(ops.alloc2f(*x_shape), x_lengths)
ops.scatter_add(dX.dataXd, indices, dY.dataXd) # type: ignore[arg-type]
return (dX, spans)
return Y, backprop_windows
def _get_span_indices(ops, spans: Ragged, lengths: Ints1d) -> Ints1d:
"""Construct a flat array that has the indices we want to extract from the
source data. For instance, if we want the spans (5, 9), (8, 10) the
indices will be [5, 6, 7, 8, 8, 9].
"""
spans, lengths = _ensure_cpu(spans, lengths)
indices: List[int] = []
offset = 0
for i, length in enumerate(lengths):
spans_i = spans[i].dataXd + offset
for j in range(spans_i.shape[0]):
indices.extend(range(spans_i[j, 0], spans_i[j, 1])) # type: ignore[arg-type, call-overload]
offset += length
return ops.asarray1i(indices)
def _ensure_cpu(spans: Ragged, lengths: Ints1d) -> Tuple[Ragged, Ints1d]:
return Ragged(to_numpy(spans.dataXd), to_numpy(spans.lengths)), to_numpy(lengths)
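# Minimal usage sketch (illustrative, not part of the library): X holds one
# "document" of 5 token vectors, spans requests tokens 1:3 and 2:4, and the
# output ragged array contains the 4 extracted rows.
if __name__ == "__main__":
    from thinc.api import NumpyOps, Ragged
    from spacy.ml.extract_spans import extract_spans

    ops = NumpyOps()
    X = Ragged(ops.alloc2f(5, 8), ops.asarray1i([5]))
    spans = Ragged(ops.asarray2i([[1, 3], [2, 4]]), ops.asarray1i([2]))
    model = extract_spans()
    Y = model.predict((X, spans))
    print(Y.dataXd.shape, Y.lengths)  # (4, 8) [2 2]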
| 2,304 | 32.897059 | 111 | py |
spaCy | spaCy-master/spacy/ml/featureextractor.py | from typing import Callable, List, Tuple, Union
from thinc.api import Model, registry
from thinc.types import Ints2d
from ..tokens import Doc
@registry.layers("spacy.FeatureExtractor.v1")
def FeatureExtractor(columns: List[Union[int, str]]) -> Model[List[Doc], List[Ints2d]]:
return Model("extract_features", forward, attrs={"columns": columns})
def forward(
model: Model[List[Doc], List[Ints2d]], docs, is_train: bool
) -> Tuple[List[Ints2d], Callable]:
columns = model.attrs["columns"]
features: List[Ints2d] = []
for doc in docs:
if hasattr(doc, "to_array"):
attrs = doc.to_array(columns)
else:
attrs = doc.doc.to_array(columns)[doc.start : doc.end]
if attrs.ndim == 1:
attrs = attrs.reshape((attrs.shape[0], 1))
features.append(model.ops.asarray2i(attrs, dtype="uint64"))
backprop: Callable[[List[Ints2d]], List] = lambda d_features: []
return features, backprop
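# Minimal usage sketch (illustrative, not part of the library): one uint64 row
# per token, one column per requested attribute.
if __name__ == "__main__":
    import spacy
    from spacy.ml.featureextractor import FeatureExtractor

    nlp = spacy.blank("en")
    model = FeatureExtractor(["NORM", "PREFIX", "SUFFIX", "SHAPE"])
    (feats,) = model.predict([nlp("Feature extraction demo")])
    print(feats.shape)  # (3, 4)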
| 970 | 31.366667 | 87 | py |
spaCy | spaCy-master/spacy/ml/staticvectors.py | import warnings
from typing import Callable, List, Optional, Sequence, Tuple, cast
from thinc.api import Model, Ops, registry
from thinc.initializers import glorot_uniform_init
from thinc.types import Floats1d, Floats2d, Ints1d, Ragged
from thinc.util import partial
from ..attrs import ORTH
from ..errors import Errors, Warnings
from ..tokens import Doc
from ..vectors import Mode
from ..vocab import Vocab
@registry.layers("spacy.StaticVectors.v2")
def StaticVectors(
nO: Optional[int] = None,
nM: Optional[int] = None,
*,
dropout: Optional[float] = None,
init_W: Callable = glorot_uniform_init,
key_attr: str = "ORTH"
) -> Model[List[Doc], Ragged]:
"""Embed Doc objects with their vocab's vectors table, applying a learned
linear projection to control the dimensionality. If a dropout rate is
specified, the dropout is applied per dimension over the whole batch.
"""
if key_attr != "ORTH":
warnings.warn(Warnings.W125, DeprecationWarning)
return Model(
"static_vectors",
forward,
init=partial(init, init_W),
params={"W": None},
attrs={"key_attr": key_attr, "dropout_rate": dropout},
dims={"nO": nO, "nM": nM},
)
def forward(
model: Model[List[Doc], Ragged], docs: List[Doc], is_train: bool
) -> Tuple[Ragged, Callable]:
token_count = sum(len(doc) for doc in docs)
if not token_count:
return _handle_empty(model.ops, model.get_dim("nO"))
vocab: Vocab = docs[0].vocab
key_attr: int = getattr(vocab.vectors, "attr", ORTH)
keys = model.ops.flatten([cast(Ints1d, doc.to_array(key_attr)) for doc in docs])
W = cast(Floats2d, model.ops.as_contig(model.get_param("W")))
if vocab.vectors.mode == Mode.default:
V = model.ops.asarray(vocab.vectors.data)
rows = vocab.vectors.find(keys=keys)
V = model.ops.as_contig(V[rows])
elif vocab.vectors.mode == Mode.floret:
V = vocab.vectors.get_batch(keys)
V = model.ops.as_contig(V)
else:
raise RuntimeError(Errors.E896)
try:
vectors_data = model.ops.gemm(V, W, trans2=True)
except ValueError:
raise RuntimeError(Errors.E896)
if vocab.vectors.mode == Mode.default:
# Convert negative indices to 0-vectors
# TODO: more options for UNK tokens
vectors_data[rows < 0] = 0
output = Ragged(vectors_data, model.ops.asarray1i([len(doc) for doc in docs]))
mask = None
if is_train:
mask = _get_drop_mask(model.ops, W.shape[0], model.attrs.get("dropout_rate"))
if mask is not None:
output.data *= mask
def backprop(d_output: Ragged) -> List[Doc]:
if mask is not None:
d_output.data *= mask
model.inc_grad(
"W",
model.ops.gemm(
cast(Floats2d, d_output.data),
cast(Floats2d, model.ops.as_contig(V)),
trans1=True,
),
)
return []
return output, backprop
def init(
init_W: Callable,
model: Model[List[Doc], Ragged],
X: Optional[List[Doc]] = None,
Y: Optional[Ragged] = None,
) -> Model[List[Doc], Ragged]:
nM = model.get_dim("nM") if model.has_dim("nM") else None
nO = model.get_dim("nO") if model.has_dim("nO") else None
if X is not None and len(X):
nM = X[0].vocab.vectors.shape[1]
if Y is not None:
nO = Y.data.shape[1]
if nM is None:
raise ValueError(Errors.E905)
if nO is None:
raise ValueError(Errors.E904)
model.set_dim("nM", nM)
model.set_dim("nO", nO)
model.set_param("W", init_W(model.ops, (nO, nM)))
return model
def _handle_empty(ops: Ops, nO: int):
return Ragged(ops.alloc2f(0, nO), ops.alloc1i(0)), lambda d_ragged: []
def _get_drop_mask(ops: Ops, nO: int, rate: Optional[float]) -> Optional[Floats1d]:
if rate is not None:
mask = ops.get_dropout_mask((nO,), rate)
return mask # type: ignore
return None
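# Minimal usage sketch (illustrative, not part of the library): a tiny vocab
# with one 4-dimensional vector, projected down to 2 dimensions; tokens with
# no vector ("banana" here) come out as zero rows.
if __name__ == "__main__":
    import numpy
    import spacy
    from spacy.ml.staticvectors import StaticVectors

    nlp = spacy.blank("en")
    nlp.vocab.set_vector("apple", numpy.asarray([1.0, 2.0, 3.0, 4.0], dtype="f"))
    model = StaticVectors(nO=2, nM=4)
    model.initialize()
    ragged = model.predict([nlp("apple banana")])
    print(ragged.dataXd.shape, ragged.lengths)  # (2, 2) [2]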
| 4,000 | 31.528455 | 85 | py |
spaCy | spaCy-master/spacy/ml/tb_framework.py | from thinc.api import Model, noop
from ..util import registry
from .parser_model import ParserStepModel
@registry.layers("spacy.TransitionModel.v1")
def TransitionModel(
tok2vec, lower, upper, resize_output, dropout=0.2, unseen_classes=set()
):
"""Set up a stepwise transition-based model"""
if upper is None:
has_upper = False
upper = noop()
else:
has_upper = True
# don't define nO for this object, because we can't dynamically change it
return Model(
name="parser_model",
forward=forward,
dims={"nI": tok2vec.maybe_get_dim("nI")},
layers=[tok2vec, lower, upper],
refs={"tok2vec": tok2vec, "lower": lower, "upper": upper},
init=init,
attrs={
"has_upper": has_upper,
"unseen_classes": set(unseen_classes),
"resize_output": resize_output,
},
)
def forward(model, X, is_train):
step_model = ParserStepModel(
X,
model.layers,
unseen_classes=model.attrs["unseen_classes"],
train=is_train,
has_upper=model.attrs["has_upper"],
)
return step_model, step_model.finish_steps
def init(model, X=None, Y=None):
model.get_ref("tok2vec").initialize(X=X)
lower = model.get_ref("lower")
lower.initialize()
if model.attrs["has_upper"]:
statevecs = model.ops.alloc2f(2, lower.get_dim("nO"))
model.get_ref("upper").initialize(X=statevecs)
| 1,465 | 27.192308 | 77 | py |
spaCy | spaCy-master/spacy/ml/models/__init__.py | from .entity_linker import * # noqa
from .multi_task import * # noqa
from .parser import * # noqa
from .span_finder import * # noqa
from .spancat import * # noqa
from .tagger import * # noqa
from .textcat import * # noqa
from .tok2vec import * # noqa
| 259 | 27.888889 | 36 | py |
spaCy | spaCy-master/spacy/ml/models/entity_linker.py | from pathlib import Path
from typing import Callable, Iterable, List, Optional, Tuple
from thinc.api import (
Linear,
Maxout,
Model,
Ragged,
chain,
list2ragged,
reduce_mean,
residual,
tuplify,
)
from thinc.types import Floats2d
from ...errors import Errors
from ...kb import (
Candidate,
InMemoryLookupKB,
KnowledgeBase,
get_candidates,
get_candidates_batch,
)
from ...tokens import Doc, Span
from ...util import registry
from ...vocab import Vocab
from ..extract_spans import extract_spans
@registry.architectures("spacy.EntityLinker.v2")
def build_nel_encoder(
tok2vec: Model, nO: Optional[int] = None
) -> Model[List[Doc], Floats2d]:
with Model.define_operators({">>": chain, "&": tuplify}):
token_width = tok2vec.maybe_get_dim("nO")
output_layer = Linear(nO=nO, nI=token_width)
model = (
((tok2vec >> list2ragged()) & build_span_maker())
>> extract_spans()
>> reduce_mean()
>> residual(Maxout(nO=token_width, nI=token_width, nP=2, dropout=0.0)) # type: ignore
>> output_layer
)
model.set_ref("output_layer", output_layer)
model.set_ref("tok2vec", tok2vec)
# flag to show this isn't legacy
model.attrs["include_span_maker"] = True
return model
def build_span_maker(n_sents: int = 0) -> Model:
model: Model = Model("span_maker", forward=span_maker_forward)
model.attrs["n_sents"] = n_sents
return model
def span_maker_forward(model, docs: List[Doc], is_train) -> Tuple[Ragged, Callable]:
ops = model.ops
n_sents = model.attrs["n_sents"]
candidates = []
for doc in docs:
cands = []
try:
sentences = [s for s in doc.sents]
except ValueError:
# no sentence info, normal in initialization
for tok in doc:
tok.is_sent_start = tok.i == 0
sentences = [doc[:]]
for ent in doc.ents:
try:
# find the sentence in the list of sentences.
sent_index = sentences.index(ent.sent)
except AttributeError:
                # Catch the exception when ent.sent is None and raise a user-friendly error
raise RuntimeError(Errors.E030) from None
# get n previous sentences, if there are any
start_sentence = max(0, sent_index - n_sents)
# get n posterior sentences, or as many < n as there are
end_sentence = min(len(sentences) - 1, sent_index + n_sents)
# get token positions
start_token = sentences[start_sentence].start
end_token = sentences[end_sentence].end
# save positions for extraction
cands.append((start_token, end_token))
candidates.append(ops.asarray2i(cands))
lengths = model.ops.asarray1i([len(cands) for cands in candidates])
out = Ragged(model.ops.flatten(candidates), lengths)
# because this is just rearranging docs, the backprop does nothing
return out, lambda x: []
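
# A minimal, hypothetical sketch (not used anywhere in spaCy): span_maker_forward()
# maps every entity to a (start_token, end_token) window covering its sentence
# plus up to n_sents sentences on either side. The helper below redoes that
# start/end arithmetic on made-up sentence boundaries, without needing Doc objects.
def _demo_context_window(n_sents: int = 1):
    sent_bounds = [(0, 5), (5, 12), (12, 20)]  # (start, end) offsets of three pretend sentences
    sent_index = 1  # the entity sits in the middle sentence
    start_sentence = max(0, sent_index - n_sents)
    end_sentence = min(len(sent_bounds) - 1, sent_index + n_sents)
    start_token = sent_bounds[start_sentence][0]
    end_token = sent_bounds[end_sentence][1]
    return start_token, end_token  # (0, 20) for n_sents=1
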
@registry.misc("spacy.KBFromFile.v1")
def load_kb(
kb_path: Path,
) -> Callable[[Vocab], KnowledgeBase]:
def kb_from_file(vocab: Vocab):
kb = InMemoryLookupKB(vocab, entity_vector_length=1)
kb.from_disk(kb_path)
return kb
return kb_from_file
@registry.misc("spacy.EmptyKB.v2")
def empty_kb_for_config() -> Callable[[Vocab, int], KnowledgeBase]:
def empty_kb_factory(vocab: Vocab, entity_vector_length: int):
return InMemoryLookupKB(vocab=vocab, entity_vector_length=entity_vector_length)
return empty_kb_factory
@registry.misc("spacy.EmptyKB.v1")
def empty_kb(
entity_vector_length: int,
) -> Callable[[Vocab], KnowledgeBase]:
def empty_kb_factory(vocab: Vocab):
return InMemoryLookupKB(vocab=vocab, entity_vector_length=entity_vector_length)
return empty_kb_factory
@registry.misc("spacy.CandidateGenerator.v1")
def create_candidates() -> Callable[[KnowledgeBase, Span], Iterable[Candidate]]:
return get_candidates
@registry.misc("spacy.CandidateBatchGenerator.v1")
def create_candidates_batch() -> Callable[
[KnowledgeBase, Iterable[Span]], Iterable[Iterable[Candidate]]
]:
return get_candidates_batch
| 4,291 | 30.792593 | 98 | py |
spaCy | spaCy-master/spacy/ml/models/multi_task.py | from functools import partial
from typing import TYPE_CHECKING, Any, Callable, Iterable, List, Optional, Tuple, cast
import numpy
from thinc.api import (
CosineDistance,
L2Distance,
LayerNorm,
Linear,
Maxout,
Model,
MultiSoftmax,
Softmax,
chain,
list2array,
to_categorical,
zero_init,
)
from thinc.loss import Loss
from thinc.types import Floats2d, Ints1d
from ...attrs import ID, ORTH
from ...errors import Errors
from ...util import OOV_RANK, registry
from ...vectors import Mode as VectorsMode
if TYPE_CHECKING:
# This lets us add type hints for mypy etc. without causing circular imports
from ...tokens.doc import Doc # noqa: F401
from ...vocab import Vocab # noqa: F401
@registry.architectures("spacy.PretrainVectors.v1")
def create_pretrain_vectors(
maxout_pieces: int, hidden_size: int, loss: str
) -> Callable[["Vocab", Model], Model]:
def create_vectors_objective(vocab: "Vocab", tok2vec: Model) -> Model:
if vocab.vectors.shape[1] == 0:
raise ValueError(Errors.E875)
model = build_cloze_multi_task_model(
vocab, tok2vec, hidden_size=hidden_size, maxout_pieces=maxout_pieces
)
model.attrs["loss"] = create_vectors_loss()
return model
def create_vectors_loss() -> Callable:
distance: Loss
if loss == "cosine":
distance = CosineDistance(normalize=True, ignore_zeros=True)
return partial(get_vectors_loss, distance=distance)
elif loss == "L2":
distance = L2Distance(normalize=True)
return partial(get_vectors_loss, distance=distance)
else:
raise ValueError(Errors.E906.format(found=loss, supported="'cosine', 'L2'"))
return create_vectors_objective
@registry.architectures("spacy.PretrainCharacters.v1")
def create_pretrain_characters(
maxout_pieces: int, hidden_size: int, n_characters: int
) -> Callable[["Vocab", Model], Model]:
def create_characters_objective(vocab: "Vocab", tok2vec: Model) -> Model:
model = build_cloze_characters_multi_task_model(
vocab,
tok2vec,
hidden_size=hidden_size,
maxout_pieces=maxout_pieces,
nr_char=n_characters,
)
model.attrs["loss"] = partial(get_characters_loss, nr_char=n_characters)
return model
return create_characters_objective
def get_vectors_loss(ops, docs, prediction, distance):
"""Compute a loss based on a distance between the documents' vectors and
the prediction.
"""
vocab = docs[0].vocab
if vocab.vectors.mode == VectorsMode.default:
# The simplest way to implement this would be to vstack the
# token.vector values, but that's a bit inefficient, especially on GPU.
# Instead we fetch the index into the vectors table for each of our
# tokens, and look them up all at once. This prevents data copying.
ids = ops.flatten([doc.to_array(ID).ravel() for doc in docs])
target = docs[0].vocab.vectors.data[ids]
target[ids == OOV_RANK] = 0
d_target, loss = distance(prediction, target)
elif vocab.vectors.mode == VectorsMode.floret:
keys = ops.flatten([cast(Ints1d, doc.to_array(ORTH)) for doc in docs])
target = vocab.vectors.get_batch(keys)
target = ops.as_contig(target)
d_target, loss = distance(prediction, target)
else:
raise ValueError(Errors.E850.format(mode=vocab.vectors.mode))
return loss, d_target
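
# A minimal, hypothetical sketch (not used anywhere in spaCy): the distance
# objects used above follow thinc's Loss convention of returning a
# (gradient, loss) pair. The helper applies CosineDistance to two arbitrary
# float32 rows, mirroring the `d_target, loss = distance(prediction, target)`
# call in get_vectors_loss().
def _demo_cosine_vectors_loss():
    import numpy
    from thinc.api import CosineDistance

    prediction = numpy.asarray([[1.0, 0.0], [0.5, 0.5]], dtype="f")
    target = numpy.asarray([[0.0, 1.0], [0.5, 0.5]], dtype="f")
    distance = CosineDistance(normalize=True, ignore_zeros=True)
    d_prediction, loss = distance(prediction, target)
    return loss, d_prediction.shape  # gradient has the same shape as the prediction
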
def get_characters_loss(ops, docs, prediction, nr_char):
"""Compute a loss based on a number of characters predicted from the docs."""
target_ids = numpy.vstack([doc.to_utf8_array(nr_char=nr_char) for doc in docs])
target_ids = target_ids.reshape((-1,))
target = ops.asarray(to_categorical(target_ids, n_classes=256), dtype="f")
target = target.reshape((-1, 256 * nr_char))
diff = prediction - target
loss = (diff**2).sum()
d_target = diff / float(prediction.shape[0])
return loss, d_target
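
# A minimal, hypothetical sketch (not used anywhere in spaCy): the loss above
# one-hot encodes nr_char byte ids per token (256 classes each) and takes a
# squared error. The helper reproduces that computation for two tokens with
# nr_char=2, made-up byte ids and an all-zero prediction.
def _demo_characters_loss(nr_char: int = 2):
    import numpy
    from thinc.api import to_categorical

    target_ids = numpy.asarray([ord("a"), ord("b"), ord("c"), ord("d")])  # 2 tokens x 2 chars
    target = to_categorical(target_ids, n_classes=256).astype("f")
    target = target.reshape((-1, 256 * nr_char))  # shape (2, 512)
    prediction = numpy.zeros_like(target)
    diff = prediction - target
    loss = (diff**2).sum()  # 4.0: one unit of loss per missed byte
    d_target = diff / float(prediction.shape[0])
    return loss, d_target.shape
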
def build_multi_task_model(
tok2vec: Model,
maxout_pieces: int,
token_vector_width: int,
nO: Optional[int] = None,
) -> Model:
softmax = Softmax(nO=nO, nI=token_vector_width * 2)
model = chain(
tok2vec,
Maxout(
nO=token_vector_width * 2,
nI=token_vector_width,
nP=maxout_pieces,
dropout=0.0,
),
LayerNorm(token_vector_width * 2),
softmax,
)
model.set_ref("tok2vec", tok2vec)
model.set_ref("output_layer", softmax)
return model
def build_cloze_multi_task_model(
vocab: "Vocab", tok2vec: Model, maxout_pieces: int, hidden_size: int
) -> Model:
nO = vocab.vectors.shape[1]
output_layer = chain(
cast(Model[List["Floats2d"], Floats2d], list2array()),
Maxout(
nO=hidden_size,
nI=tok2vec.get_dim("nO"),
nP=maxout_pieces,
normalize=True,
dropout=0.0,
),
Linear(nO=nO, nI=hidden_size, init_W=zero_init),
)
model = chain(tok2vec, output_layer)
model = build_masked_language_model(vocab, model)
model.set_ref("tok2vec", tok2vec)
model.set_ref("output_layer", output_layer)
return model
def build_cloze_characters_multi_task_model(
vocab: "Vocab", tok2vec: Model, maxout_pieces: int, hidden_size: int, nr_char: int
) -> Model:
output_layer = chain(
cast(Model[List["Floats2d"], Floats2d], list2array()),
Maxout(nO=hidden_size, nP=maxout_pieces),
LayerNorm(nI=hidden_size),
MultiSoftmax([256] * nr_char, nI=hidden_size), # type: ignore[arg-type]
)
model = build_masked_language_model(vocab, chain(tok2vec, output_layer))
model.set_ref("tok2vec", tok2vec)
model.set_ref("output_layer", output_layer)
return model
def build_masked_language_model(
vocab: "Vocab", wrapped_model: Model, mask_prob: float = 0.15
) -> Model:
"""Convert a model into a BERT-style masked language model"""
random_words = _RandomWords(vocab)
def mlm_forward(model, docs, is_train):
mask, docs = _apply_mask(docs, random_words, mask_prob=mask_prob)
mask = model.ops.asarray(mask).reshape((mask.shape[0], 1))
output, backprop = model.layers[0](docs, is_train)
def mlm_backward(d_output):
d_output *= 1 - mask
return backprop(d_output)
return output, mlm_backward
def mlm_initialize(model: Model, X=None, Y=None):
wrapped = model.layers[0]
wrapped.initialize(X=X, Y=Y)
for dim in wrapped.dim_names:
if wrapped.has_dim(dim):
model.set_dim(dim, wrapped.get_dim(dim))
mlm_model: Model = Model(
"masked-language-model",
mlm_forward,
layers=[wrapped_model],
init=mlm_initialize,
refs={"wrapped": wrapped_model},
dims={dim: None for dim in wrapped_model.dim_names},
)
mlm_model.set_ref("wrapped", wrapped_model)
return mlm_model
class _RandomWords:
def __init__(self, vocab: "Vocab") -> None:
# Extract lexeme representations
self.words = [lex.text for lex in vocab if lex.prob != 0.0]
self.words = self.words[:10000]
# Compute normalized lexeme probabilities
probs = [lex.prob for lex in vocab if lex.prob != 0.0]
probs = probs[:10000]
probs: numpy.ndarray = numpy.exp(numpy.array(probs, dtype="f"))
probs /= probs.sum()
self.probs = probs
# Initialize cache
self._cache: List[int] = []
def next(self) -> str:
if not self._cache:
self._cache.extend(
numpy.random.choice(len(self.words), 10000, p=self.probs)
)
index = self._cache.pop()
return self.words[index]
def _apply_mask(
docs: Iterable["Doc"], random_words: _RandomWords, mask_prob: float = 0.15
) -> Tuple[numpy.ndarray, List["Doc"]]:
# This needs to be here to avoid circular imports
from ...tokens.doc import Doc # noqa: F811
N = sum(len(doc) for doc in docs)
mask = numpy.random.uniform(0.0, 1.0, (N,))
mask = mask >= mask_prob
i = 0
masked_docs = []
for doc in docs:
words = []
for token in doc:
if not mask[i]:
word = _replace_word(token.text, random_words)
else:
word = token.text
words.append(word)
i += 1
spaces = [bool(w.whitespace_) for w in doc]
# NB: If you change this implementation to instead modify
# the docs in place, take care that the IDs reflect the original
# words. Currently we use the original docs to make the vectors
# for the target, so we don't lose the original tokens. But if
# you modified the docs in place here, you would.
masked_docs.append(Doc(doc.vocab, words=words, spaces=spaces))
return mask, masked_docs
def _replace_word(word: str, random_words: _RandomWords, mask: str = "[MASK]") -> str:
roll = numpy.random.random()
if roll < 0.8:
return mask
elif roll < 0.9:
return random_words.next()
else:
return word
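
# A minimal, hypothetical sketch (not used anywhere in spaCy): _apply_mask() and
# _replace_word() implement the BERT-style corruption scheme, selecting roughly
# 15% of tokens and turning ~80% of those into "[MASK]", ~10% into a random
# vocabulary word and leaving ~10% unchanged. The helper estimates those
# proportions with the same thresholds; "<random>" stands in for the vocabulary
# sample, since no Vocab is involved here.
def _demo_mask_proportions(n: int = 100_000, mask_prob: float = 0.15):
    from collections import Counter

    import numpy

    counts: Counter = Counter()
    selected = numpy.random.uniform(0.0, 1.0, (n,)) < mask_prob
    rolls = numpy.random.random(n)
    for is_selected, roll in zip(selected, rolls):
        if not is_selected:
            counts["kept"] += 1
        elif roll < 0.8:
            counts["[MASK]"] += 1
        elif roll < 0.9:
            counts["<random>"] += 1
        else:
            counts["unchanged"] += 1
    return counts  # roughly 85% kept, 12% [MASK], 1.5% <random>, 1.5% unchanged
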
| 9,271 | 32.96337 | 88 | py |
spaCy | spaCy-master/spacy/ml/models/parser.py | from typing import List, Optional, cast
from thinc.api import Linear, Model, chain, list2array, use_ops, zero_init
from thinc.types import Floats2d
from ...compat import Literal
from ...errors import Errors
from ...tokens import Doc
from ...util import registry
from .._precomputable_affine import PrecomputableAffine
from ..tb_framework import TransitionModel
@registry.architectures("spacy.TransitionBasedParser.v2")
def build_tb_parser_model(
tok2vec: Model[List[Doc], List[Floats2d]],
state_type: Literal["parser", "ner"],
extra_state_tokens: bool,
hidden_width: int,
maxout_pieces: int,
use_upper: bool,
nO: Optional[int] = None,
) -> Model:
"""
Build a transition-based parser model. Can apply to NER or dependency-parsing.
Transition-based parsing is an approach to structured prediction where the
task of predicting the structure is mapped to a series of state transitions.
You might find this tutorial helpful as background:
https://explosion.ai/blog/parsing-english-in-python
The neural network state prediction model consists of either two or three
subnetworks:
* tok2vec: Map each token into a vector representations. This subnetwork
is run once for each batch.
* lower: Construct a feature-specific vector for each (token, feature) pair.
This is also run once for each batch. Constructing the state
representation is then simply a matter of summing the component features
and applying the non-linearity.
* upper (optional): A feed-forward network that predicts scores from the
state representation. If not present, the output from the lower model is
used as action scores directly.
tok2vec (Model[List[Doc], List[Floats2d]]):
Subnetwork to map tokens into vector representations.
state_type (str):
String value denoting the type of parser model: "parser" or "ner"
extra_state_tokens (bool): Whether or not to use additional tokens in the context
to construct the state vector. Defaults to `False`, which means 3 and 8
for the NER and parser respectively. When set to `True`, this would become 6
feature sets (for the NER) or 13 (for the parser).
hidden_width (int): The width of the hidden layer.
maxout_pieces (int): How many pieces to use in the state prediction layer.
Recommended values are 1, 2 or 3. If 1, the maxout non-linearity
is replaced with a ReLu non-linearity if use_upper=True, and no
non-linearity if use_upper=False.
use_upper (bool): Whether to use an additional hidden layer after the state
vector in order to predict the action scores. It is recommended to set
this to False for large pretrained models such as transformers, and True
for smaller networks. The upper layer is computed on CPU, which becomes
a bottleneck on larger GPU-based models, where it's also less necessary.
nO (int or None): The number of actions the model will predict between.
Usually inferred from data at the beginning of training, or loaded from
disk.
"""
if state_type == "parser":
nr_feature_tokens = 13 if extra_state_tokens else 8
elif state_type == "ner":
nr_feature_tokens = 6 if extra_state_tokens else 3
else:
raise ValueError(Errors.E917.format(value=state_type))
t2v_width = tok2vec.get_dim("nO") if tok2vec.has_dim("nO") else None
tok2vec = chain(
tok2vec,
list2array(),
Linear(hidden_width, t2v_width),
)
tok2vec.set_dim("nO", hidden_width)
lower = _define_lower(
nO=hidden_width if use_upper else nO,
nF=nr_feature_tokens,
nI=tok2vec.get_dim("nO"),
nP=maxout_pieces,
)
upper = None
if use_upper:
with use_ops("cpu"):
# Initialize weights at zero, as it's a classification layer.
upper = _define_upper(nO=nO, nI=None)
return TransitionModel(tok2vec, lower, upper, resize_output)
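
# A hypothetical example (not used anywhere in spaCy): the function above is
# registered as "spacy.TransitionBasedParser.v2", so it is usually constructed
# from a config block rather than called directly. The values below are
# illustrative; the helper only parses the string with thinc's Config, while
# resolving it into an actual Model would additionally go through spaCy's
# registry.
_EXAMPLE_PARSER_CONFIG = """
[model]
@architectures = "spacy.TransitionBasedParser.v2"
state_type = "ner"
extra_state_tokens = false
hidden_width = 64
maxout_pieces = 2
use_upper = true
nO = null

[model.tok2vec]
@architectures = "spacy.HashEmbedCNN.v2"
pretrained_vectors = null
width = 96
depth = 4
embed_size = 2000
window_size = 1
maxout_pieces = 3
subword_features = true
"""


def _demo_parse_parser_config():
    from thinc.api import Config

    return Config().from_str(_EXAMPLE_PARSER_CONFIG)["model"]
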
def _define_upper(nO, nI):
return Linear(nO=nO, nI=nI, init_W=zero_init)
def _define_lower(nO, nF, nI, nP):
return PrecomputableAffine(nO=nO, nF=nF, nI=nI, nP=nP)
def resize_output(model, new_nO):
if model.attrs["has_upper"]:
return _resize_upper(model, new_nO)
return _resize_lower(model, new_nO)
def _resize_upper(model, new_nO):
upper = model.get_ref("upper")
if upper.has_dim("nO") is None:
upper.set_dim("nO", new_nO)
return model
elif new_nO == upper.get_dim("nO"):
return model
smaller = upper
nI = smaller.maybe_get_dim("nI")
with use_ops("cpu"):
larger = _define_upper(nO=new_nO, nI=nI)
# it could be that the model is not initialized yet, then skip this bit
if smaller.has_param("W"):
larger_W = larger.ops.alloc2f(new_nO, nI)
larger_b = larger.ops.alloc1f(new_nO)
smaller_W = smaller.get_param("W")
smaller_b = smaller.get_param("b")
# Weights are stored in (nr_out, nr_in) format, so we're basically
# just adding rows here.
if smaller.has_dim("nO"):
old_nO = smaller.get_dim("nO")
larger_W[:old_nO] = smaller_W
larger_b[:old_nO] = smaller_b
for i in range(old_nO, new_nO):
model.attrs["unseen_classes"].add(i)
larger.set_param("W", larger_W)
larger.set_param("b", larger_b)
model._layers[-1] = larger
model.set_ref("upper", larger)
return model
def _resize_lower(model, new_nO):
lower = model.get_ref("lower")
if lower.has_dim("nO") is None:
lower.set_dim("nO", new_nO)
return model
smaller = lower
nI = smaller.maybe_get_dim("nI")
nF = smaller.maybe_get_dim("nF")
nP = smaller.maybe_get_dim("nP")
larger = _define_lower(nO=new_nO, nI=nI, nF=nF, nP=nP)
# it could be that the model is not initialized yet, then skip this bit
if smaller.has_param("W"):
larger_W = larger.ops.alloc4f(nF, new_nO, nP, nI)
larger_b = larger.ops.alloc2f(new_nO, nP)
larger_pad = larger.ops.alloc4f(1, nF, new_nO, nP)
smaller_W = smaller.get_param("W")
smaller_b = smaller.get_param("b")
smaller_pad = smaller.get_param("pad")
# Copy the old weights and padding into the new layer
if smaller.has_dim("nO"):
old_nO = smaller.get_dim("nO")
larger_W[:, 0:old_nO, :, :] = smaller_W
larger_pad[:, :, 0:old_nO, :] = smaller_pad
larger_b[0:old_nO, :] = smaller_b
for i in range(old_nO, new_nO):
model.attrs["unseen_classes"].add(i)
larger.set_param("W", larger_W)
larger.set_param("b", larger_b)
larger.set_param("pad", larger_pad)
model._layers[1] = larger
model.set_ref("lower", larger)
return model
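
# A minimal, hypothetical sketch (not used anywhere in spaCy): both resize
# helpers above grow the output dimension by allocating a larger zero-initialised
# parameter and copying the old rows into it, so existing classes keep their
# trained weights while new classes start at zero and are tracked as "unseen".
# The helper mimics that copy with plain numpy for a (nO, nI) weight matrix.
def _demo_resize_weights(new_nO: int = 5):
    import numpy

    old_W = numpy.ones((3, 4), dtype="f")  # 3 trained classes, nI=4
    larger_W = numpy.zeros((new_nO, 4), dtype="f")  # room for new_nO classes
    old_nO = old_W.shape[0]
    larger_W[:old_nO] = old_W  # rows 0-2 keep their weights
    unseen = set(range(old_nO, new_nO))  # rows 3-4 are new / unseen
    return larger_W, unseen
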
| 6,897 | 38.193182 | 85 | py |
spaCy | spaCy-master/spacy/ml/models/span_finder.py | from typing import Callable, List, Tuple
from thinc.api import Model, chain, with_array
from thinc.types import Floats1d, Floats2d
from ...tokens import Doc
from ...util import registry
InT = List[Doc]
OutT = Floats2d
@registry.architectures("spacy.SpanFinder.v1")
def build_finder_model(
tok2vec: Model[InT, List[Floats2d]], scorer: Model[OutT, OutT]
) -> Model[InT, OutT]:
logistic_layer: Model[List[Floats2d], List[Floats2d]] = with_array(scorer)
model: Model[InT, OutT] = chain(tok2vec, logistic_layer, flattener())
model.set_ref("tok2vec", tok2vec)
model.set_ref("scorer", scorer)
model.set_ref("logistic_layer", logistic_layer)
return model
def flattener() -> Model[List[Floats2d], Floats2d]:
"""Flattens the input to a 1-dimensional list of scores"""
def forward(
model: Model[Floats1d, Floats1d], X: List[Floats2d], is_train: bool
) -> Tuple[Floats2d, Callable[[Floats2d], List[Floats2d]]]:
lens = model.ops.asarray1i([len(doc) for doc in X])
Y = model.ops.flatten(X)
def backprop(dY: Floats2d) -> List[Floats2d]:
return model.ops.unflatten(dY, lens)
return Y, backprop
return Model("Flattener", forward=forward)
| 1,230 | 28.309524 | 78 | py |
spaCy | spaCy-master/spacy/ml/models/spancat.py | from typing import List, Tuple, cast
from thinc.api import (
Linear,
Logistic,
Maxout,
Model,
chain,
concatenate,
glorot_uniform_init,
list2ragged,
reduce_first,
reduce_last,
reduce_max,
reduce_mean,
with_getitem,
)
from thinc.types import Floats2d, Ragged
from ...tokens import Doc
from ...util import registry
from ..extract_spans import extract_spans
@registry.layers("spacy.LinearLogistic.v1")
def build_linear_logistic(nO=None, nI=None) -> Model[Floats2d, Floats2d]:
"""An output layer for multi-label classification. It uses a linear layer
followed by a logistic activation.
"""
return chain(Linear(nO=nO, nI=nI, init_W=glorot_uniform_init), Logistic())
@registry.layers("spacy.mean_max_reducer.v1")
def build_mean_max_reducer(hidden_size: int) -> Model[Ragged, Floats2d]:
"""Reduce sequences by concatenating their mean and max pooled vectors,
and then combine the concatenated vectors with a hidden layer.
"""
return chain(
concatenate(
cast(Model[Ragged, Floats2d], reduce_last()),
cast(Model[Ragged, Floats2d], reduce_first()),
reduce_mean(),
reduce_max(),
),
Maxout(nO=hidden_size, normalize=True, dropout=0.0),
)
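
# A minimal, hypothetical sketch (not used anywhere in spaCy): for each span the
# reducer above concatenates the last, first, mean and max pooled token vectors
# before the Maxout layer, so that layer sees an input that is four times the
# token vector width. The helper computes those four pooled rows for one toy
# span with numpy.
def _demo_pooled_features():
    import numpy

    span = numpy.asarray([[1.0, 2.0], [3.0, 0.0], [5.0, 4.0]], dtype="f")  # 3 tokens, width 2
    pooled = numpy.concatenate(
        [span[-1], span[0], span.mean(axis=0), span.max(axis=0)]
    )
    return pooled  # width 8 == 4 * 2
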
@registry.architectures("spacy.SpanCategorizer.v1")
def build_spancat_model(
tok2vec: Model[List[Doc], List[Floats2d]],
reducer: Model[Ragged, Floats2d],
scorer: Model[Floats2d, Floats2d],
) -> Model[Tuple[List[Doc], Ragged], Floats2d]:
"""Build a span categorizer model, given a token-to-vector model, a
reducer model to map the sequence of vectors for each span down to a single
vector, and a scorer model to map the vectors to probabilities.
tok2vec (Model[List[Doc], List[Floats2d]]): The tok2vec model.
reducer (Model[Ragged, Floats2d]): The reducer model.
scorer (Model[Floats2d, Floats2d]): The scorer model.
"""
model = chain(
cast(
Model[Tuple[List[Doc], Ragged], Tuple[Ragged, Ragged]],
with_getitem(
0, chain(tok2vec, cast(Model[List[Floats2d], Ragged], list2ragged()))
),
),
extract_spans(),
reducer,
scorer,
)
model.set_ref("tok2vec", tok2vec)
model.set_ref("reducer", reducer)
model.set_ref("scorer", scorer)
return model
| 2,386 | 29.602564 | 85 | py |
spaCy | spaCy-master/spacy/ml/models/tagger.py | from typing import List, Optional
from thinc.api import Model, Softmax_v2, chain, with_array, zero_init
from thinc.types import Floats2d
from ...tokens import Doc
from ...util import registry
@registry.architectures("spacy.Tagger.v2")
def build_tagger_model(
tok2vec: Model[List[Doc], List[Floats2d]], nO: Optional[int] = None, normalize=False
) -> Model[List[Doc], List[Floats2d]]:
"""Build a tagger model, using a provided token-to-vector component. The tagger
model simply adds a linear layer with softmax activation to predict scores
given the token vectors.
tok2vec (Model[List[Doc], List[Floats2d]]): The token-to-vector subnetwork.
nO (int or None): The number of tags to output. Inferred from the data if None.
"""
# TODO: glorot_uniform_init seems to work a bit better than zero_init here?!
t2v_width = tok2vec.get_dim("nO") if tok2vec.has_dim("nO") else None
output_layer = Softmax_v2(
nO, t2v_width, init_W=zero_init, normalize_outputs=normalize
)
softmax = with_array(output_layer) # type: ignore
model = chain(tok2vec, softmax)
model.set_ref("tok2vec", tok2vec)
model.set_ref("softmax", output_layer)
model.set_ref("output_layer", output_layer)
return model
| 1,253 | 38.1875 | 88 | py |
spaCy | spaCy-master/spacy/ml/models/textcat.py | from functools import partial
from typing import List, Optional, cast
from thinc.api import (
Dropout,
LayerNorm,
Linear,
Logistic,
Maxout,
Model,
ParametricAttention,
Relu,
Softmax,
SparseLinear,
chain,
clone,
concatenate,
list2ragged,
reduce_mean,
reduce_sum,
residual,
resizable,
softmax_activation,
with_cpu,
)
from thinc.layers.chain import init as init_chain
from thinc.layers.resizable import resize_linear_weighted, resize_model
from thinc.types import Floats2d
from ...attrs import ORTH
from ...tokens import Doc
from ...util import registry
from ..extract_ngrams import extract_ngrams
from ..staticvectors import StaticVectors
from .tok2vec import get_tok2vec_width
NEG_VALUE = -5000
@registry.architectures("spacy.TextCatCNN.v2")
def build_simple_cnn_text_classifier(
tok2vec: Model, exclusive_classes: bool, nO: Optional[int] = None
) -> Model[List[Doc], Floats2d]:
"""
    Build a simple CNN text classifier, given a token-to-vector model as input.
If exclusive_classes=True, a softmax non-linearity is applied, so that the
outputs sum to 1. If exclusive_classes=False, a logistic non-linearity
is applied instead, so that outputs are in the range [0, 1].
"""
fill_defaults = {"b": 0, "W": 0}
with Model.define_operators({">>": chain}):
cnn = tok2vec >> list2ragged() >> reduce_mean()
nI = tok2vec.maybe_get_dim("nO")
if exclusive_classes:
output_layer = Softmax(nO=nO, nI=nI)
fill_defaults["b"] = NEG_VALUE
resizable_layer: Model = resizable(
output_layer,
resize_layer=partial(
resize_linear_weighted, fill_defaults=fill_defaults
),
)
model = cnn >> resizable_layer
else:
output_layer = Linear(nO=nO, nI=nI)
resizable_layer = resizable(
output_layer,
resize_layer=partial(
resize_linear_weighted, fill_defaults=fill_defaults
),
)
model = cnn >> resizable_layer >> Logistic()
model.set_ref("output_layer", output_layer)
model.attrs["resize_output"] = partial(
resize_and_set_ref,
resizable_layer=resizable_layer,
)
model.set_ref("tok2vec", tok2vec)
if nO is not None:
model.set_dim("nO", cast(int, nO))
model.attrs["multi_label"] = not exclusive_classes
return model
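
# A minimal, hypothetical sketch (not used anywhere in spaCy): with
# exclusive_classes=True the class scores go through a softmax and sum to 1 per
# document, while exclusive_classes=False gives each class an independent
# logistic score in [0, 1], which is what makes multi-label output possible.
# The helper contrasts the two on the same arbitrary logits.
def _demo_exclusive_vs_multilabel():
    import numpy

    logits = numpy.asarray([2.0, 0.0, -1.0], dtype="f")
    softmax = numpy.exp(logits) / numpy.exp(logits).sum()  # sums to 1.0
    logistic = 1.0 / (1.0 + numpy.exp(-logits))  # each score independent
    return softmax, logistic
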
def resize_and_set_ref(model, new_nO, resizable_layer):
resizable_layer = resize_model(resizable_layer, new_nO)
model.set_ref("output_layer", resizable_layer.layers[0])
model.set_dim("nO", new_nO, force=True)
return model
@registry.architectures("spacy.TextCatBOW.v2")
def build_bow_text_classifier(
exclusive_classes: bool,
ngram_size: int,
no_output_layer: bool,
nO: Optional[int] = None,
) -> Model[List[Doc], Floats2d]:
fill_defaults = {"b": 0, "W": 0}
with Model.define_operators({">>": chain}):
sparse_linear = SparseLinear(nO=nO)
output_layer = None
if not no_output_layer:
fill_defaults["b"] = NEG_VALUE
output_layer = softmax_activation() if exclusive_classes else Logistic()
resizable_layer: Model[Floats2d, Floats2d] = resizable(
sparse_linear,
resize_layer=partial(resize_linear_weighted, fill_defaults=fill_defaults),
)
model = extract_ngrams(ngram_size, attr=ORTH) >> resizable_layer
model = with_cpu(model, model.ops)
if output_layer:
model = model >> with_cpu(output_layer, output_layer.ops)
if nO is not None:
model.set_dim("nO", cast(int, nO))
model.set_ref("output_layer", sparse_linear)
model.attrs["multi_label"] = not exclusive_classes
model.attrs["resize_output"] = partial(
resize_and_set_ref, resizable_layer=resizable_layer
)
return model
@registry.architectures("spacy.TextCatEnsemble.v2")
def build_text_classifier_v2(
tok2vec: Model[List[Doc], List[Floats2d]],
linear_model: Model[List[Doc], Floats2d],
nO: Optional[int] = None,
) -> Model[List[Doc], Floats2d]:
exclusive_classes = not linear_model.attrs["multi_label"]
with Model.define_operators({">>": chain, "|": concatenate}):
width = tok2vec.maybe_get_dim("nO")
attention_layer = ParametricAttention(width)
maxout_layer = Maxout(nO=width, nI=width)
norm_layer = LayerNorm(nI=width)
cnn_model = (
tok2vec
>> list2ragged()
>> attention_layer
>> reduce_sum()
>> residual(maxout_layer >> norm_layer >> Dropout(0.0))
)
nO_double = nO * 2 if nO else None
if exclusive_classes:
output_layer = Softmax(nO=nO, nI=nO_double)
else:
output_layer = Linear(nO=nO, nI=nO_double) >> Logistic()
model = (linear_model | cnn_model) >> output_layer
model.set_ref("tok2vec", tok2vec)
if model.has_dim("nO") is not False and nO is not None:
model.set_dim("nO", cast(int, nO))
model.set_ref("output_layer", linear_model.get_ref("output_layer"))
model.set_ref("attention_layer", attention_layer)
model.set_ref("maxout_layer", maxout_layer)
model.set_ref("norm_layer", norm_layer)
model.attrs["multi_label"] = not exclusive_classes
model.init = init_ensemble_textcat # type: ignore[assignment]
return model
def init_ensemble_textcat(model, X, Y) -> Model:
tok2vec_width = get_tok2vec_width(model)
model.get_ref("attention_layer").set_dim("nO", tok2vec_width)
model.get_ref("maxout_layer").set_dim("nO", tok2vec_width)
model.get_ref("maxout_layer").set_dim("nI", tok2vec_width)
model.get_ref("norm_layer").set_dim("nI", tok2vec_width)
model.get_ref("norm_layer").set_dim("nO", tok2vec_width)
init_chain(model, X, Y)
return model
@registry.architectures("spacy.TextCatLowData.v1")
def build_text_classifier_lowdata(
width: int, dropout: Optional[float], nO: Optional[int] = None
) -> Model[List[Doc], Floats2d]:
# Don't document this yet, I'm not sure it's right.
# Note, before v.3, this was the default if setting "low_data" and "pretrained_dims"
with Model.define_operators({">>": chain, "**": clone}):
model = (
StaticVectors(width)
>> list2ragged()
>> ParametricAttention(width)
>> reduce_sum()
>> residual(Relu(width, width)) ** 2
>> Linear(nO, width)
)
if dropout:
model = model >> Dropout(dropout)
model = model >> Logistic()
return model
| 6,769 | 34.07772 | 88 | py |
spaCy | spaCy-master/spacy/ml/models/tok2vec.py | from typing import List, Optional, Union, cast
from thinc.api import (
HashEmbed,
Maxout,
Mish,
Model,
PyTorchLSTM,
chain,
clone,
concatenate,
expand_window,
list2ragged,
noop,
ragged2list,
residual,
with_array,
with_padded,
)
from thinc.types import Floats2d, Ints1d, Ints2d, Ragged
from ...attrs import intify_attr
from ...errors import Errors
from ...ml import _character_embed
from ...pipeline.tok2vec import Tok2VecListener
from ...tokens import Doc
from ...util import registry
from ..featureextractor import FeatureExtractor
from ..staticvectors import StaticVectors
@registry.architectures("spacy.Tok2VecListener.v1")
def tok2vec_listener_v1(width: int, upstream: str = "*"):
tok2vec = Tok2VecListener(upstream_name=upstream, width=width)
return tok2vec
def get_tok2vec_width(model: Model):
nO = None
if model.has_ref("tok2vec"):
tok2vec = model.get_ref("tok2vec")
if tok2vec.has_dim("nO"):
nO = tok2vec.get_dim("nO")
elif tok2vec.has_ref("listener"):
nO = tok2vec.get_ref("listener").get_dim("nO")
return nO
@registry.architectures("spacy.HashEmbedCNN.v2")
def build_hash_embed_cnn_tok2vec(
*,
width: int,
depth: int,
embed_size: int,
window_size: int,
maxout_pieces: int,
subword_features: bool,
pretrained_vectors: Optional[bool],
) -> Model[List[Doc], List[Floats2d]]:
"""Build spaCy's 'standard' tok2vec layer, which uses hash embedding
with subword features and a CNN with layer-normalized maxout.
width (int): The width of the input and output. These are required to be the
same, so that residual connections can be used. Recommended values are
96, 128 or 300.
depth (int): The number of convolutional layers to use. Recommended values
are between 2 and 8.
window_size (int): The number of tokens on either side to concatenate during
the convolutions. The receptive field of the CNN will be
depth * (window_size * 2 + 1), so a 4-layer network with window_size of
2 will be sensitive to 20 words at a time. Recommended value is 1.
embed_size (int): The number of rows in the hash embedding tables. This can
be surprisingly small, due to the use of the hash embeddings. Recommended
values are between 2000 and 10000.
maxout_pieces (int): The number of pieces to use in the maxout non-linearity.
If 1, the Mish non-linearity is used instead. Recommended values are 1-3.
subword_features (bool): Whether to also embed subword features, specifically
the prefix, suffix and word shape. This is recommended for alphabetic
languages like English, but not if single-character tokens are used for
a language such as Chinese.
pretrained_vectors (bool): Whether to also use static vectors.
"""
if subword_features:
attrs = ["NORM", "PREFIX", "SUFFIX", "SHAPE"]
row_sizes = [embed_size, embed_size // 2, embed_size // 2, embed_size // 2]
else:
attrs = ["NORM"]
row_sizes = [embed_size]
return build_Tok2Vec_model(
embed=MultiHashEmbed(
width=width,
rows=row_sizes,
attrs=attrs,
include_static_vectors=bool(pretrained_vectors),
),
encode=MaxoutWindowEncoder(
width=width,
depth=depth,
window_size=window_size,
maxout_pieces=maxout_pieces,
),
)
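
# A minimal, hypothetical sketch (not used anywhere in spaCy): the docstring
# above gives the CNN's receptive field as depth * (window_size * 2 + 1). The
# helper just evaluates that formula for a few example settings.
def _demo_receptive_field():
    def receptive_field(depth: int, window_size: int) -> int:
        return depth * (window_size * 2 + 1)

    return {
        (4, 1): receptive_field(4, 1),  # 12 tokens
        (4, 2): receptive_field(4, 2),  # 20 tokens, the example in the docstring
        (8, 1): receptive_field(8, 1),  # 24 tokens
    }
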
@registry.architectures("spacy.Tok2Vec.v2")
def build_Tok2Vec_model(
embed: Model[List[Doc], List[Floats2d]],
encode: Model[List[Floats2d], List[Floats2d]],
) -> Model[List[Doc], List[Floats2d]]:
"""Construct a tok2vec model out of embedding and encoding subnetworks.
See https://explosion.ai/blog/deep-learning-formula-nlp
embed (Model[List[Doc], List[Floats2d]]): Embed tokens into context-independent
word vector representations.
encode (Model[List[Floats2d], List[Floats2d]]): Encode context into the
embeddings, using an architecture such as a CNN, BiLSTM or transformer.
"""
tok2vec = chain(embed, encode)
if encode.has_dim("nO"):
tok2vec.set_dim("nO", encode.get_dim("nO"))
tok2vec.set_ref("embed", embed)
tok2vec.set_ref("encode", encode)
return tok2vec
@registry.architectures("spacy.MultiHashEmbed.v2")
def MultiHashEmbed(
width: int,
attrs: List[Union[str, int]],
rows: List[int],
include_static_vectors: bool,
) -> Model[List[Doc], List[Floats2d]]:
"""Construct an embedding layer that separately embeds a number of lexical
attributes using hash embedding, concatenates the results, and passes it
through a feed-forward subnetwork to build a mixed representation.
The features used can be configured with the 'attrs' argument. The suggested
attributes are NORM, PREFIX, SUFFIX and SHAPE. This lets the model take into
account some subword information, without constructing a fully character-based
representation. If pretrained vectors are available, they can be included in
the representation as well, with the vectors table kept static
(i.e. it's not updated).
The `width` parameter specifies the output width of the layer and the widths
of all embedding tables. If static vectors are included, a learned linear
layer is used to map the vectors to the specified width before concatenating
it with the other embedding outputs. A single Maxout layer is then used to
reduce the concatenated vectors to the final width.
The `rows` parameter controls the number of rows used by the `HashEmbed`
tables. The HashEmbed layer needs surprisingly few rows, due to its use of
the hashing trick. Generally between 2000 and 10000 rows is sufficient,
even for very large vocabularies. A number of rows must be specified for each
table, so the `rows` list must be of the same length as the `attrs` parameter.
width (int): The output width. Also used as the width of the embedding tables.
Recommended values are between 64 and 300.
attrs (list of attr IDs): The token attributes to embed. A separate
embedding table will be constructed for each attribute.
rows (List[int]): The number of rows in the embedding tables. Must have the
same length as attrs.
include_static_vectors (bool): Whether to also use static word vectors.
Requires a vectors table to be loaded in the Doc objects' vocab.
"""
if len(rows) != len(attrs):
raise ValueError(f"Mismatched lengths: {len(rows)} vs {len(attrs)}")
seed = 7
def make_hash_embed(index):
nonlocal seed
seed += 1
return HashEmbed(width, rows[index], column=index, seed=seed, dropout=0.0)
embeddings = [make_hash_embed(i) for i in range(len(attrs))]
concat_size = width * (len(embeddings) + include_static_vectors)
max_out: Model[Ragged, Ragged] = with_array(
Maxout(width, concat_size, nP=3, dropout=0.0, normalize=True)
)
if include_static_vectors:
feature_extractor: Model[List[Doc], Ragged] = chain(
FeatureExtractor(attrs),
cast(Model[List[Ints2d], Ragged], list2ragged()),
with_array(concatenate(*embeddings)),
)
model = chain(
concatenate(
feature_extractor,
StaticVectors(width, dropout=0.0),
),
max_out,
ragged2list(),
)
else:
model = chain(
FeatureExtractor(list(attrs)),
cast(Model[List[Ints2d], Ragged], list2ragged()),
with_array(concatenate(*embeddings)),
max_out,
ragged2list(),
)
return model
@registry.architectures("spacy.CharacterEmbed.v2")
def CharacterEmbed(
width: int,
rows: int,
nM: int,
nC: int,
include_static_vectors: bool,
feature: Union[int, str] = "LOWER",
) -> Model[List[Doc], List[Floats2d]]:
"""Construct an embedded representation based on character embeddings, using
a feed-forward network. A fixed number of UTF-8 byte characters are used for
each word, taken from the beginning and end of the word equally. Padding is
used in the centre for words that are too short.
For instance, let's say nC=4, and the word is "jumping". The characters
used will be jung (two from the start, two from the end). If we had nC=8,
the characters would be "jumpping": 4 from the start, 4 from the end. This
ensures that the final character is always in the last position, instead
of being in an arbitrary position depending on the word length.
    The characters are embedded in an embedding table with a given number of rows,
and the vectors concatenated. A hash-embedded vector of the LOWER of the word is
also concatenated on, and the result is then passed through a feed-forward
network to construct a single vector to represent the information.
feature (int or str): An attribute to embed, to concatenate with the characters.
width (int): The width of the output vector and the feature embedding.
rows (int): The number of rows in the LOWER hash embedding table.
nM (int): The dimensionality of the character embeddings. Recommended values
are between 16 and 64.
nC (int): The number of UTF-8 bytes to embed per word. Recommended values
are between 3 and 8, although it may depend on the length of words in the
language.
include_static_vectors (bool): Whether to also use static word vectors.
Requires a vectors table to be loaded in the Doc objects' vocab.
"""
feature = intify_attr(feature)
if feature is None:
raise ValueError(Errors.E911.format(feat=feature))
char_embed = chain(
_character_embed.CharacterEmbed(nM=nM, nC=nC),
cast(Model[List[Floats2d], Ragged], list2ragged()),
)
feature_extractor: Model[List[Doc], Ragged] = chain(
FeatureExtractor([feature]),
cast(Model[List[Ints2d], Ragged], list2ragged()),
with_array(HashEmbed(nO=width, nV=rows, column=0, seed=5)), # type: ignore[misc]
)
max_out: Model[Ragged, Ragged]
if include_static_vectors:
max_out = with_array(
Maxout(width, nM * nC + (2 * width), nP=3, normalize=True, dropout=0.0)
)
model = chain(
concatenate(
char_embed,
feature_extractor,
StaticVectors(width, dropout=0.0),
),
max_out,
ragged2list(),
)
else:
max_out = with_array(
Maxout(width, nM * nC + width, nP=3, normalize=True, dropout=0.0)
)
model = chain(
concatenate(
char_embed,
feature_extractor,
),
max_out,
ragged2list(),
)
return model
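
# A minimal, hypothetical sketch (not used anywhere in spaCy): the docstring
# above describes taking nC characters per word, half from the start and half
# from the end. The helper re-implements that selection rule on plain Python
# strings purely for illustration; the real layer works on UTF-8 bytes via
# _character_embed, and very short words additionally get centre padding,
# which is not reproduced here.
def _demo_character_selection(word: str = "jumping", nC: int = 4):
    half = nC // 2
    return word[:half] + word[-half:]  # nC=4 -> "jung", nC=8 -> "jumpping"
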
@registry.architectures("spacy.MaxoutWindowEncoder.v2")
def MaxoutWindowEncoder(
width: int, window_size: int, maxout_pieces: int, depth: int
) -> Model[List[Floats2d], List[Floats2d]]:
"""Encode context using convolutions with maxout activation, layer
normalization and residual connections.
width (int): The input and output width. These are required to be the same,
to allow residual connections. This value will be determined by the
width of the inputs. Recommended values are between 64 and 300.
window_size (int): The number of words to concatenate around each token
to construct the convolution. Recommended value is 1.
maxout_pieces (int): The number of maxout pieces to use. Recommended
values are 2 or 3.
depth (int): The number of convolutional layers. Recommended value is 4.
"""
cnn = chain(
expand_window(window_size=window_size),
Maxout(
nO=width,
nI=width * ((window_size * 2) + 1),
nP=maxout_pieces,
dropout=0.0,
normalize=True,
),
)
model = clone(residual(cnn), depth)
model.set_dim("nO", width)
receptive_field = window_size * depth
return with_array(model, pad=receptive_field)
@registry.architectures("spacy.MishWindowEncoder.v2")
def MishWindowEncoder(
width: int, window_size: int, depth: int
) -> Model[List[Floats2d], List[Floats2d]]:
"""Encode context using convolutions with mish activation, layer
normalization and residual connections.
width (int): The input and output width. These are required to be the same,
to allow residual connections. This value will be determined by the
width of the inputs. Recommended values are between 64 and 300.
window_size (int): The number of words to concatenate around each token
to construct the convolution. Recommended value is 1.
depth (int): The number of convolutional layers. Recommended value is 4.
"""
cnn = chain(
expand_window(window_size=window_size),
Mish(nO=width, nI=width * ((window_size * 2) + 1), dropout=0.0, normalize=True),
)
model = clone(residual(cnn), depth)
model.set_dim("nO", width)
return with_array(model)
@registry.architectures("spacy.TorchBiLSTMEncoder.v1")
def BiLSTMEncoder(
width: int, depth: int, dropout: float
) -> Model[List[Floats2d], List[Floats2d]]:
"""Encode context using bidirectonal LSTM layers. Requires PyTorch.
width (int): The input and output width. These are required to be the same,
to allow residual connections. This value will be determined by the
width of the inputs. Recommended values are between 64 and 300.
depth (int): The number of recurrent layers.
dropout (float): Creates a Dropout layer on the outputs of each LSTM layer
except the last layer. Set to 0 to disable this functionality.
"""
if depth == 0:
return noop()
return with_padded(PyTorchLSTM(width, width, bi=True, depth=depth, dropout=dropout))
| 14,046 | 38.90625 | 89 | py |
spaCy | spaCy-master/spacy/pipeline/__init__.py | from .attributeruler import AttributeRuler
from .dep_parser import DependencyParser
from .edit_tree_lemmatizer import EditTreeLemmatizer
from .entity_linker import EntityLinker
from .entityruler import EntityRuler
from .functions import merge_entities, merge_noun_chunks, merge_subtokens
from .lemmatizer import Lemmatizer
from .morphologizer import Morphologizer
from .ner import EntityRecognizer
from .pipe import Pipe
from .sentencizer import Sentencizer
from .senter import SentenceRecognizer
from .span_finder import SpanFinder
from .span_ruler import SpanRuler
from .spancat import SpanCategorizer
from .tagger import Tagger
from .textcat import TextCategorizer
from .textcat_multilabel import MultiLabel_TextCategorizer
from .tok2vec import Tok2Vec
from .trainable_pipe import TrainablePipe
__all__ = [
"AttributeRuler",
"DependencyParser",
"EntityLinker",
"EntityRecognizer",
"EntityRuler",
"Morphologizer",
"Lemmatizer",
"MultiLabel_TextCategorizer",
"Pipe",
"SentenceRecognizer",
"Sentencizer",
"SpanCategorizer",
"SpanFinder",
"SpanRuler",
"Tagger",
"TextCategorizer",
"Tok2Vec",
"TrainablePipe",
"merge_entities",
"merge_noun_chunks",
"merge_subtokens",
]
| 1,253 | 26.866667 | 73 | py |
spaCy | spaCy-master/spacy/pipeline/attributeruler.py | from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union
import srsly
from .. import util
from ..errors import Errors
from ..language import Language
from ..matcher import Matcher
from ..scorer import Scorer
from ..symbols import IDS
from ..tokens import Doc, Span
from ..tokens._retokenize import normalize_token_attrs, set_token_attrs
from ..training import Example
from ..util import SimpleFrozenList, registry
from ..vocab import Vocab
from .pipe import Pipe
MatcherPatternType = List[Dict[Union[int, str], Any]]
AttributeRulerPatternType = Dict[str, Union[MatcherPatternType, Dict, int]]
TagMapType = Dict[str, Dict[Union[int, str], Union[int, str]]]
MorphRulesType = Dict[str, Dict[str, Dict[Union[int, str], Union[int, str]]]]
@Language.factory(
"attribute_ruler",
default_config={
"validate": False,
"scorer": {"@scorers": "spacy.attribute_ruler_scorer.v1"},
},
)
def make_attribute_ruler(
nlp: Language, name: str, validate: bool, scorer: Optional[Callable]
):
return AttributeRuler(nlp.vocab, name, validate=validate, scorer=scorer)
def attribute_ruler_score(examples: Iterable[Example], **kwargs) -> Dict[str, Any]:
def morph_key_getter(token, attr):
return getattr(token, attr).key
results = {}
results.update(Scorer.score_token_attr(examples, "tag", **kwargs))
results.update(Scorer.score_token_attr(examples, "pos", **kwargs))
results.update(
Scorer.score_token_attr(examples, "morph", getter=morph_key_getter, **kwargs)
)
results.update(
Scorer.score_token_attr_per_feat(
examples, "morph", getter=morph_key_getter, **kwargs
)
)
results.update(Scorer.score_token_attr(examples, "lemma", **kwargs))
return results
@registry.scorers("spacy.attribute_ruler_scorer.v1")
def make_attribute_ruler_scorer():
return attribute_ruler_score
class AttributeRuler(Pipe):
"""Set token-level attributes for tokens matched by Matcher patterns.
Additionally supports importing patterns from tag maps and morph rules.
DOCS: https://spacy.io/api/attributeruler
"""
def __init__(
self,
vocab: Vocab,
name: str = "attribute_ruler",
*,
validate: bool = False,
scorer: Optional[Callable] = attribute_ruler_score,
) -> None:
"""Create the AttributeRuler. After creation, you can add patterns
with the `.initialize()` or `.add_patterns()` methods, or load patterns
with `.from_bytes()` or `.from_disk()`. Loading patterns will remove
any patterns you've added previously.
vocab (Vocab): The vocab.
name (str): The pipe name. Defaults to "attribute_ruler".
scorer (Optional[Callable]): The scoring method. Defaults to
Scorer.score_token_attr for the attributes "tag", "pos", "morph" and
"lemma" and Scorer.score_token_attr_per_feat for the attribute
"morph".
RETURNS (AttributeRuler): The AttributeRuler component.
DOCS: https://spacy.io/api/attributeruler#init
"""
self.name = name
self.vocab = vocab
self.matcher = Matcher(self.vocab, validate=validate)
self.validate = validate
self.attrs: List[Dict] = []
self._attrs_unnormed: List[Dict] = [] # store for reference
self.indices: List[int] = []
self.scorer = scorer
def clear(self) -> None:
"""Reset all patterns."""
self.matcher = Matcher(self.vocab, validate=self.validate)
self.attrs = []
self._attrs_unnormed = []
self.indices = []
def initialize(
self,
get_examples: Optional[Callable[[], Iterable[Example]]],
*,
nlp: Optional[Language] = None,
patterns: Optional[Iterable[AttributeRulerPatternType]] = None,
tag_map: Optional[TagMapType] = None,
morph_rules: Optional[MorphRulesType] = None,
) -> None:
"""Initialize the attribute ruler by adding zero or more patterns.
Rules can be specified as a sequence of dicts using the `patterns`
keyword argument. You can also provide rules using the "tag map" or
"morph rules" formats supported by spaCy prior to v3.
"""
self.clear()
if patterns:
self.add_patterns(patterns)
if tag_map:
self.load_from_tag_map(tag_map)
if morph_rules:
self.load_from_morph_rules(morph_rules)
def __call__(self, doc: Doc) -> Doc:
"""Apply the AttributeRuler to a Doc and set all attribute exceptions.
doc (Doc): The document to process.
RETURNS (Doc): The processed Doc.
DOCS: https://spacy.io/api/attributeruler#call
"""
error_handler = self.get_error_handler()
try:
matches = self.match(doc)
self.set_annotations(doc, matches)
return doc
except Exception as e:
return error_handler(self.name, self, [doc], e)
def match(self, doc: Doc):
matches = self.matcher(doc, allow_missing=True, as_spans=False)
# Sort by the attribute ID, so that later rules have precedence
matches = [
(int(self.vocab.strings[m_id]), m_id, s, e) for m_id, s, e in matches # type: ignore
]
matches.sort()
return matches
def set_annotations(self, doc, matches):
"""Modify the document in place"""
for attr_id, match_id, start, end in matches:
span = Span(doc, start, end, label=match_id)
attrs = self.attrs[attr_id]
index = self.indices[attr_id]
try:
# The index can be negative, which makes it annoying to do
# the boundscheck. Let Span do it instead.
token = span[index] # noqa: F841
except IndexError:
# The original exception is just our conditional logic, so we
# raise from.
raise ValueError(
Errors.E1001.format(
patterns=self.matcher.get(span.label),
span=[t.text for t in span],
index=index,
)
) from None
set_token_attrs(span[index], attrs)
def load_from_tag_map(
self, tag_map: Dict[str, Dict[Union[int, str], Union[int, str]]]
) -> None:
"""Load attribute ruler patterns from a tag map.
tag_map (dict): The tag map that maps fine-grained tags to
coarse-grained tags and morphological features.
        DOCS: https://spacy.io/api/attributeruler#load_from_tag_map
"""
for tag, attrs in tag_map.items():
pattern = [{"TAG": tag}]
attrs, morph_attrs = _split_morph_attrs(attrs)
if "MORPH" not in attrs:
morph = self.vocab.morphology.add(morph_attrs)
attrs["MORPH"] = self.vocab.strings[morph]
else:
morph = self.vocab.morphology.add(attrs["MORPH"])
attrs["MORPH"] = self.vocab.strings[morph]
self.add([pattern], attrs) # type: ignore[list-item]
def load_from_morph_rules(
self, morph_rules: Dict[str, Dict[str, Dict[Union[int, str], Union[int, str]]]]
) -> None:
"""Load attribute ruler patterns from morph rules.
morph_rules (dict): The morph rules that map token text and
fine-grained tags to coarse-grained tags, lemmas and morphological
features.
DOCS: https://spacy.io/api/attributeruler#load_from_morph_rules
"""
for tag in morph_rules:
for word in morph_rules[tag]:
pattern = [{"ORTH": word, "TAG": tag}]
attrs = morph_rules[tag][word]
attrs, morph_attrs = _split_morph_attrs(attrs)
if "MORPH" in attrs:
morph = self.vocab.morphology.add(attrs["MORPH"])
attrs["MORPH"] = self.vocab.strings[morph]
elif morph_attrs:
morph = self.vocab.morphology.add(morph_attrs)
attrs["MORPH"] = self.vocab.strings[morph]
self.add([pattern], attrs) # type: ignore[list-item]
def add(
self, patterns: Iterable[MatcherPatternType], attrs: Dict, index: int = 0
) -> None:
"""Add Matcher patterns for tokens that should be modified with the
provided attributes. The token at the specified index within the
matched span will be assigned the attributes.
patterns (Iterable[List[Dict]]): A list of Matcher patterns.
attrs (Dict): The attributes to assign to the target token in the
matched span.
index (int): The index of the token in the matched span to modify. May
be negative to index from the end of the span. Defaults to 0.
DOCS: https://spacy.io/api/attributeruler#add
"""
# We need to make a string here, because otherwise the ID we pass back
# will be interpreted as the hash of a string, rather than an ordinal.
key = str(len(self.attrs))
self.matcher.add(self.vocab.strings.add(key), patterns) # type: ignore[arg-type]
self._attrs_unnormed.append(attrs)
attrs = normalize_token_attrs(self.vocab, attrs)
self.attrs.append(attrs)
self.indices.append(index)
def add_patterns(self, patterns: Iterable[AttributeRulerPatternType]) -> None:
"""Add patterns from a list of pattern dicts with the keys as the
arguments to AttributeRuler.add.
patterns (Iterable[dict]): A list of pattern dicts with the keys
as the arguments to AttributeRuler.add (patterns/attrs/index) to
add as patterns.
DOCS: https://spacy.io/api/attributeruler#add_patterns
"""
for p in patterns:
self.add(**p) # type: ignore[arg-type]
@property
def patterns(self) -> List[AttributeRulerPatternType]:
"""All the added patterns."""
all_patterns = []
for i in range(len(self.attrs)):
p = {}
p["patterns"] = self.matcher.get(str(i))[1]
p["attrs"] = self._attrs_unnormed[i] # type: ignore
p["index"] = self.indices[i] # type: ignore
all_patterns.append(p)
return all_patterns # type: ignore[return-value]
def to_bytes(self, exclude: Iterable[str] = SimpleFrozenList()) -> bytes:
"""Serialize the AttributeRuler to a bytestring.
exclude (Iterable[str]): String names of serialization fields to exclude.
RETURNS (bytes): The serialized object.
DOCS: https://spacy.io/api/attributeruler#to_bytes
"""
serialize = {}
serialize["vocab"] = lambda: self.vocab.to_bytes(exclude=exclude)
serialize["patterns"] = lambda: srsly.msgpack_dumps(self.patterns)
return util.to_bytes(serialize, exclude)
def from_bytes(
self, bytes_data: bytes, exclude: Iterable[str] = SimpleFrozenList()
) -> "AttributeRuler":
"""Load the AttributeRuler from a bytestring.
bytes_data (bytes): The data to load.
exclude (Iterable[str]): String names of serialization fields to exclude.
returns (AttributeRuler): The loaded object.
DOCS: https://spacy.io/api/attributeruler#from_bytes
"""
def load_patterns(b):
self.add_patterns(srsly.msgpack_loads(b))
deserialize = {
"vocab": lambda b: self.vocab.from_bytes(b, exclude=exclude),
"patterns": load_patterns,
}
util.from_bytes(bytes_data, deserialize, exclude)
return self
def to_disk(
self, path: Union[Path, str], exclude: Iterable[str] = SimpleFrozenList()
) -> None:
"""Serialize the AttributeRuler to disk.
path (Union[Path, str]): A path to a directory.
exclude (Iterable[str]): String names of serialization fields to exclude.
DOCS: https://spacy.io/api/attributeruler#to_disk
"""
serialize = {
"vocab": lambda p: self.vocab.to_disk(p, exclude=exclude),
"patterns": lambda p: srsly.write_msgpack(p, self.patterns),
}
util.to_disk(path, serialize, exclude)
def from_disk(
self, path: Union[Path, str], exclude: Iterable[str] = SimpleFrozenList()
) -> "AttributeRuler":
"""Load the AttributeRuler from disk.
path (Union[Path, str]): A path to a directory.
exclude (Iterable[str]): String names of serialization fields to exclude.
RETURNS (AttributeRuler): The loaded object.
DOCS: https://spacy.io/api/attributeruler#from_disk
"""
def load_patterns(p):
self.add_patterns(srsly.read_msgpack(p))
deserialize = {
"vocab": lambda p: self.vocab.from_disk(p, exclude=exclude),
"patterns": load_patterns,
}
util.from_disk(path, deserialize, exclude)
return self
def _split_morph_attrs(attrs: dict) -> Tuple[dict, dict]:
"""Split entries from a tag map or morph rules dict into to two dicts, one
with the token-level features (POS, LEMMA) and one with the remaining
features, which are presumed to be individual MORPH features."""
other_attrs = {}
morph_attrs = {}
for k, v in attrs.items():
if k in "_" or k in IDS.keys() or k in IDS.values():
other_attrs[k] = v
else:
morph_attrs[k] = v
return other_attrs, morph_attrs
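
# A minimal, hypothetical usage sketch (not used anywhere in spaCy): a blank
# English pipeline gets an "attribute_ruler", one pattern is added with
# AttributeRuler.add(), and the matched token's lemma is overridden. It assumes
# only that spaCy itself is importable.
def _demo_attribute_ruler_usage():
    import spacy

    nlp = spacy.blank("en")
    ruler = nlp.add_pipe("attribute_ruler")
    # Assign the lemma "be" to the token "was" wherever the pattern matches.
    ruler.add(patterns=[[{"ORTH": "was"}]], attrs={"LEMMA": "be"}, index=0)
    doc = nlp("There was a cat")
    return [(t.text, t.lemma_) for t in doc]  # includes ("was", "be")
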
| 13,675 | 37.201117 | 97 | py |
spaCy | spaCy-master/spacy/pipeline/edit_tree_lemmatizer.py | from collections import Counter
from itertools import islice
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, cast
import numpy as np
import srsly
from thinc.api import Config, Model, NumpyOps, SequenceCategoricalCrossentropy
from thinc.types import Floats2d, Ints2d
from .. import util
from ..errors import Errors
from ..language import Language
from ..tokens import Doc
from ..training import Example, validate_examples, validate_get_examples
from ..vocab import Vocab
from ._edit_tree_internals.edit_trees import EditTrees
from ._edit_tree_internals.schemas import validate_edit_tree
from .lemmatizer import lemmatizer_score
from .trainable_pipe import TrainablePipe
# The cutoff value of *top_k* above which an alternative method is used to process guesses.
TOP_K_GUARDRAIL = 20
default_model_config = """
[model]
@architectures = "spacy.Tagger.v2"
[model.tok2vec]
@architectures = "spacy.HashEmbedCNN.v2"
pretrained_vectors = null
width = 96
depth = 4
embed_size = 2000
window_size = 1
maxout_pieces = 3
subword_features = true
"""
DEFAULT_EDIT_TREE_LEMMATIZER_MODEL = Config().from_str(default_model_config)["model"]
@Language.factory(
"trainable_lemmatizer",
assigns=["token.lemma"],
requires=[],
default_config={
"model": DEFAULT_EDIT_TREE_LEMMATIZER_MODEL,
"backoff": "orth",
"min_tree_freq": 3,
"overwrite": False,
"top_k": 1,
"scorer": {"@scorers": "spacy.lemmatizer_scorer.v1"},
},
default_score_weights={"lemma_acc": 1.0},
)
def make_edit_tree_lemmatizer(
nlp: Language,
name: str,
model: Model,
backoff: Optional[str],
min_tree_freq: int,
overwrite: bool,
top_k: int,
scorer: Optional[Callable],
):
"""Construct an EditTreeLemmatizer component."""
return EditTreeLemmatizer(
nlp.vocab,
model,
name,
backoff=backoff,
min_tree_freq=min_tree_freq,
overwrite=overwrite,
top_k=top_k,
scorer=scorer,
)
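
# A minimal, hypothetical usage sketch (not used anywhere in spaCy): the factory
# above is what nlp.add_pipe("trainable_lemmatizer") resolves to, with config
# keys such as "backoff" and "top_k" passed straight through. The helper only
# wires the component into a blank pipeline; it would still need initialize()
# and training before it can predict lemmas.
def _demo_add_trainable_lemmatizer():
    import spacy

    nlp = spacy.blank("en")
    lemmatizer = nlp.add_pipe(
        "trainable_lemmatizer", config={"backoff": "orth", "top_k": 3}
    )
    return nlp.pipe_names, lemmatizer.top_k  # (["trainable_lemmatizer"], 3)
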
class EditTreeLemmatizer(TrainablePipe):
"""
Lemmatizer that lemmatizes each word using a predicted edit tree.
"""
def __init__(
self,
vocab: Vocab,
model: Model,
name: str = "trainable_lemmatizer",
*,
backoff: Optional[str] = "orth",
min_tree_freq: int = 3,
overwrite: bool = False,
top_k: int = 1,
scorer: Optional[Callable] = lemmatizer_score,
):
"""
Construct an edit tree lemmatizer.
backoff (Optional[str]): backoff to use when the predicted edit trees
are not applicable. Must be an attribute of Token or None (leave the
lemma unset).
min_tree_freq (int): prune trees that are applied less than this
frequency in the training data.
overwrite (bool): overwrite existing lemma annotations.
top_k (int): try to apply at most the k most probable edit trees.
"""
self.vocab = vocab
self.model = model
self.name = name
self.backoff = backoff
self.min_tree_freq = min_tree_freq
self.overwrite = overwrite
self.top_k = top_k
self.trees = EditTrees(self.vocab.strings)
self.tree2label: Dict[int, int] = {}
self.cfg: Dict[str, Any] = {"labels": []}
self.scorer = scorer
self.numpy_ops = NumpyOps()
def get_loss(
self, examples: Iterable[Example], scores: List[Floats2d]
) -> Tuple[float, List[Floats2d]]:
validate_examples(examples, "EditTreeLemmatizer.get_loss")
loss_func = SequenceCategoricalCrossentropy(normalize=False, missing_value=-1)
truths = []
for eg in examples:
eg_truths = []
for (predicted, gold_lemma) in zip(
eg.predicted, eg.get_aligned("LEMMA", as_string=True)
):
if gold_lemma is None or gold_lemma == "":
label = -1
else:
tree_id = self.trees.add(predicted.text, gold_lemma)
label = self.tree2label.get(tree_id, 0)
eg_truths.append(label)
truths.append(eg_truths)
d_scores, loss = loss_func(scores, truths)
if self.model.ops.xp.isnan(loss):
raise ValueError(Errors.E910.format(name=self.name))
return float(loss), d_scores
def predict(self, docs: Iterable[Doc]) -> List[Ints2d]:
if self.top_k == 1:
scores2guesses = self._scores2guesses_top_k_equals_1
elif self.top_k <= TOP_K_GUARDRAIL:
scores2guesses = self._scores2guesses_top_k_greater_1
else:
scores2guesses = self._scores2guesses_top_k_guardrail
# The behaviour of *_scores2guesses_top_k_greater_1()* is efficient for values
# of *top_k>1* that are likely to be useful when the edit tree lemmatizer is used
# for its principal purpose of lemmatizing tokens. However, the code could also
# be used for other purposes, and with very large values of *top_k* the method
# becomes inefficient. In such cases, *_scores2guesses_top_k_guardrail()* is used
# instead.
n_docs = len(list(docs))
if not any(len(doc) for doc in docs):
# Handle cases where there are no tokens in any docs.
n_labels = len(self.cfg["labels"])
guesses: List[Ints2d] = [self.model.ops.alloc2i(0, n_labels) for _ in docs]
assert len(guesses) == n_docs
return guesses
scores = self.model.predict(docs)
assert len(scores) == n_docs
guesses = scores2guesses(docs, scores)
assert len(guesses) == n_docs
return guesses
def _scores2guesses_top_k_equals_1(self, docs, scores):
guesses = []
for doc, doc_scores in zip(docs, scores):
doc_guesses = doc_scores.argmax(axis=1)
doc_guesses = self.numpy_ops.asarray(doc_guesses)
doc_compat_guesses = []
for i, token in enumerate(doc):
tree_id = self.cfg["labels"][doc_guesses[i]]
if self.trees.apply(tree_id, token.text) is not None:
doc_compat_guesses.append(tree_id)
else:
doc_compat_guesses.append(-1)
guesses.append(np.array(doc_compat_guesses))
return guesses
def _scores2guesses_top_k_greater_1(self, docs, scores):
guesses = []
top_k = min(self.top_k, len(self.labels))
for doc, doc_scores in zip(docs, scores):
doc_scores = self.numpy_ops.asarray(doc_scores)
doc_compat_guesses = []
for i, token in enumerate(doc):
for _ in range(top_k):
candidate = int(doc_scores[i].argmax())
candidate_tree_id = self.cfg["labels"][candidate]
if self.trees.apply(candidate_tree_id, token.text) is not None:
doc_compat_guesses.append(candidate_tree_id)
break
doc_scores[i, candidate] = np.finfo(np.float32).min
else:
doc_compat_guesses.append(-1)
guesses.append(np.array(doc_compat_guesses))
return guesses
def _scores2guesses_top_k_guardrail(self, docs, scores):
guesses = []
for doc, doc_scores in zip(docs, scores):
doc_guesses = np.argsort(doc_scores)[..., : -self.top_k - 1 : -1]
doc_guesses = self.numpy_ops.asarray(doc_guesses)
doc_compat_guesses = []
for token, candidates in zip(doc, doc_guesses):
tree_id = -1
for candidate in candidates:
candidate_tree_id = self.cfg["labels"][candidate]
if self.trees.apply(candidate_tree_id, token.text) is not None:
tree_id = candidate_tree_id
break
doc_compat_guesses.append(tree_id)
guesses.append(np.array(doc_compat_guesses))
return guesses
def set_annotations(self, docs: Iterable[Doc], batch_tree_ids):
for i, doc in enumerate(docs):
doc_tree_ids = batch_tree_ids[i]
if hasattr(doc_tree_ids, "get"):
doc_tree_ids = doc_tree_ids.get()
for j, tree_id in enumerate(doc_tree_ids):
if self.overwrite or doc[j].lemma == 0:
# If no applicable tree could be found during prediction,
# the special identifier -1 is used. Otherwise the tree
# is guaranteed to be applicable.
if tree_id == -1:
if self.backoff is not None:
doc[j].lemma = getattr(doc[j], self.backoff)
else:
lemma = self.trees.apply(tree_id, doc[j].text)
doc[j].lemma_ = lemma
@property
def labels(self) -> Tuple[int, ...]:
"""Returns the labels currently added to the component."""
return tuple(self.cfg["labels"])
@property
def hide_labels(self) -> bool:
return True
@property
def label_data(self) -> Dict:
trees = []
for tree_id in range(len(self.trees)):
tree = self.trees[tree_id]
if "orig" in tree:
tree["orig"] = self.vocab.strings[tree["orig"]]
if "subst" in tree:
tree["subst"] = self.vocab.strings[tree["subst"]]
trees.append(tree)
return dict(trees=trees, labels=tuple(self.cfg["labels"]))
def initialize(
self,
get_examples: Callable[[], Iterable[Example]],
*,
nlp: Optional[Language] = None,
labels: Optional[Dict] = None,
):
validate_get_examples(get_examples, "EditTreeLemmatizer.initialize")
if labels is None:
self._labels_from_data(get_examples)
else:
self._add_labels(labels)
# Sample for the model.
doc_sample = []
label_sample = []
for example in islice(get_examples(), 10):
doc_sample.append(example.x)
gold_labels: List[List[float]] = []
for token in example.reference:
if token.lemma == 0:
gold_label = None
else:
gold_label = self._pair2label(token.text, token.lemma_)
gold_labels.append(
[
1.0 if label == gold_label else 0.0
for label in self.cfg["labels"]
]
)
gold_labels = cast(Floats2d, gold_labels)
label_sample.append(self.model.ops.asarray(gold_labels, dtype="float32"))
self._require_labels()
assert len(doc_sample) > 0, Errors.E923.format(name=self.name)
assert len(label_sample) > 0, Errors.E923.format(name=self.name)
self.model.initialize(X=doc_sample, Y=label_sample)
def from_bytes(self, bytes_data, *, exclude=tuple()):
deserializers = {
"cfg": lambda b: self.cfg.update(srsly.json_loads(b)),
"model": lambda b: self.model.from_bytes(b),
"vocab": lambda b: self.vocab.from_bytes(b, exclude=exclude),
"trees": lambda b: self.trees.from_bytes(b),
}
util.from_bytes(bytes_data, deserializers, exclude)
return self
def to_bytes(self, *, exclude=tuple()):
serializers = {
"cfg": lambda: srsly.json_dumps(self.cfg),
"model": lambda: self.model.to_bytes(),
"vocab": lambda: self.vocab.to_bytes(exclude=exclude),
"trees": lambda: self.trees.to_bytes(),
}
return util.to_bytes(serializers, exclude)
def to_disk(self, path, exclude=tuple()):
path = util.ensure_path(path)
serializers = {
"cfg": lambda p: srsly.write_json(p, self.cfg),
"model": lambda p: self.model.to_disk(p),
"vocab": lambda p: self.vocab.to_disk(p, exclude=exclude),
"trees": lambda p: self.trees.to_disk(p),
}
util.to_disk(path, serializers, exclude)
def from_disk(self, path, exclude=tuple()):
def load_model(p):
try:
with open(p, "rb") as mfile:
self.model.from_bytes(mfile.read())
except AttributeError:
raise ValueError(Errors.E149) from None
deserializers = {
"cfg": lambda p: self.cfg.update(srsly.read_json(p)),
"model": load_model,
"vocab": lambda p: self.vocab.from_disk(p, exclude=exclude),
"trees": lambda p: self.trees.from_disk(p),
}
util.from_disk(path, deserializers, exclude)
return self
def _add_labels(self, labels: Dict):
if "labels" not in labels:
raise ValueError(Errors.E857.format(name="labels"))
if "trees" not in labels:
raise ValueError(Errors.E857.format(name="trees"))
self.cfg["labels"] = list(labels["labels"])
trees = []
for tree in labels["trees"]:
errors = validate_edit_tree(tree)
if errors:
raise ValueError(Errors.E1026.format(errors="\n".join(errors)))
tree = dict(tree)
if "orig" in tree:
tree["orig"] = self.vocab.strings.add(tree["orig"])
if "orig" in tree:
tree["subst"] = self.vocab.strings.add(tree["subst"])
trees.append(tree)
self.trees.from_json(trees)
for label, tree in enumerate(self.labels):
self.tree2label[tree] = label
def _labels_from_data(self, get_examples: Callable[[], Iterable[Example]]):
# Count corpus tree frequencies in ad-hoc storage to avoid cluttering
# the final pipe/string store.
vocab = Vocab()
trees = EditTrees(vocab.strings)
tree_freqs: Counter = Counter()
repr_pairs: Dict = {}
for example in get_examples():
for token in example.reference:
if token.lemma != 0:
tree_id = trees.add(token.text, token.lemma_)
tree_freqs[tree_id] += 1
repr_pairs[tree_id] = (token.text, token.lemma_)
# Construct trees that make the frequency cut-off using representative
# form - token pairs.
for tree_id, freq in tree_freqs.items():
if freq >= self.min_tree_freq:
form, lemma = repr_pairs[tree_id]
self._pair2label(form, lemma, add_label=True)
def _pair2label(self, form, lemma, add_label=False):
"""
Look up the edit tree identifier for a form/label pair. If the edit
tree is unknown and "add_label" is set, the edit tree will be added to
the labels.
"""
tree_id = self.trees.add(form, lemma)
if tree_id not in self.tree2label:
if not add_label:
return None
self.tree2label[tree_id] = len(self.cfg["labels"])
self.cfg["labels"].append(tree_id)
return self.tree2label[tree_id]
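# Usage sketch (illustrative, not part of this module): the component above is
# registered under the "trainable_lemmatizer" factory name and learns edit trees
# from gold LEMMA annotations during training.
#
#     import spacy
#
#     nlp = spacy.blank("de")
#     nlp.add_pipe("trainable_lemmatizer", name="lemmatizer")
#     # train with `spacy train`, or nlp.initialize(...) followed by nlp.update(...)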
| 15,329 | 35.15566 | 91 | py |
spaCy | spaCy-master/spacy/pipeline/entity_linker.py | import random
from itertools import islice
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Optional, Union
import srsly
from thinc.api import Config, CosineDistance, Model, Optimizer, set_dropout_rate
from thinc.types import Floats2d
from .. import util
from ..errors import Errors
from ..kb import Candidate, KnowledgeBase
from ..language import Language
from ..ml import empty_kb
from ..scorer import Scorer
from ..tokens import Doc, Span
from ..training import Example, validate_examples, validate_get_examples
from ..util import SimpleFrozenList, registry
from ..vocab import Vocab
from .legacy.entity_linker import EntityLinker_v1
from .pipe import deserialize_config
from .trainable_pipe import TrainablePipe
# See #9050
BACKWARD_OVERWRITE = True
default_model_config = """
[model]
@architectures = "spacy.EntityLinker.v2"
[model.tok2vec]
@architectures = "spacy.HashEmbedCNN.v2"
pretrained_vectors = null
width = 96
depth = 2
embed_size = 2000
window_size = 1
maxout_pieces = 3
subword_features = true
"""
DEFAULT_NEL_MODEL = Config().from_str(default_model_config)["model"]
@Language.factory(
"entity_linker",
requires=["doc.ents", "doc.sents", "token.ent_iob", "token.ent_type"],
assigns=["token.ent_kb_id"],
default_config={
"model": DEFAULT_NEL_MODEL,
"labels_discard": [],
"n_sents": 0,
"incl_prior": True,
"incl_context": True,
"entity_vector_length": 64,
"get_candidates": {"@misc": "spacy.CandidateGenerator.v1"},
"get_candidates_batch": {"@misc": "spacy.CandidateBatchGenerator.v1"},
"generate_empty_kb": {"@misc": "spacy.EmptyKB.v2"},
"overwrite": True,
"scorer": {"@scorers": "spacy.entity_linker_scorer.v1"},
"use_gold_ents": True,
"candidates_batch_size": 1,
"threshold": None,
},
default_score_weights={
"nel_micro_f": 1.0,
"nel_micro_r": None,
"nel_micro_p": None,
},
)
def make_entity_linker(
nlp: Language,
name: str,
model: Model,
*,
labels_discard: Iterable[str],
n_sents: int,
incl_prior: bool,
incl_context: bool,
entity_vector_length: int,
get_candidates: Callable[[KnowledgeBase, Span], Iterable[Candidate]],
get_candidates_batch: Callable[
[KnowledgeBase, Iterable[Span]], Iterable[Iterable[Candidate]]
],
generate_empty_kb: Callable[[Vocab, int], KnowledgeBase],
overwrite: bool,
scorer: Optional[Callable],
use_gold_ents: bool,
candidates_batch_size: int,
threshold: Optional[float] = None,
):
"""Construct an EntityLinker component.
model (Model[List[Doc], Floats2d]): A model that learns document vector
representations. Given a batch of Doc objects, it should return a single
array, with one row per item in the batch.
labels_discard (Iterable[str]): NER labels that will automatically get a "NIL" prediction.
n_sents (int): The number of neighbouring sentences to take into account.
incl_prior (bool): Whether or not to include prior probabilities from the KB in the model.
incl_context (bool): Whether or not to include the local context in the model.
entity_vector_length (int): Size of encoding vectors in the KB.
get_candidates (Callable[[KnowledgeBase, Span], Iterable[Candidate]]): Function that
produces a list of candidates, given a certain knowledge base and a textual mention.
    get_candidates_batch (
        Callable[[KnowledgeBase, Iterable[Span]], Iterable[Iterable[Candidate]]]
    ): Function that produces a list of candidates, given a certain knowledge base and several textual mentions.
generate_empty_kb (Callable[[Vocab, int], KnowledgeBase]): Callable returning empty KnowledgeBase.
scorer (Optional[Callable]): The scoring method.
use_gold_ents (bool): Whether to copy entities from gold docs or not. If false, another
component must provide entity annotations.
candidates_batch_size (int): Size of batches for entity candidate generation.
threshold (Optional[float]): Confidence threshold for entity predictions. If confidence is below the threshold,
prediction is discarded. If None, predictions are not filtered by any threshold.
"""
if not model.attrs.get("include_span_maker", False):
# The only difference in arguments here is that use_gold_ents and threshold aren't available.
return EntityLinker_v1(
nlp.vocab,
model,
name,
labels_discard=labels_discard,
n_sents=n_sents,
incl_prior=incl_prior,
incl_context=incl_context,
entity_vector_length=entity_vector_length,
get_candidates=get_candidates,
overwrite=overwrite,
scorer=scorer,
)
return EntityLinker(
nlp.vocab,
model,
name,
labels_discard=labels_discard,
n_sents=n_sents,
incl_prior=incl_prior,
incl_context=incl_context,
entity_vector_length=entity_vector_length,
get_candidates=get_candidates,
get_candidates_batch=get_candidates_batch,
generate_empty_kb=generate_empty_kb,
overwrite=overwrite,
scorer=scorer,
use_gold_ents=use_gold_ents,
candidates_batch_size=candidates_batch_size,
threshold=threshold,
)
def entity_linker_score(examples, **kwargs):
return Scorer.score_links(examples, negative_labels=[EntityLinker.NIL], **kwargs)
@registry.scorers("spacy.entity_linker_scorer.v1")
def make_entity_linker_scorer():
return entity_linker_score
class EntityLinker(TrainablePipe):
"""Pipeline component for named entity linking.
DOCS: https://spacy.io/api/entitylinker
"""
NIL = "NIL" # string used to refer to a non-existing link
def __init__(
self,
vocab: Vocab,
model: Model,
name: str = "entity_linker",
*,
labels_discard: Iterable[str],
n_sents: int,
incl_prior: bool,
incl_context: bool,
entity_vector_length: int,
get_candidates: Callable[[KnowledgeBase, Span], Iterable[Candidate]],
get_candidates_batch: Callable[
[KnowledgeBase, Iterable[Span]], Iterable[Iterable[Candidate]]
],
generate_empty_kb: Callable[[Vocab, int], KnowledgeBase],
overwrite: bool = BACKWARD_OVERWRITE,
scorer: Optional[Callable] = entity_linker_score,
use_gold_ents: bool,
candidates_batch_size: int,
threshold: Optional[float] = None,
) -> None:
"""Initialize an entity linker.
vocab (Vocab): The shared vocabulary.
model (thinc.api.Model): The Thinc Model powering the pipeline component.
name (str): The component instance name, used to add entries to the
losses during training.
labels_discard (Iterable[str]): NER labels that will automatically get a "NIL" prediction.
n_sents (int): The number of neighbouring sentences to take into account.
incl_prior (bool): Whether or not to include prior probabilities from the KB in the model.
incl_context (bool): Whether or not to include the local context in the model.
entity_vector_length (int): Size of encoding vectors in the KB.
get_candidates (Callable[[KnowledgeBase, Span], Iterable[Candidate]]): Function that
produces a list of candidates, given a certain knowledge base and a textual mention.
        get_candidates_batch (
            Callable[[KnowledgeBase, Iterable[Span]], Iterable[Iterable[Candidate]]]
        ): Function that produces a list of candidates, given a certain knowledge base and several textual mentions.
generate_empty_kb (Callable[[Vocab, int], KnowledgeBase]): Callable returning empty KnowledgeBase.
scorer (Optional[Callable]): The scoring method. Defaults to Scorer.score_links.
use_gold_ents (bool): Whether to copy entities from gold docs or not. If false, another
component must provide entity annotations.
candidates_batch_size (int): Size of batches for entity candidate generation.
threshold (Optional[float]): Confidence threshold for entity predictions. If confidence is below the
threshold, prediction is discarded. If None, predictions are not filtered by any threshold.
DOCS: https://spacy.io/api/entitylinker#init
"""
if threshold is not None and not (0 <= threshold <= 1):
raise ValueError(
Errors.E1043.format(
range_start=0,
range_end=1,
value=threshold,
)
)
self.vocab = vocab
self.model = model
self.name = name
self.labels_discard = list(labels_discard)
# how many neighbour sentences to take into account
self.n_sents = n_sents
self.incl_prior = incl_prior
self.incl_context = incl_context
self.get_candidates = get_candidates
self.get_candidates_batch = get_candidates_batch
self.cfg: Dict[str, Any] = {"overwrite": overwrite}
self.distance = CosineDistance(normalize=False)
self.kb = generate_empty_kb(self.vocab, entity_vector_length)
self.scorer = scorer
self.use_gold_ents = use_gold_ents
self.candidates_batch_size = candidates_batch_size
self.threshold = threshold
if candidates_batch_size < 1:
raise ValueError(Errors.E1044)
def set_kb(self, kb_loader: Callable[[Vocab], KnowledgeBase]):
"""Define the KB of this pipe by providing a function that will
create it using this object's vocab."""
if not callable(kb_loader):
raise ValueError(Errors.E885.format(arg_type=type(kb_loader)))
self.kb = kb_loader(self.vocab) # type: ignore
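    # A minimal kb_loader sketch for set_kb() above (illustrative; the entity ID,
    # frequency, vector and alias are made up, and it assumes
    # spacy.kb.InMemoryLookupKB with an entity_vector_length that matches this
    # component's configuration):
    #
    #     from spacy.kb import InMemoryLookupKB
    #
    #     def my_kb_loader(vocab):
    #         kb = InMemoryLookupKB(vocab, entity_vector_length=64)
    #         kb.add_entity(entity="Q42", freq=12, entity_vector=[0.0] * 64)
    #         kb.add_alias(alias="Douglas Adams", entities=["Q42"], probabilities=[1.0])
    #         return kb
    #
    #     entity_linker.set_kb(my_kb_loader)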
def validate_kb(self) -> None:
# Raise an error if the knowledge base is not initialized.
if self.kb is None:
raise ValueError(Errors.E1018.format(name=self.name))
if hasattr(self.kb, "is_empty") and self.kb.is_empty():
raise ValueError(Errors.E139.format(name=self.name))
def initialize(
self,
get_examples: Callable[[], Iterable[Example]],
*,
nlp: Optional[Language] = None,
kb_loader: Optional[Callable[[Vocab], KnowledgeBase]] = None,
):
"""Initialize the pipe for training, using a representative set
of data examples.
get_examples (Callable[[], Iterable[Example]]): Function that
returns a representative sample of gold-standard Example objects.
nlp (Language): The current nlp object the component is part of.
kb_loader (Callable[[Vocab], KnowledgeBase]): A function that creates a KnowledgeBase from a Vocab
instance. Note that providing this argument will overwrite all data accumulated in the current KB.
Use this only when loading a KB as-such from file.
DOCS: https://spacy.io/api/entitylinker#initialize
"""
validate_get_examples(get_examples, "EntityLinker.initialize")
if kb_loader is not None:
self.set_kb(kb_loader)
self.validate_kb()
nO = self.kb.entity_vector_length
doc_sample = []
vector_sample = []
for eg in islice(get_examples(), 10):
doc = eg.x
if self.use_gold_ents:
ents, _ = eg.get_aligned_ents_and_ner()
doc.ents = ents
doc_sample.append(doc)
vector_sample.append(self.model.ops.alloc1f(nO))
assert len(doc_sample) > 0, Errors.E923.format(name=self.name)
assert len(vector_sample) > 0, Errors.E923.format(name=self.name)
# XXX In order for size estimation to work, there has to be at least
# one entity. It's not used for training so it doesn't have to be real,
# so we add a fake one if none are present.
# We can't use Doc.has_annotation here because it can be True for docs
# that have been through an NER component but got no entities.
has_annotations = any([doc.ents for doc in doc_sample])
if not has_annotations:
doc = doc_sample[0]
ent = doc[0:1]
ent.label_ = "XXX"
doc.ents = (ent,)
self.model.initialize(
X=doc_sample, Y=self.model.ops.asarray(vector_sample, dtype="float32")
)
if not has_annotations:
# Clean up dummy annotation
doc.ents = []
def batch_has_learnable_example(self, examples):
"""Check if a batch contains a learnable example.
If one isn't present, then the update step needs to be skipped.
"""
for eg in examples:
for ent in eg.predicted.ents:
candidates = list(self.get_candidates(self.kb, ent))
if candidates:
return True
return False
def update(
self,
examples: Iterable[Example],
*,
drop: float = 0.0,
sgd: Optional[Optimizer] = None,
losses: Optional[Dict[str, float]] = None,
) -> Dict[str, float]:
"""Learn from a batch of documents and gold-standard information,
updating the pipe's model. Delegates to predict and get_loss.
examples (Iterable[Example]): A batch of Example objects.
drop (float): The dropout rate.
sgd (thinc.api.Optimizer): The optimizer.
losses (Dict[str, float]): Optional record of the loss during training.
Updated using the component name as the key.
RETURNS (Dict[str, float]): The updated losses dictionary.
DOCS: https://spacy.io/api/entitylinker#update
"""
self.validate_kb()
if losses is None:
losses = {}
losses.setdefault(self.name, 0.0)
if not examples:
return losses
validate_examples(examples, "EntityLinker.update")
set_dropout_rate(self.model, drop)
docs = [eg.predicted for eg in examples]
# save to restore later
old_ents = [doc.ents for doc in docs]
for doc, ex in zip(docs, examples):
if self.use_gold_ents:
ents, _ = ex.get_aligned_ents_and_ner()
doc.ents = ents
else:
# only keep matching ents
doc.ents = ex.get_matching_ents()
# make sure we have something to learn from, if not, short-circuit
if not self.batch_has_learnable_example(examples):
return losses
sentence_encodings, bp_context = self.model.begin_update(docs)
# now restore the ents
for doc, old in zip(docs, old_ents):
doc.ents = old
loss, d_scores = self.get_loss(
sentence_encodings=sentence_encodings, examples=examples
)
bp_context(d_scores)
if sgd is not None:
self.finish_update(sgd)
losses[self.name] += loss
return losses
def get_loss(self, examples: Iterable[Example], sentence_encodings: Floats2d):
validate_examples(examples, "EntityLinker.get_loss")
entity_encodings = []
eidx = 0 # indices in gold entities to keep
keep_ents = [] # indices in sentence_encodings to keep
for eg in examples:
kb_ids = eg.get_aligned("ENT_KB_ID", as_string=True)
for ent in eg.get_matching_ents():
kb_id = kb_ids[ent.start]
if kb_id:
entity_encoding = self.kb.get_vector(kb_id)
entity_encodings.append(entity_encoding)
keep_ents.append(eidx)
eidx += 1
entity_encodings = self.model.ops.asarray2f(entity_encodings, dtype="float32")
selected_encodings = sentence_encodings[keep_ents]
# if there are no matches, short circuit
if not keep_ents:
out = self.model.ops.alloc2f(*sentence_encodings.shape)
return 0, out
if selected_encodings.shape != entity_encodings.shape:
err = Errors.E147.format(
method="get_loss", msg="gold entities do not match up"
)
raise RuntimeError(err)
gradients = self.distance.get_grad(selected_encodings, entity_encodings)
# to match the input size, we need to give a zero gradient for items not in the kb
out = self.model.ops.alloc2f(*sentence_encodings.shape)
out[keep_ents] = gradients
loss = self.distance.get_loss(selected_encodings, entity_encodings)
loss = loss / len(entity_encodings)
return float(loss), out
def predict(self, docs: Iterable[Doc]) -> List[str]:
"""Apply the pipeline's model to a batch of docs, without modifying them.
Returns the KB IDs for each entity in each doc, including NIL if there is
no prediction.
docs (Iterable[Doc]): The documents to predict.
        RETURNS (List[str]): The model's prediction for each document.
DOCS: https://spacy.io/api/entitylinker#predict
"""
self.validate_kb()
entity_count = 0
final_kb_ids: List[str] = []
xp = self.model.ops.xp
if not docs:
return final_kb_ids
if isinstance(docs, Doc):
docs = [docs]
for i, doc in enumerate(docs):
if len(doc) == 0:
continue
sentences = [s for s in doc.sents]
# Loop over entities in batches.
for ent_idx in range(0, len(doc.ents), self.candidates_batch_size):
ent_batch = doc.ents[ent_idx : ent_idx + self.candidates_batch_size]
# Look up candidate entities.
valid_ent_idx = [
idx
for idx in range(len(ent_batch))
if ent_batch[idx].label_ not in self.labels_discard
]
batch_candidates = list(
self.get_candidates_batch(
self.kb, [ent_batch[idx] for idx in valid_ent_idx]
)
if self.candidates_batch_size > 1
else [
self.get_candidates(self.kb, ent_batch[idx])
for idx in valid_ent_idx
]
)
# Looping through each entity in batch (TODO: rewrite)
for j, ent in enumerate(ent_batch):
assert hasattr(ent, "sents")
sents = list(ent.sents)
sent_indices = (
sentences.index(sents[0]),
sentences.index(sents[-1]),
)
assert sent_indices[1] >= sent_indices[0] >= 0
if self.incl_context:
# get n_neighbour sentences, clipped to the length of the document
start_sentence = max(0, sent_indices[0] - self.n_sents)
end_sentence = min(
len(sentences) - 1, sent_indices[1] + self.n_sents
)
start_token = sentences[start_sentence].start
end_token = sentences[end_sentence].end
sent_doc = doc[start_token:end_token].as_doc()
# currently, the context is the same for each entity in a sentence (should be refined)
sentence_encoding = self.model.predict([sent_doc])[0]
sentence_encoding_t = sentence_encoding.T
sentence_norm = xp.linalg.norm(sentence_encoding_t)
entity_count += 1
if ent.label_ in self.labels_discard:
# ignoring this entity - setting to NIL
final_kb_ids.append(self.NIL)
else:
candidates = list(batch_candidates[j])
if not candidates:
# no prediction possible for this entity - setting to NIL
final_kb_ids.append(self.NIL)
elif len(candidates) == 1 and self.threshold is None:
# shortcut for efficiency reasons: take the 1 candidate
final_kb_ids.append(candidates[0].entity_)
else:
random.shuffle(candidates)
# set all prior probabilities to 0 if incl_prior=False
prior_probs = xp.asarray([c.prior_prob for c in candidates])
if not self.incl_prior:
prior_probs = xp.asarray([0.0 for _ in candidates])
scores = prior_probs
# add in similarity from the context
if self.incl_context:
entity_encodings = xp.asarray(
[c.entity_vector for c in candidates]
)
entity_norm = xp.linalg.norm(entity_encodings, axis=1)
if len(entity_encodings) != len(prior_probs):
raise RuntimeError(
Errors.E147.format(
method="predict",
msg="vectors not of equal length",
)
)
# cosine similarity
sims = xp.dot(entity_encodings, sentence_encoding_t) / (
sentence_norm * entity_norm
)
if sims.shape != prior_probs.shape:
raise ValueError(Errors.E161)
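                                # Combine the prior probability and the context
                                # similarity via p + s - p*s (inclusion-exclusion
                                # style), so either signal alone can raise the score.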
scores = prior_probs + sims - (prior_probs * sims)
final_kb_ids.append(
candidates[scores.argmax().item()].entity_
if self.threshold is None
or scores.max() >= self.threshold
else EntityLinker.NIL
)
if not (len(final_kb_ids) == entity_count):
err = Errors.E147.format(
method="predict", msg="result variables not of equal length"
)
raise RuntimeError(err)
return final_kb_ids
def set_annotations(self, docs: Iterable[Doc], kb_ids: List[str]) -> None:
"""Modify a batch of documents, using pre-computed scores.
docs (Iterable[Doc]): The documents to modify.
kb_ids (List[str]): The IDs to set, produced by EntityLinker.predict.
DOCS: https://spacy.io/api/entitylinker#set_annotations
"""
count_ents = len([ent for doc in docs for ent in doc.ents])
if count_ents != len(kb_ids):
raise ValueError(Errors.E148.format(ents=count_ents, ids=len(kb_ids)))
i = 0
overwrite = self.cfg["overwrite"]
for doc in docs:
for ent in doc.ents:
kb_id = kb_ids[i]
i += 1
for token in ent:
if token.ent_kb_id == 0 or overwrite:
token.ent_kb_id_ = kb_id
def to_bytes(self, *, exclude=tuple()):
"""Serialize the pipe to a bytestring.
exclude (Iterable[str]): String names of serialization fields to exclude.
RETURNS (bytes): The serialized object.
DOCS: https://spacy.io/api/entitylinker#to_bytes
"""
self._validate_serialization_attrs()
serialize = {}
if hasattr(self, "cfg") and self.cfg is not None:
serialize["cfg"] = lambda: srsly.json_dumps(self.cfg)
serialize["vocab"] = lambda: self.vocab.to_bytes(exclude=exclude)
serialize["kb"] = self.kb.to_bytes
serialize["model"] = self.model.to_bytes
return util.to_bytes(serialize, exclude)
def from_bytes(self, bytes_data, *, exclude=tuple()):
"""Load the pipe from a bytestring.
exclude (Iterable[str]): String names of serialization fields to exclude.
RETURNS (TrainablePipe): The loaded object.
DOCS: https://spacy.io/api/entitylinker#from_bytes
"""
self._validate_serialization_attrs()
def load_model(b):
try:
self.model.from_bytes(b)
except AttributeError:
raise ValueError(Errors.E149) from None
deserialize = {}
if hasattr(self, "cfg") and self.cfg is not None:
deserialize["cfg"] = lambda b: self.cfg.update(srsly.json_loads(b))
deserialize["vocab"] = lambda b: self.vocab.from_bytes(b, exclude=exclude)
deserialize["kb"] = lambda b: self.kb.from_bytes(b)
deserialize["model"] = load_model
util.from_bytes(bytes_data, deserialize, exclude)
return self
def to_disk(
self, path: Union[str, Path], *, exclude: Iterable[str] = SimpleFrozenList()
) -> None:
"""Serialize the pipe to disk.
path (str / Path): Path to a directory.
exclude (Iterable[str]): String names of serialization fields to exclude.
DOCS: https://spacy.io/api/entitylinker#to_disk
"""
serialize = {}
serialize["vocab"] = lambda p: self.vocab.to_disk(p, exclude=exclude)
serialize["cfg"] = lambda p: srsly.write_json(p, self.cfg)
serialize["kb"] = lambda p: self.kb.to_disk(p)
serialize["model"] = lambda p: self.model.to_disk(p)
util.to_disk(path, serialize, exclude)
def from_disk(
self, path: Union[str, Path], *, exclude: Iterable[str] = SimpleFrozenList()
) -> "EntityLinker":
"""Load the pipe from disk. Modifies the object in place and returns it.
path (str / Path): Path to a directory.
exclude (Iterable[str]): String names of serialization fields to exclude.
RETURNS (EntityLinker): The modified EntityLinker object.
DOCS: https://spacy.io/api/entitylinker#from_disk
"""
def load_model(p):
try:
with p.open("rb") as infile:
self.model.from_bytes(infile.read())
except AttributeError:
raise ValueError(Errors.E149) from None
deserialize: Dict[str, Callable[[Any], Any]] = {}
deserialize["cfg"] = lambda p: self.cfg.update(deserialize_config(p))
deserialize["vocab"] = lambda p: self.vocab.from_disk(p, exclude=exclude)
deserialize["kb"] = lambda p: self.kb.from_disk(p)
deserialize["model"] = load_model
util.from_disk(path, deserialize, exclude)
return self
def rehearse(self, examples, *, sgd=None, losses=None, **config):
raise NotImplementedError
def add_label(self, label):
raise NotImplementedError
| 27,412 | 40.284639 | 120 | py |
spaCy | spaCy-master/spacy/pipeline/entityruler.py | import warnings
from collections import defaultdict
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import srsly
from ..errors import Errors, Warnings
from ..language import Language
from ..matcher import Matcher, PhraseMatcher
from ..matcher.levenshtein import levenshtein_compare
from ..scorer import get_ner_prf
from ..tokens import Doc, Span
from ..training import Example
from ..util import SimpleFrozenList, ensure_path, from_disk, registry, to_disk
from .pipe import Pipe
DEFAULT_ENT_ID_SEP = "||"
PatternType = Dict[str, Union[str, List[Dict[str, Any]]]]
@Language.factory(
"entity_ruler",
assigns=["doc.ents", "token.ent_type", "token.ent_iob"],
default_config={
"phrase_matcher_attr": None,
"matcher_fuzzy_compare": {"@misc": "spacy.levenshtein_compare.v1"},
"validate": False,
"overwrite_ents": False,
"ent_id_sep": DEFAULT_ENT_ID_SEP,
"scorer": {"@scorers": "spacy.entity_ruler_scorer.v1"},
},
default_score_weights={
"ents_f": 1.0,
"ents_p": 0.0,
"ents_r": 0.0,
"ents_per_type": None,
},
)
def make_entity_ruler(
nlp: Language,
name: str,
phrase_matcher_attr: Optional[Union[int, str]],
matcher_fuzzy_compare: Callable,
validate: bool,
overwrite_ents: bool,
ent_id_sep: str,
scorer: Optional[Callable],
):
return EntityRuler(
nlp,
name,
phrase_matcher_attr=phrase_matcher_attr,
matcher_fuzzy_compare=matcher_fuzzy_compare,
validate=validate,
overwrite_ents=overwrite_ents,
ent_id_sep=ent_id_sep,
scorer=scorer,
)
def entity_ruler_score(examples, **kwargs):
return get_ner_prf(examples)
@registry.scorers("spacy.entity_ruler_scorer.v1")
def make_entity_ruler_scorer():
return entity_ruler_score
class EntityRuler(Pipe):
"""The EntityRuler lets you add spans to the `Doc.ents` using token-based
rules or exact phrase matches. It can be combined with the statistical
`EntityRecognizer` to boost accuracy, or used on its own to implement a
purely rule-based entity recognition system. After initialization, the
component is typically added to the pipeline using `nlp.add_pipe`.
DOCS: https://spacy.io/api/entityruler
USAGE: https://spacy.io/usage/rule-based-matching#entityruler
"""
def __init__(
self,
nlp: Language,
name: str = "entity_ruler",
*,
phrase_matcher_attr: Optional[Union[int, str]] = None,
matcher_fuzzy_compare: Callable = levenshtein_compare,
validate: bool = False,
overwrite_ents: bool = False,
ent_id_sep: str = DEFAULT_ENT_ID_SEP,
patterns: Optional[List[PatternType]] = None,
scorer: Optional[Callable] = entity_ruler_score,
) -> None:
"""Initialize the entity ruler. If patterns are supplied here, they
need to be a list of dictionaries with a `"label"` and `"pattern"`
key. A pattern can either be a token pattern (list) or a phrase pattern
(string). For example: `{'label': 'ORG', 'pattern': 'Apple'}`.
nlp (Language): The shared nlp object to pass the vocab to the matchers
and process phrase patterns.
name (str): Instance name of the current pipeline component. Typically
passed in automatically from the factory when the component is
added. Used to disable the current entity ruler while creating
phrase patterns with the nlp object.
phrase_matcher_attr (int / str): Token attribute to match on, passed
to the internal PhraseMatcher as `attr`.
matcher_fuzzy_compare (Callable): The fuzzy comparison method for the
internal Matcher. Defaults to
spacy.matcher.levenshtein.levenshtein_compare.
validate (bool): Whether patterns should be validated, passed to
Matcher and PhraseMatcher as `validate`
patterns (iterable): Optional patterns to load in.
overwrite_ents (bool): If existing entities are present, e.g. entities
added by the model, overwrite them by matches if necessary.
ent_id_sep (str): Separator used internally for entity IDs.
scorer (Optional[Callable]): The scoring method. Defaults to
spacy.scorer.get_ner_prf.
DOCS: https://spacy.io/api/entityruler#init
"""
self.nlp = nlp
self.name = name
self.overwrite = overwrite_ents
self.token_patterns = defaultdict(list) # type: ignore
self.phrase_patterns = defaultdict(list) # type: ignore
self._validate = validate
self.matcher_fuzzy_compare = matcher_fuzzy_compare
self.matcher = Matcher(
nlp.vocab, validate=validate, fuzzy_compare=self.matcher_fuzzy_compare
)
self.phrase_matcher_attr = phrase_matcher_attr
self.phrase_matcher = PhraseMatcher(
nlp.vocab, attr=self.phrase_matcher_attr, validate=validate
)
self.ent_id_sep = ent_id_sep
self._ent_ids = defaultdict(tuple) # type: ignore
if patterns is not None:
self.add_patterns(patterns)
self.scorer = scorer
def __len__(self) -> int:
"""The number of all patterns added to the entity ruler."""
n_token_patterns = sum(len(p) for p in self.token_patterns.values())
n_phrase_patterns = sum(len(p) for p in self.phrase_patterns.values())
return n_token_patterns + n_phrase_patterns
def __contains__(self, label: str) -> bool:
"""Whether a label is present in the patterns."""
return label in self.token_patterns or label in self.phrase_patterns
def __call__(self, doc: Doc) -> Doc:
"""Find matches in document and add them as entities.
doc (Doc): The Doc object in the pipeline.
RETURNS (Doc): The Doc with added entities, if available.
DOCS: https://spacy.io/api/entityruler#call
"""
error_handler = self.get_error_handler()
try:
matches = self.match(doc)
self.set_annotations(doc, matches)
return doc
except Exception as e:
return error_handler(self.name, self, [doc], e)
def match(self, doc: Doc):
self._require_patterns()
with warnings.catch_warnings():
warnings.filterwarnings("ignore", message="\\[W036")
matches = list(self.matcher(doc)) + list(self.phrase_matcher(doc))
final_matches = set(
[(m_id, start, end) for m_id, start, end in matches if start != end]
)
get_sort_key = lambda m: (m[2] - m[1], -m[1])
final_matches = sorted(final_matches, key=get_sort_key, reverse=True)
return final_matches
def set_annotations(self, doc, matches):
"""Modify the document in place"""
entities = list(doc.ents)
new_entities = []
seen_tokens = set()
for match_id, start, end in matches:
if any(t.ent_type for t in doc[start:end]) and not self.overwrite:
continue
# check for end - 1 here because boundaries are inclusive
if start not in seen_tokens and end - 1 not in seen_tokens:
if match_id in self._ent_ids:
label, ent_id = self._ent_ids[match_id]
span = Span(doc, start, end, label=label, span_id=ent_id)
else:
span = Span(doc, start, end, label=match_id)
new_entities.append(span)
entities = [
e for e in entities if not (e.start < end and e.end > start)
]
seen_tokens.update(range(start, end))
doc.ents = entities + new_entities
@property
def labels(self) -> Tuple[str, ...]:
"""All labels present in the match patterns.
RETURNS (set): The string labels.
DOCS: https://spacy.io/api/entityruler#labels
"""
keys = set(self.token_patterns.keys())
keys.update(self.phrase_patterns.keys())
all_labels = set()
for l in keys:
if self.ent_id_sep in l:
label, _ = self._split_label(l)
all_labels.add(label)
else:
all_labels.add(l)
return tuple(sorted(all_labels))
def initialize(
self,
get_examples: Callable[[], Iterable[Example]],
*,
nlp: Optional[Language] = None,
patterns: Optional[Sequence[PatternType]] = None,
):
"""Initialize the pipe for training.
get_examples (Callable[[], Iterable[Example]]): Function that
returns a representative sample of gold-standard Example objects.
nlp (Language): The current nlp object the component is part of.
patterns Optional[Iterable[PatternType]]: The list of patterns.
DOCS: https://spacy.io/api/entityruler#initialize
"""
self.clear()
if patterns:
self.add_patterns(patterns) # type: ignore[arg-type]
@property
def ent_ids(self) -> Tuple[Optional[str], ...]:
"""All entity ids present in the match patterns `id` properties
RETURNS (set): The string entity ids.
DOCS: https://spacy.io/api/entityruler#ent_ids
"""
keys = set(self.token_patterns.keys())
keys.update(self.phrase_patterns.keys())
all_ent_ids = set()
for l in keys:
if self.ent_id_sep in l:
_, ent_id = self._split_label(l)
all_ent_ids.add(ent_id)
return tuple(all_ent_ids)
@property
def patterns(self) -> List[PatternType]:
"""Get all patterns that were added to the entity ruler.
RETURNS (list): The original patterns, one dictionary per pattern.
DOCS: https://spacy.io/api/entityruler#patterns
"""
all_patterns = []
for label, patterns in self.token_patterns.items():
for pattern in patterns:
ent_label, ent_id = self._split_label(label)
p = {"label": ent_label, "pattern": pattern}
if ent_id:
p["id"] = ent_id
all_patterns.append(p)
for label, patterns in self.phrase_patterns.items():
for pattern in patterns:
ent_label, ent_id = self._split_label(label)
p = {"label": ent_label, "pattern": pattern.text}
if ent_id:
p["id"] = ent_id
all_patterns.append(p)
return all_patterns
def add_patterns(self, patterns: List[PatternType]) -> None:
"""Add patterns to the entity ruler. A pattern can either be a token
pattern (list of dicts) or a phrase pattern (string). For example:
{'label': 'ORG', 'pattern': 'Apple'}
{'label': 'GPE', 'pattern': [{'lower': 'san'}, {'lower': 'francisco'}]}
patterns (list): The patterns to add.
DOCS: https://spacy.io/api/entityruler#add_patterns
"""
# disable the nlp components after this one in case they hadn't been initialized / deserialised yet
try:
current_index = -1
for i, (name, pipe) in enumerate(self.nlp.pipeline):
if self == pipe:
current_index = i
break
subsequent_pipes = [pipe for pipe in self.nlp.pipe_names[current_index:]]
except ValueError:
subsequent_pipes = []
with self.nlp.select_pipes(disable=subsequent_pipes):
token_patterns = []
phrase_pattern_labels = []
phrase_pattern_texts = []
phrase_pattern_ids = []
for entry in patterns:
if isinstance(entry["pattern"], str):
phrase_pattern_labels.append(entry["label"])
phrase_pattern_texts.append(entry["pattern"])
phrase_pattern_ids.append(entry.get("id"))
elif isinstance(entry["pattern"], list):
token_patterns.append(entry)
phrase_patterns = []
for label, pattern, ent_id in zip(
phrase_pattern_labels,
self.nlp.pipe(phrase_pattern_texts),
phrase_pattern_ids,
):
phrase_pattern = {"label": label, "pattern": pattern}
if ent_id:
phrase_pattern["id"] = ent_id
phrase_patterns.append(phrase_pattern)
for entry in token_patterns + phrase_patterns: # type: ignore[operator]
label = entry["label"] # type: ignore
if "id" in entry:
ent_label = label
label = self._create_label(label, entry["id"])
key = self.matcher._normalize_key(label)
self._ent_ids[key] = (ent_label, entry["id"])
pattern = entry["pattern"] # type: ignore
if isinstance(pattern, Doc):
self.phrase_patterns[label].append(pattern)
self.phrase_matcher.add(label, [pattern]) # type: ignore
elif isinstance(pattern, list):
self.token_patterns[label].append(pattern)
self.matcher.add(label, [pattern])
else:
raise ValueError(Errors.E097.format(pattern=pattern))
def clear(self) -> None:
"""Reset all patterns."""
self.token_patterns = defaultdict(list)
self.phrase_patterns = defaultdict(list)
self._ent_ids = defaultdict(tuple)
self.matcher = Matcher(
self.nlp.vocab,
validate=self._validate,
fuzzy_compare=self.matcher_fuzzy_compare,
)
self.phrase_matcher = PhraseMatcher(
self.nlp.vocab, attr=self.phrase_matcher_attr, validate=self._validate
)
def remove(self, ent_id: str) -> None:
"""Remove a pattern by its ent_id if a pattern with this ent_id was added before
ent_id (str): id of the pattern to be removed
RETURNS: None
DOCS: https://spacy.io/api/entityruler#remove
"""
label_id_pairs = [
(label, eid) for (label, eid) in self._ent_ids.values() if eid == ent_id
]
if not label_id_pairs:
raise ValueError(
Errors.E1024.format(attr_type="ID", label=ent_id, component=self.name)
)
created_labels = [
self._create_label(label, eid) for (label, eid) in label_id_pairs
]
# remove the patterns from self.phrase_patterns
self.phrase_patterns = defaultdict(
list,
{
label: val
for (label, val) in self.phrase_patterns.items()
if label not in created_labels
},
)
        # remove the patterns from self.token_patterns
self.token_patterns = defaultdict(
list,
{
label: val
for (label, val) in self.token_patterns.items()
if label not in created_labels
},
)
        # remove the labels from the token matcher and phrase matcher
for label in created_labels:
if label in self.phrase_matcher:
self.phrase_matcher.remove(label)
else:
self.matcher.remove(label)
def _require_patterns(self) -> None:
"""Raise a warning if this component has no patterns defined."""
if len(self) == 0:
warnings.warn(Warnings.W036.format(name=self.name))
def _split_label(self, label: str) -> Tuple[str, Optional[str]]:
"""Split Entity label into ent_label and ent_id if it contains self.ent_id_sep
label (str): The value of label in a pattern entry
RETURNS (tuple): ent_label, ent_id
"""
if self.ent_id_sep in label:
ent_label, ent_id = label.rsplit(self.ent_id_sep, 1)
else:
ent_label = label
ent_id = None # type: ignore
return ent_label, ent_id
def _create_label(self, label: Any, ent_id: Any) -> str:
"""Join Entity label with ent_id if the pattern has an `id` attribute
If ent_id is not a string, the label is returned as is.
label (str): The label to set for ent.label_
ent_id (str): The label
RETURNS (str): The ent_label joined with configured `ent_id_sep`
"""
if isinstance(ent_id, str):
label = f"{label}{self.ent_id_sep}{ent_id}"
return label
def from_bytes(
self, patterns_bytes: bytes, *, exclude: Iterable[str] = SimpleFrozenList()
) -> "EntityRuler":
"""Load the entity ruler from a bytestring.
patterns_bytes (bytes): The bytestring to load.
RETURNS (EntityRuler): The loaded entity ruler.
DOCS: https://spacy.io/api/entityruler#from_bytes
"""
cfg = srsly.msgpack_loads(patterns_bytes)
self.clear()
if isinstance(cfg, dict):
self.add_patterns(cfg.get("patterns", cfg))
self.overwrite = cfg.get("overwrite", False)
self.phrase_matcher_attr = cfg.get("phrase_matcher_attr", None)
self.phrase_matcher = PhraseMatcher(
self.nlp.vocab,
attr=self.phrase_matcher_attr,
)
self.ent_id_sep = cfg.get("ent_id_sep", DEFAULT_ENT_ID_SEP)
else:
self.add_patterns(cfg)
return self
def to_bytes(self, *, exclude: Iterable[str] = SimpleFrozenList()) -> bytes:
"""Serialize the entity ruler patterns to a bytestring.
RETURNS (bytes): The serialized patterns.
DOCS: https://spacy.io/api/entityruler#to_bytes
"""
serial = {
"overwrite": self.overwrite,
"ent_id_sep": self.ent_id_sep,
"phrase_matcher_attr": self.phrase_matcher_attr,
"patterns": self.patterns,
}
return srsly.msgpack_dumps(serial)
def from_disk(
self, path: Union[str, Path], *, exclude: Iterable[str] = SimpleFrozenList()
) -> "EntityRuler":
"""Load the entity ruler from a file. Expects a file containing
newline-delimited JSON (JSONL) with one entry per line.
path (str / Path): The JSONL file to load.
RETURNS (EntityRuler): The loaded entity ruler.
DOCS: https://spacy.io/api/entityruler#from_disk
"""
path = ensure_path(path)
self.clear()
depr_patterns_path = path.with_suffix(".jsonl")
if path.suffix == ".jsonl": # user provides a jsonl
            if path.is_file():
patterns = srsly.read_jsonl(path)
self.add_patterns(patterns)
else:
raise ValueError(Errors.E1023.format(path=path))
elif depr_patterns_path.is_file():
patterns = srsly.read_jsonl(depr_patterns_path)
self.add_patterns(patterns)
elif path.is_dir(): # path is a valid directory
cfg = {}
deserializers_patterns = {
"patterns": lambda p: self.add_patterns(
srsly.read_jsonl(p.with_suffix(".jsonl"))
)
}
deserializers_cfg = {"cfg": lambda p: cfg.update(srsly.read_json(p))}
from_disk(path, deserializers_cfg, {})
self.overwrite = cfg.get("overwrite", False)
self.phrase_matcher_attr = cfg.get("phrase_matcher_attr")
self.ent_id_sep = cfg.get("ent_id_sep", DEFAULT_ENT_ID_SEP)
self.phrase_matcher = PhraseMatcher(
self.nlp.vocab, attr=self.phrase_matcher_attr
)
from_disk(path, deserializers_patterns, {})
else: # path is not a valid directory or file
raise ValueError(Errors.E146.format(path=path))
return self
def to_disk(
self, path: Union[str, Path], *, exclude: Iterable[str] = SimpleFrozenList()
) -> None:
"""Save the entity ruler patterns to a directory. The patterns will be
saved as newline-delimited JSON (JSONL).
path (str / Path): The JSONL file to save.
DOCS: https://spacy.io/api/entityruler#to_disk
"""
path = ensure_path(path)
cfg = {
"overwrite": self.overwrite,
"phrase_matcher_attr": self.phrase_matcher_attr,
"ent_id_sep": self.ent_id_sep,
}
serializers = {
"patterns": lambda p: srsly.write_jsonl(
p.with_suffix(".jsonl"), self.patterns
),
"cfg": lambda p: srsly.write_json(p, cfg),
}
if path.suffix == ".jsonl": # user wants to save only JSONL
srsly.write_jsonl(path, self.patterns)
else:
to_disk(path, serializers, {})
| 21,071 | 37.878229 | 107 | py |
spaCy | spaCy-master/spacy/pipeline/functions.py | import warnings
from typing import Any, Dict
import srsly
from .. import util
from ..errors import Warnings
from ..language import Language
from ..matcher import Matcher
from ..tokens import Doc
@Language.component(
"merge_noun_chunks",
requires=["token.dep", "token.tag", "token.pos"],
retokenizes=True,
)
def merge_noun_chunks(doc: Doc) -> Doc:
"""Merge noun chunks into a single token.
doc (Doc): The Doc object.
RETURNS (Doc): The Doc object with merged noun chunks.
DOCS: https://spacy.io/api/pipeline-functions#merge_noun_chunks
"""
if not doc.has_annotation("DEP"):
return doc
with doc.retokenize() as retokenizer:
for np in doc.noun_chunks:
attrs = {"tag": np.root.tag, "dep": np.root.dep}
retokenizer.merge(np, attrs=attrs) # type: ignore[arg-type]
return doc
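# Usage sketch for merge_noun_chunks (illustrative): add the component after a
# dependency parser so that noun chunks are available, e.g.
#
#     nlp = spacy.load("en_core_web_sm")
#     nlp.add_pipe("merge_noun_chunks")
#     doc = nlp("The quick brown fox jumped.")  # "The quick brown fox" becomes one token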
@Language.component(
"merge_entities",
requires=["doc.ents", "token.ent_iob", "token.ent_type"],
retokenizes=True,
)
def merge_entities(doc: Doc):
"""Merge entities into a single token.
doc (Doc): The Doc object.
RETURNS (Doc): The Doc object with merged entities.
DOCS: https://spacy.io/api/pipeline-functions#merge_entities
"""
with doc.retokenize() as retokenizer:
for ent in doc.ents:
attrs = {"tag": ent.root.tag, "dep": ent.root.dep, "ent_type": ent.label}
retokenizer.merge(ent, attrs=attrs) # type: ignore[arg-type]
return doc
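# Usage sketch for merge_entities (illustrative): adding it after an NER component
# collapses each predicted entity span into a single token, e.g.
# nlp.add_pipe("merge_entities") turns "New York City" into one token when it is
# predicted as an entity.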
@Language.component("merge_subtokens", requires=["token.dep"], retokenizes=True)
def merge_subtokens(doc: Doc, label: str = "subtok") -> Doc:
"""Merge subtokens into a single token.
doc (Doc): The Doc object.
label (str): The subtoken dependency label.
RETURNS (Doc): The Doc object with merged subtokens.
DOCS: https://spacy.io/api/pipeline-functions#merge_subtokens
"""
# TODO: make stateful component with "label" config
merger = Matcher(doc.vocab)
merger.add("SUBTOK", [[{"DEP": label, "op": "+"}]])
matches = merger(doc)
spans = util.filter_spans([doc[start : end + 1] for _, start, end in matches]) # type: ignore[misc, operator]
with doc.retokenize() as retokenizer:
for span in spans:
retokenizer.merge(span)
return doc
@Language.factory(
"token_splitter",
default_config={"min_length": 25, "split_length": 10},
retokenizes=True,
)
def make_token_splitter(
nlp: Language, name: str, *, min_length: int = 0, split_length: int = 0
):
return TokenSplitter(min_length=min_length, split_length=split_length)
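# Usage sketch for the token_splitter factory (illustrative): useful in front of
# components with a maximum wordpiece length, e.g.
#
#     nlp.add_pipe("token_splitter", config={"min_length": 25, "split_length": 10}, first=True)
#
# which splits any token of 25 or more characters into chunks of at most 10 characters.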
class TokenSplitter:
def __init__(self, min_length: int = 0, split_length: int = 0):
self.min_length = min_length
self.split_length = split_length
def __call__(self, doc: Doc) -> Doc:
if self.min_length > 0 and self.split_length > 0:
with doc.retokenize() as retokenizer:
for t in doc:
if len(t.text) >= self.min_length:
orths = []
heads = []
attrs = {} # type: ignore[var-annotated]
for i in range(0, len(t.text), self.split_length):
orths.append(t.text[i : i + self.split_length])
heads.append((t, i / self.split_length))
retokenizer.split(t, orths, heads, attrs) # type: ignore[arg-type]
return doc
def _get_config(self) -> Dict[str, Any]:
return {
"min_length": self.min_length,
"split_length": self.split_length,
}
def _set_config(self, config: Dict[str, Any] = {}) -> None:
self.min_length = config.get("min_length", 0)
self.split_length = config.get("split_length", 0)
def to_bytes(self, **kwargs):
serializers = {
"cfg": lambda: srsly.json_dumps(self._get_config()),
}
return util.to_bytes(serializers, [])
def from_bytes(self, data, **kwargs):
deserializers = {
"cfg": lambda b: self._set_config(srsly.json_loads(b)),
}
util.from_bytes(data, deserializers, [])
return self
def to_disk(self, path, **kwargs):
path = util.ensure_path(path)
serializers = {
"cfg": lambda p: srsly.write_json(p, self._get_config()),
}
return util.to_disk(path, serializers, [])
def from_disk(self, path, **kwargs):
path = util.ensure_path(path)
serializers = {
"cfg": lambda p: self._set_config(srsly.read_json(p)),
}
util.from_disk(path, serializers, [])
@Language.factory(
"doc_cleaner",
default_config={"attrs": {"tensor": None, "_.trf_data": None}, "silent": True},
)
def make_doc_cleaner(nlp: Language, name: str, *, attrs: Dict[str, Any], silent: bool):
return DocCleaner(attrs, silent=silent)
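# Usage sketch for the doc_cleaner factory (illustrative): adding the cleaner as the
# last component drops memory-heavy attributes once they are no longer needed, e.g.
#
#     nlp.add_pipe("doc_cleaner", config={"attrs": {"tensor": None}})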
class DocCleaner:
def __init__(self, attrs: Dict[str, Any], *, silent: bool = True):
self.cfg: Dict[str, Any] = {"attrs": dict(attrs), "silent": silent}
def __call__(self, doc: Doc) -> Doc:
attrs: dict = self.cfg["attrs"]
silent: bool = self.cfg["silent"]
for attr, value in attrs.items():
obj = doc
parts = attr.split(".")
skip = False
for part in parts[:-1]:
if hasattr(obj, part):
obj = getattr(obj, part)
else:
skip = True
if not silent:
warnings.warn(Warnings.W116.format(attr=attr))
if not skip:
if hasattr(obj, parts[-1]):
setattr(obj, parts[-1], value)
else:
if not silent:
warnings.warn(Warnings.W116.format(attr=attr))
return doc
def to_bytes(self, **kwargs):
serializers = {
"cfg": lambda: srsly.json_dumps(self.cfg),
}
return util.to_bytes(serializers, [])
def from_bytes(self, data, **kwargs):
deserializers = {
"cfg": lambda b: self.cfg.update(srsly.json_loads(b)),
}
util.from_bytes(data, deserializers, [])
return self
def to_disk(self, path, **kwargs):
path = util.ensure_path(path)
serializers = {
"cfg": lambda p: srsly.write_json(p, self.cfg),
}
return util.to_disk(path, serializers, [])
def from_disk(self, path, **kwargs):
path = util.ensure_path(path)
serializers = {
"cfg": lambda p: self.cfg.update(srsly.read_json(p)),
}
util.from_disk(path, serializers, [])
| 6,704 | 31.867647 | 114 | py |
spaCy | spaCy-master/spacy/pipeline/lemmatizer.py | import warnings
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union
from thinc.api import Model
from .. import util
from ..errors import Errors, Warnings
from ..language import Language
from ..lookups import Lookups, load_lookups
from ..scorer import Scorer
from ..tokens import Doc, Token
from ..training import Example
from ..util import SimpleFrozenList, logger, registry
from ..vocab import Vocab
from .pipe import Pipe
@Language.factory(
"lemmatizer",
assigns=["token.lemma"],
default_config={
"model": None,
"mode": "lookup",
"overwrite": False,
"scorer": {"@scorers": "spacy.lemmatizer_scorer.v1"},
},
default_score_weights={"lemma_acc": 1.0},
)
def make_lemmatizer(
nlp: Language,
model: Optional[Model],
name: str,
mode: str,
overwrite: bool,
scorer: Optional[Callable],
):
return Lemmatizer(
nlp.vocab, model, name, mode=mode, overwrite=overwrite, scorer=scorer
)
def lemmatizer_score(examples: Iterable[Example], **kwargs) -> Dict[str, Any]:
return Scorer.score_token_attr(examples, "lemma", **kwargs)
@registry.scorers("spacy.lemmatizer_scorer.v1")
def make_lemmatizer_scorer():
return lemmatizer_score
class Lemmatizer(Pipe):
"""
The Lemmatizer supports simple part-of-speech-sensitive suffix rules and
lookup tables.
DOCS: https://spacy.io/api/lemmatizer
"""
@classmethod
def get_lookups_config(cls, mode: str) -> Tuple[List[str], List[str]]:
"""Returns the lookups configuration settings for a given mode for use
in Lemmatizer.load_lookups.
mode (str): The lemmatizer mode.
RETURNS (Tuple[List[str], List[str]]): The required and optional
lookup tables for this mode.
"""
if mode == "lookup":
return (["lemma_lookup"], [])
elif mode == "rule":
return (["lemma_rules"], ["lemma_exc", "lemma_index"])
return ([], [])
def __init__(
self,
vocab: Vocab,
model: Optional[Model],
name: str = "lemmatizer",
*,
mode: str = "lookup",
overwrite: bool = False,
scorer: Optional[Callable] = lemmatizer_score,
) -> None:
"""Initialize a Lemmatizer.
vocab (Vocab): The vocab.
model (Model): A model (not yet implemented).
name (str): The component name. Defaults to "lemmatizer".
mode (str): The lemmatizer mode: "lookup", "rule". Defaults to "lookup".
overwrite (bool): Whether to overwrite existing lemmas. Defaults to
`False`.
scorer (Optional[Callable]): The scoring method. Defaults to
Scorer.score_token_attr for the attribute "lemma".
DOCS: https://spacy.io/api/lemmatizer#init
"""
self.vocab = vocab
self.model = model
self.name = name
self._mode = mode
self.lookups = Lookups()
self.overwrite = overwrite
self._validated = False
if self.mode == "lookup":
self.lemmatize = self.lookup_lemmatize
elif self.mode == "rule":
self.lemmatize = self.rule_lemmatize
else:
mode_attr = f"{self.mode}_lemmatize"
if not hasattr(self, mode_attr):
raise ValueError(Errors.E1003.format(mode=mode))
self.lemmatize = getattr(self, mode_attr)
self.cache = {} # type: ignore[var-annotated]
self.scorer = scorer
@property
def mode(self):
return self._mode
def __call__(self, doc: Doc) -> Doc:
"""Apply the lemmatizer to one document.
doc (Doc): The Doc to process.
RETURNS (Doc): The processed Doc.
DOCS: https://spacy.io/api/lemmatizer#call
"""
if not self._validated:
self._validate_tables(Errors.E1004)
error_handler = self.get_error_handler()
try:
for token in doc:
if self.overwrite or token.lemma == 0:
token.lemma_ = self.lemmatize(token)[0]
return doc
except Exception as e:
error_handler(self.name, self, [doc], e)
def initialize(
self,
get_examples: Optional[Callable[[], Iterable[Example]]] = None,
*,
nlp: Optional[Language] = None,
lookups: Optional[Lookups] = None,
):
"""Initialize the lemmatizer and load in data.
get_examples (Callable[[], Iterable[Example]]): Function that
returns a representative sample of gold-standard Example objects.
nlp (Language): The current nlp object the component is part of.
lookups (Lookups): The lookups object containing the (optional) tables
such as "lemma_rules", "lemma_index", "lemma_exc" and
"lemma_lookup". Defaults to None.
"""
required_tables, optional_tables = self.get_lookups_config(self.mode)
if lookups is None:
logger.debug("Lemmatizer: loading tables from spacy-lookups-data")
lookups = load_lookups(lang=self.vocab.lang, tables=required_tables)
optional_lookups = load_lookups(
lang=self.vocab.lang, tables=optional_tables, strict=False
)
for table in optional_lookups.tables:
lookups.set_table(table, optional_lookups.get_table(table))
self.lookups = lookups
self._validate_tables(Errors.E1004)
def _validate_tables(self, error_message: str = Errors.E912) -> None:
"""Check that the lookups are correct for the current mode."""
required_tables, optional_tables = self.get_lookups_config(self.mode)
for table in required_tables:
if table not in self.lookups:
raise ValueError(
error_message.format(
mode=self.mode,
tables=required_tables,
found=self.lookups.tables,
)
)
self._validated = True
def lookup_lemmatize(self, token: Token) -> List[str]:
"""Lemmatize using a lookup-based approach.
token (Token): The token to lemmatize.
RETURNS (list): The available lemmas for the string.
DOCS: https://spacy.io/api/lemmatizer#lookup_lemmatize
"""
lookup_table = self.lookups.get_table("lemma_lookup", {})
result = lookup_table.get(token.text, token.text)
if isinstance(result, str):
result = [result]
return result
def rule_lemmatize(self, token: Token) -> List[str]:
"""Lemmatize using a rule-based approach.
token (Token): The token to lemmatize.
RETURNS (list): The available lemmas for the string.
DOCS: https://spacy.io/api/lemmatizer#rule_lemmatize
"""
cache_key = (token.orth, token.pos, token.morph.key) # type: ignore[attr-defined]
if cache_key in self.cache:
return self.cache[cache_key]
string = token.text
univ_pos = token.pos_.lower()
if univ_pos in ("", "eol", "space"):
if univ_pos == "":
warnings.warn(Warnings.W108)
return [string.lower()]
        # See Issue #435 for an example of where this logic is required.
if self.is_base_form(token):
return [string.lower()]
index_table = self.lookups.get_table("lemma_index", {})
exc_table = self.lookups.get_table("lemma_exc", {})
rules_table = self.lookups.get_table("lemma_rules", {})
if not any(
(
index_table.get(univ_pos),
exc_table.get(univ_pos),
rules_table.get(univ_pos),
)
):
if univ_pos == "propn":
return [string]
else:
return [string.lower()]
index = index_table.get(univ_pos, {})
exceptions = exc_table.get(univ_pos, {})
rules = rules_table.get(univ_pos, {})
orig = string
string = string.lower()
forms = []
oov_forms = []
for old, new in rules:
if string.endswith(old):
form = string[: len(string) - len(old)] + new
if not form:
pass
elif form in index or not form.isalpha():
forms.append(form)
else:
oov_forms.append(form)
# Remove duplicates but preserve the ordering of applied "rules"
forms = list(dict.fromkeys(forms))
# Put exceptions at the front of the list, so they get priority.
# This is a dodgy heuristic -- but it's the best we can do until we get
# frequencies on this. We can at least prune out problematic exceptions,
# if they shadow more frequent analyses.
for form in exceptions.get(string, []):
if form not in forms:
forms.insert(0, form)
if not forms:
forms.extend(oov_forms)
if not forms:
forms.append(orig)
self.cache[cache_key] = forms
return forms
def is_base_form(self, token: Token) -> bool:
"""Check whether the token is a base form that does not need further
analysis for lemmatization.
token (Token): The token.
RETURNS (bool): Whether the token is a base form.
DOCS: https://spacy.io/api/lemmatizer#is_base_form
"""
return False
def to_disk(
self, path: Union[str, Path], *, exclude: Iterable[str] = SimpleFrozenList()
):
"""Serialize the pipe to disk.
path (str / Path): Path to a directory.
exclude (Iterable[str]): String names of serialization fields to exclude.
DOCS: https://spacy.io/api/lemmatizer#to_disk
"""
serialize = {}
serialize["vocab"] = lambda p: self.vocab.to_disk(p, exclude=exclude)
serialize["lookups"] = lambda p: self.lookups.to_disk(p)
util.to_disk(path, serialize, exclude)
def from_disk(
self, path: Union[str, Path], *, exclude: Iterable[str] = SimpleFrozenList()
) -> "Lemmatizer":
"""Load the pipe from disk. Modifies the object in place and returns it.
path (str / Path): Path to a directory.
exclude (Iterable[str]): String names of serialization fields to exclude.
RETURNS (Lemmatizer): The modified Lemmatizer object.
DOCS: https://spacy.io/api/lemmatizer#from_disk
"""
deserialize: Dict[str, Callable[[Any], Any]] = {}
deserialize["vocab"] = lambda p: self.vocab.from_disk(p, exclude=exclude)
deserialize["lookups"] = lambda p: self.lookups.from_disk(p)
util.from_disk(path, deserialize, exclude)
self._validate_tables()
return self
def to_bytes(self, *, exclude: Iterable[str] = SimpleFrozenList()) -> bytes:
"""Serialize the pipe to a bytestring.
exclude (Iterable[str]): String names of serialization fields to exclude.
RETURNS (bytes): The serialized object.
DOCS: https://spacy.io/api/lemmatizer#to_bytes
"""
serialize = {}
serialize["vocab"] = lambda: self.vocab.to_bytes(exclude=exclude)
serialize["lookups"] = self.lookups.to_bytes
return util.to_bytes(serialize, exclude)
def from_bytes(
self, bytes_data: bytes, *, exclude: Iterable[str] = SimpleFrozenList()
) -> "Lemmatizer":
"""Load the pipe from a bytestring.
bytes_data (bytes): The serialized pipe.
exclude (Iterable[str]): String names of serialization fields to exclude.
RETURNS (Lemmatizer): The loaded Lemmatizer.
DOCS: https://spacy.io/api/lemmatizer#from_bytes
"""
deserialize: Dict[str, Callable[[Any], Any]] = {}
deserialize["vocab"] = lambda b: self.vocab.from_bytes(b, exclude=exclude)
deserialize["lookups"] = lambda b: self.lookups.from_bytes(b)
util.from_bytes(bytes_data, deserialize, exclude)
self._validate_tables()
return self
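# Usage sketch (illustrative, not part of the original module). It assumes a
# blank English pipeline and the spacy-lookups-data package, which the
# initialize() method above loads the required tables from:
#
#     import spacy
#
#     nlp = spacy.blank("en")
#     nlp.add_pipe("lemmatizer", config={"mode": "lookup"})
#     nlp.initialize()  # loads "lemma_lookup" via load_lookups()
#     doc = nlp("The striped bats were hanging")
#     print([token.lemma_ for token in doc])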
# spaCy-master/spacy/pipeline/span_finder.py
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple
from thinc.api import Config, Model, Optimizer, set_dropout_rate
from thinc.types import Floats2d
from ..errors import Errors
from ..language import Language
from ..scorer import Scorer
from ..tokens import Doc, Span
from ..training import Example
from ..util import registry
from .spancat import DEFAULT_SPANS_KEY
from .trainable_pipe import TrainablePipe
span_finder_default_config = """
[model]
@architectures = "spacy.SpanFinder.v1"
[model.scorer]
@layers = "spacy.LinearLogistic.v1"
nO = 2
[model.tok2vec]
@architectures = "spacy.Tok2Vec.v2"
[model.tok2vec.embed]
@architectures = "spacy.MultiHashEmbed.v2"
width = 96
rows = [5000, 1000, 2500, 1000]
attrs = ["NORM", "PREFIX", "SUFFIX", "SHAPE"]
include_static_vectors = false
[model.tok2vec.encode]
@architectures = "spacy.MaxoutWindowEncoder.v2"
width = ${model.tok2vec.embed.width}
window_size = 1
maxout_pieces = 3
depth = 4
"""
DEFAULT_SPAN_FINDER_MODEL = Config().from_str(span_finder_default_config)["model"]
@Language.factory(
"span_finder",
assigns=["doc.spans"],
default_config={
"threshold": 0.5,
"model": DEFAULT_SPAN_FINDER_MODEL,
"spans_key": DEFAULT_SPANS_KEY,
"max_length": 25,
"min_length": None,
"scorer": {"@scorers": "spacy.span_finder_scorer.v1"},
},
default_score_weights={
f"spans_{DEFAULT_SPANS_KEY}_f": 1.0,
f"spans_{DEFAULT_SPANS_KEY}_p": 0.0,
f"spans_{DEFAULT_SPANS_KEY}_r": 0.0,
},
)
def make_span_finder(
nlp: Language,
name: str,
model: Model[Iterable[Doc], Floats2d],
spans_key: str,
threshold: float,
max_length: Optional[int],
min_length: Optional[int],
scorer: Optional[Callable],
) -> "SpanFinder":
"""Create a SpanFinder component. The component predicts whether a token is
the start or the end of a potential span.
model (Model[List[Doc], Floats2d]): A model instance that
is given a list of documents and predicts a probability for each token.
spans_key (str): Key of the doc.spans dict to save the spans under. During
initialization and training, the component will look for spans on the
reference document under the same key.
threshold (float): Minimum probability to consider a prediction positive.
max_length (Optional[int]): Maximum length of the produced spans, defaults
to None meaning unlimited length.
min_length (Optional[int]): Minimum length of the produced spans, defaults
to None meaning shortest span length is 1.
scorer (Optional[Callable]): The scoring method. Defaults to
Scorer.score_spans for the Doc.spans[spans_key] with overlapping
spans allowed.
"""
return SpanFinder(
nlp,
model=model,
threshold=threshold,
name=name,
scorer=scorer,
max_length=max_length,
min_length=min_length,
spans_key=spans_key,
)
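# Usage sketch (illustrative, not part of the module): the factory above is
# normally invoked indirectly through nlp.add_pipe; the config values shown
# here are assumptions mirroring the registered defaults.
#
#     nlp.add_pipe(
#         "span_finder",
#         config={"spans_key": "sc", "threshold": 0.5, "max_length": 25},
#     )
#
# A trained component then writes candidate spans to doc.spans["sc"], usually
# for a downstream span categorizer to label.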
@registry.scorers("spacy.span_finder_scorer.v1")
def make_span_finder_scorer():
return span_finder_score
def span_finder_score(examples: Iterable[Example], **kwargs) -> Dict[str, Any]:
kwargs = dict(kwargs)
attr_prefix = "spans_"
key = kwargs["spans_key"]
kwargs.setdefault("attr", f"{attr_prefix}{key}")
kwargs.setdefault(
"getter", lambda doc, key: doc.spans.get(key[len(attr_prefix) :], [])
)
kwargs.setdefault("has_annotation", lambda doc: key in doc.spans)
kwargs.setdefault("allow_overlap", True)
kwargs.setdefault("labeled", False)
scores = Scorer.score_spans(examples, **kwargs)
scores.pop(f"{kwargs['attr']}_per_type", None)
return scores
def _char_indices(span: Span) -> Tuple[int, int]:
start = span[0].idx
end = span[-1].idx + len(span[-1])
return start, end
class SpanFinder(TrainablePipe):
"""Pipeline that learns span boundaries.
DOCS: https://spacy.io/api/spanfinder
"""
def __init__(
self,
nlp: Language,
model: Model[Iterable[Doc], Floats2d],
name: str = "span_finder",
*,
spans_key: str = DEFAULT_SPANS_KEY,
threshold: float = 0.5,
max_length: Optional[int] = None,
min_length: Optional[int] = None,
scorer: Optional[Callable] = span_finder_score,
) -> None:
"""Initialize the span finder.
model (thinc.api.Model): The Thinc Model powering the pipeline
component.
name (str): The component instance name, used to add entries to the
losses during training.
threshold (float): Minimum probability to consider a prediction
positive.
scorer (Optional[Callable]): The scoring method.
spans_key (str): Key of the doc.spans dict to save the spans under.
During initialization and training, the component will look for
spans on the reference document under the same key.
max_length (Optional[int]): Maximum length of the produced spans,
defaults to None meaning unlimited length.
min_length (Optional[int]): Minimum length of the produced spans,
defaults to None meaning shortest span length is 1.
DOCS: https://spacy.io/api/spanfinder#init
"""
self.vocab = nlp.vocab
if (max_length is not None and max_length < 1) or (
min_length is not None and min_length < 1
):
raise ValueError(
Errors.E1053.format(min_length=min_length, max_length=max_length)
)
self.model = model
self.name = name
self.scorer = scorer
self.cfg: Dict[str, Any] = {
"min_length": min_length,
"max_length": max_length,
"threshold": threshold,
"spans_key": spans_key,
}
def predict(self, docs: Iterable[Doc]):
"""Apply the pipeline's model to a batch of docs, without modifying
them.
docs (Iterable[Doc]): The documents to predict.
        RETURNS: The model's predictions for each document.
DOCS: https://spacy.io/api/spanfinder#predict
"""
scores = self.model.predict(docs)
return scores
def set_annotations(self, docs: Iterable[Doc], scores: Floats2d) -> None:
"""Modify a batch of Doc objects, using pre-computed scores.
docs (Iterable[Doc]): The documents to modify.
scores: The scores to set, produced by SpanFinder predict method.
DOCS: https://spacy.io/api/spanfinder#set_annotations
"""
offset = 0
for i, doc in enumerate(docs):
doc.spans[self.cfg["spans_key"]] = []
starts = []
ends = []
doc_scores = scores[offset : offset + len(doc)]
for token, token_score in zip(doc, doc_scores):
if token_score[0] >= self.cfg["threshold"]:
starts.append(token.i)
if token_score[1] >= self.cfg["threshold"]:
ends.append(token.i)
for start in starts:
for end in ends:
span_length = end + 1 - start
if span_length < 1:
continue
if (
self.cfg["min_length"] is None
or self.cfg["min_length"] <= span_length
) and (
self.cfg["max_length"] is None
or span_length <= self.cfg["max_length"]
):
doc.spans[self.cfg["spans_key"]].append(doc[start : end + 1])
offset += len(doc)
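    # Worked example (illustrative numbers): with threshold 0.5 and per-token
    # scores [[0.9, 0.1], [0.2, 0.8], [0.6, 0.7]] for a three-token doc,
    # tokens 0 and 2 are predicted starts and tokens 1 and 2 are predicted
    # ends, so the candidate spans are doc[0:2], doc[0:3] and doc[2:3]
    # (subject to the min_length/max_length settings).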
def update(
self,
examples: Iterable[Example],
*,
drop: float = 0.0,
sgd: Optional[Optimizer] = None,
losses: Optional[Dict[str, float]] = None,
) -> Dict[str, float]:
"""Learn from a batch of documents and gold-standard information,
updating the pipe's model. Delegates to predict and get_loss.
examples (Iterable[Example]): A batch of Example objects.
drop (float): The dropout rate.
sgd (Optional[thinc.api.Optimizer]): The optimizer.
losses (Optional[Dict[str, float]]): Optional record of the loss during
training. Updated using the component name as the key.
RETURNS (Dict[str, float]): The updated losses dictionary.
DOCS: https://spacy.io/api/spanfinder#update
"""
if losses is None:
losses = {}
losses.setdefault(self.name, 0.0)
predicted = [eg.predicted for eg in examples]
set_dropout_rate(self.model, drop)
scores, backprop_scores = self.model.begin_update(predicted)
loss, d_scores = self.get_loss(examples, scores)
backprop_scores(d_scores)
if sgd is not None:
self.finish_update(sgd)
losses[self.name] += loss
return losses
def get_loss(self, examples, scores) -> Tuple[float, Floats2d]:
"""Find the loss and gradient of loss for the batch of documents and
their predicted scores.
        examples (Iterable[Example]): The batch of examples.
scores: Scores representing the model's predictions.
RETURNS (Tuple[float, Floats2d]): The loss and the gradient.
DOCS: https://spacy.io/api/spanfinder#get_loss
"""
truths, masks = self._get_aligned_truth_scores(examples, self.model.ops)
d_scores = scores - self.model.ops.asarray2f(truths)
d_scores *= masks
loss = float((d_scores**2).sum())
return loss, d_scores
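    # Worked example (illustrative numbers): if a token's aligned truth row is
    # [1, 0], its predicted scores are [0.8, 0.3] and its mask row is [1, 1],
    # the gradient row is [-0.2, 0.3] and the token contributes
    # (-0.2)**2 + 0.3**2 = 0.13 to the loss.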
def _get_aligned_truth_scores(self, examples, ops) -> Tuple[Floats2d, Floats2d]:
"""Align scores of the predictions to the references for calculating
the loss.
"""
truths = []
masks = []
for eg in examples:
if eg.x.text != eg.y.text:
raise ValueError(Errors.E1054.format(component="span_finder"))
n_tokens = len(eg.predicted)
truth = ops.xp.zeros((n_tokens, 2), dtype="float32")
mask = ops.xp.ones((n_tokens, 2), dtype="float32")
if self.cfg["spans_key"] in eg.reference.spans:
for span in eg.reference.spans[self.cfg["spans_key"]]:
ref_start_char, ref_end_char = _char_indices(span)
pred_span = eg.predicted.char_span(
ref_start_char, ref_end_char, alignment_mode="expand"
)
pred_start_char, pred_end_char = _char_indices(pred_span)
start_match = pred_start_char == ref_start_char
end_match = pred_end_char == ref_end_char
if start_match:
truth[pred_span[0].i, 0] = 1
else:
mask[pred_span[0].i, 0] = 0
if end_match:
truth[pred_span[-1].i, 1] = 1
else:
mask[pred_span[-1].i, 1] = 0
truths.append(truth)
masks.append(mask)
truths = ops.xp.concatenate(truths, axis=0)
masks = ops.xp.concatenate(masks, axis=0)
return truths, masks
def initialize(
self,
get_examples: Callable[[], Iterable[Example]],
*,
nlp: Optional[Language] = None,
) -> None:
"""Initialize the pipe for training, using a representative set
of data examples.
get_examples (Callable[[], Iterable[Example]]): Function that
returns a representative sample of gold-standard Example objects.
nlp (Optional[Language]): The current nlp object the component is part
of.
DOCS: https://spacy.io/api/spanfinder#initialize
"""
subbatch: List[Example] = []
for eg in get_examples():
if len(subbatch) < 10:
subbatch.append(eg)
if subbatch:
docs = [eg.reference for eg in subbatch]
Y, _ = self._get_aligned_truth_scores(subbatch, self.model.ops)
self.model.initialize(X=docs, Y=Y)
else:
self.model.initialize()
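# Training sketch (illustrative, not part of the module; "train_examples" is a
# hypothetical list of Example objects carrying reference spans under the
# configured spans_key):
#
#     nlp.add_pipe("span_finder")
#     nlp.initialize(get_examples=lambda: train_examples)
#     losses = nlp.update(train_examples)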
# spaCy-master/spacy/pipeline/span_ruler.py
import warnings
from functools import partial
from pathlib import Path
from typing import (
Any,
Callable,
Dict,
Iterable,
List,
Optional,
Sequence,
Set,
Tuple,
Union,
cast,
)
import srsly
from .. import util
from ..errors import Errors, Warnings
from ..language import Language
from ..matcher import Matcher, PhraseMatcher
from ..matcher.levenshtein import levenshtein_compare
from ..scorer import Scorer
from ..tokens import Doc, Span
from ..training import Example
from ..util import SimpleFrozenList, ensure_path, registry
from .pipe import Pipe
PatternType = Dict[str, Union[str, List[Dict[str, Any]]]]
DEFAULT_SPANS_KEY = "ruler"
@Language.factory(
"future_entity_ruler",
assigns=["doc.ents"],
default_config={
"phrase_matcher_attr": None,
"validate": False,
"overwrite_ents": False,
"scorer": {"@scorers": "spacy.entity_ruler_scorer.v1"},
"ent_id_sep": "__unused__",
"matcher_fuzzy_compare": {"@misc": "spacy.levenshtein_compare.v1"},
},
default_score_weights={
"ents_f": 1.0,
"ents_p": 0.0,
"ents_r": 0.0,
"ents_per_type": None,
},
)
def make_entity_ruler(
nlp: Language,
name: str,
phrase_matcher_attr: Optional[Union[int, str]],
matcher_fuzzy_compare: Callable,
validate: bool,
overwrite_ents: bool,
scorer: Optional[Callable],
ent_id_sep: str,
):
if overwrite_ents:
ents_filter = prioritize_new_ents_filter
else:
ents_filter = prioritize_existing_ents_filter
return SpanRuler(
nlp,
name,
spans_key=None,
spans_filter=None,
annotate_ents=True,
ents_filter=ents_filter,
phrase_matcher_attr=phrase_matcher_attr,
matcher_fuzzy_compare=matcher_fuzzy_compare,
validate=validate,
overwrite=False,
scorer=scorer,
)
@Language.factory(
"span_ruler",
assigns=["doc.spans"],
default_config={
"spans_key": DEFAULT_SPANS_KEY,
"spans_filter": None,
"annotate_ents": False,
"ents_filter": {"@misc": "spacy.first_longest_spans_filter.v1"},
"phrase_matcher_attr": None,
"matcher_fuzzy_compare": {"@misc": "spacy.levenshtein_compare.v1"},
"validate": False,
"overwrite": True,
"scorer": {
"@scorers": "spacy.overlapping_labeled_spans_scorer.v1",
"spans_key": DEFAULT_SPANS_KEY,
},
},
default_score_weights={
f"spans_{DEFAULT_SPANS_KEY}_f": 1.0,
f"spans_{DEFAULT_SPANS_KEY}_p": 0.0,
f"spans_{DEFAULT_SPANS_KEY}_r": 0.0,
f"spans_{DEFAULT_SPANS_KEY}_per_type": None,
},
)
def make_span_ruler(
nlp: Language,
name: str,
spans_key: Optional[str],
spans_filter: Optional[Callable[[Iterable[Span], Iterable[Span]], Iterable[Span]]],
annotate_ents: bool,
ents_filter: Callable[[Iterable[Span], Iterable[Span]], Iterable[Span]],
phrase_matcher_attr: Optional[Union[int, str]],
matcher_fuzzy_compare: Callable,
validate: bool,
overwrite: bool,
scorer: Optional[Callable],
):
return SpanRuler(
nlp,
name,
spans_key=spans_key,
spans_filter=spans_filter,
annotate_ents=annotate_ents,
ents_filter=ents_filter,
phrase_matcher_attr=phrase_matcher_attr,
matcher_fuzzy_compare=matcher_fuzzy_compare,
validate=validate,
overwrite=overwrite,
scorer=scorer,
)
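# Usage sketch (illustrative, not part of the module): with the default
# config, matched spans are written to doc.spans["ruler"].
#
#     ruler = nlp.add_pipe("span_ruler")
#     ruler.add_patterns([{"label": "ORG", "pattern": "Apple"}])
#     doc = nlp("Apple is opening its first big office in San Francisco.")
#     print([(span.text, span.label_) for span in doc.spans["ruler"]])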
def prioritize_new_ents_filter(
entities: Iterable[Span], spans: Iterable[Span]
) -> List[Span]:
"""Merge entities and spans into one list without overlaps by allowing
spans to overwrite any entities that they overlap with. Intended to
replicate the overwrite_ents=True behavior from the EntityRuler.
entities (Iterable[Span]): The entities, already filtered for overlaps.
spans (Iterable[Span]): The spans to merge, may contain overlaps.
RETURNS (List[Span]): Filtered list of non-overlapping spans.
"""
get_sort_key = lambda span: (span.end - span.start, -span.start)
spans = sorted(spans, key=get_sort_key, reverse=True)
entities = list(entities)
new_entities = []
seen_tokens: Set[int] = set()
for span in spans:
start = span.start
end = span.end
if all(token.i not in seen_tokens for token in span):
new_entities.append(span)
entities = [e for e in entities if not (e.start < end and e.end > start)]
seen_tokens.update(range(start, end))
return entities + new_entities
@registry.misc("spacy.prioritize_new_ents_filter.v1")
def make_prioritize_new_ents_filter():
return prioritize_new_ents_filter
def prioritize_existing_ents_filter(
entities: Iterable[Span], spans: Iterable[Span]
) -> List[Span]:
"""Merge entities and spans into one list without overlaps by prioritizing
existing entities. Intended to replicate the overwrite_ents=False behavior
from the EntityRuler.
entities (Iterable[Span]): The entities, already filtered for overlaps.
spans (Iterable[Span]): The spans to merge, may contain overlaps.
RETURNS (List[Span]): Filtered list of non-overlapping spans.
"""
get_sort_key = lambda span: (span.end - span.start, -span.start)
spans = sorted(spans, key=get_sort_key, reverse=True)
entities = list(entities)
new_entities = []
seen_tokens: Set[int] = set()
seen_tokens.update(*(range(ent.start, ent.end) for ent in entities))
for span in spans:
start = span.start
end = span.end
if all(token.i not in seen_tokens for token in span):
new_entities.append(span)
seen_tokens.update(range(start, end))
return entities + new_entities
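# Behaviour sketch (illustrative): given an existing entity doc[2:4] and a new
# ruler span doc[3:5], prioritize_existing_ents_filter keeps the entity and
# drops the overlapping span, while prioritize_new_ents_filter (above) keeps
# the span and drops the entity.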
@registry.misc("spacy.prioritize_existing_ents_filter.v1")
def make_preserve_existing_ents_filter():
return prioritize_existing_ents_filter
def overlapping_labeled_spans_score(
examples: Iterable[Example], *, spans_key=DEFAULT_SPANS_KEY, **kwargs
) -> Dict[str, Any]:
kwargs = dict(kwargs)
    attr_prefix = "spans_"
kwargs.setdefault("attr", f"{attr_prefix}{spans_key}")
kwargs.setdefault("allow_overlap", True)
kwargs.setdefault("labeled", True)
kwargs.setdefault(
"getter", lambda doc, key: doc.spans.get(key[len(attr_prefix) :], [])
)
kwargs.setdefault("has_annotation", lambda doc: spans_key in doc.spans)
return Scorer.score_spans(examples, **kwargs)
@registry.scorers("spacy.overlapping_labeled_spans_scorer.v1")
def make_overlapping_labeled_spans_scorer(spans_key: str = DEFAULT_SPANS_KEY):
return partial(overlapping_labeled_spans_score, spans_key=spans_key)
class SpanRuler(Pipe):
"""The SpanRuler lets you add spans to the `Doc.spans` using token-based
rules or exact phrase matches.
DOCS: https://spacy.io/api/spanruler
USAGE: https://spacy.io/usage/rule-based-matching#spanruler
"""
def __init__(
self,
nlp: Language,
name: str = "span_ruler",
*,
spans_key: Optional[str] = DEFAULT_SPANS_KEY,
spans_filter: Optional[
Callable[[Iterable[Span], Iterable[Span]], Iterable[Span]]
] = None,
annotate_ents: bool = False,
ents_filter: Callable[
[Iterable[Span], Iterable[Span]], Iterable[Span]
] = util.filter_chain_spans,
phrase_matcher_attr: Optional[Union[int, str]] = None,
matcher_fuzzy_compare: Callable = levenshtein_compare,
validate: bool = False,
overwrite: bool = False,
scorer: Optional[Callable] = partial(
overlapping_labeled_spans_score, spans_key=DEFAULT_SPANS_KEY
),
) -> None:
"""Initialize the span ruler. If patterns are supplied here, they
need to be a list of dictionaries with a `"label"` and `"pattern"`
key. A pattern can either be a token pattern (list) or a phrase pattern
(string). For example: `{'label': 'ORG', 'pattern': 'Apple'}`.
nlp (Language): The shared nlp object to pass the vocab to the matchers
and process phrase patterns.
name (str): Instance name of the current pipeline component. Typically
passed in automatically from the factory when the component is
added. Used to disable the current span ruler while creating
phrase patterns with the nlp object.
spans_key (Optional[str]): The spans key to save the spans under. If
`None`, no spans are saved. Defaults to "ruler".
spans_filter (Optional[Callable[[Iterable[Span], Iterable[Span]], List[Span]]):
The optional method to filter spans before they are assigned to
doc.spans. Defaults to `None`.
annotate_ents (bool): Whether to save spans to doc.ents. Defaults to
`False`.
ents_filter (Callable[[Iterable[Span], Iterable[Span]], List[Span]]):
The method to filter spans before they are assigned to doc.ents.
Defaults to `util.filter_chain_spans`.
phrase_matcher_attr (Optional[Union[int, str]]): Token attribute to
match on, passed to the internal PhraseMatcher as `attr`. Defaults
to `None`.
matcher_fuzzy_compare (Callable): The fuzzy comparison method for the
internal Matcher. Defaults to
spacy.matcher.levenshtein.levenshtein_compare.
validate (bool): Whether patterns should be validated, passed to
Matcher and PhraseMatcher as `validate`.
overwrite (bool): Whether to remove any existing spans under this spans
key if `spans_key` is set, and/or to remove any ents under `doc.ents` if
            `annotate_ents` is set. Defaults to `False`.
scorer (Optional[Callable]): The scoring method. Defaults to
spacy.pipeline.span_ruler.overlapping_labeled_spans_score.
DOCS: https://spacy.io/api/spanruler#init
"""
self.nlp = nlp
self.name = name
self.spans_key = spans_key
self.annotate_ents = annotate_ents
self.phrase_matcher_attr = phrase_matcher_attr
self.validate = validate
self.overwrite = overwrite
self.spans_filter = spans_filter
self.ents_filter = ents_filter
self.scorer = scorer
self.matcher_fuzzy_compare = matcher_fuzzy_compare
self._match_label_id_map: Dict[int, Dict[str, str]] = {}
self.clear()
def __len__(self) -> int:
"""The number of all labels added to the span ruler."""
return len(self._patterns)
def __contains__(self, label: str) -> bool:
"""Whether a label is present in the patterns."""
for label_id in self._match_label_id_map.values():
if label_id["label"] == label:
return True
return False
@property
def key(self) -> Optional[str]:
"""Key of the doc.spans dict to save the spans under."""
return self.spans_key
def __call__(self, doc: Doc) -> Doc:
"""Find matches in document and add them as entities.
doc (Doc): The Doc object in the pipeline.
RETURNS (Doc): The Doc with added entities, if available.
DOCS: https://spacy.io/api/spanruler#call
"""
error_handler = self.get_error_handler()
try:
matches = self.match(doc)
self.set_annotations(doc, matches)
return doc
except Exception as e:
return error_handler(self.name, self, [doc], e)
def match(self, doc: Doc):
self._require_patterns()
with warnings.catch_warnings():
warnings.filterwarnings("ignore", message="\\[W036")
matches = cast(
List[Tuple[int, int, int]],
list(self.matcher(doc)) + list(self.phrase_matcher(doc)),
)
deduplicated_matches = set(
Span(
doc,
start,
end,
label=self._match_label_id_map[m_id]["label"],
span_id=self._match_label_id_map[m_id]["id"],
)
for m_id, start, end in matches
if start != end
)
return sorted(list(deduplicated_matches))
def set_annotations(self, doc, matches):
"""Modify the document in place"""
# set doc.spans if spans_key is set
if self.key:
spans = []
if self.key in doc.spans and not self.overwrite:
spans = doc.spans[self.key]
spans.extend(
self.spans_filter(spans, matches) if self.spans_filter else matches
)
doc.spans[self.key] = spans
# set doc.ents if annotate_ents is set
if self.annotate_ents:
spans = []
if not self.overwrite:
spans = list(doc.ents)
spans = self.ents_filter(spans, matches)
try:
doc.ents = sorted(spans)
except ValueError:
raise ValueError(Errors.E854)
@property
def labels(self) -> Tuple[str, ...]:
"""All labels present in the match patterns.
        RETURNS (Tuple[str, ...]): The string labels.
DOCS: https://spacy.io/api/spanruler#labels
"""
return tuple(sorted(set([cast(str, p["label"]) for p in self._patterns])))
@property
def ids(self) -> Tuple[str, ...]:
"""All IDs present in the match patterns.
        RETURNS (Tuple[str, ...]): The string IDs.
DOCS: https://spacy.io/api/spanruler#ids
"""
return tuple(
sorted(set([cast(str, p.get("id")) for p in self._patterns]) - set([None]))
)
def initialize(
self,
get_examples: Callable[[], Iterable[Example]],
*,
nlp: Optional[Language] = None,
patterns: Optional[Sequence[PatternType]] = None,
):
"""Initialize the pipe for training.
get_examples (Callable[[], Iterable[Example]]): Function that
returns a representative sample of gold-standard Example objects.
nlp (Language): The current nlp object the component is part of.
patterns (Optional[Iterable[PatternType]]): The list of patterns.
DOCS: https://spacy.io/api/spanruler#initialize
"""
self.clear()
if patterns:
self.add_patterns(patterns) # type: ignore[arg-type]
@property
def patterns(self) -> List[PatternType]:
"""Get all patterns that were added to the span ruler.
RETURNS (list): The original patterns, one dictionary per pattern.
DOCS: https://spacy.io/api/spanruler#patterns
"""
return self._patterns
def add_patterns(self, patterns: List[PatternType]) -> None:
"""Add patterns to the span ruler. A pattern can either be a token
pattern (list of dicts) or a phrase pattern (string). For example:
{'label': 'ORG', 'pattern': 'Apple'}
{'label': 'ORG', 'pattern': 'Apple', 'id': 'apple'}
{'label': 'GPE', 'pattern': [{'lower': 'san'}, {'lower': 'francisco'}]}
patterns (list): The patterns to add.
DOCS: https://spacy.io/api/spanruler#add_patterns
"""
# disable the nlp components after this one in case they haven't been
# initialized / deserialized yet
try:
current_index = -1
for i, (name, pipe) in enumerate(self.nlp.pipeline):
if self == pipe:
current_index = i
break
subsequent_pipes = [pipe for pipe in self.nlp.pipe_names[current_index:]]
except ValueError:
subsequent_pipes = []
with self.nlp.select_pipes(disable=subsequent_pipes):
phrase_pattern_labels = []
phrase_pattern_texts = []
for entry in patterns:
p_label = cast(str, entry["label"])
p_id = cast(str, entry.get("id", ""))
label = repr((p_label, p_id))
self._match_label_id_map[self.nlp.vocab.strings.as_int(label)] = {
"label": p_label,
"id": p_id,
}
if isinstance(entry["pattern"], str):
phrase_pattern_labels.append(label)
phrase_pattern_texts.append(entry["pattern"])
elif isinstance(entry["pattern"], list):
self.matcher.add(label, [entry["pattern"]])
else:
raise ValueError(Errors.E097.format(pattern=entry["pattern"]))
self._patterns.append(entry)
for label, pattern in zip(
phrase_pattern_labels,
self.nlp.pipe(phrase_pattern_texts),
):
self.phrase_matcher.add(label, [pattern])
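    # Usage sketch (illustrative): token patterns and phrase patterns can be
    # mixed in one call, and an optional "id" is stored on the matched spans
    # (exposed as span.id_).
    #
    #     ruler.add_patterns([
    #         {"label": "ORG", "pattern": "Apple", "id": "apple"},
    #         {"label": "GPE", "pattern": [{"lower": "san"}, {"lower": "francisco"}]},
    #     ])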
def clear(self) -> None:
"""Reset all patterns.
RETURNS: None
DOCS: https://spacy.io/api/spanruler#clear
"""
self._patterns: List[PatternType] = []
self.matcher: Matcher = Matcher(
self.nlp.vocab,
validate=self.validate,
fuzzy_compare=self.matcher_fuzzy_compare,
)
self.phrase_matcher: PhraseMatcher = PhraseMatcher(
self.nlp.vocab,
attr=self.phrase_matcher_attr,
validate=self.validate,
)
def remove(self, label: str) -> None:
"""Remove a pattern by its label.
label (str): Label of the pattern to be removed.
RETURNS: None
DOCS: https://spacy.io/api/spanruler#remove
"""
if label not in self:
raise ValueError(
Errors.E1024.format(attr_type="label", label=label, component=self.name)
)
self._patterns = [p for p in self._patterns if p["label"] != label]
for m_label in self._match_label_id_map:
if self._match_label_id_map[m_label]["label"] == label:
m_label_str = self.nlp.vocab.strings.as_string(m_label)
if m_label_str in self.phrase_matcher:
self.phrase_matcher.remove(m_label_str)
if m_label_str in self.matcher:
self.matcher.remove(m_label_str)
def remove_by_id(self, pattern_id: str) -> None:
"""Remove a pattern by its pattern ID.
pattern_id (str): ID of the pattern to be removed.
RETURNS: None
DOCS: https://spacy.io/api/spanruler#remove_by_id
"""
orig_len = len(self)
self._patterns = [p for p in self._patterns if p.get("id") != pattern_id]
if orig_len == len(self):
raise ValueError(
Errors.E1024.format(
attr_type="ID", label=pattern_id, component=self.name
)
)
for m_label in self._match_label_id_map:
if self._match_label_id_map[m_label]["id"] == pattern_id:
m_label_str = self.nlp.vocab.strings.as_string(m_label)
if m_label_str in self.phrase_matcher:
self.phrase_matcher.remove(m_label_str)
if m_label_str in self.matcher:
self.matcher.remove(m_label_str)
def _require_patterns(self) -> None:
"""Raise a warning if this component has no patterns defined."""
if len(self) == 0:
warnings.warn(Warnings.W036.format(name=self.name))
def from_bytes(
self, bytes_data: bytes, *, exclude: Iterable[str] = SimpleFrozenList()
) -> "SpanRuler":
"""Load the span ruler from a bytestring.
bytes_data (bytes): The bytestring to load.
RETURNS (SpanRuler): The loaded span ruler.
DOCS: https://spacy.io/api/spanruler#from_bytes
"""
self.clear()
deserializers = {
"patterns": lambda b: self.add_patterns(srsly.json_loads(b)),
}
util.from_bytes(bytes_data, deserializers, exclude)
return self
def to_bytes(self, *, exclude: Iterable[str] = SimpleFrozenList()) -> bytes:
"""Serialize the span ruler to a bytestring.
RETURNS (bytes): The serialized patterns.
DOCS: https://spacy.io/api/spanruler#to_bytes
"""
serializers = {
"patterns": lambda: srsly.json_dumps(self.patterns),
}
return util.to_bytes(serializers, exclude)
def from_disk(
self, path: Union[str, Path], *, exclude: Iterable[str] = SimpleFrozenList()
) -> "SpanRuler":
"""Load the span ruler from a directory.
path (Union[str, Path]): A path to a directory.
RETURNS (SpanRuler): The loaded span ruler.
DOCS: https://spacy.io/api/spanruler#from_disk
"""
self.clear()
path = ensure_path(path)
deserializers = {
"patterns": lambda p: self.add_patterns(srsly.read_jsonl(p)),
}
util.from_disk(path, deserializers, {})
return self
def to_disk(
self, path: Union[str, Path], *, exclude: Iterable[str] = SimpleFrozenList()
) -> None:
"""Save the span ruler patterns to a directory.
path (Union[str, Path]): A path to a directory.
DOCS: https://spacy.io/api/spanruler#to_disk
"""
path = ensure_path(path)
serializers = {
"patterns": lambda p: srsly.write_jsonl(p, self.patterns),
}
util.to_disk(path, serializers, {})
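# Persistence sketch (illustrative paths): the patterns round-trip through a
# JSONL file on disk, so a ruler can be rebuilt on another pipeline.
#
#     ruler.to_disk("./ruler_patterns")
#     restored = nlp.add_pipe("span_ruler", name="restored_ruler")
#     restored.from_disk("./ruler_patterns")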