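The reconstructed column header below describes one row per source file: the repository name, the file path, the full file contents, and three size statistics plus the file extension. The sketch that follows is a minimal example of how a dump with this schema could be consumed, assuming it is published as a Hugging Face dataset; the dataset path and the split name are placeholders, not values taken from this page.

```python
# Minimal sketch, assuming the dump is available through the Hugging Face
# `datasets` library. "path/to/dataset" and the "train" split are hypothetical
# placeholders; only the column names come from the header below.
from datasets import load_dataset

ds = load_dataset("path/to/dataset", split="train")

# Keep only Python test files from the spaCy repo and inspect their size stats.
py_tests = ds.filter(
    lambda row: row["extension_type"] == "py" and "/tests/" in row["file"]
)
for row in py_tests.select(range(3)):
    print(row["repo"], row["file"], row["file_length"], row["max_line_length"])
```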
repo (string, lengths 2–152, nullable ⌀) | file (string, lengths 15–239) | code (string, lengths 0–58.4M) | file_length (int64, 0–58.4M) | avg_line_length (float64, 0–1.81M) | max_line_length (int64, 0–12.7M) | extension_type (string, 364 classes) |
---|---|---|---|---|---|---|
spaCy | spaCy-master/spacy/tests/lang/eu/test_text.py | import pytest
def test_eu_tokenizer_handles_long_text(eu_tokenizer):
text = """ta nere guitarra estrenatu ondoren"""
tokens = eu_tokenizer(text)
assert len(tokens) == 5
@pytest.mark.parametrize(
"text,length",
[
("milesker ederra joan zen hitzaldia plazer hutsa", 7),
("astelehen guztia sofan pasau biot", 5),
],
)
def test_eu_tokenizer_handles_cnts(eu_tokenizer, text, length):
tokens = eu_tokenizer(text)
assert len(tokens) == length
| 488 | 23.45 | 63 | py |
spaCy | spaCy-master/spacy/tests/lang/fa/__init__.py | 0 | 0 | 0 | py |
|
spaCy | spaCy-master/spacy/tests/lang/fa/test_noun_chunks.py | import pytest
def test_noun_chunks_is_parsed_fa(fa_tokenizer):
"""Test that noun_chunks raises Value Error for 'fa' language if Doc is not parsed."""
doc = fa_tokenizer("این یک جمله نمونه می باشد.")
with pytest.raises(ValueError):
list(doc.noun_chunks)
| 276 | 26.7 | 90 | py |
spaCy | spaCy-master/spacy/tests/lang/fi/__init__.py | 0 | 0 | 0 | py |
|
spaCy | spaCy-master/spacy/tests/lang/fi/test_noun_chunks.py | import pytest
from spacy.tokens import Doc
FI_NP_TEST_EXAMPLES = [
(
"Kaksi tyttöä potkii punaista palloa",
["NUM", "NOUN", "VERB", "ADJ", "NOUN"],
["nummod", "nsubj", "ROOT", "amod", "obj"],
[1, 1, 0, 1, -2],
["Kaksi tyttöä", "punaista palloa"],
),
(
"Erittäin vaarallinen leijona karkasi kiertävän sirkuksen eläintenkesyttäjältä",
["ADV", "ADJ", "NOUN", "VERB", "ADJ", "NOUN", "NOUN"],
["advmod", "amod", "nsubj", "ROOT", "amod", "nmod:poss", "obl"],
[1, 1, 1, 0, 1, 1, -3],
["Erittäin vaarallinen leijona", "kiertävän sirkuksen eläintenkesyttäjältä"],
),
(
"Leijona raidallisine tassuineen piileksii Porin kaupungin lähellä",
["NOUN", "ADJ", "NOUN", "VERB", "PROPN", "NOUN", "ADP"],
["nsubj", "amod", "nmod", "ROOT", "nmod:poss", "obl", "case"],
[3, 1, -2, 0, 1, -2, -1],
["Leijona raidallisine tassuineen", "Porin kaupungin"],
),
(
"Lounaalla nautittiin salaattia, maukasta kanaa ja raikasta vettä",
["NOUN", "VERB", "NOUN", "PUNCT", "ADJ", "NOUN", "CCONJ", "ADJ", "NOUN"],
["obl", "ROOT", "obj", "punct", "amod", "conj", "cc", "amod", "conj"],
[1, 0, -1, 2, 1, -3, 2, 1, -6],
["Lounaalla", "salaattia", "maukasta kanaa", "raikasta vettä"],
),
(
"Minua houkuttaa maalle muuttaminen talven jälkeen",
["PRON", "VERB", "NOUN", "NOUN", "NOUN", "ADP"],
["obj", "ROOT", "nmod", "nsubj", "obl", "case"],
[1, 0, 1, -2, -3, -1],
["maalle muuttaminen", "talven"],
),
(
"Päivän kohokohta oli vierailu museossa kummilasten kanssa",
["NOUN", "NOUN", "AUX", "NOUN", "NOUN", "NOUN", "ADP"],
["nmod:poss", "nsubj:cop", "cop", "ROOT", "nmod", "obl", "case"],
[1, 2, 1, 0, -1, -2, -1],
["Päivän kohokohta", "vierailu museossa", "kummilasten"],
),
(
"Yrittäjät maksoivat tuomioistuimen määräämät korvaukset",
["NOUN", "VERB", "NOUN", "VERB", "NOUN"],
["nsubj", "ROOT", "nsubj", "acl", "obj"],
[1, 0, 1, 1, -3],
["Yrittäjät", "tuomioistuimen", "korvaukset"],
),
(
"Julkisoikeudelliset tai niihin rinnastettavat saatavat ovat suoraan ulosottokelpoisia",
["ADJ", "CCONJ", "PRON", "VERB", "NOUN", "AUX", "ADV", "NOUN"],
["amod", "cc", "obl", "acl", "nsubj:cop", "cop", "advmod", "ROOT"],
[4, 3, 1, 1, 3, 2, 1, 0],
["Julkisoikeudelliset tai niihin rinnastettavat saatavat", "ulosottokelpoisia"],
),
(
"Se oli ala-arvoista käytöstä kaikilta oppilailta, myös valvojaoppilailta",
["PRON", "AUX", "ADJ", "NOUN", "PRON", "NOUN", "PUNCT", "ADV", "NOUN"],
["nsubj:cop", "cop", "amod", "ROOT", "det", "nmod", "punct", "advmod", "appos"],
[3, 2, 1, 0, 1, -2, 2, 1, -3],
["ala-arvoista käytöstä kaikilta oppilailta", "valvojaoppilailta"],
),
(
"Isä souti veneellä, jonka hän oli vuokrannut",
["NOUN", "VERB", "NOUN", "PUNCT", "PRON", "PRON", "AUX", "VERB"],
["nsubj", "ROOT", "obl", "punct", "obj", "nsubj", "aux", "acl:relcl"],
[1, 0, -1, 4, 3, 2, 1, -5],
["Isä", "veneellä"],
),
(
"Kirja, jonka poimin hyllystä, kertoo norsuista",
["NOUN", "PUNCT", "PRON", "VERB", "NOUN", "PUNCT", "VERB", "NOUN"],
["nsubj", "punct", "obj", "acl:relcl", "obl", "punct", "ROOT", "obl"],
[6, 2, 1, -3, -1, 1, 0, -1],
["Kirja", "hyllystä", "norsuista"],
),
(
"Huomenna on päivä, jota olemme odottaneet",
["NOUN", "AUX", "NOUN", "PUNCT", "PRON", "AUX", "VERB"],
["ROOT", "cop", "nsubj:cop", "punct", "obj", "aux", "acl:relcl"],
[0, -1, -2, 3, 2, 1, -4],
["Huomenna", "päivä"],
),
(
"Liikkuvuuden lisääminen on yksi korkeakoulutuksen keskeisistä kehittämiskohteista",
["NOUN", "NOUN", "AUX", "PRON", "NOUN", "ADJ", "NOUN"],
["nmod:gobj", "nsubj:cop", "cop", "ROOT", "nmod:poss", "amod", "nmod"],
[1, 2, 1, 0, 2, 1, -3],
[
"Liikkuvuuden lisääminen",
"korkeakoulutuksen keskeisistä kehittämiskohteista",
],
),
(
"Kaupalliset palvelut jätetään yksityisten palveluntarjoajien tarjottavaksi",
["ADJ", "NOUN", "VERB", "ADJ", "NOUN", "NOUN"],
["amod", "obj", "ROOT", "amod", "nmod:gsubj", "obl"],
[1, 1, 0, 1, 1, -3],
["Kaupalliset palvelut", "yksityisten palveluntarjoajien tarjottavaksi"],
),
(
"New York tunnetaan kaupunkina, joka ei koskaan nuku",
["PROPN", "PROPN", "VERB", "NOUN", "PUNCT", "PRON", "AUX", "ADV", "VERB"],
[
"obj",
"flat:name",
"ROOT",
"obl",
"punct",
"nsubj",
"aux",
"advmod",
"acl:relcl",
],
[2, -1, 0, -1, 4, 3, 2, 1, -5],
["New York", "kaupunkina"],
),
(
"Loput vihjeet saat herra Möttöseltä",
["NOUN", "NOUN", "VERB", "NOUN", "PROPN"],
["compound:nn", "obj", "ROOT", "compound:nn", "obj"],
[1, 1, 0, 1, -2],
["Loput vihjeet", "herra Möttöseltä"],
),
(
"mahdollisuus tukea muita päivystysyksiköitä",
["NOUN", "VERB", "PRON", "NOUN"],
["ROOT", "acl", "det", "obj"],
[0, -1, 1, -2],
["mahdollisuus", "päivystysyksiköitä"],
),
(
"sairaanhoitopiirit harjoittavat leikkaustoimintaa alueellaan useammassa sairaalassa",
["NOUN", "VERB", "NOUN", "NOUN", "ADJ", "NOUN"],
["nsubj", "ROOT", "obj", "obl", "amod", "obl"],
[1, 0, -1, -1, 1, -3],
[
"sairaanhoitopiirit",
"leikkaustoimintaa",
"alueellaan",
"useammassa sairaalassa",
],
),
(
"Lain mukaan varhaiskasvatus on suunnitelmallista toimintaa",
["NOUN", "ADP", "NOUN", "AUX", "ADJ", "NOUN"],
["obl", "case", "nsubj:cop", "cop", "amod", "ROOT"],
[5, -1, 3, 2, 1, 0],
["Lain", "varhaiskasvatus", "suunnitelmallista toimintaa"],
),
]
def test_noun_chunks_is_parsed(fi_tokenizer):
"""Test that noun_chunks raises Value Error for 'fi' language if Doc is not parsed.
To check this test, we're constructing a Doc
with a new Vocab here and forcing is_parsed to 'False'
to make sure the noun chunks don't run.
"""
doc = fi_tokenizer("Tämä on testi")
with pytest.raises(ValueError):
list(doc.noun_chunks)
@pytest.mark.parametrize(
"text,pos,deps,heads,expected_noun_chunks", FI_NP_TEST_EXAMPLES
)
def test_fi_noun_chunks(fi_tokenizer, text, pos, deps, heads, expected_noun_chunks):
tokens = fi_tokenizer(text)
assert len(heads) == len(pos)
doc = Doc(
tokens.vocab,
words=[t.text for t in tokens],
heads=[head + i for i, head in enumerate(heads)],
deps=deps,
pos=pos,
)
noun_chunks = list(doc.noun_chunks)
assert len(noun_chunks) == len(expected_noun_chunks)
for i, np in enumerate(noun_chunks):
assert np.text == expected_noun_chunks[i]
| 7,179 | 36.789474 | 96 | py |
spaCy | spaCy-master/spacy/tests/lang/fi/test_text.py | import pytest
@pytest.mark.parametrize(
"text,match",
[
("10", True),
("1", True),
("10000", True),
("10,00", True),
("-999,0", True),
("yksi", True),
("kolmetoista", True),
("viisikymmentä", True),
("tuhat", True),
("1/2", True),
("hevonen", False),
(",", False),
],
)
def test_fi_lex_attrs_like_number(fi_tokenizer, text, match):
tokens = fi_tokenizer(text)
assert len(tokens) == 1
assert tokens[0].like_num == match
| 541 | 20.68 | 61 | py |
spaCy | spaCy-master/spacy/tests/lang/fi/test_tokenizer.py | import pytest
ABBREVIATION_TESTS = [
(
"Hyvää uutta vuotta t. siht. Niemelä!",
["Hyvää", "uutta", "vuotta", "t.", "siht.", "Niemelä", "!"],
),
("Paino on n. 2.2 kg", ["Paino", "on", "n.", "2.2", "kg"]),
(
"Vuonna 1 eaa. tapahtui kauheita.",
["Vuonna", "1", "eaa.", "tapahtui", "kauheita", "."],
),
]
HYPHENATED_TESTS = [
(
"1700-luvulle sijoittuva taide-elokuva Wikimedia-säätiön Varsinais-Suomen",
[
"1700-luvulle",
"sijoittuva",
"taide-elokuva",
"Wikimedia-säätiön",
"Varsinais-Suomen",
],
)
]
ABBREVIATION_INFLECTION_TESTS = [
(
"VTT:ssa ennen v:ta 2010 suoritetut mittaukset",
["VTT:ssa", "ennen", "v:ta", "2010", "suoritetut", "mittaukset"],
),
("ALV:n osuus on 24 %.", ["ALV:n", "osuus", "on", "24", "%", "."]),
("Hiihtäjä oli kilpailun 14:s.", ["Hiihtäjä", "oli", "kilpailun", "14:s", "."]),
("EU:n toimesta tehtiin jotain.", ["EU:n", "toimesta", "tehtiin", "jotain", "."]),
]
CONTRACTION_TESTS = [
(
"Päätimme ettemme tule.",
["Päätimme", "ett", "emme", "tule", "."],
["päätimme", "että", "emme", "tule", "."],
),
(
"Miksei puhuttaisi?",
["Miks", "ei", "puhuttaisi", "?"],
["miksi", "ei", "puhuttaisi", "?"],
),
(
"He tottelivat vaikkeivat halunneet",
["He", "tottelivat", "vaikk", "eivat", "halunneet"],
["he", "tottelivat", "vaikka", "eivät", "halunneet"],
),
]
@pytest.mark.parametrize("text,expected_tokens", ABBREVIATION_TESTS)
def test_fi_tokenizer_abbreviations(fi_tokenizer, text, expected_tokens):
tokens = fi_tokenizer(text)
token_list = [token.text for token in tokens if not token.is_space]
assert expected_tokens == token_list
@pytest.mark.parametrize("text,expected_tokens", HYPHENATED_TESTS)
def test_fi_tokenizer_hyphenated_words(fi_tokenizer, text, expected_tokens):
tokens = fi_tokenizer(text)
token_list = [token.text for token in tokens if not token.is_space]
assert expected_tokens == token_list
@pytest.mark.parametrize("text,expected_tokens", ABBREVIATION_INFLECTION_TESTS)
def test_fi_tokenizer_abbreviation_inflections(fi_tokenizer, text, expected_tokens):
tokens = fi_tokenizer(text)
token_list = [token.text for token in tokens if not token.is_space]
assert expected_tokens == token_list
@pytest.mark.parametrize("text,expected_tokens,expected_norms", CONTRACTION_TESTS)
def test_fi_tokenizer_contractions(fi_tokenizer, text, expected_tokens, expected_norms):
tokens = fi_tokenizer(text)
token_list = [token.text for token in tokens if not token.is_space]
norm_list = [token.norm_ for token in tokens if not token.is_space]
assert expected_tokens == token_list
assert expected_norms == norm_list
| 2,875 | 32.835294 | 88 | py |
spaCy | spaCy-master/spacy/tests/lang/fr/__init__.py | 0 | 0 | 0 | py |
|
spaCy | spaCy-master/spacy/tests/lang/fr/test_exceptions.py | import pytest
@pytest.mark.parametrize(
"text",
[
"aujourd'hui",
"Aujourd'hui",
"prud'hommes",
"prud’hommal",
"audio-numérique",
"Audio-numérique",
"entr'amis",
"entr'abat",
"rentr'ouvertes",
"grand'hamien",
"Châteauneuf-la-Forêt",
"Château-Guibert",
"refox-trottâmes",
# u"K-POP",
# u"K-Pop",
# u"K-pop",
"z'yeutes",
"black-outeront",
"états-unienne",
"courtes-pattes",
"court-pattes",
"saut-de-ski",
"Écourt-Saint-Quentin",
"Bout-de-l'Îlien",
"pet-en-l'air",
],
)
def test_fr_tokenizer_infix_exceptions(fr_tokenizer, text):
tokens = fr_tokenizer(text)
assert len(tokens) == 1
@pytest.mark.parametrize("text", ["janv.", "juill.", "Dr.", "av.", "sept."])
def test_fr_tokenizer_handles_abbr(fr_tokenizer, text):
tokens = fr_tokenizer(text)
assert len(tokens) == 1
def test_fr_tokenizer_handles_exc_in_text(fr_tokenizer):
text = "Je suis allé au mois de janv. aux prud’hommes."
tokens = fr_tokenizer(text)
assert len(tokens) == 10
assert tokens[6].text == "janv."
assert tokens[8].text == "prud’hommes"
def test_fr_tokenizer_handles_exc_in_text_2(fr_tokenizer):
text = "Cette après-midi, je suis allé dans un restaurant italo-mexicain."
tokens = fr_tokenizer(text)
assert len(tokens) == 11
assert tokens[1].text == "après-midi"
assert tokens[9].text == "italo-mexicain"
def test_fr_tokenizer_handles_title(fr_tokenizer):
text = "N'est-ce pas génial?"
tokens = fr_tokenizer(text)
assert len(tokens) == 6
assert tokens[0].text == "N'"
assert tokens[1].text == "est"
assert tokens[2].text == "-ce"
def test_fr_tokenizer_handles_title_2(fr_tokenizer):
text = "Est-ce pas génial?"
tokens = fr_tokenizer(text)
assert len(tokens) == 5
assert tokens[0].text == "Est"
assert tokens[1].text == "-ce"
def test_fr_tokenizer_handles_title_3(fr_tokenizer):
text = "Qu'est-ce que tu fais?"
tokens = fr_tokenizer(text)
assert len(tokens) == 7
assert tokens[0].text == "Qu'"
| 2,198 | 25.493976 | 78 | py |
spaCy | spaCy-master/spacy/tests/lang/fr/test_noun_chunks.py | import pytest
from spacy.tokens import Doc
# fmt: off
@pytest.mark.parametrize(
"words,heads,deps,pos,chunk_offsets",
[
# determiner + noun
# un nom -> un nom
(
["un", "nom"],
[1, 1],
["det", "ROOT"],
["DET", "NOUN"],
[(0, 2)],
),
# determiner + noun starting with vowel
# l'heure -> l'heure
(
["l'", "heure"],
[1, 1],
["det", "ROOT"],
["DET", "NOUN"],
[(0, 2)],
),
# determiner + plural noun
# les romans -> les romans
(
["les", "romans"],
[1, 1],
["det", "ROOT"],
["DET", "NOUN"],
[(0, 2)],
),
# det + adj + noun
# Le vieux Londres -> Le vieux Londres
(
['Les', 'vieux', 'Londres'],
[2, 2, 2],
["det", "amod", "ROOT"],
["DET", "ADJ", "NOUN"],
[(0,3)]
),
# det + noun + adj
# le nom propre -> le nom propre a proper noun
(
["le", "nom", "propre"],
[1, 1, 1],
["det", "ROOT", "amod"],
["DET", "NOUN", "ADJ"],
[(0, 3)],
),
# det + noun + adj plural
# Les chiens bruns -> les chiens bruns
(
["Les", "chiens", "bruns"],
[1, 1, 1],
["det", "ROOT", "amod"],
["DET", "NOUN", "ADJ"],
[(0, 3)],
),
# multiple adjectives: one adj before the noun, one adj after the noun
# un nouveau film intéressant -> un nouveau film intéressant
(
["un", "nouveau", "film", "intéressant"],
[2, 2, 2, 2],
["det", "amod", "ROOT", "amod"],
["DET", "ADJ", "NOUN", "ADJ"],
[(0,4)]
),
# multiple adjectives, both adjs after the noun
# une personne intelligente et drôle -> une personne intelligente et drôle
(
["une", "personne", "intelligente", "et", "drôle"],
[1, 1, 1, 4, 2],
["det", "ROOT", "amod", "cc", "conj"],
["DET", "NOUN", "ADJ", "CCONJ", "ADJ"],
[(0,5)]
),
# relative pronoun
# un bus qui va au ville -> un bus, qui, ville
(
['un', 'bus', 'qui', 'va', 'au', 'ville'],
[1, 1, 3, 1, 5, 3],
['det', 'ROOT', 'nsubj', 'acl:relcl', 'case', 'obl:arg'],
['DET', 'NOUN', 'PRON', 'VERB', 'ADP', 'NOUN'],
[(0,2), (2,3), (5,6)]
),
# relative subclause
# Voilà la maison que nous voulons acheter -> la maison, nous That's the house that we want to buy.
(
['Voilà', 'la', 'maison', 'que', 'nous', 'voulons', 'acheter'],
[0, 2, 0, 5, 5, 2, 5],
['ROOT', 'det', 'obj', 'mark', 'nsubj', 'acl:relcl', 'xcomp'],
['VERB', 'DET', 'NOUN', 'SCONJ', 'PRON', 'VERB', 'VERB'],
[(1,3), (4,5)]
),
# Person name and title by flat
# Louis XIV -> Louis XIV
(
["Louis", "XIV"],
[0, 0],
["ROOT", "flat:name"],
["PROPN", "PROPN"],
[(0,2)]
),
# Organization name by flat
# Nations Unies -> Nations Unies
(
["Nations", "Unies"],
[0, 0],
["ROOT", "flat:name"],
["PROPN", "PROPN"],
[(0,2)]
),
# Noun compound, person name created by two flats
# Louise de Bratagne -> Louise de Bratagne
(
["Louise", "de", "Bratagne"],
[0, 0, 0],
["ROOT", "flat:name", "flat:name"],
["PROPN", "PROPN", "PROPN"],
[(0,3)]
),
# Noun compound, person name created by two flats
# Louis François Joseph -> Louis François Joseph
(
["Louis", "François", "Joseph"],
[0, 0, 0],
["ROOT", "flat:name", "flat:name"],
["PROPN", "PROPN", "PROPN"],
[(0,3)]
),
# one determiner + one noun + one adjective qualified by an adverb
# quelques agriculteurs très riches -> quelques agriculteurs très riches
(
["quelques", "agriculteurs", "très", "riches"],
[1, 1, 3, 1],
['det', 'ROOT', 'advmod', 'amod'],
['DET', 'NOUN', 'ADV', 'ADJ'],
[(0,4)]
),
# Two NPs conjuncted
# Il a un chien et un chat -> Il, un chien, un chat
(
['Il', 'a', 'un', 'chien', 'et', 'un', 'chat'],
[1, 1, 3, 1, 6, 6, 3],
['nsubj', 'ROOT', 'det', 'obj', 'cc', 'det', 'conj'],
['PRON', 'VERB', 'DET', 'NOUN', 'CCONJ', 'DET', 'NOUN'],
[(0,1), (2,4), (5,7)]
),
# Two NPs together
# l'écrivain brésilien Aníbal Machado -> l'écrivain brésilien, Aníbal Machado
(
["l'", 'écrivain', 'brésilien', 'Aníbal', 'Machado'],
[1, 1, 1, 1, 3],
['det', 'ROOT', 'amod', 'appos', 'flat:name'],
['DET', 'NOUN', 'ADJ', 'PROPN', 'PROPN'],
[(0, 3), (3, 5)]
),
# nmod relation between NPs
# la destruction de la ville -> la destruction, la ville
(
['la', 'destruction', 'de', 'la', 'ville'],
[1, 1, 4, 4, 1],
['det', 'ROOT', 'case', 'det', 'nmod'],
['DET', 'NOUN', 'ADP', 'DET', 'NOUN'],
[(0,2), (3,5)]
),
# nmod relation between NPs
# Archiduchesse d’Autriche -> Archiduchesse, Autriche
(
['Archiduchesse', 'd’', 'Autriche'],
[0, 2, 0],
['ROOT', 'case', 'nmod'],
['NOUN', 'ADP', 'PROPN'],
[(0,1), (2,3)]
),
# Compounding by nmod, several NPs chained together
# la première usine de drogue du gouvernement -> la première usine, drogue, gouvernement
(
["la", "première", "usine", "de", "drogue", "du", "gouvernement"],
[2, 2, 2, 4, 2, 6, 2],
['det', 'amod', 'ROOT', 'case', 'nmod', 'case', 'nmod'],
['DET', 'ADJ', 'NOUN', 'ADP', 'NOUN', 'ADP', 'NOUN'],
[(0, 3), (4, 5), (6, 7)]
),
# several NPs
# Traduction du rapport de Susana -> Traduction, rapport, Susana
(
['Traduction', 'du', 'raport', 'de', 'Susana'],
[0, 2, 0, 4, 2],
['ROOT', 'case', 'nmod', 'case', 'nmod'],
['NOUN', 'ADP', 'NOUN', 'ADP', 'PROPN'],
[(0,1), (2,3), (4,5)]
),
# Several NPs
# Le gros chat de Susana et son amie -> Le gros chat, Susana, son amie
(
['Le', 'gros', 'chat', 'de', 'Susana', 'et', 'son', 'amie'],
[2, 2, 2, 4, 2, 7, 7, 2],
['det', 'amod', 'ROOT', 'case', 'nmod', 'cc', 'det', 'conj'],
['DET', 'ADJ', 'NOUN', 'ADP', 'PROPN', 'CCONJ', 'DET', 'NOUN'],
[(0,3), (4,5), (6,8)]
),
# Passive subject
# Les nouvelles dépenses sont alimentées par le grand compte bancaire de Clinton -> Les nouvelles dépenses, le grand compte bancaire, Clinton
(
['Les', 'nouvelles', 'dépenses', 'sont', 'alimentées', 'par', 'le', 'grand', 'compte', 'bancaire', 'de', 'Clinton'],
[2, 2, 4, 4, 4, 8, 8, 8, 4, 8, 11, 8],
['det', 'amod', 'nsubj:pass', 'aux:pass', 'ROOT', 'case', 'det', 'amod', 'obl:agent', 'amod', 'case', 'nmod'],
['DET', 'ADJ', 'NOUN', 'AUX', 'VERB', 'ADP', 'DET', 'ADJ', 'NOUN', 'ADJ', 'ADP', 'PROPN'],
[(0, 3), (6, 10), (11, 12)]
)
],
)
# fmt: on
def test_fr_noun_chunks(fr_vocab, words, heads, deps, pos, chunk_offsets):
doc = Doc(fr_vocab, words=words, heads=heads, deps=deps, pos=pos)
assert [(c.start, c.end) for c in doc.noun_chunks] == chunk_offsets
def test_noun_chunks_is_parsed_fr(fr_tokenizer):
"""Test that noun_chunks raises Value Error for 'fr' language if Doc is not parsed."""
doc = fr_tokenizer("Je suis allé à l'école")
with pytest.raises(ValueError):
list(doc.noun_chunks)
| 8,316 | 34.849138 | 149 | py |
spaCy | spaCy-master/spacy/tests/lang/fr/test_prefix_suffix_infix.py | import pytest
from spacy.lang.char_classes import ALPHA
from spacy.lang.punctuation import TOKENIZER_INFIXES
from spacy.language import BaseDefaults, Language
@pytest.mark.issue(768)
@pytest.mark.parametrize(
"text,expected_tokens", [("l'avion", ["l'", "avion"]), ("j'ai", ["j'", "ai"])]
)
def test_issue768(text, expected_tokens):
"""Allow zero-width 'infix' token during the tokenization process."""
SPLIT_INFIX = r"(?<=[{a}]\')(?=[{a}])".format(a=ALPHA)
class FrenchTest(Language):
class Defaults(BaseDefaults):
infixes = TOKENIZER_INFIXES + [SPLIT_INFIX]
fr_tokenizer_w_infix = FrenchTest().tokenizer
tokens = fr_tokenizer_w_infix(text)
assert len(tokens) == 2
assert [t.text for t in tokens] == expected_tokens
| 773 | 31.25 | 82 | py |
spaCy | spaCy-master/spacy/tests/lang/fr/test_text.py | import pytest
from spacy.lang.fr.lex_attrs import like_num
def test_tokenizer_handles_long_text(fr_tokenizer):
text = """L'histoire du TAL commence dans les années 1950, bien que l'on puisse \
trouver des travaux antérieurs. En 1950, Alan Turing éditait un article \
célèbre sous le titre « Computing machinery and intelligence » qui propose ce \
qu'on appelle à présent le test de Turing comme critère d'intelligence. \
Ce critère dépend de la capacité d'un programme informatique de personnifier \
un humain dans une conversation écrite en temps réel, de façon suffisamment \
convaincante que l'interlocuteur humain ne peut distinguer sûrement — sur la \
base du seul contenu de la conversation — s'il interagit avec un programme \
ou avec un autre vrai humain."""
tokens = fr_tokenizer(text)
assert len(tokens) == 113
@pytest.mark.parametrize("word", ["onze", "onzième"])
def test_fr_lex_attrs_capitals(word):
assert like_num(word)
assert like_num(word.upper())
| 990 | 40.291667 | 85 | py |
spaCy | spaCy-master/spacy/tests/lang/ga/__init__.py | 0 | 0 | 0 | py |
|
spaCy | spaCy-master/spacy/tests/lang/ga/test_tokenizer.py | import pytest
# fmt: off
GA_TOKEN_EXCEPTION_TESTS = [
("Niall Ó Domhnaill, Rialtas na hÉireann 1977 (lch. 600).", ["Niall", "Ó", "Domhnaill", ",", "Rialtas", "na", "hÉireann", "1977", "(", "lch.", "600", ")", "."]),
("Daoine a bhfuil Gaeilge acu, m.sh. tusa agus mise", ["Daoine", "a", "bhfuil", "Gaeilge", "acu", ",", "m.sh.", "tusa", "agus", "mise"])
]
# fmt: on
@pytest.mark.parametrize("text,expected_tokens", GA_TOKEN_EXCEPTION_TESTS)
def test_ga_tokenizer_handles_exception_cases(ga_tokenizer, text, expected_tokens):
tokens = ga_tokenizer(text)
token_list = [token.text for token in tokens if not token.is_space]
assert expected_tokens == token_list
| 680 | 41.5625 | 165 | py |
spaCy | spaCy-master/spacy/tests/lang/grc/__init__.py | 0 | 0 | 0 | py |
|
spaCy | spaCy-master/spacy/tests/lang/grc/test_text.py | import pytest
@pytest.mark.parametrize(
"text,match",
[
("ι", True),
("α", True),
("ϟα", True),
("ἑκατόν", True),
("ἐνακόσια", True),
("δισχίλια", True),
("μύρια", True),
("εἷς", True),
("λόγος", False),
(",", False),
("λβ", True),
],
)
def test_lex_attrs_like_number(grc_tokenizer, text, match):
tokens = grc_tokenizer(text)
assert len(tokens) == 1
assert tokens[0].like_num == match
| 499 | 19.833333 | 59 | py |
spaCy | spaCy-master/spacy/tests/lang/grc/test_tokenizer.py | import pytest
# fmt: off
GRC_TOKEN_EXCEPTION_TESTS = [
("τὸ 〈τῆς〉 φιλοσοφίας ἔργον ἔνιοί φασιν ἀπὸ ⟦βαρβάρων⟧ ἄρξαι.", ["τὸ", "〈", "τῆς", "〉", "φιλοσοφίας", "ἔργον", "ἔνιοί", "φασιν", "ἀπὸ", "⟦", "βαρβάρων", "⟧", "ἄρξαι", "."]),
("τὴν δὲ τῶν Αἰγυπτίων φιλοσοφίαν εἶναι τοιαύτην περί τε †θεῶν† καὶ ὑπὲρ δικαιοσύνης.", ["τὴν", "δὲ", "τῶν", "Αἰγυπτίων", "φιλοσοφίαν", "εἶναι", "τοιαύτην", "περί", "τε", "†", "θεῶν", "†", "καὶ", "ὑπὲρ", "δικαιοσύνης", "."]),
("⸏πόσις δ' Ἐρεχθεύς ἐστί μοι σεσωσμένος⸏", ["⸏", "πόσις", "δ'", "Ἐρεχθεύς", "ἐστί", "μοι", "σεσωσμένος", "⸏"]),
("⸏ὔπνον ἴδωμεν⸎", ["⸏", "ὔπνον", "ἴδωμεν", "⸎"]),
]
# fmt: on
@pytest.mark.parametrize("text,expected_tokens", GRC_TOKEN_EXCEPTION_TESTS)
def test_grc_tokenizer(grc_tokenizer, text, expected_tokens):
tokens = grc_tokenizer(text)
token_list = [token.text for token in tokens if not token.is_space]
assert expected_tokens == token_list
| 934 | 50.944444 | 229 | py |
spaCy | spaCy-master/spacy/tests/lang/gu/__init__.py | 0 | 0 | 0 | py |
|
spaCy | spaCy-master/spacy/tests/lang/gu/test_text.py | import pytest
def test_gu_tokenizer_handlers_long_text(gu_tokenizer):
text = """પશ્ચિમ ભારતમાં આવેલું ગુજરાત રાજ્ય જે વ્યક્તિઓની માતૃભૂમિ છે"""
tokens = gu_tokenizer(text)
assert len(tokens) == 9
@pytest.mark.parametrize(
"text,length",
[("ગુજરાતીઓ ખાવાના શોખીન માનવામાં આવે છે", 6), ("ખેતરની ખેડ કરવામાં આવે છે.", 5)],
)
def test_gu_tokenizer_handles_cnts(gu_tokenizer, text, length):
tokens = gu_tokenizer(text)
assert len(tokens) == length
| 475 | 27 | 86 | py |
spaCy | spaCy-master/spacy/tests/lang/he/__init__.py | 0 | 0 | 0 | py |
|
spaCy | spaCy-master/spacy/tests/lang/he/test_tokenizer.py | import pytest
from spacy.lang.he.lex_attrs import like_num
@pytest.mark.parametrize(
"text,expected_tokens",
[("פייתון היא שפת תכנות דינמית", ["פייתון", "היא", "שפת", "תכנות", "דינמית"])],
)
def test_he_tokenizer_handles_abbreviation(he_tokenizer, text, expected_tokens):
tokens = he_tokenizer(text)
token_list = [token.text for token in tokens if not token.is_space]
assert expected_tokens == token_list
@pytest.mark.parametrize(
"text,expected_tokens",
[
(
"עקבת אחריו בכל רחבי המדינה.",
["עקבת", "אחריו", "בכל", "רחבי", "המדינה", "."],
),
(
"עקבת אחריו בכל רחבי המדינה?",
["עקבת", "אחריו", "בכל", "רחבי", "המדינה", "?"],
),
(
"עקבת אחריו בכל רחבי המדינה!",
["עקבת", "אחריו", "בכל", "רחבי", "המדינה", "!"],
),
(
"עקבת אחריו בכל רחבי המדינה..",
["עקבת", "אחריו", "בכל", "רחבי", "המדינה", ".."],
),
(
"עקבת אחריו בכל רחבי המדינה...",
["עקבת", "אחריו", "בכל", "רחבי", "המדינה", "..."],
),
],
)
def test_he_tokenizer_handles_punct(he_tokenizer, text, expected_tokens):
tokens = he_tokenizer(text)
assert expected_tokens == [token.text for token in tokens]
@pytest.mark.parametrize(
"text,match",
[
("10", True),
("1", True),
("10,000", True),
("10,00", True),
("999.0", True),
("אחד", True),
("שתיים", True),
("מליון", True),
("כלב", False),
(",", False),
("1/2", True),
],
)
def test_lex_attrs_like_number(he_tokenizer, text, match):
tokens = he_tokenizer(text)
assert len(tokens) == 1
assert tokens[0].like_num == match
@pytest.mark.parametrize("word", ["שלישי", "מליון", "עשירי", "מאה", "עשר", "אחד עשר"])
def test_he_lex_attrs_like_number_for_ordinal(word):
assert like_num(word)
| 1,947 | 26.43662 | 86 | py |
spaCy | spaCy-master/spacy/tests/lang/hi/__init__.py | 0 | 0 | 0 | py |
|
spaCy | spaCy-master/spacy/tests/lang/hi/test_lex_attrs.py | import pytest
from spacy.lang.hi.lex_attrs import like_num, norm
def test_hi_tokenizer_handles_long_text(hi_tokenizer):
text = """
ये कहानी 1900 के दशक की है। कौशल्या (स्मिता जयकर) को पता चलता है कि उसका
छोटा बेटा, देवदास (शाहरुख खान) वापस घर आ रहा है। देवदास 10 साल पहले कानून की
पढ़ाई करने के लिए इंग्लैंड गया था। उसके लौटने की खुशी में ये बात कौशल्या अपनी पड़ोस
में रहने वाली सुमित्रा (किरण खेर) को भी बता देती है। इस खबर से वो भी खुश हो जाती है।
"""
tokens = hi_tokenizer(text)
assert len(tokens) == 86
@pytest.mark.parametrize(
"word,word_norm",
[
("चलता", "चल"),
("पढ़ाई", "पढ़"),
("देती", "दे"),
("जाती", "ज"),
("मुस्कुराकर", "मुस्कुर"),
],
)
def test_hi_norm(word, word_norm):
assert norm(word) == word_norm
@pytest.mark.parametrize(
"word",
["१९८७", "1987", "१२,२६७", "उन्नीस", "पाँच", "नवासी", "५/१०"],
)
def test_hi_like_num(word):
assert like_num(word)
@pytest.mark.parametrize(
"word",
["पहला", "तृतीय", "निन्यानवेवाँ", "उन्नीस", "तिहत्तरवाँ", "छत्तीसवाँ"],
)
def test_hi_like_num_ordinal_words(word):
assert like_num(word)
| 1,138 | 24.311111 | 84 | py |
spaCy | spaCy-master/spacy/tests/lang/hi/test_text.py | import pytest
from spacy.lang.hi import Hindi
@pytest.mark.issue(3625)
def test_issue3625():
"""Test that default punctuation rules applies to hindi unicode characters"""
nlp = Hindi()
doc = nlp("hi. how हुए. होटल, होटल")
expected = ["hi", ".", "how", "हुए", ".", "होटल", ",", "होटल"]
assert [token.text for token in doc] == expected
| 357 | 26.538462 | 81 | py |
spaCy | spaCy-master/spacy/tests/lang/hr/__init__.py | 0 | 0 | 0 | py |
|
spaCy | spaCy-master/spacy/tests/lang/hr/test_text.py | import pytest
def test_long_text(hr_tokenizer):
# Excerpt: European Convention on Human Rights
text = """
uzimajući u obzir da ta deklaracija nastoji osigurati opće i djelotvorno
priznanje i poštovanje u njoj proglašenih prava;
uzimajući u obzir da je cilj Vijeća Europe postizanje većeg jedinstva
njegovih članica, i da je jedan od načina postizanja toga cilja
očuvanje i daljnje ostvarivanje ljudskih prava i temeljnih sloboda;
potvrđujući svoju duboku privrženost tim temeljnim slobodama
koje su osnova pravde i mira u svijetu i koje su najbolje zaštićene
istinskom političkom demokracijom s jedne strane te zajedničkim
razumijevanjem i poštovanjem ljudskih prava o kojima te slobode
ovise s druge strane;
"""
tokens = hr_tokenizer(text)
assert len(tokens) == 105
@pytest.mark.xfail
def test_ordinal_number(hr_tokenizer):
text = "10. prosinca 1948"
tokens = hr_tokenizer(text)
assert len(tokens) == 3
| 936 | 33.703704 | 72 | py |
spaCy | spaCy-master/spacy/tests/lang/hr/test_tokenizer.py | import pytest
HR_BASIC_TOKENIZATION_TESTS = [
(
"Nitko se ne smije podvrgnuti mučenju ni nečovječnom ili "
"ponižavajućem postupanju ili kazni.",
[
"Nitko",
"se",
"ne",
"smije",
"podvrgnuti",
"mučenju",
"ni",
"nečovječnom",
"ili",
"ponižavajućem",
"postupanju",
"ili",
"kazni",
".",
],
),
]
@pytest.mark.parametrize("text,expected_tokens", HR_BASIC_TOKENIZATION_TESTS)
def test_hr_tokenizer_basic(hr_tokenizer, text, expected_tokens):
tokens = hr_tokenizer(text)
token_list = [token.text for token in tokens if not token.is_space]
assert expected_tokens == token_list
| 791 | 23.75 | 77 | py |
spaCy | spaCy-master/spacy/tests/lang/hsb/__init__.py | 0 | 0 | 0 | py |
|
spaCy | spaCy-master/spacy/tests/lang/hsb/test_text.py | import pytest
@pytest.mark.parametrize(
"text,match",
[
("10", True),
("1", True),
("10,000", True),
("10,00", True),
("jedne", True),
("dwanaće", True),
("milion", True),
("sto", True),
("załožene", False),
("wona", False),
("powšitkownej", False),
(",", False),
("1/2", True),
],
)
def test_lex_attrs_like_number(hsb_tokenizer, text, match):
tokens = hsb_tokenizer(text)
assert len(tokens) == 1
assert tokens[0].like_num == match
| 562 | 20.653846 | 59 | py |
spaCy | spaCy-master/spacy/tests/lang/hsb/test_tokenizer.py | import pytest
HSB_BASIC_TOKENIZATION_TESTS = [
(
"Hornjoserbšćina wobsteji resp. wobsteješe z wjacorych dialektow, kotrež so zdźěla chětro wot so rozeznawachu.",
[
"Hornjoserbšćina",
"wobsteji",
"resp.",
"wobsteješe",
"z",
"wjacorych",
"dialektow",
",",
"kotrež",
"so",
"zdźěla",
"chětro",
"wot",
"so",
"rozeznawachu",
".",
],
),
]
@pytest.mark.parametrize("text,expected_tokens", HSB_BASIC_TOKENIZATION_TESTS)
def test_hsb_tokenizer_basic(hsb_tokenizer, text, expected_tokens):
tokens = hsb_tokenizer(text)
token_list = [token.text for token in tokens if not token.is_space]
assert expected_tokens == token_list
| 852 | 24.848485 | 120 | py |
spaCy | spaCy-master/spacy/tests/lang/hu/__init__.py | 0 | 0 | 0 | py |
|
spaCy | spaCy-master/spacy/tests/lang/hu/test_tokenizer.py | import pytest
DEFAULT_TESTS = [
("N. kormányzósági\nszékhely.", ["N.", "kormányzósági", "székhely", "."]),
pytest.param(
"A .hu egy tld.", ["A", ".hu", "egy", "tld", "."], marks=pytest.mark.xfail()
),
("Az egy.ketto pelda.", ["Az", "egy.ketto", "pelda", "."]),
("A pl. rovidites.", ["A", "pl.", "rovidites", "."]),
("A S.M.A.R.T. szo.", ["A", "S.M.A.R.T.", "szo", "."]),
pytest.param("A .hu.", ["A", ".hu", "."], marks=pytest.mark.xfail()),
("Az egy.ketto.", ["Az", "egy.ketto", "."]),
("A pl.", ["A", "pl."]),
("A S.M.A.R.T.", ["A", "S.M.A.R.T."]),
("Egy..ket.", ["Egy", "..", "ket", "."]),
("Valami... van.", ["Valami", "...", "van", "."]),
("Valami ...van...", ["Valami", "...", "van", "..."]),
("Valami...", ["Valami", "..."]),
("Valami ...", ["Valami", "..."]),
("Valami ... más.", ["Valami", "...", "más", "."]),
("Soha nem lesz!", ["Soha", "nem", "lesz", "!"]),
("Soha nem lesz?", ["Soha", "nem", "lesz", "?"]),
]
HYPHEN_TESTS = [
(
"Egy -nak, -jaiért, -magyar, bel- van.",
["Egy", "-nak", ",", "-jaiért", ",", "-magyar", ",", "bel-", "van", "."],
),
("Szabolcs-Szatmár-Bereg megye", ["Szabolcs-Szatmár-Bereg", "megye"]),
("Egy -nak.", ["Egy", "-nak", "."]),
("Egy bel-.", ["Egy", "bel-", "."]),
("Dinnye-domb-.", ["Dinnye-domb-", "."]),
("Ezen -e elcsatangolt.", ["Ezen", "-e", "elcsatangolt", "."]),
("Lakik-e", ["Lakik", "-e"]),
("A--B", ["A", "--", "B"]),
("Lakik-e?", ["Lakik", "-e", "?"]),
("Lakik-e.", ["Lakik", "-e", "."]),
("Lakik-e...", ["Lakik", "-e", "..."]),
("Lakik-e... van.", ["Lakik", "-e", "...", "van", "."]),
("Lakik-e van?", ["Lakik", "-e", "van", "?"]),
("Lakik-elem van?", ["Lakik-elem", "van", "?"]),
("Az életbiztosításáról- egy.", ["Az", "életbiztosításáról-", "egy", "."]),
("Van lakik-elem.", ["Van", "lakik-elem", "."]),
("A 7-es busz?", ["A", "7-es", "busz", "?"]),
("A 7-es?", ["A", "7-es", "?"]),
("A 7-es.", ["A", "7-es", "."]),
("Ez (lakik)-e?", ["Ez", "(", "lakik", ")", "-e", "?"]),
("A %-sal.", ["A", "%-sal", "."]),
("A $-sal.", ["A", "$-sal", "."]),
("A CD-ROM-okrol.", ["A", "CD-ROM-okrol", "."]),
]
NUMBER_TESTS = [
("A 2b van.", ["A", "2b", "van", "."]),
("A 2b-ben van.", ["A", "2b-ben", "van", "."]),
("A 2b.", ["A", "2b", "."]),
("A 2b-ben.", ["A", "2b-ben", "."]),
("A 3.b van.", ["A", "3.b", "van", "."]),
("A 3.b-ben van.", ["A", "3.b-ben", "van", "."]),
("A 3.b.", ["A", "3.b", "."]),
("A 3.b-ben.", ["A", "3.b-ben", "."]),
("A 1:20:36.7 van.", ["A", "1:20:36.7", "van", "."]),
("A 1:20:36.7-ben van.", ["A", "1:20:36.7-ben", "van", "."]),
("A 1:20:36.7-ben.", ["A", "1:20:36.7-ben", "."]),
("A 1:35 van.", ["A", "1:35", "van", "."]),
("A 1:35-ben van.", ["A", "1:35-ben", "van", "."]),
("A 1:35-ben.", ["A", "1:35-ben", "."]),
("A 1.35 van.", ["A", "1.35", "van", "."]),
("A 1.35-ben van.", ["A", "1.35-ben", "van", "."]),
("A 1.35-ben.", ["A", "1.35-ben", "."]),
("A 4:01,95 van.", ["A", "4:01,95", "van", "."]),
("A 4:01,95-ben van.", ["A", "4:01,95-ben", "van", "."]),
("A 4:01,95-ben.", ["A", "4:01,95-ben", "."]),
("A 10--12 van.", ["A", "10--12", "van", "."]),
("A 10--12-ben van.", ["A", "10--12-ben", "van", "."]),
("A 10--12-ben.", ["A", "10--12-ben", "."]),
("A 10‐12 van.", ["A", "10‐12", "van", "."]),
("A 10‐12-ben van.", ["A", "10‐12-ben", "van", "."]),
("A 10‐12-ben.", ["A", "10‐12-ben", "."]),
("A 10‑12 van.", ["A", "10‑12", "van", "."]),
("A 10‑12-ben van.", ["A", "10‑12-ben", "van", "."]),
("A 10‑12-ben.", ["A", "10‑12-ben", "."]),
("A 10‒12 van.", ["A", "10‒12", "van", "."]),
("A 10‒12-ben van.", ["A", "10‒12-ben", "van", "."]),
("A 10‒12-ben.", ["A", "10‒12-ben", "."]),
("A 10–12 van.", ["A", "10–12", "van", "."]),
("A 10–12-ben van.", ["A", "10–12-ben", "van", "."]),
("A 10–12-ben.", ["A", "10–12-ben", "."]),
("A 10—12 van.", ["A", "10—12", "van", "."]),
("A 10—12-ben van.", ["A", "10—12-ben", "van", "."]),
("A 10—12-ben.", ["A", "10—12-ben", "."]),
("A 10―12 van.", ["A", "10―12", "van", "."]),
("A 10―12-ben van.", ["A", "10―12-ben", "van", "."]),
("A 10―12-ben.", ["A", "10―12-ben", "."]),
("A -23,12 van.", ["A", "-23,12", "van", "."]),
("A -23,12-ben van.", ["A", "-23,12-ben", "van", "."]),
("A -23,12-ben.", ["A", "-23,12-ben", "."]),
("A 2+3 van.", ["A", "2+3", "van", "."]),
("A 2<3 van.", ["A", "2<3", "van", "."]),
("A 2=3 van.", ["A", "2=3", "van", "."]),
("A 2÷3 van.", ["A", "2÷3", "van", "."]),
("A 1=(2÷3)-2/5 van.", ["A", "1=(2÷3)-2/5", "van", "."]),
("A 2 +3 van.", ["A", "2", "+3", "van", "."]),
("A 2+ 3 van.", ["A", "2", "+", "3", "van", "."]),
("A 2 + 3 van.", ["A", "2", "+", "3", "van", "."]),
("A 2*3 van.", ["A", "2*3", "van", "."]),
("A 2 *3 van.", ["A", "2", "*", "3", "van", "."]),
("A 2* 3 van.", ["A", "2", "*", "3", "van", "."]),
("A 2 * 3 van.", ["A", "2", "*", "3", "van", "."]),
("A C++ van.", ["A", "C++", "van", "."]),
("A C++-ben van.", ["A", "C++-ben", "van", "."]),
("A C++.", ["A", "C++", "."]),
("A C++-ben.", ["A", "C++-ben", "."]),
("A 2003. I. 06. van.", ["A", "2003.", "I.", "06.", "van", "."]),
("A 2003. I. 06-ben van.", ["A", "2003.", "I.", "06-ben", "van", "."]),
("A 2003. I. 06.", ["A", "2003.", "I.", "06."]),
("A 2003. I. 06-ben.", ["A", "2003.", "I.", "06-ben", "."]),
("A 2003. 01. 06. van.", ["A", "2003.", "01.", "06.", "van", "."]),
("A 2003. 01. 06-ben van.", ["A", "2003.", "01.", "06-ben", "van", "."]),
("A 2003. 01. 06.", ["A", "2003.", "01.", "06."]),
("A 2003. 01. 06-ben.", ["A", "2003.", "01.", "06-ben", "."]),
("A IV. 12. van.", ["A", "IV.", "12.", "van", "."]),
("A IV. 12-ben van.", ["A", "IV.", "12-ben", "van", "."]),
("A IV. 12.", ["A", "IV.", "12."]),
("A IV. 12-ben.", ["A", "IV.", "12-ben", "."]),
("A 2003.01.06. van.", ["A", "2003.01.06.", "van", "."]),
("A 2003.01.06-ben van.", ["A", "2003.01.06-ben", "van", "."]),
("A 2003.01.06.", ["A", "2003.01.06."]),
("A 2003.01.06-ben.", ["A", "2003.01.06-ben", "."]),
("A IV.12. van.", ["A", "IV.12.", "van", "."]),
("A IV.12-ben van.", ["A", "IV.12-ben", "van", "."]),
("A IV.12.", ["A", "IV.12."]),
("A IV.12-ben.", ["A", "IV.12-ben", "."]),
("A 1.1.2. van.", ["A", "1.1.2.", "van", "."]),
("A 1.1.2-ben van.", ["A", "1.1.2-ben", "van", "."]),
("A 1.1.2.", ["A", "1.1.2."]),
("A 1.1.2-ben.", ["A", "1.1.2-ben", "."]),
("A 1,5--2,5 van.", ["A", "1,5--2,5", "van", "."]),
("A 1,5--2,5-ben van.", ["A", "1,5--2,5-ben", "van", "."]),
("A 1,5--2,5-ben.", ["A", "1,5--2,5-ben", "."]),
("A 3,14 van.", ["A", "3,14", "van", "."]),
("A 3,14-ben van.", ["A", "3,14-ben", "van", "."]),
("A 3,14-ben.", ["A", "3,14-ben", "."]),
("A 3.14 van.", ["A", "3.14", "van", "."]),
("A 3.14-ben van.", ["A", "3.14-ben", "van", "."]),
("A 3.14-ben.", ["A", "3.14-ben", "."]),
("A 15. van.", ["A", "15.", "van", "."]),
("A 15-ben van.", ["A", "15-ben", "van", "."]),
("A 15-ben.", ["A", "15-ben", "."]),
("A 15.-ben van.", ["A", "15.-ben", "van", "."]),
("A 15.-ben.", ["A", "15.-ben", "."]),
("A 2002--2003. van.", ["A", "2002--2003.", "van", "."]),
("A 2002--2003-ben van.", ["A", "2002--2003-ben", "van", "."]),
("A 2002-2003-ben.", ["A", "2002-2003-ben", "."]),
("A +0,99% van.", ["A", "+0,99%", "van", "."]),
("A -0,99% van.", ["A", "-0,99%", "van", "."]),
("A -0,99%-ben van.", ["A", "-0,99%-ben", "van", "."]),
("A -0,99%.", ["A", "-0,99%", "."]),
("A -0,99%-ben.", ["A", "-0,99%-ben", "."]),
("A 10--20% van.", ["A", "10--20%", "van", "."]),
("A 10--20%-ben van.", ["A", "10--20%-ben", "van", "."]),
("A 10--20%.", ["A", "10--20%", "."]),
("A 10--20%-ben.", ["A", "10--20%-ben", "."]),
("A 99§ van.", ["A", "99§", "van", "."]),
("A 99§-ben van.", ["A", "99§-ben", "van", "."]),
("A 99§-ben.", ["A", "99§-ben", "."]),
("A 10--20§ van.", ["A", "10--20§", "van", "."]),
("A 10--20§-ben van.", ["A", "10--20§-ben", "van", "."]),
("A 10--20§-ben.", ["A", "10--20§-ben", "."]),
("A 99° van.", ["A", "99°", "van", "."]),
("A 99°-ben van.", ["A", "99°-ben", "van", "."]),
("A 99°-ben.", ["A", "99°-ben", "."]),
("A 10--20° van.", ["A", "10--20°", "van", "."]),
("A 10--20°-ben van.", ["A", "10--20°-ben", "van", "."]),
("A 10--20°-ben.", ["A", "10--20°-ben", "."]),
("A °C van.", ["A", "°C", "van", "."]),
("A °C-ben van.", ["A", "°C-ben", "van", "."]),
("A °C.", ["A", "°C", "."]),
("A °C-ben.", ["A", "°C-ben", "."]),
("A 100°C van.", ["A", "100°C", "van", "."]),
("A 100°C-ben van.", ["A", "100°C-ben", "van", "."]),
("A 100°C.", ["A", "100°C", "."]),
("A 100°C-ben.", ["A", "100°C-ben", "."]),
("A 800x600 van.", ["A", "800x600", "van", "."]),
("A 800x600-ben van.", ["A", "800x600-ben", "van", "."]),
("A 800x600-ben.", ["A", "800x600-ben", "."]),
("A 1x2x3x4 van.", ["A", "1x2x3x4", "van", "."]),
("A 1x2x3x4-ben van.", ["A", "1x2x3x4-ben", "van", "."]),
("A 1x2x3x4-ben.", ["A", "1x2x3x4-ben", "."]),
("A 5/J van.", ["A", "5/J", "van", "."]),
("A 5/J-ben van.", ["A", "5/J-ben", "van", "."]),
("A 5/J-ben.", ["A", "5/J-ben", "."]),
("A 5/J. van.", ["A", "5/J.", "van", "."]),
("A 5/J.-ben van.", ["A", "5/J.-ben", "van", "."]),
("A 5/J.-ben.", ["A", "5/J.-ben", "."]),
("A III/1 van.", ["A", "III/1", "van", "."]),
("A III/1-ben van.", ["A", "III/1-ben", "van", "."]),
("A III/1-ben.", ["A", "III/1-ben", "."]),
("A III/1. van.", ["A", "III/1.", "van", "."]),
("A III/1.-ben van.", ["A", "III/1.-ben", "van", "."]),
("A III/1.-ben.", ["A", "III/1.-ben", "."]),
("A III/c van.", ["A", "III/c", "van", "."]),
("A III/c-ben van.", ["A", "III/c-ben", "van", "."]),
("A III/c.", ["A", "III/c", "."]),
("A III/c-ben.", ["A", "III/c-ben", "."]),
("A TU–154 van.", ["A", "TU–154", "van", "."]),
("A TU–154-ben van.", ["A", "TU–154-ben", "van", "."]),
("A TU–154-ben.", ["A", "TU–154-ben", "."]),
("A 5cm³", ["A", "5", "cm³"]),
("A 5 $-ban", ["A", "5", "$-ban"]),
("A 5$-ban", ["A", "5$-ban"]),
("A 5$.", ["A", "5", "$", "."]),
("A 5$", ["A", "5", "$"]),
("A $5", ["A", "$5"]),
("A 5km/h", ["A", "5", "km/h"]),
("A 75%+1-100%-ig", ["A", "75%+1-100%-ig"]),
("A 5km/h.", ["A", "5", "km/h", "."]),
("3434/1992. évi elszámolás", ["3434/1992.", "évi", "elszámolás"]),
]
QUOTE_TESTS = [
(
'Az "Ime, hat"-ban irja.',
["Az", '"', "Ime", ",", "hat", '"', "-ban", "irja", "."],
),
('"Ime, hat"-ban irja.', ['"', "Ime", ",", "hat", '"', "-ban", "irja", "."]),
('Az "Ime, hat".', ["Az", '"', "Ime", ",", "hat", '"', "."]),
('Egy 24"-os monitor.', ["Egy", '24"-os', "monitor", "."]),
("A McDonald's van.", ["A", "McDonald's", "van", "."]),
]
DOT_TESTS = [
("N. kormányzósági\nszékhely.", ["N.", "kormányzósági", "székhely", "."]),
pytest.param(
"A .hu egy tld.", ["A", ".hu", "egy", "tld", "."], marks=pytest.mark.xfail()
),
("Az egy.ketto pelda.", ["Az", "egy.ketto", "pelda", "."]),
("A pl. rövidítés.", ["A", "pl.", "rövidítés", "."]),
("A S.M.A.R.T. szó.", ["A", "S.M.A.R.T.", "szó", "."]),
pytest.param("A .hu.", ["A", ".hu", "."], marks=pytest.mark.xfail()),
("Az egy.ketto.", ["Az", "egy.ketto", "."]),
("A pl.", ["A", "pl."]),
("A S.M.A.R.T.", ["A", "S.M.A.R.T."]),
("Egy..ket.", ["Egy", "..", "ket", "."]),
("Valami... van.", ["Valami", "...", "van", "."]),
("Valami ...van...", ["Valami", "...", "van", "..."]),
("Valami...", ["Valami", "..."]),
("Valami ...", ["Valami", "..."]),
("Valami ... más.", ["Valami", "...", "más", "."]),
]
TYPO_TESTS = [
(
"Ez egy mondat vége.Ez egy másik eleje.",
["Ez", "egy", "mondat", "vége", ".", "Ez", "egy", "másik", "eleje", "."],
),
(
"Ez egy mondat vége .Ez egy másik eleje.",
["Ez", "egy", "mondat", "vége", ".", "Ez", "egy", "másik", "eleje", "."],
),
(
"Ez egy mondat vége!ez egy másik eleje.",
["Ez", "egy", "mondat", "vége", "!", "ez", "egy", "másik", "eleje", "."],
),
(
"Ez egy mondat vége !ez egy másik eleje.",
["Ez", "egy", "mondat", "vége", "!", "ez", "egy", "másik", "eleje", "."],
),
(
"Ez egy mondat vége?Ez egy másik eleje.",
["Ez", "egy", "mondat", "vége", "?", "Ez", "egy", "másik", "eleje", "."],
),
(
"Ez egy mondat vége ?Ez egy másik eleje.",
["Ez", "egy", "mondat", "vége", "?", "Ez", "egy", "másik", "eleje", "."],
),
("egy,kettő", ["egy", ",", "kettő"]),
("egy ,kettő", ["egy", ",", "kettő"]),
("egy :kettő", ["egy", ":", "kettő"]),
]
WIKI_TESTS = [
('!"', ["!", '"']),
('lány"a', ["lány", '"', "a"]),
('lány"a', ["lány", '"', "a"]),
('!"-lel', ["!", '"', "-lel"]),
('""-sorozat ', ['"', '"', "-sorozat"]),
('"(Köszönöm', ['"', "(", "Köszönöm"]),
("(törvénykönyv)-ben ", ["(", "törvénykönyv", ")", "-ben"]),
('"(...)"–sokkal ', ['"', "(", "...", ")", '"', "–sokkal"]),
("cérium(IV)-oxid", ["cérium", "(", "IV", ")", "-oxid"]),
]
EXTRA_TESTS = (
DOT_TESTS + QUOTE_TESTS + NUMBER_TESTS + HYPHEN_TESTS + WIKI_TESTS + TYPO_TESTS # type: ignore[operator]
)
# normal: default tests + 10% of extra tests
TESTS = DEFAULT_TESTS
TESTS.extend([x for i, x in enumerate(EXTRA_TESTS) if i % 10 == 0])
# slow: remaining 90% of extra tests
SLOW_TESTS = [x for i, x in enumerate(EXTRA_TESTS) if i % 10 != 0]
TESTS.extend(
[
pytest.param(x[0], x[1], marks=pytest.mark.slow())
if not isinstance(x[0], tuple)
else x
for x in SLOW_TESTS
]
)
@pytest.mark.parametrize("text,expected_tokens", TESTS)
def test_hu_tokenizer_handles_testcases(hu_tokenizer, text, expected_tokens):
tokens = hu_tokenizer(text)
token_list = [token.text for token in tokens if not token.is_space]
assert expected_tokens == token_list
| 14,261 | 43.56875 | 109 | py |
spaCy | spaCy-master/spacy/tests/lang/hy/__init__.py | 0 | 0 | 0 | py |
|
spaCy | spaCy-master/spacy/tests/lang/hy/test_text.py | import pytest
from spacy.lang.hy.lex_attrs import like_num
@pytest.mark.parametrize("word", ["հիսուն"])
def test_hy_lex_attrs_capitals(word):
assert like_num(word)
assert like_num(word.upper())
| 205 | 19.6 | 44 | py |
spaCy | spaCy-master/spacy/tests/lang/hy/test_tokenizer.py | import pytest
# TODO add test cases with valid punctuation signs.
hy_tokenize_text_test = [
(
"Մետաղագիտությունը պայմանականորեն բաժանվում է տեսականի և կիրառականի (տեխնիկական)",
[
"Մետաղագիտությունը",
"պայմանականորեն",
"բաժանվում",
"է",
"տեսականի",
"և",
"կիրառականի",
"(",
"տեխնիկական",
")",
],
),
(
"Գետաբերանը գտնվում է Օմոլոնա գետի ձախ ափից 726 կմ հեռավորության վրա",
[
"Գետաբերանը",
"գտնվում",
"է",
"Օմոլոնա",
"գետի",
"ձախ",
"ափից",
"726",
"կմ",
"հեռավորության",
"վրա",
],
),
]
@pytest.mark.parametrize("text,expected_tokens", hy_tokenize_text_test)
def test_ga_tokenizer_handles_exception_cases(hy_tokenizer, text, expected_tokens):
tokens = hy_tokenizer(text)
token_list = [token.text for token in tokens if not token.is_space]
assert expected_tokens == token_list
| 1,102 | 23.511111 | 90 | py |
spaCy | spaCy-master/spacy/tests/lang/id/__init__.py | 0 | 0 | 0 | py |
|
spaCy | spaCy-master/spacy/tests/lang/id/test_noun_chunks.py | import pytest
def test_noun_chunks_is_parsed_id(id_tokenizer):
"""Test that noun_chunks raises Value Error for 'id' language if Doc is not parsed."""
doc = id_tokenizer("sebelas")
with pytest.raises(ValueError):
list(doc.noun_chunks)
| 256 | 27.555556 | 90 | py |
spaCy | spaCy-master/spacy/tests/lang/id/test_prefix_suffix_infix.py | import pytest
@pytest.mark.parametrize("text", ["(Ma'arif)"])
def test_id_tokenizer_splits_no_special(id_tokenizer, text):
tokens = id_tokenizer(text)
assert len(tokens) == 3
@pytest.mark.parametrize("text", ["Ma'arif"])
def test_id_tokenizer_splits_no_punct(id_tokenizer, text):
tokens = id_tokenizer(text)
assert len(tokens) == 1
@pytest.mark.parametrize("text", ["(Ma'arif"])
def test_id_tokenizer_splits_prefix_punct(id_tokenizer, text):
tokens = id_tokenizer(text)
assert len(tokens) == 2
@pytest.mark.parametrize("text", ["Ma'arif)"])
def test_id_tokenizer_splits_suffix_punct(id_tokenizer, text):
tokens = id_tokenizer(text)
assert len(tokens) == 2
@pytest.mark.parametrize("text", ["(Ma'arif)"])
def test_id_tokenizer_splits_even_wrap(id_tokenizer, text):
tokens = id_tokenizer(text)
assert len(tokens) == 3
@pytest.mark.parametrize("text", ["(Ma'arif?)"])
def test_tokenizer_splits_uneven_wrap(id_tokenizer, text):
tokens = id_tokenizer(text)
assert len(tokens) == 4
@pytest.mark.parametrize("text,length", [("S.Kom.", 1), ("SKom.", 2), ("(S.Kom.", 2)])
def test_id_tokenizer_splits_prefix_interact(id_tokenizer, text, length):
tokens = id_tokenizer(text)
assert len(tokens) == length
@pytest.mark.parametrize("text", ["S.Kom.)"])
def test_id_tokenizer_splits_suffix_interact(id_tokenizer, text):
tokens = id_tokenizer(text)
assert len(tokens) == 2
@pytest.mark.parametrize("text", ["(S.Kom.)"])
def test_id_tokenizer_splits_even_wrap_interact(id_tokenizer, text):
tokens = id_tokenizer(text)
assert len(tokens) == 3
@pytest.mark.parametrize("text", ["(S.Kom.?)"])
def test_id_tokenizer_splits_uneven_wrap_interact(id_tokenizer, text):
tokens = id_tokenizer(text)
assert len(tokens) == 4
@pytest.mark.parametrize(
"text,length", [("gara-gara", 1), ("Jokowi-Ahok", 3), ("Sukarno-Hatta", 3)]
)
def test_id_tokenizer_splits_hyphens(id_tokenizer, text, length):
tokens = id_tokenizer(text)
assert len(tokens) == length
@pytest.mark.parametrize("text", ["0.1-13.5", "0.0-0.1", "103.27-300"])
def test_id_tokenizer_splits_numeric_range(id_tokenizer, text):
tokens = id_tokenizer(text)
assert len(tokens) == 3
@pytest.mark.parametrize("text", ["ini.Budi", "Halo.Bandung"])
def test_id_tokenizer_splits_period_infix(id_tokenizer, text):
tokens = id_tokenizer(text)
assert len(tokens) == 3
@pytest.mark.parametrize("text", ["Halo,Bandung", "satu,dua"])
def test_id_tokenizer_splits_comma_infix(id_tokenizer, text):
tokens = id_tokenizer(text)
assert len(tokens) == 3
assert tokens[0].text == text.split(",")[0]
assert tokens[1].text == ","
assert tokens[2].text == text.split(",")[1]
@pytest.mark.parametrize("text", ["halo...Bandung", "dia...pergi"])
def test_id_tokenizer_splits_ellipsis_infix(id_tokenizer, text):
tokens = id_tokenizer(text)
assert len(tokens) == 3
def test_id_tokenizer_splits_double_hyphen_infix(id_tokenizer):
tokens = id_tokenizer("Arsene Wenger--manajer Arsenal--melakukan konferensi pers.")
assert len(tokens) == 10
assert tokens[0].text == "Arsene"
assert tokens[1].text == "Wenger"
assert tokens[2].text == "--"
assert tokens[3].text == "manajer"
assert tokens[4].text == "Arsenal"
assert tokens[5].text == "--"
assert tokens[6].text == "melakukan"
assert tokens[7].text == "konferensi"
assert tokens[8].text == "pers"
assert tokens[9].text == "."
| 3,492 | 30.1875 | 87 | py |
spaCy | spaCy-master/spacy/tests/lang/id/test_text.py | import pytest
from spacy.lang.id.lex_attrs import like_num
@pytest.mark.parametrize("word", ["sebelas"])
def test_id_lex_attrs_capitals(word):
assert like_num(word)
assert like_num(word.upper())
| 206 | 19.7 | 45 | py |
spaCy | spaCy-master/spacy/tests/lang/is/__init__.py | 0 | 0 | 0 | py |
|
spaCy | spaCy-master/spacy/tests/lang/is/test_text.py | import pytest
def test_long_text(is_tokenizer):
# Excerpt: European Convention on Human Rights
text = """
hafa í huga, að yfirlýsing þessi hefur það markmið að tryggja
almenna og raunhæfa viðurkenningu og vernd þeirra réttinda,
sem þar er lýst;
hafa í huga, að markmið Evrópuráðs er að koma á nánari einingu
aðildarríkjanna og að ein af leiðunum að því marki er sú, að
mannréttindi og mannfrelsi séu í heiðri höfð og efld;
lýsa á ný eindreginni trú sinni á það mannfrelsi, sem er undirstaða
réttlætis og friðar í heiminum og best er tryggt, annars vegar með
virku, lýðræðislegu stjórnarfari og, hins vegar, almennum skilningi
og varðveislu þeirra mannréttinda, sem eru grundvöllur frelsisins;
"""
tokens = is_tokenizer(text)
assert len(tokens) == 120
@pytest.mark.xfail
def test_ordinal_number(is_tokenizer):
text = "10. desember 1948"
tokens = is_tokenizer(text)
assert len(tokens) == 3
| 920 | 33.111111 | 67 | py |
spaCy | spaCy-master/spacy/tests/lang/is/test_tokenizer.py | import pytest
IS_BASIC_TOKENIZATION_TESTS = [
(
"Enginn maður skal sæta pyndingum eða ómannlegri eða "
"vanvirðandi meðferð eða refsingu. ",
[
"Enginn",
"maður",
"skal",
"sæta",
"pyndingum",
"eða",
"ómannlegri",
"eða",
"vanvirðandi",
"meðferð",
"eða",
"refsingu",
".",
],
),
]
@pytest.mark.parametrize("text,expected_tokens", IS_BASIC_TOKENIZATION_TESTS)
def test_is_tokenizer_basic(is_tokenizer, text, expected_tokens):
tokens = is_tokenizer(text)
token_list = [token.text for token in tokens if not token.is_space]
assert expected_tokens == token_list
| 765 | 23.709677 | 77 | py |
spaCy | spaCy-master/spacy/tests/lang/it/__init__.py | 0 | 0 | 0 | py |
|
spaCy | spaCy-master/spacy/tests/lang/it/test_noun_chunks.py | import pytest
from spacy.tokens import Doc
# fmt: off
@pytest.mark.parametrize(
"words,heads,deps,pos,chunk_offsets",
[
# determiner + noun
# un pollo -> un pollo
(
["un", "pollo"],
[1, 1],
["det", "ROOT"],
["DET", "NOUN"],
[(0,2)],
),
# two determiners + noun
# il mio cane -> il mio cane
(
["il", "mio", "cane"],
[2, 2, 2],
["det", "det:poss", "ROOT"],
["DET", "DET", "NOUN"],
[(0,3)],
),
# two determiners, one is after noun. rare usage but still testing
# il cane mio-> il cane mio
(
["il", "cane", "mio"],
[1, 1, 1],
["det", "ROOT", "det:poss"],
["DET", "NOUN", "DET"],
[(0,3)],
),
# relative pronoun
# È molto bello il vestito che hai acquistat -> il vestito, che the dress that you bought is very pretty.
(
["È", "molto", "bello", "il", "vestito", "che", "hai", "acquistato"],
[2, 2, 2, 4, 2, 7, 7, 4],
['cop', 'advmod', 'ROOT', 'det', 'nsubj', 'obj', 'aux', 'acl:relcl'],
['AUX', 'ADV', 'ADJ', 'DET', 'NOUN', 'PRON', 'AUX', 'VERB'],
[(3,5), (5,6)]
),
# relative subclause
# il computer che hai comprato -> il computer, che the computer that you bought
(
['il', 'computer', 'che', 'hai', 'comprato'],
[1, 1, 4, 4, 1],
['det', 'ROOT', 'nsubj', 'aux', 'acl:relcl'],
['DET', 'NOUN', 'PRON', 'AUX', 'VERB'],
[(0,2), (2,3)]
),
# det + noun + adj
# Una macchina grande -> Una macchina grande
(
["Una", "macchina", "grande"],
[1, 1, 1],
["det", "ROOT", "amod"],
["DET", "NOUN", "ADJ"],
[(0,3)],
),
# noun + adj plural
# mucche bianche
(
["mucche", "bianche"],
[0, 0],
["ROOT", "amod"],
["NOUN", "ADJ"],
[(0,2)],
),
# det + adj + noun
# Una grande macchina -> Una grande macchina
(
['Una', 'grande', 'macchina'],
[2, 2, 2],
["det", "amod", "ROOT"],
["DET", "ADJ", "NOUN"],
[(0,3)]
),
# det + adj + noun, det with apostrophe
# un'importante associazione -> un'importante associazione
(
["Un'", 'importante', 'associazione'],
[2, 2, 2],
["det", "amod", "ROOT"],
["DET", "ADJ", "NOUN"],
[(0,3)]
),
# multiple adjectives
# Un cane piccolo e marrone -> Un cane piccolo e marrone
(
["Un", "cane", "piccolo", "e", "marrone"],
[1, 1, 1, 4, 2],
["det", "ROOT", "amod", "cc", "conj"],
["DET", "NOUN", "ADJ", "CCONJ", "ADJ"],
[(0,5)]
),
# determiner, adjective, compound created by flat
# le Nazioni Unite -> le Nazioni Unite
(
["le", "Nazioni", "Unite"],
[1, 1, 1],
["det", "ROOT", "flat:name"],
["DET", "PROPN", "PROPN"],
[(0,3)]
),
# one determiner + one noun + one adjective qualified by an adverb
# alcuni contadini molto ricchi -> alcuni contadini molto ricchi some very rich farmers
(
['alcuni', 'contadini', 'molto', 'ricchi'],
[1, 1, 3, 1],
['det', 'ROOT', 'advmod', 'amod'],
['DET', 'NOUN', 'ADV', 'ADJ'],
[(0,4)]
),
# Two NPs conjuncted
# Ho un cane e un gatto -> un cane, un gatto
(
['Ho', 'un', 'cane', 'e', 'un', 'gatto'],
[0, 2, 0, 5, 5, 0],
['ROOT', 'det', 'obj', 'cc', 'det', 'conj'],
['VERB', 'DET', 'NOUN', 'CCONJ', 'DET', 'NOUN'],
[(1,3), (4,6)]
),
# Two NPs together
# lo scrittore brasiliano Aníbal Machado -> lo scrittore brasiliano, Aníbal Machado
(
['lo', 'scrittore', 'brasiliano', 'Aníbal', 'Machado'],
[1, 1, 1, 1, 3],
['det', 'ROOT', 'amod', 'nmod', 'flat:name'],
['DET', 'NOUN', 'ADJ', 'PROPN', 'PROPN'],
[(0, 3), (3, 5)]
),
# Noun compound, person name and titles
# Dom Pedro II -> Dom Pedro II
(
["Dom", "Pedro", "II"],
[0, 0, 0],
["ROOT", "flat:name", "flat:name"],
["PROPN", "PROPN", "PROPN"],
[(0,3)]
),
# Noun compound created by flat
# gli Stati Uniti
(
["gli", "Stati", "Uniti"],
[1, 1, 1],
["det", "ROOT", "flat:name"],
["DET", "PROPN", "PROPN"],
[(0,3)]
),
# nmod relation between NPs
# la distruzione della città -> la distruzione, città
(
['la', 'distruzione', 'della', 'città'],
[1, 1, 3, 1],
['det', 'ROOT', 'case', 'nmod'],
['DET', 'NOUN', 'ADP', 'NOUN'],
[(0,2), (3,4)]
),
# Compounding by nmod, several NPs chained together
# la prima fabbrica di droga del governo -> la prima fabbrica, droga, governo
(
["la", "prima", "fabbrica", "di", "droga", "del", "governo"],
[2, 2, 2, 4, 2, 6, 2],
['det', 'amod', 'ROOT', 'case', 'nmod', 'case', 'nmod'],
['DET', 'ADJ', 'NOUN', 'ADP', 'NOUN', 'ADP', 'NOUN'],
[(0, 3), (4, 5), (6, 7)]
),
# several NPs
# Traduzione del rapporto di Susana -> Traduzione, rapporto, Susana
(
['Traduzione', 'del', 'rapporto', 'di', 'Susana'],
[0, 2, 0, 4, 2],
['ROOT', 'case', 'nmod', 'case', 'nmod'],
['NOUN', 'ADP', 'NOUN', 'ADP', 'PROPN'],
[(0,1), (2,3), (4,5)]
),
# Several NPs
# Il gatto grasso di Susana e la sua amica -> Il gatto grasso, Susana, sua amica
(
['Il', 'gatto', 'grasso', 'di', 'Susana', 'e', 'la', 'sua', 'amica'],
[1, 1, 1, 4, 1, 8, 8, 8, 1],
['det', 'ROOT', 'amod', 'case', 'nmod', 'cc', 'det', 'det:poss', 'conj'],
['DET', 'NOUN', 'ADJ', 'ADP', 'PROPN', 'CCONJ', 'DET', 'DET', 'NOUN'],
[(0,3), (4,5), (6,9)]
),
# Passive subject
# La nuova spesa è alimentata dal grande conto in banca di Clinton -> Le nuova spesa, grande conto, banca, Clinton
(
['La', 'nuova', 'spesa', 'è', 'alimentata', 'dal', 'grande', 'conto', 'in', 'banca', 'di', 'Clinton'],
[2, 2, 4, 4, 4, 7, 7, 4, 9, 7, 11, 9],
['det', 'amod', 'nsubj:pass', 'aux:pass', 'ROOT', 'case', 'amod', 'obl:agent', 'case', 'nmod', 'case', 'nmod'],
['DET', 'ADJ', 'NOUN', 'AUX', 'VERB', 'ADP', 'ADJ', 'NOUN', 'ADP', 'NOUN', 'ADP', 'PROPN'],
[(0, 3), (6, 8), (9, 10), (11,12)]
),
# Misc
# Ma mentre questo prestito possa ora sembrare gestibile, un improvviso cambiamento delle circostanze potrebbe portare a problemi di debiti -> questo prestiti, un provisso cambiento, circostanze, problemi, debiti
(
['Ma', 'mentre', 'questo', 'prestito', 'possa', 'ora', 'sembrare', 'gestibile', ',', 'un', 'improvviso', 'cambiamento', 'delle', 'circostanze', 'potrebbe', 'portare', 'a', 'problemi', 'di', 'debitii'],
[15, 6, 3, 6, 6, 6, 15, 6, 6, 11, 11, 15, 13, 11, 15, 15, 17, 15, 19, 17],
['cc', 'mark', 'det', 'nsubj', 'aux', 'advmod', 'advcl', 'xcomp', 'punct', 'det', 'amod', 'nsubj', 'case', 'nmod', 'aux', 'ROOT', 'case', 'obl', 'case', 'nmod'],
['CCONJ', 'SCONJ', 'DET', 'NOUN', 'AUX', 'ADV', 'VERB', 'ADJ', 'PUNCT', 'DET', 'ADJ', 'NOUN', 'ADP', 'NOUN', 'AUX', 'VERB', 'ADP', 'NOUN', 'ADP', 'NOUN'],
[(2,4), (9,12), (13,14), (17,18), (19,20)]
)
],
)
# fmt: on
def test_it_noun_chunks(it_vocab, words, heads, deps, pos, chunk_offsets):
doc = Doc(it_vocab, words=words, heads=heads, deps=deps, pos=pos)
assert [(c.start, c.end) for c in doc.noun_chunks] == chunk_offsets
def test_noun_chunks_is_parsed_it(it_tokenizer):
"""Test that noun_chunks raises Value Error for 'it' language if Doc is not parsed."""
doc = it_tokenizer("Sei andato a Oxford")
with pytest.raises(ValueError):
list(doc.noun_chunks)
| 8,630 | 37.704036 | 220 | py |
spaCy | spaCy-master/spacy/tests/lang/it/test_prefix_suffix_infix.py | import pytest
@pytest.mark.parametrize(
"text,expected_tokens", [("c'è", ["c'", "è"]), ("l'ha", ["l'", "ha"])]
)
def test_contractions(it_tokenizer, text, expected_tokens):
"""Test that the contractions are split into two tokens"""
tokens = it_tokenizer(text)
assert len(tokens) == 2
assert [t.text for t in tokens] == expected_tokens
| 357 | 28.833333 | 74 | py |
spaCy | spaCy-master/spacy/tests/lang/it/test_stopwords.py | import pytest
@pytest.mark.parametrize(
"word", ["un", "lo", "dell", "dall", "si", "ti", "mi", "quest", "quel", "quello"]
)
def test_stopwords_basic(it_tokenizer, word):
tok = it_tokenizer(word)[0]
assert tok.is_stop
@pytest.mark.parametrize(
"word", ["quest'uomo", "l'ho", "un'amica", "dell'olio", "s'arrende", "m'ascolti"]
)
def test_stopwords_elided(it_tokenizer, word):
tok = it_tokenizer(word)[0]
assert tok.is_stop
| 449 | 24 | 85 | py |
spaCy | spaCy-master/spacy/tests/lang/it/test_text.py | import pytest
@pytest.mark.issue(2822)
def test_issue2822(it_tokenizer):
"""Test that the abbreviation of poco is kept as one word."""
doc = it_tokenizer("Vuoi un po' di zucchero?")
assert len(doc) == 6
assert doc[0].text == "Vuoi"
assert doc[1].text == "un"
assert doc[2].text == "po'"
assert doc[3].text == "di"
assert doc[4].text == "zucchero"
assert doc[5].text == "?"
| 411 | 26.466667 | 65 | py |
spaCy | spaCy-master/spacy/tests/lang/ja/__init__.py | 0 | 0 | 0 | py |
|
spaCy | spaCy-master/spacy/tests/lang/ja/test_lemmatization.py | import pytest
@pytest.mark.parametrize(
"word,lemma",
[("新しく", "新しい"), ("赤く", "赤い"), ("すごく", "すごい"), ("いただきました", "いただく"), ("なった", "なる")],
)
def test_ja_lemmatizer_assigns(ja_tokenizer, word, lemma):
test_lemma = ja_tokenizer(word)[0].lemma_
assert test_lemma == lemma
@pytest.mark.parametrize(
"word,norm",
[
("SUMMER", "サマー"),
("食べ物", "食べ物"),
("綜合", "総合"),
("コンピュータ", "コンピューター"),
],
)
def test_ja_lemmatizer_norm(ja_tokenizer, word, norm):
test_norm = ja_tokenizer(word)[0].norm_
assert test_norm == norm
| 578 | 22.16 | 87 | py |
spaCy | spaCy-master/spacy/tests/lang/ja/test_morphologizer_factory.py | import pytest
from spacy.lang.ja import Japanese
def test_ja_morphologizer_factory():
pytest.importorskip("sudachipy")
nlp = Japanese()
morphologizer = nlp.add_pipe("morphologizer")
assert morphologizer.cfg["extend"] is True
| 244 | 21.272727 | 49 | py |
spaCy | spaCy-master/spacy/tests/lang/ja/test_serialize.py | import pickle
from spacy.lang.ja import Japanese
from ...util import make_tempdir
def test_ja_tokenizer_serialize(ja_tokenizer):
tokenizer_bytes = ja_tokenizer.to_bytes()
nlp = Japanese()
nlp.tokenizer.from_bytes(tokenizer_bytes)
assert tokenizer_bytes == nlp.tokenizer.to_bytes()
assert nlp.tokenizer.split_mode is None
with make_tempdir() as d:
file_path = d / "tokenizer"
ja_tokenizer.to_disk(file_path)
nlp = Japanese()
nlp.tokenizer.from_disk(file_path)
assert tokenizer_bytes == nlp.tokenizer.to_bytes()
assert nlp.tokenizer.split_mode is None
# split mode is (de)serialized correctly
nlp = Japanese.from_config({"nlp": {"tokenizer": {"split_mode": "B"}}})
nlp_r = Japanese()
nlp_bytes = nlp.to_bytes()
nlp_r.from_bytes(nlp_bytes)
assert nlp_bytes == nlp_r.to_bytes()
assert nlp_r.tokenizer.split_mode == "B"
with make_tempdir() as d:
nlp.to_disk(d)
nlp_r = Japanese()
nlp_r.from_disk(d)
assert nlp_bytes == nlp_r.to_bytes()
assert nlp_r.tokenizer.split_mode == "B"
def test_ja_tokenizer_pickle(ja_tokenizer):
b = pickle.dumps(ja_tokenizer)
ja_tokenizer_re = pickle.loads(b)
assert ja_tokenizer.to_bytes() == ja_tokenizer_re.to_bytes()
| 1,307 | 29.418605 | 75 | py |
spaCy | spaCy-master/spacy/tests/lang/ja/test_tokenizer.py | import pytest
from spacy.lang.ja import DetailedToken, Japanese
from ...tokenizer.test_naughty_strings import NAUGHTY_STRINGS
# fmt: off
TOKENIZER_TESTS = [
("日本語だよ", ['日本', '語', 'だ', 'よ']),
("東京タワーの近くに住んでいます。", ['東京', 'タワー', 'の', '近く', 'に', '住ん', 'で', 'い', 'ます', '。']),
("吾輩は猫である。", ['吾輩', 'は', '猫', 'で', 'ある', '。']),
("月に代わって、お仕置きよ!", ['月', 'に', '代わっ', 'て', '、', 'お', '仕置き', 'よ', '!']),
("すもももももももものうち", ['すもも', 'も', 'もも', 'も', 'もも', 'の', 'うち'])
]
TAG_TESTS = [
("日本語だよ", ['名詞-固有名詞-地名-国', '名詞-普通名詞-一般', '助動詞', '助詞-終助詞']),
("東京タワーの近くに住んでいます。", ['名詞-固有名詞-地名-一般', '名詞-普通名詞-一般', '助詞-格助詞', '名詞-普通名詞-副詞可能', '助詞-格助詞', '動詞-一般', '助詞-接続助詞', '動詞-非自立可能', '助動詞', '補助記号-句点']),
("吾輩は猫である。", ['代名詞', '助詞-係助詞', '名詞-普通名詞-一般', '助動詞', '動詞-非自立可能', '補助記号-句点']),
("月に代わって、お仕置きよ!", ['名詞-普通名詞-助数詞可能', '助詞-格助詞', '動詞-一般', '助詞-接続助詞', '補助記号-読点', '接頭辞', '名詞-普通名詞-一般', '助詞-終助詞', '補助記号-句点']),
("すもももももももものうち", ['名詞-普通名詞-一般', '助詞-係助詞', '名詞-普通名詞-一般', '助詞-係助詞', '名詞-普通名詞-一般', '助詞-格助詞', '名詞-普通名詞-副詞可能'])
]
POS_TESTS = [
('日本語だよ', ['PROPN', 'NOUN', 'AUX', 'PART']),
('東京タワーの近くに住んでいます。', ['PROPN', 'NOUN', 'ADP', 'NOUN', 'ADP', 'VERB', 'SCONJ', 'AUX', 'AUX', 'PUNCT']),
('吾輩は猫である。', ['PRON', 'ADP', 'NOUN', 'AUX', 'AUX', 'PUNCT']),
('月に代わって、お仕置きよ!', ['NOUN', 'ADP', 'VERB', 'SCONJ', 'PUNCT', 'NOUN', 'NOUN', 'PART', 'PUNCT']),
('すもももももももものうち', ['NOUN', 'ADP', 'NOUN', 'ADP', 'NOUN', 'ADP', 'NOUN'])
]
SENTENCE_TESTS = [
("あれ。これ。", ["あれ。", "これ。"]),
("「伝染るんです。」という漫画があります。", ["「伝染るんです。」という漫画があります。"]),
]
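# Expected sub-token analyses of 選挙管理委員会 for SUB_TOKEN_TESTS below: tokens1 is
# the finer split of 委員会 (surfacing under split mode B), while tokens2 and tokens3
# are the two alternative splits of the whole compound returned under split mode C.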
tokens1 = [
DetailedToken(surface="委員", tag="名詞-普通名詞-一般", inf="", lemma="委員", norm="委員", reading="イイン", sub_tokens=None),
DetailedToken(surface="会", tag="名詞-普通名詞-一般", inf="", lemma="会", norm="会", reading="カイ", sub_tokens=None),
]
tokens2 = [
DetailedToken(surface="選挙", tag="名詞-普通名詞-サ変可能", inf="", lemma="選挙", norm="選挙", reading="センキョ", sub_tokens=None),
DetailedToken(surface="管理", tag="名詞-普通名詞-サ変可能", inf="", lemma="管理", norm="管理", reading="カンリ", sub_tokens=None),
DetailedToken(surface="委員", tag="名詞-普通名詞-一般", inf="", lemma="委員", norm="委員", reading="イイン", sub_tokens=None),
DetailedToken(surface="会", tag="名詞-普通名詞-一般", inf="", lemma="会", norm="会", reading="カイ", sub_tokens=None),
]
tokens3 = [
DetailedToken(surface="選挙", tag="名詞-普通名詞-サ変可能", inf="", lemma="選挙", norm="選挙", reading="センキョ", sub_tokens=None),
DetailedToken(surface="管理", tag="名詞-普通名詞-サ変可能", inf="", lemma="管理", norm="管理", reading="カンリ", sub_tokens=None),
DetailedToken(surface="委員会", tag="名詞-普通名詞-一般", inf="", lemma="委員会", norm="委員会", reading="イインカイ", sub_tokens=None),
]
SUB_TOKEN_TESTS = [
("選挙管理委員会", [None, None, [tokens1]], [[tokens2, tokens3]])
]
# fmt: on
@pytest.mark.issue(2901)
def test_issue2901():
"""Test that `nlp` doesn't fail."""
try:
nlp = Japanese()
except ImportError:
pytest.skip()
doc = nlp("pythonが大好きです")
assert doc
@pytest.mark.parametrize("text,expected_tokens", TOKENIZER_TESTS)
def test_ja_tokenizer(ja_tokenizer, text, expected_tokens):
tokens = [token.text for token in ja_tokenizer(text)]
assert tokens == expected_tokens
@pytest.mark.parametrize("text,expected_tags", TAG_TESTS)
def test_ja_tokenizer_tags(ja_tokenizer, text, expected_tags):
tags = [token.tag_ for token in ja_tokenizer(text)]
assert tags == expected_tags
@pytest.mark.parametrize("text,expected_pos", POS_TESTS)
def test_ja_tokenizer_pos(ja_tokenizer, text, expected_pos):
pos = [token.pos_ for token in ja_tokenizer(text)]
assert pos == expected_pos
@pytest.mark.skip(reason="sentence segmentation in tokenizer is buggy")
@pytest.mark.parametrize("text,expected_sents", SENTENCE_TESTS)
def test_ja_tokenizer_sents(ja_tokenizer, text, expected_sents):
sents = [str(sent) for sent in ja_tokenizer(text).sents]
assert sents == expected_sents
def test_ja_tokenizer_extra_spaces(ja_tokenizer):
# note: three spaces after "I"
    tokens = ja_tokenizer("I   like cheese.")
    assert tokens[1].orth_ == "  "
@pytest.mark.parametrize("text", NAUGHTY_STRINGS)
def test_ja_tokenizer_naughty_strings(ja_tokenizer, text):
tokens = ja_tokenizer(text)
assert tokens.text_with_ws == text
@pytest.mark.parametrize(
"text,len_a,len_b,len_c",
[
("選挙管理委員会", 4, 3, 1),
("客室乗務員", 3, 2, 1),
("労働者協同組合", 4, 3, 1),
("機能性食品", 3, 2, 1),
],
)
def test_ja_tokenizer_split_modes(ja_tokenizer, text, len_a, len_b, len_c):
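    # SudachiPy split modes go from finest to coarsest granularity (A: short
    # units, B: middle units, C: longest units), so the expected token counts
    # satisfy len_a >= len_b >= len_c for every compound in the test data.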
nlp_a = Japanese.from_config({"nlp": {"tokenizer": {"split_mode": "A"}}})
nlp_b = Japanese.from_config({"nlp": {"tokenizer": {"split_mode": "B"}}})
nlp_c = Japanese.from_config({"nlp": {"tokenizer": {"split_mode": "C"}}})
assert len(ja_tokenizer(text)) == len_a
assert len(nlp_a(text)) == len_a
assert len(nlp_b(text)) == len_b
assert len(nlp_c(text)) == len_c
@pytest.mark.parametrize("text,sub_tokens_list_b,sub_tokens_list_c", SUB_TOKEN_TESTS)
def test_ja_tokenizer_sub_tokens(
ja_tokenizer, text, sub_tokens_list_b, sub_tokens_list_c
):
nlp_a = Japanese.from_config({"nlp": {"tokenizer": {"split_mode": "A"}}})
nlp_b = Japanese.from_config({"nlp": {"tokenizer": {"split_mode": "B"}}})
nlp_c = Japanese.from_config({"nlp": {"tokenizer": {"split_mode": "C"}}})
assert ja_tokenizer(text).user_data.get("sub_tokens") is None
assert nlp_a(text).user_data.get("sub_tokens") is None
assert nlp_b(text).user_data["sub_tokens"] == sub_tokens_list_b
assert nlp_c(text).user_data["sub_tokens"] == sub_tokens_list_c
@pytest.mark.parametrize(
"text,inflections,reading_forms",
[
(
"取ってつけた",
(["五段-ラ行;連用形-促音便"], [], ["下一段-カ行;連用形-一般"], ["助動詞-タ;終止形-一般"]),
(["トッ"], ["テ"], ["ツケ"], ["タ"]),
),
("2=3", ([], [], []), (["ニ"], ["_"], ["サン"])),
],
)
def test_ja_tokenizer_inflections_reading_forms(
ja_tokenizer, text, inflections, reading_forms
):
tokens = ja_tokenizer(text)
test_inflections = [tt.morph.get("Inflection") for tt in tokens]
assert test_inflections == list(inflections)
test_readings = [tt.morph.get("Reading") for tt in tokens]
assert test_readings == list(reading_forms)
def test_ja_tokenizer_emptyish_texts(ja_tokenizer):
doc = ja_tokenizer("")
assert len(doc) == 0
doc = ja_tokenizer(" ")
assert len(doc) == 1
doc = ja_tokenizer("\n\n\n \t\t \n\n\n")
assert len(doc) == 1
| 6,443 | 37.130178 | 144 | py |
spaCy | spaCy-master/spacy/tests/lang/ko/__init__.py | 0 | 0 | 0 | py |
|
spaCy | spaCy-master/spacy/tests/lang/ko/test_lemmatization.py | import pytest
@pytest.mark.parametrize(
"word,lemma", [("새로운", "새롭"), ("빨간", "빨갛"), ("클수록", "크"), ("뭡니까", "뭣"), ("됐다", "되")]
)
def test_ko_lemmatizer_assigns(ko_tokenizer, word, lemma):
test_lemma = ko_tokenizer(word)[0].lemma_
assert test_lemma == lemma
| 269 | 26 | 88 | py |
spaCy | spaCy-master/spacy/tests/lang/ko/test_serialize.py | import pickle
from spacy.lang.ko import Korean
from ...util import make_tempdir
def test_ko_tokenizer_serialize(ko_tokenizer):
tokenizer_bytes = ko_tokenizer.to_bytes()
nlp = Korean()
nlp.tokenizer.from_bytes(tokenizer_bytes)
assert tokenizer_bytes == nlp.tokenizer.to_bytes()
with make_tempdir() as d:
file_path = d / "tokenizer"
ko_tokenizer.to_disk(file_path)
nlp = Korean()
nlp.tokenizer.from_disk(file_path)
assert tokenizer_bytes == nlp.tokenizer.to_bytes()
def test_ko_tokenizer_pickle(ko_tokenizer):
b = pickle.dumps(ko_tokenizer)
ko_tokenizer_re = pickle.loads(b)
assert ko_tokenizer.to_bytes() == ko_tokenizer_re.to_bytes()
| 713 | 26.461538 | 64 | py |
spaCy | spaCy-master/spacy/tests/lang/ko/test_tokenizer.py | import pytest
# fmt: off
TOKENIZER_TESTS = [("서울 타워 근처에 살고 있습니다.", "서울 타워 근처 에 살 고 있 습니다 ."),
("영등포구에 있는 맛집 좀 알려주세요.", "영등포구 에 있 는 맛집 좀 알려 주 세요 ."),
("10$ 할인코드를 적용할까요?", "10 $ 할인 코드 를 적용 할까요 ?")]
TAG_TESTS = [("서울 타워 근처에 살고 있습니다.",
"NNP NNG NNG JKB VV EC VX EF SF"),
("영등포구에 있는 맛집 좀 알려주세요.",
"NNP JKB VV ETM NNG MAG VV VX EP SF")]
FULL_TAG_TESTS = [("영등포구에 있는 맛집 좀 알려주세요.",
"NNP JKB VV ETM NNG MAG VV+EC VX EP+EF SF")]
POS_TESTS = [("서울 타워 근처에 살고 있습니다.",
"PROPN NOUN NOUN ADP VERB X AUX X PUNCT"),
("영등포구에 있는 맛집 좀 알려주세요.",
"PROPN ADP VERB X NOUN ADV VERB AUX X PUNCT")]
# fmt: on
@pytest.mark.parametrize("text,expected_tokens", TOKENIZER_TESTS)
def test_ko_tokenizer(ko_tokenizer, text, expected_tokens):
tokens = [token.text for token in ko_tokenizer(text)]
assert tokens == expected_tokens.split()
@pytest.mark.parametrize("text,expected_tags", TAG_TESTS)
def test_ko_tokenizer_tags(ko_tokenizer, text, expected_tags):
tags = [token.tag_ for token in ko_tokenizer(text)]
assert tags == expected_tags.split()
@pytest.mark.parametrize("text,expected_tags", FULL_TAG_TESTS)
def test_ko_tokenizer_full_tags(ko_tokenizer, text, expected_tags):
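    # "full_tags" preserves the unsplit morpheme tag sequence (e.g. "VV+EC") that
    # the Korean tokenizer stores in Doc.user_data, whereas token.tag_ only
    # carries the first component (compare TAG_TESTS above).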
tags = ko_tokenizer(text).user_data["full_tags"]
assert tags == expected_tags.split()
@pytest.mark.parametrize("text,expected_pos", POS_TESTS)
def test_ko_tokenizer_pos(ko_tokenizer, text, expected_pos):
pos = [token.pos_ for token in ko_tokenizer(text)]
assert pos == expected_pos.split()
def test_ko_empty_doc(ko_tokenizer):
tokens = ko_tokenizer("")
assert len(tokens) == 0
@pytest.mark.issue(10535)
def test_ko_tokenizer_unknown_tag(ko_tokenizer):
tokens = ko_tokenizer("미닛 리피터")
assert tokens[1].pos_ == "X"
# fmt: off
SPACY_TOKENIZER_TESTS = [
("있다.", "있다 ."),
("'예'는", "' 예 ' 는"),
("부 (富) 는", "부 ( 富 ) 는"),
("부(富)는", "부 ( 富 ) 는"),
("1982~1983.", "1982 ~ 1983 ."),
("사과·배·복숭아·수박은 모두 과일이다.", "사과 · 배 · 복숭아 · 수박은 모두 과일이다 ."),
("그렇구나~", "그렇구나~"),
("『9시 반의 당구』,", "『 9시 반의 당구 』 ,"),
]
# fmt: on
@pytest.mark.parametrize("text,expected_tokens", SPACY_TOKENIZER_TESTS)
def test_ko_spacy_tokenizer(ko_tokenizer_tokenizer, text, expected_tokens):
tokens = [token.text for token in ko_tokenizer_tokenizer(text)]
assert tokens == expected_tokens.split()
| 2,439 | 31.105263 | 75 | py |
spaCy | spaCy-master/spacy/tests/lang/ky/__init__.py | 0 | 0 | 0 | py |
|
spaCy | spaCy-master/spacy/tests/lang/ky/test_tokenizer.py | import pytest
INFIX_HYPHEN_TESTS = [
("Бала-чака жакшыбы?", "Бала-чака жакшыбы ?".split()),
("Кыз-келиндер кийими.", "Кыз-келиндер кийими .".split()),
]
PUNC_INSIDE_WORDS_TESTS = [
(
"Пассажир саны - 2,13 млн — киши/күнүнө (2010), 783,9 млн. киши/жылына.",
"Пассажир саны - 2,13 млн — киши / күнүнө ( 2010 ) ,"
" 783,9 млн. киши / жылына .".split(),
),
('То"кой', 'То " кой'.split()),
]
MIXED_ORDINAL_NUMS_TESTS = [("Эртең 22-январь...", "Эртең 22 - январь ...".split())]
ABBREV_TESTS = [
("Маселе б-ча эртең келет", "Маселе б-ча эртең келет".split()),
("Ахунбаев көч. турат.", "Ахунбаев көч. турат .".split()),
("«3-жылы (б.з.ч.) туулган", "« 3 - жылы ( б.з.ч. ) туулган".split()),
("Жүгөрү ж.б. дандар колдонулат", "Жүгөрү ж.б. дандар колдонулат".split()),
("3-4 кк. курулган.", "3 - 4 кк. курулган .".split()),
]
NAME_ABBREV_TESTS = [
("М.Жумаш", "М.Жумаш".split()),
("М.жумаш", "М.жумаш".split()),
("м.Жумаш", "м . Жумаш".split()),
("Жумаш М.Н.", "Жумаш М.Н.".split()),
("Жумаш.", "Жумаш .".split()),
]
TYPOS_IN_PUNC_TESTS = [
("«3-жылда , туулган", "« 3 - жылда , туулган".split()),
("«3-жылда,туулган", "« 3 - жылда , туулган".split()),
("«3-жылда,туулган.", "« 3 - жылда , туулган .".split()),
("Ал иштейт(качан?)", "Ал иштейт ( качан ? )".split()),
("Ал (качан?)иштейт", "Ал ( качан ?) иштейт".split()), # "?)" => "?)" or "? )"
]
LONG_TEXTS_TESTS = [
(
"Алыскы өлкөлөргө аздыр-көптүр татаалыраак жүрүштөргө чыккандар "
"азыраак: ал бир топ кымбат жана логистика маселесинин айынан "
"кыйла татаал. Мисалы, январдагы майрамдарда Мароккого үчүнчү "
"категориядагы маршрутка (100 чакырымдан кем эмес) барып "
"келгенге аракет кылдык.",
"Алыскы өлкөлөргө аздыр-көптүр татаалыраак жүрүштөргө чыккандар "
"азыраак : ал бир топ кымбат жана логистика маселесинин айынан "
"кыйла татаал . Мисалы , январдагы майрамдарда Мароккого үчүнчү "
"категориядагы маршрутка ( 100 чакырымдан кем эмес ) барып "
"келгенге аракет кылдык .".split(),
)
]
TESTCASES = (
INFIX_HYPHEN_TESTS
+ PUNC_INSIDE_WORDS_TESTS
+ MIXED_ORDINAL_NUMS_TESTS
+ ABBREV_TESTS
+ NAME_ABBREV_TESTS
+ LONG_TEXTS_TESTS
+ TYPOS_IN_PUNC_TESTS
)
NORM_TESTCASES = [
(
"ит, мышык ж.б.у.с. үй жаныбарлары.",
["ит", ",", "мышык", "жана башка ушул сыяктуу", "үй", "жаныбарлары", "."],
)
]
@pytest.mark.parametrize("text,expected_tokens", TESTCASES)
def test_ky_tokenizer_handles_testcases(ky_tokenizer, text, expected_tokens):
tokens = [token.text for token in ky_tokenizer(text) if not token.is_space]
assert expected_tokens == tokens
@pytest.mark.parametrize("text,norms", NORM_TESTCASES)
def test_ky_tokenizer_handles_norm_exceptions(ky_tokenizer, text, norms):
tokens = ky_tokenizer(text)
assert [token.norm_ for token in tokens] == norms
| 2,969 | 33.534884 | 84 | py |
spaCy | spaCy-master/spacy/tests/lang/la/__init__.py | 0 | 0 | 0 | py |
|
spaCy | spaCy-master/spacy/tests/lang/la/test_exception.py | import pytest
def test_la_tokenizer_handles_exc_in_text(la_tokenizer):
text = "scio te omnia facturum, ut nobiscum quam primum sis"
tokens = la_tokenizer(text)
assert len(tokens) == 11
assert tokens[6].text == "nobis"
| 236 | 25.333333 | 64 | py |
spaCy | spaCy-master/spacy/tests/lang/la/test_noun_chunks.py | import pytest
from spacy.tokens import Doc
def test_noun_chunks_is_parsed(la_tokenizer):
"""Test that noun_chunks raises Value Error for 'la' language if Doc is not parsed.
    The Doc constructed here comes straight from the tokenizer, so it has no
    dependency parse and noun_chunks should refuse to run.
"""
doc = la_tokenizer("Haec est sententia.")
with pytest.raises(ValueError):
list(doc.noun_chunks)
LA_NP_TEST_EXAMPLES = [
(
"Haec narrantur a poetis de Perseo.",
["DET", "VERB", "ADP", "NOUN", "ADP", "PROPN", "PUNCT"],
["nsubj:pass", "ROOT", "case", "obl", "case", "obl", "punct"],
[1, 0, -1, -1, -3, -1, -5],
["poetis", "Perseo"],
),
(
"Perseus autem in sinu matris dormiebat.",
["NOUN", "ADV", "ADP", "NOUN", "NOUN", "VERB", "PUNCT"],
["nsubj", "discourse", "case", "obl", "nmod", "ROOT", "punct"],
[5, 4, 3, -1, -1, 0, -1],
["Perseus", "sinu matris"],
),
]
@pytest.mark.parametrize(
"text,pos,deps,heads,expected_noun_chunks", LA_NP_TEST_EXAMPLES
)
def test_la_noun_chunks(la_tokenizer, text, pos, deps, heads, expected_noun_chunks):
tokens = la_tokenizer(text)
assert len(heads) == len(pos)
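    # heads in LA_NP_TEST_EXAMPLES are offsets relative to each token, while Doc
    # expects absolute token indices, hence the `head + i` conversion below.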
doc = Doc(
tokens.vocab,
words=[t.text for t in tokens],
heads=[head + i for i, head in enumerate(heads)],
deps=deps,
pos=pos,
)
noun_chunks = list(doc.noun_chunks)
assert len(noun_chunks) == len(expected_noun_chunks)
for i, np in enumerate(noun_chunks):
assert np.text == expected_noun_chunks[i]
| 1,628 | 29.166667 | 87 | py |
spaCy | spaCy-master/spacy/tests/lang/la/test_text.py | import pytest
from spacy.lang.la.lex_attrs import like_num
@pytest.mark.parametrize(
"text,match",
[
("IIII", True),
("VI", True),
("vi", True),
("IV", True),
("iv", True),
("IX", True),
("ix", True),
("MMXXII", True),
("0", True),
("1", True),
("quattuor", True),
("decem", True),
("tertius", True),
("canis", False),
("MMXX11", False),
(",", False),
],
)
def test_lex_attrs_like_number(la_tokenizer, text, match):
tokens = la_tokenizer(text)
assert len(tokens) == 1
assert tokens[0].like_num == match
@pytest.mark.parametrize("word", ["quinque"])
def test_la_lex_attrs_capitals(word):
assert like_num(word)
assert like_num(word.upper())
| 804 | 20.756757 | 58 | py |
spaCy | spaCy-master/spacy/tests/lang/lb/__init__.py | 0 | 0 | 0 | py |
|
spaCy | spaCy-master/spacy/tests/lang/lb/test_exceptions.py | import pytest
@pytest.mark.parametrize("text", ["z.B.", "Jan."])
def test_lb_tokenizer_handles_abbr(lb_tokenizer, text):
tokens = lb_tokenizer(text)
assert len(tokens) == 1
@pytest.mark.parametrize("text", ["d'Saach", "d'Kanner", "d’Welt", "d’Suen"])
def test_lb_tokenizer_splits_contractions(lb_tokenizer, text):
tokens = lb_tokenizer(text)
assert len(tokens) == 2
def test_lb_tokenizer_handles_exc_in_text(lb_tokenizer):
text = "Mee 't ass net evident, d'Liewen."
tokens = lb_tokenizer(text)
assert len(tokens) == 9
assert tokens[1].text == "'t"
| 586 | 26.952381 | 77 | py |
spaCy | spaCy-master/spacy/tests/lang/lb/test_prefix_suffix_infix.py | import pytest
@pytest.mark.parametrize("text,length", [("z.B.", 1), ("zb.", 2), ("(z.B.", 2)])
def test_lb_tokenizer_splits_prefix_interact(lb_tokenizer, text, length):
tokens = lb_tokenizer(text)
assert len(tokens) == length
@pytest.mark.parametrize("text", ["z.B.)"])
def test_lb_tokenizer_splits_suffix_interact(lb_tokenizer, text):
tokens = lb_tokenizer(text)
assert len(tokens) == 2
@pytest.mark.parametrize("text", ["(z.B.)"])
def test_lb_tokenizer_splits_even_wrap_interact(lb_tokenizer, text):
tokens = lb_tokenizer(text)
assert len(tokens) == 3
| 584 | 28.25 | 80 | py |
spaCy | spaCy-master/spacy/tests/lang/lb/test_text.py | import pytest
def test_lb_tokenizer_handles_long_text(lb_tokenizer):
text = """Den Nordwand an d'Sonn An der Zäit hunn sech den Nordwand an d'Sonn gestridden, wie vun hinnen zwee wuel méi staark wier, wéi e Wanderer, deen an ee waarme Mantel agepak war, iwwert de Wee koum. Si goufen sech eens, dass deejéinege fir de Stäerkste gëlle sollt, deen de Wanderer forcéiere géif, säi Mantel auszedoen. Den Nordwand huet mat aller Force geblosen, awer wat e méi geblosen huet, wat de Wanderer sech méi a säi Mantel agewéckelt huet. Um Enn huet den Nordwand säi Kampf opginn. Dunn huet d'Sonn d'Loft mat hire frëndleche Strale gewiermt, a schonn no kuerzer Zäit huet de Wanderer säi Mantel ausgedoen. Do huet den Nordwand missen zouginn, dass d'Sonn vun hinnen zwee de Stäerkste wier."""
tokens = lb_tokenizer(text)
assert len(tokens) == 142
@pytest.mark.parametrize(
"text,length",
[
("»Wat ass mat mir geschitt?«, huet hie geduecht.", 13),
("“Dëst fréi Opstoen”, denkt hien, “mécht ee ganz duercherneen. ", 15),
("Am Grand-Duché ass d'Liewen schéin, mee 't gëtt ze vill Autoen.", 14),
],
)
def test_lb_tokenizer_handles_examples(lb_tokenizer, text, length):
tokens = lb_tokenizer(text)
assert len(tokens) == length
| 1,270 | 56.772727 | 714 | py |
spaCy | spaCy-master/spacy/tests/lang/lg/__init__.py | 0 | 0 | 0 | py |
|
spaCy | spaCy-master/spacy/tests/lang/lg/test_tokenizer.py | import pytest
LG_BASIC_TOKENIZATION_TESTS = [
(
"Abooluganda ab’emmamba ababiri",
["Abooluganda", "ab’emmamba", "ababiri"],
),
]
@pytest.mark.parametrize("text,expected_tokens", LG_BASIC_TOKENIZATION_TESTS)
def test_lg_tokenizer_basic(lg_tokenizer, text, expected_tokens):
tokens = lg_tokenizer(text)
token_list = [token.text for token in tokens if not token.is_space]
assert expected_tokens == token_list
| 445 | 26.875 | 77 | py |
spaCy | spaCy-master/spacy/tests/lang/lt/__init__.py | 0 | 0 | 0 | py |
|
spaCy | spaCy-master/spacy/tests/lang/lt/test_text.py | import pytest
def test_lt_tokenizer_handles_long_text(lt_tokenizer):
text = """Tokios sausros kriterijus atitinka pirmadienį atlikti skaičiavimai, palyginus faktinį ir žemiausią vidutinį daugiametį vandens lygį. Nustatyta, kad iš 48 šalies vandens matavimo stočių 28-iose stotyse vandens lygis yra žemesnis arba lygus žemiausiam vidutiniam daugiamečiam šiltojo laikotarpio vandens lygiui."""
tokens = lt_tokenizer(text)
assert len(tokens) == 42
@pytest.mark.parametrize(
"text,length",
[
(
"177R Parodų rūmai–Ozo g. nuo vasario 18 d. bus skelbiamas interneto tinklalapyje.",
17,
),
(
"ISM universiteto doc. dr. Ieva Augutytė-Kvedaravičienė pastebi, kad tyrimais nustatyti elgesio pokyčiai.",
18,
),
],
)
def test_lt_tokenizer_handles_punct_abbrev(lt_tokenizer, text, length):
tokens = lt_tokenizer(text)
assert len(tokens) == length
@pytest.mark.parametrize("text", ["km.", "pvz.", "biol."])
def test_lt_tokenizer_abbrev_exceptions(lt_tokenizer, text):
tokens = lt_tokenizer(text)
assert len(tokens) == 2
@pytest.mark.parametrize(
"text,match",
[
("10", True),
("1", True),
("10,000", True),
("10,00", True),
("999.0", True),
("vienas", True),
("du", True),
("milijardas", True),
("šuo", False),
(",", False),
("1/2", True),
],
)
def test_lt_lex_attrs_like_number(lt_tokenizer, text, match):
tokens = lt_tokenizer(text)
assert len(tokens) == 1
assert tokens[0].like_num == match
| 1,619 | 29 | 326 | py |
spaCy | spaCy-master/spacy/tests/lang/lv/__init__.py | 0 | 0 | 0 | py |
|
spaCy | spaCy-master/spacy/tests/lang/lv/test_text.py | import pytest
def test_long_text(lv_tokenizer):
# Excerpt: European Convention on Human Rights
text = """
Ievērodamas, ka šī deklarācija paredz nodrošināt vispārēju un
efektīvu tajā pasludināto tiesību atzīšanu un ievērošanu;
Ievērodamas, ka Eiropas Padomes mērķis ir panākt lielāku vienotību
tās dalībvalstu starpā un ka viens no līdzekļiem, kā šo mērķi
sasniegt, ir cilvēka tiesību un pamatbrīvību ievērošana un turpmāka
īstenošana;
No jauna apliecinādamas patiesu pārliecību, ka šīs pamatbrīvības
ir taisnīguma un miera pamats visā pasaulē un ka tās vislabāk var
nodrošināt patiess demokrātisks politisks režīms no vienas puses un
vispārējo cilvēktiesību, uz kurām tās pamatojas, kopīga izpratne un
ievērošana no otras puses;
"""
tokens = lv_tokenizer(text)
assert len(tokens) == 109
@pytest.mark.xfail
def test_ordinal_number(lv_tokenizer):
text = "10. decembrī"
tokens = lv_tokenizer(text)
assert len(tokens) == 2
| 951 | 33 | 67 | py |
spaCy | spaCy-master/spacy/tests/lang/lv/test_tokenizer.py | import pytest
LV_BASIC_TOKENIZATION_TESTS = [
(
"Nevienu nedrīkst spīdzināt vai cietsirdīgi vai pazemojoši ar viņu "
"apieties vai sodīt.",
[
"Nevienu",
"nedrīkst",
"spīdzināt",
"vai",
"cietsirdīgi",
"vai",
"pazemojoši",
"ar",
"viņu",
"apieties",
"vai",
"sodīt",
".",
],
),
]
@pytest.mark.parametrize("text,expected_tokens", LV_BASIC_TOKENIZATION_TESTS)
def test_lv_tokenizer_basic(lv_tokenizer, text, expected_tokens):
tokens = lv_tokenizer(text)
token_list = [token.text for token in tokens if not token.is_space]
assert expected_tokens == token_list
| 764 | 23.677419 | 77 | py |
spaCy | spaCy-master/spacy/tests/lang/mk/__init__.py | 0 | 0 | 0 | py |
|
spaCy | spaCy-master/spacy/tests/lang/mk/test_text.py | import pytest
from spacy.lang.mk.lex_attrs import like_num
def test_tokenizer_handles_long_text(mk_tokenizer):
text = """
Во организациските работи или на нашите собранија со членството, никој од нас не зборуваше за
организацијата и идеологијата. Работна беше нашата работа, а не идеолошка. Што се однесува до социјализмот на
Делчев, неговата дејност зборува сама за себе - спротивно. Во суштина, водачите си имаа свои основни погледи и
свои разбирања за положбата и работите, коишто стоеја пред нив и ги завршуваа со голема упорност, настојчивост и
насоченост. Значи, идеологија имаше, само што нивната идеологија имаше своја оригиналност. Македонија денеска,
чиста рожба на животот и положбата во Македонија, кои му служеа како база на неговите побуди, беше дејност која
имаше потреба од ум за да си најде своја смисла. Таквата идеологија и заемното дејство на умот и срцето му
помогнаа на Делчев да не се занесе по патот на својата идеологија... Во суштина, Организацијата и нејзините
водачи имаа свои разбирања за работите и положбата во идеен поглед, но тоа беше врската, животот и положбата во
Македонија и го внесуваа во својата идеологија гласот на своето срце, и на крај, прибегнуваа до умот,
за да најдат смисла или да ѝ дадат. Тоа содејство и заемен сооднос на умот и срцето му помогнаа на Делчев да ја
држи својата идеологија во сообразност со положбата на работите... Водачите навистина направија една жртва
бидејќи на населението не му зборуваа за своите мисли и идеи. Тие се одрекоа од секаква субјективност во своите
мисли. Целта беше да не се зголемуваат целите и задачите како и преданоста во работата. Населението не можеше да
ги разбере овие идеи...
"""
tokens = mk_tokenizer(text)
assert len(tokens) == 297
@pytest.mark.parametrize(
"word,match",
[
("10", True),
("1", True),
("10.000", True),
("1000", True),
("бројка", False),
("999,0", True),
("еден", True),
("два", True),
("цифра", False),
("десет", True),
("сто", True),
("број", False),
("илјада", True),
("илјади", True),
("милион", True),
(",", False),
("милијарда", True),
("билион", True),
],
)
def test_mk_lex_attrs_like_number(mk_tokenizer, word, match):
tokens = mk_tokenizer(word)
assert len(tokens) == 1
assert tokens[0].like_num == match
@pytest.mark.parametrize("word", ["двесте", "два-три", "пет-шест"])
def test_mk_lex_attrs_capitals(word):
assert like_num(word)
assert like_num(word.upper())
@pytest.mark.parametrize(
"word",
[
"првиот",
"втора",
"четврт",
"четвртата",
"петти",
"петто",
"стоти",
"шеесетите",
"седумдесетите",
],
)
def test_mk_lex_attrs_like_number_for_ordinal(word):
assert like_num(word)
| 2,948 | 36.329114 | 116 | py |
spaCy | spaCy-master/spacy/tests/lang/ml/__init__.py | 0 | 0 | 0 | py |
|
spaCy | spaCy-master/spacy/tests/lang/ml/test_text.py | import pytest
def test_ml_tokenizer_handles_long_text(ml_tokenizer):
text = """അനാവശ്യമായി കണ്ണിലും മൂക്കിലും വായിലും സ്പർശിക്കാതിരിക്കുക"""
tokens = ml_tokenizer(text)
assert len(tokens) == 5
@pytest.mark.parametrize(
"text,length",
[
(
"എന്നാൽ അച്ചടിയുടെ ആവിർഭാവം ലിപിയിൽ കാര്യമായ മാറ്റങ്ങൾ വരുത്തിയത് കൂട്ടക്ഷരങ്ങളെ അണുഅക്ഷരങ്ങളായി പിരിച്ചുകൊണ്ടായിരുന്നു",
10,
),
("പരമ്പരാഗതമായി മലയാളം ഇടത്തുനിന്ന് വലത്തോട്ടാണ് എഴുതുന്നത്", 5),
],
)
def test_ml_tokenizer_handles_cnts(ml_tokenizer, text, length):
tokens = ml_tokenizer(text)
assert len(tokens) == length
| 643 | 27 | 133 | py |
spaCy | spaCy-master/spacy/tests/lang/ms/__init__.py | 0 | 0 | 0 | py |
|
spaCy | spaCy-master/spacy/tests/lang/ms/test_noun_chunks.py | import pytest
def test_noun_chunks_is_parsed_ms(ms_tokenizer):
"""Test that noun_chunks raises Value Error for 'ms' language if Doc is not parsed."""
doc = ms_tokenizer("sebelas")
with pytest.raises(ValueError):
list(doc.noun_chunks)
| 256 | 27.555556 | 90 | py |
spaCy | spaCy-master/spacy/tests/lang/ms/test_prefix_suffix_infix.py | import pytest
@pytest.mark.parametrize("text", ["(Ma'arif)"])
def test_ms_tokenizer_splits_no_special(id_tokenizer, text):
tokens = id_tokenizer(text)
assert len(tokens) == 3
@pytest.mark.parametrize("text", ["Ma'arif"])
def test_ms_tokenizer_splits_no_punct(id_tokenizer, text):
tokens = id_tokenizer(text)
assert len(tokens) == 1
@pytest.mark.parametrize("text", ["(Ma'arif"])
def test_ms_tokenizer_splits_prefix_punct(id_tokenizer, text):
tokens = id_tokenizer(text)
assert len(tokens) == 2
@pytest.mark.parametrize("text", ["Ma'arif)"])
def test_ms_tokenizer_splits_suffix_punct(id_tokenizer, text):
tokens = id_tokenizer(text)
assert len(tokens) == 2
@pytest.mark.parametrize("text", ["(Ma'arif)"])
def test_ms_tokenizer_splits_even_wrap(id_tokenizer, text):
tokens = id_tokenizer(text)
assert len(tokens) == 3
@pytest.mark.parametrize("text", ["(Ma'arif?)"])
def test_tokenizer_splits_uneven_wrap(id_tokenizer, text):
tokens = id_tokenizer(text)
assert len(tokens) == 4
@pytest.mark.parametrize("text,length", [("S.Kom.", 1), ("SKom.", 2), ("(S.Kom.", 2)])
def test_ms_tokenizer_splits_prefix_interact(id_tokenizer, text, length):
tokens = id_tokenizer(text)
assert len(tokens) == length
@pytest.mark.parametrize("text", ["S.Kom.)"])
def test_ms_tokenizer_splits_suffix_interact(id_tokenizer, text):
tokens = id_tokenizer(text)
assert len(tokens) == 2
@pytest.mark.parametrize("text", ["(S.Kom.)"])
def test_ms_tokenizer_splits_even_wrap_interact(id_tokenizer, text):
tokens = id_tokenizer(text)
assert len(tokens) == 3
@pytest.mark.parametrize("text", ["(S.Kom.?)"])
def test_ms_tokenizer_splits_uneven_wrap_interact(id_tokenizer, text):
tokens = id_tokenizer(text)
assert len(tokens) == 4
@pytest.mark.parametrize(
"text,length",
[("kerana", 1), ("Mahathir-Anwar", 3), ("Tun Dr. Ismail-Abdul Rahman", 6)],
)
def test_my_tokenizer_splits_hyphens(ms_tokenizer, text, length):
tokens = ms_tokenizer(text)
assert len(tokens) == length
@pytest.mark.parametrize("text", ["0.1-13.5", "0.0-0.1", "103.27-300"])
def test_ms_tokenizer_splits_numeric_range(id_tokenizer, text):
tokens = id_tokenizer(text)
assert len(tokens) == 3
@pytest.mark.parametrize("text", ["ini.Sani", "Halo.Malaysia"])
def test_ms_tokenizer_splits_period_infix(id_tokenizer, text):
tokens = id_tokenizer(text)
assert len(tokens) == 3
@pytest.mark.parametrize("text", ["Halo,Malaysia", "satu,dua"])
def test_ms_tokenizer_splits_comma_infix(id_tokenizer, text):
tokens = id_tokenizer(text)
assert len(tokens) == 3
assert tokens[0].text == text.split(",")[0]
assert tokens[1].text == ","
assert tokens[2].text == text.split(",")[1]
@pytest.mark.parametrize("text", ["halo...Malaysia", "dia...pergi"])
def test_ms_tokenizer_splits_ellipsis_infix(id_tokenizer, text):
tokens = id_tokenizer(text)
assert len(tokens) == 3
def test_ms_tokenizer_splits_double_hyphen_infix(id_tokenizer):
tokens = id_tokenizer("Arsene Wenger--pengurus Arsenal--mengadakan sidang media.")
assert len(tokens) == 10
assert tokens[0].text == "Arsene"
assert tokens[1].text == "Wenger"
assert tokens[2].text == "--"
assert tokens[3].text == "pengurus"
assert tokens[4].text == "Arsenal"
assert tokens[5].text == "--"
assert tokens[6].text == "mengadakan"
assert tokens[7].text == "sidang"
assert tokens[8].text == "media"
assert tokens[9].text == "."
| 3,512 | 30.088496 | 86 | py |
spaCy | spaCy-master/spacy/tests/lang/ms/test_text.py | import pytest
from spacy.lang.ms.lex_attrs import like_num
@pytest.mark.parametrize("word", ["sebelas"])
def test_ms_lex_attrs_capitals(word):
assert like_num(word)
assert like_num(word.upper())
| 206 | 19.7 | 45 | py |
spaCy | spaCy-master/spacy/tests/lang/nb/__init__.py | 0 | 0 | 0 | py |
|
spaCy | spaCy-master/spacy/tests/lang/nb/test_noun_chunks.py | import pytest
def test_noun_chunks_is_parsed_nb(nb_tokenizer):
"""Test that noun_chunks raises Value Error for 'nb' language if Doc is not parsed."""
doc = nb_tokenizer("Smørsausen brukes bl.a. til")
with pytest.raises(ValueError):
list(doc.noun_chunks)
| 276 | 29.777778 | 90 | py |
spaCy | spaCy-master/spacy/tests/lang/nb/test_tokenizer.py | import pytest
NB_TOKEN_EXCEPTION_TESTS = [
(
"Smørsausen brukes bl.a. til fisk",
["Smørsausen", "brukes", "bl.a.", "til", "fisk"],
),
(
"Jeg kommer først kl. 13 pga. diverse forsinkelser",
["Jeg", "kommer", "først", "kl.", "13", "pga.", "diverse", "forsinkelser"],
),
]
@pytest.mark.parametrize("text,expected_tokens", NB_TOKEN_EXCEPTION_TESTS)
def test_nb_tokenizer_handles_exception_cases(nb_tokenizer, text, expected_tokens):
tokens = nb_tokenizer(text)
token_list = [token.text for token in tokens if not token.is_space]
assert expected_tokens == token_list
| 625 | 30.3 | 83 | py |
spaCy | spaCy-master/spacy/tests/lang/ne/__init__.py | 0 | 0 | 0 | py |
|
spaCy | spaCy-master/spacy/tests/lang/ne/test_text.py | import pytest
def test_ne_tokenizer_handlers_long_text(ne_tokenizer):
text = """मैले पाएको सर्टिफिकेटलाई म त बोक्रो सम्झन्छु र अभ्यास तब सुरु भयो, जब मैले कलेज पार गरेँ र जीवनको पढाइ सुरु गरेँ ।"""
tokens = ne_tokenizer(text)
assert len(tokens) == 24
@pytest.mark.parametrize(
"text,length", [("समय जान कति पनि बेर लाग्दैन ।", 7), ("म ठूलो हुँदै थिएँ ।", 5)]
)
def test_ne_tokenizer_handles_cnts(ne_tokenizer, text, length):
tokens = ne_tokenizer(text)
assert len(tokens) == length
| 510 | 30.9375 | 131 | py |
spaCy | spaCy-master/spacy/tests/lang/nl/__init__.py | 0 | 0 | 0 | py |
|
spaCy | spaCy-master/spacy/tests/lang/nl/test_noun_chunks.py | import pytest
from spacy.tokens import Doc
from spacy.util import filter_spans
@pytest.fixture
def nl_sample(nl_vocab):
# TEXT :
# Haar vriend lacht luid. We kregen alweer ruzie toen we de supermarkt ingingen.
# Aan het begin van de supermarkt is al het fruit en de groentes. Uiteindelijk hebben we dan ook
# geen avondeten gekocht.
words = [
"Haar",
"vriend",
"lacht",
"luid",
".",
"We",
"kregen",
"alweer",
"ruzie",
"toen",
"we",
"de",
"supermarkt",
"ingingen",
".",
"Aan",
"het",
"begin",
"van",
"de",
"supermarkt",
"is",
"al",
"het",
"fruit",
"en",
"de",
"groentes",
".",
"Uiteindelijk",
"hebben",
"we",
"dan",
"ook",
"geen",
"avondeten",
"gekocht",
".",
]
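    # heads are absolute token indices, e.g. token 0 "Haar" attaches to token 1 "vriend"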
heads = [
1,
2,
2,
2,
2,
6,
6,
6,
6,
13,
13,
12,
13,
6,
6,
17,
17,
24,
20,
20,
17,
24,
24,
24,
24,
27,
27,
24,
24,
36,
36,
36,
36,
36,
35,
36,
36,
36,
]
deps = [
"nmod:poss",
"nsubj",
"ROOT",
"advmod",
"punct",
"nsubj",
"ROOT",
"advmod",
"obj",
"mark",
"nsubj",
"det",
"obj",
"advcl",
"punct",
"case",
"det",
"obl",
"case",
"det",
"nmod",
"cop",
"advmod",
"det",
"ROOT",
"cc",
"det",
"conj",
"punct",
"advmod",
"aux",
"nsubj",
"advmod",
"advmod",
"det",
"obj",
"ROOT",
"punct",
]
pos = [
"PRON",
"NOUN",
"VERB",
"ADJ",
"PUNCT",
"PRON",
"VERB",
"ADV",
"NOUN",
"SCONJ",
"PRON",
"DET",
"NOUN",
"NOUN",
"PUNCT",
"ADP",
"DET",
"NOUN",
"ADP",
"DET",
"NOUN",
"AUX",
"ADV",
"DET",
"NOUN",
"CCONJ",
"DET",
"NOUN",
"PUNCT",
"ADJ",
"AUX",
"PRON",
"ADV",
"ADV",
"DET",
"NOUN",
"VERB",
"PUNCT",
]
return Doc(nl_vocab, words=words, heads=heads, deps=deps, pos=pos)
@pytest.fixture
def nl_reference_chunking():
# Using frog https://github.com/LanguageMachines/frog/ we obtain the following NOUN-PHRASES:
return [
"haar vriend",
"we",
"ruzie",
"we",
"de supermarkt",
"het begin",
"de supermarkt",
"het fruit",
"de groentes",
"we",
"geen avondeten",
]
def test_need_dep(nl_tokenizer):
"""
Test that noun_chunks raises Value Error for 'nl' language if Doc is not parsed.
"""
txt = "Haar vriend lacht luid."
doc = nl_tokenizer(txt)
with pytest.raises(ValueError):
list(doc.noun_chunks)
def test_chunking(nl_sample, nl_reference_chunking):
"""
Test the noun chunks of a sample text. Uses a sample.
The sample text simulates a Doc object as would be produced by nl_core_news_md.
"""
chunks = [s.text.lower() for s in nl_sample.noun_chunks]
assert chunks == nl_reference_chunking
@pytest.mark.issue(10846)
def test_no_overlapping_chunks(nl_vocab):
# fmt: off
doc = Doc(
nl_vocab,
words=["Dit", "programma", "wordt", "beschouwd", "als", "'s", "werelds", "eerste", "computerprogramma"],
deps=["det", "nsubj:pass", "aux:pass", "ROOT", "mark", "det", "fixed", "amod", "xcomp"],
heads=[1, 3, 3, 3, 8, 8, 5, 8, 3],
pos=["DET", "NOUN", "AUX", "VERB", "SCONJ", "DET", "NOUN", "ADJ", "NOUN"],
)
# fmt: on
chunks = list(doc.noun_chunks)
assert filter_spans(chunks) == chunks
| 4,304 | 17.964758 | 112 | py |
spaCy | spaCy-master/spacy/tests/lang/nl/test_text.py | import pytest
from spacy.lang.nl.lex_attrs import like_num
@pytest.mark.parametrize("word", ["elf", "elfde"])
def test_nl_lex_attrs_capitals(word):
assert like_num(word)
assert like_num(word.upper())
@pytest.mark.parametrize(
"text,num_tokens",
[
(
"De aftredende minister-president benadrukte al dat zijn partij inhoudelijk weinig gemeen heeft met de groenen.",
16,
),
("Hij is sociaal-cultureel werker.", 5),
("Er staan een aantal dure auto's in de garage.", 10),
],
)
def test_tokenizer_doesnt_split_hyphens(nl_tokenizer, text, num_tokens):
tokens = nl_tokenizer(text)
assert len(tokens) == num_tokens
| 694 | 25.730769 | 125 | py |
spaCy | spaCy-master/spacy/tests/lang/pl/__init__.py | 0 | 0 | 0 | py |
|
spaCy | spaCy-master/spacy/tests/lang/pl/test_text.py | """Words like numbers are recognized correctly."""
import pytest
@pytest.mark.parametrize(
"text,match",
[
("10", True),
("1", True),
("10,000", True),
("10,00", True),
("jeden", True),
("dwa", True),
("milion", True),
("pies", False),
(",", False),
("1/2", True),
],
)
def test_lex_attrs_like_number(pl_tokenizer, text, match):
tokens = pl_tokenizer(text)
assert len(tokens) == 1
assert tokens[0].like_num == match
| 522 | 20.791667 | 58 | py |
spaCy | spaCy-master/spacy/tests/lang/pl/test_tokenizer.py | import pytest
DOT_TESTS = [
("tel.", ["tel", "."]),
("0 zł 99 gr", ["0", "zł", "99", "gr"]),
]
HYPHEN_TESTS = [
("cztero-", ["cztero-"]),
("jedno-", ["jedno-"]),
("dwu-", ["dwu-"]),
("trzy-", ["trzy-"]),
]
TESTCASES = DOT_TESTS + HYPHEN_TESTS
@pytest.mark.parametrize("text,expected_tokens", TESTCASES)
def test_tokenizer_handles_testcases(pl_tokenizer, text, expected_tokens):
tokens = pl_tokenizer(text)
token_list = [token.text for token in tokens if not token.is_space]
assert expected_tokens == token_list
| 553 | 22.083333 | 74 | py |
spaCy | spaCy-master/spacy/tests/lang/pt/__init__.py | 0 | 0 | 0 | py |
|
spaCy | spaCy-master/spacy/tests/lang/pt/test_noun_chunks.py | import pytest
from spacy.tokens import Doc
# fmt: off
@pytest.mark.parametrize(
"words,heads,deps,pos,chunk_offsets",
[
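        # chunk_offsets are (start, end) token-index pairs with an exclusive end,
        # matching the (c.start, c.end) spans collected in the assertion below.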
# determiner + noun
# um cachorro -> um cachorro
(
["um", "cachorro"],
[1, 1],
["det", "ROOT"],
["DET", "NOUN"],
[(0, 2)],
),
# two determiners + noun
# meu o pai -> meu o pai
(
["meu", "o", "pai"],
[2, 2, 2],
["det", "det", "ROOT"],
["DET", "DET", "NOUN"],
[(0, 3)],
),
# two determiners + noun
# todos essos caros -> todos essos caros
(
["todos", "essos", "caros"],
[2, 2, 2],
["det", "det", "ROOT"],
["DET", "DET", "NOUN"],
[(0, 3)],
),
# two determiners, one is after noun
# um irmão meu -> um irmão meu
(
["um", "irmão", "meu"],
[1, 1, 1],
["det", "ROOT", "det"],
["DET", "NOUN", "DET"],
[(0, 3)],
),
# two determiners + noun
# o meu pai -> o meu pai
(
["o", "meu", "pai"],
[2, 2, 2],
["det","det", "ROOT"],
["DET", "DET", "NOUN"],
[(0, 3)],
),
# relative pronoun
# A bicicleta essa está estragada -> A bicicleta
(
['A', 'bicicleta', 'essa', 'está', 'estragada'],
[1, 4, 1, 4, 4],
['det', 'nsubj', 'det', 'cop', 'ROOT'],
['DET', 'NOUN', 'PRON', 'AUX', 'ADJ'],
[(0,2)]
),
# relative subclause
# o computador que comprou -> o computador
(
['o', 'computador', 'que', 'comprou'],
[1, 1, 3, 1],
['det', 'ROOT', 'nsubj', 'acl:relcl'],
['DET', 'NOUN', 'PRON', 'VERB'],
[(0, 2), (2, 3)]
),
# det + noun + adj
# O cachorro marrom -> O cachorro marrom
(
["O", "cachorro", "marrom"],
[1, 1, 1],
["det", "ROOT", "amod"],
["DET", "NOUN", "ADJ"],
[(0, 3)],
),
# det + noun + adj plural
# As calças baratas -> As calças baratas
(
["As", "calças", "baratas"],
[1, 1, 1],
["det", "ROOT", "amod"],
["DET", "NOUN", "ADJ"],
[(0, 3)],
),
# det + adj + noun
# Uma boa ideia -> Uma boa ideia
(
['uma', 'boa', 'ideia'],
[2, 2, 2],
["det", "amod", "ROOT"],
["DET", "ADJ", "NOUN"],
[(0,3)]
),
# multiple adjectives
# Uma garota esperta e inteligente -> Uma garota esperta e inteligente
(
["Uma", "garota", "esperta", "e", "inteligente"],
[1, 1, 1, 4, 2],
["det", "ROOT", "amod", "cc", "conj"],
["DET", "NOUN", "ADJ", "CCONJ", "ADJ"],
[(0,5)]
),
# determiner, adjective, compound created by flat
# a grande São Paolo -> a grande São Paolo
(
["a", "grande", "São", "Paolo"],
[2, 2, 2, 2],
["det", "amod", "ROOT", "flat:name"],
["DET", "ADJ", "PROPN", "PROPN"],
[(0,4)]
),
# one determiner + one noun + one adjective qualified by an adverb
# alguns fazendeiros muito ricos -> alguns fazendeiros muito ricos
(
['alguns', 'fazendeiros', 'muito', 'ricos'],
[1, 1, 3, 1],
['det', 'ROOT', 'advmod', 'amod'],
['DET', 'NOUN', 'ADV', 'ADJ'],
[(0,4)]
),
# Two NPs conjuncted
# Eu tenho um cachorro e um gato -> Eu, um cacharo, um gato
(
["Eu", "tenho", "um", "cachorro", "e", "um", "gato"],
[1, 1, 3, 1, 6, 6, 3],
['nsubj', 'ROOT', 'det', 'obj', 'cc', 'det', 'conj'],
['PRON', 'VERB', 'DET', 'NOUN', 'CCONJ', 'DET', 'NOUN'],
[(0,1), (2,4), (5,7)]
),
# Two NPs together
# o escritor brasileiro Aníbal Machado -> o escritor brasileiro, Aníbal Machado
(
['o', 'escritor', 'brasileiro', 'Aníbal', 'Machado'],
[1, 1, 1, 1, 3],
['det', 'ROOT', 'amod', 'appos', 'flat:name'],
['DET', 'NOUN', 'ADJ', 'PROPN', 'PROPN'],
[(0, 3), (3, 5)]
),
# Noun compound, person name and titles
# Dom Pedro II -> Dom Pedro II
(
["Dom", "Pedro", "II"],
[0, 0, 0],
["ROOT", "flat:name", "flat:name"],
["PROPN", "PROPN", "PROPN"],
[(0,3)]
),
# Noun compound created by flat
# os Estados Unidos -> os Estados Unidos
(
["os", "Estados", "Unidos"],
[1, 1, 1],
["det", "ROOT", "flat:name"],
["DET", "PROPN", "PROPN"],
[(0,3)]
),
# nmod relation between NPs
# a destruição da cidade -> a destruição, cidade
(
['a', 'destruição', 'da', 'cidade'],
[1, 1, 3, 1],
['det', 'ROOT', 'case', 'nmod'],
['DET', 'NOUN', 'ADP', 'NOUN'],
[(0,2), (3,4)]
),
# Compounding by nmod, several NPs chained together
# a primeira fábrica de medicamentos do governo -> a primeira fábrica, medicamentos, governo
(
["a", "primeira", "fábrica", "de", "medicamentos", "do", "governo"],
[2, 2, 2, 4, 2, 6, 2],
['det', 'amod', 'ROOT', 'case', 'nmod', 'case', 'nmod'],
['DET', 'ADJ', 'NOUN', 'ADP', 'NOUN', 'ADP', 'NOUN'],
[(0, 3), (4, 5), (6, 7)]
),
# several NPs
# Tradução da reportagem de Susana -> Tradução, reportagem, Susana
(
['Tradução', 'da', 'reportagem', 'de', 'Susana'],
[0, 2, 0, 4, 2],
['ROOT', 'case', 'nmod', 'case', 'nmod'],
['NOUN', 'ADP', 'NOUN', 'ADP', 'PROPN'],
[(0,1), (2,3), (4,5)]
),
# Several NPs
# O gato gordo da Susana e seu amigo -> O gato gordo, Susana, seu amigo
(
['O', 'gato', 'gordo', 'da', 'Susana', 'e', 'seu', 'amigo'],
[1, 1, 1, 4, 1, 7, 7, 1],
['det', 'ROOT', 'amod', 'case', 'nmod', 'cc', 'det', 'conj'],
['DET', 'NOUN', 'ADJ', 'ADP', 'PROPN', 'CCONJ', 'DET', 'NOUN'],
[(0,3), (4,5), (6,8)]
),
# Passive subject
# Os novos gastos são alimentados pela grande conta bancária de Clinton -> Os novos gastos, grande conta bancária, Clinton
(
['Os', 'novos', 'gastos', 'são', 'alimentados', 'pela', 'grande', 'conta', 'bancária', 'de', 'Clinton'],
[2, 2, 4, 4, 4, 7, 7, 4, 7, 10, 7],
['det', 'amod', 'nsubj:pass', 'aux:pass', 'ROOT', 'case', 'amod', 'obl:agent', 'amod', 'case', 'nmod'],
['DET', 'ADJ', 'NOUN', 'AUX', 'VERB', 'ADP', 'ADJ', 'NOUN', 'ADJ', 'ADP', 'PROPN'],
[(0, 3), (6, 9), (10, 11)]
)
],
)
# fmt: on
def test_pt_noun_chunks(pt_vocab, words, heads, deps, pos, chunk_offsets):
doc = Doc(pt_vocab, words=words, heads=heads, deps=deps, pos=pos)
assert [(c.start, c.end) for c in doc.noun_chunks] == chunk_offsets
def test_noun_chunks_is_parsed_pt(pt_tokenizer):
"""Test that noun_chunks raises Value Error for 'pt' language if Doc is not parsed."""
doc = pt_tokenizer("en Oxford este verano")
with pytest.raises(ValueError):
list(doc.noun_chunks)
| 7,715 | 33.600897 | 130 | py |
spaCy | spaCy-master/spacy/tests/lang/pt/test_text.py | import pytest
from spacy.lang.pt.lex_attrs import like_num
@pytest.mark.parametrize("word", ["onze", "quadragésimo"])
def test_pt_lex_attrs_capitals(word):
assert like_num(word)
assert like_num(word.upper())
| 219 | 21 | 58 | py |
spaCy | spaCy-master/spacy/tests/lang/ro/__init__.py | 0 | 0 | 0 | py |
|
spaCy | spaCy-master/spacy/tests/lang/ro/test_tokenizer.py | import pytest
TEST_CASES = [
(
"Adresa este str. Principală nr. 5.",
["Adresa", "este", "str.", "Principală", "nr.", "5", "."],
),
("Teste, etc.", ["Teste", ",", "etc."]),
("Lista, ș.a.m.d.", ["Lista", ",", "ș.a.m.d."]),
("Și d.p.d.v. al...", ["Și", "d.p.d.v.", "al", "..."]),
# number tests
("Clasa a 4-a.", ["Clasa", "a", "4-a", "."]),
("Al 12-lea ceas.", ["Al", "12-lea", "ceas", "."]),
]
@pytest.mark.parametrize("text,expected_tokens", TEST_CASES)
def test_ro_tokenizer_handles_testcases(ro_tokenizer, text, expected_tokens):
tokens = ro_tokenizer(text)
token_list = [token.text for token in tokens if not token.is_space]
assert expected_tokens == token_list
| 727 | 32.090909 | 77 | py |
spaCy | spaCy-master/spacy/tests/lang/ru/__init__.py | 0 | 0 | 0 | py |
|
spaCy | spaCy-master/spacy/tests/lang/ru/test_exceptions.py | import pytest
@pytest.mark.parametrize(
"text,norms",
[("пн.", ["понедельник"]), ("пт.", ["пятница"]), ("дек.", ["декабрь"])],
)
def test_ru_tokenizer_abbrev_exceptions(ru_tokenizer, text, norms):
tokens = ru_tokenizer(text)
assert len(tokens) == 1
assert [token.norm_ for token in tokens] == norms
| 321 | 25.833333 | 76 | py |
spaCy | spaCy-master/spacy/tests/lang/ru/test_lemmatizer.py | import pytest
from spacy.tokens import Doc
pytestmark = pytest.mark.filterwarnings("ignore::DeprecationWarning")
def test_ru_doc_lemmatization(ru_lemmatizer):
words = ["мама", "мыла", "раму"]
pos = ["NOUN", "VERB", "NOUN"]
morphs = [
"Animacy=Anim|Case=Nom|Gender=Fem|Number=Sing",
"Aspect=Imp|Gender=Fem|Mood=Ind|Number=Sing|Tense=Past|VerbForm=Fin|Voice=Act",
"Animacy=Anim|Case=Acc|Gender=Fem|Number=Sing",
]
doc = Doc(ru_lemmatizer.vocab, words=words, pos=pos, morphs=morphs)
doc = ru_lemmatizer(doc)
lemmas = [token.lemma_ for token in doc]
assert lemmas == ["мама", "мыть", "рама"]
@pytest.mark.parametrize(
"text,lemmas",
[
("гвоздики", ["гвоздик", "гвоздика"]),
("люди", ["человек"]),
("реки", ["река"]),
("кольцо", ["кольцо"]),
("пепперони", ["пепперони"]),
],
)
def test_ru_lemmatizer_noun_lemmas(ru_lemmatizer, text, lemmas):
doc = Doc(ru_lemmatizer.vocab, words=[text], pos=["NOUN"])
result_lemmas = ru_lemmatizer.pymorphy2_lemmatize(doc[0])
assert sorted(result_lemmas) == lemmas
@pytest.mark.parametrize(
"text,pos,morph,lemma",
[
("рой", "NOUN", "", "рой"),
("рой", "VERB", "", "рыть"),
("клей", "NOUN", "", "клей"),
("клей", "VERB", "", "клеить"),
("три", "NUM", "", "три"),
("кос", "NOUN", "Number=Sing", "кос"),
("кос", "NOUN", "Number=Plur", "коса"),
("кос", "ADJ", "", "косой"),
("потом", "NOUN", "", "пот"),
("потом", "ADV", "", "потом"),
],
)
def test_ru_lemmatizer_works_with_different_pos_homonyms(
ru_lemmatizer, text, pos, morph, lemma
):
doc = Doc(ru_lemmatizer.vocab, words=[text], pos=[pos], morphs=[morph])
result_lemmas = ru_lemmatizer.pymorphy2_lemmatize(doc[0])
assert result_lemmas == [lemma]
@pytest.mark.parametrize(
"text,morph,lemma",
[
("гвоздики", "Gender=Fem", "гвоздика"),
("гвоздики", "Gender=Masc", "гвоздик"),
("вина", "Gender=Fem", "вина"),
("вина", "Gender=Neut", "вино"),
],
)
def test_ru_lemmatizer_works_with_noun_homonyms(ru_lemmatizer, text, morph, lemma):
doc = Doc(ru_lemmatizer.vocab, words=[text], pos=["NOUN"], morphs=[morph])
result_lemmas = ru_lemmatizer.pymorphy2_lemmatize(doc[0])
assert result_lemmas == [lemma]
def test_ru_lemmatizer_punct(ru_lemmatizer):
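    # the pymorphy-based lemmatizer normalizes guillemets « » to a plain ASCII quote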
doc = Doc(ru_lemmatizer.vocab, words=["«"], pos=["PUNCT"])
assert ru_lemmatizer.pymorphy2_lemmatize(doc[0]) == ['"']
doc = Doc(ru_lemmatizer.vocab, words=["»"], pos=["PUNCT"])
assert ru_lemmatizer.pymorphy2_lemmatize(doc[0]) == ['"']
def test_ru_doc_lookup_lemmatization(ru_lookup_lemmatizer):
assert ru_lookup_lemmatizer.mode == "pymorphy3_lookup"
words = ["мама", "мыла", "раму"]
pos = ["NOUN", "VERB", "NOUN"]
morphs = [
"Animacy=Anim|Case=Nom|Gender=Fem|Number=Sing",
"Aspect=Imp|Gender=Fem|Mood=Ind|Number=Sing|Tense=Past|VerbForm=Fin|Voice=Act",
"Animacy=Anim|Case=Acc|Gender=Fem|Number=Sing",
]
doc = Doc(ru_lookup_lemmatizer.vocab, words=words, pos=pos, morphs=morphs)
doc = ru_lookup_lemmatizer(doc)
lemmas = [token.lemma_ for token in doc]
assert lemmas == ["мама", "мыла", "раму"]
@pytest.mark.parametrize(
"word,lemma",
(
("бременем", "бремя"),
("будешь", "быть"),
("какая-то", "какой-то"),
),
)
def test_ru_lookup_lemmatizer(ru_lookup_lemmatizer, word, lemma):
assert ru_lookup_lemmatizer.mode == "pymorphy3_lookup"
doc = Doc(ru_lookup_lemmatizer.vocab, words=[word])
assert ru_lookup_lemmatizer(doc)[0].lemma_ == lemma
| 3,676 | 32.427273 | 87 | py |