repo (string, 2–152 chars, nullable) | file (string, 15–239 chars) | code (string, 0–58.4M chars) | file_length (int64, 0–58.4M) | avg_line_length (float64, 0–1.81M) | max_line_length (int64, 0–12.7M) | extension_type (string, 364 classes) |
---|---|---|---|---|---|---|
spaCy | spaCy-master/spacy/tests/lang/ru/test_text.py | import pytest
from spacy.lang.ru.lex_attrs import like_num
@pytest.mark.parametrize("word", ["одиннадцать"])
def test_ru_lex_attrs_capitals(word):
assert like_num(word)
assert like_num(word.upper())
| 210 | 20.1 | 49 | py |
spaCy | spaCy-master/spacy/tests/lang/ru/test_tokenizer.py | from string import punctuation
import pytest
PUNCT_OPEN = ["(", "[", "{", "*"]
PUNCT_CLOSE = [")", "]", "}", "*"]
PUNCT_PAIRED = [("(", ")"), ("[", "]"), ("{", "}"), ("*", "*")]
@pytest.mark.parametrize("text", ["(", "((", "<"])
def test_ru_tokenizer_handles_only_punct(ru_tokenizer, text):
tokens = ru_tokenizer(text)
assert len(tokens) == len(text)
@pytest.mark.parametrize("punct", PUNCT_OPEN)
@pytest.mark.parametrize("text", ["Привет"])
def test_ru_tokenizer_splits_open_punct(ru_tokenizer, punct, text):
tokens = ru_tokenizer(punct + text)
assert len(tokens) == 2
assert tokens[0].text == punct
assert tokens[1].text == text
@pytest.mark.parametrize("punct", PUNCT_CLOSE)
@pytest.mark.parametrize("text", ["Привет"])
def test_ru_tokenizer_splits_close_punct(ru_tokenizer, punct, text):
tokens = ru_tokenizer(text + punct)
assert len(tokens) == 2
assert tokens[0].text == text
assert tokens[1].text == punct
@pytest.mark.parametrize("punct", PUNCT_OPEN)
@pytest.mark.parametrize("punct_add", ["`"])
@pytest.mark.parametrize("text", ["Привет"])
def test_ru_tokenizer_splits_two_diff_open_punct(ru_tokenizer, punct, punct_add, text):
tokens = ru_tokenizer(punct + punct_add + text)
assert len(tokens) == 3
assert tokens[0].text == punct
assert tokens[1].text == punct_add
assert tokens[2].text == text
@pytest.mark.parametrize("punct", PUNCT_CLOSE)
@pytest.mark.parametrize("punct_add", ["'"])
@pytest.mark.parametrize("text", ["Привет"])
def test_ru_tokenizer_splits_two_diff_close_punct(ru_tokenizer, punct, punct_add, text):
tokens = ru_tokenizer(text + punct + punct_add)
assert len(tokens) == 3
assert tokens[0].text == text
assert tokens[1].text == punct
assert tokens[2].text == punct_add
@pytest.mark.parametrize("punct", PUNCT_OPEN)
@pytest.mark.parametrize("text", ["Привет"])
def test_ru_tokenizer_splits_same_open_punct(ru_tokenizer, punct, text):
tokens = ru_tokenizer(punct + punct + punct + text)
assert len(tokens) == 4
assert tokens[0].text == punct
assert tokens[3].text == text
@pytest.mark.parametrize("punct", PUNCT_CLOSE)
@pytest.mark.parametrize("text", ["Привет"])
def test_ru_tokenizer_splits_same_close_punct(ru_tokenizer, punct, text):
tokens = ru_tokenizer(text + punct + punct + punct)
assert len(tokens) == 4
assert tokens[0].text == text
assert tokens[1].text == punct
@pytest.mark.parametrize("text", ["'Тест"])
def test_ru_tokenizer_splits_open_apostrophe(ru_tokenizer, text):
tokens = ru_tokenizer(text)
assert len(tokens) == 2
assert tokens[0].text == "'"
@pytest.mark.parametrize("text", ["Тест''"])
def test_ru_tokenizer_splits_double_end_quote(ru_tokenizer, text):
tokens = ru_tokenizer(text)
assert len(tokens) == 2
tokens_punct = ru_tokenizer("''")
assert len(tokens_punct) == 1
@pytest.mark.parametrize("punct_open,punct_close", PUNCT_PAIRED)
@pytest.mark.parametrize("text", ["Тест"])
def test_ru_tokenizer_splits_open_close_punct(
ru_tokenizer, punct_open, punct_close, text
):
tokens = ru_tokenizer(punct_open + text + punct_close)
assert len(tokens) == 3
assert tokens[0].text == punct_open
assert tokens[1].text == text
assert tokens[2].text == punct_close
@pytest.mark.parametrize("punct_open,punct_close", PUNCT_PAIRED)
@pytest.mark.parametrize("punct_open2,punct_close2", [("`", "'")])
@pytest.mark.parametrize("text", ["Тест"])
def test_ru_tokenizer_two_diff_punct(
ru_tokenizer, punct_open, punct_close, punct_open2, punct_close2, text
):
tokens = ru_tokenizer(punct_open2 + punct_open + text + punct_close + punct_close2)
assert len(tokens) == 5
assert tokens[0].text == punct_open2
assert tokens[1].text == punct_open
assert tokens[2].text == text
assert tokens[3].text == punct_close
assert tokens[4].text == punct_close2
@pytest.mark.parametrize("text", ["Тест."])
def test_ru_tokenizer_splits_trailing_dot(ru_tokenizer, text):
tokens = ru_tokenizer(text)
assert tokens[1].text == "."
def test_ru_tokenizer_splits_bracket_period(ru_tokenizer):
text = "(Раз, два, три, проверка)."
tokens = ru_tokenizer(text)
assert tokens[len(tokens) - 1].text == "."
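# The words below end in a combining accent mark; the accent must not interfere
# with the usual punctuation-splitting rules at the word boundary.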
@pytest.mark.parametrize(
"text",
[
"рекоменду́я подда́ть жару́. Самого́ Баргамота",
"РЕКОМЕНДУ́Я ПОДДА́ТЬ ЖАРУ́. САМОГО́ БАРГАМОТА",
"рекоменду̍я подда̍ть жару̍.Самого̍ Баргамота",
"рекоменду̍я подда̍ть жару̍.'Самого̍ Баргамота",
"рекоменду̍я подда̍ть жару̍,самого̍ Баргамота",
"рекоменду̍я подда̍ть жару̍:самого̍ Баргамота",
"рекоменду̍я подда̍ть жару̍. самого̍ Баргамота",
"рекоменду̍я подда̍ть жару̍, самого̍ Баргамота",
"рекоменду̍я подда̍ть жару̍: самого̍ Баргамота",
"рекоменду̍я подда̍ть жару̍-самого̍ Баргамота",
],
)
def test_ru_tokenizer_handles_final_diacritics(ru_tokenizer, text):
tokens = ru_tokenizer(text)
assert tokens[2].text in ("жару́", "ЖАРУ́", "жару̍")
assert tokens[3].text in punctuation
@pytest.mark.parametrize(
"text",
[
"РЕКОМЕНДУ́Я ПОДДА́ТЬ ЖАРУ́.САМОГО́ БАРГАМОТА",
"рекоменду̍я подда̍ть жару́.самого́ Баргамота",
],
)
def test_ru_tokenizer_handles_final_diacritic_and_period(ru_tokenizer, text):
tokens = ru_tokenizer(text)
assert tokens[2].text.lower() == "жару́.самого́"
| 5,403 | 32.987421 | 88 | py |
spaCy | spaCy-master/spacy/tests/lang/sa/__init__.py | 0 | 0 | 0 | py |
|
spaCy | spaCy-master/spacy/tests/lang/sa/test_text.py | import pytest
def test_sa_tokenizer_handles_long_text(sa_tokenizer):
text = """नानाविधानि दिव्यानि नानावर्णाकृतीनि च।।"""
tokens = sa_tokenizer(text)
assert len(tokens) == 6
@pytest.mark.parametrize(
"text,length",
[
("श्री भगवानुवाच पश्य मे पार्थ रूपाणि शतशोऽथ सहस्रशः।", 9),
("गुणान् सर्वान् स्वभावो मूर्ध्नि वर्तते ।", 6),
],
)
def test_sa_tokenizer_handles_cnts(sa_tokenizer, text, length):
tokens = sa_tokenizer(text)
assert len(tokens) == length
@pytest.mark.parametrize(
"text,match",
[
("10", True),
("1", True),
("10.000", True),
("1000", True),
("999,0", True),
("एकः ", True),
("दश", True),
("पञ्चदश", True),
("चत्वारिंशत् ", True),
("कूपे", False),
(",", False),
("1/2", True),
],
)
def test_lex_attrs_like_number(sa_tokenizer, text, match):
tokens = sa_tokenizer(text)
assert len(tokens) == 1
assert tokens[0].like_num == match
| 1,015 | 22.627907 | 67 | py |
spaCy | spaCy-master/spacy/tests/lang/sk/__init__.py | 0 | 0 | 0 | py |
|
spaCy | spaCy-master/spacy/tests/lang/sk/test_text.py | import pytest
def test_long_text(sk_tokenizer):
# Excerpt: European Convention on Human Rights
text = """
majúc na zreteli, že cieľom tejto deklarácie je zabezpečiť všeobecné
a účinné uznávanie a dodržiavanie práv v nej vyhlásených;
majúc na zreteli, že cieľom Rady Európy je dosiahnutie väčšej
jednoty medzi jej členmi, a že jedným zo spôsobov, ktorým sa
má tento cieľ napĺňať, je ochrana a ďalší rozvoj ľudských práv
a základných slobôd;
znovu potvrdzujúc svoju hlbokú vieru v tie základné slobody, ktoré
sú základom spravodlivosti a mieru vo svete, a ktoré sú najlepšie
zachovávané na jednej strane účinnou politickou demokraciou
a na strane druhej spoločným poňatím a dodržiavaním ľudských
práv, od ktorých závisia;
"""
tokens = sk_tokenizer(text)
assert len(tokens) == 118
@pytest.mark.parametrize(
"text,match",
[
("10", True),
("1", True),
("10,000", True),
("10,00", True),
("štyri", True),
("devätnásť", True),
("milión", True),
("pes", False),
(",", False),
("1/2", True),
],
)
def test_lex_attrs_like_number(sk_tokenizer, text, match):
tokens = sk_tokenizer(text)
assert len(tokens) == 1
assert tokens[0].like_num == match
@pytest.mark.xfail
def test_ordinal_number(sk_tokenizer):
text = "10. decembra 1948"
tokens = sk_tokenizer(text)
assert len(tokens) == 3
| 1,413 | 27.857143 | 68 | py |
spaCy | spaCy-master/spacy/tests/lang/sk/test_tokenizer.py | import pytest
SK_BASIC_TOKENIZATION_TESTS = [
(
"Kedy sa narodil Andrej Kiska?",
["Kedy", "sa", "narodil", "Andrej", "Kiska", "?"],
),
]
@pytest.mark.parametrize("text,expected_tokens", SK_BASIC_TOKENIZATION_TESTS)
def test_sk_tokenizer_basic(sk_tokenizer, text, expected_tokens):
tokens = sk_tokenizer(text)
token_list = [token.text for token in tokens if not token.is_space]
assert expected_tokens == token_list
| 453 | 27.375 | 77 | py |
spaCy | spaCy-master/spacy/tests/lang/sl/__init__.py | 0 | 0 | 0 | py |
|
spaCy | spaCy-master/spacy/tests/lang/sl/test_text.py | import pytest
def test_long_text(sl_tokenizer):
# Excerpt: European Convention on Human Rights
text = """
upoštevajoč, da si ta deklaracija prizadeva zagotoviti splošno in
učinkovito priznavanje in spoštovanje v njej razglašenih pravic,
upoštevajoč, da je cilj Sveta Evrope doseči večjo enotnost med
njegovimi članicami, in da je eden izmed načinov za zagotavljanje
tega cilja varstvo in nadaljnji razvoj človekovih pravic in temeljnih
svoboščin,
ponovno potrjujoč svojo globoko vero v temeljne svoboščine, na
katerih temeljita pravičnost in mir v svetu, in ki jih je mogoče najbolje
zavarovati na eni strani z dejansko politično demokracijo in na drugi
strani s skupnim razumevanjem in spoštovanjem človekovih pravic,
od katerih so te svoboščine odvisne,
"""
tokens = sl_tokenizer(text)
assert len(tokens) == 116
def test_ordinal_number(sl_tokenizer):
text = "10. decembra 1948"
tokens = sl_tokenizer(text)
assert len(tokens) == 3
| 968 | 34.888889 | 73 | py |
spaCy | spaCy-master/spacy/tests/lang/sl/test_tokenizer.py | import pytest
SL_BASIC_TOKENIZATION_TESTS = [
(
"Vsakdo ima pravico do spoštovanja njegovega zasebnega in "
"družinskega življenja, doma in dopisovanja.",
[
"Vsakdo",
"ima",
"pravico",
"do",
"spoštovanja",
"njegovega",
"zasebnega",
"in",
"družinskega",
"življenja",
",",
"doma",
"in",
"dopisovanja",
".",
],
),
]
@pytest.mark.parametrize("text,expected_tokens", SL_BASIC_TOKENIZATION_TESTS)
def test_sl_tokenizer_basic(sl_tokenizer, text, expected_tokens):
tokens = sl_tokenizer(text)
token_list = [token.text for token in tokens if not token.is_space]
assert expected_tokens == token_list
| 825 | 24.030303 | 77 | py |
spaCy | spaCy-master/spacy/tests/lang/sq/__init__.py | 0 | 0 | 0 | py |
|
spaCy | spaCy-master/spacy/tests/lang/sq/test_text.py | import pytest
def test_long_text(sq_tokenizer):
# Excerpt: European Convention on Human Rights
text = """
Qeveritë nënshkruese, anëtare të Këshillit të Evropës,
Duke pasur parasysh Deklaratën Universale të të Drejtave të
Njeriut, të shpallur nga Asambleja e Përgjithshme e Kombeve të
Bashkuara më 10 dhjetor 1948;
Duke pasur parasysh, se kjo Deklaratë ka për qëllim të sigurojë
njohjen dhe zbatimin universal dhe efektiv të të drejtave të
shpallura në të;
Duke pasur parasysh se qëllimi i Këshillit të Evropës është që të
realizojë një bashkim më të ngushtë midis anëtarëve të tij dhe
se një nga mjetet për të arritur këtë qëllim është mbrojtja dhe
zhvillimi i të drejtave të njeriut dhe i lirive themelore;
Duke ripohuar besimin e tyre të thellë në këto liri themelore që
përbëjnë themelet e drejtësisë dhe të paqes në botë, ruajtja e të
cilave mbështetet kryesisht mbi një regjim politik demokratik nga
njëra anë, dhe nga ana tjetër mbi një kuptim dhe respektim të
përbashkët të të drejtave të njeriut nga të cilat varen;
"""
tokens = sq_tokenizer(text)
assert len(tokens) == 182
| 1,100 | 41.346154 | 65 | py |
spaCy | spaCy-master/spacy/tests/lang/sq/test_tokenizer.py | import pytest
SQ_BASIC_TOKENIZATION_TESTS = [
(
"Askush nuk mund t’i nënshtrohet torturës ose dënimeve ose "
"trajtimeve çnjerëzore ose poshtëruese.",
[
"Askush",
"nuk",
"mund",
"t’i",
"nënshtrohet",
"torturës",
"ose",
"dënimeve",
"ose",
"trajtimeve",
"çnjerëzore",
"ose",
"poshtëruese",
".",
],
),
]
@pytest.mark.parametrize("text,expected_tokens", SQ_BASIC_TOKENIZATION_TESTS)
def test_sq_tokenizer_basic(sq_tokenizer, text, expected_tokens):
tokens = sq_tokenizer(text)
token_list = [token.text for token in tokens if not token.is_space]
assert expected_tokens == token_list
| 801 | 24.0625 | 77 | py |
spaCy | spaCy-master/spacy/tests/lang/sr/__init__.py | 0 | 0 | 0 | py |
|
spaCy | spaCy-master/spacy/tests/lang/sr/test_exceptions.py | import pytest
@pytest.mark.parametrize(
"text,norms,lemmas",
[
("о.г.", ["ове године"], ["ова година"]),
("чет.", ["четвртак"], ["четвртак"]),
("гђа", ["госпођа"], ["госпођа"]),
("ил'", ["или"], ["или"]),
],
)
def test_sr_tokenizer_abbrev_exceptions(sr_tokenizer, text, norms, lemmas):
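    # The lemma values above are reference data only; this test asserts norms.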
tokens = sr_tokenizer(text)
assert len(tokens) == 1
assert [token.norm_ for token in tokens] == norms
| 446 | 25.294118 | 75 | py |
spaCy | spaCy-master/spacy/tests/lang/sr/test_tokenizer.py | import pytest
PUNCT_OPEN = ["(", "[", "{", "*"]
PUNCT_CLOSE = [")", "]", "}", "*"]
PUNCT_PAIRED = [("(", ")"), ("[", "]"), ("{", "}"), ("*", "*")]
@pytest.mark.parametrize("text", ["(", "((", "<"])
def test_sr_tokenizer_handles_only_punct(sr_tokenizer, text):
tokens = sr_tokenizer(text)
assert len(tokens) == len(text)
@pytest.mark.parametrize("punct", PUNCT_OPEN)
@pytest.mark.parametrize("text", ["Здраво"])
def test_sr_tokenizer_splits_open_punct(sr_tokenizer, punct, text):
tokens = sr_tokenizer(punct + text)
assert len(tokens) == 2
assert tokens[0].text == punct
assert tokens[1].text == text
@pytest.mark.parametrize("punct", PUNCT_CLOSE)
@pytest.mark.parametrize("text", ["Здраво"])
def test_sr_tokenizer_splits_close_punct(sr_tokenizer, punct, text):
tokens = sr_tokenizer(text + punct)
assert len(tokens) == 2
assert tokens[0].text == text
assert tokens[1].text == punct
@pytest.mark.parametrize("punct", PUNCT_OPEN)
@pytest.mark.parametrize("punct_add", ["`"])
@pytest.mark.parametrize("text", ["Ћао"])
def test_sr_tokenizer_splits_two_diff_open_punct(sr_tokenizer, punct, punct_add, text):
tokens = sr_tokenizer(punct + punct_add + text)
assert len(tokens) == 3
assert tokens[0].text == punct
assert tokens[1].text == punct_add
assert tokens[2].text == text
@pytest.mark.parametrize("punct", PUNCT_CLOSE)
@pytest.mark.parametrize("punct_add", ["'"])
@pytest.mark.parametrize("text", ["Здраво"])
def test_sr_tokenizer_splits_two_diff_close_punct(sr_tokenizer, punct, punct_add, text):
tokens = sr_tokenizer(text + punct + punct_add)
assert len(tokens) == 3
assert tokens[0].text == text
assert tokens[1].text == punct
assert tokens[2].text == punct_add
@pytest.mark.parametrize("punct", PUNCT_OPEN)
@pytest.mark.parametrize("text", ["Здраво"])
def test_sr_tokenizer_splits_same_open_punct(sr_tokenizer, punct, text):
tokens = sr_tokenizer(punct + punct + punct + text)
assert len(tokens) == 4
assert tokens[0].text == punct
assert tokens[3].text == text
@pytest.mark.parametrize("punct", PUNCT_CLOSE)
@pytest.mark.parametrize("text", ["Здраво"])
def test_sr_tokenizer_splits_same_close_punct(sr_tokenizer, punct, text):
tokens = sr_tokenizer(text + punct + punct + punct)
assert len(tokens) == 4
assert tokens[0].text == text
assert tokens[1].text == punct
@pytest.mark.parametrize("text", ["'Тест"])
def test_sr_tokenizer_splits_open_apostrophe(sr_tokenizer, text):
tokens = sr_tokenizer(text)
assert len(tokens) == 2
assert tokens[0].text == "'"
@pytest.mark.parametrize("text", ["Тест''"])
def test_sr_tokenizer_splits_double_end_quote(sr_tokenizer, text):
tokens = sr_tokenizer(text)
assert len(tokens) == 2
tokens_punct = sr_tokenizer("''")
assert len(tokens_punct) == 1
@pytest.mark.parametrize("punct_open,punct_close", PUNCT_PAIRED)
@pytest.mark.parametrize("text", ["Тест"])
def test_sr_tokenizer_splits_open_close_punct(
sr_tokenizer, punct_open, punct_close, text
):
tokens = sr_tokenizer(punct_open + text + punct_close)
assert len(tokens) == 3
assert tokens[0].text == punct_open
assert tokens[1].text == text
assert tokens[2].text == punct_close
@pytest.mark.parametrize("punct_open,punct_close", PUNCT_PAIRED)
@pytest.mark.parametrize("punct_open2,punct_close2", [("`", "'")])
@pytest.mark.parametrize("text", ["Тест"])
def test_sr_tokenizer_two_diff_punct(
sr_tokenizer, punct_open, punct_close, punct_open2, punct_close2, text
):
tokens = sr_tokenizer(punct_open2 + punct_open + text + punct_close + punct_close2)
assert len(tokens) == 5
assert tokens[0].text == punct_open2
assert tokens[1].text == punct_open
assert tokens[2].text == text
assert tokens[3].text == punct_close
assert tokens[4].text == punct_close2
@pytest.mark.parametrize("text", ["Тест."])
def test_sr_tokenizer_splits_trailing_dot(sr_tokenizer, text):
tokens = sr_tokenizer(text)
assert tokens[1].text == "."
def test_sr_tokenizer_splits_bracket_period(sr_tokenizer):
text = "(Један, два, три, четири, проба)."
tokens = sr_tokenizer(text)
assert tokens[len(tokens) - 1].text == "."
| 4,226 | 33.08871 | 88 | py |
spaCy | spaCy-master/spacy/tests/lang/sv/__init__.py | 0 | 0 | 0 | py |
|
spaCy | spaCy-master/spacy/tests/lang/sv/test_exceptions.py | import pytest
SV_TOKEN_EXCEPTION_TESTS = [
(
"Smörsåsen används bl.a. till fisk",
["Smörsåsen", "används", "bl.a.", "till", "fisk"],
),
(
"Jag kommer först kl. 13 p.g.a. diverse förseningar",
["Jag", "kommer", "först", "kl.", "13", "p.g.a.", "diverse", "förseningar"],
),
(
"Anders I. tycker om ord med i i.",
["Anders", "I.", "tycker", "om", "ord", "med", "i", "i", "."],
),
]
@pytest.mark.issue(805)
@pytest.mark.parametrize(
"text,expected_tokens",
[
(
"Smörsåsen används bl.a. till fisk",
["Smörsåsen", "används", "bl.a.", "till", "fisk"],
),
(
"Jag kommer först kl. 13 p.g.a. diverse förseningar",
["Jag", "kommer", "först", "kl.", "13", "p.g.a.", "diverse", "förseningar"],
),
],
)
def test_issue805(sv_tokenizer, text, expected_tokens):
tokens = sv_tokenizer(text)
token_list = [token.text for token in tokens if not token.is_space]
assert expected_tokens == token_list
@pytest.mark.parametrize("text,expected_tokens", SV_TOKEN_EXCEPTION_TESTS)
def test_sv_tokenizer_handles_exception_cases(sv_tokenizer, text, expected_tokens):
tokens = sv_tokenizer(text)
token_list = [token.text for token in tokens if not token.is_space]
assert expected_tokens == token_list
@pytest.mark.parametrize("text", ["driveru", "hajaru", "Serru", "Fixaru"])
def test_sv_tokenizer_handles_verb_exceptions(sv_tokenizer, text):
tokens = sv_tokenizer(text)
assert len(tokens) == 2
assert tokens[1].text == "u"
@pytest.mark.parametrize("text", ["bl.a", "m.a.o.", "Jan.", "Dec.", "kr.", "osv."])
def test_sv_tokenizer_handles_abbr(sv_tokenizer, text):
tokens = sv_tokenizer(text)
assert len(tokens) == 1
@pytest.mark.parametrize("text", ["Jul.", "jul.", "sön.", "Sön."])
def test_sv_tokenizer_handles_ambiguous_abbr(sv_tokenizer, text):
tokens = sv_tokenizer(text)
assert len(tokens) == 2
def test_sv_tokenizer_handles_exc_in_text(sv_tokenizer):
text = "Det är bl.a. inte meningen"
tokens = sv_tokenizer(text)
assert len(tokens) == 5
assert tokens[2].text == "bl.a."
def test_sv_tokenizer_handles_custom_base_exc(sv_tokenizer):
text = "Här är något du kan titta på."
tokens = sv_tokenizer(text)
assert len(tokens) == 8
assert tokens[6].text == "på"
assert tokens[7].text == "."
| 2,425 | 30.102564 | 88 | py |
spaCy | spaCy-master/spacy/tests/lang/sv/test_lex_attrs.py | import pytest
from spacy.lang.sv.lex_attrs import like_num
@pytest.mark.parametrize(
"text,match",
[
("10", True),
("1", True),
("10.000", True),
("10.00", True),
("999,0", True),
("en", True),
("två", True),
("miljard", True),
("hund", False),
(",", False),
("1/2", True),
],
)
def test_lex_attrs_like_number(sv_tokenizer, text, match):
tokens = sv_tokenizer(text)
assert len(tokens) == 1
assert tokens[0].like_num == match
@pytest.mark.parametrize("word", ["elva"])
def test_sv_lex_attrs_capitals(word):
assert like_num(word)
assert like_num(word.upper())
| 683 | 20.375 | 58 | py |
spaCy | spaCy-master/spacy/tests/lang/sv/test_noun_chunks.py | import pytest
from spacy.tokens import Doc
def test_noun_chunks_is_parsed_sv(sv_tokenizer):
"""Test that noun_chunks raises Value Error for 'sv' language if Doc is not parsed."""
doc = sv_tokenizer("Studenten läste den bästa boken")
with pytest.raises(ValueError):
list(doc.noun_chunks)
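# Each example: (text, POS tags, dependency labels, head indices, expected noun chunk texts).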
SV_NP_TEST_EXAMPLES = [
(
"En student läste en bok", # A student read a book
["DET", "NOUN", "VERB", "DET", "NOUN"],
["det", "nsubj", "ROOT", "det", "dobj"],
[1, 2, 2, 4, 2],
["En student", "en bok"],
),
(
"Studenten läste den bästa boken.", # The student read the best book
["NOUN", "VERB", "DET", "ADJ", "NOUN", "PUNCT"],
["nsubj", "ROOT", "det", "amod", "dobj", "punct"],
[1, 1, 4, 4, 1, 1],
["Studenten", "den bästa boken"],
),
(
"De samvetslösa skurkarna hade stulit de största juvelerna på söndagen", # The remorseless crooks had stolen the largest jewels that sunday
["DET", "ADJ", "NOUN", "VERB", "VERB", "DET", "ADJ", "NOUN", "ADP", "NOUN"],
["det", "amod", "nsubj", "aux", "root", "det", "amod", "dobj", "case", "nmod"],
[2, 2, 4, 4, 4, 7, 7, 4, 9, 4],
["De samvetslösa skurkarna", "de största juvelerna", "på söndagen"],
),
]
@pytest.mark.parametrize(
"text,pos,deps,heads,expected_noun_chunks", SV_NP_TEST_EXAMPLES
)
def test_sv_noun_chunks(sv_tokenizer, text, pos, deps, heads, expected_noun_chunks):
tokens = sv_tokenizer(text)
assert len(heads) == len(pos)
words = [t.text for t in tokens]
doc = Doc(tokens.vocab, words=words, heads=heads, deps=deps, pos=pos)
noun_chunks = list(doc.noun_chunks)
assert len(noun_chunks) == len(expected_noun_chunks)
for i, np in enumerate(noun_chunks):
assert np.text == expected_noun_chunks[i]
| 1,844 | 35.9 | 148 | py |
spaCy | spaCy-master/spacy/tests/lang/sv/test_prefix_suffix_infix.py | import pytest
@pytest.mark.parametrize("text", ["(under)"])
def test_tokenizer_splits_no_special(sv_tokenizer, text):
tokens = sv_tokenizer(text)
assert len(tokens) == 3
@pytest.mark.parametrize("text", ["gitta'r", "Björn's", "Lars'"])
def test_tokenizer_handles_no_punct(sv_tokenizer, text):
tokens = sv_tokenizer(text)
assert len(tokens) == 1
@pytest.mark.parametrize("text", ["svart.Gul", "Hej.Världen"])
def test_tokenizer_splits_period_infix(sv_tokenizer, text):
tokens = sv_tokenizer(text)
assert len(tokens) == 3
@pytest.mark.parametrize("text", ["Hej,Världen", "en,två"])
def test_tokenizer_splits_comma_infix(sv_tokenizer, text):
tokens = sv_tokenizer(text)
assert len(tokens) == 3
assert tokens[0].text == text.split(",")[0]
assert tokens[1].text == ","
assert tokens[2].text == text.split(",")[1]
@pytest.mark.parametrize("text", ["svart...Gul", "svart...gul"])
def test_tokenizer_splits_ellipsis_infix(sv_tokenizer, text):
tokens = sv_tokenizer(text)
assert len(tokens) == 3
@pytest.mark.issue(12311)
@pytest.mark.parametrize("text", ["99:e", "c:a", "EU:s", "Maj:t"])
def test_sv_tokenizer_handles_colon(sv_tokenizer, text):
tokens = sv_tokenizer(text)
assert len(tokens) == 1
| 1,261 | 29.047619 | 66 | py |
spaCy | spaCy-master/spacy/tests/lang/sv/test_text.py | def test_sv_tokenizer_handles_long_text(sv_tokenizer):
text = """Det var så härligt ute på landet. Det var sommar, majsen var gul, havren grön,
höet var uppställt i stackar nere vid den gröna ängen, och där gick storken på sina långa,
röda ben och snackade engelska, för det språket hade han lärt sig av sin mor.
Runt om åkrar och äng låg den stora skogen, och mitt i skogen fanns djupa sjöar; jo, det var verkligen trevligt ute på landet!"""
tokens = sv_tokenizer(text)
assert len(tokens) == 86
def test_sv_tokenizer_handles_trailing_dot_for_i_in_sentence(sv_tokenizer):
text = "Provar att tokenisera en mening med ord i."
tokens = sv_tokenizer(text)
assert len(tokens) == 9
| 703 | 45.933333 | 129 | py |
spaCy | spaCy-master/spacy/tests/lang/sv/test_tokenizer.py | import pytest
SV_TOKEN_EXCEPTION_TESTS = [
(
"Smörsåsen används bl.a. till fisk",
["Smörsåsen", "används", "bl.a.", "till", "fisk"],
),
(
"Jag kommer först kl. 13 p.g.a. diverse förseningar",
["Jag", "kommer", "först", "kl.", "13", "p.g.a.", "diverse", "förseningar"],
),
(
"Anders I. tycker om ord med i i.",
["Anders", "I.", "tycker", "om", "ord", "med", "i", "i", "."],
),
]
@pytest.mark.parametrize("text,expected_tokens", SV_TOKEN_EXCEPTION_TESTS)
def test_sv_tokenizer_handles_exception_cases(sv_tokenizer, text, expected_tokens):
tokens = sv_tokenizer(text)
token_list = [token.text for token in tokens if not token.is_space]
assert expected_tokens == token_list
@pytest.mark.parametrize("text", ["driveru", "hajaru", "Serru", "Fixaru"])
def test_sv_tokenizer_handles_verb_exceptions(sv_tokenizer, text):
tokens = sv_tokenizer(text)
assert len(tokens) == 2
assert tokens[1].text == "u"
| 994 | 31.096774 | 84 | py |
spaCy | spaCy-master/spacy/tests/lang/ta/__init__.py | 0 | 0 | 0 | py |
|
spaCy | spaCy-master/spacy/tests/lang/ta/test_text.py | import pytest
from spacy.lang.ta import Tamil
# Wikipedia excerpt: https://en.wikipedia.org/wiki/Chennai (Tamil Language)
TAMIL_BASIC_TOKENIZER_SENTENCIZER_TEST_TEXT = """சென்னை (Chennai) தமிழ்நாட்டின் தலைநகரமும், இந்தியாவின் நான்காவது பெரிய நகரமும் ஆகும். 1996 ஆம் ஆண்டுக்கு முன்னர் இந்நகரம், மதராசு பட்டினம், மெட்ராஸ் (Madras) மற்றும் சென்னப்பட்டினம் என்றும் அழைக்கப்பட்டு வந்தது. சென்னை, வங்காள விரிகுடாவின் கரையில் அமைந்த துறைமுக நகரங்களுள் ஒன்று. சுமார் 10 மில்லியன் (ஒரு கோடி) மக்கள் வாழும் இந்நகரம், உலகின் 35 பெரிய மாநகரங்களுள் ஒன்று. 17ஆம் நூற்றாண்டில் ஆங்கிலேயர் சென்னையில் கால் பதித்தது முதல், சென்னை நகரம் ஒரு முக்கிய நகரமாக வளர்ந்து வந்திருக்கிறது. சென்னை தென்னிந்தியாவின் வாசலாகக் கருதப்படுகிறது. சென்னை நகரில் உள்ள மெரினா கடற்கரை உலகின் நீளமான கடற்கரைகளுள் ஒன்று. சென்னை கோலிவுட் (Kollywood) என அறியப்படும் தமிழ்த் திரைப்படத் துறையின் தாயகம் ஆகும். பல விளையாட்டு அரங்கங்கள் உள்ள சென்னையில் பல விளையாட்டுப் போட்டிகளும் நடைபெறுகின்றன."""
@pytest.mark.parametrize(
"text, num_tokens",
[(TAMIL_BASIC_TOKENIZER_SENTENCIZER_TEST_TEXT, 23 + 90)], # Punctuation + rest
)
def test_long_text(ta_tokenizer, text, num_tokens):
tokens = ta_tokenizer(text)
assert len(tokens) == num_tokens
@pytest.mark.parametrize(
"text, num_sents", [(TAMIL_BASIC_TOKENIZER_SENTENCIZER_TEST_TEXT, 9)]
)
def test_ta_sentencizer(text, num_sents):
nlp = Tamil()
nlp.add_pipe("sentencizer")
doc = nlp(text)
assert len(list(doc.sents)) == num_sents
| 1,474 | 53.62963 | 828 | py |
spaCy | spaCy-master/spacy/tests/lang/ta/test_tokenizer.py | import pytest
from spacy.lang.ta import Tamil
from spacy.symbols import ORTH
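# Each entry pairs a Tamil input string with the expected list of token texts.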
TA_BASIC_TOKENIZATION_TESTS = [
(
"கிறிஸ்துமஸ் மற்றும் இனிய புத்தாண்டு வாழ்த்துக்கள்",
["கிறிஸ்துமஸ்", "மற்றும்", "இனிய", "புத்தாண்டு", "வாழ்த்துக்கள்"],
),
(
"எனக்கு என் குழந்தைப் பருவம் நினைவிருக்கிறது",
["எனக்கு", "என்", "குழந்தைப்", "பருவம்", "நினைவிருக்கிறது"],
),
("உங்கள் பெயர் என்ன?", ["உங்கள்", "பெயர்", "என்ன", "?"]),
(
"ஏறத்தாழ இலங்கைத் தமிழரில் மூன்றிலொரு பங்கினர் இலங்கையை விட்டு வெளியேறிப் பிற நாடுகளில் வாழ்கின்றனர்",
[
"ஏறத்தாழ",
"இலங்கைத்",
"தமிழரில்",
"மூன்றிலொரு",
"பங்கினர்",
"இலங்கையை",
"விட்டு",
"வெளியேறிப்",
"பிற",
"நாடுகளில்",
"வாழ்கின்றனர்",
],
),
(
"இந்த ஃபோனுடன் சுமார் ரூ.2,990 மதிப்புள்ள போட் ராக்கர்ஸ் நிறுவனத்தின் ஸ்போர்ட் புளூடூத் ஹெட்போன்ஸ் இலவசமாக வழங்கப்படவுள்ளது.",
[
"இந்த",
"ஃபோனுடன்",
"சுமார்",
"ரூ.2,990",
"மதிப்புள்ள",
"போட்",
"ராக்கர்ஸ்",
"நிறுவனத்தின்",
"ஸ்போர்ட்",
"புளூடூத்",
"ஹெட்போன்ஸ்",
"இலவசமாக",
"வழங்கப்படவுள்ளது",
".",
],
),
(
"மட்டக்களப்பில் பல இடங்களில் வீட்டுத் திட்டங்களுக்கு இன்று அடிக்கல் நாட்டல்",
[
"மட்டக்களப்பில்",
"பல",
"இடங்களில்",
"வீட்டுத்",
"திட்டங்களுக்கு",
"இன்று",
"அடிக்கல்",
"நாட்டல்",
],
),
(
"ஐ போன்க்கு முகத்தை வைத்து அன்லாக் செய்யும் முறை மற்றும் விரலால் தொட்டு அன்லாக் செய்யும் முறையை வாட்ஸ் ஆப் நிறுவனம் இதற்கு முன் கண்டுபிடித்தது",
[
"ஐ",
"போன்க்கு",
"முகத்தை",
"வைத்து",
"அன்லாக்",
"செய்யும்",
"முறை",
"மற்றும்",
"விரலால்",
"தொட்டு",
"அன்லாக்",
"செய்யும்",
"முறையை",
"வாட்ஸ்",
"ஆப்",
"நிறுவனம்",
"இதற்கு",
"முன்",
"கண்டுபிடித்தது",
],
),
(
"இது ஒரு வாக்கியம்.",
[
"இது",
"ஒரு",
"வாக்கியம்",
".",
],
),
(
"தன்னாட்சி கார்கள் காப்பீட்டு பொறுப்பை உற்பத்தியாளரிடம் மாற்றுகின்றன",
[
"தன்னாட்சி",
"கார்கள்",
"காப்பீட்டு",
"பொறுப்பை",
"உற்பத்தியாளரிடம்",
"மாற்றுகின்றன",
],
),
(
"நடைபாதை விநியோக ரோபோக்களை தடை செய்வதை சான் பிரான்சிஸ்கோ கருதுகிறது",
[
"நடைபாதை",
"விநியோக",
"ரோபோக்களை",
"தடை",
"செய்வதை",
"சான்",
"பிரான்சிஸ்கோ",
"கருதுகிறது",
],
),
(
"லண்டன் ஐக்கிய இராச்சியத்தில் ஒரு பெரிய நகரம்.",
[
"லண்டன்",
"ஐக்கிய",
"இராச்சியத்தில்",
"ஒரு",
"பெரிய",
"நகரம்",
".",
],
),
(
"என்ன வேலை செய்கிறீர்கள்?",
[
"என்ன",
"வேலை",
"செய்கிறீர்கள்",
"?",
],
),
(
"எந்த கல்லூரியில் படிக்கிறாய்?",
[
"எந்த",
"கல்லூரியில்",
"படிக்கிறாய்",
"?",
],
),
]
@pytest.mark.parametrize("text,expected_tokens", TA_BASIC_TOKENIZATION_TESTS)
def test_ta_tokenizer_basic(ta_tokenizer, text, expected_tokens):
tokens = ta_tokenizer(text)
token_list = [token.text for token in tokens]
assert expected_tokens == token_list
@pytest.mark.parametrize(
"text,expected_tokens",
[
(
"ஆப்பிள் நிறுவனம் யு.கே. தொடக்க நிறுவனத்தை ஒரு லட்சம் கோடிக்கு வாங்கப் பார்க்கிறது",
[
"ஆப்பிள்",
"நிறுவனம்",
"யு.கே.",
"தொடக்க",
"நிறுவனத்தை",
"ஒரு",
"லட்சம்",
"கோடிக்கு",
"வாங்கப்",
"பார்க்கிறது",
],
)
],
)
def test_ta_tokenizer_special_case(text, expected_tokens):
# Add a special rule to tokenize the initialism "யு.கே." (U.K., as
# in the country) as a single token.
nlp = Tamil()
nlp.tokenizer.add_special_case("யு.கே.", [{ORTH: "யு.கே."}])
tokens = nlp(text)
token_list = [token.text for token in tokens]
assert expected_tokens == token_list
| 4,710 | 23.794737 | 152 | py |
spaCy | spaCy-master/spacy/tests/lang/th/__init__.py | 0 | 0 | 0 | py |
|
spaCy | spaCy-master/spacy/tests/lang/th/test_serialize.py | import pickle
from spacy.lang.th import Thai
from ...util import make_tempdir
def test_th_tokenizer_serialize(th_tokenizer):
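    # Round-tripping through bytes and through disk should both reproduce
    # identical tokenizer bytes.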
tokenizer_bytes = th_tokenizer.to_bytes()
nlp = Thai()
nlp.tokenizer.from_bytes(tokenizer_bytes)
assert tokenizer_bytes == nlp.tokenizer.to_bytes()
with make_tempdir() as d:
file_path = d / "tokenizer"
th_tokenizer.to_disk(file_path)
nlp = Thai()
nlp.tokenizer.from_disk(file_path)
assert tokenizer_bytes == nlp.tokenizer.to_bytes()
def test_th_tokenizer_pickle(th_tokenizer):
b = pickle.dumps(th_tokenizer)
th_tokenizer_re = pickle.loads(b)
assert th_tokenizer.to_bytes() == th_tokenizer_re.to_bytes()
| 707 | 26.230769 | 64 | py |
spaCy | spaCy-master/spacy/tests/lang/th/test_tokenizer.py | import pytest
@pytest.mark.parametrize(
"text,expected_tokens", [("คุณรักผมไหม", ["คุณ", "รัก", "ผม", "ไหม"])]
)
def test_th_tokenizer(th_tokenizer, text, expected_tokens):
tokens = [token.text for token in th_tokenizer(text)]
assert tokens == expected_tokens
| 274 | 26.5 | 74 | py |
spaCy | spaCy-master/spacy/tests/lang/ti/__init__.py | 0 | 0 | 0 | py |
|
spaCy | spaCy-master/spacy/tests/lang/ti/test_exception.py | 0 | 0 | 0 | py |
|
spaCy | spaCy-master/spacy/tests/lang/ti/test_text.py | import pytest
def test_ti_tokenizer_handles_long_text(ti_tokenizer):
text = """ቻንስለር ጀርመን ኣንገላ መርከል ኣብታ ሃገር ቁጽሪ መትሓዝቲ ኮቪድ መዓልታዊ ክብረ መዝገብ ድሕሪ ምህራሙ- ጽኑዕ እገዳ ክግበር ጸዊዓ።
መርከል ሎሚ ንታሕታዋይ ባይቶ ሃገራ ክትገልጽ ከላ፡ ኣብ ወሳኒ ምዕራፍ ቃልሲ ኢና ዘለና-ዳሕራዋይ ማዕበል ካብቲ ቀዳማይ ክገድድ ይኽእል`ዩ ኢላ።
ትካል ምክልኻል ተላገብቲ ሕማማት ጀርመን፡ ኣብ ዝሓለፈ 24 ሰዓታት ኣብ ምልእቲ ጀርመር 590 ሰባት ብኮቪድ19 ምሟቶም ኣፍሊጡ`ሎ።
ቻንስለር ኣንጀላ መርከል ኣብ እዋን በዓላት ልደት ስድራቤታት ክተኣኻኸባ ዝፍቀደለን`ኳ እንተኾነ ድሕሪኡ ኣብ ዘሎ ግዜ ግን እቲ እገዳታት ክትግበር ትደሊ።"""
tokens = ti_tokenizer(text)
assert len(tokens) == 85
@pytest.mark.parametrize(
"text,length",
[
("ቻንስለር ጀርመን ኣንገላ መርከል፧", 5),
("“ስድራቤታት፧”", 4),
("""ኣብ እዋን በዓላት ልደት ስድራቤታት ክተኣኻኸባ ዝፍቀደለን`ኳ እንተኾነ።""", 9),
("ብግምት 10ኪ.ሜ. ጎይዩ።", 6),
("ኣብ ዝሓለፈ 24 ሰዓታት...", 5),
],
)
def test_ti_tokenizer_handles_cnts(ti_tokenizer, text, length):
tokens = ti_tokenizer(text)
assert len(tokens) == length
@pytest.mark.parametrize(
"text,match",
[
("10", True),
("1", True),
("10.000", True),
("1000", True),
("999,0", True),
("ሓደ", True),
("ክልተ", True),
("ትሪልዮን", True),
("ከልቢ", False),
(",", False),
("1/2", True),
],
)
def test_lex_attrs_like_number(ti_tokenizer, text, match):
tokens = ti_tokenizer(text)
assert len(tokens) == 1
assert tokens[0].like_num == match
| 1,378 | 25.519231 | 100 | py |
spaCy | spaCy-master/spacy/tests/lang/tl/__init__.py | 0 | 0 | 0 | py |
|
spaCy | spaCy-master/spacy/tests/lang/tl/test_indices.py | def test_tl_simple_punct(tl_tokenizer):
text = "Sige, punta ka dito"
tokens = tl_tokenizer(text)
assert tokens[0].idx == 0
assert tokens[1].idx == 4
assert tokens[2].idx == 6
assert tokens[3].idx == 12
assert tokens[4].idx == 15
| 257 | 27.666667 | 39 | py |
spaCy | spaCy-master/spacy/tests/lang/tl/test_punct.py | import pytest
from spacy.lang.punctuation import TOKENIZER_PREFIXES
from spacy.util import compile_prefix_regex
PUNCT_OPEN = ["(", "[", "{", "*"]
PUNCT_CLOSE = [")", "]", "}", "*"]
PUNCT_PAIRED = [("(", ")"), ("[", "]"), ("{", "}"), ("*", "*")]
@pytest.mark.parametrize("text", ["(", "((", "<"])
def test_tl_tokenizer_handles_only_punct(tl_tokenizer, text):
tokens = tl_tokenizer(text)
assert len(tokens) == len(text)
@pytest.mark.parametrize("punct", PUNCT_OPEN)
@pytest.mark.parametrize("text", ["Mabuhay"])
def test_tl_tokenizer_split_open_punct(tl_tokenizer, punct, text):
tokens = tl_tokenizer(punct + text)
assert len(tokens) == 2
assert tokens[0].text == punct
assert tokens[1].text == text
@pytest.mark.parametrize("punct", PUNCT_CLOSE)
@pytest.mark.parametrize("text", ["Mabuhay"])
def test_tl_tokenizer_splits_close_punct(tl_tokenizer, punct, text):
tokens = tl_tokenizer(text + punct)
assert len(tokens) == 2
assert tokens[0].text == text
assert tokens[1].text == punct
@pytest.mark.parametrize("punct", PUNCT_OPEN)
@pytest.mark.parametrize("punct_add", ["`"])
@pytest.mark.parametrize("text", ["Mabuhay"])
def test_tl_tokenizer_splits_two_diff_open_punct(tl_tokenizer, punct, punct_add, text):
tokens = tl_tokenizer(punct + punct_add + text)
assert len(tokens) == 3
assert tokens[0].text == punct
assert tokens[1].text == punct_add
assert tokens[2].text == text
@pytest.mark.parametrize("punct", PUNCT_CLOSE)
@pytest.mark.parametrize("punct_add", ["`"])
@pytest.mark.parametrize("text", ["Mabuhay"])
def test_tl_tokenizer_splits_two_diff_close_punct(tl_tokenizer, punct, punct_add, text):
tokens = tl_tokenizer(text + punct + punct_add)
assert len(tokens) == 3
assert tokens[0].text == text
assert tokens[1].text == punct
assert tokens[2].text == punct_add
@pytest.mark.parametrize("punct", PUNCT_OPEN)
@pytest.mark.parametrize("text", ["Mabuhay"])
def test_tl_tokenizer_splits_same_open_punct(tl_tokenizer, punct, text):
tokens = tl_tokenizer(punct + punct + punct + text)
assert len(tokens) == 4
assert tokens[0].text == punct
assert tokens[3].text == text
@pytest.mark.parametrize("punct", PUNCT_CLOSE)
@pytest.mark.parametrize("text", ["Mabuhay"])
def test_tl_tokenizer_splits_same_close_punct(tl_tokenizer, punct, text):
tokens = tl_tokenizer(text + punct + punct + punct)
assert len(tokens) == 4
assert tokens[0].text == text
assert tokens[1].text == punct
@pytest.mark.parametrize("text", ["'Ang"])
def test_tl_tokenizer_splits_open_apostrophe(tl_tokenizer, text):
tokens = tl_tokenizer(text)
assert len(tokens) == 2
assert tokens[0].text == "'"
@pytest.mark.parametrize("text", ["Mabuhay''"])
def test_tl_tokenizer_splits_double_end_quote(tl_tokenizer, text):
tokens = tl_tokenizer(text)
assert len(tokens) == 2
tokens_punct = tl_tokenizer("''")
assert len(tokens_punct) == 1
@pytest.mark.parametrize("punct_open,punct_close", PUNCT_PAIRED)
@pytest.mark.parametrize("text", ["Mabuhay"])
def test_tl_tokenizer_splits_open_close_punct(
tl_tokenizer, punct_open, punct_close, text
):
tokens = tl_tokenizer(punct_open + text + punct_close)
assert len(tokens) == 3
assert tokens[0].text == punct_open
assert tokens[1].text == text
assert tokens[2].text == punct_close
@pytest.mark.parametrize("punct_open,punct_close", PUNCT_PAIRED)
@pytest.mark.parametrize("punct_open2,punct_close2", [("`", "'")])
@pytest.mark.parametrize("text", ["Mabuhay"])
def test_tl_tokenizer_two_diff_punct(
tl_tokenizer, punct_open, punct_close, punct_open2, punct_close2, text
):
tokens = tl_tokenizer(punct_open2 + punct_open + text + punct_close + punct_close2)
assert len(tokens) == 5
assert tokens[0].text == punct_open2
assert tokens[1].text == punct_open
assert tokens[2].text == text
assert tokens[3].text == punct_close
assert tokens[4].text == punct_close2
@pytest.mark.parametrize("text,punct", [("(sa'yo", "(")])
def test_tl_tokenizer_splits_pre_punct_regex(text, punct):
tl_search_prefixes = compile_prefix_regex(TOKENIZER_PREFIXES).search
match = tl_search_prefixes(text)
assert match.group() == punct
def test_tl_tokenizer_splits_bracket_period(tl_tokenizer):
text = "(Dumating siya kahapon)."
tokens = tl_tokenizer(text)
assert tokens[len(tokens) - 1].text == "."
| 4,420 | 33.539063 | 88 | py |
spaCy | spaCy-master/spacy/tests/lang/tl/test_text.py | import pytest
from spacy.lang.tl.lex_attrs import like_num
# https://github.com/explosion/spaCy/blob/master/spacy/tests/lang/en/test_text.py
def test_tl_tokenizer_handles_long_text(tl_tokenizer):
# Excerpt: "Sapagkat ang Pilosopiya ay Ginagawa" by Padre Roque Ferriols
text = """
Tingin tayo nang tingin. Kailangan lamang nating dumilat at
marami tayong makikita. At ang pagtingin ay isang gawain na ako lamang ang
makagagawa, kung ako nga ang makakita. Kahit na napanood na ng aking
matalik na kaibigan ang isang sine, kailangan ko pa ring panoorin, kung
ako nga ang may gustong makakita. Kahit na gaano kadikit ang aming
pagkabuklod, hindi siya maaaring tumingin sa isang paraan na ako ang
nakakakita. Kung ako ang makakita, ako lamang ang makatitingin.
"""
tokens = tl_tokenizer(text)
assert len(tokens) == 97
@pytest.mark.parametrize(
"text,length",
[
("Huwag mo nang itanong sa akin.", 7),
("Nasubukan mo na bang hulihin ang hangin?", 8),
("Hindi ba?", 3),
("Nagbukas ang DFA ng 1,000 appointment slots para sa pasaporte.", 11),
("'Wala raw pasok bukas kasi may bagyo!' sabi ni Micah.", 14),
("'Ingat,' aniya. 'Maingay sila pag malayo at tahimik kung malapit.'", 17),
],
)
def test_tl_tokenizer_handles_cnts(tl_tokenizer, text, length):
tokens = tl_tokenizer(text)
assert len(tokens) == length
@pytest.mark.parametrize(
"text,match",
[
("10", True),
("isa", True),
("dalawa", True),
("tatlumpu", True),
pytest.param(
"isang daan",
True,
marks=pytest.mark.xfail(reason="Not yet implemented (means 100)"),
),
pytest.param(
"kalahati",
True,
marks=pytest.mark.xfail(reason="Not yet implemented (means 1/2)"),
),
pytest.param(
"isa't kalahati",
True,
marks=pytest.mark.xfail(
reason="Not yet implemented (means one-and-a-half)"
),
),
],
)
def test_lex_attrs_like_number(tl_tokenizer, text, match):
tokens = tl_tokenizer(text)
assert all([token.like_num for token in tokens]) == match
@pytest.mark.xfail(reason="Not yet implemented, fails when capitalized.")
@pytest.mark.parametrize("word", ["isa", "dalawa", "tatlo"])
def test_tl_lex_attrs_capitals(word):
assert like_num(word)
assert like_num(word.upper())
| 2,480 | 32.08 | 83 | py |
spaCy | spaCy-master/spacy/tests/lang/tr/__init__.py | 0 | 0 | 0 | py |
|
spaCy | spaCy-master/spacy/tests/lang/tr/test_noun_chunks.py | import pytest
def test_noun_chunks_is_parsed(tr_tokenizer):
"""Test that noun_chunks raises Value Error for 'tr' language if Doc is not parsed.
To check this test, we're constructing a Doc
with a new Vocab here and forcing is_parsed to 'False'
to make sure the noun chunks don't run.
"""
doc = tr_tokenizer("Dün seni gördüm.")
with pytest.raises(ValueError):
list(doc.noun_chunks)
| 419 | 31.307692 | 87 | py |
spaCy | spaCy-master/spacy/tests/lang/tr/test_parser.py | from spacy.tokens import Doc
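# Each test below builds a Doc from gold POS tags, heads and dependency labels
# (no trained model involved) and checks the spans produced by the Turkish
# noun_chunks iterator.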
def test_tr_noun_chunks_amod_simple(tr_tokenizer):
text = "sarı kedi"
heads = [1, 1]
deps = ["amod", "ROOT"]
pos = ["ADJ", "NOUN"]
tokens = tr_tokenizer(text)
doc = Doc(
tokens.vocab, words=[t.text for t in tokens], pos=pos, heads=heads, deps=deps
)
chunks = list(doc.noun_chunks)
assert len(chunks) == 1
assert chunks[0].text_with_ws == "sarı kedi "
def test_tr_noun_chunks_nmod_simple(tr_tokenizer):
text = "arkadaşımın kedisi" # my friend's cat
heads = [1, 1]
deps = ["nmod", "ROOT"]
pos = ["NOUN", "NOUN"]
tokens = tr_tokenizer(text)
doc = Doc(
tokens.vocab, words=[t.text for t in tokens], pos=pos, heads=heads, deps=deps
)
chunks = list(doc.noun_chunks)
assert len(chunks) == 1
assert chunks[0].text_with_ws == "arkadaşımın kedisi "
def test_tr_noun_chunks_determiner_simple(tr_tokenizer):
text = "O kedi" # that cat
heads = [1, 1]
deps = ["det", "ROOT"]
pos = ["DET", "NOUN"]
tokens = tr_tokenizer(text)
doc = Doc(
tokens.vocab, words=[t.text for t in tokens], pos=pos, heads=heads, deps=deps
)
chunks = list(doc.noun_chunks)
assert len(chunks) == 1
assert chunks[0].text_with_ws == "O kedi "
def test_tr_noun_chunks_nmod_amod(tr_tokenizer):
text = "okulun eski müdürü"
heads = [2, 2, 2]
deps = ["nmod", "amod", "ROOT"]
pos = ["NOUN", "ADJ", "NOUN"]
tokens = tr_tokenizer(text)
doc = Doc(
tokens.vocab, words=[t.text for t in tokens], pos=pos, heads=heads, deps=deps
)
chunks = list(doc.noun_chunks)
assert len(chunks) == 1
assert chunks[0].text_with_ws == "okulun eski müdürü "
def test_tr_noun_chunks_one_det_one_adj_simple(tr_tokenizer):
text = "O sarı kedi"
heads = [2, 2, 2]
deps = ["det", "amod", "ROOT"]
pos = ["DET", "ADJ", "NOUN"]
tokens = tr_tokenizer(text)
doc = Doc(
tokens.vocab, words=[t.text for t in tokens], pos=pos, heads=heads, deps=deps
)
chunks = list(doc.noun_chunks)
assert len(chunks) == 1
assert chunks[0].text_with_ws == "O sarı kedi "
def test_tr_noun_chunks_two_adjs_simple(tr_tokenizer):
text = "beyaz tombik kedi"
heads = [2, 2, 2]
deps = ["amod", "amod", "ROOT"]
pos = ["ADJ", "ADJ", "NOUN"]
tokens = tr_tokenizer(text)
doc = Doc(
tokens.vocab, words=[t.text for t in tokens], pos=pos, heads=heads, deps=deps
)
chunks = list(doc.noun_chunks)
assert len(chunks) == 1
assert chunks[0].text_with_ws == "beyaz tombik kedi "
def test_tr_noun_chunks_one_det_two_adjs_simple(tr_tokenizer):
text = "o beyaz tombik kedi"
heads = [3, 3, 3, 3]
deps = ["det", "amod", "amod", "ROOT"]
pos = ["DET", "ADJ", "ADJ", "NOUN"]
tokens = tr_tokenizer(text)
doc = Doc(
tokens.vocab, words=[t.text for t in tokens], pos=pos, heads=heads, deps=deps
)
chunks = list(doc.noun_chunks)
assert len(chunks) == 1
assert chunks[0].text_with_ws == "o beyaz tombik kedi "
def test_tr_noun_chunks_nmod_two(tr_tokenizer):
text = "kızın saçının rengi"
heads = [1, 2, 2]
deps = ["nmod", "nmod", "ROOT"]
pos = ["NOUN", "NOUN", "NOUN"]
tokens = tr_tokenizer(text)
doc = Doc(
tokens.vocab, words=[t.text for t in tokens], pos=pos, heads=heads, deps=deps
)
chunks = list(doc.noun_chunks)
assert len(chunks) == 1
assert chunks[0].text_with_ws == "kızın saçının rengi "
def test_tr_noun_chunks_chain_nmod_with_adj(tr_tokenizer):
text = "ev sahibinin tatlı köpeği"
heads = [1, 3, 3, 3]
deps = ["nmod", "nmod", "amod", "ROOT"]
pos = ["NOUN", "NOUN", "ADJ", "NOUN"]
tokens = tr_tokenizer(text)
doc = Doc(
tokens.vocab, words=[t.text for t in tokens], pos=pos, heads=heads, deps=deps
)
chunks = list(doc.noun_chunks)
assert len(chunks) == 1
assert chunks[0].text_with_ws == "ev sahibinin tatlı köpeği "
def test_tr_noun_chunks_chain_nmod_with_acl(tr_tokenizer):
text = "ev sahibinin gelen köpeği"
heads = [1, 3, 3, 3]
deps = ["nmod", "nmod", "acl", "ROOT"]
pos = ["NOUN", "NOUN", "VERB", "NOUN"]
tokens = tr_tokenizer(text)
doc = Doc(
tokens.vocab, words=[t.text for t in tokens], pos=pos, heads=heads, deps=deps
)
chunks = list(doc.noun_chunks)
assert len(chunks) == 1
assert chunks[0].text_with_ws == "ev sahibinin gelen köpeği "
def test_tr_noun_chunks_chain_nmod_head_with_amod_acl(tr_tokenizer):
text = "arabanın kırdığım sol aynası"
heads = [3, 3, 3, 3]
deps = ["nmod", "acl", "amod", "ROOT"]
pos = ["NOUN", "VERB", "ADJ", "NOUN"]
tokens = tr_tokenizer(text)
doc = Doc(
tokens.vocab, words=[t.text for t in tokens], pos=pos, heads=heads, deps=deps
)
chunks = list(doc.noun_chunks)
assert len(chunks) == 1
assert chunks[0].text_with_ws == "arabanın kırdığım sol aynası "
def test_tr_noun_chunks_nmod_three(tr_tokenizer):
text = "güney Afrika ülkelerinden Mozambik"
heads = [1, 2, 3, 3]
deps = ["nmod", "nmod", "nmod", "ROOT"]
pos = ["NOUN", "PROPN", "NOUN", "PROPN"]
tokens = tr_tokenizer(text)
doc = Doc(
tokens.vocab, words=[t.text for t in tokens], pos=pos, heads=heads, deps=deps
)
chunks = list(doc.noun_chunks)
assert len(chunks) == 1
assert chunks[0].text_with_ws == "güney Afrika ülkelerinden Mozambik "
def test_tr_noun_chunks_det_amod_nmod(tr_tokenizer):
text = "bazı eski oyun kuralları"
heads = [3, 3, 3, 3]
deps = ["det", "nmod", "nmod", "ROOT"]
pos = ["DET", "ADJ", "NOUN", "NOUN"]
tokens = tr_tokenizer(text)
doc = Doc(
tokens.vocab, words=[t.text for t in tokens], pos=pos, heads=heads, deps=deps
)
chunks = list(doc.noun_chunks)
assert len(chunks) == 1
assert chunks[0].text_with_ws == "bazı eski oyun kuralları "
def test_tr_noun_chunks_acl_simple(tr_tokenizer):
text = "bahçesi olan okul"
heads = [2, 0, 2]
deps = ["acl", "cop", "ROOT"]
pos = ["NOUN", "AUX", "NOUN"]
tokens = tr_tokenizer(text)
doc = Doc(
tokens.vocab, words=[t.text for t in tokens], pos=pos, heads=heads, deps=deps
)
chunks = list(doc.noun_chunks)
assert len(chunks) == 1
assert chunks[0].text_with_ws == "bahçesi olan okul "
def test_tr_noun_chunks_acl_verb(tr_tokenizer):
text = "sevdiğim sanatçılar"
heads = [1, 1]
deps = ["acl", "ROOT"]
pos = ["VERB", "NOUN"]
tokens = tr_tokenizer(text)
doc = Doc(
tokens.vocab, words=[t.text for t in tokens], pos=pos, heads=heads, deps=deps
)
chunks = list(doc.noun_chunks)
assert len(chunks) == 1
assert chunks[0].text_with_ws == "sevdiğim sanatçılar "
def test_tr_noun_chunks_acl_nmod(tr_tokenizer):
text = "en sevdiğim ses sanatçısı"
heads = [1, 3, 3, 3]
deps = ["advmod", "acl", "nmod", "ROOT"]
pos = ["ADV", "VERB", "NOUN", "NOUN"]
tokens = tr_tokenizer(text)
doc = Doc(
tokens.vocab, words=[t.text for t in tokens], pos=pos, heads=heads, deps=deps
)
chunks = list(doc.noun_chunks)
assert len(chunks) == 1
assert chunks[0].text_with_ws == "en sevdiğim ses sanatçısı "
def test_tr_noun_chunks_acl_nmod2(tr_tokenizer):
text = "bildiğim bir turizm şirketi"
heads = [3, 3, 3, 3]
deps = ["acl", "det", "nmod", "ROOT"]
pos = ["VERB", "DET", "NOUN", "NOUN"]
tokens = tr_tokenizer(text)
doc = Doc(
tokens.vocab, words=[t.text for t in tokens], pos=pos, heads=heads, deps=deps
)
chunks = list(doc.noun_chunks)
assert len(chunks) == 1
assert chunks[0].text_with_ws == "bildiğim bir turizm şirketi "
def test_tr_noun_chunks_np_recursive_nsubj_to_root(tr_tokenizer):
text = "Simge'nin okuduğu kitap"
heads = [1, 2, 2]
deps = ["nsubj", "acl", "ROOT"]
pos = ["PROPN", "VERB", "NOUN"]
tokens = tr_tokenizer(text)
doc = Doc(
tokens.vocab, words=[t.text for t in tokens], pos=pos, heads=heads, deps=deps
)
chunks = list(doc.noun_chunks)
assert len(chunks) == 1
assert chunks[0].text_with_ws == "Simge'nin okuduğu kitap "
def test_tr_noun_chunks_np_recursive_nsubj_attached_to_pron_root(tr_tokenizer):
text = "Simge'nin konuşabileceği birisi"
heads = [1, 2, 2]
deps = ["nsubj", "acl", "ROOT"]
pos = ["PROPN", "VERB", "PRON"]
tokens = tr_tokenizer(text)
doc = Doc(
tokens.vocab, words=[t.text for t in tokens], pos=pos, heads=heads, deps=deps
)
chunks = list(doc.noun_chunks)
assert len(chunks) == 1
assert chunks[0].text_with_ws == "Simge'nin konuşabileceği birisi "
def test_tr_noun_chunks_np_recursive_nsubj_in_subnp(tr_tokenizer):
text = "Simge'nin yarın gideceği yer"
heads = [2, 2, 3, 3]
deps = ["nsubj", "obl", "acl", "ROOT"]
pos = ["PROPN", "NOUN", "VERB", "NOUN"]
tokens = tr_tokenizer(text)
doc = Doc(
tokens.vocab, words=[t.text for t in tokens], pos=pos, heads=heads, deps=deps
)
chunks = list(doc.noun_chunks)
assert len(chunks) == 1
assert chunks[0].text_with_ws == "Simge'nin yarın gideceği yer "
def test_tr_noun_chunks_np_recursive_two_nmods(tr_tokenizer):
text = "ustanın kapısını degiştireceği çamasır makinası"
heads = [2, 2, 4, 4, 4]
deps = ["nsubj", "obj", "acl", "nmod", "ROOT"]
pos = ["NOUN", "NOUN", "VERB", "NOUN", "NOUN"]
tokens = tr_tokenizer(text)
doc = Doc(
tokens.vocab, words=[t.text for t in tokens], pos=pos, heads=heads, deps=deps
)
chunks = list(doc.noun_chunks)
assert len(chunks) == 1
assert chunks[0].text_with_ws == "ustanın kapısını degiştireceği çamasır makinası "
def test_tr_noun_chunks_np_recursive_four_nouns(tr_tokenizer):
text = "kızına piyano dersi verdiğim hanım"
heads = [3, 2, 3, 4, 4]
deps = ["obl", "nmod", "obj", "acl", "ROOT"]
pos = ["NOUN", "NOUN", "NOUN", "VERB", "NOUN"]
tokens = tr_tokenizer(text)
doc = Doc(
tokens.vocab, words=[t.text for t in tokens], pos=pos, heads=heads, deps=deps
)
chunks = list(doc.noun_chunks)
assert len(chunks) == 1
assert chunks[0].text_with_ws == "kızına piyano dersi verdiğim hanım "
def test_tr_noun_chunks_np_recursive_no_nmod(tr_tokenizer):
text = "içine birkaç çiçek konmuş olan bir vazo"
heads = [3, 2, 3, 6, 3, 6, 6]
deps = ["obl", "det", "nsubj", "acl", "aux", "det", "ROOT"]
pos = ["ADP", "DET", "NOUN", "VERB", "AUX", "DET", "NOUN"]
tokens = tr_tokenizer(text)
doc = Doc(
tokens.vocab, words=[t.text for t in tokens], pos=pos, heads=heads, deps=deps
)
chunks = list(doc.noun_chunks)
assert len(chunks) == 1
assert chunks[0].text_with_ws == "içine birkaç çiçek konmuş olan bir vazo "
def test_tr_noun_chunks_np_recursive_long_two_acls(tr_tokenizer):
text = "içine Simge'nin bahçesinden toplanmış birkaç çiçeğin konmuş olduğu bir vazo"
heads = [6, 2, 3, 5, 5, 6, 9, 6, 9, 9]
deps = ["obl", "nmod", "obl", "acl", "det", "nsubj", "acl", "aux", "det", "ROOT"]
pos = ["ADP", "PROPN", "NOUN", "VERB", "DET", "NOUN", "VERB", "AUX", "DET", "NOUN"]
tokens = tr_tokenizer(text)
doc = Doc(
tokens.vocab, words=[t.text for t in tokens], pos=pos, heads=heads, deps=deps
)
chunks = list(doc.noun_chunks)
assert len(chunks) == 1
assert (
chunks[0].text_with_ws
== "içine Simge'nin bahçesinden toplanmış birkaç çiçeğin konmuş olduğu bir vazo "
)
def test_tr_noun_chunks_two_nouns_in_nmod(tr_tokenizer):
text = "kız ve erkek çocuklar"
heads = [3, 2, 0, 3]
deps = ["nmod", "cc", "conj", "ROOT"]
pos = ["NOUN", "CCONJ", "NOUN", "NOUN"]
tokens = tr_tokenizer(text)
doc = Doc(
tokens.vocab, words=[t.text for t in tokens], pos=pos, heads=heads, deps=deps
)
chunks = list(doc.noun_chunks)
assert len(chunks) == 1
assert chunks[0].text_with_ws == "kız ve erkek çocuklar "
def test_tr_noun_chunks_two_nouns_in_nmod2(tr_tokenizer):
text = "tatlı ve gürbüz çocuklar"
heads = [3, 2, 0, 3]
deps = ["amod", "cc", "conj", "ROOT"]
pos = ["ADJ", "CCONJ", "NOUN", "NOUN"]
tokens = tr_tokenizer(text)
doc = Doc(
tokens.vocab, words=[t.text for t in tokens], pos=pos, heads=heads, deps=deps
)
chunks = list(doc.noun_chunks)
assert len(chunks) == 1
assert chunks[0].text_with_ws == "tatlı ve gürbüz çocuklar "
def test_tr_noun_chunks_conj_simple(tr_tokenizer):
text = "Sen ya da ben"
heads = [0, 3, 1, 0]
deps = ["ROOT", "cc", "fixed", "conj"]
pos = ["PRON", "CCONJ", "CCONJ", "PRON"]
tokens = tr_tokenizer(text)
doc = Doc(
tokens.vocab, words=[t.text for t in tokens], pos=pos, heads=heads, deps=deps
)
chunks = list(doc.noun_chunks)
assert len(chunks) == 2
assert chunks[0].text_with_ws == "ben "
assert chunks[1].text_with_ws == "Sen "
def test_tr_noun_chunks_conj_three(tr_tokenizer):
text = "sen, ben ve ondan"
heads = [0, 2, 0, 4, 0]
deps = ["ROOT", "punct", "conj", "cc", "conj"]
pos = ["PRON", "PUNCT", "PRON", "CCONJ", "PRON"]
tokens = tr_tokenizer(text)
doc = Doc(
tokens.vocab, words=[t.text for t in tokens], pos=pos, heads=heads, deps=deps
)
chunks = list(doc.noun_chunks)
assert len(chunks) == 3
assert chunks[0].text_with_ws == "ondan "
assert chunks[1].text_with_ws == "ben "
assert chunks[2].text_with_ws == "sen "
def test_tr_noun_chunks_conj_three2(tr_tokenizer):
text = "ben ya da sen ya da onlar"
heads = [0, 3, 1, 0, 6, 4, 3]
deps = ["ROOT", "cc", "fixed", "conj", "cc", "fixed", "conj"]
pos = ["PRON", "CCONJ", "CCONJ", "PRON", "CCONJ", "CCONJ", "PRON"]
tokens = tr_tokenizer(text)
doc = Doc(
tokens.vocab, words=[t.text for t in tokens], pos=pos, heads=heads, deps=deps
)
chunks = list(doc.noun_chunks)
assert len(chunks) == 3
assert chunks[0].text_with_ws == "onlar "
assert chunks[1].text_with_ws == "sen "
assert chunks[2].text_with_ws == "ben "
def test_tr_noun_chunks_conj_and_adj_phrase(tr_tokenizer):
text = "ben ve akıllı çocuk"
heads = [0, 3, 3, 0]
deps = ["ROOT", "cc", "amod", "conj"]
pos = ["PRON", "CCONJ", "ADJ", "NOUN"]
tokens = tr_tokenizer(text)
doc = Doc(
tokens.vocab, words=[t.text for t in tokens], pos=pos, heads=heads, deps=deps
)
chunks = list(doc.noun_chunks)
assert len(chunks) == 2
assert chunks[0].text_with_ws == "akıllı çocuk "
assert chunks[1].text_with_ws == "ben "
def test_tr_noun_chunks_conj_fixed_adj_phrase(tr_tokenizer):
text = "ben ya da akıllı çocuk"
heads = [0, 4, 1, 4, 0]
deps = ["ROOT", "cc", "fixed", "amod", "conj"]
pos = ["PRON", "CCONJ", "CCONJ", "ADJ", "NOUN"]
tokens = tr_tokenizer(text)
doc = Doc(
tokens.vocab, words=[t.text for t in tokens], pos=pos, heads=heads, deps=deps
)
chunks = list(doc.noun_chunks)
assert len(chunks) == 2
assert chunks[0].text_with_ws == "akıllı çocuk "
assert chunks[1].text_with_ws == "ben "
def test_tr_noun_chunks_conj_subject(tr_tokenizer):
text = "Sen ve ben iyi anlaşıyoruz"
heads = [4, 2, 0, 2, 4]
deps = ["nsubj", "cc", "conj", "adv", "ROOT"]
pos = ["PRON", "CCONJ", "PRON", "ADV", "VERB"]
tokens = tr_tokenizer(text)
doc = Doc(
tokens.vocab, words=[t.text for t in tokens], pos=pos, heads=heads, deps=deps
)
chunks = list(doc.noun_chunks)
assert len(chunks) == 2
assert chunks[0].text_with_ws == "ben "
assert chunks[1].text_with_ws == "Sen "
def test_tr_noun_chunks_conj_noun_head_verb(tr_tokenizer):
text = "Simge babasını görmüyormuş, annesini değil"
heads = [2, 2, 2, 4, 2, 4]
deps = ["nsubj", "obj", "ROOT", "punct", "conj", "aux"]
pos = ["PROPN", "NOUN", "VERB", "PUNCT", "NOUN", "AUX"]
tokens = tr_tokenizer(text)
doc = Doc(
tokens.vocab, words=[t.text for t in tokens], pos=pos, heads=heads, deps=deps
)
chunks = list(doc.noun_chunks)
assert len(chunks) == 3
assert chunks[0].text_with_ws == "annesini "
assert chunks[1].text_with_ws == "babasını "
assert chunks[2].text_with_ws == "Simge "
def test_tr_noun_chunks_flat_simple(tr_tokenizer):
text = "New York"
heads = [0, 0]
deps = ["ROOT", "flat"]
pos = ["PROPN", "PROPN"]
tokens = tr_tokenizer(text)
doc = Doc(
tokens.vocab, words=[t.text for t in tokens], pos=pos, heads=heads, deps=deps
)
chunks = list(doc.noun_chunks)
assert len(chunks) == 1
assert chunks[0].text_with_ws == "New York "
def test_tr_noun_chunks_flat_names_and_title(tr_tokenizer):
text = "Gazi Mustafa Kemal"
heads = [1, 1, 1]
deps = ["nmod", "ROOT", "flat"]
pos = ["PROPN", "PROPN", "PROPN"]
tokens = tr_tokenizer(text)
doc = Doc(
tokens.vocab, words=[t.text for t in tokens], pos=pos, heads=heads, deps=deps
)
chunks = list(doc.noun_chunks)
assert len(chunks) == 1
assert chunks[0].text_with_ws == "Gazi Mustafa Kemal "
def test_tr_noun_chunks_flat_names_and_title2(tr_tokenizer):
text = "Ahmet Vefik Paşa"
heads = [2, 0, 2]
deps = ["nmod", "flat", "ROOT"]
pos = ["PROPN", "PROPN", "PROPN"]
tokens = tr_tokenizer(text)
doc = Doc(
tokens.vocab, words=[t.text for t in tokens], pos=pos, heads=heads, deps=deps
)
chunks = list(doc.noun_chunks)
assert len(chunks) == 1
assert chunks[0].text_with_ws == "Ahmet Vefik Paşa "
def test_tr_noun_chunks_flat_name_lastname_and_title(tr_tokenizer):
text = "Cumhurbaşkanı Ahmet Necdet Sezer"
heads = [1, 1, 1, 1]
deps = ["nmod", "ROOT", "flat", "flat"]
pos = ["NOUN", "PROPN", "PROPN", "PROPN"]
tokens = tr_tokenizer(text)
doc = Doc(
tokens.vocab, words=[t.text for t in tokens], pos=pos, heads=heads, deps=deps
)
chunks = list(doc.noun_chunks)
assert len(chunks) == 1
assert chunks[0].text_with_ws == "Cumhurbaşkanı Ahmet Necdet Sezer "
def test_tr_noun_chunks_flat_in_nmod(tr_tokenizer):
text = "Ahmet Sezer adında bir ögrenci"
heads = [2, 0, 4, 4, 4]
deps = ["nmod", "flat", "nmod", "det", "ROOT"]
pos = ["PROPN", "PROPN", "NOUN", "DET", "NOUN"]
tokens = tr_tokenizer(text)
doc = Doc(
tokens.vocab, words=[t.text for t in tokens], pos=pos, heads=heads, deps=deps
)
chunks = list(doc.noun_chunks)
assert len(chunks) == 1
assert chunks[0].text_with_ws == "Ahmet Sezer adında bir ögrenci "
def test_tr_noun_chunks_flat_and_chain_nmod(tr_tokenizer):
text = "Batı Afrika ülkelerinden Sierra Leone"
heads = [1, 2, 3, 3, 3]
deps = ["nmod", "nmod", "nmod", "ROOT", "flat"]
pos = ["NOUN", "PROPN", "NOUN", "PROPN", "PROPN"]
tokens = tr_tokenizer(text)
doc = Doc(
tokens.vocab, words=[t.text for t in tokens], pos=pos, heads=heads, deps=deps
)
chunks = list(doc.noun_chunks)
assert len(chunks) == 1
assert chunks[0].text_with_ws == "Batı Afrika ülkelerinden Sierra Leone "
def test_tr_noun_chunks_two_flats_conjed(tr_tokenizer):
text = "New York ve Sierra Leone"
heads = [0, 0, 3, 0, 3]
deps = ["ROOT", "flat", "cc", "conj", "flat"]
pos = ["PROPN", "PROPN", "CCONJ", "PROPN", "PROPN"]
tokens = tr_tokenizer(text)
doc = Doc(
tokens.vocab, words=[t.text for t in tokens], pos=pos, heads=heads, deps=deps
)
chunks = list(doc.noun_chunks)
assert len(chunks) == 2
assert chunks[0].text_with_ws == "Sierra Leone "
assert chunks[1].text_with_ws == "New York "
| 19,594 | 33.019097 | 89 | py |
spaCy | spaCy-master/spacy/tests/lang/tr/test_text.py | import pytest
from spacy.lang.tr.lex_attrs import like_num
def test_tr_tokenizer_handles_long_text(tr_tokenizer):
text = """Pamuk nasıl ipliğe dönüştürülür?
Sıkıştırılmış balyalar halindeki pamuk, iplik fabrikasına getirildiğinde hem
lifleri birbirine dolaşmıştır, hem de tarladan toplanırken araya bitkinin
parçaları karışmıştır. Üstelik balyalardaki pamuğun cinsi aynı olsa bile kalitesi
değişeceğinden, önce bütün balyaların birbirine karıştırılarak harmanlanması gerekir.
Daha sonra pamuk yığınları, liflerin açılıp temizlenmesi için tek bir birim halinde
birleştirilmiş çeşitli makinelerden geçirilir.Bunlardan biri, dönen tokmaklarıyla
pamuğu dövüp kabartarak dağınık yumaklar haline getiren ve liflerin arasındaki yabancı
maddeleri temizleyen hallaç makinesidir. Daha sonra tarak makinesine giren pamuk demetleri,
herbirinin yüzeyinde yüzbinlerce incecik iğne bulunan döner silindirlerin arasından geçerek lif lif ayrılır
ve tül inceliğinde gevşek bir örtüye dönüşür. Ama bir sonraki makine bu lifleri dağınık
ve gevşek bir biçimde birbirine yaklaştırarak 2 cm eninde bir pamuk şeridi haline getirir."""
tokens = tr_tokenizer(text)
assert len(tokens) == 146
@pytest.mark.parametrize(
"word",
[
"bir",
"iki",
"dört",
"altı",
"milyon",
"100",
"birinci",
"üçüncü",
"beşinci",
"100üncü",
"8inci",
],
)
def test_tr_lex_attrs_like_number_cardinal_ordinal(word):
assert like_num(word)
@pytest.mark.parametrize("word", ["beş", "yedi", "yedinci", "birinci", "milyonuncu"])
def test_tr_lex_attrs_capitals(word):
assert like_num(word)
assert like_num(word.upper())
| 1,694 | 33.591837 | 107 | py |
spaCy | spaCy-master/spacy/tests/lang/tr/test_tokenizer.py | import pytest
ABBREV_TESTS = [
("Dr. Murat Bey ile görüştüm.", ["Dr.", "Murat", "Bey", "ile", "görüştüm", "."]),
("Dr.la görüştüm.", ["Dr.la", "görüştüm", "."]),
("Dr.'la görüştüm.", ["Dr.'la", "görüştüm", "."]),
("TBMM'de çalışıyormuş.", ["TBMM'de", "çalışıyormuş", "."]),
(
"Hem İst. hem Ank. bu konuda gayet iyi durumda.",
["Hem", "İst.", "hem", "Ank.", "bu", "konuda", "gayet", "iyi", "durumda", "."],
),
(
"Hem İst. hem Ank.'da yağış var.",
["Hem", "İst.", "hem", "Ank.'da", "yağış", "var", "."],
),
("Dr.", ["Dr."]),
("Yrd.Doç.", ["Yrd.Doç."]),
("Prof.'un", ["Prof.'un"]),
("Böl.'nde", ["Böl.'nde"]),
]
URL_TESTS = [
(
"Bizler de www.duygu.com.tr adında bir websitesi kurduk.",
[
"Bizler",
"de",
"www.duygu.com.tr",
"adında",
"bir",
"websitesi",
"kurduk",
".",
],
),
(
"Bizler de https://www.duygu.com.tr adında bir websitesi kurduk.",
[
"Bizler",
"de",
"https://www.duygu.com.tr",
"adında",
"bir",
"websitesi",
"kurduk",
".",
],
),
(
"Bizler de www.duygu.com.tr'dan satın aldık.",
["Bizler", "de", "www.duygu.com.tr'dan", "satın", "aldık", "."],
),
(
"Bizler de https://www.duygu.com.tr'dan satın aldık.",
["Bizler", "de", "https://www.duygu.com.tr'dan", "satın", "aldık", "."],
),
]
NUMBER_TESTS = [
("Rakamla 6 yazılıydı.", ["Rakamla", "6", "yazılıydı", "."]),
("Hava -4 dereceydi.", ["Hava", "-4", "dereceydi", "."]),
(
"Hava sıcaklığı -4ten +6ya yükseldi.",
["Hava", "sıcaklığı", "-4ten", "+6ya", "yükseldi", "."],
),
(
"Hava sıcaklığı -4'ten +6'ya yükseldi.",
["Hava", "sıcaklığı", "-4'ten", "+6'ya", "yükseldi", "."],
),
("Yarışta 6. oldum.", ["Yarışta", "6.", "oldum", "."]),
("Yarışta 438547745. oldum.", ["Yarışta", "438547745.", "oldum", "."]),
("Kitap IV. Murat hakkında.", ["Kitap", "IV.", "Murat", "hakkında", "."]),
# ("Bana söylediği sayı 6.", ["Bana", "söylediği", "sayı", "6", "."]),
("Saat 6'da buluşalım.", ["Saat", "6'da", "buluşalım", "."]),
("Saat 6dan sonra buluşalım.", ["Saat", "6dan", "sonra", "buluşalım", "."]),
("6.dan sonra saymadım.", ["6.dan", "sonra", "saymadım", "."]),
("6.'dan sonra saymadım.", ["6.'dan", "sonra", "saymadım", "."]),
("Saat 6'ydı.", ["Saat", "6'ydı", "."]),
("5'te", ["5'te"]),
("6'da", ["6'da"]),
("9dan", ["9dan"]),
("19'da", ["19'da"]),
("VI'da", ["VI'da"]),
("5.", ["5."]),
("72.", ["72."]),
("VI.", ["VI."]),
("6.'dan", ["6.'dan"]),
("19.'dan", ["19.'dan"]),
("6.dan", ["6.dan"]),
("16.dan", ["16.dan"]),
("VI.'dan", ["VI.'dan"]),
("VI.dan", ["VI.dan"]),
("Hepsi 1994 yılında oldu.", ["Hepsi", "1994", "yılında", "oldu", "."]),
("Hepsi 1994'te oldu.", ["Hepsi", "1994'te", "oldu", "."]),
(
"2/3 tarihli faturayı bulamadım.",
["2/3", "tarihli", "faturayı", "bulamadım", "."],
),
(
"2.3 tarihli faturayı bulamadım.",
["2.3", "tarihli", "faturayı", "bulamadım", "."],
),
(
"2.3. tarihli faturayı bulamadım.",
["2.3.", "tarihli", "faturayı", "bulamadım", "."],
),
(
"2/3/2020 tarihli faturayı bulamadm.",
["2/3/2020", "tarihli", "faturayı", "bulamadm", "."],
),
(
"2/3/1987 tarihinden beri burda yaşıyorum.",
["2/3/1987", "tarihinden", "beri", "burda", "yaşıyorum", "."],
),
(
"2-3-1987 tarihinden beri burdayım.",
["2-3-1987", "tarihinden", "beri", "burdayım", "."],
),
(
"2.3.1987 tarihinden beri burdayım.",
["2.3.1987", "tarihinden", "beri", "burdayım", "."],
),
(
"Bu olay 2005-2006 tarihleri arasında oldu.",
["Bu", "olay", "2005", "-", "2006", "tarihleri", "arasında", "oldu", "."],
),
(
"Bu olay 4/12/2005-21/3/2006 tarihleri arasında oldu.",
[
"Bu",
"olay",
"4/12/2005",
"-",
"21/3/2006",
"tarihleri",
"arasında",
"oldu",
".",
],
),
(
"Ek fıkra: 5/11/2003-4999/3 maddesine göre uygundur.",
[
"Ek",
"fıkra",
":",
"5/11/2003",
"-",
"4999/3",
"maddesine",
"göre",
"uygundur",
".",
],
),
(
"2/A alanları: 6831 sayılı Kanunun 2nci maddesinin birinci fıkrasının (A) bendine göre",
[
"2/A",
"alanları",
":",
"6831",
"sayılı",
"Kanunun",
"2nci",
"maddesinin",
"birinci",
"fıkrasının",
"(",
"A",
")",
"bendine",
"göre",
],
),
(
"ŞEHİTTEĞMENKALMAZ Cad. No: 2/311",
["ŞEHİTTEĞMENKALMAZ", "Cad.", "No", ":", "2/311"],
),
(
"2-3-2025",
[
"2-3-2025",
],
),
("2/3/2025", ["2/3/2025"]),
("Yıllardır 0.5 uç kullanıyorum.", ["Yıllardır", "0.5", "uç", "kullanıyorum", "."]),
(
"Kan değerlerim 0.5-0.7 arasıydı.",
["Kan", "değerlerim", "0.5", "-", "0.7", "arasıydı", "."],
),
("0.5", ["0.5"]),
("1/2", ["1/2"]),
("%1", ["%", "1"]),
("%1lik", ["%", "1lik"]),
("%1'lik", ["%", "1'lik"]),
("%1lik dilim", ["%", "1lik", "dilim"]),
("%1'lik dilim", ["%", "1'lik", "dilim"]),
("%1.5", ["%", "1.5"]),
# ("%1-%2 arası büyüme bekleniyor.", ["%", "1", "-", "%", "2", "arası", "büyüme", "bekleniyor", "."]),
(
"%1-2 arası büyüme bekliyoruz.",
["%", "1", "-", "2", "arası", "büyüme", "bekliyoruz", "."],
),
(
"%11-12 arası büyüme bekliyoruz.",
["%", "11", "-", "12", "arası", "büyüme", "bekliyoruz", "."],
),
("%1.5luk büyüme bekliyoruz.", ["%", "1.5luk", "büyüme", "bekliyoruz", "."]),
(
"Saat 1-2 arası gelin lütfen.",
["Saat", "1", "-", "2", "arası", "gelin", "lütfen", "."],
),
("Saat 15:30 gibi buluşalım.", ["Saat", "15:30", "gibi", "buluşalım", "."]),
("Saat 15:30'da buluşalım.", ["Saat", "15:30'da", "buluşalım", "."]),
("Saat 15.30'da buluşalım.", ["Saat", "15.30'da", "buluşalım", "."]),
("Saat 15.30da buluşalım.", ["Saat", "15.30da", "buluşalım", "."]),
("Saat 15 civarı buluşalım.", ["Saat", "15", "civarı", "buluşalım", "."]),
("9’daki otobüse binsek mi?", ["9’daki", "otobüse", "binsek", "mi", "?"]),
("Okulumuz 3-B şubesi", ["Okulumuz", "3-B", "şubesi"]),
("Okulumuz 3/B şubesi", ["Okulumuz", "3/B", "şubesi"]),
("Okulumuz 3B şubesi", ["Okulumuz", "3B", "şubesi"]),
("Okulumuz 3b şubesi", ["Okulumuz", "3b", "şubesi"]),
(
"Antonio Gaudí 20. yüzyılda, 1904-1914 yılları arasında on yıl süren bir reform süreci getirmiştir.",
[
"Antonio",
"Gaudí",
"20.",
"yüzyılda",
",",
"1904",
"-",
"1914",
"yılları",
"arasında",
"on",
"yıl",
"süren",
"bir",
"reform",
"süreci",
"getirmiştir",
".",
],
),
(
"Dizel yakıtın avro bölgesi ortalaması olan 1,165 avroya kıyasla litre başına 1,335 avroya mal olduğunu gösteriyor.",
[
"Dizel",
"yakıtın",
"avro",
"bölgesi",
"ortalaması",
"olan",
"1,165",
"avroya",
"kıyasla",
"litre",
"başına",
"1,335",
"avroya",
"mal",
"olduğunu",
"gösteriyor",
".",
],
),
(
"Marcus Antonius M.Ö. 1 Ocak 49'da, Sezar'dan Vali'nin kendisini barış dostu ilan ettiği bir bildiri yayınlamıştır.",
[
"Marcus",
"Antonius",
"M.Ö.",
"1",
"Ocak",
"49'da",
",",
"Sezar'dan",
"Vali'nin",
"kendisini",
"barış",
"dostu",
"ilan",
"ettiği",
"bir",
"bildiri",
"yayınlamıştır",
".",
],
),
]
PUNCT_TESTS = [
("Gitmedim dedim ya!", ["Gitmedim", "dedim", "ya", "!"]),
("Gitmedim dedim ya!!", ["Gitmedim", "dedim", "ya", "!", "!"]),
("Gitsek mi?", ["Gitsek", "mi", "?"]),
("Gitsek mi??", ["Gitsek", "mi", "?", "?"]),
("Gitsek mi?!?", ["Gitsek", "mi", "?", "!", "?"]),
(
"Ankara - Antalya arası otobüs işliyor.",
["Ankara", "-", "Antalya", "arası", "otobüs", "işliyor", "."],
),
(
"Ankara-Antalya arası otobüs işliyor.",
["Ankara", "-", "Antalya", "arası", "otobüs", "işliyor", "."],
),
("Sen--ben, ya da onlar.", ["Sen", "--", "ben", ",", "ya", "da", "onlar", "."]),
(
"Senden, benden, bizden şarkısını biliyor musun?",
["Senden", ",", "benden", ",", "bizden", "şarkısını", "biliyor", "musun", "?"],
),
(
"Akif'le geldik, sonra da o ayrıldı.",
["Akif'le", "geldik", ",", "sonra", "da", "o", "ayrıldı", "."],
),
("Bu adam ne dedi şimdi???", ["Bu", "adam", "ne", "dedi", "şimdi", "?", "?", "?"]),
(
"Yok hasta olmuş, yok annesi hastaymış, bahaneler işte...",
[
"Yok",
"hasta",
"olmuş",
",",
"yok",
"annesi",
"hastaymış",
",",
"bahaneler",
"işte",
"...",
],
),
(
"Ankara'dan İstanbul'a ... bir aşk hikayesi.",
["Ankara'dan", "İstanbul'a", "...", "bir", "aşk", "hikayesi", "."],
),
("Ahmet'te", ["Ahmet'te"]),
("İstanbul'da", ["İstanbul'da"]),
]
GENERAL_TESTS = [
(
"1914'teki Endurance seferinde, Sir Ernest Shackleton'ın kaptanlığını yaptığı İngiliz Endurance gemisi yirmi sekiz kişi ile Antarktika'yı geçmek üzere yelken açtı.",
[
"1914'teki",
"Endurance",
"seferinde",
",",
"Sir",
"Ernest",
"Shackleton'ın",
"kaptanlığını",
"yaptığı",
"İngiliz",
"Endurance",
"gemisi",
"yirmi",
"sekiz",
"kişi",
"ile",
"Antarktika'yı",
"geçmek",
"üzere",
"yelken",
"açtı",
".",
],
),
(
'Danışılan "%100 Cospedal" olduğunu belirtti.',
["Danışılan", '"', "%", "100", "Cospedal", '"', "olduğunu", "belirtti", "."],
),
(
"1976'da parkur artık kullanılmıyordu; 1990'da ise bir yangın, daha sonraları ahırlarla birlikte yıkılacak olan tahta tribünlerden geri kalanları da yok etmişti.",
[
"1976'da",
"parkur",
"artık",
"kullanılmıyordu",
";",
"1990'da",
"ise",
"bir",
"yangın",
",",
"daha",
"sonraları",
"ahırlarla",
"birlikte",
"yıkılacak",
"olan",
"tahta",
"tribünlerden",
"geri",
"kalanları",
"da",
"yok",
"etmişti",
".",
],
),
(
"Dahiyane bir ameliyat ve zorlu bir rehabilitasyon sürecinden sonra, tamamen iyileştim.",
[
"Dahiyane",
"bir",
"ameliyat",
"ve",
"zorlu",
"bir",
"rehabilitasyon",
"sürecinden",
"sonra",
",",
"tamamen",
"iyileştim",
".",
],
),
(
"Yaklaşık iki hafta süren bireysel erken oy kullanma döneminin ardından 5,7 milyondan fazla Floridalı sandık başına gitti.",
[
"Yaklaşık",
"iki",
"hafta",
"süren",
"bireysel",
"erken",
"oy",
"kullanma",
"döneminin",
"ardından",
"5,7",
"milyondan",
"fazla",
"Floridalı",
"sandık",
"başına",
"gitti",
".",
],
),
(
"Ancak, bu ABD Çevre Koruma Ajansı'nın dünyayı bu konularda uyarmasının ardından ortaya çıktı.",
[
"Ancak",
",",
"bu",
"ABD",
"Çevre",
"Koruma",
"Ajansı'nın",
"dünyayı",
"bu",
"konularda",
"uyarmasının",
"ardından",
"ortaya",
"çıktı",
".",
],
),
(
"Ortalama şansa ve 10.000 Sterlin değerinde tahvillere sahip bir yatırımcı yılda 125 Sterlin ikramiye kazanabilir.",
[
"Ortalama",
"şansa",
"ve",
"10.000",
"Sterlin",
"değerinde",
"tahvillere",
"sahip",
"bir",
"yatırımcı",
"yılda",
"125",
"Sterlin",
"ikramiye",
"kazanabilir",
".",
],
),
(
"Granit adaları; Seyşeller ve Tioman ile Saint Helena gibi volkanik adaları kapsar.",
[
"Granit",
"adaları",
";",
"Seyşeller",
"ve",
"Tioman",
"ile",
"Saint",
"Helena",
"gibi",
"volkanik",
"adaları",
"kapsar",
".",
],
),
(
"Barış antlaşmasıyla İspanya, Amerika'ya Porto Riko, Guam ve Filipinler kolonilerini devretti.",
[
"Barış",
"antlaşmasıyla",
"İspanya",
",",
"Amerika'ya",
"Porto",
"Riko",
",",
"Guam",
"ve",
"Filipinler",
"kolonilerini",
"devretti",
".",
],
),
(
"Makedonya'nın sınır bölgelerini güvence altına alan Philip, büyük bir Makedon ordusu kurdu ve uzun bir fetih seferi için Trakya'ya doğru yürüdü.",
[
"Makedonya'nın",
"sınır",
"bölgelerini",
"güvence",
"altına",
"alan",
"Philip",
",",
"büyük",
"bir",
"Makedon",
"ordusu",
"kurdu",
"ve",
"uzun",
"bir",
"fetih",
"seferi",
"için",
"Trakya'ya",
"doğru",
"yürüdü",
".",
],
),
(
"Fransız gazetesi Le Figaro'ya göre bu hükumet planı sayesinde 42 milyon Euro kazanç sağlanabilir ve elde edilen paranın 15.5 milyonu ulusal güvenlik için kullanılabilir.",
[
"Fransız",
"gazetesi",
"Le",
"Figaro'ya",
"göre",
"bu",
"hükumet",
"planı",
"sayesinde",
"42",
"milyon",
"Euro",
"kazanç",
"sağlanabilir",
"ve",
"elde",
"edilen",
"paranın",
"15.5",
"milyonu",
"ulusal",
"güvenlik",
"için",
"kullanılabilir",
".",
],
),
(
"Ortalama şansa ve 10.000 Sterlin değerinde tahvillere sahip bir yatırımcı yılda 125 Sterlin ikramiye kazanabilir.",
[
"Ortalama",
"şansa",
"ve",
"10.000",
"Sterlin",
"değerinde",
"tahvillere",
"sahip",
"bir",
"yatırımcı",
"yılda",
"125",
"Sterlin",
"ikramiye",
"kazanabilir",
".",
],
),
(
"3 Kasım Salı günü, Ankara Belediye Başkanı 2014'te hükümetle birlikte oluşturulan kentsel gelişim anlaşmasını askıya alma kararı verdi.",
[
"3",
"Kasım",
"Salı",
"günü",
",",
"Ankara",
"Belediye",
"Başkanı",
"2014'te",
"hükümetle",
"birlikte",
"oluşturulan",
"kentsel",
"gelişim",
"anlaşmasını",
"askıya",
"alma",
"kararı",
"verdi",
".",
],
),
(
"Stalin, Abakumov'u Beria'nın enerji bakanlıkları üzerindeki baskınlığına karşı MGB içinde kendi ağını kurmaya teşvik etmeye başlamıştı.",
[
"Stalin",
",",
"Abakumov'u",
"Beria'nın",
"enerji",
"bakanlıkları",
"üzerindeki",
"baskınlığına",
"karşı",
"MGB",
"içinde",
"kendi",
"ağını",
"kurmaya",
"teşvik",
"etmeye",
"başlamıştı",
".",
],
),
(
"Güney Avrupa'daki kazı alanlarının çoğunluğu gibi, bu bulgu M.Ö. 5. yüzyılın başlar",
[
"Güney",
"Avrupa'daki",
"kazı",
"alanlarının",
"çoğunluğu",
"gibi",
",",
"bu",
"bulgu",
"M.Ö.",
"5.",
"yüzyılın",
"başlar",
],
),
(
"Sağlığın bozulması Hitchcock hayatının son yirmi yılında üretimini azalttı.",
[
"Sağlığın",
"bozulması",
"Hitchcock",
"hayatının",
"son",
"yirmi",
"yılında",
"üretimini",
"azalttı",
".",
],
),
]
TESTS = ABBREV_TESTS + URL_TESTS + NUMBER_TESTS + PUNCT_TESTS + GENERAL_TESTS
@pytest.mark.parametrize("text,expected_tokens", TESTS)
def test_tr_tokenizer_handles_allcases(tr_tokenizer, text, expected_tokens):
tokens = tr_tokenizer(text)
token_list = [token.text for token in tokens if not token.is_space]
assert expected_tokens == token_list
| 18,944 | 26.180775 | 180 | py |
spaCy | spaCy-master/spacy/tests/lang/tt/__init__.py | 0 | 0 | 0 | py |
|
spaCy | spaCy-master/spacy/tests/lang/tt/test_tokenizer.py | import pytest
INFIX_HYPHEN_TESTS = [
("Явым-төшем күләме.", "Явым-төшем күләме .".split()),
("Хатын-кыз киеме.", "Хатын-кыз киеме .".split()),
]
PUNC_INSIDE_WORDS_TESTS = [
(
"Пассаҗир саны - 2,13 млн — кеше/көндә (2010), 783,9 млн. кеше/елда.",
"Пассаҗир саны - 2,13 млн — кеше / көндә ( 2010 ) ,"
" 783,9 млн. кеше / елда .".split(),
),
('Ту"кай', 'Ту " кай'.split()),
]
MIXED_ORDINAL_NUMS_TESTS = [
("Иртәгә 22нче гыйнвар...", "Иртәгә 22нче гыйнвар ...".split())
]
ABBREV_TESTS = [
("«3 елда (б.э.к.) туган", "« 3 елда ( б.э.к. ) туган".split()),
("тукымадан һ.б.ш. тегелгән.", "тукымадан һ.б.ш. тегелгән .".split()),
]
NAME_ABBREV_TESTS = [
("Ә.Тукай", "Ә.Тукай".split()),
("Ә.тукай", "Ә.тукай".split()),
("ә.Тукай", "ә . Тукай".split()),
("Миләүшә.", "Миләүшә .".split()),
]
TYPOS_IN_PUNC_TESTS = [
("«3 елда , туган", "« 3 елда , туган".split()),
("«3 елда,туган", "« 3 елда , туган".split()),
("«3 елда,туган.", "« 3 елда , туган .".split()),
("Ул эшли(кайчан?)", "Ул эшли ( кайчан ? )".split()),
("Ул (кайчан?)эшли", "Ул ( кайчан ?) эшли".split()), # "?)" => "?)" or "? )"
]
LONG_TEXTS_TESTS = [
(
"Иң борынгы кешеләр суыклар һәм салкын кышлар булмый торган җылы "
"якларда яшәгәннәр, шуңа күрә аларга кием кирәк булмаган.Йөз "
"меңнәрчә еллар үткән, борынгы кешеләр акрынлап Европа һәм Азиянең "
"салкын илләрендә дә яши башлаганнар. Алар кырыс һәм салкын "
"кышлардан саклану өчен кием-салым уйлап тапканнар - итәк.",
"Иң борынгы кешеләр суыклар һәм салкын кышлар булмый торган җылы "
"якларда яшәгәннәр , шуңа күрә аларга кием кирәк булмаган . Йөз "
"меңнәрчә еллар үткән , борынгы кешеләр акрынлап Европа һәм Азиянең "
"салкын илләрендә дә яши башлаганнар . Алар кырыс һәм салкын "
"кышлардан саклану өчен кием-салым уйлап тапканнар - итәк .".split(),
)
]
TESTCASES = (
INFIX_HYPHEN_TESTS
+ PUNC_INSIDE_WORDS_TESTS
+ MIXED_ORDINAL_NUMS_TESTS
+ ABBREV_TESTS
+ NAME_ABBREV_TESTS
+ LONG_TEXTS_TESTS
+ TYPOS_IN_PUNC_TESTS
)
NORM_TESTCASES = [
(
"тукымадан һ.б.ш. тегелгән.",
["тукымадан", "һәм башка шундыйлар", "тегелгән", "."],
)
]
@pytest.mark.parametrize("text,expected_tokens", TESTCASES)
def test_tt_tokenizer_handles_testcases(tt_tokenizer, text, expected_tokens):
tokens = [token.text for token in tt_tokenizer(text) if not token.is_space]
assert expected_tokens == tokens
@pytest.mark.parametrize("text,norms", NORM_TESTCASES)
def test_tt_tokenizer_handles_norm_exceptions(tt_tokenizer, text, norms):
tokens = tt_tokenizer(text)
assert [token.norm_ for token in tokens] == norms
| 2,758 | 31.845238 | 81 | py |
spaCy | spaCy-master/spacy/tests/lang/uk/__init__.py | 0 | 0 | 0 | py |
|
spaCy | spaCy-master/spacy/tests/lang/uk/test_lemmatizer.py | import pytest
from spacy.tokens import Doc
pytestmark = pytest.mark.filterwarnings("ignore::DeprecationWarning")
def test_uk_lemmatizer(uk_lemmatizer):
"""Check that the default uk lemmatizer runs."""
doc = Doc(uk_lemmatizer.vocab, words=["a", "b", "c"])
assert uk_lemmatizer.mode == "pymorphy3"
uk_lemmatizer(doc)
assert [token.lemma for token in doc]
@pytest.mark.parametrize(
"word,lemma",
(
("якийсь", "якийсь"),
("розповідають", "розповідати"),
("розповіси", "розповісти"),
),
)
def test_uk_lookup_lemmatizer(uk_lookup_lemmatizer, word, lemma):
assert uk_lookup_lemmatizer.mode == "pymorphy3_lookup"
doc = Doc(uk_lookup_lemmatizer.vocab, words=[word])
assert uk_lookup_lemmatizer(doc)[0].lemma_ == lemma
| 783 | 27 | 69 | py |
spaCy | spaCy-master/spacy/tests/lang/uk/test_tokenizer.py | import pytest
PUNCT_OPEN = ["(", "[", "{", "*"]
PUNCT_CLOSE = [")", "]", "}", "*"]
PUNCT_PAIRED = [("(", ")"), ("[", "]"), ("{", "}"), ("*", "*")]
@pytest.mark.parametrize("text", ["(", "((", "<"])
def test_uk_tokenizer_handles_only_punct(uk_tokenizer, text):
tokens = uk_tokenizer(text)
assert len(tokens) == len(text)
@pytest.mark.parametrize("punct", PUNCT_OPEN)
@pytest.mark.parametrize(
"text", ["Привет", "Привіт", "Ґелґотати", "З'єднання", "Єдність", "їхні"]
)
def test_uk_tokenizer_splits_open_punct(uk_tokenizer, punct, text):
tokens = uk_tokenizer(punct + text)
assert len(tokens) == 2
assert tokens[0].text == punct
assert tokens[1].text == text
@pytest.mark.parametrize("punct", PUNCT_CLOSE)
@pytest.mark.parametrize(
"text", ["Привет", "Привіт", "Ґелґотати", "З'єднання", "Єдність", "їхні"]
)
def test_uk_tokenizer_splits_close_punct(uk_tokenizer, punct, text):
tokens = uk_tokenizer(text + punct)
assert len(tokens) == 2
assert tokens[0].text == text
assert tokens[1].text == punct
@pytest.mark.parametrize("punct", PUNCT_OPEN)
@pytest.mark.parametrize("punct_add", ["`"])
@pytest.mark.parametrize(
"text", ["Привет", "Привіт", "Ґелґотати", "З'єднання", "Єдність", "їхні"]
)
def test_uk_tokenizer_splits_two_diff_open_punct(uk_tokenizer, punct, punct_add, text):
tokens = uk_tokenizer(punct + punct_add + text)
assert len(tokens) == 3
assert tokens[0].text == punct
assert tokens[1].text == punct_add
assert tokens[2].text == text
@pytest.mark.parametrize("punct", PUNCT_CLOSE)
@pytest.mark.parametrize("punct_add", ["'"])
@pytest.mark.parametrize(
"text", ["Привет", "Привіт", "Ґелґотати", "З'єднання", "Єдність", "їхні"]
)
def test_uk_tokenizer_splits_two_diff_close_punct(uk_tokenizer, punct, punct_add, text):
tokens = uk_tokenizer(text + punct + punct_add)
assert len(tokens) == 3
assert tokens[0].text == text
assert tokens[1].text == punct
assert tokens[2].text == punct_add
@pytest.mark.parametrize("punct", PUNCT_OPEN)
@pytest.mark.parametrize(
"text", ["Привет", "Привіт", "Ґелґотати", "З'єднання", "Єдність", "їхні"]
)
def test_uk_tokenizer_splits_same_open_punct(uk_tokenizer, punct, text):
tokens = uk_tokenizer(punct + punct + punct + text)
assert len(tokens) == 4
assert tokens[0].text == punct
assert tokens[3].text == text
@pytest.mark.parametrize("punct", PUNCT_CLOSE)
@pytest.mark.parametrize(
"text", ["Привет", "Привіт", "Ґелґотати", "З'єднання", "Єдність", "їхні"]
)
def test_uk_tokenizer_splits_same_close_punct(uk_tokenizer, punct, text):
tokens = uk_tokenizer(text + punct + punct + punct)
assert len(tokens) == 4
assert tokens[0].text == text
assert tokens[1].text == punct
@pytest.mark.parametrize("text", ["'Тест"])
def test_uk_tokenizer_splits_open_appostrophe(uk_tokenizer, text):
tokens = uk_tokenizer(text)
assert len(tokens) == 2
assert tokens[0].text == "'"
@pytest.mark.parametrize("text", ["Тест''"])
def test_uk_tokenizer_splits_double_end_quote(uk_tokenizer, text):
tokens = uk_tokenizer(text)
assert len(tokens) == 2
tokens_punct = uk_tokenizer("''")
assert len(tokens_punct) == 1
@pytest.mark.parametrize("punct_open,punct_close", PUNCT_PAIRED)
@pytest.mark.parametrize(
"text", ["Привет", "Привіт", "Ґелґотати", "З'єднання", "Єдність", "їхні"]
)
def test_uk_tokenizer_splits_open_close_punct(
uk_tokenizer, punct_open, punct_close, text
):
tokens = uk_tokenizer(punct_open + text + punct_close)
assert len(tokens) == 3
assert tokens[0].text == punct_open
assert tokens[1].text == text
assert tokens[2].text == punct_close
@pytest.mark.parametrize("punct_open,punct_close", PUNCT_PAIRED)
@pytest.mark.parametrize("punct_open2,punct_close2", [("`", "'")])
@pytest.mark.parametrize(
"text", ["Привет", "Привіт", "Ґелґотати", "З'єднання", "Єдність", "їхні"]
)
def test_uk_tokenizer_two_diff_punct(
uk_tokenizer, punct_open, punct_close, punct_open2, punct_close2, text
):
tokens = uk_tokenizer(punct_open2 + punct_open + text + punct_close + punct_close2)
assert len(tokens) == 5
assert tokens[0].text == punct_open2
assert tokens[1].text == punct_open
assert tokens[2].text == text
assert tokens[3].text == punct_close
assert tokens[4].text == punct_close2
@pytest.mark.parametrize(
"text", ["Привет.", "Привіт.", "Ґелґотати.", "З'єднання.", "Єдність.", "їхні."]
)
def test_uk_tokenizer_splits_trailing_dot(uk_tokenizer, text):
tokens = uk_tokenizer(text)
assert tokens[1].text == "."
def test_uk_tokenizer_splits_bracket_period(uk_tokenizer):
text = "(Раз, два, три, проверка)."
tokens = uk_tokenizer(text)
assert tokens[len(tokens) - 1].text == "."
def test_uk_tokenizer_handles_final_diacritics(uk_tokenizer):
text = "Хлібі́в не було́. Хлібі́в не було́."
tokens = uk_tokenizer(text)
assert tokens[2].text == "було́"
assert tokens[3].text == "."
| 4,997 | 32.543624 | 88 | py |
spaCy | spaCy-master/spacy/tests/lang/uk/test_tokenizer_exc.py | import pytest
@pytest.mark.parametrize(
"text,norms,lemmas",
[("ім.", ["імені"], ["ім'я"]), ("проф.", ["професор"], ["професор"])],
)
def test_uk_tokenizer_abbrev_exceptions(uk_tokenizer, text, norms, lemmas):
tokens = uk_tokenizer(text)
assert len(tokens) == 1
assert [token.norm_ for token in tokens] == norms
| 334 | 26.916667 | 75 | py |
spaCy | spaCy-master/spacy/tests/lang/ur/__init__.py | 0 | 0 | 0 | py |
|
spaCy | spaCy-master/spacy/tests/lang/ur/test_prefix_suffix_infix.py | import pytest
@pytest.mark.parametrize("text", ["ہےں۔", "کیا۔"])
def test_contractions(ur_tokenizer, text):
"""Test specific Urdu punctuation character"""
tokens = ur_tokenizer(text)
assert len(tokens) == 2
| 221 | 23.666667 | 50 | py |
spaCy | spaCy-master/spacy/tests/lang/ur/test_text.py | import pytest
def test_ur_tokenizer_handles_long_text(ur_tokenizer):
text = """اصل میں، رسوا ہونے کی ہمیں کچھ عادت سی ہو گئی ہے۔"""
tokens = ur_tokenizer(text)
assert len(tokens) == 14
@pytest.mark.parametrize("text,length", [("تحریر باسط حبیب", 3), ("میرا پاکستان", 2)])
def test_ur_tokenizer_handles_cnts(ur_tokenizer, text, length):
tokens = ur_tokenizer(text)
assert len(tokens) == length
| 417 | 28.857143 | 86 | py |
spaCy | spaCy-master/spacy/tests/lang/vi/__init__.py | 0 | 0 | 0 | py |
|
spaCy | spaCy-master/spacy/tests/lang/vi/test_serialize.py | import pickle
from spacy.lang.vi import Vietnamese
from ...util import make_tempdir
def test_vi_tokenizer_serialize(vi_tokenizer):
tokenizer_bytes = vi_tokenizer.to_bytes()
nlp = Vietnamese()
nlp.tokenizer.from_bytes(tokenizer_bytes)
assert tokenizer_bytes == nlp.tokenizer.to_bytes()
assert nlp.tokenizer.use_pyvi is True
with make_tempdir() as d:
file_path = d / "tokenizer"
vi_tokenizer.to_disk(file_path)
nlp = Vietnamese()
nlp.tokenizer.from_disk(file_path)
assert tokenizer_bytes == nlp.tokenizer.to_bytes()
assert nlp.tokenizer.use_pyvi is True
# mode is (de)serialized correctly
nlp = Vietnamese.from_config({"nlp": {"tokenizer": {"use_pyvi": False}}})
nlp_bytes = nlp.to_bytes()
nlp_r = Vietnamese()
nlp_r.from_bytes(nlp_bytes)
assert nlp_bytes == nlp_r.to_bytes()
assert nlp_r.tokenizer.use_pyvi is False
with make_tempdir() as d:
nlp.to_disk(d)
nlp_r = Vietnamese()
nlp_r.from_disk(d)
assert nlp_bytes == nlp_r.to_bytes()
assert nlp_r.tokenizer.use_pyvi is False
def test_vi_tokenizer_pickle(vi_tokenizer):
b = pickle.dumps(vi_tokenizer)
vi_tokenizer_re = pickle.loads(b)
assert vi_tokenizer.to_bytes() == vi_tokenizer_re.to_bytes()
| 1,309 | 29.465116 | 77 | py |
spaCy | spaCy-master/spacy/tests/lang/vi/test_tokenizer.py | import pytest
from spacy.lang.vi import Vietnamese
from ...tokenizer.test_naughty_strings import NAUGHTY_STRINGS
# fmt: off
TOKENIZER_TESTS = [
("Đây là một văn bản bằng tiếng Việt Sau đó, đây là một văn bản khác bằng ngôn ngữ này", ['Đây', 'là', 'một', 'văn bản', 'bằng', 'tiếng', 'Việt', 'Sau', 'đó', ',', 'đây', 'là', 'một', 'văn bản', 'khác', 'bằng', 'ngôn ngữ', 'này']),
]
# fmt: on
@pytest.mark.parametrize("text,expected_tokens", TOKENIZER_TESTS)
def test_vi_tokenizer(vi_tokenizer, text, expected_tokens):
tokens = [token.text for token in vi_tokenizer(text)]
assert tokens == expected_tokens
def test_vi_tokenizer_extra_spaces(vi_tokenizer):
# note: three spaces after "I"
    tokens = vi_tokenizer("I   like cheese.")
    assert tokens[1].orth_ == "  "
@pytest.mark.parametrize("text", NAUGHTY_STRINGS)
def test_vi_tokenizer_naughty_strings(vi_tokenizer, text):
tokens = vi_tokenizer(text)
assert tokens.text_with_ws == text
def test_vi_tokenizer_emptyish_texts(vi_tokenizer):
doc = vi_tokenizer("")
assert len(doc) == 0
doc = vi_tokenizer(" ")
assert len(doc) == 1
doc = vi_tokenizer("\n\n\n \t\t \n\n\n")
assert len(doc) == 1
def test_vi_tokenizer_no_pyvi():
"""Test for whitespace tokenization without pyvi"""
nlp = Vietnamese.from_config({"nlp": {"tokenizer": {"use_pyvi": False}}})
text = "Đây là một văn bản bằng tiếng Việt Sau đó, đây là một văn bản khác bằng ngôn ngữ này"
doc = nlp(text)
assert [t.text for t in doc if not t.is_space] == text.split()
assert doc[4].text == " "
| 1,584 | 32.020833 | 237 | py |
spaCy | spaCy-master/spacy/tests/lang/xx/__init__.py | 0 | 0 | 0 | py |
|
spaCy | spaCy-master/spacy/tests/lang/xx/test_text.py | import pytest
def test_long_text(xx_tokenizer):
# Excerpt: Text in Skolt Sami taken from https://www.samediggi.fi
text = """
Säʹmmla lie Euroopp unioon oʹdinakai alggmeer. Säʹmmlai alggmeerstatus lij raʹvvjum Lääʹddjânnam vuâđđlääʹjjest.
Alggmeer kriteeʹr vuâđđâʹvve meeraikõskksaž tuâjjorganisaatio, ILO, suåppmõʹšše nââmar 169.
Suåppmõõžž mieʹldd jiõččvälddsaž jânnmin jälsteei meeraid ââʹnet alggmeeran,
ko sij puõlvvâʹvve naroodâst, kååʹtt jânnam välddmõõžž leʹbe aazztummuž leʹbe ânnʼjõž riikkraaʹji šõddâm ääiʹj jälste
jânnmest leʹbe tõn mäddtiõđlaž vuuʹdest, koozz jânnam kooll. Alggmeer ij leäkku mieʹrreei sââʹjest jiiʹjjes jälstemvuuʹdest.
Alggmeer âlgg jiõčč ââʹnned jiiʹjjes alggmeeran leʹbe leeʹd tõn miõlâst, što sij lie alggmeer.
Alggmeer lij õlggâm seeilted vuõiggâdvuõđlaž sââʹjest huõlǩâni obbnes leʹbe vueʹzzi jiiʹjjes sosiaalʼlaž, täälʼlaž,
kulttuurlaž da poliittlaž instituutioid.
Säʹmmlai statuuzz ǩeeʹrjteš Lääʹddjânnam vuâđđläkka eeʹjj 1995. Säʹmmlain alggmeeran lij vuõiggâdvuõtt tuõʹllʼjed da
ooudâsviikkâd ǩiõlâz da kulttuurâz di tõõzz kuulli ääʹrbvuâlaž jieʹllemvueʹjjeez. Sääʹmǩiõl ââʹnnmest veʹrǧǧniiʹǩǩi
åʹrnn lij šiõttuum jiiʹjjes lääʹǩǩ. Säʹmmlain lij leämmaž eeʹjjest 1996 vueʹljeeʹl dommvuuʹdsteez ǩiõlâz da kulttuurâz kuõskki
vuâđđlääʹjj meâldlaž jiõččvaaldâšm. Säʹmmlai jiõččvaldšma kuulli tuâjaid håidd säʹmmlai vaalin vaʹlljääm parlameʹntt,
Sääʹmteʹǧǧ.
"""
tokens = xx_tokenizer(text)
assert len(tokens) == 179
| 1,482 | 58.32 | 127 | py |
spaCy | spaCy-master/spacy/tests/lang/xx/test_tokenizer.py | import pytest
XX_BASIC_TOKENIZATION_TESTS = [
(
"Lääʹddjânnmest lie nuʹtt 10 000 säʹmmliʹžžed. Seeʹst pâʹjjel",
[
"Lääʹddjânnmest",
"lie",
"nuʹtt",
"10",
"000",
"säʹmmliʹžžed",
".",
"Seeʹst",
"pâʹjjel",
],
),
]
@pytest.mark.parametrize("text,expected_tokens", XX_BASIC_TOKENIZATION_TESTS)
def test_xx_tokenizer_basic(xx_tokenizer, text, expected_tokens):
tokens = xx_tokenizer(text)
token_list = [token.text for token in tokens if not token.is_space]
assert expected_tokens == token_list
| 643 | 23.769231 | 77 | py |
spaCy | spaCy-master/spacy/tests/lang/yo/__init__.py | 0 | 0 | 0 | py |
|
spaCy | spaCy-master/spacy/tests/lang/yo/test_text.py | import pytest
from spacy.lang.yo.lex_attrs import like_num
def test_yo_tokenizer_handles_long_text(yo_tokenizer):
text = """Àwọn ọmọ ìlú tí wọ́n ń ṣàmúlò ayélujára ti bẹ̀rẹ̀ ìkọkúkọ sórí àwòrán ààrẹ Nkurunziza nínú ìfẹ̀hónúhàn pẹ̀lú àmì ìdámọ̀: Nkurunziza àti Burundi:
Ọmọ ilé ẹ̀kọ́ gíga ní ẹ̀wọ̀n fún kíkọ ìkọkúkọ sí orí àwòrán Ààrẹ .
Bí mo bá ṣe èyí ní Burundi , ó ṣe é ṣe kí a fi mí sí àtìmọ́lé
Ìjọba Burundi fi akẹ́kọ̀ọ́bìnrin sí àtìmọ́lé látàrí ẹ̀sùn ìkọkúkọ sí orí àwòrán ààrẹ. A túwíìtì àwòrán ìkọkúkọ wa ní ìbánikẹ́dùn ìṣẹ̀lẹ̀ náà.
Wọ́n ní kí a dán an wò, kí a kọ nǹkan sí orí àwòrán ààrẹ mo sì ṣe bẹ́ẹ̀. Mo ní ìgbóyà wípé ẹnikẹ́ni kò ní mú mi níbí.
Ìfòfinlíle mú àtakò"""
tokens = yo_tokenizer(text)
assert len(tokens) == 121
@pytest.mark.parametrize(
"text,match",
[("ení", True), ("ogun", True), ("mewadinlogun", True), ("ten", False)],
)
def test_lex_attrs_like_number(yo_tokenizer, text, match):
tokens = yo_tokenizer(text)
assert len(tokens) == 1
assert tokens[0].like_num == match
@pytest.mark.parametrize("word", ["eji", "ejila", "ogun", "aárùn"])
def test_yo_lex_attrs_capitals(word):
assert like_num(word)
assert like_num(word.upper())
| 1,244 | 39.16129 | 159 | py |
spaCy | spaCy-master/spacy/tests/lang/zh/__init__.py | 0 | 0 | 0 | py |
|
spaCy | spaCy-master/spacy/tests/lang/zh/test_serialize.py | import pytest
from spacy.lang.zh import Chinese
from ...util import make_tempdir
def zh_tokenizer_serialize(zh_tokenizer):
tokenizer_bytes = zh_tokenizer.to_bytes()
nlp = Chinese()
nlp.tokenizer.from_bytes(tokenizer_bytes)
assert tokenizer_bytes == nlp.tokenizer.to_bytes()
with make_tempdir() as d:
file_path = d / "tokenizer"
zh_tokenizer.to_disk(file_path)
nlp = Chinese()
nlp.tokenizer.from_disk(file_path)
assert tokenizer_bytes == nlp.tokenizer.to_bytes()
def test_zh_tokenizer_serialize_char(zh_tokenizer_char):
zh_tokenizer_serialize(zh_tokenizer_char)
def test_zh_tokenizer_serialize_jieba(zh_tokenizer_jieba):
zh_tokenizer_serialize(zh_tokenizer_jieba)
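# In the config below, [nlp.tokenizer] selects the segmenter backend when the
# pipeline is constructed from the config, while [initialize.tokenizer] supplies
# the pkuseg model data, which is only loaded once nlp.initialize() is called
# (as this test does before serializing).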
@pytest.mark.slow
def test_zh_tokenizer_serialize_pkuseg_with_processors(zh_tokenizer_pkuseg):
config = {
"nlp": {
"tokenizer": {
"@tokenizers": "spacy.zh.ChineseTokenizer",
"segmenter": "pkuseg",
}
},
"initialize": {
"tokenizer": {
"pkuseg_model": "medicine",
}
},
}
nlp = Chinese.from_config(config)
nlp.initialize()
zh_tokenizer_serialize(nlp.tokenizer)
| 1,247 | 25 | 76 | py |
spaCy | spaCy-master/spacy/tests/lang/zh/test_text.py | import pytest
@pytest.mark.parametrize(
"text,match",
[
("10", True),
("1", True),
("999.0", True),
("一", True),
("二", True),
("〇", True),
("十一", True),
("狗", False),
(",", False),
],
)
def test_lex_attrs_like_number(zh_tokenizer_jieba, text, match):
tokens = zh_tokenizer_jieba(text)
assert len(tokens) == 1
assert tokens[0].like_num == match
| 442 | 19.136364 | 64 | py |
spaCy | spaCy-master/spacy/tests/lang/zh/test_tokenizer.py | import pytest
from thinc.api import ConfigValidationError
from spacy.lang.zh import Chinese, _get_pkuseg_trie_data
# fmt: off
TEXTS = ("作为语言而言,为世界使用人数最多的语言,目前世界有五分之一人口做为母语。",)
JIEBA_TOKENIZER_TESTS = [
(TEXTS[0],
['作为', '语言', '而言', ',', '为', '世界', '使用', '人', '数最多',
'的', '语言', ',', '目前', '世界', '有', '五分之一', '人口', '做',
'为', '母语', '。']),
]
PKUSEG_TOKENIZER_TESTS = [
(TEXTS[0],
['作为', '语言', '而言', ',', '为', '世界', '使用', '人数', '最多',
'的', '语言', ',', '目前', '世界', '有', '五分之一', '人口', '做为',
'母语', '。']),
]
# fmt: on
@pytest.mark.parametrize("text", TEXTS)
def test_zh_tokenizer_char(zh_tokenizer_char, text):
tokens = [token.text for token in zh_tokenizer_char(text)]
assert tokens == list(text)
@pytest.mark.parametrize("text,expected_tokens", JIEBA_TOKENIZER_TESTS)
def test_zh_tokenizer_jieba(zh_tokenizer_jieba, text, expected_tokens):
tokens = [token.text for token in zh_tokenizer_jieba(text)]
assert tokens == expected_tokens
@pytest.mark.parametrize("text,expected_tokens", PKUSEG_TOKENIZER_TESTS)
def test_zh_tokenizer_pkuseg(zh_tokenizer_pkuseg, text, expected_tokens):
tokens = [token.text for token in zh_tokenizer_pkuseg(text)]
assert tokens == expected_tokens
def test_zh_tokenizer_pkuseg_user_dict(zh_tokenizer_pkuseg, zh_tokenizer_char):
user_dict = _get_pkuseg_trie_data(zh_tokenizer_pkuseg.pkuseg_seg.preprocesser.trie)
zh_tokenizer_pkuseg.pkuseg_update_user_dict(["nonsense_asdf"])
updated_user_dict = _get_pkuseg_trie_data(
zh_tokenizer_pkuseg.pkuseg_seg.preprocesser.trie
)
assert len(user_dict) == len(updated_user_dict) - 1
# reset user dict
zh_tokenizer_pkuseg.pkuseg_update_user_dict([], reset=True)
reset_user_dict = _get_pkuseg_trie_data(
zh_tokenizer_pkuseg.pkuseg_seg.preprocesser.trie
)
assert len(reset_user_dict) == 0
# warn if not relevant
with pytest.warns(UserWarning):
zh_tokenizer_char.pkuseg_update_user_dict(["nonsense_asdf"])
def test_zh_extra_spaces(zh_tokenizer_char):
# note: three spaces after "I"
    tokens = zh_tokenizer_char("I   like cheese.")
    assert tokens[1].orth_ == "  "
def test_zh_unsupported_segmenter():
config = {"nlp": {"tokenizer": {"segmenter": "unk"}}}
with pytest.raises(ConfigValidationError):
Chinese.from_config(config)
def test_zh_uninitialized_pkuseg():
config = {"nlp": {"tokenizer": {"segmenter": "char"}}}
nlp = Chinese.from_config(config)
nlp.tokenizer.segmenter = "pkuseg"
with pytest.raises(ValueError):
nlp("test")
| 2,603 | 31.962025 | 87 | py |
spaCy | spaCy-master/spacy/tests/matcher/__init__.py | 0 | 0 | 0 | py |
|
spaCy | spaCy-master/spacy/tests/matcher/test_dependency_matcher.py | import copy
import pickle
import re
import pytest
from mock import Mock
from spacy.matcher import DependencyMatcher
from spacy.tokens import Doc, Token
from ..doc.test_underscore import clean_underscore # noqa: F401
@pytest.fixture
def doc(en_vocab):
words = ["The", "quick", "brown", "fox", "jumped", "over", "the", "lazy", "fox"]
heads = [3, 3, 3, 4, 4, 4, 8, 8, 5]
deps = ["det", "amod", "amod", "nsubj", "ROOT", "prep", "pobj", "det", "amod"]
return Doc(en_vocab, words=words, heads=heads, deps=deps)
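# Shape of the DependencyMatcher patterns built below: a pattern is a list of
# dicts, one per node. The first dict anchors the pattern with "RIGHT_ID" (a
# name for the node) and "RIGHT_ATTRS" (token attributes in Matcher syntax).
# Each later dict introduces one more node via its own "RIGHT_ID"/"RIGHT_ATTRS"
# and relates it to an already-defined node through "LEFT_ID" and "REL_OP".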
@pytest.fixture
def patterns(en_vocab):
def is_brown_yellow(text):
return bool(re.compile(r"brown|yellow").match(text))
IS_BROWN_YELLOW = en_vocab.add_flag(is_brown_yellow)
pattern1 = [
{"RIGHT_ID": "fox", "RIGHT_ATTRS": {"ORTH": "fox"}},
{
"LEFT_ID": "fox",
"REL_OP": ">",
"RIGHT_ID": "q",
"RIGHT_ATTRS": {"ORTH": "quick", "DEP": "amod"},
},
{
"LEFT_ID": "fox",
"REL_OP": ">",
"RIGHT_ID": "r",
"RIGHT_ATTRS": {IS_BROWN_YELLOW: True},
},
]
pattern2 = [
{"RIGHT_ID": "jumped", "RIGHT_ATTRS": {"ORTH": "jumped"}},
{
"LEFT_ID": "jumped",
"REL_OP": ">",
"RIGHT_ID": "fox1",
"RIGHT_ATTRS": {"ORTH": "fox"},
},
{
"LEFT_ID": "jumped",
"REL_OP": ".",
"RIGHT_ID": "over",
"RIGHT_ATTRS": {"ORTH": "over"},
},
]
pattern3 = [
{"RIGHT_ID": "jumped", "RIGHT_ATTRS": {"ORTH": "jumped"}},
{
"LEFT_ID": "jumped",
"REL_OP": ">",
"RIGHT_ID": "fox",
"RIGHT_ATTRS": {"ORTH": "fox"},
},
{
"LEFT_ID": "fox",
"REL_OP": ">>",
"RIGHT_ID": "r",
"RIGHT_ATTRS": {"ORTH": "brown"},
},
]
pattern4 = [
{"RIGHT_ID": "jumped", "RIGHT_ATTRS": {"ORTH": "jumped"}},
{
"LEFT_ID": "jumped",
"REL_OP": ">",
"RIGHT_ID": "fox",
"RIGHT_ATTRS": {"ORTH": "fox"},
},
]
pattern5 = [
{"RIGHT_ID": "jumped", "RIGHT_ATTRS": {"ORTH": "jumped"}},
{
"LEFT_ID": "jumped",
"REL_OP": ">>",
"RIGHT_ID": "fox",
"RIGHT_ATTRS": {"ORTH": "fox"},
},
]
return [pattern1, pattern2, pattern3, pattern4, pattern5]
@pytest.fixture
def dependency_matcher(en_vocab, patterns, doc):
matcher = DependencyMatcher(en_vocab)
mock = Mock()
for i in range(1, len(patterns) + 1):
if i == 1:
matcher.add("pattern1", [patterns[0]], on_match=mock)
else:
matcher.add("pattern" + str(i), [patterns[i - 1]])
return matcher
def test_dependency_matcher(dependency_matcher, doc, patterns):
assert len(dependency_matcher) == 5
assert "pattern3" in dependency_matcher
assert dependency_matcher.get("pattern3") == (None, [patterns[2]])
matches = dependency_matcher(doc)
assert len(matches) == 6
assert matches[0][1] == [3, 1, 2]
assert matches[1][1] == [4, 3, 5]
assert matches[2][1] == [4, 3, 2]
assert matches[3][1] == [4, 3]
assert matches[4][1] == [4, 3]
assert matches[5][1] == [4, 8]
span = doc[0:6]
matches = dependency_matcher(span)
assert len(matches) == 5
assert matches[0][1] == [3, 1, 2]
assert matches[1][1] == [4, 3, 5]
assert matches[2][1] == [4, 3, 2]
assert matches[3][1] == [4, 3]
assert matches[4][1] == [4, 3]
def test_dependency_matcher_pickle(en_vocab, patterns, doc):
matcher = DependencyMatcher(en_vocab)
for i in range(1, len(patterns) + 1):
matcher.add("pattern" + str(i), [patterns[i - 1]])
matches = matcher(doc)
assert matches[0][1] == [3, 1, 2]
assert matches[1][1] == [4, 3, 5]
assert matches[2][1] == [4, 3, 2]
assert matches[3][1] == [4, 3]
assert matches[4][1] == [4, 3]
assert matches[5][1] == [4, 8]
b = pickle.dumps(matcher)
matcher_r = pickle.loads(b)
assert len(matcher) == len(matcher_r)
matches = matcher_r(doc)
assert matches[0][1] == [3, 1, 2]
assert matches[1][1] == [4, 3, 5]
assert matches[2][1] == [4, 3, 2]
assert matches[3][1] == [4, 3]
assert matches[4][1] == [4, 3]
assert matches[5][1] == [4, 8]
def test_dependency_matcher_pattern_validation(en_vocab):
pattern = [
{"RIGHT_ID": "fox", "RIGHT_ATTRS": {"ORTH": "fox"}},
{
"LEFT_ID": "fox",
"REL_OP": ">",
"RIGHT_ID": "q",
"RIGHT_ATTRS": {"ORTH": "quick", "DEP": "amod"},
},
{
"LEFT_ID": "fox",
"REL_OP": ">",
"RIGHT_ID": "r",
"RIGHT_ATTRS": {"ORTH": "brown"},
},
]
matcher = DependencyMatcher(en_vocab)
# original pattern is valid
matcher.add("FOUNDED", [pattern])
# individual pattern not wrapped in a list
with pytest.raises(ValueError):
matcher.add("FOUNDED", pattern)
# no anchor node
with pytest.raises(ValueError):
matcher.add("FOUNDED", [pattern[1:]])
# required keys missing
with pytest.raises(ValueError):
pattern2 = copy.deepcopy(pattern)
del pattern2[0]["RIGHT_ID"]
matcher.add("FOUNDED", [pattern2])
with pytest.raises(ValueError):
pattern2 = copy.deepcopy(pattern)
del pattern2[1]["RIGHT_ID"]
matcher.add("FOUNDED", [pattern2])
with pytest.raises(ValueError):
pattern2 = copy.deepcopy(pattern)
del pattern2[1]["RIGHT_ATTRS"]
matcher.add("FOUNDED", [pattern2])
with pytest.raises(ValueError):
pattern2 = copy.deepcopy(pattern)
del pattern2[1]["LEFT_ID"]
matcher.add("FOUNDED", [pattern2])
with pytest.raises(ValueError):
pattern2 = copy.deepcopy(pattern)
del pattern2[1]["REL_OP"]
matcher.add("FOUNDED", [pattern2])
# invalid operator
with pytest.raises(ValueError):
pattern2 = copy.deepcopy(pattern)
pattern2[1]["REL_OP"] = "!!!"
matcher.add("FOUNDED", [pattern2])
# duplicate node name
with pytest.raises(ValueError):
pattern2 = copy.deepcopy(pattern)
pattern2[1]["RIGHT_ID"] = "fox"
matcher.add("FOUNDED", [pattern2])
def test_dependency_matcher_callback(en_vocab, doc):
pattern = [
{"RIGHT_ID": "quick", "RIGHT_ATTRS": {"ORTH": "quick"}},
]
nomatch_pattern = [
{"RIGHT_ID": "quick", "RIGHT_ATTRS": {"ORTH": "NOMATCH"}},
]
matcher = DependencyMatcher(en_vocab)
mock = Mock()
matcher.add("pattern", [pattern], on_match=mock)
matcher.add("nomatch_pattern", [nomatch_pattern], on_match=mock)
matches = matcher(doc)
assert len(matches) == 1
mock.assert_called_once_with(matcher, doc, 0, matches)
# check that matches with and without callback are the same (#4590)
matcher2 = DependencyMatcher(en_vocab)
matcher2.add("pattern", [pattern])
matches2 = matcher2(doc)
assert matches == matches2
@pytest.mark.parametrize("op,num_matches", [(".", 8), (".*", 20), (";", 8), (";*", 20)])
def test_dependency_matcher_precedence_ops(en_vocab, op, num_matches):
# two sentences to test that all matches are within the same sentence
doc = Doc(
en_vocab,
words=["a", "b", "c", "d", "e"] * 2,
heads=[0, 0, 0, 0, 0, 5, 5, 5, 5, 5],
deps=["dep"] * 10,
)
match_count = 0
for text in ["a", "b", "c", "d", "e"]:
pattern = [
{"RIGHT_ID": "1", "RIGHT_ATTRS": {"ORTH": text}},
{"LEFT_ID": "1", "REL_OP": op, "RIGHT_ID": "2", "RIGHT_ATTRS": {}},
]
matcher = DependencyMatcher(en_vocab)
matcher.add("A", [pattern])
matches = matcher(doc)
match_count += len(matches)
for match in matches:
match_id, token_ids = match
# token_ids[0] op token_ids[1]
if op == ".":
assert token_ids[0] == token_ids[1] - 1
elif op == ";":
assert token_ids[0] == token_ids[1] + 1
elif op == ".*":
assert token_ids[0] < token_ids[1]
elif op == ";*":
assert token_ids[0] > token_ids[1]
# all tokens are within the same sentence
assert doc[token_ids[0]].sent == doc[token_ids[1]].sent
assert match_count == num_matches
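# Rough key to the Semgrex-style REL_OP values exercised below (paraphrased
# from spaCy's DependencyMatcher docs; see the docs for the authoritative list):
#   A < B    A is an immediate dependent (child) of B
#   A > B    A is the immediate head (parent) of B
#   A << B   A is a dependent of B along a chain of dep -> head links
#   A >> B   A is a head of B along a chain of head -> dep links
#   A . B    A immediately precedes B (A.i == B.i - 1, same sentence)
#   A ; B    A immediately follows B (A.i == B.i + 1, same sentence)
#   A .* B   A precedes B;    A ;* B   A follows B
#   A $+ B   B is A's immediate right sibling (same head, B.i == A.i + 1)
#   A $- B   B is A's immediate left sibling; $++ / $-- drop the adjacency
#   In the <+ <- <++ <-- and >+ >- >++ >-- variants, "+" means B lies to the
#   right of A and "-" to the left; a single +/- additionally requires the
#   two tokens to be adjacent.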
@pytest.mark.parametrize(
"left,right,op,num_matches",
[
("fox", "jumped", "<", 1),
("the", "lazy", "<", 0),
("jumped", "jumped", "<", 0),
("fox", "jumped", ">", 0),
("fox", "lazy", ">", 1),
("lazy", "lazy", ">", 0),
("fox", "jumped", "<<", 2),
("jumped", "fox", "<<", 0),
("the", "fox", "<<", 2),
("fox", "jumped", ">>", 0),
("over", "the", ">>", 1),
("fox", "the", ">>", 2),
("fox", "jumped", ".", 1),
("lazy", "fox", ".", 1),
("the", "fox", ".", 0),
("the", "the", ".", 0),
("fox", "jumped", ";", 0),
("lazy", "fox", ";", 0),
("the", "fox", ";", 0),
("the", "the", ";", 0),
("quick", "fox", ".*", 2),
("the", "fox", ".*", 3),
("the", "the", ".*", 1),
("fox", "jumped", ";*", 1),
("quick", "fox", ";*", 0),
("the", "fox", ";*", 1),
("the", "the", ";*", 1),
("quick", "brown", "$+", 1),
("brown", "quick", "$+", 0),
("brown", "brown", "$+", 0),
("quick", "brown", "$-", 0),
("brown", "quick", "$-", 1),
("brown", "brown", "$-", 0),
("the", "brown", "$++", 1),
("brown", "the", "$++", 0),
("brown", "brown", "$++", 0),
("the", "brown", "$--", 0),
("brown", "the", "$--", 1),
("brown", "brown", "$--", 0),
("over", "jumped", "<+", 0),
("quick", "fox", "<+", 0),
("the", "quick", "<+", 0),
("brown", "fox", "<+", 1),
("quick", "fox", "<++", 1),
("quick", "over", "<++", 0),
("over", "jumped", "<++", 0),
("the", "fox", "<++", 2),
("brown", "fox", "<-", 0),
("fox", "over", "<-", 0),
("the", "over", "<-", 0),
("over", "jumped", "<-", 1),
("brown", "fox", "<--", 0),
("fox", "jumped", "<--", 0),
("fox", "over", "<--", 1),
("fox", "brown", ">+", 0),
("over", "fox", ">+", 0),
("over", "the", ">+", 0),
("jumped", "over", ">+", 1),
("jumped", "over", ">++", 1),
("fox", "lazy", ">++", 0),
("over", "the", ">++", 0),
("jumped", "over", ">-", 0),
("fox", "quick", ">-", 0),
("brown", "quick", ">-", 0),
("fox", "brown", ">-", 1),
("brown", "fox", ">--", 0),
("fox", "brown", ">--", 1),
("jumped", "fox", ">--", 1),
("fox", "the", ">--", 2),
],
)
def test_dependency_matcher_ops(en_vocab, doc, left, right, op, num_matches):
right_id = right
if left == right:
right_id = right + "2"
pattern = [
{"RIGHT_ID": left, "RIGHT_ATTRS": {"LOWER": left}},
{
"LEFT_ID": left,
"REL_OP": op,
"RIGHT_ID": right_id,
"RIGHT_ATTRS": {"LOWER": right},
},
]
matcher = DependencyMatcher(en_vocab)
matcher.add("pattern", [pattern])
matches = matcher(doc)
assert len(matches) == num_matches
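# DependencyMatcher node patterns are expected to describe exactly one token,
# so quantifiers such as "OP": "+" (which could span several tokens) are
# rejected with a ValueError at add() time — the behaviour the next test pins down.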
def test_dependency_matcher_long_matches(en_vocab, doc):
pattern = [
{"RIGHT_ID": "quick", "RIGHT_ATTRS": {"DEP": "amod", "OP": "+"}},
]
matcher = DependencyMatcher(en_vocab)
with pytest.raises(ValueError):
matcher.add("pattern", [pattern])
@pytest.mark.usefixtures("clean_underscore")
def test_dependency_matcher_span_user_data(en_tokenizer):
doc = en_tokenizer("a b c d e")
for token in doc:
token.head = doc[0]
token.dep_ = "a"
Token.set_extension("is_c", default=False)
doc[2]._.is_c = True
pattern = [
{"RIGHT_ID": "c", "RIGHT_ATTRS": {"_": {"is_c": True}}},
]
matcher = DependencyMatcher(en_tokenizer.vocab)
matcher.add("C", [pattern])
doc_matches = matcher(doc)
offset = 1
span_matches = matcher(doc[offset:])
for doc_match, span_match in zip(sorted(doc_matches), sorted(span_matches)):
assert doc_match[0] == span_match[0]
for doc_t_i, span_t_i in zip(doc_match[1], span_match[1]):
assert doc_t_i == span_t_i + offset
@pytest.mark.issue(9263)
def test_dependency_matcher_order_issue(en_tokenizer):
# issue from #9263
doc = en_tokenizer("I like text")
doc[2].head = doc[1]
# this matches on attrs but not rel op
pattern1 = [
{"RIGHT_ID": "root", "RIGHT_ATTRS": {"ORTH": "like"}},
{
"LEFT_ID": "root",
"RIGHT_ID": "r",
"RIGHT_ATTRS": {"ORTH": "text"},
"REL_OP": "<",
},
]
# this matches on rel op but not attrs
pattern2 = [
{"RIGHT_ID": "root", "RIGHT_ATTRS": {"ORTH": "like"}},
{
"LEFT_ID": "root",
"RIGHT_ID": "r",
"RIGHT_ATTRS": {"ORTH": "fish"},
"REL_OP": ">",
},
]
matcher = DependencyMatcher(en_tokenizer.vocab)
# This should behave the same as the next pattern
matcher.add("check", [pattern1, pattern2])
matches = matcher(doc)
assert matches == []
# use a new matcher
matcher = DependencyMatcher(en_tokenizer.vocab)
# adding one at a time under same label gets a match
matcher.add("check", [pattern1])
matcher.add("check", [pattern2])
matches = matcher(doc)
assert matches == []
@pytest.mark.issue(9263)
def test_dependency_matcher_remove(en_tokenizer):
# issue from #9263
doc = en_tokenizer("The red book")
doc[1].head = doc[2]
# this matches
pattern1 = [
{"RIGHT_ID": "root", "RIGHT_ATTRS": {"ORTH": "book"}},
{
"LEFT_ID": "root",
"RIGHT_ID": "r",
"RIGHT_ATTRS": {"ORTH": "red"},
"REL_OP": ">",
},
]
# add and then remove it
matcher = DependencyMatcher(en_tokenizer.vocab)
matcher.add("check", [pattern1])
matcher.remove("check")
# this matches on rel op but not attrs
pattern2 = [
{"RIGHT_ID": "root", "RIGHT_ATTRS": {"ORTH": "flag"}},
{
"LEFT_ID": "root",
"RIGHT_ID": "r",
"RIGHT_ATTRS": {"ORTH": "blue"},
"REL_OP": ">",
},
]
# Adding this new pattern with the same label, which should not match
matcher.add("check", [pattern2])
matches = matcher(doc)
assert matches == []
| 14,872 | 29.415133 | 88 | py |
spaCy | spaCy-master/spacy/tests/matcher/test_levenshtein.py | import pytest
from spacy.matcher import levenshtein
from spacy.matcher.levenshtein import levenshtein_compare
# empty string plus 10 random ASCII, 10 random unicode, and 2 random long tests
# from polyleven
@pytest.mark.parametrize(
"dist,a,b",
[
(0, "", ""),
(4, "bbcb", "caba"),
(3, "abcb", "cacc"),
(3, "aa", "ccc"),
(1, "cca", "ccac"),
(1, "aba", "aa"),
(4, "bcbb", "abac"),
(3, "acbc", "bba"),
(3, "cbba", "a"),
(2, "bcc", "ba"),
(4, "aaa", "ccbb"),
(3, "うあい", "いいうい"),
(2, "あううい", "うあい"),
(3, "いういい", "うううあ"),
(2, "うい", "あいあ"),
(2, "いあい", "いう"),
(1, "いい", "あいい"),
(3, "あうあ", "いいああ"),
(4, "いあうう", "ううああ"),
(3, "いあいい", "ういああ"),
(3, "いいああ", "ううあう"),
(
166,
"TCTGGGCACGGATTCGTCAGATTCCATGTCCATATTTGAGGCTCTTGCAGGCAAAATTTGGGCATGTGAACTCCTTATAGTCCCCGTGC",
"ATATGGATTGGGGGCATTCAAAGATACGGTTTCCCTTTCTTCAGTTTCGCGCGGCGCACGTCCGGGTGCGAGCCAGTTCGTCTTACTCACATTGTCGACTTCACGAATCGCGCATGATGTGCTTAGCCTGTACTTACGAACGAACTTTCGGTCCAAATACATTCTATCAACACCGAGGTATCCGTGCCACACGCCGAAGCTCGACCGTGTTCGTTGAGAGGTGGAAATGGTAAAAGATGAACATAGTC",
),
(
111,
"GGTTCGGCCGAATTCATAGAGCGTGGTAGTCGACGGTATCCCGCCTGGTAGGGGCCCCTTCTACCTAGCGGAAGTTTGTCAGTACTCTATAACACGAGGGCCTCTCACACCCTAGATCGTCCAGCCACTCGAAGATCGCAGCACCCTTACAGAAAGGCATTAATGTTTCTCCTAGCACTTGTGCAATGGTGAAGGAGTGATG",
"CGTAACACTTCGCGCTACTGGGCTGCAACGTCTTGGGCATACATGCAAGATTATCTAATGCAAGCTTGAGCCCCGCTTGCGGAATTTCCCTAATCGGGGTCCCTTCCTGTTACGATAAGGACGCGTGCACT",
),
],
)
def test_levenshtein(dist, a, b):
assert levenshtein(a, b) == dist
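# levenshtein_compare(a, b, fuzzy) reports whether the edit distance between a
# and b stays within the allowed bound: a non-negative fuzzy value is used as
# the bound directly, while fuzzy == -1 selects the default bound, which the
# inline comments below show is at least 2 and scales with string length.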
@pytest.mark.parametrize(
"a,b,fuzzy,expected",
[
("a", "a", 1, True),
("a", "a", 0, True),
("a", "a", -1, True),
("a", "ab", 1, True),
("a", "ab", 0, False),
("a", "ab", -1, True),
("ab", "ac", 1, True),
("ab", "ac", -1, True),
("abc", "cde", 4, True),
("abc", "cde", -1, False),
("abcdef", "cdefgh", 4, True),
("abcdef", "cdefgh", 3, False),
("abcdef", "cdefgh", -1, False), # default (2 for length 6)
("abcdefgh", "cdefghijk", 5, True),
("abcdefgh", "cdefghijk", 4, False),
("abcdefgh", "cdefghijk", -1, False), # default (2)
("abcdefgh", "cdefghijkl", 6, True),
("abcdefgh", "cdefghijkl", 5, False),
("abcdefgh", "cdefghijkl", -1, False), # default (2)
],
)
def test_levenshtein_compare(a, b, fuzzy, expected):
assert levenshtein_compare(a, b, fuzzy) == expected
| 2,662 | 34.506667 | 263 | py |
spaCy | spaCy-master/spacy/tests/matcher/test_matcher_api.py | import pytest
from mock import Mock
from spacy.matcher import Matcher
from spacy.tokens import Doc, Span, Token
from ..doc.test_underscore import clean_underscore # noqa: F401
@pytest.fixture
def matcher(en_vocab):
rules = {
"JS": [[{"ORTH": "JavaScript"}]],
"GoogleNow": [[{"ORTH": "Google"}, {"ORTH": "Now"}]],
"Java": [[{"LOWER": "java"}]],
}
matcher = Matcher(en_vocab)
for key, patterns in rules.items():
matcher.add(key, patterns)
return matcher
def test_matcher_from_api_docs(en_vocab):
matcher = Matcher(en_vocab)
pattern = [{"ORTH": "test"}]
assert len(matcher) == 0
matcher.add("Rule", [pattern])
assert len(matcher) == 1
matcher.remove("Rule")
assert "Rule" not in matcher
matcher.add("Rule", [pattern])
assert "Rule" in matcher
on_match, patterns = matcher.get("Rule")
assert len(patterns[0])
def test_matcher_empty_patterns_warns(en_vocab):
matcher = Matcher(en_vocab)
assert len(matcher) == 0
doc = Doc(en_vocab, words=["This", "is", "quite", "something"])
with pytest.warns(UserWarning):
matcher(doc)
assert len(doc.ents) == 0
def test_matcher_from_usage_docs(en_vocab):
text = "Wow 😀 This is really cool! 😂 😂"
doc = Doc(en_vocab, words=text.split(" "))
pos_emoji = ["😀", "😃", "😂", "🤣", "😊", "😍"]
pos_patterns = [[{"ORTH": emoji}] for emoji in pos_emoji]
def label_sentiment(matcher, doc, i, matches):
match_id, start, end = matches[i]
if doc.vocab.strings[match_id] == "HAPPY":
doc.sentiment += 0.1
span = doc[start:end]
with doc.retokenize() as retokenizer:
retokenizer.merge(span)
token = doc[start]
token.vocab[token.text].norm_ = "happy emoji"
matcher = Matcher(en_vocab)
matcher.add("HAPPY", pos_patterns, on_match=label_sentiment)
matcher(doc)
assert doc.sentiment != 0
assert doc[1].norm_ == "happy emoji"
def test_matcher_len_contains(matcher):
assert len(matcher) == 3
matcher.add("TEST", [[{"ORTH": "test"}]])
assert "TEST" in matcher
assert "TEST2" not in matcher
def test_matcher_add_new_api(en_vocab):
doc = Doc(en_vocab, words=["a", "b"])
patterns = [[{"TEXT": "a"}], [{"TEXT": "a"}, {"TEXT": "b"}]]
matcher = Matcher(en_vocab)
on_match = Mock()
matcher = Matcher(en_vocab)
matcher.add("NEW_API", patterns)
assert len(matcher(doc)) == 2
matcher = Matcher(en_vocab)
on_match = Mock()
matcher.add("NEW_API_CALLBACK", patterns, on_match=on_match)
assert len(matcher(doc)) == 2
assert on_match.call_count == 2
def test_matcher_no_match(matcher):
doc = Doc(matcher.vocab, words=["I", "like", "cheese", "."])
assert matcher(doc) == []
def test_matcher_match_start(matcher):
doc = Doc(matcher.vocab, words=["JavaScript", "is", "good"])
assert matcher(doc) == [(matcher.vocab.strings["JS"], 0, 1)]
def test_matcher_match_end(matcher):
words = ["I", "like", "java"]
doc = Doc(matcher.vocab, words=words)
assert matcher(doc) == [(doc.vocab.strings["Java"], 2, 3)]
def test_matcher_match_middle(matcher):
words = ["I", "like", "Google", "Now", "best"]
doc = Doc(matcher.vocab, words=words)
assert matcher(doc) == [(doc.vocab.strings["GoogleNow"], 2, 4)]
def test_matcher_match_multi(matcher):
words = ["I", "like", "Google", "Now", "and", "java", "best"]
doc = Doc(matcher.vocab, words=words)
assert matcher(doc) == [
(doc.vocab.strings["GoogleNow"], 2, 4),
(doc.vocab.strings["Java"], 5, 6),
]
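# The FUZZY predicate used below relaxes exact attribute matching to
# Levenshtein-distance matching: {"ORTH": {"FUZZY": "Google"}} also accepts
# close misspellings such as "Goggle". FUZZY1..FUZZY9 set an explicit maximum
# edit distance, while bare FUZZY falls back to the length-dependent default
# exercised in test_levenshtein.py above.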
@pytest.mark.parametrize(
"rules,match_locs",
[
(
{
"GoogleNow": [[{"ORTH": {"FUZZY": "Google"}}, {"ORTH": "Now"}]],
},
[(2, 4)],
),
(
{
"Java": [[{"LOWER": {"FUZZY": "java"}}]],
},
[(5, 6)],
),
(
{
"JS": [[{"ORTH": {"FUZZY": "JavaScript"}}]],
"GoogleNow": [[{"ORTH": {"FUZZY": "Google"}}, {"ORTH": "Now"}]],
"Java": [[{"LOWER": {"FUZZY": "java"}}]],
},
[(2, 4), (5, 6), (8, 9)],
),
# only the second pattern matches (check that predicate keys used for
# caching don't collide)
(
{
"A": [[{"ORTH": {"FUZZY": "Javascripts"}}]],
"B": [[{"ORTH": {"FUZZY5": "Javascripts"}}]],
},
[(8, 9)],
),
],
)
def test_matcher_match_fuzzy(en_vocab, rules, match_locs):
words = ["They", "like", "Goggle", "Now", "and", "Jav", "but", "not", "JvvaScrpt"]
doc = Doc(en_vocab, words=words)
matcher = Matcher(en_vocab)
for key, patterns in rules.items():
matcher.add(key, patterns)
assert match_locs == [(start, end) for m_id, start, end in matcher(doc)]
@pytest.mark.parametrize("set_op", ["IN", "NOT_IN"])
def test_matcher_match_fuzzy_set_op_longest(en_vocab, set_op):
rules = {
"GoogleNow": [[{"ORTH": {"FUZZY": {set_op: ["Google", "Now"]}}, "OP": "+"}]]
}
matcher = Matcher(en_vocab)
for key, patterns in rules.items():
matcher.add(key, patterns, greedy="LONGEST")
words = ["They", "like", "Goggle", "Noo"]
doc = Doc(en_vocab, words=words)
assert len(matcher(doc)) == 1
def test_matcher_match_fuzzy_set_multiple(en_vocab):
rules = {
"GoogleNow": [
[
{
"ORTH": {"FUZZY": {"IN": ["Google", "Now"]}, "NOT_IN": ["Goggle"]},
"OP": "+",
}
]
]
}
matcher = Matcher(en_vocab)
for key, patterns in rules.items():
matcher.add(key, patterns, greedy="LONGEST")
words = ["They", "like", "Goggle", "Noo"]
doc = Doc(matcher.vocab, words=words)
assert matcher(doc) == [
(doc.vocab.strings["GoogleNow"], 3, 4),
]
@pytest.mark.parametrize("fuzzyn", range(1, 10))
def test_matcher_match_fuzzyn_all_insertions(en_vocab, fuzzyn):
matcher = Matcher(en_vocab)
matcher.add("GoogleNow", [[{"ORTH": {f"FUZZY{fuzzyn}": "GoogleNow"}}]])
# words with increasing edit distance
words = ["GoogleNow" + "a" * i for i in range(0, 10)]
doc = Doc(en_vocab, words)
assert len(matcher(doc)) == fuzzyn + 1
@pytest.mark.parametrize("fuzzyn", range(1, 6))
def test_matcher_match_fuzzyn_various_edits(en_vocab, fuzzyn):
matcher = Matcher(en_vocab)
matcher.add("GoogleNow", [[{"ORTH": {f"FUZZY{fuzzyn}": "GoogleNow"}}]])
# words with increasing edit distance of different edit types
words = [
"GoogleNow",
"GoogleNuw",
"GoogleNuew",
"GoogleNoweee",
"GiggleNuw3",
"gouggle5New",
]
doc = Doc(en_vocab, words)
assert len(matcher(doc)) == fuzzyn + 1
@pytest.mark.parametrize("greedy", ["FIRST", "LONGEST"])
@pytest.mark.parametrize("set_op", ["IN", "NOT_IN"])
def test_matcher_match_fuzzyn_set_op_longest(en_vocab, greedy, set_op):
rules = {
"GoogleNow": [[{"ORTH": {"FUZZY2": {set_op: ["Google", "Now"]}}, "OP": "+"}]]
}
matcher = Matcher(en_vocab)
for key, patterns in rules.items():
matcher.add(key, patterns, greedy=greedy)
words = ["They", "like", "Goggle", "Noo"]
doc = Doc(matcher.vocab, words=words)
spans = matcher(doc, as_spans=True)
assert len(spans) == 1
if set_op == "IN":
assert spans[0].text == "Goggle Noo"
else:
assert spans[0].text == "They like"
def test_matcher_match_fuzzyn_set_multiple(en_vocab):
rules = {
"GoogleNow": [
[
{
"ORTH": {"FUZZY1": {"IN": ["Google", "Now"]}, "NOT_IN": ["Goggle"]},
"OP": "+",
}
]
]
}
matcher = Matcher(en_vocab)
for key, patterns in rules.items():
matcher.add(key, patterns, greedy="LONGEST")
words = ["They", "like", "Goggle", "Noo"]
doc = Doc(matcher.vocab, words=words)
assert matcher(doc) == [
(doc.vocab.strings["GoogleNow"], 3, 4),
]
def test_matcher_empty_dict(en_vocab):
"""Test matcher allows empty token specs, meaning match on any token."""
matcher = Matcher(en_vocab)
doc = Doc(matcher.vocab, words=["a", "b", "c"])
matcher.add("A.C", [[{"ORTH": "a"}, {}, {"ORTH": "c"}]])
matches = matcher(doc)
assert len(matches) == 1
assert matches[0][1:] == (0, 3)
matcher = Matcher(en_vocab)
matcher.add("A.", [[{"ORTH": "a"}, {}]])
matches = matcher(doc)
assert matches[0][1:] == (0, 2)
def test_matcher_operator_shadow(en_vocab):
matcher = Matcher(en_vocab)
doc = Doc(matcher.vocab, words=["a", "b", "c"])
pattern = [{"ORTH": "a"}, {"IS_ALPHA": True, "OP": "+"}, {"ORTH": "c"}]
matcher.add("A.C", [pattern])
matches = matcher(doc)
assert len(matches) == 1
assert matches[0][1:] == (0, 3)
def test_matcher_match_zero(matcher):
words1 = 'He said , " some words " ...'.split()
words2 = 'He said , " some three words " ...'.split()
pattern1 = [
{"ORTH": '"'},
{"OP": "!", "IS_PUNCT": True},
{"OP": "!", "IS_PUNCT": True},
{"ORTH": '"'},
]
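    # Here OP "!" negates the token spec: each slot consumes one token that
    # must NOT be punctuation, so pattern1 only fits a quote containing
    # exactly two non-punct tokens.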
pattern2 = [
{"ORTH": '"'},
{"IS_PUNCT": True},
{"IS_PUNCT": True},
{"IS_PUNCT": True},
{"ORTH": '"'},
]
matcher.add("Quote", [pattern1])
doc = Doc(matcher.vocab, words=words1)
assert len(matcher(doc)) == 1
doc = Doc(matcher.vocab, words=words2)
assert len(matcher(doc)) == 0
matcher.add("Quote", [pattern2])
assert len(matcher(doc)) == 0
def test_matcher_match_zero_plus(matcher):
words = 'He said , " some words " ...'.split()
pattern = [{"ORTH": '"'}, {"OP": "*", "IS_PUNCT": False}, {"ORTH": '"'}]
matcher = Matcher(matcher.vocab)
matcher.add("Quote", [pattern])
doc = Doc(matcher.vocab, words=words)
assert len(matcher(doc)) == 1
def test_matcher_match_one_plus(matcher):
control = Matcher(matcher.vocab)
control.add("BasicPhilippe", [[{"ORTH": "Philippe"}]])
doc = Doc(control.vocab, words=["Philippe", "Philippe"])
m = control(doc)
assert len(m) == 2
pattern = [{"ORTH": "Philippe"}, {"ORTH": "Philippe", "OP": "+"}]
matcher.add("KleenePhilippe", [pattern])
m = matcher(doc)
assert len(m) == 1
def test_matcher_any_token_operator(en_vocab):
"""Test that patterns with "any token" {} work with operators."""
matcher = Matcher(en_vocab)
matcher.add("TEST", [[{"ORTH": "test"}, {"OP": "*"}]])
doc = Doc(en_vocab, words=["test", "hello", "world"])
matches = [doc[start:end].text for _, start, end in matcher(doc)]
assert len(matches) == 3
assert matches[0] == "test"
assert matches[1] == "test hello"
assert matches[2] == "test hello world"
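# Usage sketch (illustrative helper, not collected by pytest; reuses the
# module's imports and fixtures): adding the same pattern with greedy="LONGEST"
# keeps only the longest of the overlapping expansions produced by the "*"
# wildcard above.
def _any_token_longest_sketch(en_vocab):
    matcher = Matcher(en_vocab)
    matcher.add("TEST", [[{"ORTH": "test"}, {"OP": "*"}]], greedy="LONGEST")
    doc = Doc(en_vocab, words=["test", "hello", "world"])
    return [doc[s:e].text for _, s, e in matcher(doc)]  # ["test hello world"]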
@pytest.mark.usefixtures("clean_underscore")
def test_matcher_extension_attribute(en_vocab):
matcher = Matcher(en_vocab)
get_is_fruit = lambda token: token.text in ("apple", "banana")
Token.set_extension("is_fruit", getter=get_is_fruit, force=True)
pattern = [{"ORTH": "an"}, {"_": {"is_fruit": True}}]
matcher.add("HAVING_FRUIT", [pattern])
doc = Doc(en_vocab, words=["an", "apple"])
matches = matcher(doc)
assert len(matches) == 1
doc = Doc(en_vocab, words=["an", "aardvark"])
matches = matcher(doc)
assert len(matches) == 0
def test_matcher_set_value(en_vocab):
matcher = Matcher(en_vocab)
pattern = [{"ORTH": {"IN": ["an", "a"]}}]
matcher.add("A_OR_AN", [pattern])
doc = Doc(en_vocab, words=["an", "a", "apple"])
matches = matcher(doc)
assert len(matches) == 2
doc = Doc(en_vocab, words=["aardvark"])
matches = matcher(doc)
assert len(matches) == 0
def test_matcher_set_value_operator(en_vocab):
matcher = Matcher(en_vocab)
pattern = [{"ORTH": {"IN": ["a", "the"]}, "OP": "?"}, {"ORTH": "house"}]
matcher.add("DET_HOUSE", [pattern])
doc = Doc(en_vocab, words=["In", "a", "house"])
matches = matcher(doc)
assert len(matches) == 2
doc = Doc(en_vocab, words=["my", "house"])
matches = matcher(doc)
assert len(matches) == 1
def test_matcher_subset_value_operator(en_vocab):
matcher = Matcher(en_vocab)
pattern = [{"MORPH": {"IS_SUBSET": ["Feat=Val", "Feat2=Val2"]}}]
matcher.add("M", [pattern])
doc = Doc(en_vocab, words=["a", "b", "c"])
assert len(matcher(doc)) == 3
doc[0].set_morph("Feat=Val")
assert len(matcher(doc)) == 3
doc[0].set_morph("Feat=Val|Feat2=Val2")
assert len(matcher(doc)) == 3
doc[0].set_morph("Feat=Val|Feat2=Val2|Feat3=Val3")
assert len(matcher(doc)) == 2
doc[0].set_morph("Feat=Val|Feat2=Val2|Feat3=Val3|Feat4=Val4")
assert len(matcher(doc)) == 2
# IS_SUBSET acts like "IN" for attrs other than MORPH
matcher = Matcher(en_vocab)
pattern = [{"TAG": {"IS_SUBSET": ["A", "B"]}}]
matcher.add("M", [pattern])
doc = Doc(en_vocab, words=["a", "b", "c"])
doc[0].tag_ = "A"
assert len(matcher(doc)) == 1
# IS_SUBSET with an empty list matches nothing
matcher = Matcher(en_vocab)
pattern = [{"TAG": {"IS_SUBSET": []}}]
matcher.add("M", [pattern])
doc = Doc(en_vocab, words=["a", "b", "c"])
doc[0].tag_ = "A"
assert len(matcher(doc)) == 0
# IS_SUBSET with a list value
Token.set_extension("ext", default=[])
matcher = Matcher(en_vocab)
pattern = [{"_": {"ext": {"IS_SUBSET": ["A", "B"]}}}]
matcher.add("M", [pattern])
doc = Doc(en_vocab, words=["a", "b", "c"])
doc[0]._.ext = ["A"]
doc[1]._.ext = ["C", "D"]
assert len(matcher(doc)) == 2
def test_matcher_superset_value_operator(en_vocab):
matcher = Matcher(en_vocab)
pattern = [{"MORPH": {"IS_SUPERSET": ["Feat=Val", "Feat2=Val2", "Feat3=Val3"]}}]
matcher.add("M", [pattern])
doc = Doc(en_vocab, words=["a", "b", "c"])
assert len(matcher(doc)) == 0
doc[0].set_morph("Feat=Val|Feat2=Val2")
assert len(matcher(doc)) == 0
doc[0].set_morph("Feat=Val|Feat2=Val2|Feat3=Val3")
assert len(matcher(doc)) == 1
doc[0].set_morph("Feat=Val|Feat2=Val2|Feat3=Val3|Feat4=Val4")
assert len(matcher(doc)) == 1
# IS_SUPERSET with more than one value only matches for MORPH
matcher = Matcher(en_vocab)
pattern = [{"TAG": {"IS_SUPERSET": ["A", "B"]}}]
matcher.add("M", [pattern])
doc = Doc(en_vocab, words=["a", "b", "c"])
doc[0].tag_ = "A"
assert len(matcher(doc)) == 0
# IS_SUPERSET with one value is the same as ==
matcher = Matcher(en_vocab)
pattern = [{"TAG": {"IS_SUPERSET": ["A"]}}]
matcher.add("M", [pattern])
doc = Doc(en_vocab, words=["a", "b", "c"])
doc[0].tag_ = "A"
assert len(matcher(doc)) == 1
# IS_SUPERSET with an empty value matches everything
matcher = Matcher(en_vocab)
pattern = [{"TAG": {"IS_SUPERSET": []}}]
matcher.add("M", [pattern])
doc = Doc(en_vocab, words=["a", "b", "c"])
doc[0].tag_ = "A"
assert len(matcher(doc)) == 3
# IS_SUPERSET with a list value
Token.set_extension("ext", default=[])
matcher = Matcher(en_vocab)
pattern = [{"_": {"ext": {"IS_SUPERSET": ["A"]}}}]
matcher.add("M", [pattern])
doc = Doc(en_vocab, words=["a", "b", "c"])
doc[0]._.ext = ["A", "B"]
assert len(matcher(doc)) == 1
def test_matcher_intersect_value_operator(en_vocab):
matcher = Matcher(en_vocab)
pattern = [{"MORPH": {"INTERSECTS": ["Feat=Val", "Feat2=Val2", "Feat3=Val3"]}}]
matcher.add("M", [pattern])
doc = Doc(en_vocab, words=["a", "b", "c"])
assert len(matcher(doc)) == 0
doc[0].set_morph("Feat=Val")
assert len(matcher(doc)) == 1
doc[0].set_morph("Feat=Val|Feat2=Val2")
assert len(matcher(doc)) == 1
doc[0].set_morph("Feat=Val|Feat2=Val2|Feat3=Val3")
assert len(matcher(doc)) == 1
doc[0].set_morph("Feat=Val|Feat2=Val2|Feat3=Val3|Feat4=Val4")
assert len(matcher(doc)) == 1
    # for a single-valued attr like TAG, INTERSECTS behaves like IN
matcher = Matcher(en_vocab)
pattern = [{"TAG": {"INTERSECTS": ["A", "B"]}}]
matcher.add("M", [pattern])
doc = Doc(en_vocab, words=["a", "b", "c"])
doc[0].tag_ = "A"
assert len(matcher(doc)) == 1
# INTERSECTS with an empty pattern list matches nothing
matcher = Matcher(en_vocab)
pattern = [{"TAG": {"INTERSECTS": []}}]
matcher.add("M", [pattern])
doc = Doc(en_vocab, words=["a", "b", "c"])
doc[0].tag_ = "A"
assert len(matcher(doc)) == 0
# INTERSECTS with a list value
Token.set_extension("ext", default=[])
matcher = Matcher(en_vocab)
pattern = [{"_": {"ext": {"INTERSECTS": ["A", "C"]}}}]
matcher.add("M", [pattern])
doc = Doc(en_vocab, words=["a", "b", "c"])
doc[0]._.ext = ["A", "B"]
assert len(matcher(doc)) == 1
# INTERSECTS matches nothing for iterables that aren't all str or int
matcher = Matcher(en_vocab)
pattern = [{"_": {"ext": {"INTERSECTS": ["Abx", "C"]}}}]
matcher.add("M", [pattern])
doc = Doc(en_vocab, words=["a", "b", "c"])
doc[0]._.ext = [["Abx"], "B"]
assert len(matcher(doc)) == 0
doc[0]._.ext = ["Abx", "B"]
assert len(matcher(doc)) == 1
# INTERSECTS with an empty pattern list matches nothing
matcher = Matcher(en_vocab)
pattern = [{"_": {"ext": {"INTERSECTS": []}}}]
matcher.add("M", [pattern])
doc = Doc(en_vocab, words=["a", "b", "c"])
doc[0]._.ext = ["A", "B"]
assert len(matcher(doc)) == 0
# INTERSECTS with an empty value matches nothing
matcher = Matcher(en_vocab)
pattern = [{"_": {"ext": {"INTERSECTS": ["A", "B"]}}}]
matcher.add("M", [pattern])
doc = Doc(en_vocab, words=["a", "b", "c"])
doc[0]._.ext = []
assert len(matcher(doc)) == 0
def test_matcher_morph_handling(en_vocab):
# order of features in pattern doesn't matter
matcher = Matcher(en_vocab)
pattern1 = [{"MORPH": {"IN": ["Feat1=Val1|Feat2=Val2"]}}]
pattern2 = [{"MORPH": {"IN": ["Feat2=Val2|Feat1=Val1"]}}]
matcher.add("M", [pattern1])
matcher.add("N", [pattern2])
doc = Doc(en_vocab, words=["a", "b", "c"])
assert len(matcher(doc)) == 0
doc[0].set_morph("Feat2=Val2|Feat1=Val1")
assert len(matcher(doc)) == 2
doc[0].set_morph("Feat1=Val1|Feat2=Val2")
assert len(matcher(doc)) == 2
# multiple values are split
matcher = Matcher(en_vocab)
pattern1 = [{"MORPH": {"IS_SUPERSET": ["Feat1=Val1", "Feat2=Val2"]}}]
pattern2 = [{"MORPH": {"IS_SUPERSET": ["Feat1=Val1", "Feat1=Val3", "Feat2=Val2"]}}]
matcher.add("M", [pattern1])
matcher.add("N", [pattern2])
doc = Doc(en_vocab, words=["a", "b", "c"])
assert len(matcher(doc)) == 0
doc[0].set_morph("Feat2=Val2,Val3|Feat1=Val1")
assert len(matcher(doc)) == 1
doc[0].set_morph("Feat1=Val1,Val3|Feat2=Val2")
assert len(matcher(doc)) == 2
def test_matcher_regex(en_vocab):
matcher = Matcher(en_vocab)
pattern = [{"ORTH": {"REGEX": r"(?:a|an)"}}]
matcher.add("A_OR_AN", [pattern])
doc = Doc(en_vocab, words=["an", "a", "hi"])
matches = matcher(doc)
assert len(matches) == 2
doc = Doc(en_vocab, words=["bye"])
matches = matcher(doc)
assert len(matches) == 0
def test_matcher_regex_set_in(en_vocab):
matcher = Matcher(en_vocab)
pattern = [{"ORTH": {"REGEX": {"IN": [r"(?:a)", r"(?:an)"]}}}]
matcher.add("A_OR_AN", [pattern])
doc = Doc(en_vocab, words=["an", "a", "hi"])
matches = matcher(doc)
assert len(matches) == 2
doc = Doc(en_vocab, words=["bye"])
matches = matcher(doc)
assert len(matches) == 0
def test_matcher_regex_set_not_in(en_vocab):
matcher = Matcher(en_vocab)
pattern = [{"ORTH": {"REGEX": {"NOT_IN": [r"(?:a)", r"(?:an)"]}}}]
matcher.add("A_OR_AN", [pattern])
doc = Doc(en_vocab, words=["an", "a", "hi"])
matches = matcher(doc)
assert len(matches) == 1
doc = Doc(en_vocab, words=["bye"])
matches = matcher(doc)
assert len(matches) == 1
def test_matcher_regex_shape(en_vocab):
matcher = Matcher(en_vocab)
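    # SHAPE maps letters to "X"/"x" and digits to "d" ("99" -> "dd",
    # "problems" -> "xxxx"), so this regex keeps tokens whose shape contains
    # no lowercase letter, i.e. the number and the punctuation mark below.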
pattern = [{"SHAPE": {"REGEX": r"^[^x]+$"}}]
matcher.add("NON_ALPHA", [pattern])
doc = Doc(en_vocab, words=["99", "problems", "!"])
matches = matcher(doc)
assert len(matches) == 2
doc = Doc(en_vocab, words=["bye"])
matches = matcher(doc)
assert len(matches) == 0
@pytest.mark.parametrize(
"cmp, bad",
[
("==", ["a", "aaa"]),
("!=", ["aa"]),
(">=", ["a"]),
("<=", ["aaa"]),
(">", ["a", "aa"]),
("<", ["aa", "aaa"]),
],
)
def test_matcher_compare_length(en_vocab, cmp, bad):
matcher = Matcher(en_vocab)
pattern = [{"LENGTH": {cmp: 2}}]
matcher.add("LENGTH_COMPARE", [pattern])
doc = Doc(en_vocab, words=["a", "aa", "aaa"])
matches = matcher(doc)
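    # `bad` lists the tokens whose length fails the comparison against 2, so
    # the match count over ["a", "aa", "aaa"] is the complement of `bad`.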
assert len(matches) == len(doc) - len(bad)
doc = Doc(en_vocab, words=bad)
matches = matcher(doc)
assert len(matches) == 0
def test_matcher_extension_set_membership(en_vocab):
matcher = Matcher(en_vocab)
get_reversed = lambda token: "".join(reversed(token.text))
Token.set_extension("reversed", getter=get_reversed, force=True)
pattern = [{"_": {"reversed": {"IN": ["eyb", "ih"]}}}]
matcher.add("REVERSED", [pattern])
doc = Doc(en_vocab, words=["hi", "bye", "hello"])
matches = matcher(doc)
assert len(matches) == 2
doc = Doc(en_vocab, words=["aardvark"])
matches = matcher(doc)
assert len(matches) == 0
def test_matcher_extension_in_set_predicate(en_vocab):
matcher = Matcher(en_vocab)
Token.set_extension("ext", default=[])
pattern = [{"_": {"ext": {"IN": ["A", "C"]}}}]
matcher.add("M", [pattern])
doc = Doc(en_vocab, words=["a", "b", "c"])
# The IN predicate expects an exact match between the
# extension value and one of the pattern's values.
doc[0]._.ext = ["A", "B"]
assert len(matcher(doc)) == 0
doc[0]._.ext = ["A"]
assert len(matcher(doc)) == 0
doc[0]._.ext = "A"
assert len(matcher(doc)) == 1
def test_matcher_basic_check(en_vocab):
matcher = Matcher(en_vocab)
# Potential mistake: pass in pattern instead of list of patterns
pattern = [{"TEXT": "hello"}, {"TEXT": "world"}]
with pytest.raises(ValueError):
matcher.add("TEST", pattern)
def test_attr_pipeline_checks(en_vocab):
doc1 = Doc(en_vocab, words=["Test"])
doc1[0].dep_ = "ROOT"
doc2 = Doc(en_vocab, words=["Test"])
doc2[0].tag_ = "TAG"
doc2[0].pos_ = "X"
doc2[0].set_morph("Feat=Val")
doc2[0].lemma_ = "LEMMA"
doc3 = Doc(en_vocab, words=["Test"])
# DEP requires DEP
matcher = Matcher(en_vocab)
matcher.add("TEST", [[{"DEP": "a"}]])
matcher(doc1)
with pytest.raises(ValueError):
matcher(doc2)
with pytest.raises(ValueError):
matcher(doc3)
# errors can be suppressed if desired
matcher(doc2, allow_missing=True)
matcher(doc3, allow_missing=True)
# TAG, POS, LEMMA require those values
for attr in ("TAG", "POS", "LEMMA"):
matcher = Matcher(en_vocab)
matcher.add("TEST", [[{attr: "a"}]])
matcher(doc2)
with pytest.raises(ValueError):
matcher(doc1)
with pytest.raises(ValueError):
matcher(doc3)
# TEXT/ORTH only require tokens
matcher = Matcher(en_vocab)
matcher.add("TEST", [[{"ORTH": "a"}]])
matcher(doc1)
matcher(doc2)
matcher(doc3)
matcher = Matcher(en_vocab)
matcher.add("TEST", [[{"TEXT": "a"}]])
matcher(doc1)
matcher(doc2)
matcher(doc3)
@pytest.mark.parametrize(
"pattern,text",
[
([{"IS_ALPHA": True}], "a"),
([{"IS_ASCII": True}], "a"),
([{"IS_DIGIT": True}], "1"),
([{"IS_LOWER": True}], "a"),
([{"IS_UPPER": True}], "A"),
([{"IS_TITLE": True}], "Aaaa"),
([{"IS_PUNCT": True}], "."),
([{"IS_SPACE": True}], "\n"),
([{"IS_BRACKET": True}], "["),
([{"IS_QUOTE": True}], '"'),
([{"IS_LEFT_PUNCT": True}], "``"),
([{"IS_RIGHT_PUNCT": True}], "''"),
([{"IS_STOP": True}], "the"),
([{"SPACY": True}], "the"),
([{"LIKE_NUM": True}], "1"),
([{"LIKE_URL": True}], "http://example.com"),
([{"LIKE_EMAIL": True}], "[email protected]"),
],
)
def test_matcher_schema_token_attributes(en_vocab, pattern, text):
matcher = Matcher(en_vocab)
doc = Doc(en_vocab, words=text.split(" "))
matcher.add("Rule", [pattern])
assert len(matcher) == 1
matches = matcher(doc)
assert len(matches) == 1
@pytest.mark.filterwarnings("ignore:\\[W036")
def test_matcher_valid_callback(en_vocab):
"""Test that on_match can only be None or callable."""
matcher = Matcher(en_vocab)
with pytest.raises(ValueError):
matcher.add("TEST", [[{"TEXT": "test"}]], on_match=[])
matcher(Doc(en_vocab, words=["test"]))
def test_matcher_callback(en_vocab):
mock = Mock()
matcher = Matcher(en_vocab)
pattern = [{"ORTH": "test"}]
matcher.add("Rule", [pattern], on_match=mock)
doc = Doc(en_vocab, words=["This", "is", "a", "test", "."])
matches = matcher(doc)
mock.assert_called_once_with(matcher, doc, 0, matches)
def test_matcher_callback_with_alignments(en_vocab):
mock = Mock()
matcher = Matcher(en_vocab)
pattern = [{"ORTH": "test"}]
matcher.add("Rule", [pattern], on_match=mock)
doc = Doc(en_vocab, words=["This", "is", "a", "test", "."])
matches = matcher(doc, with_alignments=True)
mock.assert_called_once_with(matcher, doc, 0, matches)
def test_matcher_span(matcher):
text = "JavaScript is good but Java is better"
doc = Doc(matcher.vocab, words=text.split())
span_js = doc[:3]
span_java = doc[4:]
assert len(matcher(doc)) == 2
assert len(matcher(span_js)) == 1
assert len(matcher(span_java)) == 1
def test_matcher_as_spans(matcher):
"""Test the new as_spans=True API."""
text = "JavaScript is good but Java is better"
doc = Doc(matcher.vocab, words=text.split())
matches = matcher(doc, as_spans=True)
assert len(matches) == 2
assert isinstance(matches[0], Span)
assert matches[0].text == "JavaScript"
assert matches[0].label_ == "JS"
assert isinstance(matches[1], Span)
assert matches[1].text == "Java"
assert matches[1].label_ == "Java"
matches = matcher(doc[1:], as_spans=True)
assert len(matches) == 1
assert isinstance(matches[0], Span)
assert matches[0].text == "Java"
assert matches[0].label_ == "Java"
def test_matcher_deprecated(matcher):
doc = Doc(matcher.vocab, words=["hello", "world"])
with pytest.warns(DeprecationWarning) as record:
for _ in matcher.pipe([doc]):
pass
assert record.list
assert "spaCy v3.0" in str(record.list[0].message)
def test_matcher_remove_zero_operator(en_vocab):
matcher = Matcher(en_vocab)
pattern = [{"OP": "!"}]
matcher.add("Rule", [pattern])
doc = Doc(en_vocab, words=["This", "is", "a", "test", "."])
matches = matcher(doc)
assert len(matches) == 0
assert "Rule" in matcher
matcher.remove("Rule")
assert "Rule" not in matcher
def test_matcher_no_zero_length(en_vocab):
doc = Doc(en_vocab, words=["a", "b"], tags=["A", "B"])
matcher = Matcher(en_vocab)
matcher.add("TEST", [[{"TAG": "C", "OP": "?"}]])
assert len(matcher(doc)) == 0
def test_matcher_ent_iob_key(en_vocab):
"""Test that patterns with ent_iob works correctly."""
matcher = Matcher(en_vocab)
matcher.add("Rule", [[{"ENT_IOB": "I"}]])
doc1 = Doc(en_vocab, words=["I", "visited", "New", "York", "and", "California"])
doc1.ents = [Span(doc1, 2, 4, label="GPE"), Span(doc1, 5, 6, label="GPE")]
doc2 = Doc(en_vocab, words=["I", "visited", "my", "friend", "Alicia"])
doc2.ents = [Span(doc2, 4, 5, label="PERSON")]
matches1 = [doc1[start:end].text for _, start, end in matcher(doc1)]
matches2 = [doc2[start:end].text for _, start, end in matcher(doc2)]
assert len(matches1) == 1
assert matches1[0] == "York"
assert len(matches2) == 0
matcher = Matcher(en_vocab) # Test iob pattern with operators
matcher.add("Rule", [[{"ENT_IOB": "I", "OP": "+"}]])
doc = Doc(
en_vocab, words=["I", "visited", "my", "friend", "Anna", "Maria", "Esperanza"]
)
doc.ents = [Span(doc, 4, 7, label="PERSON")]
matches = [doc[start:end].text for _, start, end in matcher(doc)]
assert len(matches) == 3
assert matches[0] == "Maria"
assert matches[1] == "Maria Esperanza"
assert matches[2] == "Esperanza"
def test_matcher_min_max_operator(en_vocab):
# Exactly n matches {n}
doc = Doc(
en_vocab,
words=["foo", "bar", "foo", "foo", "bar", "foo", "foo", "foo", "bar", "bar"],
)
matcher = Matcher(en_vocab)
pattern = [{"ORTH": "foo", "OP": "{3}"}]
matcher.add("TEST", [pattern])
matches1 = [doc[start:end].text for _, start, end in matcher(doc)]
assert len(matches1) == 1
# At least n matches {n,}
matcher = Matcher(en_vocab)
pattern = [{"ORTH": "foo", "OP": "{2,}"}]
matcher.add("TEST", [pattern])
matches2 = [doc[start:end].text for _, start, end in matcher(doc)]
assert len(matches2) == 4
# At most m matches {,m}
matcher = Matcher(en_vocab)
pattern = [{"ORTH": "foo", "OP": "{,2}"}]
matcher.add("TEST", [pattern])
matches3 = [doc[start:end].text for _, start, end in matcher(doc)]
assert len(matches3) == 9
# At least n matches and most m matches {n,m}
matcher = Matcher(en_vocab)
pattern = [{"ORTH": "foo", "OP": "{2,3}"}]
matcher.add("TEST", [pattern])
matches4 = [doc[start:end].text for _, start, end in matcher(doc)]
assert len(matches4) == 4
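# Usage sketch (illustrative helper, not collected by pytest): the curly-brace
# operators follow regex quantifier semantics, so "{2,3}" yields one match for
# every run of two or three adjacent "foo" tokens.
def _min_max_operator_sketch(en_vocab):
    matcher = Matcher(en_vocab)
    matcher.add("FOO", [[{"ORTH": "foo", "OP": "{2,3}"}]])
    doc = Doc(en_vocab, words=["foo", "foo", "foo", "bar"])
    # overlapping spans 0-2, 1-3 and 0-3 are all returned
    return len(matcher(doc))  # 3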
| 29,890 | 31.847253 | 88 | py |
spaCy | spaCy-master/spacy/tests/matcher/test_matcher_logic.py |
import re
import pytest
from spacy.attrs import IS_PUNCT, LOWER, ORTH
from spacy.errors import MatchPatternError
from spacy.lang.en import English
from spacy.lang.lex_attrs import LEX_ATTRS
from spacy.matcher import Matcher
from spacy.tokens import Doc, Span, Token
from spacy.vocab import Vocab
pattern1 = [{"ORTH": "A"}, {"ORTH": "A", "OP": "*"}]
pattern2 = [{"ORTH": "A", "OP": "*"}, {"ORTH": "A"}]
pattern3 = [{"ORTH": "A"}, {"ORTH": "A"}]
pattern4 = [{"ORTH": "B"}, {"ORTH": "A", "OP": "*"}, {"ORTH": "B"}]
pattern5 = [{"ORTH": "B", "OP": "*"}, {"ORTH": "A", "OP": "*"}, {"ORTH": "B"}]
re_pattern1 = "AA*"
re_pattern2 = "A*A"
re_pattern3 = "AA"
re_pattern4 = "BA*B"
re_pattern5 = "B*A*B"
longest1 = "A A A A A"
longest2 = "A A A A A"
longest3 = "A A"
longest4 = "B A A A A A B" # "FIRST" would be "B B"
longest5 = "B B A A A A A B"
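# Each token pattern above has a character-level regex counterpart: the test
# text is split into single-character tokens below, so Matcher output can be
# compared directly against re.finditer over the raw string, and each
# `longestN` records the span the "LONGEST" greedy policy should return.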
@pytest.fixture
def text():
return "(BBAAAAAB)."
@pytest.fixture
def doc(en_tokenizer, text):
doc = en_tokenizer(" ".join(text))
return doc
@pytest.mark.issue(118)
@pytest.mark.parametrize(
"patterns",
[
[[{"LOWER": "celtics"}], [{"LOWER": "boston"}, {"LOWER": "celtics"}]],
[[{"LOWER": "boston"}, {"LOWER": "celtics"}], [{"LOWER": "celtics"}]],
],
)
def test_issue118(en_tokenizer, patterns):
"""Test a bug that arose from having overlapping matches"""
text = (
"how many points did lebron james score against the boston celtics last night"
)
doc = en_tokenizer(text)
ORG = doc.vocab.strings["ORG"]
matcher = Matcher(doc.vocab)
matcher.add("BostonCeltics", patterns)
assert len(list(doc.ents)) == 0
matches = [(ORG, start, end) for _, start, end in matcher(doc)]
assert matches == [(ORG, 9, 11), (ORG, 10, 11)]
doc.ents = matches[:1]
ents = list(doc.ents)
assert len(ents) == 1
assert ents[0].label == ORG
assert ents[0].start == 9
assert ents[0].end == 11
@pytest.mark.issue(118)
@pytest.mark.parametrize(
"patterns",
[
[[{"LOWER": "boston"}], [{"LOWER": "boston"}, {"LOWER": "celtics"}]],
[[{"LOWER": "boston"}, {"LOWER": "celtics"}], [{"LOWER": "boston"}]],
],
)
def test_issue118_prefix_reorder(en_tokenizer, patterns):
"""Test a bug that arose from having overlapping matches"""
text = (
"how many points did lebron james score against the boston celtics last night"
)
doc = en_tokenizer(text)
ORG = doc.vocab.strings["ORG"]
matcher = Matcher(doc.vocab)
matcher.add("BostonCeltics", patterns)
assert len(list(doc.ents)) == 0
matches = [(ORG, start, end) for _, start, end in matcher(doc)]
doc.ents += tuple(matches)[1:]
assert matches == [(ORG, 9, 10), (ORG, 9, 11)]
ents = doc.ents
assert len(ents) == 1
assert ents[0].label == ORG
assert ents[0].start == 9
assert ents[0].end == 11
@pytest.mark.issue(242)
def test_issue242(en_tokenizer):
"""Test overlapping multi-word phrases."""
text = "There are different food safety standards in different countries."
patterns = [
[{"LOWER": "food"}, {"LOWER": "safety"}],
[{"LOWER": "safety"}, {"LOWER": "standards"}],
]
doc = en_tokenizer(text)
matcher = Matcher(doc.vocab)
matcher.add("FOOD", patterns)
matches = [(ent_type, start, end) for ent_type, start, end in matcher(doc)]
match1, match2 = matches
assert match1[1] == 3
assert match1[2] == 5
assert match2[1] == 4
assert match2[2] == 6
with pytest.raises(ValueError):
# One token can only be part of one entity, so test that the matches
# can't be added as entities
doc.ents += tuple(matches)
@pytest.mark.issue(587)
def test_issue587(en_tokenizer):
"""Test that Matcher doesn't segfault on particular input"""
doc = en_tokenizer("a b; c")
matcher = Matcher(doc.vocab)
matcher.add("TEST1", [[{ORTH: "a"}, {ORTH: "b"}]])
matches = matcher(doc)
assert len(matches) == 1
matcher.add("TEST2", [[{ORTH: "a"}, {ORTH: "b"}, {IS_PUNCT: True}, {ORTH: "c"}]])
matches = matcher(doc)
assert len(matches) == 2
matcher.add("TEST3", [[{ORTH: "a"}, {ORTH: "b"}, {IS_PUNCT: True}, {ORTH: "d"}]])
matches = matcher(doc)
assert len(matches) == 2
@pytest.mark.issue(588)
def test_issue588(en_vocab):
"""Test if empty specs still cause an error when adding patterns"""
matcher = Matcher(en_vocab)
with pytest.raises(ValueError):
matcher.add("TEST", [[]])
@pytest.mark.issue(590)
def test_issue590(en_vocab):
"""Test overlapping matches"""
doc = Doc(en_vocab, words=["n", "=", "1", ";", "a", ":", "5", "%"])
matcher = Matcher(en_vocab)
matcher.add(
"ab", [[{"IS_ALPHA": True}, {"ORTH": ":"}, {"LIKE_NUM": True}, {"ORTH": "%"}]]
)
matcher.add("ab", [[{"IS_ALPHA": True}, {"ORTH": "="}, {"LIKE_NUM": True}]])
matches = matcher(doc)
assert len(matches) == 2
@pytest.mark.issue(615)
def test_issue615(en_tokenizer):
def merge_phrases(matcher, doc, i, matches):
"""Merge a phrase. We have to be careful here because we'll change the
token indices. To avoid problems, merge all the phrases once we're called
on the last match."""
if i != len(matches) - 1:
return None
spans = [Span(doc, start, end, label=label) for label, start, end in matches]
with doc.retokenize() as retokenizer:
for span in spans:
tag = "NNP" if span.label_ else span.root.tag_
attrs = {"tag": tag, "lemma": span.text}
retokenizer.merge(span, attrs=attrs)
doc.ents = doc.ents + (span,)
text = "The golf club is broken"
pattern = [{"ORTH": "golf"}, {"ORTH": "club"}]
label = "Sport_Equipment"
doc = en_tokenizer(text)
matcher = Matcher(doc.vocab)
matcher.add(label, [pattern], on_match=merge_phrases)
matcher(doc)
entities = list(doc.ents)
assert entities != []
assert entities[0].label != 0
@pytest.mark.issue(850)
def test_issue850():
"""The variable-length pattern matches the succeeding token. Check we
handle the ambiguity correctly."""
vocab = Vocab(lex_attr_getters={LOWER: lambda string: string.lower()})
matcher = Matcher(vocab)
pattern = [{"LOWER": "bob"}, {"OP": "*"}, {"LOWER": "frank"}]
matcher.add("FarAway", [pattern])
doc = Doc(matcher.vocab, words=["bob", "and", "and", "frank"])
match = matcher(doc)
assert len(match) == 1
ent_id, start, end = match[0]
assert start == 0
assert end == 4
@pytest.mark.issue(850)
def test_issue850_basic():
"""Test Matcher matches with '*' operator and Boolean flag"""
vocab = Vocab(lex_attr_getters={LOWER: lambda string: string.lower()})
matcher = Matcher(vocab)
pattern = [{"LOWER": "bob"}, {"OP": "*", "LOWER": "and"}, {"LOWER": "frank"}]
matcher.add("FarAway", [pattern])
doc = Doc(matcher.vocab, words=["bob", "and", "and", "frank"])
match = matcher(doc)
assert len(match) == 1
ent_id, start, end = match[0]
assert start == 0
assert end == 4
@pytest.mark.issue(1434)
def test_issue1434():
"""Test matches occur when optional element at end of short doc."""
pattern = [{"ORTH": "Hello"}, {"IS_ALPHA": True, "OP": "?"}]
vocab = Vocab(lex_attr_getters=LEX_ATTRS)
hello_world = Doc(vocab, words=["Hello", "World"])
hello = Doc(vocab, words=["Hello"])
matcher = Matcher(vocab)
matcher.add("MyMatcher", [pattern])
matches = matcher(hello_world)
assert matches
matches = matcher(hello)
assert matches
@pytest.mark.parametrize(
"string,start,end",
[
("a", 0, 1),
("a b", 0, 2),
("a c", 0, 1),
("a b c", 0, 2),
("a b b c", 0, 3),
("a b b", 0, 3),
],
)
@pytest.mark.issue(1450)
def test_issue1450(string, start, end):
"""Test matcher works when patterns end with * operator."""
pattern = [{"ORTH": "a"}, {"ORTH": "b", "OP": "*"}]
matcher = Matcher(Vocab())
matcher.add("TSTEND", [pattern])
doc = Doc(Vocab(), words=string.split())
matches = matcher(doc)
if start is None or end is None:
assert matches == []
assert matches[-1][1] == start
assert matches[-1][2] == end
@pytest.mark.issue(1945)
def test_issue1945():
"""Test regression in Matcher introduced in v2.0.6."""
matcher = Matcher(Vocab())
matcher.add("MWE", [[{"orth": "a"}, {"orth": "a"}]])
doc = Doc(matcher.vocab, words=["a", "a", "a"])
matches = matcher(doc) # we should see two overlapping matches here
assert len(matches) == 2
assert matches[0][1:] == (0, 2)
assert matches[1][1:] == (1, 3)
@pytest.mark.issue(1971)
def test_issue1971(en_vocab):
# Possibly related to #2675 and #2671?
matcher = Matcher(en_vocab)
pattern = [
{"ORTH": "Doe"},
{"ORTH": "!", "OP": "?"},
{"_": {"optional": True}, "OP": "?"},
{"ORTH": "!", "OP": "?"},
]
Token.set_extension("optional", default=False)
matcher.add("TEST", [pattern])
doc = Doc(en_vocab, words=["Hello", "John", "Doe", "!"])
# We could also assert length 1 here, but this is more conclusive, because
# the real problem here is that it returns a duplicate match for a match_id
# that's not actually in the vocab!
matches = matcher(doc)
assert all([match_id in en_vocab.strings for match_id, start, end in matches])
@pytest.mark.issue(1971)
def test_issue_1971_2(en_vocab):
matcher = Matcher(en_vocab)
pattern1 = [{"ORTH": "EUR", "LOWER": {"IN": ["eur"]}}, {"LIKE_NUM": True}]
pattern2 = [{"LIKE_NUM": True}, {"ORTH": "EUR"}] # {"IN": ["EUR"]}}]
doc = Doc(en_vocab, words=["EUR", "10", "is", "10", "EUR"])
matcher.add("TEST1", [pattern1, pattern2])
matches = matcher(doc)
assert len(matches) == 2
@pytest.mark.issue(1971)
def test_issue_1971_3(en_vocab):
"""Test that pattern matches correctly for multiple extension attributes."""
Token.set_extension("a", default=1, force=True)
Token.set_extension("b", default=2, force=True)
doc = Doc(en_vocab, words=["hello", "world"])
matcher = Matcher(en_vocab)
matcher.add("A", [[{"_": {"a": 1}}]])
matcher.add("B", [[{"_": {"b": 2}}]])
matches = sorted((en_vocab.strings[m_id], s, e) for m_id, s, e in matcher(doc))
assert len(matches) == 4
assert matches == sorted([("A", 0, 1), ("A", 1, 2), ("B", 0, 1), ("B", 1, 2)])
@pytest.mark.issue(1971)
def test_issue_1971_4(en_vocab):
"""Test that pattern matches correctly with multiple extension attribute
values on a single token.
"""
Token.set_extension("ext_a", default="str_a", force=True)
Token.set_extension("ext_b", default="str_b", force=True)
matcher = Matcher(en_vocab)
doc = Doc(en_vocab, words=["this", "is", "text"])
pattern = [{"_": {"ext_a": "str_a", "ext_b": "str_b"}}] * 3
matcher.add("TEST", [pattern])
matches = matcher(doc)
    # These assertions previously triggered a segmentation fault
assert len(matches) == 1
assert matches[0] == (en_vocab.strings["TEST"], 0, 3)
@pytest.mark.issue(2464)
def test_issue2464(en_vocab):
"""Test problem with successive ?. This is the same bug, so putting it here."""
matcher = Matcher(en_vocab)
doc = Doc(en_vocab, words=["a", "b"])
matcher.add("4", [[{"OP": "?"}, {"OP": "?"}]])
matches = matcher(doc)
assert len(matches) == 3
@pytest.mark.issue(2569)
def test_issue2569(en_tokenizer):
"""Test that operator + is greedy."""
doc = en_tokenizer("It is May 15, 1993.")
doc.ents = [Span(doc, 2, 6, label=doc.vocab.strings["DATE"])]
matcher = Matcher(doc.vocab)
matcher.add("RULE", [[{"ENT_TYPE": "DATE", "OP": "+"}]])
matched = [doc[start:end] for _, start, end in matcher(doc)]
matched = sorted(matched, key=len, reverse=True)
assert len(matched) == 10
assert len(matched[0]) == 4
assert matched[0].text == "May 15, 1993"
@pytest.mark.issue(2671)
def test_issue2671():
"""Ensure the correct entity ID is returned for matches with quantifiers.
See also #2675
"""
nlp = English()
matcher = Matcher(nlp.vocab)
pattern_id = "test_pattern"
pattern = [
{"LOWER": "high"},
{"IS_PUNCT": True, "OP": "?"},
{"LOWER": "adrenaline"},
]
matcher.add(pattern_id, [pattern])
doc1 = nlp("This is a high-adrenaline situation.")
doc2 = nlp("This is a high adrenaline situation.")
matches1 = matcher(doc1)
for match_id, start, end in matches1:
assert nlp.vocab.strings[match_id] == pattern_id
matches2 = matcher(doc2)
for match_id, start, end in matches2:
assert nlp.vocab.strings[match_id] == pattern_id
@pytest.mark.issue(3009)
def test_issue3009(en_vocab):
"""Test problem with matcher quantifiers"""
patterns = [
[{"ORTH": "has"}, {"LOWER": "to"}, {"LOWER": "do"}, {"TAG": "IN"}],
[
{"ORTH": "has"},
{"IS_ASCII": True, "IS_PUNCT": False, "OP": "*"},
{"LOWER": "to"},
{"LOWER": "do"},
{"TAG": "IN"},
],
[
{"ORTH": "has"},
{"IS_ASCII": True, "IS_PUNCT": False, "OP": "?"},
{"LOWER": "to"},
{"LOWER": "do"},
{"TAG": "IN"},
],
]
words = ["also", "has", "to", "do", "with"]
tags = ["RB", "VBZ", "TO", "VB", "IN"]
pos = ["ADV", "VERB", "ADP", "VERB", "ADP"]
doc = Doc(en_vocab, words=words, tags=tags, pos=pos)
matcher = Matcher(en_vocab)
for i, pattern in enumerate(patterns):
matcher.add(str(i), [pattern])
matches = matcher(doc)
assert matches
@pytest.mark.issue(3328)
def test_issue3328(en_vocab):
doc = Doc(en_vocab, words=["Hello", ",", "how", "are", "you", "doing", "?"])
matcher = Matcher(en_vocab)
patterns = [
[{"LOWER": {"IN": ["hello", "how"]}}],
[{"LOWER": {"IN": ["you", "doing"]}}],
]
matcher.add("TEST", patterns)
matches = matcher(doc)
assert len(matches) == 4
matched_texts = [doc[start:end].text for _, start, end in matches]
assert matched_texts == ["Hello", "how", "you", "doing"]
@pytest.mark.issue(3549)
def test_issue3549(en_vocab):
"""Test that match pattern validation doesn't raise on empty errors."""
matcher = Matcher(en_vocab, validate=True)
pattern = [{"LOWER": "hello"}, {"LOWER": "world"}]
matcher.add("GOOD", [pattern])
with pytest.raises(MatchPatternError):
matcher.add("BAD", [[{"X": "Y"}]])
@pytest.mark.skip("Matching currently only works on strings and integers")
@pytest.mark.issue(3555)
def test_issue3555(en_vocab):
"""Test that custom extensions with default None don't break matcher."""
Token.set_extension("issue3555", default=None)
matcher = Matcher(en_vocab)
pattern = [{"ORTH": "have"}, {"_": {"issue3555": True}}]
matcher.add("TEST", [pattern])
doc = Doc(en_vocab, words=["have", "apple"])
matcher(doc)
@pytest.mark.issue(3839)
def test_issue3839(en_vocab):
"""Test that match IDs returned by the matcher are correct, are in the string"""
doc = Doc(en_vocab, words=["terrific", "group", "of", "people"])
matcher = Matcher(en_vocab)
match_id = "PATTERN"
pattern1 = [{"LOWER": "terrific"}, {"OP": "?"}, {"LOWER": "group"}]
pattern2 = [{"LOWER": "terrific"}, {"OP": "?"}, {"OP": "?"}, {"LOWER": "group"}]
matcher.add(match_id, [pattern1])
matches = matcher(doc)
assert matches[0][0] == en_vocab.strings[match_id]
matcher = Matcher(en_vocab)
matcher.add(match_id, [pattern2])
matches = matcher(doc)
assert matches[0][0] == en_vocab.strings[match_id]
@pytest.mark.issue(3879)
def test_issue3879(en_vocab):
doc = Doc(en_vocab, words=["This", "is", "a", "test", "."])
assert len(doc) == 5
pattern = [{"ORTH": "This", "OP": "?"}, {"OP": "?"}, {"ORTH": "test"}]
matcher = Matcher(en_vocab)
matcher.add("TEST", [pattern])
assert len(matcher(doc)) == 2 # fails because of a FP match 'is a test'
@pytest.mark.issue(3951)
def test_issue3951(en_vocab):
"""Test that combinations of optional rules are matched correctly."""
matcher = Matcher(en_vocab)
pattern = [
{"LOWER": "hello"},
{"LOWER": "this", "OP": "?"},
{"OP": "?"},
{"LOWER": "world"},
]
matcher.add("TEST", [pattern])
doc = Doc(en_vocab, words=["Hello", "my", "new", "world"])
matches = matcher(doc)
assert len(matches) == 0
@pytest.mark.issue(4120)
def test_issue4120(en_vocab):
"""Test that matches without a final {OP: ?} token are returned."""
matcher = Matcher(en_vocab)
matcher.add("TEST", [[{"ORTH": "a"}, {"OP": "?"}]])
doc1 = Doc(en_vocab, words=["a"])
assert len(matcher(doc1)) == 1 # works
doc2 = Doc(en_vocab, words=["a", "b", "c"])
assert len(matcher(doc2)) == 2 # fixed
matcher = Matcher(en_vocab)
matcher.add("TEST", [[{"ORTH": "a"}, {"OP": "?"}, {"ORTH": "b"}]])
doc3 = Doc(en_vocab, words=["a", "b", "b", "c"])
assert len(matcher(doc3)) == 2 # works
matcher = Matcher(en_vocab)
matcher.add("TEST", [[{"ORTH": "a"}, {"OP": "?"}, {"ORTH": "b", "OP": "?"}]])
doc4 = Doc(en_vocab, words=["a", "b", "b", "c"])
assert len(matcher(doc4)) == 3 # fixed
@pytest.mark.parametrize(
"pattern,re_pattern",
[
(pattern1, re_pattern1),
(pattern2, re_pattern2),
(pattern3, re_pattern3),
(pattern4, re_pattern4),
(pattern5, re_pattern5),
],
)
def test_greedy_matching_first(doc, text, pattern, re_pattern):
"""Test that the greedy matching behavior "FIRST" is consistent with
other re implementations."""
matcher = Matcher(doc.vocab)
matcher.add(re_pattern, [pattern], greedy="FIRST")
matches = matcher(doc)
re_matches = [m.span() for m in re.finditer(re_pattern, text)]
for (key, m_s, m_e), (re_s, re_e) in zip(matches, re_matches):
# matching the string, not the exact position
assert doc[m_s:m_e].text == doc[re_s:re_e].text
@pytest.mark.parametrize(
"pattern,longest",
[
(pattern1, longest1),
(pattern2, longest2),
(pattern3, longest3),
(pattern4, longest4),
(pattern5, longest5),
],
)
def test_greedy_matching_longest(doc, text, pattern, longest):
"""Test the "LONGEST" greedy matching behavior"""
matcher = Matcher(doc.vocab)
matcher.add("RULE", [pattern], greedy="LONGEST")
matches = matcher(doc)
for (key, s, e) in matches:
assert doc[s:e].text == longest
def test_greedy_matching_longest_first(en_tokenizer):
"""Test that "LONGEST" matching prefers the first of two equally long matches"""
doc = en_tokenizer(" ".join("CCC"))
matcher = Matcher(doc.vocab)
pattern = [{"ORTH": "C"}, {"ORTH": "C"}]
matcher.add("RULE", [pattern], greedy="LONGEST")
matches = matcher(doc)
# out of 0-2 and 1-3, the first should be picked
assert len(matches) == 1
assert matches[0][1] == 0
assert matches[0][2] == 2
def test_invalid_greediness(doc, text):
matcher = Matcher(doc.vocab)
with pytest.raises(ValueError):
matcher.add("RULE", [pattern1], greedy="GREEDY")
@pytest.mark.parametrize(
"pattern,re_pattern",
[
(pattern1, re_pattern1),
(pattern2, re_pattern2),
(pattern3, re_pattern3),
(pattern4, re_pattern4),
(pattern5, re_pattern5),
],
)
def test_match_consuming(doc, text, pattern, re_pattern):
"""Test that matcher.__call__ consumes tokens on a match similar to
re.findall."""
matcher = Matcher(doc.vocab)
matcher.add(re_pattern, [pattern], greedy="FIRST")
matches = matcher(doc)
re_matches = [m.span() for m in re.finditer(re_pattern, text)]
assert len(matches) == len(re_matches)
def test_operator_combos(en_vocab):
cases = [
("aaab", "a a a b", True),
("aaab", "a+ b", True),
("aaab", "a+ a+ b", True),
("aaab", "a+ a+ a b", True),
("aaab", "a+ a+ a+ b", True),
("aaab", "a+ a a b", True),
("aaab", "a+ a a", True),
("aaab", "a+", True),
("aaa", "a+ b", False),
("aaa", "a+ a+ b", False),
("aaa", "a+ a+ a+ b", False),
("aaa", "a+ a b", False),
("aaa", "a+ a a b", False),
("aaab", "a+ a a", True),
("aaab", "a+", True),
("aaab", "a+ a b", True),
]
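    # The pattern strings are a tiny DSL: a bare letter becomes an exact ORTH
    # token, and a trailing "+" adds {"OP": "+"} to that token.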
for string, pattern_str, result in cases:
matcher = Matcher(en_vocab)
doc = Doc(matcher.vocab, words=list(string))
pattern = []
for part in pattern_str.split():
if part.endswith("+"):
pattern.append({"ORTH": part[0], "OP": "+"})
else:
pattern.append({"ORTH": part})
matcher.add("PATTERN", [pattern])
matches = matcher(doc)
if result:
assert matches, (string, pattern_str)
else:
assert not matches, (string, pattern_str)
@pytest.mark.issue(1450)
def test_matcher_end_zero_plus(en_vocab):
"""Test matcher works when patterns end with * operator. (issue 1450)"""
matcher = Matcher(en_vocab)
pattern = [{"ORTH": "a"}, {"ORTH": "b", "OP": "*"}]
matcher.add("TSTEND", [pattern])
nlp = lambda string: Doc(matcher.vocab, words=string.split())
assert len(matcher(nlp("a"))) == 1
assert len(matcher(nlp("a b"))) == 2
assert len(matcher(nlp("a c"))) == 1
assert len(matcher(nlp("a b c"))) == 2
assert len(matcher(nlp("a b b c"))) == 3
assert len(matcher(nlp("a b b"))) == 3
def test_matcher_sets_return_correct_tokens(en_vocab):
matcher = Matcher(en_vocab)
patterns = [
[{"LOWER": {"IN": ["zero"]}}],
[{"LOWER": {"IN": ["one"]}}],
[{"LOWER": {"IN": ["two"]}}],
]
matcher.add("TEST", patterns)
doc = Doc(en_vocab, words="zero one two three".split())
matches = matcher(doc)
texts = [Span(doc, s, e, label=L).text for L, s, e in matches]
assert texts == ["zero", "one", "two"]
@pytest.mark.filterwarnings("ignore:\\[W036")
def test_matcher_remove():
nlp = English()
matcher = Matcher(nlp.vocab)
text = "This is a test case."
pattern = [{"ORTH": "test"}, {"OP": "?"}]
assert len(matcher) == 0
matcher.add("Rule", [pattern])
assert "Rule" in matcher
# should give two matches
results1 = matcher(nlp(text))
assert len(results1) == 2
# removing once should work
matcher.remove("Rule")
    # should not return any matches anymore
results2 = matcher(nlp(text))
assert len(results2) == 0
# removing again should throw an error
with pytest.raises(ValueError):
matcher.remove("Rule")
def test_matcher_with_alignments_greedy_longest(en_vocab):
cases = [
("aaab", "a* b", [0, 0, 0, 1]),
("baab", "b a* b", [0, 1, 1, 2]),
("aaab", "a a a b", [0, 1, 2, 3]),
("aaab", "a+ b", [0, 0, 0, 1]),
("aaba", "a+ b a+", [0, 0, 1, 2]),
("aabaa", "a+ b a+", [0, 0, 1, 2, 2]),
("aaba", "a+ b a*", [0, 0, 1, 2]),
("aaaa", "a*", [0, 0, 0, 0]),
("baab", "b a* b b*", [0, 1, 1, 2]),
("aabb", "a* b* a*", [0, 0, 1, 1]),
("aaab", "a+ a+ a b", [0, 1, 2, 3]),
("aaab", "a+ a+ a+ b", [0, 1, 2, 3]),
("aaab", "a+ a a b", [0, 1, 2, 3]),
("aaab", "a+ a a", [0, 1, 2]),
("aaab", "a+ a a?", [0, 1, 2]),
("aaaa", "a a a a a?", [0, 1, 2, 3]),
("aaab", "a+ a b", [0, 0, 1, 2]),
("aaab", "a+ a+ b", [0, 0, 1, 2]),
("aaab", "a{2,} b", [0, 0, 0, 1]),
("aaab", "a{,3} b", [0, 0, 0, 1]),
("aaab", "a{2} b", [0, 0, 1]),
("aaab", "a{2,3} b", [0, 0, 0, 1]),
]
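    # With with_alignments=True each match carries a list that maps every
    # token in the matched span to the index of the pattern token it was
    # aligned to; the expected lists above spell this out for the LONGEST match.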
for string, pattern_str, result in cases:
matcher = Matcher(en_vocab)
doc = Doc(matcher.vocab, words=list(string))
pattern = []
for part in pattern_str.split():
if part.endswith("+"):
pattern.append({"ORTH": part[0], "OP": "+"})
elif part.endswith("*"):
pattern.append({"ORTH": part[0], "OP": "*"})
elif part.endswith("?"):
pattern.append({"ORTH": part[0], "OP": "?"})
elif part.endswith("}"):
pattern.append({"ORTH": part[0], "OP": part[1:]})
else:
pattern.append({"ORTH": part})
matcher.add("PATTERN", [pattern], greedy="LONGEST")
matches = matcher(doc, with_alignments=True)
n_matches = len(matches)
_, s, e, expected = matches[0]
assert expected == result, (string, pattern_str, s, e, n_matches)
def test_matcher_with_alignments_non_greedy(en_vocab):
cases = [
(0, "aaab", "a* b", [[0, 1], [0, 0, 1], [0, 0, 0, 1], [1]]),
(1, "baab", "b a* b", [[0, 1, 1, 2]]),
(2, "aaab", "a a a b", [[0, 1, 2, 3]]),
(3, "aaab", "a+ b", [[0, 1], [0, 0, 1], [0, 0, 0, 1]]),
(4, "aaba", "a+ b a+", [[0, 1, 2], [0, 0, 1, 2]]),
(
5,
"aabaa",
"a+ b a+",
[[0, 1, 2], [0, 0, 1, 2], [0, 0, 1, 2, 2], [0, 1, 2, 2]],
),
(6, "aaba", "a+ b a*", [[0, 1], [0, 0, 1], [0, 0, 1, 2], [0, 1, 2]]),
(7, "aaaa", "a*", [[0], [0, 0], [0, 0, 0], [0, 0, 0, 0]]),
(8, "baab", "b a* b b*", [[0, 1, 1, 2]]),
(
9,
"aabb",
"a* b* a*",
[[1], [2], [2, 2], [0, 1], [0, 0, 1], [0, 0, 1, 1], [0, 1, 1], [1, 1]],
),
(10, "aaab", "a+ a+ a b", [[0, 1, 2, 3]]),
(11, "aaab", "a+ a+ a+ b", [[0, 1, 2, 3]]),
(12, "aaab", "a+ a a b", [[0, 1, 2, 3]]),
(13, "aaab", "a+ a a", [[0, 1, 2]]),
(14, "aaab", "a+ a a?", [[0, 1], [0, 1, 2]]),
(15, "aaaa", "a a a a a?", [[0, 1, 2, 3]]),
(16, "aaab", "a+ a b", [[0, 1, 2], [0, 0, 1, 2]]),
(17, "aaab", "a+ a+ b", [[0, 1, 2], [0, 0, 1, 2]]),
(18, "aaab", "a{2,} b", [[0, 0, 1], [0, 0, 0, 1]]),
(19, "aaab", "a{3} b", [[0, 0, 0, 1]]),
(20, "aaab", "a{2} b", [[0, 0, 1]]),
(21, "aaab", "a{2,3} b", [[0, 0, 1], [0, 0, 0, 1]]),
]
for case_id, string, pattern_str, results in cases:
matcher = Matcher(en_vocab)
doc = Doc(matcher.vocab, words=list(string))
pattern = []
for part in pattern_str.split():
if part.endswith("+"):
pattern.append({"ORTH": part[0], "OP": "+"})
elif part.endswith("*"):
pattern.append({"ORTH": part[0], "OP": "*"})
elif part.endswith("?"):
pattern.append({"ORTH": part[0], "OP": "?"})
elif part.endswith("}"):
pattern.append({"ORTH": part[0], "OP": part[1:]})
else:
pattern.append({"ORTH": part})
matcher.add("PATTERN", [pattern])
matches = matcher(doc, with_alignments=True)
n_matches = len(matches)
for _, s, e, expected in matches:
assert expected in results, (case_id, string, pattern_str, s, e, n_matches)
assert len(expected) == e - s
| 27,183 | 33.453739 | 87 | py |
spaCy | spaCy-master/spacy/tests/matcher/test_pattern_validation.py |
import pytest
from spacy.errors import MatchPatternError
from spacy.matcher import Matcher
from spacy.schemas import validate_token_pattern
# (pattern, num errors with validation, num errors identified with minimal
# checks)
TEST_PATTERNS = [
# Bad patterns flagged in all cases
([{"XX": "foo"}], 1, 1),
([{"IS_ALPHA": {"==": True}}, {"LIKE_NUM": None}], 2, 1),
([{"IS_PUNCT": True, "OP": "$"}], 1, 1),
([{"_": "foo"}], 1, 1),
('[{"TEXT": "foo"}, {"LOWER": "bar"}]', 1, 1),
([{"ENT_IOB": "foo"}], 1, 1),
([1, 2, 3], 3, 1),
([{"TEXT": "foo", "OP": "{,}"}], 1, 1),
([{"TEXT": "foo", "OP": "{,4}4"}], 1, 1),
([{"TEXT": "foo", "OP": "{a,3}"}], 1, 1),
([{"TEXT": "foo", "OP": "{a}"}], 1, 1),
([{"TEXT": "foo", "OP": "{,a}"}], 1, 1),
([{"TEXT": "foo", "OP": "{1,2,3}"}], 1, 1),
([{"TEXT": "foo", "OP": "{1, 3}"}], 1, 1),
([{"TEXT": "foo", "OP": "{-2}"}], 1, 1),
# Bad patterns flagged outside of Matcher
([{"_": {"foo": "bar", "baz": {"IN": "foo"}}}], 2, 0), # prev: (1, 0)
# Bad patterns not flagged with minimal checks
([{"LENGTH": "2", "TEXT": 2}, {"LOWER": "test"}], 2, 0),
([{"LENGTH": {"IN": [1, 2, "3"]}}, {"POS": {"IN": "VERB"}}], 4, 0), # prev: (2, 0)
([{"LENGTH": {"VALUE": 5}}], 2, 0), # prev: (1, 0)
([{"TEXT": {"VALUE": "foo"}}], 2, 0), # prev: (1, 0)
([{"IS_DIGIT": -1}], 1, 0),
([{"ORTH": -1}], 1, 0),
([{"ENT_ID": -1}], 1, 0),
([{"ENT_KB_ID": -1}], 1, 0),
# Good patterns
([{"TEXT": "foo"}, {"LOWER": "bar"}], 0, 0),
([{"LEMMA": {"IN": ["love", "like"]}}, {"POS": "DET", "OP": "?"}], 0, 0),
([{"LIKE_NUM": True, "LENGTH": {">=": 5}}], 0, 0),
([{"LENGTH": 2}], 0, 0),
([{"LOWER": {"REGEX": "^X", "NOT_IN": ["XXX", "XY"]}}], 0, 0),
([{"NORM": "a"}, {"POS": {"IN": ["NOUN"]}}], 0, 0),
([{"_": {"foo": {"NOT_IN": ["bar", "baz"]}, "a": 5, "b": {">": 10}}}], 0, 0),
([{"orth": "foo"}], 0, 0), # prev: xfail
([{"IS_SENT_START": True}], 0, 0),
([{"SENT_START": True}], 0, 0),
([{"ENT_ID": "STRING"}], 0, 0),
([{"ENT_KB_ID": "STRING"}], 0, 0),
([{"TEXT": "ha", "OP": "{3}"}], 0, 0),
]
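# validate_token_pattern returns one error message per schema violation (an
# empty list for a valid pattern), while the Matcher's own minimal checks only
# reject the structural problems counted in the third column above.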
@pytest.mark.parametrize(
"pattern",
[[{"XX": "y"}], [{"LENGTH": "2"}], [{"TEXT": {"IN": 5}}], [{"text": {"in": 6}}]],
)
def test_matcher_pattern_validation(en_vocab, pattern):
matcher = Matcher(en_vocab, validate=True)
with pytest.raises(MatchPatternError):
matcher.add("TEST", [pattern])
@pytest.mark.parametrize("pattern,n_errors,_", TEST_PATTERNS)
def test_pattern_validation(pattern, n_errors, _):
errors = validate_token_pattern(pattern)
assert len(errors) == n_errors
@pytest.mark.parametrize("pattern,n_errors,n_min_errors", TEST_PATTERNS)
def test_minimal_pattern_validation(en_vocab, pattern, n_errors, n_min_errors):
matcher = Matcher(en_vocab)
if n_min_errors > 0:
with pytest.raises(ValueError):
matcher.add("TEST", [pattern])
elif n_errors == 0:
matcher.add("TEST", [pattern])
def test_pattern_errors(en_vocab):
matcher = Matcher(en_vocab)
# normalize "regex" to upper like "text"
matcher.add("TEST1", [[{"text": {"regex": "regex"}}]])
# error if subpattern attribute isn't recognized and processed
with pytest.raises(MatchPatternError):
matcher.add("TEST2", [[{"TEXT": {"XX": "xx"}}]])
| 3,367 | 37.712644 | 87 | py |
spaCy | spaCy-master/spacy/tests/matcher/test_phrase_matcher.py |
import warnings
import pytest
import srsly
from mock import Mock
from spacy.lang.en import English
from spacy.matcher import Matcher, PhraseMatcher
from spacy.tokens import Doc, Span
from spacy.vocab import Vocab
from ..util import make_tempdir
@pytest.mark.issue(3248)
def test_issue3248_1():
"""Test that the PhraseMatcher correctly reports its number of rules, not
total number of patterns."""
nlp = English()
matcher = PhraseMatcher(nlp.vocab)
matcher.add("TEST1", [nlp("a"), nlp("b"), nlp("c")])
matcher.add("TEST2", [nlp("d")])
assert len(matcher) == 2
@pytest.mark.issue(3331)
def test_issue3331(en_vocab):
"""Test that duplicate patterns for different rules result in multiple
matches, one per rule.
"""
matcher = PhraseMatcher(en_vocab)
matcher.add("A", [Doc(en_vocab, words=["Barack", "Obama"])])
matcher.add("B", [Doc(en_vocab, words=["Barack", "Obama"])])
doc = Doc(en_vocab, words=["Barack", "Obama", "lifts", "America"])
matches = matcher(doc)
assert len(matches) == 2
match_ids = [en_vocab.strings[matches[0][0]], en_vocab.strings[matches[1][0]]]
assert sorted(match_ids) == ["A", "B"]
@pytest.mark.issue(3972)
def test_issue3972(en_vocab):
"""Test that the PhraseMatcher returns duplicates for duplicate match IDs."""
matcher = PhraseMatcher(en_vocab)
matcher.add("A", [Doc(en_vocab, words=["New", "York"])])
matcher.add("B", [Doc(en_vocab, words=["New", "York"])])
doc = Doc(en_vocab, words=["I", "live", "in", "New", "York"])
matches = matcher(doc)
assert len(matches) == 2
# We should have a match for each of the two rules
found_ids = [en_vocab.strings[ent_id] for (ent_id, _, _) in matches]
assert "A" in found_ids
assert "B" in found_ids
@pytest.mark.issue(4002)
def test_issue4002(en_vocab):
"""Test that the PhraseMatcher can match on overwritten NORM attributes."""
matcher = PhraseMatcher(en_vocab, attr="NORM")
pattern1 = Doc(en_vocab, words=["c", "d"])
assert [t.norm_ for t in pattern1] == ["c", "d"]
matcher.add("TEST", [pattern1])
doc = Doc(en_vocab, words=["a", "b", "c", "d"])
assert [t.norm_ for t in doc] == ["a", "b", "c", "d"]
matches = matcher(doc)
assert len(matches) == 1
matcher = PhraseMatcher(en_vocab, attr="NORM")
pattern2 = Doc(en_vocab, words=["1", "2"])
pattern2[0].norm_ = "c"
pattern2[1].norm_ = "d"
assert [t.norm_ for t in pattern2] == ["c", "d"]
matcher.add("TEST", [pattern2])
matches = matcher(doc)
assert len(matches) == 1
@pytest.mark.issue(4373)
def test_issue4373():
"""Test that PhraseMatcher.vocab can be accessed (like Matcher.vocab)."""
matcher = Matcher(Vocab())
assert isinstance(matcher.vocab, Vocab)
matcher = PhraseMatcher(Vocab())
assert isinstance(matcher.vocab, Vocab)
@pytest.mark.issue(4651)
def test_issue4651_with_phrase_matcher_attr():
"""Test that the EntityRuler PhraseMatcher is deserialized correctly using
the method from_disk when the EntityRuler argument phrase_matcher_attr is
specified.
"""
text = "Spacy is a python library for nlp"
nlp = English()
patterns = [{"label": "PYTHON_LIB", "pattern": "spacy", "id": "spaCy"}]
ruler = nlp.add_pipe("entity_ruler", config={"phrase_matcher_attr": "LOWER"})
ruler.add_patterns(patterns)
doc = nlp(text)
res = [(ent.text, ent.label_, ent.ent_id_) for ent in doc.ents]
nlp_reloaded = English()
with make_tempdir() as d:
file_path = d / "entityruler"
ruler.to_disk(file_path)
nlp_reloaded.add_pipe("entity_ruler").from_disk(file_path)
doc_reloaded = nlp_reloaded(text)
res_reloaded = [(ent.text, ent.label_, ent.ent_id_) for ent in doc_reloaded.ents]
assert res == res_reloaded
@pytest.mark.issue(6839)
def test_issue6839(en_vocab):
"""Ensure that PhraseMatcher accepts Span as input"""
# fmt: off
words = ["I", "like", "Spans", "and", "Docs", "in", "my", "input", ",", "and", "nothing", "else", "."]
# fmt: on
doc = Doc(en_vocab, words=words)
span = doc[:8]
pattern = Doc(en_vocab, words=["Spans", "and", "Docs"])
matcher = PhraseMatcher(en_vocab)
matcher.add("SPACY", [pattern])
matches = matcher(span)
assert matches
@pytest.mark.issue(10643)
def test_issue10643(en_vocab):
"""Ensure overlapping terms can be removed from PhraseMatcher"""
# fmt: off
words = ["Only", "save", "out", "the", "binary", "data", "for", "the", "individual", "components", "."]
# fmt: on
doc = Doc(en_vocab, words=words)
terms = {
"0": Doc(en_vocab, words=["binary"]),
"1": Doc(en_vocab, words=["binary", "data"]),
}
matcher = PhraseMatcher(en_vocab)
for match_id, term in terms.items():
matcher.add(match_id, [term])
matches = matcher(doc)
assert matches == [(en_vocab.strings["0"], 4, 5), (en_vocab.strings["1"], 4, 6)]
matcher.remove("0")
assert len(matcher) == 1
new_matches = matcher(doc)
assert new_matches == [(en_vocab.strings["1"], 4, 6)]
matcher.remove("1")
assert len(matcher) == 0
no_matches = matcher(doc)
assert not no_matches
def test_matcher_phrase_matcher(en_vocab):
doc = Doc(en_vocab, words=["I", "like", "Google", "Now", "best"])
# intermediate phrase
pattern = Doc(en_vocab, words=["Google", "Now"])
matcher = PhraseMatcher(en_vocab)
matcher.add("COMPANY", [pattern])
assert len(matcher(doc)) == 1
# initial token
pattern = Doc(en_vocab, words=["I"])
matcher = PhraseMatcher(en_vocab)
matcher.add("I", [pattern])
assert len(matcher(doc)) == 1
# initial phrase
pattern = Doc(en_vocab, words=["I", "like"])
matcher = PhraseMatcher(en_vocab)
matcher.add("ILIKE", [pattern])
assert len(matcher(doc)) == 1
# final token
pattern = Doc(en_vocab, words=["best"])
matcher = PhraseMatcher(en_vocab)
matcher.add("BEST", [pattern])
assert len(matcher(doc)) == 1
# final phrase
pattern = Doc(en_vocab, words=["Now", "best"])
matcher = PhraseMatcher(en_vocab)
matcher.add("NOWBEST", [pattern])
assert len(matcher(doc)) == 1
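# Usage sketch (illustrative helper, not collected by pytest): PhraseMatcher
# patterns are plain Doc objects, so a terminology list is typically tokenized
# and added under a single match key.
def _phrase_matcher_usage_sketch(en_vocab):
    matcher = PhraseMatcher(en_vocab)
    terms = ["Google Now", "Google Maps"]
    matcher.add("PRODUCT", [Doc(en_vocab, words=t.split()) for t in terms])
    doc = Doc(en_vocab, words=["I", "use", "Google", "Maps"])
    # expected: [("PRODUCT", 2, 4)]
    return [(en_vocab.strings[m_id], s, e) for m_id, s, e in matcher(doc)]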
def test_phrase_matcher_length(en_vocab):
matcher = PhraseMatcher(en_vocab)
assert len(matcher) == 0
matcher.add("TEST", [Doc(en_vocab, words=["test"])])
assert len(matcher) == 1
matcher.add("TEST2", [Doc(en_vocab, words=["test2"])])
assert len(matcher) == 2
def test_phrase_matcher_contains(en_vocab):
matcher = PhraseMatcher(en_vocab)
matcher.add("TEST", [Doc(en_vocab, words=["test"])])
assert "TEST" in matcher
assert "TEST2" not in matcher
def test_phrase_matcher_add_new_api(en_vocab):
doc = Doc(en_vocab, words=["a", "b"])
patterns = [Doc(en_vocab, words=["a"]), Doc(en_vocab, words=["a", "b"])]
matcher = PhraseMatcher(en_vocab)
matcher.add("OLD_API", None, *patterns)
assert len(matcher(doc)) == 2
matcher = PhraseMatcher(en_vocab)
on_match = Mock()
matcher.add("OLD_API_CALLBACK", on_match, *patterns)
assert len(matcher(doc)) == 2
assert on_match.call_count == 2
# New API: add(key: str, patterns: List[List[dict]], on_match: Callable)
matcher = PhraseMatcher(en_vocab)
matcher.add("NEW_API", patterns)
assert len(matcher(doc)) == 2
matcher = PhraseMatcher(en_vocab)
on_match = Mock()
matcher.add("NEW_API_CALLBACK", patterns, on_match=on_match)
assert len(matcher(doc)) == 2
assert on_match.call_count == 2
def test_phrase_matcher_repeated_add(en_vocab):
matcher = PhraseMatcher(en_vocab)
# match ID only gets added once
matcher.add("TEST", [Doc(en_vocab, words=["like"])])
matcher.add("TEST", [Doc(en_vocab, words=["like"])])
matcher.add("TEST", [Doc(en_vocab, words=["like"])])
matcher.add("TEST", [Doc(en_vocab, words=["like"])])
doc = Doc(en_vocab, words=["I", "like", "Google", "Now", "best"])
assert "TEST" in matcher
assert "TEST2" not in matcher
assert len(matcher(doc)) == 1
def test_phrase_matcher_remove(en_vocab):
matcher = PhraseMatcher(en_vocab)
matcher.add("TEST1", [Doc(en_vocab, words=["like"])])
matcher.add("TEST2", [Doc(en_vocab, words=["best"])])
doc = Doc(en_vocab, words=["I", "like", "Google", "Now", "best"])
assert "TEST1" in matcher
assert "TEST2" in matcher
assert "TEST3" not in matcher
assert len(matcher(doc)) == 2
matcher.remove("TEST1")
assert "TEST1" not in matcher
assert "TEST2" in matcher
assert "TEST3" not in matcher
assert len(matcher(doc)) == 1
matcher.remove("TEST2")
assert "TEST1" not in matcher
assert "TEST2" not in matcher
assert "TEST3" not in matcher
assert len(matcher(doc)) == 0
with pytest.raises(KeyError):
matcher.remove("TEST3")
assert "TEST1" not in matcher
assert "TEST2" not in matcher
assert "TEST3" not in matcher
assert len(matcher(doc)) == 0
def test_phrase_matcher_overlapping_with_remove(en_vocab):
matcher = PhraseMatcher(en_vocab)
matcher.add("TEST", [Doc(en_vocab, words=["like"])])
# TEST2 is added alongside TEST
matcher.add("TEST2", [Doc(en_vocab, words=["like"])])
doc = Doc(en_vocab, words=["I", "like", "Google", "Now", "best"])
assert "TEST" in matcher
assert len(matcher) == 2
assert len(matcher(doc)) == 2
# removing TEST does not remove the entry for TEST2
matcher.remove("TEST")
assert "TEST" not in matcher
assert len(matcher) == 1
assert len(matcher(doc)) == 1
assert matcher(doc)[0][0] == en_vocab.strings["TEST2"]
# removing TEST2 removes all
matcher.remove("TEST2")
assert "TEST2" not in matcher
assert len(matcher) == 0
assert len(matcher(doc)) == 0
def test_phrase_matcher_string_attrs(en_vocab):
words1 = ["I", "like", "cats"]
pos1 = ["PRON", "VERB", "NOUN"]
words2 = ["Yes", ",", "you", "hate", "dogs", "very", "much"]
pos2 = ["INTJ", "PUNCT", "PRON", "VERB", "NOUN", "ADV", "ADV"]
pattern = Doc(en_vocab, words=words1, pos=pos1)
matcher = PhraseMatcher(en_vocab, attr="POS")
matcher.add("TEST", [pattern])
doc = Doc(en_vocab, words=words2, pos=pos2)
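    # With attr="POS" the pattern's part-of-speech sequence ["PRON", "VERB",
    # "NOUN"] is what gets matched, so it finds "you hate dogs" regardless of
    # the surface forms.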
matches = matcher(doc)
assert len(matches) == 1
match_id, start, end = matches[0]
assert match_id == en_vocab.strings["TEST"]
assert start == 2
assert end == 5
def test_phrase_matcher_string_attrs_negative(en_vocab):
"""Test that token with the control codes as ORTH are *not* matched."""
words1 = ["I", "like", "cats"]
pos1 = ["PRON", "VERB", "NOUN"]
words2 = ["matcher:POS-PRON", "matcher:POS-VERB", "matcher:POS-NOUN"]
pos2 = ["X", "X", "X"]
pattern = Doc(en_vocab, words=words1, pos=pos1)
matcher = PhraseMatcher(en_vocab, attr="POS")
matcher.add("TEST", [pattern])
doc = Doc(en_vocab, words=words2, pos=pos2)
matches = matcher(doc)
assert len(matches) == 0
def test_phrase_matcher_bool_attrs(en_vocab):
words1 = ["Hello", "world", "!"]
words2 = ["No", "problem", ",", "he", "said", "."]
pattern = Doc(en_vocab, words=words1)
matcher = PhraseMatcher(en_vocab, attr="IS_PUNCT")
matcher.add("TEST", [pattern])
doc = Doc(en_vocab, words=words2)
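    # With attr="IS_PUNCT" the pattern reduces to the boolean sequence
    # [False, False, True], which "No problem ," and "he said ." both match.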
matches = matcher(doc)
assert len(matches) == 2
match_id1, start1, end1 = matches[0]
match_id2, start2, end2 = matches[1]
assert match_id1 == en_vocab.strings["TEST"]
assert match_id2 == en_vocab.strings["TEST"]
assert start1 == 0
assert end1 == 3
assert start2 == 3
assert end2 == 6
def test_phrase_matcher_validation(en_vocab):
doc1 = Doc(en_vocab, words=["Test"])
doc1[0].dep_ = "ROOT"
doc2 = Doc(en_vocab, words=["Test"])
doc2[0].tag_ = "TAG"
doc2[0].pos_ = "X"
doc2[0].set_morph("Feat=Val")
doc3 = Doc(en_vocab, words=["Test"])
matcher = PhraseMatcher(en_vocab, validate=True)
with pytest.warns(UserWarning):
matcher.add("TEST1", [doc1])
with pytest.warns(UserWarning):
matcher.add("TEST2", [doc2])
with warnings.catch_warnings():
warnings.simplefilter("error")
matcher.add("TEST3", [doc3])
matcher = PhraseMatcher(en_vocab, attr="POS", validate=True)
with warnings.catch_warnings():
warnings.simplefilter("error")
matcher.add("TEST4", [doc2])
def test_attr_validation(en_vocab):
with pytest.raises(ValueError):
PhraseMatcher(en_vocab, attr="UNSUPPORTED")
def test_attr_pipeline_checks(en_vocab):
doc1 = Doc(en_vocab, words=["Test"])
doc1[0].dep_ = "ROOT"
doc2 = Doc(en_vocab, words=["Test"])
doc2[0].tag_ = "TAG"
doc2[0].pos_ = "X"
doc2[0].set_morph("Feat=Val")
doc2[0].lemma_ = "LEMMA"
doc3 = Doc(en_vocab, words=["Test"])
# DEP requires DEP
matcher = PhraseMatcher(en_vocab, attr="DEP")
matcher.add("TEST1", [doc1])
with pytest.raises(ValueError):
matcher.add("TEST2", [doc2])
with pytest.raises(ValueError):
matcher.add("TEST3", [doc3])
# TAG, POS, LEMMA require those values
for attr in ("TAG", "POS", "LEMMA"):
matcher = PhraseMatcher(en_vocab, attr=attr)
matcher.add("TEST2", [doc2])
with pytest.raises(ValueError):
matcher.add("TEST1", [doc1])
with pytest.raises(ValueError):
matcher.add("TEST3", [doc3])
# TEXT/ORTH only require tokens
matcher = PhraseMatcher(en_vocab, attr="ORTH")
matcher.add("TEST3", [doc3])
matcher = PhraseMatcher(en_vocab, attr="TEXT")
matcher.add("TEST3", [doc3])
def test_phrase_matcher_callback(en_vocab):
mock = Mock()
doc = Doc(en_vocab, words=["I", "like", "Google", "Now", "best"])
pattern = Doc(en_vocab, words=["Google", "Now"])
matcher = PhraseMatcher(en_vocab)
matcher.add("COMPANY", [pattern], on_match=mock)
matches = matcher(doc)
mock.assert_called_once_with(matcher, doc, 0, matches)
def test_phrase_matcher_remove_overlapping_patterns(en_vocab):
matcher = PhraseMatcher(en_vocab)
pattern1 = Doc(en_vocab, words=["this"])
pattern2 = Doc(en_vocab, words=["this", "is"])
pattern3 = Doc(en_vocab, words=["this", "is", "a"])
pattern4 = Doc(en_vocab, words=["this", "is", "a", "word"])
matcher.add("THIS", [pattern1, pattern2, pattern3, pattern4])
matcher.remove("THIS")
def test_phrase_matcher_basic_check(en_vocab):
matcher = PhraseMatcher(en_vocab)
# Potential mistake: pass in pattern instead of list of patterns
pattern = Doc(en_vocab, words=["hello", "world"])
with pytest.raises(ValueError):
matcher.add("TEST", pattern)
def test_phrase_matcher_pickle(en_vocab):
matcher = PhraseMatcher(en_vocab)
mock = Mock()
matcher.add("TEST", [Doc(en_vocab, words=["test"])])
matcher.add("TEST2", [Doc(en_vocab, words=["test2"])], on_match=mock)
doc = Doc(en_vocab, words=["these", "are", "tests", ":", "test", "test2"])
assert len(matcher) == 2
b = srsly.pickle_dumps(matcher)
matcher_unpickled = srsly.pickle_loads(b)
# call after pickling to avoid recursion error related to mock
matches = matcher(doc)
matches_unpickled = matcher_unpickled(doc)
assert len(matcher) == len(matcher_unpickled)
assert matches == matches_unpickled
# clunky way to vaguely check that callback is unpickled
(vocab, docs, callbacks, attr) = matcher_unpickled.__reduce__()[1]
assert isinstance(callbacks.get("TEST2"), Mock)
def test_phrase_matcher_as_spans(en_vocab):
"""Test the new as_spans=True API."""
matcher = PhraseMatcher(en_vocab)
matcher.add("A", [Doc(en_vocab, words=["hello", "world"])])
matcher.add("B", [Doc(en_vocab, words=["test"])])
doc = Doc(en_vocab, words=["...", "hello", "world", "this", "is", "a", "test"])
matches = matcher(doc, as_spans=True)
assert len(matches) == 2
assert isinstance(matches[0], Span)
assert matches[0].text == "hello world"
assert matches[0].label_ == "A"
assert isinstance(matches[1], Span)
assert matches[1].text == "test"
assert matches[1].label_ == "B"
def test_phrase_matcher_deprecated(en_vocab):
matcher = PhraseMatcher(en_vocab)
matcher.add("TEST", [Doc(en_vocab, words=["helllo"])])
doc = Doc(en_vocab, words=["hello", "world"])
with pytest.warns(DeprecationWarning) as record:
for _ in matcher.pipe([doc]):
pass
assert record.list
assert "spaCy v3.0" in str(record.list[0].message)
@pytest.mark.parametrize("attr", ["SENT_START", "IS_SENT_START"])
def test_phrase_matcher_sent_start(en_vocab, attr):
_ = PhraseMatcher(en_vocab, attr=attr) # noqa: F841
def test_span_in_phrasematcher(en_vocab):
"""Ensure that PhraseMatcher accepts Span and Doc as input"""
# fmt: off
words = ["I", "like", "Spans", "and", "Docs", "in", "my", "input", ",", "and", "nothing", "else", "."]
# fmt: on
doc = Doc(en_vocab, words=words)
span = doc[:8]
pattern = Doc(en_vocab, words=["Spans", "and", "Docs"])
matcher = PhraseMatcher(en_vocab)
matcher.add("SPACY", [pattern])
matches_doc = matcher(doc)
matches_span = matcher(span)
assert len(matches_doc) == 1
assert len(matches_span) == 1
def test_span_v_doc_in_phrasematcher(en_vocab):
"""Ensure that PhraseMatcher only returns matches in input Span and not in entire Doc"""
# fmt: off
words = [
"I", "like", "Spans", "and", "Docs", "in", "my", "input", ",", "Spans",
"and", "Docs", "in", "my", "matchers", "," "and", "Spans", "and", "Docs",
"everywhere", "."
]
# fmt: on
doc = Doc(en_vocab, words=words)
span = doc[9:15] # second clause
pattern = Doc(en_vocab, words=["Spans", "and", "Docs"])
matcher = PhraseMatcher(en_vocab)
matcher.add("SPACY", [pattern])
matches_doc = matcher(doc)
matches_span = matcher(span)
assert len(matches_doc) == 3
assert len(matches_span) == 1

spaCy | spaCy-master/spacy/tests/morphology/__init__.py
|
spaCy | spaCy-master/spacy/tests/morphology/test_morph_converters.py
from spacy.morphology import Morphology
def test_feats_converters():
feats = "Case=dat,gen|Number=sing"
feats_dict = {"Case": "dat,gen", "Number": "sing"}
# simple conversions
assert Morphology.dict_to_feats(feats_dict) == feats
assert Morphology.feats_to_dict(feats) == feats_dict
# roundtrips
assert Morphology.dict_to_feats(Morphology.feats_to_dict(feats)) == feats
assert Morphology.feats_to_dict(Morphology.dict_to_feats(feats_dict)) == feats_dict
# unsorted input is normalized
unsorted_feats = "Number=sing|Case=gen,dat"
unsorted_feats_dict = {"Case": "gen,dat", "Number": "sing"}
assert Morphology.feats_to_dict(unsorted_feats) == feats_dict
assert Morphology.dict_to_feats(unsorted_feats_dict) == feats
assert Morphology.dict_to_feats(Morphology.feats_to_dict(unsorted_feats)) == feats

spaCy | spaCy-master/spacy/tests/morphology/test_morph_features.py
import pytest
from spacy.morphology import Morphology
from spacy.strings import StringStore, get_string_id
@pytest.fixture
def morphology():
return Morphology(StringStore())
def test_init(morphology):
pass
def test_add_morphology_with_string_names(morphology):
morphology.add({"Case": "gen", "Number": "sing"})
def test_add_morphology_with_int_ids(morphology):
morphology.strings.add("Case")
morphology.strings.add("gen")
morphology.strings.add("Number")
morphology.strings.add("sing")
morphology.add(
{
get_string_id("Case"): get_string_id("gen"),
get_string_id("Number"): get_string_id("sing"),
}
)
def test_add_morphology_with_mix_strings_and_ints(morphology):
morphology.strings.add("PunctSide")
morphology.strings.add("ini")
morphology.add(
{get_string_id("PunctSide"): get_string_id("ini"), "VerbType": "aux"}
)
def test_morphology_tags_hash_distinctly(morphology):
tag1 = morphology.add({"PunctSide": "ini", "VerbType": "aux"})
tag2 = morphology.add({"Case": "gen", "Number": "sing"})
assert tag1 != tag2
def test_morphology_tags_hash_independent_of_order(morphology):
tag1 = morphology.add({"Case": "gen", "Number": "sing"})
tag2 = morphology.add({"Number": "sing", "Case": "gen"})
assert tag1 == tag2

spaCy | spaCy-master/spacy/tests/morphology/test_morph_pickle.py
import pickle
import pytest
from spacy.morphology import Morphology
from spacy.strings import StringStore
@pytest.fixture
def morphology():
morphology = Morphology(StringStore())
morphology.add("Feat1=Val1|Feat2=Val2")
morphology.add("Feat3=Val3|Feat4=Val4")
return morphology
def test_morphology_pickle_roundtrip(morphology):
b = pickle.dumps(morphology)
reloaded_morphology = pickle.loads(b)
feat = reloaded_morphology.get(morphology.strings["Feat1=Val1|Feat2=Val2"])
assert feat == "Feat1=Val1|Feat2=Val2"
feat = reloaded_morphology.get(morphology.strings["Feat3=Val3|Feat4=Val4"])
assert feat == "Feat3=Val3|Feat4=Val4"

spaCy | spaCy-master/spacy/tests/package/__init__.py
|
spaCy | spaCy-master/spacy/tests/package/test_requirements.py
import re
from pathlib import Path
def test_build_dependencies():
# Check that library requirements are pinned exactly the same across different setup files.
# TODO: correct checks for numpy rather than ignoring
libs_ignore_requirements = [
"pytest",
"pytest-timeout",
"mock",
"flake8",
"hypothesis",
"pre-commit",
"black",
"isort",
"mypy",
"types-dataclasses",
"types-mock",
"types-requests",
"types-setuptools",
]
# ignore language-specific packages that shouldn't be installed by all
libs_ignore_setup = [
"fugashi",
"natto-py",
"pythainlp",
"sudachipy",
"sudachidict_core",
"spacy-pkuseg",
"thinc-apple-ops",
]
# check requirements.txt
req_dict = {}
root_dir = Path(__file__).parent
req_file = root_dir / "requirements.txt"
with req_file.open() as f:
lines = f.readlines()
for line in lines:
line = line.strip()
if not line.startswith("#"):
lib, v = _parse_req(line)
if lib and lib not in libs_ignore_requirements:
req_dict[lib] = v
# check setup.cfg and compare to requirements.txt
# also fails when there are missing or additional libs
setup_file = root_dir / "setup.cfg"
with setup_file.open() as f:
lines = f.readlines()
setup_keys = set()
for line in lines:
line = line.strip()
if not line.startswith("#"):
lib, v = _parse_req(line)
if lib and not lib.startswith("cupy") and lib not in libs_ignore_setup:
req_v = req_dict.get(lib, None)
assert (
req_v is not None
), "{} in setup.cfg but not in requirements.txt".format(lib)
assert (lib + v) == (lib + req_v), (
"{} has different version in setup.cfg and in requirements.txt: "
"{} and {} respectively".format(lib, v, req_v)
)
setup_keys.add(lib)
assert sorted(setup_keys) == sorted(
req_dict.keys()
) # if fail: requirements.txt contains a lib not in setup.cfg
# check pyproject.toml and compare the versions of the libs to requirements.txt
# does not fail when there are missing or additional libs
toml_file = root_dir / "pyproject.toml"
with toml_file.open() as f:
lines = f.readlines()
for line in lines:
line = line.strip().strip(",").strip('"')
if not line.startswith("#"):
lib, v = _parse_req(line)
if lib and lib not in libs_ignore_requirements:
req_v = req_dict.get(lib, None)
assert (lib + v) == (lib + req_v), (
"{} has different version in pyproject.toml and in requirements.txt: "
"{} and {} respectively".format(lib, v, req_v)
)
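
# Split a requirement line such as "somelib>=1.0.0,<2.0.0" into (library name,
# version specifier); lines whose version spec does not start with a two-character
# comparison (e.g. ">=", "==") return (None, None).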
def _parse_req(line):
lib = re.match(r"^[a-z0-9\-]*", line).group(0)
v = line.replace(lib, "").strip()
if not re.match(r"^[<>=][<>=].*", v):
return None, None
return lib, v

spaCy | spaCy-master/spacy/tests/parser/__init__.py
|
spaCy | spaCy-master/spacy/tests/parser/test_add_label.py
import pytest
from thinc.api import Adam, fix_random_seed
from spacy import registry
from spacy.attrs import NORM
from spacy.language import Language
from spacy.pipeline import DependencyParser, EntityRecognizer
from spacy.pipeline.dep_parser import DEFAULT_PARSER_MODEL
from spacy.pipeline.ner import DEFAULT_NER_MODEL
from spacy.tokens import Doc
from spacy.training import Example
from spacy.vocab import Vocab
@pytest.fixture
def vocab():
return Vocab(lex_attr_getters={NORM: lambda s: s})
@pytest.fixture
def parser(vocab):
cfg = {"model": DEFAULT_PARSER_MODEL}
model = registry.resolve(cfg, validate=True)["model"]
parser = DependencyParser(vocab, model)
return parser
def test_init_parser(parser):
pass
def _train_parser(parser):
fix_random_seed(1)
parser.add_label("left")
parser.initialize(lambda: [_parser_example(parser)])
sgd = Adam(0.001)
for i in range(5):
losses = {}
doc = Doc(parser.vocab, words=["a", "b", "c", "d"])
gold = {"heads": [1, 1, 3, 3], "deps": ["left", "ROOT", "left", "ROOT"]}
example = Example.from_dict(doc, gold)
parser.update([example], sgd=sgd, losses=losses)
return parser
def _parser_example(parser):
doc = Doc(parser.vocab, words=["a", "b", "c", "d"])
gold = {"heads": [1, 1, 3, 3], "deps": ["right", "ROOT", "left", "ROOT"]}
return Example.from_dict(doc, gold)
def _ner_example(ner):
doc = Doc(
ner.vocab,
words=["Joe", "loves", "visiting", "London", "during", "the", "weekend"],
)
gold = {"entities": [(0, 3, "PERSON"), (19, 25, "LOC")]}
return Example.from_dict(doc, gold)
def test_add_label(parser):
parser = _train_parser(parser)
parser.add_label("right")
sgd = Adam(0.001)
for i in range(100):
losses = {}
parser.update([_parser_example(parser)], sgd=sgd, losses=losses)
doc = Doc(parser.vocab, words=["a", "b", "c", "d"])
doc = parser(doc)
assert doc[0].dep_ == "right"
assert doc[2].dep_ == "left"
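
# Labels added to an NER component must survive a to_bytes/from_bytes round-trip;
# this requires resizing the second model's output layer to match the first.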
def test_add_label_deserializes_correctly():
cfg = {"model": DEFAULT_NER_MODEL}
model = registry.resolve(cfg, validate=True)["model"]
ner1 = EntityRecognizer(Vocab(), model)
ner1.add_label("C")
ner1.add_label("B")
ner1.add_label("A")
ner1.initialize(lambda: [_ner_example(ner1)])
ner2 = EntityRecognizer(Vocab(), model)
# the second model needs to be resized before we can call from_bytes
ner2.model.attrs["resize_output"](ner2.model, ner1.moves.n_moves)
ner2.from_bytes(ner1.to_bytes())
assert ner1.moves.n_moves == ner2.moves.n_moves
for i in range(ner1.moves.n_moves):
assert ner1.moves.get_class_name(i) == ner2.moves.get_class_name(i)
@pytest.mark.parametrize(
"pipe_cls,n_moves,model_config",
[
(DependencyParser, 5, DEFAULT_PARSER_MODEL),
(EntityRecognizer, 4, DEFAULT_NER_MODEL),
],
)
def test_add_label_get_label(pipe_cls, n_moves, model_config):
"""Test that added labels are returned correctly. This test was added to
test for a bug in DependencyParser.labels that'd cause it to fail when
splitting the move names.
"""
labels = ["A", "B", "C"]
model = registry.resolve({"model": model_config}, validate=True)["model"]
pipe = pipe_cls(Vocab(), model)
for label in labels:
pipe.add_label(label)
assert len(pipe.move_names) == len(labels) * n_moves
pipe_labels = sorted(list(pipe.labels))
assert pipe_labels == labels
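
# The following tests check that labels seen only in the incoming data (here "D")
# are added to the NER label set on the fly during predict, beam/greedy parsing
# and update.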
def test_ner_labels_added_implicitly_on_predict():
nlp = Language()
ner = nlp.add_pipe("ner")
for label in ["A", "B", "C"]:
ner.add_label(label)
nlp.initialize()
doc = Doc(nlp.vocab, words=["hello", "world"], ents=["B-D", "O"])
ner(doc)
assert [t.ent_type_ for t in doc] == ["D", ""]
assert "D" in ner.labels
def test_ner_labels_added_implicitly_on_beam_parse():
nlp = Language()
ner = nlp.add_pipe("beam_ner")
for label in ["A", "B", "C"]:
ner.add_label(label)
nlp.initialize()
doc = Doc(nlp.vocab, words=["hello", "world"], ents=["B-D", "O"])
ner.beam_parse([doc], beam_width=32)
assert "D" in ner.labels
def test_ner_labels_added_implicitly_on_greedy_parse():
nlp = Language()
ner = nlp.add_pipe("beam_ner")
for label in ["A", "B", "C"]:
ner.add_label(label)
nlp.initialize()
doc = Doc(nlp.vocab, words=["hello", "world"], ents=["B-D", "O"])
ner.greedy_parse([doc])
assert "D" in ner.labels
def test_ner_labels_added_implicitly_on_update():
nlp = Language()
ner = nlp.add_pipe("ner")
for label in ["A", "B", "C"]:
ner.add_label(label)
nlp.initialize()
doc = Doc(nlp.vocab, words=["hello", "world"], ents=["B-D", "O"])
example = Example(nlp.make_doc(doc.text), doc)
assert "D" not in ner.labels
nlp.update([example])
assert "D" in ner.labels

spaCy | spaCy-master/spacy/tests/parser/test_arc_eager_oracle.py
import pytest
from spacy import registry
from spacy.pipeline import DependencyParser
from spacy.pipeline._parser_internals.arc_eager import ArcEager
from spacy.pipeline._parser_internals.nonproj import projectivize
from spacy.pipeline.dep_parser import DEFAULT_PARSER_MODEL
from spacy.tokens import Doc
from spacy.training import Example
from spacy.vocab import Vocab
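
# Replay a sequence of gold transitions on an example and record, at each step,
# the oracle cost of every possible action, so the tests below can check which
# moves are zero-cost.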
def get_sequence_costs(M, words, heads, deps, transitions):
doc = Doc(Vocab(), words=words)
example = Example.from_dict(doc, {"heads": heads, "deps": deps})
states, golds, _ = M.init_gold_batch([example])
state = states[0]
gold = golds[0]
cost_history = []
for gold_action in transitions:
gold.update(state)
state_costs = {}
for i in range(M.n_moves):
name = M.class_name(i)
state_costs[name] = M.get_cost(state, gold, i)
M.transition(state, gold_action)
cost_history.append(state_costs)
return state, cost_history
@pytest.fixture
def vocab():
return Vocab()
@pytest.fixture
def arc_eager(vocab):
moves = ArcEager(vocab.strings, ArcEager.get_actions())
moves.add_action(2, "left")
moves.add_action(3, "right")
return moves
@pytest.mark.issue(7056)
def test_issue7056():
"""Test that the Unshift transition works properly, and doesn't cause
sentence segmentation errors."""
vocab = Vocab()
ae = ArcEager(
vocab.strings, ArcEager.get_actions(left_labels=["amod"], right_labels=["pobj"])
)
doc = Doc(vocab, words="Severe pain , after trauma".split())
state = ae.init_batch([doc])[0]
ae.apply_transition(state, "S")
ae.apply_transition(state, "L-amod")
ae.apply_transition(state, "S")
ae.apply_transition(state, "S")
ae.apply_transition(state, "S")
ae.apply_transition(state, "R-pobj")
ae.apply_transition(state, "D")
ae.apply_transition(state, "D")
ae.apply_transition(state, "D")
assert not state.eol()
def test_oracle_four_words(arc_eager, vocab):
words = ["a", "b", "c", "d"]
heads = [1, 1, 3, 3]
deps = ["left", "ROOT", "left", "ROOT"]
for dep in deps:
arc_eager.add_action(2, dep) # Left
arc_eager.add_action(3, dep) # Right
actions = ["S", "L-left", "B-ROOT", "S", "D", "S", "L-left", "S", "D"]
state, cost_history = get_sequence_costs(arc_eager, words, heads, deps, actions)
expected_gold = [
["S"],
["B-ROOT", "L-left"],
["B-ROOT"],
["S"],
["D"],
["S"],
["L-left"],
["S"],
["D"],
]
assert state.is_final()
for i, state_costs in enumerate(cost_history):
# Check gold moves is 0 cost
golds = [act for act, cost in state_costs.items() if cost < 1]
assert golds == expected_gold[i], (i, golds, expected_gold[i])
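
# Each tuple is (token index, word, tag, head index, dep label, BILUO entity tag)
# for a single realistic sentence used to exercise the oracle below.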
annot_tuples = [
(0, "When", "WRB", 11, "advmod", "O"),
(1, "Walter", "NNP", 2, "compound", "B-PERSON"),
(2, "Rodgers", "NNP", 11, "nsubj", "L-PERSON"),
(3, ",", ",", 2, "punct", "O"),
(4, "our", "PRP$", 6, "poss", "O"),
(5, "embedded", "VBN", 6, "amod", "O"),
(6, "reporter", "NN", 2, "appos", "O"),
(7, "with", "IN", 6, "prep", "O"),
(8, "the", "DT", 10, "det", "B-ORG"),
(9, "3rd", "NNP", 10, "compound", "I-ORG"),
(10, "Cavalry", "NNP", 7, "pobj", "L-ORG"),
(11, "says", "VBZ", 44, "advcl", "O"),
(12, "three", "CD", 13, "nummod", "U-CARDINAL"),
(13, "battalions", "NNS", 16, "nsubj", "O"),
(14, "of", "IN", 13, "prep", "O"),
(15, "troops", "NNS", 14, "pobj", "O"),
(16, "are", "VBP", 11, "ccomp", "O"),
(17, "on", "IN", 16, "prep", "O"),
(18, "the", "DT", 19, "det", "O"),
(19, "ground", "NN", 17, "pobj", "O"),
(20, ",", ",", 17, "punct", "O"),
(21, "inside", "IN", 17, "prep", "O"),
(22, "Baghdad", "NNP", 21, "pobj", "U-GPE"),
(23, "itself", "PRP", 22, "appos", "O"),
(24, ",", ",", 16, "punct", "O"),
(25, "have", "VBP", 26, "aux", "O"),
(26, "taken", "VBN", 16, "dep", "O"),
(27, "up", "RP", 26, "prt", "O"),
(28, "positions", "NNS", 26, "dobj", "O"),
(29, "they", "PRP", 31, "nsubj", "O"),
(30, "'re", "VBP", 31, "aux", "O"),
(31, "going", "VBG", 26, "parataxis", "O"),
(32, "to", "TO", 33, "aux", "O"),
(33, "spend", "VB", 31, "xcomp", "O"),
(34, "the", "DT", 35, "det", "B-TIME"),
(35, "night", "NN", 33, "dobj", "L-TIME"),
(36, "there", "RB", 33, "advmod", "O"),
(37, "presumably", "RB", 33, "advmod", "O"),
(38, ",", ",", 44, "punct", "O"),
(39, "how", "WRB", 40, "advmod", "O"),
(40, "many", "JJ", 41, "amod", "O"),
(41, "soldiers", "NNS", 44, "pobj", "O"),
(42, "are", "VBP", 44, "aux", "O"),
(43, "we", "PRP", 44, "nsubj", "O"),
(44, "talking", "VBG", 44, "ROOT", "O"),
(45, "about", "IN", 44, "prep", "O"),
(46, "right", "RB", 47, "advmod", "O"),
(47, "now", "RB", 44, "advmod", "O"),
(48, "?", ".", 44, "punct", "O"),
]
def test_get_oracle_actions():
ids, words, tags, heads, deps, ents = [], [], [], [], [], []
for id_, word, tag, head, dep, ent in annot_tuples:
ids.append(id_)
words.append(word)
tags.append(tag)
heads.append(head)
deps.append(dep)
ents.append(ent)
doc = Doc(Vocab(), words=[t[1] for t in annot_tuples])
cfg = {"model": DEFAULT_PARSER_MODEL}
model = registry.resolve(cfg, validate=True)["model"]
parser = DependencyParser(doc.vocab, model)
parser.moves.add_action(0, "")
parser.moves.add_action(1, "")
parser.moves.add_action(1, "")
parser.moves.add_action(4, "ROOT")
heads, deps = projectivize(heads, deps)
for i, (head, dep) in enumerate(zip(heads, deps)):
if head > i:
parser.moves.add_action(2, dep)
elif head < i:
parser.moves.add_action(3, dep)
example = Example.from_dict(
doc, {"words": words, "tags": tags, "heads": heads, "deps": deps}
)
parser.moves.get_oracle_sequence(example)
def test_oracle_dev_sentence(vocab, arc_eager):
words_deps_heads = """
Rolls-Royce nn Inc.
Motor nn Inc.
Cars nn Inc.
Inc. nsubj said
said ROOT said
it nsubj expects
expects ccomp said
its poss sales
U.S. nn sales
sales nsubj steady
to aux steady
remain cop steady
steady xcomp expects
at prep steady
about quantmod 1,200
1,200 num cars
cars pobj at
in prep steady
1990 pobj in
. punct said
"""
expected_transitions = [
"S", # Shift "Rolls-Royce"
"S", # Shift 'Motor'
"S", # Shift 'Cars'
"L-nn", # Attach 'Cars' to 'Inc.'
"L-nn", # Attach 'Motor' to 'Inc.'
"L-nn", # Attach 'Rolls-Royce' to 'Inc.'
"S", # Shift "Inc."
"L-nsubj", # Attach 'Inc.' to 'said'
"S", # Shift 'said'
"S", # Shift 'it'
"L-nsubj", # Attach 'it.' to 'expects'
"R-ccomp", # Attach 'expects' to 'said'
"S", # Shift 'its'
"S", # Shift 'U.S.'
"L-nn", # Attach 'U.S.' to 'sales'
"L-poss", # Attach 'its' to 'sales'
"S", # Shift 'sales'
"S", # Shift 'to'
"S", # Shift 'remain'
"L-cop", # Attach 'remain' to 'steady'
"L-aux", # Attach 'to' to 'steady'
"L-nsubj", # Attach 'sales' to 'steady'
"R-xcomp", # Attach 'steady' to 'expects'
"R-prep", # Attach 'at' to 'steady'
"S", # Shift 'about'
"L-quantmod", # Attach "about" to "1,200"
"S", # Shift "1,200"
"L-num", # Attach "1,200" to "cars"
"R-pobj", # Attach "cars" to "at"
"D", # Reduce "cars"
"D", # Reduce "at"
"R-prep", # Attach "in" to "steady"
"R-pobj", # Attach "1990" to "in"
"D", # Reduce "1990"
"D", # Reduce "in"
"D", # Reduce "steady"
"D", # Reduce "expects"
"R-punct", # Attach "." to "said"
"D", # Reduce "."
"D", # Reduce "said"
]
gold_words = []
gold_deps = []
gold_heads = []
for line in words_deps_heads.strip().split("\n"):
line = line.strip()
if not line:
continue
word, dep, head = line.split()
gold_words.append(word)
gold_deps.append(dep)
gold_heads.append(head)
gold_heads = [gold_words.index(head) for head in gold_heads]
for dep in gold_deps:
arc_eager.add_action(2, dep) # Left
arc_eager.add_action(3, dep) # Right
doc = Doc(Vocab(), words=gold_words)
example = Example.from_dict(doc, {"heads": gold_heads, "deps": gold_deps})
ae_oracle_actions = arc_eager.get_oracle_sequence(example, _debug=False)
ae_oracle_actions = [arc_eager.get_class_name(i) for i in ae_oracle_actions]
assert ae_oracle_actions == expected_transitions
def test_oracle_bad_tokenization(vocab, arc_eager):
words_deps_heads = """
[catalase] dep is
: punct is
that nsubj is
is root is
bad comp is
"""
gold_words = []
gold_deps = []
gold_heads = []
for line in words_deps_heads.strip().split("\n"):
line = line.strip()
if not line:
continue
word, dep, head = line.split()
gold_words.append(word)
gold_deps.append(dep)
gold_heads.append(head)
gold_heads = [gold_words.index(head) for head in gold_heads]
for dep in gold_deps:
arc_eager.add_action(2, dep) # Left
arc_eager.add_action(3, dep) # Right
reference = Doc(Vocab(), words=gold_words, deps=gold_deps, heads=gold_heads)
predicted = Doc(
reference.vocab, words=["[", "catalase", "]", ":", "that", "is", "bad"]
)
example = Example(predicted=predicted, reference=reference)
ae_oracle_actions = arc_eager.get_oracle_sequence(example, _debug=False)
ae_oracle_actions = [arc_eager.get_class_name(i) for i in ae_oracle_actions]
assert ae_oracle_actions

spaCy | spaCy-master/spacy/tests/parser/test_ner.py
import logging
import random
import pytest
from numpy.testing import assert_equal
from spacy import registry, util
from spacy.attrs import ENT_IOB
from spacy.lang.en import English
from spacy.lang.it import Italian
from spacy.language import Language
from spacy.lookups import Lookups
from spacy.pipeline import EntityRecognizer
from spacy.pipeline._parser_internals.ner import BiluoPushDown
from spacy.pipeline.ner import DEFAULT_NER_MODEL
from spacy.tokens import Doc, Span
from spacy.training import Example, iob_to_biluo, split_bilu_label
from spacy.vocab import Vocab
from ..util import make_tempdir
TRAIN_DATA = [
("Who is Shaka Khan?", {"entities": [(7, 17, "PERSON")]}),
("I like London and Berlin.", {"entities": [(7, 13, "LOC"), (18, 24, "LOC")]}),
]
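
# Each training example pairs a text with character-offset entity annotations:
# (start_char, end_char, label).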
@pytest.fixture
def neg_key():
return "non_entities"
@pytest.fixture
def vocab():
return Vocab()
@pytest.fixture
def doc(vocab):
return Doc(vocab, words=["Casey", "went", "to", "New", "York", "."])
@pytest.fixture
def entity_annots(doc):
casey = doc[0:1]
ny = doc[3:5]
return [
(casey.start_char, casey.end_char, "PERSON"),
(ny.start_char, ny.end_char, "GPE"),
]
@pytest.fixture
def entity_types(entity_annots):
return sorted(set([label for (s, e, label) in entity_annots]))
@pytest.fixture
def tsys(vocab, entity_types):
actions = BiluoPushDown.get_actions(entity_types=entity_types)
return BiluoPushDown(vocab.strings, actions)
@pytest.mark.parametrize("label", ["U-JOB-NAME"])
@pytest.mark.issue(1967)
def test_issue1967(label):
nlp = Language()
config = {}
ner = nlp.create_pipe("ner", config=config)
example = Example.from_dict(
Doc(ner.vocab, words=["word"]),
{
"ids": [0],
"words": ["word"],
"tags": ["tag"],
"heads": [0],
"deps": ["dep"],
"entities": [label],
},
)
assert "JOB-NAME" in ner.moves.get_actions(examples=[example])[1]
@pytest.mark.issue(2179)
def test_issue2179():
"""Test that spurious 'extra_labels' aren't created when initializing NER."""
nlp = Italian()
ner = nlp.add_pipe("ner")
ner.add_label("CITIZENSHIP")
nlp.initialize()
nlp2 = Italian()
nlp2.add_pipe("ner")
assert len(nlp2.get_pipe("ner").labels) == 0
model = nlp2.get_pipe("ner").model
model.attrs["resize_output"](model, nlp.get_pipe("ner").moves.n_moves)
nlp2.from_bytes(nlp.to_bytes())
assert "extra_labels" not in nlp2.get_pipe("ner").cfg
assert nlp2.get_pipe("ner").labels == ("CITIZENSHIP",)
@pytest.mark.issue(2385)
def test_issue2385():
"""Test that IOB tags are correctly converted to BILUO tags."""
# fix bug in labels with a 'b' character
tags1 = ("B-BRAWLER", "I-BRAWLER", "I-BRAWLER")
assert iob_to_biluo(tags1) == ["B-BRAWLER", "I-BRAWLER", "L-BRAWLER"]
# maintain support for iob1 format
tags2 = ("I-ORG", "I-ORG", "B-ORG")
assert iob_to_biluo(tags2) == ["B-ORG", "L-ORG", "U-ORG"]
# maintain support for iob2 format
tags3 = ("B-PERSON", "I-PERSON", "B-PERSON")
assert iob_to_biluo(tags3) == ["B-PERSON", "L-PERSON", "U-PERSON"]
# ensure it works with hyphens in the name
tags4 = ("B-MULTI-PERSON", "I-MULTI-PERSON", "B-MULTI-PERSON")
assert iob_to_biluo(tags4) == ["B-MULTI-PERSON", "L-MULTI-PERSON", "U-MULTI-PERSON"]
@pytest.mark.issue(2800)
def test_issue2800():
"""Test issue that arises when too many labels are added to NER model.
Used to cause segfault.
"""
nlp = English()
train_data = []
train_data.extend(
[Example.from_dict(nlp.make_doc("One sentence"), {"entities": []})]
)
entity_types = [str(i) for i in range(1000)]
ner = nlp.add_pipe("ner")
for entity_type in list(entity_types):
ner.add_label(entity_type)
optimizer = nlp.initialize()
for i in range(20):
losses = {}
random.shuffle(train_data)
for example in train_data:
nlp.update([example], sgd=optimizer, losses=losses, drop=0.5)
@pytest.mark.issue(3209)
def test_issue3209():
"""Test issue that occurred in spaCy nightly where NER labels were being
mapped to classes incorrectly after loading the model, when the labels
were added using ner.add_label().
"""
nlp = English()
ner = nlp.add_pipe("ner")
ner.add_label("ANIMAL")
nlp.initialize()
move_names = ["O", "B-ANIMAL", "I-ANIMAL", "L-ANIMAL", "U-ANIMAL"]
assert ner.move_names == move_names
nlp2 = English()
ner2 = nlp2.add_pipe("ner")
model = ner2.model
model.attrs["resize_output"](model, ner.moves.n_moves)
nlp2.from_bytes(nlp.to_bytes())
assert ner2.move_names == move_names
def test_labels_from_BILUO():
"""Test that labels are inferred correctly when there's a - in label."""
nlp = English()
ner = nlp.add_pipe("ner")
ner.add_label("LARGE-ANIMAL")
nlp.initialize()
move_names = [
"O",
"B-LARGE-ANIMAL",
"I-LARGE-ANIMAL",
"L-LARGE-ANIMAL",
"U-LARGE-ANIMAL",
]
labels = {"LARGE-ANIMAL"}
assert ner.move_names == move_names
assert set(ner.labels) == labels
@pytest.mark.issue(4267)
def test_issue4267():
"""Test that running an entity_ruler after ner gives consistent results"""
nlp = English()
ner = nlp.add_pipe("ner")
ner.add_label("PEOPLE")
nlp.initialize()
assert "ner" in nlp.pipe_names
# assert that we have correct IOB annotations
doc1 = nlp("hi")
assert doc1.has_annotation("ENT_IOB")
for token in doc1:
assert token.ent_iob == 2
# add entity ruler and run again
patterns = [{"label": "SOFTWARE", "pattern": "spacy"}]
ruler = nlp.add_pipe("entity_ruler")
ruler.add_patterns(patterns)
assert "entity_ruler" in nlp.pipe_names
assert "ner" in nlp.pipe_names
# assert that we still have correct IOB annotations
doc2 = nlp("hi")
assert doc2.has_annotation("ENT_IOB")
for token in doc2:
assert token.ent_iob == 2
@pytest.mark.issue(4313)
def test_issue4313():
"""This should not crash or exit with some strange error code"""
beam_width = 16
beam_density = 0.0001
nlp = English()
config = {
"beam_width": beam_width,
"beam_density": beam_density,
}
ner = nlp.add_pipe("beam_ner", config=config)
ner.add_label("SOME_LABEL")
nlp.initialize()
# add a new label to the doc
doc = nlp("What do you think about Apple ?")
assert len(ner.labels) == 1
assert "SOME_LABEL" in ner.labels
apple_ent = Span(doc, 5, 6, label="MY_ORG")
doc.ents = list(doc.ents) + [apple_ent]
# ensure the beam_parse still works with the new label
docs = [doc]
ner.beam_parse(docs, drop=0.0, beam_width=beam_width, beam_density=beam_density)
assert len(ner.labels) == 2
assert "MY_ORG" in ner.labels
def test_get_oracle_moves(tsys, doc, entity_annots):
example = Example.from_dict(doc, {"entities": entity_annots})
act_classes = tsys.get_oracle_sequence(example, _debug=False)
names = [tsys.get_class_name(act) for act in act_classes]
assert names == ["U-PERSON", "O", "O", "B-GPE", "L-GPE", "O"]
def test_negative_samples_two_word_input(tsys, vocab, neg_key):
"""Test that we don't get stuck in a two word input when we have a negative
span. This could happen if we don't have the right check on the B action.
"""
tsys.cfg["neg_key"] = neg_key
doc = Doc(vocab, words=["A", "B"])
entity_annots = [None, None]
example = Example.from_dict(doc, {"entities": entity_annots})
# These mean that the oracle sequence shouldn't have O for the first
# word, and it shouldn't analyse it as B-PERSON, L-PERSON
example.y.spans[neg_key] = [
Span(example.y, 0, 1, label="O"),
Span(example.y, 0, 2, label="PERSON"),
]
act_classes = tsys.get_oracle_sequence(example)
names = [tsys.get_class_name(act) for act in act_classes]
assert names
assert names[0] != "O"
assert names[0] != "B-PERSON"
assert names[1] != "L-PERSON"
def test_negative_samples_three_word_input(tsys, vocab, neg_key):
"""Test that we exclude a 2-word entity correctly using a negative example."""
tsys.cfg["neg_key"] = neg_key
doc = Doc(vocab, words=["A", "B", "C"])
entity_annots = [None, None, None]
example = Example.from_dict(doc, {"entities": entity_annots})
# These mean that the oracle sequence shouldn't have O for the first
# word, and it shouldn't analyse it as B-PERSON, L-PERSON
example.y.spans[neg_key] = [
Span(example.y, 0, 1, label="O"),
Span(example.y, 0, 2, label="PERSON"),
]
act_classes = tsys.get_oracle_sequence(example)
names = [tsys.get_class_name(act) for act in act_classes]
assert names
assert names[0] != "O"
assert names[1] != "B-PERSON"
def test_negative_samples_U_entity(tsys, vocab, neg_key):
"""Test that we exclude a 2-word entity correctly using a negative example."""
tsys.cfg["neg_key"] = neg_key
doc = Doc(vocab, words=["A"])
entity_annots = [None]
example = Example.from_dict(doc, {"entities": entity_annots})
    # These mean that the oracle sequence shouldn't have O for the single
    # word, and it shouldn't analyse it as U-PERSON
example.y.spans[neg_key] = [
Span(example.y, 0, 1, label="O"),
Span(example.y, 0, 1, label="PERSON"),
]
act_classes = tsys.get_oracle_sequence(example)
names = [tsys.get_class_name(act) for act in act_classes]
assert names
assert names[0] != "O"
assert names[0] != "U-PERSON"
def test_negative_sample_key_is_in_config(vocab, entity_types):
actions = BiluoPushDown.get_actions(entity_types=entity_types)
tsys = BiluoPushDown(vocab.strings, actions, incorrect_spans_key="non_entities")
assert tsys.cfg["neg_key"] == "non_entities"
# We can't easily represent this on a Doc object. Not sure what the best solution
# would be, but I don't think it's an important use case?
@pytest.mark.skip(reason="No longer supported")
def test_oracle_moves_missing_B(en_vocab):
words = ["B", "52", "Bomber"]
biluo_tags = [None, None, "L-PRODUCT"]
doc = Doc(en_vocab, words=words)
example = Example.from_dict(doc, {"words": words, "entities": biluo_tags})
moves = BiluoPushDown(en_vocab.strings)
move_types = ("M", "B", "I", "L", "U", "O")
for tag in biluo_tags:
if tag is None:
continue
elif tag == "O":
moves.add_action(move_types.index("O"), "")
else:
action, label = split_bilu_label(tag)
moves.add_action(move_types.index("B"), label)
moves.add_action(move_types.index("I"), label)
moves.add_action(move_types.index("L"), label)
moves.add_action(move_types.index("U"), label)
moves.get_oracle_sequence(example)
# We can't easily represent this on a Doc object. Not sure what the best solution
# would be, but I don't think it's an important use case?
@pytest.mark.skip(reason="No longer supported")
def test_oracle_moves_whitespace(en_vocab):
words = ["production", "\n", "of", "Northrop", "\n", "Corp.", "\n", "'s", "radar"]
biluo_tags = ["O", "O", "O", "B-ORG", None, "I-ORG", "L-ORG", "O", "O"]
doc = Doc(en_vocab, words=words)
example = Example.from_dict(doc, {"entities": biluo_tags})
moves = BiluoPushDown(en_vocab.strings)
move_types = ("M", "B", "I", "L", "U", "O")
for tag in biluo_tags:
if tag is None:
continue
elif tag == "O":
moves.add_action(move_types.index("O"), "")
else:
action, label = split_bilu_label(tag)
moves.add_action(move_types.index(action), label)
moves.get_oracle_sequence(example)
def test_accept_blocked_token():
"""Test succesful blocking of tokens to be in an entity."""
# 1. test normal behaviour
nlp1 = English()
doc1 = nlp1("I live in New York")
config = {}
ner1 = nlp1.create_pipe("ner", config=config)
assert [token.ent_iob_ for token in doc1] == ["", "", "", "", ""]
assert [token.ent_type_ for token in doc1] == ["", "", "", "", ""]
# Add the OUT action
ner1.moves.add_action(5, "")
ner1.add_label("GPE")
# Get into the state just before "New"
state1 = ner1.moves.init_batch([doc1])[0]
ner1.moves.apply_transition(state1, "O")
ner1.moves.apply_transition(state1, "O")
ner1.moves.apply_transition(state1, "O")
# Check that B-GPE is valid.
assert ner1.moves.is_valid(state1, "B-GPE")
# 2. test blocking behaviour
nlp2 = English()
doc2 = nlp2("I live in New York")
config = {}
ner2 = nlp2.create_pipe("ner", config=config)
# set "New York" to a blocked entity
doc2.set_ents([], blocked=[doc2[3:5]], default="unmodified")
assert [token.ent_iob_ for token in doc2] == ["", "", "", "B", "B"]
assert [token.ent_type_ for token in doc2] == ["", "", "", "", ""]
# Check that B-GPE is now invalid.
ner2.moves.add_action(4, "")
ner2.moves.add_action(5, "")
ner2.add_label("GPE")
state2 = ner2.moves.init_batch([doc2])[0]
ner2.moves.apply_transition(state2, "O")
ner2.moves.apply_transition(state2, "O")
ner2.moves.apply_transition(state2, "O")
# we can only use U- for "New"
assert not ner2.moves.is_valid(state2, "B-GPE")
assert ner2.moves.is_valid(state2, "U-")
ner2.moves.apply_transition(state2, "U-")
# we can only use U- for "York"
assert not ner2.moves.is_valid(state2, "B-GPE")
assert ner2.moves.is_valid(state2, "U-")
def test_train_empty():
"""Test that training an empty text does not throw errors."""
train_data = [
("Who is Shaka Khan?", {"entities": [(7, 17, "PERSON")]}),
("", {"entities": []}),
]
nlp = English()
train_examples = []
for t in train_data:
train_examples.append(Example.from_dict(nlp.make_doc(t[0]), t[1]))
ner = nlp.add_pipe("ner", last=True)
ner.add_label("PERSON")
nlp.initialize()
for itn in range(2):
losses = {}
batches = util.minibatch(train_examples, size=8)
for batch in batches:
nlp.update(batch, losses=losses)
def test_train_negative_deprecated():
"""Test that the deprecated negative entity format raises a custom error."""
train_data = [
("Who is Shaka Khan?", {"entities": [(7, 17, "!PERSON")]}),
]
nlp = English()
train_examples = []
for t in train_data:
train_examples.append(Example.from_dict(nlp.make_doc(t[0]), t[1]))
ner = nlp.add_pipe("ner", last=True)
ner.add_label("PERSON")
nlp.initialize()
for itn in range(2):
losses = {}
batches = util.minibatch(train_examples, size=8)
for batch in batches:
with pytest.raises(ValueError):
nlp.update(batch, losses=losses)
def test_overwrite_token():
nlp = English()
nlp.add_pipe("ner")
nlp.initialize()
# The untrained NER will predict O for each token
doc = nlp("I live in New York")
assert [token.ent_iob_ for token in doc] == ["O", "O", "O", "O", "O"]
assert [token.ent_type_ for token in doc] == ["", "", "", "", ""]
# Check that a new ner can overwrite O
config = {}
ner2 = nlp.create_pipe("ner", config=config)
ner2.moves.add_action(5, "")
ner2.add_label("GPE")
state = ner2.moves.init_batch([doc])[0]
assert ner2.moves.is_valid(state, "B-GPE")
assert ner2.moves.is_valid(state, "U-GPE")
ner2.moves.apply_transition(state, "B-GPE")
assert ner2.moves.is_valid(state, "I-GPE")
assert ner2.moves.is_valid(state, "L-GPE")
def test_empty_ner():
nlp = English()
ner = nlp.add_pipe("ner")
ner.add_label("MY_LABEL")
nlp.initialize()
doc = nlp("John is watching the news about Croatia's elections")
# if this goes wrong, the initialization of the parser's upper layer is probably broken
result = ["O", "O", "O", "O", "O", "O", "O", "O", "O"]
assert [token.ent_iob_ for token in doc] == result
def test_ruler_before_ner():
"""Test that an NER works after an entity_ruler: the second can add annotations"""
nlp = English()
# 1 : Entity Ruler - should set "this" to B and everything else to empty
patterns = [{"label": "THING", "pattern": "This"}]
ruler = nlp.add_pipe("entity_ruler")
# 2: untrained NER - should set everything else to O
untrained_ner = nlp.add_pipe("ner")
untrained_ner.add_label("MY_LABEL")
nlp.initialize()
ruler.add_patterns(patterns)
doc = nlp("This is Antti Korhonen speaking in Finland")
expected_iobs = ["B", "O", "O", "O", "O", "O", "O"]
expected_types = ["THING", "", "", "", "", "", ""]
assert [token.ent_iob_ for token in doc] == expected_iobs
assert [token.ent_type_ for token in doc] == expected_types
def test_ner_constructor(en_vocab):
config = {
"update_with_oracle_cut_size": 100,
}
cfg = {"model": DEFAULT_NER_MODEL}
model = registry.resolve(cfg, validate=True)["model"]
EntityRecognizer(en_vocab, model, **config)
EntityRecognizer(en_vocab, model)
def test_ner_before_ruler():
"""Test that an entity_ruler works after an NER: the second can overwrite O annotations"""
nlp = English()
# 1: untrained NER - should set everything to O
untrained_ner = nlp.add_pipe("ner", name="uner")
untrained_ner.add_label("MY_LABEL")
nlp.initialize()
# 2 : Entity Ruler - should set "this" to B and keep everything else O
patterns = [{"label": "THING", "pattern": "This"}]
ruler = nlp.add_pipe("entity_ruler")
ruler.add_patterns(patterns)
doc = nlp("This is Antti Korhonen speaking in Finland")
expected_iobs = ["B", "O", "O", "O", "O", "O", "O"]
expected_types = ["THING", "", "", "", "", "", ""]
assert [token.ent_iob_ for token in doc] == expected_iobs
assert [token.ent_type_ for token in doc] == expected_types
def test_block_ner():
"""Test functionality for blocking tokens so they can't be in a named entity"""
# block "Antti L Korhonen" from being a named entity
nlp = English()
nlp.add_pipe("blocker", config={"start": 2, "end": 5})
untrained_ner = nlp.add_pipe("ner")
untrained_ner.add_label("MY_LABEL")
nlp.initialize()
doc = nlp("This is Antti L Korhonen speaking in Finland")
expected_iobs = ["O", "O", "B", "B", "B", "O", "O", "O"]
expected_types = ["", "", "", "", "", "", "", ""]
assert [token.ent_iob_ for token in doc] == expected_iobs
assert [token.ent_type_ for token in doc] == expected_types
@pytest.mark.parametrize("use_upper", [True, False])
def test_overfitting_IO(use_upper):
# Simple test to try and quickly overfit the NER component
nlp = English()
ner = nlp.add_pipe("ner", config={"model": {"use_upper": use_upper}})
train_examples = []
for text, annotations in TRAIN_DATA:
train_examples.append(Example.from_dict(nlp.make_doc(text), annotations))
for ent in annotations.get("entities"):
ner.add_label(ent[2])
optimizer = nlp.initialize()
for i in range(50):
losses = {}
nlp.update(train_examples, sgd=optimizer, losses=losses)
assert losses["ner"] < 0.00001
# test the trained model
test_text = "I like London."
doc = nlp(test_text)
ents = doc.ents
assert len(ents) == 1
assert ents[0].text == "London"
assert ents[0].label_ == "LOC"
# Also test the results are still the same after IO
with make_tempdir() as tmp_dir:
nlp.to_disk(tmp_dir)
nlp2 = util.load_model_from_path(tmp_dir)
doc2 = nlp2(test_text)
ents2 = doc2.ents
assert len(ents2) == 1
assert ents2[0].text == "London"
assert ents2[0].label_ == "LOC"
# Ensure that the predictions are still the same, even after adding a new label
ner2 = nlp2.get_pipe("ner")
assert ner2.model.attrs["has_upper"] == use_upper
ner2.add_label("RANDOM_NEW_LABEL")
doc3 = nlp2(test_text)
ents3 = doc3.ents
assert len(ents3) == 1
assert ents3[0].text == "London"
assert ents3[0].label_ == "LOC"
    # Make sure that running pipe twice, or comparing pipe() to a direct call, always gives the same predictions
texts = [
"Just a sentence.",
"Then one more sentence about London.",
"Here is another one.",
"I like London.",
]
batch_deps_1 = [doc.to_array([ENT_IOB]) for doc in nlp.pipe(texts)]
batch_deps_2 = [doc.to_array([ENT_IOB]) for doc in nlp.pipe(texts)]
no_batch_deps = [doc.to_array([ENT_IOB]) for doc in [nlp(text) for text in texts]]
assert_equal(batch_deps_1, batch_deps_2)
assert_equal(batch_deps_1, no_batch_deps)
# test that kb_id is preserved
test_text = "I like London and London."
doc = nlp.make_doc(test_text)
doc.ents = [Span(doc, 2, 3, label="LOC", kb_id=1234)]
ents = doc.ents
assert len(ents) == 1
assert ents[0].text == "London"
assert ents[0].label_ == "LOC"
assert ents[0].kb_id == 1234
doc = nlp.get_pipe("ner")(doc)
ents = doc.ents
assert len(ents) == 2
assert ents[0].text == "London"
assert ents[0].label_ == "LOC"
assert ents[0].kb_id == 1234
# ent added by ner has kb_id == 0
assert ents[1].text == "London"
assert ents[1].label_ == "LOC"
assert ents[1].kb_id == 0
def test_beam_ner_scores():
# Test that we can get confidence values out of the beam_ner pipe
beam_width = 16
beam_density = 0.0001
nlp = English()
config = {
"beam_width": beam_width,
"beam_density": beam_density,
}
ner = nlp.add_pipe("beam_ner", config=config)
train_examples = []
for text, annotations in TRAIN_DATA:
train_examples.append(Example.from_dict(nlp.make_doc(text), annotations))
for ent in annotations.get("entities"):
ner.add_label(ent[2])
optimizer = nlp.initialize()
# update once
losses = {}
nlp.update(train_examples, sgd=optimizer, losses=losses)
# test the scores from the beam
test_text = "I like London."
doc = nlp.make_doc(test_text)
docs = [doc]
beams = ner.predict(docs)
entity_scores = ner.scored_ents(beams)[0]
for j in range(len(doc)):
for label in ner.labels:
score = entity_scores[(j, j + 1, label)]
eps = 0.00001
assert 0 - eps <= score <= 1 + eps
def test_beam_overfitting_IO(neg_key):
# Simple test to try and quickly overfit the Beam NER component
nlp = English()
beam_width = 16
beam_density = 0.0001
config = {
"beam_width": beam_width,
"beam_density": beam_density,
"incorrect_spans_key": neg_key,
}
ner = nlp.add_pipe("beam_ner", config=config)
train_examples = []
for text, annotations in TRAIN_DATA:
train_examples.append(Example.from_dict(nlp.make_doc(text), annotations))
for ent in annotations.get("entities"):
ner.add_label(ent[2])
optimizer = nlp.initialize()
# run overfitting
for i in range(50):
losses = {}
nlp.update(train_examples, sgd=optimizer, losses=losses)
assert losses["beam_ner"] < 0.0001
# test the scores from the beam
test_text = "I like London"
docs = [nlp.make_doc(test_text)]
beams = ner.predict(docs)
entity_scores = ner.scored_ents(beams)[0]
assert entity_scores[(2, 3, "LOC")] == 1.0
assert entity_scores[(2, 3, "PERSON")] == 0.0
assert len(nlp(test_text).ents) == 1
# Also test the results are still the same after IO
with make_tempdir() as tmp_dir:
nlp.to_disk(tmp_dir)
nlp2 = util.load_model_from_path(tmp_dir)
docs2 = [nlp2.make_doc(test_text)]
ner2 = nlp2.get_pipe("beam_ner")
beams2 = ner2.predict(docs2)
entity_scores2 = ner2.scored_ents(beams2)[0]
assert entity_scores2[(2, 3, "LOC")] == 1.0
assert entity_scores2[(2, 3, "PERSON")] == 0.0
# Try to unlearn the entity by using negative annotations
neg_doc = nlp.make_doc(test_text)
neg_ex = Example(neg_doc, neg_doc)
neg_ex.reference.spans[neg_key] = [Span(neg_doc, 2, 3, "LOC")]
neg_train_examples = [neg_ex]
for i in range(20):
losses = {}
nlp.update(neg_train_examples, sgd=optimizer, losses=losses)
# test the "untrained" model
assert len(nlp(test_text).ents) == 0
def test_neg_annotation(neg_key):
"""Check that the NER update works with a negative annotation that is a different label of the correct one,
or partly overlapping, etc"""
nlp = English()
beam_width = 16
beam_density = 0.0001
config = {
"beam_width": beam_width,
"beam_density": beam_density,
"incorrect_spans_key": neg_key,
}
ner = nlp.add_pipe("beam_ner", config=config)
train_text = "Who is Shaka Khan?"
neg_doc = nlp.make_doc(train_text)
ner.add_label("PERSON")
ner.add_label("ORG")
example = Example.from_dict(neg_doc, {"entities": [(7, 17, "PERSON")]})
example.reference.spans[neg_key] = [
Span(example.reference, 2, 4, "ORG"),
Span(example.reference, 2, 3, "PERSON"),
Span(example.reference, 1, 4, "PERSON"),
]
optimizer = nlp.initialize()
for i in range(2):
losses = {}
nlp.update([example], sgd=optimizer, losses=losses)
def test_neg_annotation_conflict(neg_key):
# Check that NER raises for a negative annotation that is THE SAME as a correct one
nlp = English()
beam_width = 16
beam_density = 0.0001
config = {
"beam_width": beam_width,
"beam_density": beam_density,
"incorrect_spans_key": neg_key,
}
ner = nlp.add_pipe("beam_ner", config=config)
train_text = "Who is Shaka Khan?"
neg_doc = nlp.make_doc(train_text)
ner.add_label("PERSON")
ner.add_label("LOC")
example = Example.from_dict(neg_doc, {"entities": [(7, 17, "PERSON")]})
example.reference.spans[neg_key] = [Span(example.reference, 2, 4, "PERSON")]
assert len(example.reference.ents) == 1
assert example.reference.ents[0].text == "Shaka Khan"
assert example.reference.ents[0].label_ == "PERSON"
assert len(example.reference.spans[neg_key]) == 1
assert example.reference.spans[neg_key][0].text == "Shaka Khan"
assert example.reference.spans[neg_key][0].label_ == "PERSON"
optimizer = nlp.initialize()
for i in range(2):
losses = {}
with pytest.raises(ValueError):
nlp.update([example], sgd=optimizer, losses=losses)
def test_beam_valid_parse(neg_key):
"""Regression test for previously flakey behaviour"""
nlp = English()
beam_width = 16
beam_density = 0.0001
config = {
"beam_width": beam_width,
"beam_density": beam_density,
"incorrect_spans_key": neg_key,
}
nlp.add_pipe("beam_ner", config=config)
# fmt: off
tokens = ['FEDERAL', 'NATIONAL', 'MORTGAGE', 'ASSOCIATION', '(', 'Fannie', 'Mae', '):', 'Posted', 'yields', 'on', '30', 'year', 'mortgage', 'commitments', 'for', 'delivery', 'within', '30', 'days', '(', 'priced', 'at', 'par', ')', '9.75', '%', ',', 'standard', 'conventional', 'fixed', '-', 'rate', 'mortgages', ';', '8.70', '%', ',', '6/2', 'rate', 'capped', 'one', '-', 'year', 'adjustable', 'rate', 'mortgages', '.', 'Source', ':', 'Telerate', 'Systems', 'Inc.']
iob = ['B-ORG', 'I-ORG', 'I-ORG', 'L-ORG', 'O', 'B-ORG', 'L-ORG', 'O', 'O', 'O', 'O', 'B-DATE', 'L-DATE', 'O', 'O', 'O', 'O', 'O', 'B-DATE', 'L-DATE', 'O', 'O', 'O', 'O', 'O', 'B-PERCENT', 'L-PERCENT', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B-PERCENT', 'L-PERCENT', 'O', 'U-CARDINAL', 'O', 'O', 'B-DATE', 'I-DATE', 'L-DATE', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O']
# fmt: on
doc = Doc(nlp.vocab, words=tokens)
example = Example.from_dict(doc, {"ner": iob})
neg_span = Span(example.reference, 50, 53, "ORG")
example.reference.spans[neg_key] = [neg_span]
optimizer = nlp.initialize()
for i in range(5):
losses = {}
nlp.update([example], sgd=optimizer, losses=losses)
assert "beam_ner" in losses
def test_ner_warns_no_lookups(caplog):
nlp = English()
assert nlp.lang in util.LEXEME_NORM_LANGS
nlp.vocab.lookups = Lookups()
assert not len(nlp.vocab.lookups)
nlp.add_pipe("ner")
with caplog.at_level(logging.DEBUG):
nlp.initialize()
assert "W033" in caplog.text
caplog.clear()
nlp.vocab.lookups.add_table("lexeme_norm")
nlp.vocab.lookups.get_table("lexeme_norm")["a"] = "A"
with caplog.at_level(logging.DEBUG):
nlp.initialize()
assert "W033" not in caplog.text
@Language.factory("blocker")
class BlockerComponent1:
def __init__(self, nlp, start, end, name="my_blocker"):
self.start = start
self.end = end
self.name = name
def __call__(self, doc):
doc.set_ents([], blocked=[doc[self.start : self.end]], default="unmodified")
return doc

spaCy | spaCy-master/spacy/tests/parser/test_neural_parser.py
import pytest
from thinc.api import Model
from spacy import registry
from spacy.pipeline._parser_internals.arc_eager import ArcEager
from spacy.pipeline.dep_parser import DEFAULT_PARSER_MODEL
from spacy.pipeline.tok2vec import DEFAULT_TOK2VEC_MODEL
from spacy.pipeline.transition_parser import Parser
from spacy.tokens.doc import Doc
from spacy.training import Example
from spacy.vocab import Vocab
@pytest.fixture
def vocab():
return Vocab()
@pytest.fixture
def arc_eager(vocab):
actions = ArcEager.get_actions(left_labels=["L"], right_labels=["R"])
return ArcEager(vocab.strings, actions)
@pytest.fixture
def tok2vec():
cfg = {"model": DEFAULT_TOK2VEC_MODEL}
tok2vec = registry.resolve(cfg, validate=True)["model"]
tok2vec.initialize()
return tok2vec
@pytest.fixture
def parser(vocab, arc_eager):
config = {
"learn_tokens": False,
"min_action_freq": 30,
"update_with_oracle_cut_size": 100,
}
cfg = {"model": DEFAULT_PARSER_MODEL}
model = registry.resolve(cfg, validate=True)["model"]
return Parser(vocab, model, moves=arc_eager, **config)
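
# The parser model's output layer has to be resized to the number of actions in
# the transition system before it can be initialized or used for prediction.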
@pytest.fixture
def model(arc_eager, tok2vec, vocab):
cfg = {"model": DEFAULT_PARSER_MODEL}
model = registry.resolve(cfg, validate=True)["model"]
model.attrs["resize_output"](model, arc_eager.n_moves)
model.initialize()
return model
@pytest.fixture
def doc(vocab):
return Doc(vocab, words=["a", "b", "c"])
@pytest.fixture
def gold(doc):
return {"heads": [1, 1, 1], "deps": ["L", "ROOT", "R"]}
def test_can_init_nn_parser(parser):
assert isinstance(parser.model, Model)
def test_build_model(parser, vocab):
config = {
"learn_tokens": False,
"min_action_freq": 0,
"update_with_oracle_cut_size": 100,
}
cfg = {"model": DEFAULT_PARSER_MODEL}
model = registry.resolve(cfg, validate=True)["model"]
parser.model = Parser(vocab, model=model, moves=parser.moves, **config).model
assert parser.model is not None
def test_predict_doc(parser, tok2vec, model, doc):
doc.tensor = tok2vec.predict([doc])[0]
parser.model = model
parser(doc)
def test_update_doc(parser, model, doc, gold):
parser.model = model
def optimize(key, weights, gradient):
weights -= 0.001 * gradient
return weights, gradient
example = Example.from_dict(doc, gold)
parser.update([example], sgd=optimize)
@pytest.mark.skip(reason="No longer supported")
def test_predict_doc_beam(parser, model, doc):
parser.model = model
parser(doc, beam_width=32, beam_density=0.001)
@pytest.mark.skip(reason="No longer supported")
def test_update_doc_beam(parser, model, doc, gold):
parser.model = model
def optimize(weights, gradient, key=None):
weights -= 0.001 * gradient
parser.update_beam((doc, gold), sgd=optimize)

spaCy | spaCy-master/spacy/tests/parser/test_nn_beam.py
import hypothesis
import hypothesis.strategies
import numpy
import pytest
from thinc.tests.strategies import ndarrays_of_shape
from spacy.language import Language
from spacy.pipeline._parser_internals._beam_utils import BeamBatch
from spacy.pipeline._parser_internals.arc_eager import ArcEager
from spacy.pipeline._parser_internals.stateclass import StateClass
from spacy.tokens import Doc
from spacy.training import Example
from spacy.vocab import Vocab
@pytest.fixture(scope="module")
def vocab():
return Vocab()
@pytest.fixture(scope="module")
def moves(vocab):
aeager = ArcEager(vocab.strings, {})
aeager.add_action(0, "")
aeager.add_action(1, "")
aeager.add_action(2, "nsubj")
aeager.add_action(2, "punct")
aeager.add_action(2, "aux")
aeager.add_action(2, "nsubjpass")
aeager.add_action(3, "dobj")
aeager.add_action(2, "aux")
aeager.add_action(4, "ROOT")
return aeager
@pytest.fixture(scope="module")
def docs(vocab):
return [
Doc(
vocab,
words=["Rats", "bite", "things"],
heads=[1, 1, 1],
deps=["nsubj", "ROOT", "dobj"],
sent_starts=[True, False, False],
)
]
@pytest.fixture(scope="module")
def examples(docs):
return [Example(doc, doc.copy()) for doc in docs]
@pytest.fixture
def states(docs):
return [StateClass(doc) for doc in docs]
@pytest.fixture
def tokvecs(docs, vector_size):
output = []
for doc in docs:
vec = numpy.random.uniform(-0.1, 0.1, (len(doc), vector_size))
output.append(numpy.asarray(vec))
return output
@pytest.fixture(scope="module")
def batch_size(docs):
return len(docs)
@pytest.fixture(scope="module")
def beam_width():
return 4
@pytest.fixture(params=[0.0, 0.5, 1.0])
def beam_density(request):
return request.param
@pytest.fixture
def vector_size():
return 6
@pytest.fixture
def beam(moves, examples, beam_width):
states, golds, _ = moves.init_gold_batch(examples)
return BeamBatch(moves, states, golds, width=beam_width, density=0.0)
@pytest.fixture
def scores(moves, batch_size, beam_width):
return numpy.asarray(
numpy.concatenate(
[
numpy.random.uniform(-0.1, 0.1, (beam_width, moves.n_moves))
for _ in range(batch_size)
]
),
dtype="float32",
)
def test_create_beam(beam):
pass
def test_beam_advance(beam, scores):
beam.advance(scores)
def test_beam_advance_too_few_scores(beam, scores):
n_state = sum(len(beam) for beam in beam)
scores = scores[:n_state]
with pytest.raises(IndexError):
beam.advance(scores[:-1])
def test_beam_parse(examples, beam_width):
nlp = Language()
parser = nlp.add_pipe("beam_parser")
parser.cfg["beam_width"] = beam_width
parser.add_label("nsubj")
parser.initialize(lambda: examples)
doc = nlp.make_doc("Australia is a country")
parser(doc)
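
# beam_density prunes candidate states: every surviving state's probability must
# be at least density * the probability of the best state in its beam.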
@hypothesis.given(hyp=hypothesis.strategies.data())
def test_beam_density(moves, examples, beam_width, hyp):
beam_density = float(hyp.draw(hypothesis.strategies.floats(0.0, 1.0, width=32)))
states, golds, _ = moves.init_gold_batch(examples)
beam = BeamBatch(moves, states, golds, width=beam_width, density=beam_density)
n_state = sum(len(beam) for beam in beam)
scores = hyp.draw(ndarrays_of_shape((n_state, moves.n_moves)))
beam.advance(scores)
for b in beam:
beam_probs = b.probs
assert b.min_density == beam_density
assert beam_probs[-1] >= beam_probs[0] * beam_density

spaCy | spaCy-master/spacy/tests/parser/test_nonproj.py
import pytest
from spacy.pipeline._parser_internals import nonproj
from spacy.pipeline._parser_internals.nonproj import (
ancestors,
contains_cycle,
is_nonproj_arc,
is_nonproj_tree,
)
from spacy.tokens import Doc
@pytest.fixture
def tree():
return [1, 2, 2, 4, 5, 2, 2]
@pytest.fixture
def cyclic_tree():
return [1, 2, 2, 4, 5, 3, 2]
@pytest.fixture
def partial_tree():
return [1, 2, 2, 4, 5, None, 7, 4, 2]
@pytest.fixture
def nonproj_tree():
return [1, 2, 2, 4, 5, 2, 7, 4, 2]
@pytest.fixture
def proj_tree():
return [1, 2, 2, 4, 5, 2, 7, 5, 2]
@pytest.fixture
def multirooted_tree():
return [3, 2, 0, 3, 3, 7, 7, 3, 7, 10, 7, 10, 11, 12, 18, 16, 18, 17, 12, 3]
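
# The tree fixtures above encode one head index per token: e.g. in `tree`, token 0
# is headed by token 1, and token 2 heads itself, making it the root.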
def test_parser_ancestors(tree, cyclic_tree, partial_tree, multirooted_tree):
assert [a for a in ancestors(3, tree)] == [4, 5, 2]
assert [a for a in ancestors(3, cyclic_tree)] == [4, 5, 3, 4, 5, 3, 4]
assert [a for a in ancestors(3, partial_tree)] == [4, 5, None]
assert [a for a in ancestors(17, multirooted_tree)] == []
def test_parser_contains_cycle(tree, cyclic_tree, partial_tree, multirooted_tree):
assert contains_cycle(tree) is None
assert contains_cycle(cyclic_tree) == {3, 4, 5}
assert contains_cycle(partial_tree) is None
assert contains_cycle(multirooted_tree) is None
def test_parser_is_nonproj_arc(
cyclic_tree, nonproj_tree, partial_tree, multirooted_tree
):
assert is_nonproj_arc(0, nonproj_tree) is False
assert is_nonproj_arc(1, nonproj_tree) is False
assert is_nonproj_arc(2, nonproj_tree) is False
assert is_nonproj_arc(3, nonproj_tree) is False
assert is_nonproj_arc(4, nonproj_tree) is False
assert is_nonproj_arc(5, nonproj_tree) is False
assert is_nonproj_arc(6, nonproj_tree) is False
assert is_nonproj_arc(7, nonproj_tree) is True
assert is_nonproj_arc(8, nonproj_tree) is False
assert is_nonproj_arc(7, partial_tree) is False
assert is_nonproj_arc(17, multirooted_tree) is False
assert is_nonproj_arc(16, multirooted_tree) is True
with pytest.raises(
ValueError, match=r"Found cycle in dependency graph: \[1, 2, 2, 4, 5, 3, 2\]"
):
is_nonproj_arc(6, cyclic_tree)
def test_parser_is_nonproj_tree(
proj_tree, cyclic_tree, nonproj_tree, partial_tree, multirooted_tree
):
assert is_nonproj_tree(proj_tree) is False
assert is_nonproj_tree(nonproj_tree) is True
assert is_nonproj_tree(partial_tree) is False
assert is_nonproj_tree(multirooted_tree) is True
with pytest.raises(
ValueError, match=r"Found cycle in dependency graph: \[1, 2, 2, 4, 5, 3, 2\]"
):
is_nonproj_tree(cyclic_tree)
def test_parser_pseudoprojectivity(en_vocab):
def deprojectivize(proj_heads, deco_labels):
words = ["whatever "] * len(proj_heads)
doc = Doc(en_vocab, words=words, deps=deco_labels, heads=proj_heads)
nonproj.deprojectivize(doc)
return [t.head.i for t in doc], [token.dep_ for token in doc]
# fmt: off
tree = [1, 2, 2]
nonproj_tree = [1, 2, 2, 4, 5, 2, 7, 4, 2]
nonproj_tree2 = [9, 1, 3, 1, 5, 6, 9, 8, 6, 1, 6, 12, 13, 10, 1]
cyclic_tree = [1, 2, 2, 4, 5, 3, 2]
labels = ["det", "nsubj", "root", "det", "dobj", "aux", "nsubj", "acl", "punct"]
labels2 = ["advmod", "root", "det", "nsubj", "advmod", "det", "dobj", "det", "nmod", "aux", "nmod", "advmod", "det", "amod", "punct"]
cyclic_labels = ["det", "nsubj", "root", "det", "dobj", "aux", "punct"]
# fmt: on
assert nonproj.decompose("X||Y") == ("X", "Y")
assert nonproj.decompose("X") == ("X", "")
assert nonproj.is_decorated("X||Y") is True
assert nonproj.is_decorated("X") is False
nonproj._lift(0, tree)
assert tree == [2, 2, 2]
assert nonproj.get_smallest_nonproj_arc_slow(nonproj_tree) == 7
assert nonproj.get_smallest_nonproj_arc_slow(nonproj_tree2) == 10
# fmt: off
proj_heads, deco_labels = nonproj.projectivize(nonproj_tree, labels)
with pytest.raises(ValueError, match=r'Found cycle in dependency graph: \[1, 2, 2, 4, 5, 3, 2\]'):
nonproj.projectivize(cyclic_tree, cyclic_labels)
assert proj_heads == [1, 2, 2, 4, 5, 2, 7, 5, 2]
assert deco_labels == ["det", "nsubj", "root", "det", "dobj", "aux",
"nsubj", "acl||dobj", "punct"]
deproj_heads, undeco_labels = deprojectivize(proj_heads, deco_labels)
assert deproj_heads == nonproj_tree
assert undeco_labels == labels
proj_heads, deco_labels = nonproj.projectivize(nonproj_tree2, labels2)
assert proj_heads == [1, 1, 3, 1, 5, 6, 9, 8, 6, 1, 9, 12, 13, 10, 1]
assert deco_labels == ["advmod||aux", "root", "det", "nsubj", "advmod",
"det", "dobj", "det", "nmod", "aux", "nmod||dobj",
"advmod", "det", "amod", "punct"]
deproj_heads, undeco_labels = deprojectivize(proj_heads, deco_labels)
assert deproj_heads == nonproj_tree2
assert undeco_labels == labels2
# if decoration is wrong such that there is no head with the desired label
# the structure is kept and the label is undecorated
proj_heads = [1, 2, 2, 4, 5, 2, 7, 5, 2]
deco_labels = ["det", "nsubj", "root", "det", "dobj", "aux", "nsubj",
"acl||iobj", "punct"]
deproj_heads, undeco_labels = deprojectivize(proj_heads, deco_labels)
assert deproj_heads == proj_heads
assert undeco_labels == ["det", "nsubj", "root", "det", "dobj", "aux",
"nsubj", "acl", "punct"]
# if there are two potential new heads, the first one is chosen even if
# it's wrong
proj_heads = [1, 1, 3, 1, 5, 6, 9, 8, 6, 1, 9, 12, 13, 10, 1]
deco_labels = ["advmod||aux", "root", "det", "aux", "advmod", "det",
"dobj", "det", "nmod", "aux", "nmod||dobj", "advmod",
"det", "amod", "punct"]
deproj_heads, undeco_labels = deprojectivize(proj_heads, deco_labels)
assert deproj_heads == [3, 1, 3, 1, 5, 6, 9, 8, 6, 1, 6, 12, 13, 10, 1]
assert undeco_labels == ["advmod", "root", "det", "aux", "advmod", "det",
"dobj", "det", "nmod", "aux", "nmod", "advmod",
"det", "amod", "punct"]
# fmt: on
# File: spaCy-master/spacy/tests/parser/test_parse.py

import pytest
from numpy.testing import assert_equal
from thinc.api import Adam
from spacy import registry, util
from spacy.attrs import DEP, NORM
from spacy.lang.en import English
from spacy.pipeline import DependencyParser
from spacy.pipeline.dep_parser import DEFAULT_PARSER_MODEL
from spacy.pipeline.tok2vec import DEFAULT_TOK2VEC_MODEL
from spacy.tokens import Doc
from spacy.training import Example
from spacy.vocab import Vocab
from ..util import apply_transition_sequence, make_tempdir
TRAIN_DATA = [
(
"They trade mortgage-backed securities.",
{
"heads": [1, 1, 4, 4, 5, 1, 1],
"deps": ["nsubj", "ROOT", "compound", "punct", "nmod", "dobj", "punct"],
},
),
(
"I like London and Berlin.",
{
"heads": [1, 1, 1, 2, 2, 1],
"deps": ["nsubj", "ROOT", "dobj", "cc", "conj", "punct"],
},
),
]
CONFLICTING_DATA = [
(
"I like London and Berlin.",
{
"heads": [1, 1, 1, 2, 2, 1],
"deps": ["nsubj", "ROOT", "dobj", "cc", "conj", "punct"],
},
),
(
"I like London and Berlin.",
{
"heads": [0, 0, 0, 0, 0, 0],
"deps": ["ROOT", "nsubj", "nsubj", "cc", "conj", "punct"],
},
),
]
PARTIAL_DATA = [
(
"I like London.",
{
"heads": [1, 1, 1, None],
"deps": ["nsubj", "ROOT", "dobj", None],
},
),
]
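# Tolerance shared by the beam score assertions further down in this module.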
eps = 0.1
@pytest.fixture
def vocab():
return Vocab(lex_attr_getters={NORM: lambda s: s})
@pytest.fixture
def parser(vocab):
vocab.strings.add("ROOT")
cfg = {"model": DEFAULT_PARSER_MODEL}
model = registry.resolve(cfg, validate=True)["model"]
parser = DependencyParser(vocab, model)
parser.cfg["token_vector_width"] = 4
parser.cfg["hidden_width"] = 32
# parser.add_label('right')
parser.add_label("left")
parser.initialize(lambda: [_parser_example(parser)])
sgd = Adam(0.001)
for i in range(10):
losses = {}
doc = Doc(vocab, words=["a", "b", "c", "d"])
example = Example.from_dict(
doc, {"heads": [1, 1, 3, 3], "deps": ["left", "ROOT", "left", "ROOT"]}
)
parser.update([example], sgd=sgd, losses=losses)
return parser
def _parser_example(parser):
doc = Doc(parser.vocab, words=["a", "b", "c", "d"])
gold = {"heads": [1, 1, 3, 3], "deps": ["right", "ROOT", "left", "ROOT"]}
return Example.from_dict(doc, gold)
@pytest.mark.issue(2772)
def test_issue2772(en_vocab):
"""Test that deprojectivization doesn't mess up sentence boundaries."""
# fmt: off
words = ["When", "we", "write", "or", "communicate", "virtually", ",", "we", "can", "hide", "our", "true", "feelings", "."]
# fmt: on
# A tree with a non-projective (i.e. crossing) arc
# The arcs (0, 4) and (2, 9) cross.
heads = [4, 2, 9, 2, 2, 4, 9, 9, 9, 9, 12, 12, 9, 9]
deps = ["dep"] * len(heads)
doc = Doc(en_vocab, words=words, heads=heads, deps=deps)
assert doc[1].is_sent_start is False
@pytest.mark.issue(3830)
def test_issue3830_no_subtok():
"""Test that the parser doesn't have subtok label if not learn_tokens"""
config = {
"learn_tokens": False,
}
model = registry.resolve({"model": DEFAULT_PARSER_MODEL}, validate=True)["model"]
parser = DependencyParser(Vocab(), model, **config)
parser.add_label("nsubj")
assert "subtok" not in parser.labels
parser.initialize(lambda: [_parser_example(parser)])
assert "subtok" not in parser.labels
@pytest.mark.issue(3830)
def test_issue3830_with_subtok():
"""Test that the parser does have subtok label if learn_tokens=True."""
config = {
"learn_tokens": True,
}
model = registry.resolve({"model": DEFAULT_PARSER_MODEL}, validate=True)["model"]
parser = DependencyParser(Vocab(), model, **config)
parser.add_label("nsubj")
assert "subtok" not in parser.labels
parser.initialize(lambda: [_parser_example(parser)])
assert "subtok" in parser.labels
@pytest.mark.issue(7716)
@pytest.mark.xfail(reason="Not fixed yet")
def test_partial_annotation(parser):
doc = Doc(parser.vocab, words=["a", "b", "c", "d"])
doc[2].is_sent_start = False
# Note that if the following line is used, then doc[2].is_sent_start == False
# doc[3].is_sent_start = False
doc = parser(doc)
assert doc[2].is_sent_start == False
def test_parser_root(en_vocab):
words = ["i", "do", "n't", "have", "other", "assistance"]
heads = [3, 3, 3, 3, 5, 3]
deps = ["nsubj", "aux", "neg", "ROOT", "amod", "dobj"]
doc = Doc(en_vocab, words=words, heads=heads, deps=deps)
for t in doc:
assert t.dep != 0, t.text
@pytest.mark.skip(
reason="The step_through API was removed (but should be brought back)"
)
@pytest.mark.parametrize("words", [["Hello"]])
def test_parser_parse_one_word_sentence(en_vocab, en_parser, words):
doc = Doc(en_vocab, words=words, heads=[0], deps=["ROOT"])
assert len(doc) == 1
with en_parser.step_through(doc) as _: # noqa: F841
pass
assert doc[0].dep != 0
@pytest.mark.skip(
reason="The step_through API was removed (but should be brought back)"
)
def test_parser_initial(en_vocab, en_parser):
words = ["I", "ate", "the", "pizza", "with", "anchovies", "."]
transition = ["L-nsubj", "S", "L-det"]
doc = Doc(en_vocab, words=words)
apply_transition_sequence(en_parser, doc, transition)
assert doc[0].head.i == 1
assert doc[1].head.i == 1
assert doc[2].head.i == 3
assert doc[3].head.i == 3
def test_parser_parse_subtrees(en_vocab, en_parser):
words = ["The", "four", "wheels", "on", "the", "bus", "turned", "quickly"]
heads = [2, 2, 6, 2, 5, 3, 6, 6]
deps = ["dep"] * len(heads)
doc = Doc(en_vocab, words=words, heads=heads, deps=deps)
assert len(list(doc[2].lefts)) == 2
assert len(list(doc[2].rights)) == 1
assert len(list(doc[2].children)) == 3
assert len(list(doc[5].lefts)) == 1
assert len(list(doc[5].rights)) == 0
assert len(list(doc[5].children)) == 1
assert len(list(doc[2].subtree)) == 6
def test_parser_merge_pp(en_vocab):
words = ["A", "phrase", "with", "another", "phrase", "occurs"]
heads = [1, 5, 1, 4, 2, 5]
deps = ["det", "nsubj", "prep", "det", "pobj", "ROOT"]
pos = ["DET", "NOUN", "ADP", "DET", "NOUN", "VERB"]
doc = Doc(en_vocab, words=words, deps=deps, heads=heads, pos=pos)
with doc.retokenize() as retokenizer:
for np in doc.noun_chunks:
retokenizer.merge(np, attrs={"lemma": np.lemma_})
assert doc[0].text == "A phrase"
assert doc[1].text == "with"
assert doc[2].text == "another phrase"
assert doc[3].text == "occurs"
@pytest.mark.skip(
reason="The step_through API was removed (but should be brought back)"
)
def test_parser_arc_eager_finalize_state(en_vocab, en_parser):
words = ["a", "b", "c", "d", "e"]
# right branching
transition = ["R-nsubj", "D", "R-nsubj", "R-nsubj", "D", "R-ROOT"]
tokens = Doc(en_vocab, words=words)
apply_transition_sequence(en_parser, tokens, transition)
assert tokens[0].n_lefts == 0
assert tokens[0].n_rights == 2
assert tokens[0].left_edge.i == 0
assert tokens[0].right_edge.i == 4
assert tokens[0].head.i == 0
assert tokens[1].n_lefts == 0
assert tokens[1].n_rights == 0
assert tokens[1].left_edge.i == 1
assert tokens[1].right_edge.i == 1
assert tokens[1].head.i == 0
assert tokens[2].n_lefts == 0
assert tokens[2].n_rights == 2
assert tokens[2].left_edge.i == 2
assert tokens[2].right_edge.i == 4
assert tokens[2].head.i == 0
assert tokens[3].n_lefts == 0
assert tokens[3].n_rights == 0
assert tokens[3].left_edge.i == 3
assert tokens[3].right_edge.i == 3
assert tokens[3].head.i == 2
assert tokens[4].n_lefts == 0
assert tokens[4].n_rights == 0
assert tokens[4].left_edge.i == 4
assert tokens[4].right_edge.i == 4
assert tokens[4].head.i == 2
# left branching
transition = ["S", "S", "S", "L-nsubj", "L-nsubj", "L-nsubj", "L-nsubj"]
tokens = Doc(en_vocab, words=words)
apply_transition_sequence(en_parser, tokens, transition)
assert tokens[0].n_lefts == 0
assert tokens[0].n_rights == 0
assert tokens[0].left_edge.i == 0
assert tokens[0].right_edge.i == 0
assert tokens[0].head.i == 4
assert tokens[1].n_lefts == 0
assert tokens[1].n_rights == 0
assert tokens[1].left_edge.i == 1
assert tokens[1].right_edge.i == 1
assert tokens[1].head.i == 4
assert tokens[2].n_lefts == 0
assert tokens[2].n_rights == 0
assert tokens[2].left_edge.i == 2
assert tokens[2].right_edge.i == 2
assert tokens[2].head.i == 4
assert tokens[3].n_lefts == 0
assert tokens[3].n_rights == 0
assert tokens[3].left_edge.i == 3
assert tokens[3].right_edge.i == 3
assert tokens[3].head.i == 4
assert tokens[4].n_lefts == 4
assert tokens[4].n_rights == 0
assert tokens[4].left_edge.i == 0
assert tokens[4].right_edge.i == 4
assert tokens[4].head.i == 4
def test_parser_set_sent_starts(en_vocab):
# fmt: off
words = ['Ein', 'Satz', '.', 'Außerdem', 'ist', 'Zimmer', 'davon', 'überzeugt', ',', 'dass', 'auch', 'epige-', '\n', 'netische', 'Mechanismen', 'eine', 'Rolle', 'spielen', ',', 'also', 'Vorgänge', ',', 'die', '\n', 'sich', 'darauf', 'auswirken', ',', 'welche', 'Gene', 'abgelesen', 'werden', 'und', '\n', 'welche', 'nicht', '.', '\n']
heads = [1, 1, 1, 30, 4, 4, 7, 4, 7, 17, 14, 14, 11, 14, 17, 16, 17, 6, 17, 20, 11, 20, 26, 22, 26, 26, 20, 26, 29, 31, 31, 25, 31, 32, 17, 4, 4, 36]
deps = ['nk', 'ROOT', 'punct', 'mo', 'ROOT', 'sb', 'op', 'pd', 'punct', 'cp', 'mo', 'nk', '', 'nk', 'sb', 'nk', 'oa', 're', 'punct', 'mo', 'app', 'punct', 'sb', '', 'oa', 'op', 'rc', 'punct', 'nk', 'sb', 'oc', 're', 'cd', '', 'oa', 'ng', 'punct', '']
# fmt: on
doc = Doc(en_vocab, words=words, deps=deps, heads=heads)
for i in range(len(words)):
if i == 0 or i == 3:
assert doc[i].is_sent_start is True
else:
assert doc[i].is_sent_start is False
for sent in doc.sents:
for token in sent:
assert token.head in sent
def test_parser_constructor(en_vocab):
config = {
"learn_tokens": False,
"min_action_freq": 30,
"update_with_oracle_cut_size": 100,
}
cfg = {"model": DEFAULT_PARSER_MODEL}
model = registry.resolve(cfg, validate=True)["model"]
DependencyParser(en_vocab, model, **config)
DependencyParser(en_vocab, model)
@pytest.mark.parametrize("pipe_name", ["parser", "beam_parser"])
def test_incomplete_data(pipe_name):
# Test that the parser works with incomplete information
nlp = English()
parser = nlp.add_pipe(pipe_name)
train_examples = []
for text, annotations in PARTIAL_DATA:
train_examples.append(Example.from_dict(nlp.make_doc(text), annotations))
for dep in annotations.get("deps", []):
if dep is not None:
parser.add_label(dep)
optimizer = nlp.initialize(get_examples=lambda: train_examples)
for i in range(150):
losses = {}
nlp.update(train_examples, sgd=optimizer, losses=losses)
assert losses[pipe_name] < 0.0001
# test the trained model
test_text = "I like securities."
doc = nlp(test_text)
assert doc[0].dep_ == "nsubj"
assert doc[2].dep_ == "dobj"
assert doc[0].head.i == 1
assert doc[2].head.i == 1
@pytest.mark.parametrize("pipe_name", ["parser", "beam_parser"])
def test_overfitting_IO(pipe_name):
# Simple test to try and quickly overfit the dependency parser (normal or beam)
nlp = English()
parser = nlp.add_pipe(pipe_name)
train_examples = []
for text, annotations in TRAIN_DATA:
train_examples.append(Example.from_dict(nlp.make_doc(text), annotations))
for dep in annotations.get("deps", []):
parser.add_label(dep)
optimizer = nlp.initialize()
# run overfitting
for i in range(200):
losses = {}
nlp.update(train_examples, sgd=optimizer, losses=losses)
assert losses[pipe_name] < 0.0001
# test the trained model
test_text = "I like securities."
doc = nlp(test_text)
assert doc[0].dep_ == "nsubj"
assert doc[2].dep_ == "dobj"
assert doc[3].dep_ == "punct"
assert doc[0].head.i == 1
assert doc[2].head.i == 1
assert doc[3].head.i == 1
# Also test the results are still the same after IO
with make_tempdir() as tmp_dir:
nlp.to_disk(tmp_dir)
nlp2 = util.load_model_from_path(tmp_dir)
doc2 = nlp2(test_text)
assert doc2[0].dep_ == "nsubj"
assert doc2[2].dep_ == "dobj"
assert doc2[3].dep_ == "punct"
assert doc2[0].head.i == 1
assert doc2[2].head.i == 1
assert doc2[3].head.i == 1
# Make sure that running pipe twice, or comparing to call, always amounts to the same predictions
texts = [
"Just a sentence.",
"Then one more sentence about London.",
"Here is another one.",
"I like London.",
]
batch_deps_1 = [doc.to_array([DEP]) for doc in nlp.pipe(texts)]
batch_deps_2 = [doc.to_array([DEP]) for doc in nlp.pipe(texts)]
no_batch_deps = [doc.to_array([DEP]) for doc in [nlp(text) for text in texts]]
assert_equal(batch_deps_1, batch_deps_2)
assert_equal(batch_deps_1, no_batch_deps)
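# The slow parametrized test below runs a few update steps with both the v1 and
# v2 TransitionBasedParser architectures to check that the configs resolve and train.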
# fmt: off
@pytest.mark.slow
@pytest.mark.parametrize("pipe_name", ["parser", "beam_parser"])
@pytest.mark.parametrize(
"parser_config",
[
# TransitionBasedParser V1
({"@architectures": "spacy.TransitionBasedParser.v1", "tok2vec": DEFAULT_TOK2VEC_MODEL, "state_type": "parser", "extra_state_tokens": False, "hidden_width": 64, "maxout_pieces": 2, "use_upper": True}),
# TransitionBasedParser V2
({"@architectures": "spacy.TransitionBasedParser.v2", "tok2vec": DEFAULT_TOK2VEC_MODEL, "state_type": "parser", "extra_state_tokens": False, "hidden_width": 64, "maxout_pieces": 2, "use_upper": True}),
],
)
# fmt: on
def test_parser_configs(pipe_name, parser_config):
pipe_config = {"model": parser_config}
nlp = English()
parser = nlp.add_pipe(pipe_name, config=pipe_config)
train_examples = []
for text, annotations in TRAIN_DATA:
train_examples.append(Example.from_dict(nlp.make_doc(text), annotations))
for dep in annotations.get("deps", []):
parser.add_label(dep)
optimizer = nlp.initialize()
for i in range(5):
losses = {}
nlp.update(train_examples, sgd=optimizer, losses=losses)
def test_beam_parser_scores():
# Test that we can get confidence values out of the beam_parser pipe
beam_width = 16
beam_density = 0.0001
nlp = English()
config = {
"beam_width": beam_width,
"beam_density": beam_density,
}
parser = nlp.add_pipe("beam_parser", config=config)
train_examples = []
for text, annotations in CONFLICTING_DATA:
train_examples.append(Example.from_dict(nlp.make_doc(text), annotations))
for dep in annotations.get("deps", []):
parser.add_label(dep)
optimizer = nlp.initialize()
# update a bit with conflicting data
for i in range(10):
losses = {}
nlp.update(train_examples, sgd=optimizer, losses=losses)
# test the scores from the beam
test_text = "I like securities."
doc = nlp.make_doc(test_text)
docs = [doc]
beams = parser.predict(docs)
head_scores, label_scores = parser.scored_parses(beams)
for j in range(len(doc)):
for label in parser.labels:
label_score = label_scores[0][(j, label)]
assert 0 - eps <= label_score <= 1 + eps
for i in range(len(doc)):
head_score = head_scores[0][(j, i)]
assert 0 - eps <= head_score <= 1 + eps
def test_beam_overfitting_IO():
# Simple test to try and quickly overfit the Beam dependency parser
nlp = English()
beam_width = 16
beam_density = 0.0001
config = {
"beam_width": beam_width,
"beam_density": beam_density,
}
parser = nlp.add_pipe("beam_parser", config=config)
train_examples = []
for text, annotations in TRAIN_DATA:
train_examples.append(Example.from_dict(nlp.make_doc(text), annotations))
for dep in annotations.get("deps", []):
parser.add_label(dep)
optimizer = nlp.initialize()
# run overfitting
for i in range(150):
losses = {}
nlp.update(train_examples, sgd=optimizer, losses=losses)
assert losses["beam_parser"] < 0.0001
# test the scores from the beam
test_text = "I like securities."
docs = [nlp.make_doc(test_text)]
beams = parser.predict(docs)
head_scores, label_scores = parser.scored_parses(beams)
# we only processed one document
head_scores = head_scores[0]
label_scores = label_scores[0]
# test label annotations: 0=nsubj, 2=dobj, 3=punct
assert label_scores[(0, "nsubj")] == pytest.approx(1.0, abs=eps)
assert label_scores[(0, "dobj")] == pytest.approx(0.0, abs=eps)
assert label_scores[(0, "punct")] == pytest.approx(0.0, abs=eps)
assert label_scores[(2, "nsubj")] == pytest.approx(0.0, abs=eps)
assert label_scores[(2, "dobj")] == pytest.approx(1.0, abs=eps)
assert label_scores[(2, "punct")] == pytest.approx(0.0, abs=eps)
assert label_scores[(3, "nsubj")] == pytest.approx(0.0, abs=eps)
assert label_scores[(3, "dobj")] == pytest.approx(0.0, abs=eps)
assert label_scores[(3, "punct")] == pytest.approx(1.0, abs=eps)
# test head annotations: the root is token at index 1
assert head_scores[(0, 0)] == pytest.approx(0.0, abs=eps)
assert head_scores[(0, 1)] == pytest.approx(1.0, abs=eps)
assert head_scores[(0, 2)] == pytest.approx(0.0, abs=eps)
assert head_scores[(2, 0)] == pytest.approx(0.0, abs=eps)
assert head_scores[(2, 1)] == pytest.approx(1.0, abs=eps)
assert head_scores[(2, 2)] == pytest.approx(0.0, abs=eps)
assert head_scores[(3, 0)] == pytest.approx(0.0, abs=eps)
assert head_scores[(3, 1)] == pytest.approx(1.0, abs=eps)
assert head_scores[(3, 2)] == pytest.approx(0.0, abs=eps)
# Also test the results are still the same after IO
with make_tempdir() as tmp_dir:
nlp.to_disk(tmp_dir)
nlp2 = util.load_model_from_path(tmp_dir)
docs2 = [nlp2.make_doc(test_text)]
parser2 = nlp2.get_pipe("beam_parser")
beams2 = parser2.predict(docs2)
head_scores2, label_scores2 = parser2.scored_parses(beams2)
# we only processed one document
head_scores2 = head_scores2[0]
label_scores2 = label_scores2[0]
# check the results again
assert label_scores2[(0, "nsubj")] == pytest.approx(1.0, abs=eps)
assert label_scores2[(0, "dobj")] == pytest.approx(0.0, abs=eps)
assert label_scores2[(0, "punct")] == pytest.approx(0.0, abs=eps)
assert label_scores2[(2, "nsubj")] == pytest.approx(0.0, abs=eps)
assert label_scores2[(2, "dobj")] == pytest.approx(1.0, abs=eps)
assert label_scores2[(2, "punct")] == pytest.approx(0.0, abs=eps)
assert label_scores2[(3, "nsubj")] == pytest.approx(0.0, abs=eps)
assert label_scores2[(3, "dobj")] == pytest.approx(0.0, abs=eps)
assert label_scores2[(3, "punct")] == pytest.approx(1.0, abs=eps)
assert head_scores2[(0, 0)] == pytest.approx(0.0, abs=eps)
assert head_scores2[(0, 1)] == pytest.approx(1.0, abs=eps)
assert head_scores2[(0, 2)] == pytest.approx(0.0, abs=eps)
assert head_scores2[(2, 0)] == pytest.approx(0.0, abs=eps)
assert head_scores2[(2, 1)] == pytest.approx(1.0, abs=eps)
assert head_scores2[(2, 2)] == pytest.approx(0.0, abs=eps)
assert head_scores2[(3, 0)] == pytest.approx(0.0, abs=eps)
assert head_scores2[(3, 1)] == pytest.approx(1.0, abs=eps)
assert head_scores2[(3, 2)] == pytest.approx(0.0, abs=eps)
# File: spaCy-master/spacy/tests/parser/test_parse_navigate.py

import pytest
from spacy.tokens import Doc
@pytest.fixture
def words():
# fmt: off
return [
"\n", "It", "was", "a", "bright", "cold", "day", "in", "April", ",",
"and", "the", "clocks", "were", "striking", "thirteen", ".", "\n",
"Winston", "Smith", ",", "his", "chin", "nuzzled", "into", "his",
"breast", "in", "an", "effort", "to", "escape", "the", "\n", "vile",
"wind", ",", "slipped", "quickly", "through", "the", "glass", "doors",
"of", "Victory", "Mansions", ",", "\n", "though", "not", "quickly",
"enough", "to", "prevent", "a", "swirl", "of", "gritty", "dust",
"from", "entering", "\n", "along", "with", "him", ".", "\n\n", "The",
"hallway", "smelt", "of", "boiled", "cabbage", "and", "old", "rag",
"mats", ".", "At", "one", "end", "of", "it", "a", "\n", "coloured",
"poster", ",", "too", "large", "for", "indoor", "display", ",", "had",
"been", "tacked", "to", "the", "wall", ".", "\n", "It", "depicted",
"simply", "an", "enormous", "face", ",", "more", "than", "a", "metre",
"wide", ":", "the", "face", "of", "a", "\n", "man", "of", "about",
"forty", "-", "five", ",", "with", "a", "heavy", "black", "moustache",
"and", "ruggedly", "handsome", "\n", "features", ".", "Winston", "made",
"for", "the", "stairs", ".", "It", "was", "no", "use", "trying", "the",
"lift", ".", "Even", "at", "\n", "the", "best", "of", "times", "it",
"was", "seldom", "working", ",", "and", "at", "present", "the",
"electric", "current", "\n", "was", "cut", "off", "during", "daylight",
"hours", ".", "It", "was", "part", "of", "the", "economy", "drive",
"in", "\n", "preparation", "for", "Hate", "Week", ".", "The", "flat",
"was", "seven", "flights", "up", ",", "and", "Winston", ",", "who",
"\n", "was", "thirty", "-", "nine", "and", "had", "a", "varicose",
"ulcer", "above", "his", "right", "ankle", ",", "went", "slowly", ",",
"\n", "resting", "several", "times", "on", "the", "way", ".", "On",
"each", "landing", ",", "opposite", "the", "lift", "-", "shaft", ",",
"\n", "the", "poster", "with", "the", "enormous", "face", "gazed",
"from", "the", "wall", ".", "It", "was", "one", "of", "those", "\n",
"pictures", "which", "are", "so", "contrived", "that", "the", "eyes",
"follow", "you", "about", "when", "you", "move", ".", "\n", "BIG",
"BROTHER", "IS", "WATCHING", "YOU", ",", "the", "caption", "beneath",
"it", "ran", ".", "\n", ]
# fmt: on
@pytest.fixture
def heads():
# fmt: off
return [
1, 2, 2, 6, 6, 6, 2, 6, 7, 2, 2, 12, 14, 14, 2, 14, 14, 16, 19, 23, 23,
22, 23, 23, 23, 26, 24, 23, 29, 27, 31, 29, 35, 32, 35, 31, 23, 23, 37,
37, 42, 42, 39, 42, 45, 43, 37, 46, 37, 50, 51, 37, 53, 51, 55, 53, 55,
58, 56, 53, 59, 60, 60, 62, 63, 23, 65, 68, 69, 69, 69, 72, 70, 72, 76,
76, 72, 69, 96, 80, 78, 80, 81, 86, 83, 86, 96, 96, 89, 96, 89, 92, 90,
96, 96, 96, 96, 96, 99, 97, 96, 100, 103, 103, 103, 107, 107, 103, 107,
111, 111, 112, 113, 107, 103, 116, 136, 116, 120, 118, 117, 120, 125,
125, 125, 121, 116, 116, 131, 131, 131, 127, 131, 134, 131, 134, 136,
136, 139, 139, 139, 142, 140, 139, 145, 145, 147, 145, 147, 150, 148,
145, 153, 162, 153, 156, 162, 156, 157, 162, 162, 162, 162, 162, 162,
172, 165, 169, 169, 172, 169, 172, 162, 172, 172, 176, 174, 172, 179,
179, 179, 180, 183, 181, 179, 184, 185, 185, 187, 190, 188, 179, 193,
194, 194, 196, 194, 196, 194, 194, 218, 200, 204, 202, 200, 207, 207,
204, 204, 204, 212, 212, 209, 212, 216, 216, 213, 200, 194, 218, 218,
220, 218, 224, 222, 222, 227, 225, 218, 246, 231, 229, 246, 246, 237,
237, 237, 233, 246, 238, 241, 246, 241, 245, 245, 242, 246, 246, 249,
247, 246, 252, 252, 252, 253, 257, 255, 254, 259, 257, 261, 259, 265,
264, 265, 261, 265, 265, 270, 270, 267, 252, 271, 274, 275, 275, 276,
283, 283, 280, 283, 280, 281, 283, 283, 284]
# fmt: on
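# The words/heads fixtures above describe a parse of the opening of "1984"; the
# tests below verify that child iteration, child counts and subtree edges stay
# consistent with those head indices.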
def test_parser_parse_navigate_consistency(en_vocab, words, heads):
doc = Doc(en_vocab, words=words, heads=heads, deps=["dep"] * len(heads))
for head in doc:
for child in head.lefts:
assert child.head == head
for child in head.rights:
assert child.head == head
def test_parser_parse_navigate_child_consistency(en_vocab, words, heads):
doc = Doc(en_vocab, words=words, heads=heads, deps=["dep"] * len(heads))
lefts = {}
rights = {}
for head in doc:
assert head.i not in lefts
lefts[head.i] = set()
for left in head.lefts:
lefts[head.i].add(left.i)
assert head.i not in rights
rights[head.i] = set()
for right in head.rights:
rights[head.i].add(right.i)
for head in doc:
assert head.n_rights == len(rights[head.i])
assert head.n_lefts == len(lefts[head.i])
for child in doc:
if child.i < child.head.i:
assert child.i in lefts[child.head.i]
assert child.i not in rights[child.head.i]
lefts[child.head.i].remove(child.i)
elif child.i > child.head.i:
assert child.i in rights[child.head.i]
assert child.i not in lefts[child.head.i]
rights[child.head.i].remove(child.i)
for head_index, children in lefts.items():
assert not children
for head_index, children in rights.items():
assert not children
def test_parser_parse_navigate_edges(en_vocab, words, heads):
doc = Doc(en_vocab, words=words, heads=heads, deps=["dep"] * len(heads))
for token in doc:
subtree = list(token.subtree)
debug = "\t".join((token.text, token.left_edge.text, subtree[0].text))
assert token.left_edge == subtree[0], debug
debug = "\t".join(
(
token.text,
token.right_edge.text,
subtree[-1].text,
token.right_edge.head.text,
)
)
assert token.right_edge == subtree[-1], debug
# File: spaCy-master/spacy/tests/parser/test_preset_sbd.py

import pytest
from thinc.api import Adam
from spacy import registry
from spacy.attrs import NORM
from spacy.pipeline import DependencyParser
from spacy.pipeline.dep_parser import DEFAULT_PARSER_MODEL
from spacy.tokens import Doc
from spacy.training import Example
from spacy.vocab import Vocab
@pytest.fixture
def vocab():
return Vocab(lex_attr_getters={NORM: lambda s: s})
def _parser_example(parser):
doc = Doc(parser.vocab, words=["a", "b", "c", "d"])
gold = {"heads": [1, 1, 3, 3], "deps": ["right", "ROOT", "left", "ROOT"]}
return Example.from_dict(doc, gold)
@pytest.fixture
def parser(vocab):
vocab.strings.add("ROOT")
cfg = {"model": DEFAULT_PARSER_MODEL}
model = registry.resolve(cfg, validate=True)["model"]
parser = DependencyParser(vocab, model)
parser.cfg["token_vector_width"] = 4
parser.cfg["hidden_width"] = 32
# parser.add_label('right')
parser.add_label("left")
parser.initialize(lambda: [_parser_example(parser)])
sgd = Adam(0.001)
for i in range(10):
losses = {}
doc = Doc(vocab, words=["a", "b", "c", "d"])
example = Example.from_dict(
doc, {"heads": [1, 1, 3, 3], "deps": ["left", "ROOT", "left", "ROOT"]}
)
parser.update([example], sgd=sgd, losses=losses)
return parser
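# The tests below check that preset sentence-boundary values on the Doc are
# respected (never overwritten) by the trained toy parser.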
def test_no_sentences(parser):
doc = Doc(parser.vocab, words=["a", "b", "c", "d"])
doc = parser(doc)
assert len(list(doc.sents)) >= 1
def test_sents_1(parser):
doc = Doc(parser.vocab, words=["a", "b", "c", "d"])
doc[2].sent_start = True
doc = parser(doc)
assert len(list(doc.sents)) >= 2
doc = Doc(parser.vocab, words=["a", "b", "c", "d"])
doc[1].sent_start = False
doc[2].sent_start = True
doc[3].sent_start = False
doc = parser(doc)
assert len(list(doc.sents)) == 2
def test_sents_1_2(parser):
doc = Doc(parser.vocab, words=["a", "b", "c", "d"])
doc[1].sent_start = True
doc[2].sent_start = True
doc = parser(doc)
assert len(list(doc.sents)) >= 3
def test_sents_1_3(parser):
doc = Doc(parser.vocab, words=["a", "b", "c", "d"])
doc[0].is_sent_start = True
doc[1].is_sent_start = True
doc[2].is_sent_start = None
doc[3].is_sent_start = True
doc = parser(doc)
assert len(list(doc.sents)) >= 3
doc = Doc(parser.vocab, words=["a", "b", "c", "d"])
doc[0].is_sent_start = True
doc[1].is_sent_start = True
doc[2].is_sent_start = False
doc[3].is_sent_start = True
doc = parser(doc)
assert len(list(doc.sents)) == 3
# File: spaCy-master/spacy/tests/parser/test_space_attachment.py

import pytest
from spacy.tokens import Doc
from ..util import apply_transition_sequence
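# These tests check that whitespace-only tokens get attached to neighbouring
# tokens and are never left as standalone single-token sentences.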
def test_parser_space_attachment(en_vocab):
# fmt: off
words = ["This", "is", "a", "test", ".", "\n", "To", "ensure", " ", "spaces", "are", "attached", "well", "."]
heads = [1, 1, 3, 1, 1, 4, 7, 11, 7, 11, 11, 11, 11, 11]
# fmt: on
deps = ["dep"] * len(heads)
doc = Doc(en_vocab, words=words, heads=heads, deps=deps)
for sent in doc.sents:
if len(sent) == 1:
assert not sent[-1].is_space
def test_parser_sentence_space(en_vocab):
# fmt: off
words = ["I", "look", "forward", "to", "using", "Thingamajig", ".", " ", "I", "'ve", "been", "told", "it", "will", "make", "my", "life", "easier", "..."]
heads = [1, 1, 1, 1, 3, 4, 1, 6, 11, 11, 11, 11, 14, 14, 11, 16, 17, 14, 11]
deps = ["nsubj", "ROOT", "advmod", "prep", "pcomp", "dobj", "punct", "",
"nsubjpass", "aux", "auxpass", "ROOT", "nsubj", "aux", "ccomp",
"poss", "nsubj", "ccomp", "punct"]
# fmt: on
doc = Doc(en_vocab, words=words, heads=heads, deps=deps)
assert len(list(doc.sents)) == 2
@pytest.mark.skip(
reason="The step_through API was removed (but should be brought back)"
)
def test_parser_space_attachment_leading(en_vocab, en_parser):
words = ["\t", "\n", "This", "is", "a", "sentence", "."]
heads = [1, 2, 2, 4, 2, 2]
doc = Doc(en_vocab, words=words, heads=heads)
assert doc[0].is_space
assert doc[1].is_space
assert doc[2].text == "This"
with en_parser.step_through(doc) as stepwise:
pass
assert doc[0].head.i == 2
assert doc[1].head.i == 2
assert stepwise.stack == set([2])
@pytest.mark.skip(
reason="The step_through API was removed (but should be brought back)"
)
def test_parser_space_attachment_intermediate_trailing(en_vocab, en_parser):
words = ["This", "is", "\t", "a", "\t\n", "\n", "sentence", ".", "\n\n", "\n"]
heads = [1, 1, 1, 5, 3, 1, 1, 6]
transition = ["L-nsubj", "S", "L-det", "R-attr", "D", "R-punct"]
doc = Doc(en_vocab, words=words, heads=heads)
assert doc[2].is_space
assert doc[4].is_space
assert doc[5].is_space
assert doc[8].is_space
assert doc[9].is_space
apply_transition_sequence(en_parser, doc, transition)
for token in doc:
assert token.dep != 0 or token.is_space
assert [token.head.i for token in doc] == [1, 1, 1, 6, 3, 3, 1, 1, 7, 7]
@pytest.mark.parametrize("text,length", [(["\n"], 1), (["\n", "\t", "\n\n", "\t"], 4)])
@pytest.mark.skip(
reason="The step_through API was removed (but should be brought back)"
)
def test_parser_space_attachment_space(en_parser, text, length):
doc = Doc(en_parser.vocab, words=text)
assert len(doc) == length
with en_parser.step_through(doc) as _: # noqa: F841
pass
assert doc[0].is_space
for token in doc:
assert token.head.i == length - 1
# File: spaCy-master/spacy/tests/parser/test_state.py

import pytest
from spacy.pipeline._parser_internals.stateclass import StateClass
from spacy.tokens.doc import Doc
from spacy.vocab import Vocab
@pytest.fixture
def vocab():
return Vocab()
@pytest.fixture
def doc(vocab):
return Doc(vocab, words=["a", "b", "c", "d"])
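# The tests below drive StateClass directly: pushing/popping the stack and
# adding arcs, then checking the head (H), leftmost (L) and rightmost (R)
# child lookups.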
def test_init_state(doc):
state = StateClass(doc)
assert state.stack == []
assert state.queue == list(range(len(doc)))
assert not state.is_final()
assert state.buffer_length() == 4
def test_push_pop(doc):
state = StateClass(doc)
state.push()
assert state.buffer_length() == 3
assert state.stack == [0]
assert 0 not in state.queue
state.push()
assert state.stack == [1, 0]
assert 1 not in state.queue
assert state.buffer_length() == 2
state.pop()
assert state.stack == [0]
assert 1 not in state.queue
def test_stack_depth(doc):
state = StateClass(doc)
assert state.stack_depth() == 0
assert state.buffer_length() == len(doc)
state.push()
assert state.buffer_length() == 3
assert state.stack_depth() == 1
def test_H(doc):
state = StateClass(doc)
assert state.H(0) == -1
state.add_arc(1, 0, 0)
assert state.arcs == [{"head": 1, "child": 0, "label": 0}]
assert state.H(0) == 1
state.add_arc(3, 1, 0)
assert state.H(1) == 3
def test_L(doc):
state = StateClass(doc)
assert state.L(2, 1) == -1
state.add_arc(2, 1, 0)
assert state.arcs == [{"head": 2, "child": 1, "label": 0}]
assert state.L(2, 1) == 1
state.add_arc(2, 0, 0)
assert state.L(2, 1) == 0
assert state.n_L(2) == 2
def test_R(doc):
state = StateClass(doc)
assert state.R(0, 1) == -1
state.add_arc(0, 1, 0)
assert state.arcs == [{"head": 0, "child": 1, "label": 0}]
assert state.R(0, 1) == 1
state.add_arc(0, 2, 0)
assert state.R(0, 1) == 2
assert state.n_R(0) == 2
# File: spaCy-master/spacy/tests/pipeline/__init__.py (empty)
|
# File: spaCy-master/spacy/tests/pipeline/test_analysis.py

import pytest
from mock import Mock
from spacy.language import Language
from spacy.pipe_analysis import get_attr_info, validate_attrs
def test_component_decorator_assigns():
@Language.component("c1", assigns=["token.tag", "doc.tensor"])
def test_component1(doc):
return doc
@Language.component(
"c2", requires=["token.tag", "token.pos"], assigns=["token.lemma", "doc.tensor"]
)
def test_component2(doc):
return doc
@Language.component(
"c3", requires=["token.lemma"], assigns=["token._.custom_lemma"]
)
def test_component3(doc):
return doc
assert Language.has_factory("c1")
assert Language.has_factory("c2")
assert Language.has_factory("c3")
nlp = Language()
nlp.add_pipe("c1")
nlp.add_pipe("c2")
problems = nlp.analyze_pipes()["problems"]
assert problems["c2"] == ["token.pos"]
nlp.add_pipe("c3")
assert get_attr_info(nlp, "doc.tensor")["assigns"] == ["c1", "c2"]
nlp.add_pipe("c1", name="c4")
test_component4_meta = nlp.get_pipe_meta("c1")
assert test_component4_meta.factory == "c1"
assert nlp.pipe_names == ["c1", "c2", "c3", "c4"]
assert not Language.has_factory("c4")
assert nlp.pipe_factories["c1"] == "c1"
assert nlp.pipe_factories["c4"] == "c1"
assert get_attr_info(nlp, "doc.tensor")["assigns"] == ["c1", "c2", "c4"]
assert get_attr_info(nlp, "token.pos")["requires"] == ["c2"]
assert nlp("hello world")
def test_component_factories_class_func():
"""Test that class components can implement a from_nlp classmethod that
gives them access to the nlp object and config via the factory."""
class TestComponent5:
def __call__(self, doc):
return doc
mock = Mock()
mock.return_value = TestComponent5()
    def test_component5_factory(nlp, foo: str = "bar", name="c5"):
        return mock(nlp, foo=foo)
    Language.factory("c5", func=test_component5_factory)
assert Language.has_factory("c5")
nlp = Language()
nlp.add_pipe("c5", config={"foo": "bar"})
assert nlp("hello world")
mock.assert_called_once_with(nlp, foo="bar")
def test_analysis_validate_attrs_valid():
attrs = ["doc.sents", "doc.ents", "token.tag", "token._.xyz", "span._.xyz"]
assert validate_attrs(attrs)
for attr in attrs:
assert validate_attrs([attr])
with pytest.raises(ValueError):
validate_attrs(["doc.sents", "doc.xyz"])
@pytest.mark.parametrize(
"attr",
[
"doc",
"doc_ents",
"doc.xyz",
"token.xyz",
"token.tag_",
"token.tag.xyz",
"token._.xyz.abc",
"span.label",
],
)
def test_analysis_validate_attrs_invalid(attr):
with pytest.raises(ValueError):
validate_attrs([attr])
def test_analysis_validate_attrs_remove_pipe():
"""Test that attributes are validated correctly on remove."""
@Language.component("pipe_analysis_c6", assigns=["token.tag"])
def c1(doc):
return doc
@Language.component("pipe_analysis_c7", requires=["token.pos"])
def c2(doc):
return doc
nlp = Language()
nlp.add_pipe("pipe_analysis_c6")
nlp.add_pipe("pipe_analysis_c7")
problems = nlp.analyze_pipes()["problems"]
assert problems["pipe_analysis_c7"] == ["token.pos"]
nlp.remove_pipe("pipe_analysis_c7")
problems = nlp.analyze_pipes()["problems"]
assert all(p == [] for p in problems.values())
# File: spaCy-master/spacy/tests/pipeline/test_annotates_on_update.py

from typing import Callable, Iterable, Iterator
import pytest
from thinc.api import Config
from spacy.lang.en import English
from spacy.language import Language
from spacy.training import Example
from spacy.training.loop import train
from spacy.util import load_model_from_config, registry
@pytest.fixture
def config_str():
return """
[nlp]
lang = "en"
pipeline = ["sentencizer","assert_sents"]
disabled = []
before_creation = null
after_creation = null
after_pipeline_creation = null
batch_size = 1000
tokenizer = {"@tokenizers":"spacy.Tokenizer.v1"}
[components]
[components.assert_sents]
factory = "assert_sents"
[components.sentencizer]
factory = "sentencizer"
punct_chars = null
[training]
dev_corpus = "corpora.dev"
train_corpus = "corpora.train"
annotating_components = ["sentencizer"]
max_steps = 2
[corpora]
[corpora.dev]
@readers = "unannotated_corpus"
[corpora.train]
@readers = "unannotated_corpus"
"""
def test_annotates_on_update():
# The custom component checks for sentence annotation
@Language.factory("assert_sents", default_config={})
def assert_sents(nlp, name):
return AssertSents(name)
class AssertSents:
def __init__(self, name, **cfg):
self.name = name
pass
def __call__(self, doc):
if not doc.has_annotation("SENT_START"):
raise ValueError("No sents")
return doc
def update(self, examples, *, drop=0.0, sgd=None, losses=None):
for example in examples:
if not example.predicted.has_annotation("SENT_START"):
raise ValueError("No sents")
return {}
nlp = English()
nlp.add_pipe("sentencizer")
nlp.add_pipe("assert_sents")
# When the pipeline runs, annotations are set
nlp("This is a sentence.")
examples = []
for text in ["a a", "b b", "c c"]:
examples.append(Example(nlp.make_doc(text), nlp(text)))
for example in examples:
assert not example.predicted.has_annotation("SENT_START")
# If updating without setting annotations, assert_sents will raise an error
with pytest.raises(ValueError):
nlp.update(examples)
# Updating while setting annotations for the sentencizer succeeds
nlp.update(examples, annotates=["sentencizer"])
def test_annotating_components_from_config(config_str):
@registry.readers("unannotated_corpus")
def create_unannotated_corpus() -> Callable[[Language], Iterable[Example]]:
return UnannotatedCorpus()
class UnannotatedCorpus:
def __call__(self, nlp: Language) -> Iterator[Example]:
for text in ["a a", "b b", "c c"]:
doc = nlp.make_doc(text)
yield Example(doc, doc)
orig_config = Config().from_str(config_str)
nlp = load_model_from_config(orig_config, auto_fill=True, validate=True)
assert nlp.config["training"]["annotating_components"] == ["sentencizer"]
train(nlp)
nlp.config["training"]["annotating_components"] = []
with pytest.raises(ValueError):
train(nlp)
# File: spaCy-master/spacy/tests/pipeline/test_attributeruler.py

import numpy
import pytest
from spacy import registry, util
from spacy.lang.en import English
from spacy.pipeline import AttributeRuler
from spacy.tokens import Doc
from spacy.training import Example
from ..util import make_tempdir
@pytest.fixture
def nlp():
return English()
@pytest.fixture
def pattern_dicts():
return [
{
"patterns": [[{"ORTH": "a"}], [{"ORTH": "irrelevant"}]],
"attrs": {"LEMMA": "the", "MORPH": "Case=Nom|Number=Plur"},
},
# one pattern sets the lemma
{"patterns": [[{"ORTH": "test"}]], "attrs": {"LEMMA": "cat"}},
# another pattern sets the morphology
{
"patterns": [[{"ORTH": "test"}]],
"attrs": {"MORPH": "Case=Nom|Number=Sing"},
"index": 0,
},
]
@pytest.fixture
def tag_map():
return {
".": {"POS": "PUNCT", "PunctType": "peri"},
",": {"POS": "PUNCT", "PunctType": "comm"},
}
@pytest.fixture
def morph_rules():
return {"DT": {"the": {"POS": "DET", "LEMMA": "a", "Case": "Nom"}}}
def check_tag_map(ruler):
doc = Doc(
ruler.vocab,
words=["This", "is", "a", "test", "."],
tags=["DT", "VBZ", "DT", "NN", "."],
)
doc = ruler(doc)
for i in range(len(doc)):
if i == 4:
assert doc[i].pos_ == "PUNCT"
assert str(doc[i].morph) == "PunctType=peri"
else:
assert doc[i].pos_ == ""
assert str(doc[i].morph) == ""
def check_morph_rules(ruler):
doc = Doc(
ruler.vocab,
words=["This", "is", "the", "test", "."],
tags=["DT", "VBZ", "DT", "NN", "."],
)
doc = ruler(doc)
for i in range(len(doc)):
if i != 2:
assert doc[i].pos_ == ""
assert str(doc[i].morph) == ""
else:
assert doc[2].pos_ == "DET"
assert doc[2].lemma_ == "a"
assert str(doc[2].morph) == "Case=Nom"
def test_attributeruler_init(nlp, pattern_dicts):
a = nlp.add_pipe("attribute_ruler")
for p in pattern_dicts:
a.add(**p)
doc = nlp("This is a test.")
assert doc[2].lemma_ == "the"
assert str(doc[2].morph) == "Case=Nom|Number=Plur"
assert doc[3].lemma_ == "cat"
assert str(doc[3].morph) == "Case=Nom|Number=Sing"
assert doc.has_annotation("LEMMA")
assert doc.has_annotation("MORPH")
def test_attributeruler_init_patterns(nlp, pattern_dicts):
# initialize with patterns
ruler = nlp.add_pipe("attribute_ruler")
ruler.initialize(lambda: [], patterns=pattern_dicts)
doc = nlp("This is a test.")
assert doc[2].lemma_ == "the"
assert str(doc[2].morph) == "Case=Nom|Number=Plur"
assert doc[3].lemma_ == "cat"
assert str(doc[3].morph) == "Case=Nom|Number=Sing"
assert doc.has_annotation("LEMMA")
assert doc.has_annotation("MORPH")
nlp.remove_pipe("attribute_ruler")
# initialize with patterns from misc registry
@registry.misc("attribute_ruler_patterns")
def attribute_ruler_patterns():
return [
{
"patterns": [[{"ORTH": "a"}], [{"ORTH": "irrelevant"}]],
"attrs": {"LEMMA": "the", "MORPH": "Case=Nom|Number=Plur"},
},
# one pattern sets the lemma
{"patterns": [[{"ORTH": "test"}]], "attrs": {"LEMMA": "cat"}},
# another pattern sets the morphology
{
"patterns": [[{"ORTH": "test"}]],
"attrs": {"MORPH": "Case=Nom|Number=Sing"},
"index": 0,
},
]
nlp.config["initialize"]["components"]["attribute_ruler"] = {
"patterns": {"@misc": "attribute_ruler_patterns"}
}
nlp.add_pipe("attribute_ruler")
nlp.initialize()
doc = nlp("This is a test.")
assert doc[2].lemma_ == "the"
assert str(doc[2].morph) == "Case=Nom|Number=Plur"
assert doc[3].lemma_ == "cat"
assert str(doc[3].morph) == "Case=Nom|Number=Sing"
assert doc.has_annotation("LEMMA")
assert doc.has_annotation("MORPH")
def test_attributeruler_init_clear(nlp, pattern_dicts):
"""Test that initialization clears patterns."""
ruler = nlp.add_pipe("attribute_ruler")
assert not len(ruler.matcher)
ruler.add_patterns(pattern_dicts)
assert len(ruler.matcher)
ruler.initialize(lambda: [])
assert not len(ruler.matcher)
def test_attributeruler_score(nlp, pattern_dicts):
# initialize with patterns
ruler = nlp.add_pipe("attribute_ruler")
ruler.initialize(lambda: [], patterns=pattern_dicts)
doc = nlp("This is a test.")
assert doc[2].lemma_ == "the"
assert str(doc[2].morph) == "Case=Nom|Number=Plur"
assert doc[3].lemma_ == "cat"
assert str(doc[3].morph) == "Case=Nom|Number=Sing"
doc = nlp.make_doc("This is a test.")
dev_examples = [Example.from_dict(doc, {"lemmas": ["this", "is", "a", "cat", "."]})]
scores = nlp.evaluate(dev_examples)
# "cat" is the only correct lemma
assert scores["lemma_acc"] == pytest.approx(0.2)
# no morphs are set
assert scores["morph_acc"] is None
nlp.remove_pipe("attribute_ruler")
# test with custom scorer
@registry.misc("weird_scorer.v1")
def make_weird_scorer():
def weird_scorer(examples, weird_score, **kwargs):
return {"weird_score": weird_score}
return weird_scorer
ruler = nlp.add_pipe(
"attribute_ruler", config={"scorer": {"@misc": "weird_scorer.v1"}}
)
ruler.initialize(lambda: [], patterns=pattern_dicts)
scores = nlp.evaluate(dev_examples, scorer_cfg={"weird_score": 0.12345})
assert scores["weird_score"] == 0.12345
assert "token_acc" in scores
assert "lemma_acc" not in scores
scores = nlp.evaluate(dev_examples, scorer_cfg={"weird_score": 0.23456})
assert scores["weird_score"] == 0.23456
def test_attributeruler_rule_order(nlp):
a = AttributeRuler(nlp.vocab)
patterns = [
{"patterns": [[{"TAG": "VBZ"}]], "attrs": {"POS": "VERB"}},
{"patterns": [[{"TAG": "VBZ"}]], "attrs": {"POS": "NOUN"}},
]
a.add_patterns(patterns)
doc = Doc(
nlp.vocab,
words=["This", "is", "a", "test", "."],
tags=["DT", "VBZ", "DT", "NN", "."],
)
doc = a(doc)
assert doc[1].pos_ == "NOUN"
def test_attributeruler_tag_map(nlp, tag_map):
ruler = AttributeRuler(nlp.vocab)
ruler.load_from_tag_map(tag_map)
check_tag_map(ruler)
def test_attributeruler_tag_map_initialize(nlp, tag_map):
ruler = nlp.add_pipe("attribute_ruler")
ruler.initialize(lambda: [], tag_map=tag_map)
check_tag_map(ruler)
def test_attributeruler_morph_rules(nlp, morph_rules):
ruler = AttributeRuler(nlp.vocab)
ruler.load_from_morph_rules(morph_rules)
check_morph_rules(ruler)
def test_attributeruler_morph_rules_initialize(nlp, morph_rules):
ruler = nlp.add_pipe("attribute_ruler")
ruler.initialize(lambda: [], morph_rules=morph_rules)
check_morph_rules(ruler)
def test_attributeruler_indices(nlp):
a = nlp.add_pipe("attribute_ruler")
a.add(
[[{"ORTH": "a"}, {"ORTH": "test"}]],
{"LEMMA": "the", "MORPH": "Case=Nom|Number=Plur"},
index=0,
)
a.add(
[[{"ORTH": "This"}, {"ORTH": "is"}]],
{"LEMMA": "was", "MORPH": "Case=Nom|Number=Sing"},
index=1,
)
a.add([[{"ORTH": "a"}, {"ORTH": "test"}]], {"LEMMA": "cat"}, index=-1)
text = "This is a test."
doc = nlp(text)
for i in range(len(doc)):
if i == 1:
assert doc[i].lemma_ == "was"
assert str(doc[i].morph) == "Case=Nom|Number=Sing"
elif i == 2:
assert doc[i].lemma_ == "the"
assert str(doc[i].morph) == "Case=Nom|Number=Plur"
elif i == 3:
assert doc[i].lemma_ == "cat"
else:
assert str(doc[i].morph) == ""
# raises an error when trying to modify a token outside of the match
a.add([[{"ORTH": "a"}, {"ORTH": "test"}]], {"LEMMA": "cat"}, index=2)
with pytest.raises(ValueError):
doc = nlp(text)
# raises an error when trying to modify a token outside of the match
a.add([[{"ORTH": "a"}, {"ORTH": "test"}]], {"LEMMA": "cat"}, index=10)
with pytest.raises(ValueError):
doc = nlp(text)
def test_attributeruler_patterns_prop(nlp, pattern_dicts):
a = nlp.add_pipe("attribute_ruler")
a.add_patterns(pattern_dicts)
for p1, p2 in zip(pattern_dicts, a.patterns):
assert p1["patterns"] == p2["patterns"]
assert p1["attrs"] == p2["attrs"]
if p1.get("index"):
assert p1["index"] == p2["index"]
def test_attributeruler_serialize(nlp, pattern_dicts):
a = nlp.add_pipe("attribute_ruler")
a.add_patterns(pattern_dicts)
text = "This is a test."
attrs = ["ORTH", "LEMMA", "MORPH"]
doc = nlp(text)
# bytes roundtrip
a_reloaded = AttributeRuler(nlp.vocab).from_bytes(a.to_bytes())
assert a.to_bytes() == a_reloaded.to_bytes()
doc1 = a_reloaded(nlp.make_doc(text))
numpy.array_equal(doc.to_array(attrs), doc1.to_array(attrs))
assert a.patterns == a_reloaded.patterns
# disk roundtrip
with make_tempdir() as tmp_dir:
nlp.to_disk(tmp_dir)
nlp2 = util.load_model_from_path(tmp_dir)
doc2 = nlp2(text)
assert nlp2.get_pipe("attribute_ruler").to_bytes() == a.to_bytes()
assert numpy.array_equal(doc.to_array(attrs), doc2.to_array(attrs))
assert a.patterns == nlp2.get_pipe("attribute_ruler").patterns
# File: spaCy-master/spacy/tests/pipeline/test_edit_tree_lemmatizer.py

import pickle
import hypothesis.strategies as st
import pytest
from hypothesis import given
from spacy import util
from spacy.lang.en import English
from spacy.language import Language
from spacy.pipeline._edit_tree_internals.edit_trees import EditTrees
from spacy.strings import StringStore
from spacy.training import Example
from spacy.util import make_tempdir
TRAIN_DATA = [
("She likes green eggs", {"lemmas": ["she", "like", "green", "egg"]}),
("Eat blue ham", {"lemmas": ["eat", "blue", "ham"]}),
]
PARTIAL_DATA = [
# partial annotation
("She likes green eggs", {"lemmas": ["", "like", "green", ""]}),
# misaligned partial annotation
(
"He hates green eggs",
{
"words": ["He", "hat", "es", "green", "eggs"],
"lemmas": ["", "hat", "e", "green", ""],
},
),
]
def test_initialize_examples():
nlp = Language()
lemmatizer = nlp.add_pipe("trainable_lemmatizer")
train_examples = []
for t in TRAIN_DATA:
train_examples.append(Example.from_dict(nlp.make_doc(t[0]), t[1]))
# you shouldn't really call this more than once, but for testing it should be fine
nlp.initialize(get_examples=lambda: train_examples)
with pytest.raises(TypeError):
nlp.initialize(get_examples=lambda: None)
with pytest.raises(TypeError):
nlp.initialize(get_examples=lambda: train_examples[0])
with pytest.raises(TypeError):
nlp.initialize(get_examples=lambda: [])
with pytest.raises(TypeError):
nlp.initialize(get_examples=train_examples)
def test_initialize_from_labels():
nlp = Language()
lemmatizer = nlp.add_pipe("trainable_lemmatizer")
lemmatizer.min_tree_freq = 1
train_examples = []
for t in TRAIN_DATA:
train_examples.append(Example.from_dict(nlp.make_doc(t[0]), t[1]))
nlp.initialize(get_examples=lambda: train_examples)
nlp2 = Language()
lemmatizer2 = nlp2.add_pipe("trainable_lemmatizer")
lemmatizer2.initialize(
# We want to check that the strings in replacement nodes are
# added to the string store. Avoid that they get added through
# the examples.
get_examples=lambda: train_examples[:1],
labels=lemmatizer.label_data,
)
assert lemmatizer2.tree2label == {1: 0, 3: 1, 4: 2, 6: 3}
assert lemmatizer2.label_data == {
"trees": [
{"orig": "S", "subst": "s"},
{
"prefix_len": 1,
"suffix_len": 0,
"prefix_tree": 0,
"suffix_tree": 4294967295,
},
{"orig": "s", "subst": ""},
{
"prefix_len": 0,
"suffix_len": 1,
"prefix_tree": 4294967295,
"suffix_tree": 2,
},
{
"prefix_len": 0,
"suffix_len": 0,
"prefix_tree": 4294967295,
"suffix_tree": 4294967295,
},
{"orig": "E", "subst": "e"},
{
"prefix_len": 1,
"suffix_len": 0,
"prefix_tree": 5,
"suffix_tree": 4294967295,
},
],
"labels": (1, 3, 4, 6),
}
@pytest.mark.parametrize("top_k", (1, 5, 30))
def test_no_data(top_k):
    # Test that the lemmatizer provides a nice error when there's no lemma data / labels
TEXTCAT_DATA = [
("I'm so happy.", {"cats": {"POSITIVE": 1.0, "NEGATIVE": 0.0}}),
("I'm so angry", {"cats": {"POSITIVE": 0.0, "NEGATIVE": 1.0}}),
]
nlp = English()
nlp.add_pipe("trainable_lemmatizer", config={"top_k": top_k})
nlp.add_pipe("textcat")
train_examples = []
for t in TEXTCAT_DATA:
train_examples.append(Example.from_dict(nlp.make_doc(t[0]), t[1]))
with pytest.raises(ValueError):
nlp.initialize(get_examples=lambda: train_examples)
@pytest.mark.parametrize("top_k", (1, 5, 30))
def test_incomplete_data(top_k):
# Test that the lemmatizer works with incomplete information
nlp = English()
lemmatizer = nlp.add_pipe("trainable_lemmatizer", config={"top_k": top_k})
lemmatizer.min_tree_freq = 1
train_examples = []
for t in PARTIAL_DATA:
train_examples.append(Example.from_dict(nlp.make_doc(t[0]), t[1]))
optimizer = nlp.initialize(get_examples=lambda: train_examples)
for i in range(50):
losses = {}
nlp.update(train_examples, sgd=optimizer, losses=losses)
assert losses["trainable_lemmatizer"] < 0.00001
# test the trained model
test_text = "She likes blue eggs"
doc = nlp(test_text)
assert doc[1].lemma_ == "like"
assert doc[2].lemma_ == "blue"
# Check that incomplete annotations are ignored.
scores, _ = lemmatizer.model([eg.predicted for eg in train_examples], is_train=True)
_, dX = lemmatizer.get_loss(train_examples, scores)
xp = lemmatizer.model.ops.xp
# Missing annotations.
assert xp.count_nonzero(dX[0][0]) == 0
assert xp.count_nonzero(dX[0][3]) == 0
assert xp.count_nonzero(dX[1][0]) == 0
assert xp.count_nonzero(dX[1][3]) == 0
# Misaligned annotations.
assert xp.count_nonzero(dX[1][1]) == 0
@pytest.mark.parametrize("top_k", (1, 5, 30))
def test_overfitting_IO(top_k):
nlp = English()
lemmatizer = nlp.add_pipe("trainable_lemmatizer", config={"top_k": top_k})
lemmatizer.min_tree_freq = 1
train_examples = []
for t in TRAIN_DATA:
train_examples.append(Example.from_dict(nlp.make_doc(t[0]), t[1]))
optimizer = nlp.initialize(get_examples=lambda: train_examples)
for i in range(50):
losses = {}
nlp.update(train_examples, sgd=optimizer, losses=losses)
assert losses["trainable_lemmatizer"] < 0.00001
test_text = "She likes blue eggs"
doc = nlp(test_text)
assert doc[0].lemma_ == "she"
assert doc[1].lemma_ == "like"
assert doc[2].lemma_ == "blue"
assert doc[3].lemma_ == "egg"
# Check model after a {to,from}_disk roundtrip
with util.make_tempdir() as tmp_dir:
nlp.to_disk(tmp_dir)
nlp2 = util.load_model_from_path(tmp_dir)
doc2 = nlp2(test_text)
assert doc2[0].lemma_ == "she"
assert doc2[1].lemma_ == "like"
assert doc2[2].lemma_ == "blue"
assert doc2[3].lemma_ == "egg"
# Check model after a {to,from}_bytes roundtrip
nlp_bytes = nlp.to_bytes()
nlp3 = English()
nlp3.add_pipe("trainable_lemmatizer", config={"top_k": top_k})
nlp3.from_bytes(nlp_bytes)
doc3 = nlp3(test_text)
assert doc3[0].lemma_ == "she"
assert doc3[1].lemma_ == "like"
assert doc3[2].lemma_ == "blue"
assert doc3[3].lemma_ == "egg"
# Check model after a pickle roundtrip.
nlp_bytes = pickle.dumps(nlp)
nlp4 = pickle.loads(nlp_bytes)
doc4 = nlp4(test_text)
assert doc4[0].lemma_ == "she"
assert doc4[1].lemma_ == "like"
assert doc4[2].lemma_ == "blue"
assert doc4[3].lemma_ == "egg"
def test_lemmatizer_requires_labels():
nlp = English()
nlp.add_pipe("trainable_lemmatizer")
with pytest.raises(ValueError):
nlp.initialize()
def test_lemmatizer_label_data():
nlp = English()
lemmatizer = nlp.add_pipe("trainable_lemmatizer")
lemmatizer.min_tree_freq = 1
train_examples = []
for t in TRAIN_DATA:
train_examples.append(Example.from_dict(nlp.make_doc(t[0]), t[1]))
nlp.initialize(get_examples=lambda: train_examples)
nlp2 = English()
lemmatizer2 = nlp2.add_pipe("trainable_lemmatizer")
lemmatizer2.initialize(
get_examples=lambda: train_examples, labels=lemmatizer.label_data
)
# Verify that the labels and trees are the same.
assert lemmatizer.labels == lemmatizer2.labels
assert lemmatizer.trees.to_bytes() == lemmatizer2.trees.to_bytes()
def test_dutch():
strings = StringStore()
trees = EditTrees(strings)
tree = trees.add("deelt", "delen")
assert trees.tree_to_str(tree) == "(m 0 3 () (m 0 2 (s '' 'l') (s 'lt' 'n')))"
tree = trees.add("gedeeld", "delen")
assert (
trees.tree_to_str(tree) == "(m 2 3 (s 'ge' '') (m 0 2 (s '' 'l') (s 'ld' 'n')))"
)
def test_from_to_bytes():
strings = StringStore()
trees = EditTrees(strings)
trees.add("deelt", "delen")
trees.add("gedeeld", "delen")
b = trees.to_bytes()
trees2 = EditTrees(strings)
trees2.from_bytes(b)
# Verify that the nodes did not change.
assert len(trees) == len(trees2)
for i in range(len(trees)):
assert trees.tree_to_str(i) == trees2.tree_to_str(i)
# Reinserting the same trees should not add new nodes.
trees2.add("deelt", "delen")
trees2.add("gedeeld", "delen")
assert len(trees) == len(trees2)
def test_from_to_disk():
strings = StringStore()
trees = EditTrees(strings)
trees.add("deelt", "delen")
trees.add("gedeeld", "delen")
trees2 = EditTrees(strings)
with make_tempdir() as temp_dir:
trees_file = temp_dir / "edit_trees.bin"
trees.to_disk(trees_file)
trees2 = trees2.from_disk(trees_file)
# Verify that the nodes did not change.
assert len(trees) == len(trees2)
for i in range(len(trees)):
assert trees.tree_to_str(i) == trees2.tree_to_str(i)
# Reinserting the same trees should not add new nodes.
trees2.add("deelt", "delen")
trees2.add("gedeeld", "delen")
assert len(trees) == len(trees2)
@given(st.text(), st.text())
def test_roundtrip(form, lemma):
strings = StringStore()
trees = EditTrees(strings)
tree = trees.add(form, lemma)
assert trees.apply(tree, form) == lemma
@given(st.text(alphabet="ab"), st.text(alphabet="ab"))
def test_roundtrip_small_alphabet(form, lemma):
# Test with small alphabets to have more overlap.
strings = StringStore()
trees = EditTrees(strings)
tree = trees.add(form, lemma)
assert trees.apply(tree, form) == lemma
def test_unapplicable_trees():
strings = StringStore()
trees = EditTrees(strings)
tree3 = trees.add("deelt", "delen")
# Replacement fails.
assert trees.apply(tree3, "deeld") == None
# Suffix + prefix are too large.
assert trees.apply(tree3, "de") == None
def test_empty_strings():
strings = StringStore()
trees = EditTrees(strings)
no_change = trees.add("xyz", "xyz")
empty = trees.add("", "")
assert no_change == empty
| 10,449 | 30.287425 | 90 | py |
spaCy | spaCy-master/spacy/tests/pipeline/test_entity_linker.py | from typing import Any, Callable, Dict, Iterable, Tuple
import pytest
from numpy.testing import assert_equal
from spacy import Language, registry, util
from spacy.attrs import ENT_KB_ID
from spacy.compat import pickle
from spacy.kb import Candidate, InMemoryLookupKB, KnowledgeBase, get_candidates
from spacy.lang.en import English
from spacy.ml import load_kb
from spacy.ml.models.entity_linker import build_span_maker
from spacy.pipeline import EntityLinker
from spacy.pipeline.legacy import EntityLinker_v1
from spacy.pipeline.tok2vec import DEFAULT_TOK2VEC_MODEL
from spacy.scorer import Scorer
from spacy.tests.util import make_tempdir
from spacy.tokens import Doc, Span
from spacy.training import Example
from spacy.util import ensure_path
from spacy.vocab import Vocab
@pytest.fixture
def nlp():
return English()
def assert_almost_equal(a, b):
delta = 0.0001
assert a - delta <= b <= a + delta
@pytest.mark.issue(4674)
def test_issue4674():
"""Test that setting entities with overlapping identifiers does not mess up IO"""
nlp = English()
kb = InMemoryLookupKB(nlp.vocab, entity_vector_length=3)
vector1 = [0.9, 1.1, 1.01]
vector2 = [1.8, 2.25, 2.01]
with pytest.warns(UserWarning):
kb.set_entities(
entity_list=["Q1", "Q1"],
freq_list=[32, 111],
vector_list=[vector1, vector2],
)
assert kb.get_size_entities() == 1
# dumping to file & loading back in
with make_tempdir() as d:
dir_path = ensure_path(d)
if not dir_path.exists():
dir_path.mkdir()
file_path = dir_path / "kb"
kb.to_disk(str(file_path))
kb2 = InMemoryLookupKB(nlp.vocab, entity_vector_length=3)
kb2.from_disk(str(file_path))
assert kb2.get_size_entities() == 1
@pytest.mark.issue(6730)
def test_issue6730(en_vocab):
"""Ensure that the KB does not accept empty strings, but otherwise IO works fine."""
from spacy.kb.kb_in_memory import InMemoryLookupKB
kb = InMemoryLookupKB(en_vocab, entity_vector_length=3)
kb.add_entity(entity="1", freq=148, entity_vector=[1, 2, 3])
with pytest.raises(ValueError):
kb.add_alias(alias="", entities=["1"], probabilities=[0.4])
assert kb.contains_alias("") is False
kb.add_alias(alias="x", entities=["1"], probabilities=[0.2])
kb.add_alias(alias="y", entities=["1"], probabilities=[0.1])
with make_tempdir() as tmp_dir:
kb.to_disk(tmp_dir)
kb.from_disk(tmp_dir)
assert kb.get_size_aliases() == 2
assert set(kb.get_alias_strings()) == {"x", "y"}
@pytest.mark.issue(7065)
def test_issue7065():
text = "Kathleen Battle sang in Mahler 's Symphony No. 8 at the Cincinnati Symphony Orchestra 's May Festival."
nlp = English()
nlp.add_pipe("sentencizer")
ruler = nlp.add_pipe("entity_ruler")
patterns = [
{
"label": "THING",
"pattern": [
{"LOWER": "symphony"},
{"LOWER": "no"},
{"LOWER": "."},
{"LOWER": "8"},
],
}
]
ruler.add_patterns(patterns)
doc = nlp(text)
sentences = [s for s in doc.sents]
assert len(sentences) == 2
sent0 = sentences[0]
ent = doc.ents[0]
assert ent.start < sent0.end < ent.end
assert sentences.index(ent.sent) == 0
@pytest.mark.issue(7065)
@pytest.mark.parametrize("entity_in_first_sentence", [True, False])
def test_sentence_crossing_ents(entity_in_first_sentence: bool):
"""Tests if NEL crashes if entities cross sentence boundaries and the first associated sentence doesn't have an
entity.
entity_in_prior_sentence (bool): Whether to include an entity in the first sentence associated with the
sentence-crossing entity.
"""
# Test that the NEL doesn't crash when an entity crosses a sentence boundary
nlp = English()
vector_length = 3
text = "Mahler 's Symphony No. 8 was beautiful."
entities = [(10, 24, "WORK")]
links = {(10, 24): {"Q7304": 0.0, "Q270853": 1.0}}
if entity_in_first_sentence:
entities.append((0, 6, "PERSON"))
links[(0, 6)] = {"Q7304": 1.0, "Q270853": 0.0}
sent_starts = [1, -1, 0, 0, 0, 1, 0, 0, 0]
doc = nlp(text)
example = Example.from_dict(
doc, {"entities": entities, "links": links, "sent_starts": sent_starts}
)
train_examples = [example]
def create_kb(vocab):
# create artificial KB
mykb = InMemoryLookupKB(vocab, entity_vector_length=vector_length)
mykb.add_entity(entity="Q270853", freq=12, entity_vector=[9, 1, -7])
mykb.add_alias(
alias="No. 8",
entities=["Q270853"],
probabilities=[1.0],
)
mykb.add_entity(entity="Q7304", freq=12, entity_vector=[6, -4, 3])
mykb.add_alias(
alias="Mahler",
entities=["Q7304"],
probabilities=[1.0],
)
return mykb
# Create the Entity Linker component and add it to the pipeline
entity_linker = nlp.add_pipe("entity_linker", last=True)
entity_linker.set_kb(create_kb) # type: ignore
# train the NEL pipe
optimizer = nlp.initialize(get_examples=lambda: train_examples)
for i in range(2):
nlp.update(train_examples, sgd=optimizer)
# This shouldn't crash.
entity_linker.predict([example.reference]) # type: ignore
def test_no_entities():
# Test that having no entities doesn't crash the model
TRAIN_DATA = [
(
"The sky is blue.",
{
"sent_starts": [1, 0, 0, 0, 0],
},
)
]
nlp = English()
vector_length = 3
train_examples = []
for text, annotation in TRAIN_DATA:
doc = nlp(text)
train_examples.append(Example.from_dict(doc, annotation))
def create_kb(vocab):
# create artificial KB
mykb = InMemoryLookupKB(vocab, entity_vector_length=vector_length)
mykb.add_entity(entity="Q2146908", freq=12, entity_vector=[6, -4, 3])
mykb.add_alias("Russ Cochran", ["Q2146908"], [0.9])
return mykb
# Create and train the Entity Linker
entity_linker = nlp.add_pipe("entity_linker", last=True)
entity_linker.set_kb(create_kb)
optimizer = nlp.initialize(get_examples=lambda: train_examples)
for i in range(2):
losses = {}
nlp.update(train_examples, sgd=optimizer, losses=losses)
# adding additional components that are required for the entity_linker
nlp.add_pipe("sentencizer", first=True)
# this will run the pipeline on the examples and shouldn't crash
nlp.evaluate(train_examples)
def test_partial_links():
    # Test that having some entities on the doc without gold links doesn't crash
TRAIN_DATA = [
(
"Russ Cochran his reprints include EC Comics.",
{
"links": {(0, 12): {"Q2146908": 1.0}},
"entities": [(0, 12, "PERSON")],
"sent_starts": [1, -1, 0, 0, 0, 0, 0, 0],
},
)
]
nlp = English()
vector_length = 3
train_examples = []
for text, annotation in TRAIN_DATA:
doc = nlp(text)
train_examples.append(Example.from_dict(doc, annotation))
def create_kb(vocab):
# create artificial KB
mykb = InMemoryLookupKB(vocab, entity_vector_length=vector_length)
mykb.add_entity(entity="Q2146908", freq=12, entity_vector=[6, -4, 3])
mykb.add_alias("Russ Cochran", ["Q2146908"], [0.9])
return mykb
# Create and train the Entity Linker
entity_linker = nlp.add_pipe("entity_linker", last=True)
entity_linker.set_kb(create_kb)
optimizer = nlp.initialize(get_examples=lambda: train_examples)
for i in range(2):
losses = {}
nlp.update(train_examples, sgd=optimizer, losses=losses)
# adding additional components that are required for the entity_linker
nlp.add_pipe("sentencizer", first=True)
patterns = [
{"label": "PERSON", "pattern": [{"LOWER": "russ"}, {"LOWER": "cochran"}]},
{"label": "ORG", "pattern": [{"LOWER": "ec"}, {"LOWER": "comics"}]},
]
ruler = nlp.add_pipe("entity_ruler", before="entity_linker")
ruler.add_patterns(patterns)
# this will run the pipeline on the examples and shouldn't crash
results = nlp.evaluate(train_examples)
assert "PERSON" in results["ents_per_type"]
assert "PERSON" in results["nel_f_per_type"]
assert "ORG" in results["ents_per_type"]
assert "ORG" not in results["nel_f_per_type"]
def test_kb_valid_entities(nlp):
"""Test the valid construction of a KB with 3 entities and two aliases"""
mykb = InMemoryLookupKB(nlp.vocab, entity_vector_length=3)
# adding entities
mykb.add_entity(entity="Q1", freq=19, entity_vector=[8, 4, 3])
mykb.add_entity(entity="Q2", freq=5, entity_vector=[2, 1, 0])
mykb.add_entity(entity="Q3", freq=25, entity_vector=[-1, -6, 5])
# adding aliases
mykb.add_alias(alias="douglas", entities=["Q2", "Q3"], probabilities=[0.8, 0.2])
mykb.add_alias(alias="adam", entities=["Q2"], probabilities=[0.9])
# test the size of the corresponding KB
assert mykb.get_size_entities() == 3
assert mykb.get_size_aliases() == 2
# test retrieval of the entity vectors
assert mykb.get_vector("Q1") == [8, 4, 3]
assert mykb.get_vector("Q2") == [2, 1, 0]
assert mykb.get_vector("Q3") == [-1, -6, 5]
# test retrieval of prior probabilities
assert_almost_equal(mykb.get_prior_prob(entity="Q2", alias="douglas"), 0.8)
assert_almost_equal(mykb.get_prior_prob(entity="Q3", alias="douglas"), 0.2)
assert_almost_equal(mykb.get_prior_prob(entity="Q342", alias="douglas"), 0.0)
assert_almost_equal(mykb.get_prior_prob(entity="Q3", alias="douglassssss"), 0.0)
def test_kb_invalid_entities(nlp):
"""Test the invalid construction of a KB with an alias linked to a non-existing entity"""
mykb = InMemoryLookupKB(nlp.vocab, entity_vector_length=1)
# adding entities
mykb.add_entity(entity="Q1", freq=19, entity_vector=[1])
mykb.add_entity(entity="Q2", freq=5, entity_vector=[2])
mykb.add_entity(entity="Q3", freq=25, entity_vector=[3])
# adding aliases - should fail because one of the given IDs is not valid
with pytest.raises(ValueError):
mykb.add_alias(
alias="douglas", entities=["Q2", "Q342"], probabilities=[0.8, 0.2]
)
def test_kb_invalid_probabilities(nlp):
"""Test the invalid construction of a KB with wrong prior probabilities"""
mykb = InMemoryLookupKB(nlp.vocab, entity_vector_length=1)
# adding entities
mykb.add_entity(entity="Q1", freq=19, entity_vector=[1])
mykb.add_entity(entity="Q2", freq=5, entity_vector=[2])
mykb.add_entity(entity="Q3", freq=25, entity_vector=[3])
# adding aliases - should fail because the sum of the probabilities exceeds 1
with pytest.raises(ValueError):
mykb.add_alias(alias="douglas", entities=["Q2", "Q3"], probabilities=[0.8, 0.4])
def test_kb_invalid_combination(nlp):
"""Test the invalid construction of a KB with non-matching entity and probability lists"""
mykb = InMemoryLookupKB(nlp.vocab, entity_vector_length=1)
# adding entities
mykb.add_entity(entity="Q1", freq=19, entity_vector=[1])
mykb.add_entity(entity="Q2", freq=5, entity_vector=[2])
mykb.add_entity(entity="Q3", freq=25, entity_vector=[3])
# adding aliases - should fail because the entities and probabilities vectors are not of equal length
with pytest.raises(ValueError):
mykb.add_alias(
alias="douglas", entities=["Q2", "Q3"], probabilities=[0.3, 0.4, 0.1]
)
def test_kb_invalid_entity_vector(nlp):
"""Test the invalid construction of a KB with non-matching entity vector lengths"""
mykb = InMemoryLookupKB(nlp.vocab, entity_vector_length=3)
# adding entities
mykb.add_entity(entity="Q1", freq=19, entity_vector=[1, 2, 3])
# this should fail because the kb's expected entity vector length is 3
with pytest.raises(ValueError):
mykb.add_entity(entity="Q2", freq=5, entity_vector=[2])
def test_kb_default(nlp):
"""Test that the default (empty) KB is loaded upon construction"""
entity_linker = nlp.add_pipe("entity_linker", config={})
assert len(entity_linker.kb) == 0
with pytest.raises(ValueError, match="E139"):
# this raises an error because the KB is empty
entity_linker.validate_kb()
assert entity_linker.kb.get_size_entities() == 0
assert entity_linker.kb.get_size_aliases() == 0
# 64 is the default value from pipeline.entity_linker
assert entity_linker.kb.entity_vector_length == 64
def test_kb_custom_length(nlp):
"""Test that the default (empty) KB can be configured with a custom entity length"""
entity_linker = nlp.add_pipe("entity_linker", config={"entity_vector_length": 35})
assert len(entity_linker.kb) == 0
assert entity_linker.kb.get_size_entities() == 0
assert entity_linker.kb.get_size_aliases() == 0
assert entity_linker.kb.entity_vector_length == 35
def test_kb_initialize_empty(nlp):
"""Test that the EL can't initialize without examples"""
entity_linker = nlp.add_pipe("entity_linker")
with pytest.raises(TypeError):
entity_linker.initialize(lambda: [])
def test_kb_serialize(nlp):
"""Test serialization of the KB"""
mykb = InMemoryLookupKB(nlp.vocab, entity_vector_length=1)
with make_tempdir() as d:
# normal read-write behaviour
mykb.to_disk(d / "kb")
mykb.from_disk(d / "kb")
mykb.to_disk(d / "new" / "kb")
mykb.from_disk(d / "new" / "kb")
# allow overwriting an existing file
mykb.to_disk(d / "kb")
with pytest.raises(ValueError):
# can not read from an unknown file
mykb.from_disk(d / "unknown" / "kb")
@pytest.mark.issue(9137)
def test_kb_serialize_2(nlp):
v = [5, 6, 7, 8]
kb1 = InMemoryLookupKB(vocab=nlp.vocab, entity_vector_length=4)
kb1.set_entities(["E1"], [1], [v])
assert kb1.get_vector("E1") == v
with make_tempdir() as d:
kb1.to_disk(d / "kb")
kb2 = InMemoryLookupKB(vocab=nlp.vocab, entity_vector_length=4)
kb2.from_disk(d / "kb")
assert kb2.get_vector("E1") == v
def test_kb_set_entities(nlp):
"""Test that set_entities entirely overwrites the previous set of entities"""
v = [5, 6, 7, 8]
v1 = [1, 1, 1, 0]
v2 = [2, 2, 2, 3]
kb1 = InMemoryLookupKB(vocab=nlp.vocab, entity_vector_length=4)
kb1.set_entities(["E0"], [1], [v])
assert kb1.get_entity_strings() == ["E0"]
kb1.set_entities(["E1", "E2"], [1, 9], [v1, v2])
assert set(kb1.get_entity_strings()) == {"E1", "E2"}
assert kb1.get_vector("E1") == v1
assert kb1.get_vector("E2") == v2
with make_tempdir() as d:
kb1.to_disk(d / "kb")
kb2 = InMemoryLookupKB(vocab=nlp.vocab, entity_vector_length=4)
kb2.from_disk(d / "kb")
assert set(kb2.get_entity_strings()) == {"E1", "E2"}
assert kb2.get_vector("E1") == v1
assert kb2.get_vector("E2") == v2
def test_kb_serialize_vocab(nlp):
"""Test serialization of the KB and custom strings"""
entity = "MyFunnyID"
assert entity not in nlp.vocab.strings
mykb = InMemoryLookupKB(nlp.vocab, entity_vector_length=1)
assert not mykb.contains_entity(entity)
mykb.add_entity(entity, freq=342, entity_vector=[3])
assert mykb.contains_entity(entity)
assert entity in mykb.vocab.strings
with make_tempdir() as d:
# normal read-write behaviour
mykb.to_disk(d / "kb")
mykb_new = InMemoryLookupKB(Vocab(), entity_vector_length=1)
mykb_new.from_disk(d / "kb")
assert entity in mykb_new.vocab.strings
def test_candidate_generation(nlp):
"""Test correct candidate generation"""
mykb = InMemoryLookupKB(nlp.vocab, entity_vector_length=1)
doc = nlp("douglas adam Adam shrubbery")
douglas_ent = doc[0:1]
adam_ent = doc[1:2]
Adam_ent = doc[2:3]
shrubbery_ent = doc[3:4]
# adding entities
mykb.add_entity(entity="Q1", freq=27, entity_vector=[1])
mykb.add_entity(entity="Q2", freq=12, entity_vector=[2])
mykb.add_entity(entity="Q3", freq=5, entity_vector=[3])
# adding aliases
mykb.add_alias(alias="douglas", entities=["Q2", "Q3"], probabilities=[0.8, 0.1])
mykb.add_alias(alias="adam", entities=["Q2"], probabilities=[0.9])
# test the size of the relevant candidates
assert len(get_candidates(mykb, douglas_ent)) == 2
assert len(get_candidates(mykb, adam_ent)) == 1
assert len(get_candidates(mykb, Adam_ent)) == 0 # default case sensitive
assert len(get_candidates(mykb, shrubbery_ent)) == 0
# test the content of the candidates
assert get_candidates(mykb, adam_ent)[0].entity_ == "Q2"
assert get_candidates(mykb, adam_ent)[0].alias_ == "adam"
assert_almost_equal(get_candidates(mykb, adam_ent)[0].entity_freq, 12)
assert_almost_equal(get_candidates(mykb, adam_ent)[0].prior_prob, 0.9)
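# Illustrative sketch (not from the original suite): the default get_candidates()
# looks up a Span's verbatim text among the stored aliases, which is why "Adam"
# finds no candidates above, while KB.get_alias_candidates() takes a raw string
# and leaves any normalization to the caller. Only APIs already used in these
# tests are assumed.
def _candidate_lookup_sketch(kb: InMemoryLookupKB, span: Span):
    span_candidates = get_candidates(kb, span)  # exact, case-sensitive lookup
    string_candidates = kb.get_alias_candidates(span.text.lower())  # caller-normalized
    return span_candidates, string_candidates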
def test_el_pipe_configuration(nlp):
"""Test correct candidate generation as part of the EL pipe"""
nlp.add_pipe("sentencizer")
pattern = {"label": "PERSON", "pattern": [{"LOWER": "douglas"}]}
ruler = nlp.add_pipe("entity_ruler")
ruler.add_patterns([pattern])
def create_kb(vocab):
kb = InMemoryLookupKB(vocab, entity_vector_length=1)
kb.add_entity(entity="Q2", freq=12, entity_vector=[2])
kb.add_entity(entity="Q3", freq=5, entity_vector=[3])
kb.add_alias(alias="douglas", entities=["Q2", "Q3"], probabilities=[0.8, 0.1])
return kb
# run an EL pipe without a trained context encoder, to check the candidate generation step only
entity_linker = nlp.add_pipe("entity_linker", config={"incl_context": False})
entity_linker.set_kb(create_kb)
# With the default get_candidates function, matching is case-sensitive
text = "Douglas and douglas are not the same."
doc = nlp(text)
assert doc[0].ent_kb_id_ == "NIL"
assert doc[1].ent_kb_id_ == ""
assert doc[2].ent_kb_id_ == "Q2"
def get_lowercased_candidates(kb, span):
return kb.get_alias_candidates(span.text.lower())
def get_lowercased_candidates_batch(kb, spans):
return [get_lowercased_candidates(kb, span) for span in spans]
@registry.misc("spacy.LowercaseCandidateGenerator.v1")
def create_candidates() -> Callable[
[InMemoryLookupKB, "Span"], Iterable[Candidate]
]:
return get_lowercased_candidates
@registry.misc("spacy.LowercaseCandidateBatchGenerator.v1")
def create_candidates_batch() -> Callable[
[InMemoryLookupKB, Iterable["Span"]], Iterable[Iterable[Candidate]]
]:
return get_lowercased_candidates_batch
    # replace the pipe with a new one that uses a different candidate generator
entity_linker = nlp.replace_pipe(
"entity_linker",
"entity_linker",
config={
"incl_context": False,
"get_candidates": {"@misc": "spacy.LowercaseCandidateGenerator.v1"},
"get_candidates_batch": {
"@misc": "spacy.LowercaseCandidateBatchGenerator.v1"
},
},
)
entity_linker.set_kb(create_kb)
doc = nlp(text)
assert doc[0].ent_kb_id_ == "Q2"
assert doc[1].ent_kb_id_ == ""
assert doc[2].ent_kb_id_ == "Q2"
def test_nel_nsents(nlp):
"""Test that n_sents can be set through the configuration"""
entity_linker = nlp.add_pipe("entity_linker", config={})
assert entity_linker.n_sents == 0
entity_linker = nlp.replace_pipe(
"entity_linker", "entity_linker", config={"n_sents": 2}
)
assert entity_linker.n_sents == 2
def test_vocab_serialization(nlp):
"""Test that string information is retained across storage"""
mykb = InMemoryLookupKB(nlp.vocab, entity_vector_length=1)
# adding entities
mykb.add_entity(entity="Q1", freq=27, entity_vector=[1])
q2_hash = mykb.add_entity(entity="Q2", freq=12, entity_vector=[2])
mykb.add_entity(entity="Q3", freq=5, entity_vector=[3])
# adding aliases
mykb.add_alias(alias="douglas", entities=["Q2", "Q3"], probabilities=[0.4, 0.1])
adam_hash = mykb.add_alias(alias="adam", entities=["Q2"], probabilities=[0.9])
candidates = mykb.get_alias_candidates("adam")
assert len(candidates) == 1
assert candidates[0].entity == q2_hash
assert candidates[0].entity_ == "Q2"
assert candidates[0].alias == adam_hash
assert candidates[0].alias_ == "adam"
with make_tempdir() as d:
mykb.to_disk(d / "kb")
kb_new_vocab = InMemoryLookupKB(Vocab(), entity_vector_length=1)
kb_new_vocab.from_disk(d / "kb")
candidates = kb_new_vocab.get_alias_candidates("adam")
assert len(candidates) == 1
assert candidates[0].entity == q2_hash
assert candidates[0].entity_ == "Q2"
assert candidates[0].alias == adam_hash
assert candidates[0].alias_ == "adam"
assert kb_new_vocab.get_vector("Q2") == [2]
assert_almost_equal(kb_new_vocab.get_prior_prob("Q2", "douglas"), 0.4)
def test_append_alias(nlp):
"""Test that we can append additional alias-entity pairs"""
mykb = InMemoryLookupKB(nlp.vocab, entity_vector_length=1)
# adding entities
mykb.add_entity(entity="Q1", freq=27, entity_vector=[1])
mykb.add_entity(entity="Q2", freq=12, entity_vector=[2])
mykb.add_entity(entity="Q3", freq=5, entity_vector=[3])
# adding aliases
mykb.add_alias(alias="douglas", entities=["Q2", "Q3"], probabilities=[0.4, 0.1])
mykb.add_alias(alias="adam", entities=["Q2"], probabilities=[0.9])
# test the size of the relevant candidates
assert len(mykb.get_alias_candidates("douglas")) == 2
# append an alias
mykb.append_alias(alias="douglas", entity="Q1", prior_prob=0.2)
# test the size of the relevant candidates has been incremented
assert len(mykb.get_alias_candidates("douglas")) == 3
    # appending the same alias-entity pair again should not work (it will throw a warning)
with pytest.warns(UserWarning):
mykb.append_alias(alias="douglas", entity="Q1", prior_prob=0.3)
# test the size of the relevant candidates remained unchanged
assert len(mykb.get_alias_candidates("douglas")) == 3
@pytest.mark.filterwarnings("ignore:\\[W036")
def test_append_invalid_alias(nlp):
"""Test that append an alias will throw an error if prior probs are exceeding 1"""
mykb = InMemoryLookupKB(nlp.vocab, entity_vector_length=1)
# adding entities
mykb.add_entity(entity="Q1", freq=27, entity_vector=[1])
mykb.add_entity(entity="Q2", freq=12, entity_vector=[2])
mykb.add_entity(entity="Q3", freq=5, entity_vector=[3])
# adding aliases
mykb.add_alias(alias="douglas", entities=["Q2", "Q3"], probabilities=[0.8, 0.1])
mykb.add_alias(alias="adam", entities=["Q2"], probabilities=[0.9])
    # appending an alias should fail because the prior probabilities for "douglas" would sum to more than 1
with pytest.raises(ValueError):
mykb.append_alias(alias="douglas", entity="Q1", prior_prob=0.2)
@pytest.mark.filterwarnings("ignore:\\[W036")
def test_preserving_links_asdoc(nlp):
"""Test that Span.as_doc preserves the existing entity links"""
vector_length = 1
def create_kb(vocab):
mykb = InMemoryLookupKB(vocab, entity_vector_length=vector_length)
# adding entities
mykb.add_entity(entity="Q1", freq=19, entity_vector=[1])
mykb.add_entity(entity="Q2", freq=8, entity_vector=[1])
# adding aliases
mykb.add_alias(alias="Boston", entities=["Q1"], probabilities=[0.7])
mykb.add_alias(alias="Denver", entities=["Q2"], probabilities=[0.6])
return mykb
# set up pipeline with NER (Entity Ruler) and NEL (prior probability only, model not trained)
nlp.add_pipe("sentencizer")
patterns = [
{"label": "GPE", "pattern": "Boston"},
{"label": "GPE", "pattern": "Denver"},
]
ruler = nlp.add_pipe("entity_ruler")
ruler.add_patterns(patterns)
config = {"incl_prior": False}
entity_linker = nlp.add_pipe("entity_linker", config=config, last=True)
entity_linker.set_kb(create_kb)
nlp.initialize()
assert entity_linker.model.get_dim("nO") == vector_length
# test whether the entity links are preserved by the `as_doc()` function
text = "She lives in Boston. He lives in Denver."
doc = nlp(text)
for ent in doc.ents:
orig_text = ent.text
orig_kb_id = ent.kb_id_
sent_doc = ent.sent.as_doc()
for s_ent in sent_doc.ents:
if s_ent.text == orig_text:
assert s_ent.kb_id_ == orig_kb_id
def test_preserving_links_ents(nlp):
"""Test that doc.ents preserves KB annotations"""
text = "She lives in Boston. He lives in Denver."
doc = nlp(text)
assert len(list(doc.ents)) == 0
boston_ent = Span(doc, 3, 4, label="LOC", kb_id="Q1")
doc.ents = [boston_ent]
assert len(list(doc.ents)) == 1
assert list(doc.ents)[0].label_ == "LOC"
assert list(doc.ents)[0].kb_id_ == "Q1"
def test_preserving_links_ents_2(nlp):
"""Test that doc.ents preserves KB annotations"""
text = "She lives in Boston. He lives in Denver."
doc = nlp(text)
assert len(list(doc.ents)) == 0
loc = doc.vocab.strings.add("LOC")
q1 = doc.vocab.strings.add("Q1")
doc.ents = [(loc, q1, 3, 4)]
assert len(list(doc.ents)) == 1
assert list(doc.ents)[0].label_ == "LOC"
assert list(doc.ents)[0].kb_id_ == "Q1"
# fmt: off
TRAIN_DATA = [
("Russ Cochran captured his first major title with his son as caddie.",
{"links": {(0, 12): {"Q7381115": 0.0, "Q2146908": 1.0}},
"entities": [(0, 12, "PERSON")],
"sent_starts": [1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]}),
("Russ Cochran his reprints include EC Comics.",
{"links": {(0, 12): {"Q7381115": 1.0, "Q2146908": 0.0}},
"entities": [(0, 12, "PERSON"), (34, 43, "ART")],
"sent_starts": [1, -1, 0, 0, 0, 0, 0, 0]}),
("Russ Cochran has been publishing comic art.",
{"links": {(0, 12): {"Q7381115": 1.0, "Q2146908": 0.0}},
"entities": [(0, 12, "PERSON")],
"sent_starts": [1, -1, 0, 0, 0, 0, 0, 0]}),
("Russ Cochran was a member of University of Kentucky's golf team.",
{"links": {(0, 12): {"Q7381115": 0.0, "Q2146908": 1.0}},
"entities": [(0, 12, "PERSON"), (43, 51, "LOC")],
"sent_starts": [1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]}),
# having a blank instance shouldn't break things
("The weather is nice today.",
{"links": {}, "entities": [],
"sent_starts": [1, -1, 0, 0, 0, 0]})
]
GOLD_entities = ["Q2146908", "Q7381115", "Q7381115", "Q2146908"]
# fmt: on
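# For reference, the annotation dicts above follow the Example.from_dict format:
# "links" maps character-offset spans to {entity_id: gold probability},
# "entities" lists NER spans as (start_char, end_char, label), and "sent_starts"
# uses the SENT_START convention (1 = sentence start, -1 = explicitly not a
# start, 0 = unspecified).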
def test_overfitting_IO():
# Simple test to try and quickly overfit the NEL component - ensuring the ML models work correctly
nlp = English()
vector_length = 3
assert "Q2146908" not in nlp.vocab.strings
# Convert the texts to docs to make sure we have doc.ents set for the training examples
train_examples = []
for text, annotation in TRAIN_DATA:
doc = nlp(text)
train_examples.append(Example.from_dict(doc, annotation))
def create_kb(vocab):
        # create artificial KB - assign the same prior weight to the two Russ Cochran entities
# Q2146908 (Russ Cochran): American golfer
# Q7381115 (Russ Cochran): publisher
mykb = InMemoryLookupKB(vocab, entity_vector_length=vector_length)
mykb.add_entity(entity="Q2146908", freq=12, entity_vector=[6, -4, 3])
mykb.add_entity(entity="Q7381115", freq=12, entity_vector=[9, 1, -7])
mykb.add_alias(
alias="Russ Cochran",
entities=["Q2146908", "Q7381115"],
probabilities=[0.5, 0.5],
)
return mykb
# Create the Entity Linker component and add it to the pipeline
entity_linker = nlp.add_pipe("entity_linker", last=True)
assert isinstance(entity_linker, EntityLinker)
entity_linker.set_kb(create_kb)
assert "Q2146908" in entity_linker.vocab.strings
assert "Q2146908" in entity_linker.kb.vocab.strings
# train the NEL pipe
optimizer = nlp.initialize(get_examples=lambda: train_examples)
assert entity_linker.model.get_dim("nO") == vector_length
assert entity_linker.model.get_dim("nO") == entity_linker.kb.entity_vector_length
for i in range(50):
losses = {}
nlp.update(train_examples, sgd=optimizer, losses=losses)
assert losses["entity_linker"] < 0.001
# adding additional components that are required for the entity_linker
nlp.add_pipe("sentencizer", first=True)
# Add a custom component to recognize "Russ Cochran" as an entity for the example training data
patterns = [
{"label": "PERSON", "pattern": [{"LOWER": "russ"}, {"LOWER": "cochran"}]}
]
ruler = nlp.add_pipe("entity_ruler", before="entity_linker")
ruler.add_patterns(patterns)
# test the trained model
predictions = []
for text, annotation in TRAIN_DATA:
doc = nlp(text)
for ent in doc.ents:
predictions.append(ent.kb_id_)
assert predictions == GOLD_entities
# Also test the results are still the same after IO
with make_tempdir() as tmp_dir:
nlp.to_disk(tmp_dir)
nlp2 = util.load_model_from_path(tmp_dir)
assert nlp2.pipe_names == nlp.pipe_names
assert "Q2146908" in nlp2.vocab.strings
entity_linker2 = nlp2.get_pipe("entity_linker")
assert "Q2146908" in entity_linker2.vocab.strings
assert "Q2146908" in entity_linker2.kb.vocab.strings
predictions = []
for text, annotation in TRAIN_DATA:
doc2 = nlp2(text)
for ent in doc2.ents:
predictions.append(ent.kb_id_)
assert predictions == GOLD_entities
    # Make sure that running pipe twice, or comparing pipe to calling the pipeline directly, always amounts to the same predictions
texts = [
"Russ Cochran captured his first major title with his son as caddie.",
"Russ Cochran his reprints include EC Comics.",
"Russ Cochran has been publishing comic art.",
"Russ Cochran was a member of University of Kentucky's golf team.",
]
batch_deps_1 = [doc.to_array([ENT_KB_ID]) for doc in nlp.pipe(texts)]
batch_deps_2 = [doc.to_array([ENT_KB_ID]) for doc in nlp.pipe(texts)]
no_batch_deps = [doc.to_array([ENT_KB_ID]) for doc in [nlp(text) for text in texts]]
assert_equal(batch_deps_1, batch_deps_2)
assert_equal(batch_deps_1, no_batch_deps)
def test_kb_serialization():
# Test that the KB can be used in a pipeline with a different vocab
vector_length = 3
with make_tempdir() as tmp_dir:
kb_dir = tmp_dir / "kb"
nlp1 = English()
assert "Q2146908" not in nlp1.vocab.strings
mykb = InMemoryLookupKB(nlp1.vocab, entity_vector_length=vector_length)
mykb.add_entity(entity="Q2146908", freq=12, entity_vector=[6, -4, 3])
mykb.add_alias(alias="Russ Cochran", entities=["Q2146908"], probabilities=[0.8])
assert "Q2146908" in nlp1.vocab.strings
mykb.to_disk(kb_dir)
nlp2 = English()
assert "RandomWord" not in nlp2.vocab.strings
nlp2.vocab.strings.add("RandomWord")
assert "RandomWord" in nlp2.vocab.strings
assert "Q2146908" not in nlp2.vocab.strings
# Create the Entity Linker component with the KB from file, and check the final vocab
entity_linker = nlp2.add_pipe("entity_linker", last=True)
entity_linker.set_kb(load_kb(kb_dir))
assert "Q2146908" in nlp2.vocab.strings
assert "RandomWord" in nlp2.vocab.strings
@pytest.mark.xfail(reason="Needs fixing")
def test_kb_pickle():
# Test that the KB can be pickled
nlp = English()
kb_1 = InMemoryLookupKB(nlp.vocab, entity_vector_length=3)
kb_1.add_entity(entity="Q2146908", freq=12, entity_vector=[6, -4, 3])
assert not kb_1.contains_alias("Russ Cochran")
kb_1.add_alias(alias="Russ Cochran", entities=["Q2146908"], probabilities=[0.8])
assert kb_1.contains_alias("Russ Cochran")
data = pickle.dumps(kb_1)
kb_2 = pickle.loads(data)
assert kb_2.contains_alias("Russ Cochran")
@pytest.mark.xfail(reason="Needs fixing")
def test_nel_pickle():
# Test that a pipeline with an EL component can be pickled
def create_kb(vocab):
kb = InMemoryLookupKB(vocab, entity_vector_length=3)
kb.add_entity(entity="Q2146908", freq=12, entity_vector=[6, -4, 3])
kb.add_alias(alias="Russ Cochran", entities=["Q2146908"], probabilities=[0.8])
return kb
nlp_1 = English()
nlp_1.add_pipe("ner")
entity_linker_1 = nlp_1.add_pipe("entity_linker", last=True)
entity_linker_1.set_kb(create_kb)
assert nlp_1.pipe_names == ["ner", "entity_linker"]
assert entity_linker_1.kb.contains_alias("Russ Cochran")
data = pickle.dumps(nlp_1)
nlp_2 = pickle.loads(data)
assert nlp_2.pipe_names == ["ner", "entity_linker"]
entity_linker_2 = nlp_2.get_pipe("entity_linker")
assert entity_linker_2.kb.contains_alias("Russ Cochran")
def test_kb_to_bytes():
# Test that the KB's to_bytes method works correctly
nlp = English()
kb_1 = InMemoryLookupKB(nlp.vocab, entity_vector_length=3)
kb_1.add_entity(entity="Q2146908", freq=12, entity_vector=[6, -4, 3])
kb_1.add_entity(entity="Q66", freq=9, entity_vector=[1, 2, 3])
kb_1.add_alias(alias="Russ Cochran", entities=["Q2146908"], probabilities=[0.8])
kb_1.add_alias(alias="Boeing", entities=["Q66"], probabilities=[0.5])
kb_1.add_alias(
alias="Randomness", entities=["Q66", "Q2146908"], probabilities=[0.1, 0.2]
)
assert kb_1.contains_alias("Russ Cochran")
kb_bytes = kb_1.to_bytes()
kb_2 = InMemoryLookupKB(nlp.vocab, entity_vector_length=3)
assert not kb_2.contains_alias("Russ Cochran")
kb_2 = kb_2.from_bytes(kb_bytes)
# check that both KBs are exactly the same
assert kb_1.get_size_entities() == kb_2.get_size_entities()
assert kb_1.entity_vector_length == kb_2.entity_vector_length
assert kb_1.get_entity_strings() == kb_2.get_entity_strings()
assert kb_1.get_vector("Q2146908") == kb_2.get_vector("Q2146908")
assert kb_1.get_vector("Q66") == kb_2.get_vector("Q66")
assert kb_2.contains_alias("Russ Cochran")
assert kb_1.get_size_aliases() == kb_2.get_size_aliases()
assert kb_1.get_alias_strings() == kb_2.get_alias_strings()
assert len(kb_1.get_alias_candidates("Russ Cochran")) == len(
kb_2.get_alias_candidates("Russ Cochran")
)
assert len(kb_1.get_alias_candidates("Randomness")) == len(
kb_2.get_alias_candidates("Randomness")
)
def test_nel_to_bytes():
# Test that a pipeline with an EL component can be converted to bytes
def create_kb(vocab):
kb = InMemoryLookupKB(vocab, entity_vector_length=3)
kb.add_entity(entity="Q2146908", freq=12, entity_vector=[6, -4, 3])
kb.add_alias(alias="Russ Cochran", entities=["Q2146908"], probabilities=[0.8])
return kb
nlp_1 = English()
nlp_1.add_pipe("ner")
entity_linker_1 = nlp_1.add_pipe("entity_linker", last=True)
entity_linker_1.set_kb(create_kb)
assert entity_linker_1.kb.contains_alias("Russ Cochran")
assert nlp_1.pipe_names == ["ner", "entity_linker"]
nlp_bytes = nlp_1.to_bytes()
nlp_2 = English()
nlp_2.add_pipe("ner")
nlp_2.add_pipe("entity_linker", last=True)
assert nlp_2.pipe_names == ["ner", "entity_linker"]
assert not nlp_2.get_pipe("entity_linker").kb.contains_alias("Russ Cochran")
nlp_2 = nlp_2.from_bytes(nlp_bytes)
kb_2 = nlp_2.get_pipe("entity_linker").kb
assert kb_2.contains_alias("Russ Cochran")
assert kb_2.get_vector("Q2146908") == [6, -4, 3]
assert_almost_equal(
kb_2.get_prior_prob(entity="Q2146908", alias="Russ Cochran"), 0.8
)
def test_scorer_links():
train_examples = []
nlp = English()
ref1 = nlp("Julia lives in London happily.")
ref1.ents = [
Span(ref1, 0, 1, label="PERSON", kb_id="Q2"),
Span(ref1, 3, 4, label="LOC", kb_id="Q3"),
]
pred1 = nlp("Julia lives in London happily.")
pred1.ents = [
Span(pred1, 0, 1, label="PERSON", kb_id="Q70"),
Span(pred1, 3, 4, label="LOC", kb_id="Q3"),
]
train_examples.append(Example(pred1, ref1))
ref2 = nlp("She loves London.")
ref2.ents = [
Span(ref2, 0, 1, label="PERSON", kb_id="Q2"),
Span(ref2, 2, 3, label="LOC", kb_id="Q13"),
]
pred2 = nlp("She loves London.")
pred2.ents = [
Span(pred2, 0, 1, label="PERSON", kb_id="Q2"),
Span(pred2, 2, 3, label="LOC", kb_id="NIL"),
]
train_examples.append(Example(pred2, ref2))
ref3 = nlp("London is great.")
ref3.ents = [Span(ref3, 0, 1, label="LOC", kb_id="NIL")]
pred3 = nlp("London is great.")
pred3.ents = [Span(pred3, 0, 1, label="LOC", kb_id="NIL")]
train_examples.append(Example(pred3, ref3))
scores = Scorer().score_links(train_examples, negative_labels=["NIL"])
assert scores["nel_f_per_type"]["PERSON"]["p"] == 1 / 2
assert scores["nel_f_per_type"]["PERSON"]["r"] == 1 / 2
assert scores["nel_f_per_type"]["LOC"]["p"] == 1 / 1
assert scores["nel_f_per_type"]["LOC"]["r"] == 1 / 2
assert scores["nel_micro_p"] == 2 / 3
assert scores["nel_micro_r"] == 2 / 4
# fmt: off
@pytest.mark.parametrize(
"name,config",
[
("entity_linker", {"@architectures": "spacy.EntityLinker.v1", "tok2vec": DEFAULT_TOK2VEC_MODEL}),
("entity_linker", {"@architectures": "spacy.EntityLinker.v2", "tok2vec": DEFAULT_TOK2VEC_MODEL}),
],
)
# fmt: on
def test_legacy_architectures(name, config):
# Ensure that the legacy architectures still work
vector_length = 3
nlp = English()
train_examples = []
for text, annotation in TRAIN_DATA:
doc = nlp.make_doc(text)
train_examples.append(Example.from_dict(doc, annotation))
def create_kb(vocab):
mykb = InMemoryLookupKB(vocab, entity_vector_length=vector_length)
mykb.add_entity(entity="Q2146908", freq=12, entity_vector=[6, -4, 3])
mykb.add_entity(entity="Q7381115", freq=12, entity_vector=[9, 1, -7])
mykb.add_alias(
alias="Russ Cochran",
entities=["Q2146908", "Q7381115"],
probabilities=[0.5, 0.5],
)
return mykb
entity_linker = nlp.add_pipe(name, config={"model": config})
if config["@architectures"] == "spacy.EntityLinker.v1":
assert isinstance(entity_linker, EntityLinker_v1)
else:
assert isinstance(entity_linker, EntityLinker)
entity_linker.set_kb(create_kb)
optimizer = nlp.initialize(get_examples=lambda: train_examples)
for i in range(2):
losses = {}
nlp.update(train_examples, sgd=optimizer, losses=losses)
@pytest.mark.parametrize(
"patterns",
[
# perfect case
[{"label": "CHARACTER", "pattern": "Kirby"}],
# typo for false negative
[{"label": "PERSON", "pattern": "Korby"}],
# random stuff for false positive
[{"label": "IS", "pattern": "is"}, {"label": "COLOR", "pattern": "pink"}],
],
)
def test_no_gold_ents(patterns):
    # test that the entity linker trains on entities set by an annotating component when use_gold_ents is False
TRAIN_DATA = [
(
"Kirby is pink",
{
"links": {(0, 5): {"Q613241": 1.0}},
"entities": [(0, 5, "CHARACTER")],
"sent_starts": [1, 0, 0],
},
)
]
nlp = English()
vector_length = 3
train_examples = []
for text, annotation in TRAIN_DATA:
doc = nlp(text)
train_examples.append(Example.from_dict(doc, annotation))
# Create a ruler to mark entities
ruler = nlp.add_pipe("entity_ruler")
ruler.add_patterns(patterns)
# Apply ruler to examples. In a real pipeline this would be an annotating component.
for eg in train_examples:
eg.predicted = ruler(eg.predicted)
# Entity ruler is no longer needed (initialization below wipes out the
# patterns and causes warnings)
nlp.remove_pipe("entity_ruler")
def create_kb(vocab):
# create artificial KB
mykb = InMemoryLookupKB(vocab, entity_vector_length=vector_length)
mykb.add_entity(entity="Q613241", freq=12, entity_vector=[6, -4, 3])
mykb.add_alias("Kirby", ["Q613241"], [0.9])
# Placeholder
mykb.add_entity(entity="pink", freq=12, entity_vector=[7, 2, -5])
mykb.add_alias("pink", ["pink"], [0.9])
return mykb
# Create and train the Entity Linker
entity_linker = nlp.add_pipe(
"entity_linker", config={"use_gold_ents": False}, last=True
)
entity_linker.set_kb(create_kb)
assert entity_linker.use_gold_ents is False
optimizer = nlp.initialize(get_examples=lambda: train_examples)
for i in range(2):
losses = {}
nlp.update(train_examples, sgd=optimizer, losses=losses)
# adding additional components that are required for the entity_linker
nlp.add_pipe("sentencizer", first=True)
# this will run the pipeline on the examples and shouldn't crash
nlp.evaluate(train_examples)
@pytest.mark.issue(9575)
def test_tokenization_mismatch():
nlp = English()
# include a matching entity so that update isn't skipped
doc1 = Doc(
nlp.vocab,
words=["Kirby", "123456"],
spaces=[True, False],
ents=["B-CHARACTER", "B-CARDINAL"],
)
doc2 = Doc(
nlp.vocab,
words=["Kirby", "123", "456"],
spaces=[True, False, False],
ents=["B-CHARACTER", "B-CARDINAL", "B-CARDINAL"],
)
eg = Example(doc1, doc2)
train_examples = [eg]
vector_length = 3
def create_kb(vocab):
# create placeholder KB
mykb = InMemoryLookupKB(vocab, entity_vector_length=vector_length)
mykb.add_entity(entity="Q613241", freq=12, entity_vector=[6, -4, 3])
mykb.add_alias("Kirby", ["Q613241"], [0.9])
return mykb
entity_linker = nlp.add_pipe("entity_linker", last=True)
entity_linker.set_kb(create_kb)
optimizer = nlp.initialize(get_examples=lambda: train_examples)
for i in range(2):
losses = {}
nlp.update(train_examples, sgd=optimizer, losses=losses)
nlp.add_pipe("sentencizer", first=True)
nlp.evaluate(train_examples)
def test_abstract_kb_instantiation():
"""Test whether instantiation of abstract KB base class fails."""
with pytest.raises(TypeError):
KnowledgeBase(None, 3)
# fmt: off
@pytest.mark.parametrize(
"meet_threshold,config",
[
(False, {"@architectures": "spacy.EntityLinker.v2", "tok2vec": DEFAULT_TOK2VEC_MODEL}),
(True, {"@architectures": "spacy.EntityLinker.v2", "tok2vec": DEFAULT_TOK2VEC_MODEL}),
],
)
# fmt: on
def test_threshold(meet_threshold: bool, config: Dict[str, Any]):
"""Tests abstention threshold.
meet_threshold (bool): Whether to configure NEL setup so that confidence threshold is met.
config (Dict[str, Any]): NEL architecture config.
"""
nlp = English()
nlp.add_pipe("sentencizer")
text = "Mahler's Symphony No. 8 was beautiful."
entities = [(0, 6, "PERSON")]
links = {(0, 6): {"Q7304": 1.0}}
sent_starts = [1, -1, 0, 0, 0, 0, 0, 0, 0]
entity_id = "Q7304"
doc = nlp(text)
train_examples = [
Example.from_dict(
doc, {"entities": entities, "links": links, "sent_starts": sent_starts}
)
]
def create_kb(vocab):
# create artificial KB
mykb = InMemoryLookupKB(vocab, entity_vector_length=3)
mykb.add_entity(entity=entity_id, freq=12, entity_vector=[6, -4, 3])
mykb.add_alias(
alias="Mahler",
entities=[entity_id],
probabilities=[1 if meet_threshold else 0.01],
)
return mykb
# Create the Entity Linker component and add it to the pipeline
entity_linker = nlp.add_pipe(
"entity_linker",
last=True,
config={"threshold": 0.99, "model": config},
)
entity_linker.set_kb(create_kb) # type: ignore
nlp.initialize(get_examples=lambda: train_examples)
    # Add a custom rule-based component to mimic NER
ruler = nlp.add_pipe("entity_ruler", before="entity_linker")
ruler.add_patterns([{"label": "PERSON", "pattern": [{"LOWER": "mahler"}]}]) # type: ignore
doc = nlp(text)
assert len(doc.ents) == 1
    assert doc.ents[0].kb_id_ == (entity_id if meet_threshold else EntityLinker.NIL)
def test_span_maker_forward_with_empty():
"""The forward pass of the span maker may have a doc with no entities."""
nlp = English()
doc1 = nlp("a b c")
ent = doc1[0:1]
ent.label_ = "X"
doc1.ents = [ent]
# no entities
doc2 = nlp("x y z")
# just to get a model
span_maker = build_span_maker()
span_maker([doc1, doc2], False)
| 45,209 | 36.363636 | 115 | py |
spaCy | spaCy-master/spacy/tests/pipeline/test_entity_ruler.py | import pytest
from thinc.api import NumpyOps, get_current_ops
from spacy import registry
from spacy.errors import MatchPatternError
from spacy.lang.en import English
from spacy.language import Language
from spacy.pipeline import EntityRecognizer, EntityRuler, SpanRuler, merge_entities
from spacy.pipeline.ner import DEFAULT_NER_MODEL
from spacy.tests.util import make_tempdir
from spacy.tokens import Doc, Span
ENTITY_RULERS = ["entity_ruler", "future_entity_ruler"]
@pytest.fixture
def nlp():
return Language()
@pytest.fixture
@registry.misc("entity_ruler_patterns")
def patterns():
return [
{"label": "HELLO", "pattern": "hello world"},
{"label": "BYE", "pattern": [{"LOWER": "bye"}, {"LOWER": "bye"}]},
{"label": "HELLO", "pattern": [{"ORTH": "HELLO"}]},
{"label": "COMPLEX", "pattern": [{"ORTH": "foo", "OP": "*"}]},
{"label": "TECH_ORG", "pattern": "Apple", "id": "a1"},
{"label": "TECH_ORG", "pattern": "Microsoft", "id": "a2"},
]
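# For reference: a "pattern" may be a phrase pattern (a plain string, matched
# with the PhraseMatcher) or a token pattern (a list of token-attribute dicts,
# matched with the Matcher); an optional "id" surfaces on matched entities as
# ent_id_.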
@Language.component("add_ent")
def add_ent_component(doc):
doc.ents = [Span(doc, 0, 3, label="ORG")]
return doc
@pytest.mark.issue(3345)
@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
def test_issue3345(entity_ruler_factory):
"""Test case where preset entity crosses sentence boundary."""
nlp = English()
doc = Doc(nlp.vocab, words=["I", "live", "in", "New", "York"])
doc[4].is_sent_start = True
ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler")
ruler.add_patterns([{"label": "GPE", "pattern": "New York"}])
cfg = {"model": DEFAULT_NER_MODEL}
model = registry.resolve(cfg, validate=True)["model"]
ner = EntityRecognizer(doc.vocab, model)
# Add the OUT action. I wouldn't have thought this would be necessary...
ner.moves.add_action(5, "")
ner.add_label("GPE")
doc = ruler(doc)
# Get into the state just before "New"
state = ner.moves.init_batch([doc])[0]
ner.moves.apply_transition(state, "O")
ner.moves.apply_transition(state, "O")
ner.moves.apply_transition(state, "O")
# Check that B-GPE is valid.
assert ner.moves.is_valid(state, "B-GPE")
@pytest.mark.issue(4849)
@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
def test_issue4849(entity_ruler_factory):
nlp = English()
patterns = [
{"label": "PERSON", "pattern": "joe biden", "id": "joe-biden"},
{"label": "PERSON", "pattern": "bernie sanders", "id": "bernie-sanders"},
]
ruler = nlp.add_pipe(
entity_ruler_factory,
name="entity_ruler",
config={"phrase_matcher_attr": "LOWER"},
)
ruler.add_patterns(patterns)
text = """
The left is starting to take aim at Democratic front-runner Joe Biden.
Sen. Bernie Sanders joined in her criticism: "There is no 'middle ground' when it comes to climate policy."
"""
# USING 1 PROCESS
count_ents = 0
for doc in nlp.pipe([text], n_process=1):
count_ents += len([ent for ent in doc.ents if ent.ent_id > 0])
assert count_ents == 2
# USING 2 PROCESSES
    if isinstance(get_current_ops(), NumpyOps):
count_ents = 0
for doc in nlp.pipe([text], n_process=2):
count_ents += len([ent for ent in doc.ents if ent.ent_id > 0])
assert count_ents == 2
@pytest.mark.issue(5918)
@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
def test_issue5918(entity_ruler_factory):
# Test edge case when merging entities.
nlp = English()
ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler")
patterns = [
{"label": "ORG", "pattern": "Digicon Inc"},
{"label": "ORG", "pattern": "Rotan Mosle Inc's"},
{"label": "ORG", "pattern": "Rotan Mosle Technology Partners Ltd"},
]
ruler.add_patterns(patterns)
text = """
Digicon Inc said it has completed the previously-announced disposition
of its computer systems division to an investment group led by
Rotan Mosle Inc's Rotan Mosle Technology Partners Ltd affiliate.
"""
doc = nlp(text)
assert len(doc.ents) == 3
# make it so that the third span's head is within the entity (ent_iob=I)
# bug #5918 would wrongly transfer that I to the full entity, resulting in 2 instead of 3 final ents.
# TODO: test for logging here
# with pytest.warns(UserWarning):
# doc[29].head = doc[33]
doc = merge_entities(doc)
assert len(doc.ents) == 3
@pytest.mark.issue(8168)
@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
def test_issue8168(entity_ruler_factory):
nlp = English()
ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler")
patterns = [
{"label": "ORG", "pattern": "Apple"},
{
"label": "GPE",
"pattern": [{"LOWER": "san"}, {"LOWER": "francisco"}],
"id": "san-francisco",
},
{
"label": "GPE",
"pattern": [{"LOWER": "san"}, {"LOWER": "fran"}],
"id": "san-francisco",
},
]
ruler.add_patterns(patterns)
doc = nlp("San Francisco San Fran")
assert all(t.ent_id_ == "san-francisco" for t in doc)
@pytest.mark.issue(8216)
@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
def test_entity_ruler_fix8216(nlp, patterns, entity_ruler_factory):
"""Test that patterns don't get added excessively."""
ruler = nlp.add_pipe(
entity_ruler_factory, name="entity_ruler", config={"validate": True}
)
ruler.add_patterns(patterns)
pattern_count = sum(len(mm) for mm in ruler.matcher._patterns.values())
assert pattern_count > 0
ruler.add_patterns([])
after_count = sum(len(mm) for mm in ruler.matcher._patterns.values())
assert after_count == pattern_count
@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
def test_entity_ruler_init(nlp, patterns, entity_ruler_factory):
ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler")
ruler.add_patterns(patterns)
assert len(ruler) == len(patterns)
assert len(ruler.labels) == 4
assert "HELLO" in ruler
assert "BYE" in ruler
nlp.remove_pipe("entity_ruler")
ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler")
ruler.add_patterns(patterns)
doc = nlp("hello world bye bye")
assert len(doc.ents) == 2
assert doc.ents[0].label_ == "HELLO"
assert doc.ents[1].label_ == "BYE"
@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
def test_entity_ruler_no_patterns_warns(nlp, entity_ruler_factory):
ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler")
assert len(ruler) == 0
assert len(ruler.labels) == 0
nlp.remove_pipe("entity_ruler")
nlp.add_pipe(entity_ruler_factory, name="entity_ruler")
assert nlp.pipe_names == ["entity_ruler"]
with pytest.warns(UserWarning):
doc = nlp("hello world bye bye")
assert len(doc.ents) == 0
@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
def test_entity_ruler_init_patterns(nlp, patterns, entity_ruler_factory):
# initialize with patterns
ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler")
assert len(ruler.labels) == 0
ruler.initialize(lambda: [], patterns=patterns)
assert len(ruler.labels) == 4
doc = nlp("hello world bye bye")
assert doc.ents[0].label_ == "HELLO"
assert doc.ents[1].label_ == "BYE"
nlp.remove_pipe("entity_ruler")
# initialize with patterns from misc registry
nlp.config["initialize"]["components"]["entity_ruler"] = {
"patterns": {"@misc": "entity_ruler_patterns"}
}
ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler")
assert len(ruler.labels) == 0
nlp.initialize()
assert len(ruler.labels) == 4
doc = nlp("hello world bye bye")
assert doc.ents[0].label_ == "HELLO"
assert doc.ents[1].label_ == "BYE"
@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
def test_entity_ruler_init_clear(nlp, patterns, entity_ruler_factory):
"""Test that initialization clears patterns."""
ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler")
ruler.add_patterns(patterns)
assert len(ruler.labels) == 4
ruler.initialize(lambda: [])
assert len(ruler.labels) == 0
@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
def test_entity_ruler_clear(nlp, patterns, entity_ruler_factory):
"""Test that initialization clears patterns."""
ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler")
ruler.add_patterns(patterns)
assert len(ruler.labels) == 4
doc = nlp("hello world")
assert len(doc.ents) == 1
ruler.clear()
assert len(ruler.labels) == 0
with pytest.warns(UserWarning):
doc = nlp("hello world")
assert len(doc.ents) == 0
@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
def test_entity_ruler_existing(nlp, patterns, entity_ruler_factory):
ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler")
ruler.add_patterns(patterns)
nlp.add_pipe("add_ent", before="entity_ruler")
doc = nlp("OH HELLO WORLD bye bye")
assert len(doc.ents) == 2
assert doc.ents[0].label_ == "ORG"
assert doc.ents[1].label_ == "BYE"
@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
def test_entity_ruler_existing_overwrite(nlp, patterns, entity_ruler_factory):
ruler = nlp.add_pipe(
entity_ruler_factory, name="entity_ruler", config={"overwrite_ents": True}
)
ruler.add_patterns(patterns)
nlp.add_pipe("add_ent", before="entity_ruler")
doc = nlp("OH HELLO WORLD bye bye")
assert len(doc.ents) == 2
assert doc.ents[0].label_ == "HELLO"
assert doc.ents[0].text == "HELLO"
assert doc.ents[1].label_ == "BYE"
@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
def test_entity_ruler_existing_complex(nlp, patterns, entity_ruler_factory):
ruler = nlp.add_pipe(
entity_ruler_factory, name="entity_ruler", config={"overwrite_ents": True}
)
ruler.add_patterns(patterns)
nlp.add_pipe("add_ent", before="entity_ruler")
doc = nlp("foo foo bye bye")
assert len(doc.ents) == 2
assert doc.ents[0].label_ == "COMPLEX"
assert doc.ents[1].label_ == "BYE"
assert len(doc.ents[0]) == 2
assert len(doc.ents[1]) == 2
@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
def test_entity_ruler_entity_id(nlp, patterns, entity_ruler_factory):
ruler = nlp.add_pipe(
entity_ruler_factory, name="entity_ruler", config={"overwrite_ents": True}
)
ruler.add_patterns(patterns)
doc = nlp("Apple is a technology company")
assert len(doc.ents) == 1
assert doc.ents[0].label_ == "TECH_ORG"
assert doc.ents[0].ent_id_ == "a1"
@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
def test_entity_ruler_cfg_ent_id_sep(nlp, patterns, entity_ruler_factory):
config = {"overwrite_ents": True, "ent_id_sep": "**"}
ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler", config=config)
ruler.add_patterns(patterns)
doc = nlp("Apple is a technology company")
if isinstance(ruler, EntityRuler):
assert "TECH_ORG**a1" in ruler.phrase_patterns
assert len(doc.ents) == 1
assert doc.ents[0].label_ == "TECH_ORG"
assert doc.ents[0].ent_id_ == "a1"
@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
def test_entity_ruler_serialize_bytes(nlp, patterns, entity_ruler_factory):
ruler = EntityRuler(nlp, patterns=patterns)
assert len(ruler) == len(patterns)
assert len(ruler.labels) == 4
ruler_bytes = ruler.to_bytes()
new_ruler = EntityRuler(nlp)
assert len(new_ruler) == 0
assert len(new_ruler.labels) == 0
new_ruler = new_ruler.from_bytes(ruler_bytes)
assert len(new_ruler) == len(patterns)
assert len(new_ruler.labels) == 4
assert len(new_ruler.patterns) == len(ruler.patterns)
for pattern in ruler.patterns:
assert pattern in new_ruler.patterns
assert sorted(new_ruler.labels) == sorted(ruler.labels)
@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
def test_entity_ruler_serialize_phrase_matcher_attr_bytes(
nlp, patterns, entity_ruler_factory
):
ruler = EntityRuler(nlp, phrase_matcher_attr="LOWER", patterns=patterns)
assert len(ruler) == len(patterns)
assert len(ruler.labels) == 4
ruler_bytes = ruler.to_bytes()
new_ruler = EntityRuler(nlp)
assert len(new_ruler) == 0
assert len(new_ruler.labels) == 0
assert new_ruler.phrase_matcher_attr is None
new_ruler = new_ruler.from_bytes(ruler_bytes)
assert len(new_ruler) == len(patterns)
assert len(new_ruler.labels) == 4
assert new_ruler.phrase_matcher_attr == "LOWER"
@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
def test_entity_ruler_validate(nlp, entity_ruler_factory):
ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler")
validated_ruler = EntityRuler(nlp, validate=True)
valid_pattern = {"label": "HELLO", "pattern": [{"LOWER": "HELLO"}]}
invalid_pattern = {"label": "HELLO", "pattern": [{"ASDF": "HELLO"}]}
# invalid pattern raises error without validate
with pytest.raises(ValueError):
ruler.add_patterns([invalid_pattern])
# valid pattern is added without errors with validate
validated_ruler.add_patterns([valid_pattern])
# invalid pattern raises error with validate
with pytest.raises(MatchPatternError):
validated_ruler.add_patterns([invalid_pattern])
@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
def test_entity_ruler_properties(nlp, patterns, entity_ruler_factory):
ruler = EntityRuler(nlp, patterns=patterns, overwrite_ents=True)
assert sorted(ruler.labels) == sorted(["HELLO", "BYE", "COMPLEX", "TECH_ORG"])
assert sorted(ruler.ent_ids) == ["a1", "a2"]
@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
def test_entity_ruler_overlapping_spans(nlp, entity_ruler_factory):
ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler")
patterns = [
{"label": "FOOBAR", "pattern": "foo bar"},
{"label": "BARBAZ", "pattern": "bar baz"},
]
ruler.add_patterns(patterns)
doc = nlp("foo bar baz")
assert len(doc.ents) == 1
assert doc.ents[0].label_ == "FOOBAR"
@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
def test_entity_ruler_fuzzy_pipe(nlp, entity_ruler_factory):
ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler")
patterns = [{"label": "HELLO", "pattern": [{"LOWER": {"FUZZY": "hello"}}]}]
ruler.add_patterns(patterns)
doc = nlp("helloo")
assert len(doc.ents) == 1
assert doc.ents[0].label_ == "HELLO"
@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
def test_entity_ruler_fuzzy(nlp, entity_ruler_factory):
ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler")
patterns = [{"label": "HELLO", "pattern": [{"LOWER": {"FUZZY": "hello"}}]}]
ruler.add_patterns(patterns)
doc = nlp("helloo")
assert len(doc.ents) == 1
assert doc.ents[0].label_ == "HELLO"
@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
def test_entity_ruler_fuzzy_disabled(nlp, entity_ruler_factory):
@registry.misc("test_fuzzy_compare_disabled")
def make_test_fuzzy_compare_disabled():
return lambda x, y, z: False
ruler = nlp.add_pipe(
entity_ruler_factory,
name="entity_ruler",
config={"matcher_fuzzy_compare": {"@misc": "test_fuzzy_compare_disabled"}},
)
patterns = [{"label": "HELLO", "pattern": [{"LOWER": {"FUZZY": "hello"}}]}]
ruler.add_patterns(patterns)
doc = nlp("helloo")
assert len(doc.ents) == 0
@pytest.mark.parametrize("n_process", [1, 2])
@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
def test_entity_ruler_multiprocessing(nlp, n_process, entity_ruler_factory):
    if isinstance(get_current_ops(), NumpyOps) or n_process < 2:
texts = ["I enjoy eating Pizza Hut pizza."]
patterns = [{"label": "FASTFOOD", "pattern": "Pizza Hut", "id": "1234"}]
ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler")
ruler.add_patterns(patterns)
        for doc in nlp.pipe(texts, n_process=n_process):
for ent in doc.ents:
assert ent.ent_id_ == "1234"
@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
def test_entity_ruler_serialize_jsonl(nlp, patterns, entity_ruler_factory):
ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler")
ruler.add_patterns(patterns)
with make_tempdir() as d:
ruler.to_disk(d / "test_ruler.jsonl")
ruler.from_disk(d / "test_ruler.jsonl") # read from an existing jsonl file
with pytest.raises(ValueError):
ruler.from_disk(d / "non_existing.jsonl") # read from a bad jsonl file
@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
def test_entity_ruler_serialize_dir(nlp, patterns, entity_ruler_factory):
ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler")
ruler.add_patterns(patterns)
with make_tempdir() as d:
ruler.to_disk(d / "test_ruler")
ruler.from_disk(d / "test_ruler") # read from an existing directory
with pytest.raises(ValueError):
ruler.from_disk(d / "non_existing_dir") # read from a bad directory
@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
def test_entity_ruler_remove_basic(nlp, entity_ruler_factory):
ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler")
patterns = [
{"label": "PERSON", "pattern": "Dina", "id": "dina"},
{"label": "ORG", "pattern": "ACME", "id": "acme"},
{"label": "ORG", "pattern": "ACM"},
]
ruler.add_patterns(patterns)
doc = nlp("Dina went to school")
assert len(ruler.patterns) == 3
assert len(doc.ents) == 1
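    # the EntityRuler stores phrase patterns under "label||id" keys in its phrase matcher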
if isinstance(ruler, EntityRuler):
assert "PERSON||dina" in ruler.phrase_matcher
assert doc.ents[0].label_ == "PERSON"
assert doc.ents[0].text == "Dina"
if isinstance(ruler, EntityRuler):
ruler.remove("dina")
else:
ruler.remove_by_id("dina")
doc = nlp("Dina went to school")
assert len(doc.ents) == 0
if isinstance(ruler, EntityRuler):
assert "PERSON||dina" not in ruler.phrase_matcher
assert len(ruler.patterns) == 2
@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
def test_entity_ruler_remove_same_id_multiple_patterns(nlp, entity_ruler_factory):
ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler")
patterns = [
{"label": "PERSON", "pattern": "Dina", "id": "dina"},
{"label": "ORG", "pattern": "DinaCorp", "id": "dina"},
{"label": "ORG", "pattern": "ACME", "id": "acme"},
]
ruler.add_patterns(patterns)
doc = nlp("Dina founded DinaCorp and ACME.")
assert len(ruler.patterns) == 3
if isinstance(ruler, EntityRuler):
assert "PERSON||dina" in ruler.phrase_matcher
assert "ORG||dina" in ruler.phrase_matcher
assert len(doc.ents) == 3
if isinstance(ruler, EntityRuler):
ruler.remove("dina")
else:
ruler.remove_by_id("dina")
doc = nlp("Dina founded DinaCorp and ACME.")
assert len(ruler.patterns) == 1
if isinstance(ruler, EntityRuler):
assert "PERSON||dina" not in ruler.phrase_matcher
assert "ORG||dina" not in ruler.phrase_matcher
assert len(doc.ents) == 1
@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
def test_entity_ruler_remove_nonexisting_pattern(nlp, entity_ruler_factory):
ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler")
patterns = [
{"label": "PERSON", "pattern": "Dina", "id": "dina"},
{"label": "ORG", "pattern": "ACME", "id": "acme"},
{"label": "ORG", "pattern": "ACM"},
]
ruler.add_patterns(patterns)
assert len(ruler.patterns) == 3
with pytest.raises(ValueError):
ruler.remove("nepattern")
if isinstance(ruler, SpanRuler):
with pytest.raises(ValueError):
ruler.remove_by_id("nepattern")
@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
def test_entity_ruler_remove_several_patterns(nlp, entity_ruler_factory):
ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler")
patterns = [
{"label": "PERSON", "pattern": "Dina", "id": "dina"},
{"label": "ORG", "pattern": "ACME", "id": "acme"},
{"label": "ORG", "pattern": "ACM"},
]
ruler.add_patterns(patterns)
doc = nlp("Dina founded her company ACME.")
assert len(ruler.patterns) == 3
assert len(doc.ents) == 2
assert doc.ents[0].label_ == "PERSON"
assert doc.ents[0].text == "Dina"
assert doc.ents[1].label_ == "ORG"
assert doc.ents[1].text == "ACME"
if isinstance(ruler, EntityRuler):
ruler.remove("dina")
else:
ruler.remove_by_id("dina")
doc = nlp("Dina founded her company ACME")
assert len(ruler.patterns) == 2
assert len(doc.ents) == 1
assert doc.ents[0].label_ == "ORG"
assert doc.ents[0].text == "ACME"
if isinstance(ruler, EntityRuler):
ruler.remove("acme")
else:
ruler.remove_by_id("acme")
doc = nlp("Dina founded her company ACME")
assert len(ruler.patterns) == 1
assert len(doc.ents) == 0
@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
def test_entity_ruler_remove_patterns_in_a_row(nlp, entity_ruler_factory):
ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler")
patterns = [
{"label": "PERSON", "pattern": "Dina", "id": "dina"},
{"label": "ORG", "pattern": "ACME", "id": "acme"},
{"label": "DATE", "pattern": "her birthday", "id": "bday"},
{"label": "ORG", "pattern": "ACM"},
]
ruler.add_patterns(patterns)
doc = nlp("Dina founded her company ACME on her birthday")
assert len(doc.ents) == 3
assert doc.ents[0].label_ == "PERSON"
assert doc.ents[0].text == "Dina"
assert doc.ents[1].label_ == "ORG"
assert doc.ents[1].text == "ACME"
assert doc.ents[2].label_ == "DATE"
assert doc.ents[2].text == "her birthday"
if isinstance(ruler, EntityRuler):
ruler.remove("dina")
ruler.remove("acme")
ruler.remove("bday")
else:
ruler.remove_by_id("dina")
ruler.remove_by_id("acme")
ruler.remove_by_id("bday")
doc = nlp("Dina went to school")
assert len(doc.ents) == 0
@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
def test_entity_ruler_remove_all_patterns(nlp, entity_ruler_factory):
ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler")
patterns = [
{"label": "PERSON", "pattern": "Dina", "id": "dina"},
{"label": "ORG", "pattern": "ACME", "id": "acme"},
{"label": "DATE", "pattern": "her birthday", "id": "bday"},
]
ruler.add_patterns(patterns)
assert len(ruler.patterns) == 3
if isinstance(ruler, EntityRuler):
ruler.remove("dina")
else:
ruler.remove_by_id("dina")
assert len(ruler.patterns) == 2
if isinstance(ruler, EntityRuler):
ruler.remove("acme")
else:
ruler.remove_by_id("acme")
assert len(ruler.patterns) == 1
if isinstance(ruler, EntityRuler):
ruler.remove("bday")
else:
ruler.remove_by_id("bday")
assert len(ruler.patterns) == 0
with pytest.warns(UserWarning):
doc = nlp("Dina founded her company ACME on her birthday")
assert len(doc.ents) == 0
@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
def test_entity_ruler_remove_and_add(nlp, entity_ruler_factory):
ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler")
patterns = [{"label": "DATE", "pattern": "last time"}]
ruler.add_patterns(patterns)
doc = ruler(
nlp.make_doc("I saw him last time we met, this time he brought some flowers")
)
assert len(ruler.patterns) == 1
assert len(doc.ents) == 1
assert doc.ents[0].label_ == "DATE"
assert doc.ents[0].text == "last time"
patterns1 = [{"label": "DATE", "pattern": "this time", "id": "ttime"}]
ruler.add_patterns(patterns1)
doc = ruler(
nlp.make_doc("I saw him last time we met, this time he brought some flowers")
)
assert len(ruler.patterns) == 2
assert len(doc.ents) == 2
assert doc.ents[0].label_ == "DATE"
assert doc.ents[0].text == "last time"
assert doc.ents[1].label_ == "DATE"
assert doc.ents[1].text == "this time"
if isinstance(ruler, EntityRuler):
ruler.remove("ttime")
else:
ruler.remove_by_id("ttime")
doc = ruler(
nlp.make_doc("I saw him last time we met, this time he brought some flowers")
)
assert len(ruler.patterns) == 1
assert len(doc.ents) == 1
assert doc.ents[0].label_ == "DATE"
assert doc.ents[0].text == "last time"
ruler.add_patterns(patterns1)
doc = ruler(
nlp.make_doc("I saw him last time we met, this time he brought some flowers")
)
assert len(ruler.patterns) == 2
assert len(doc.ents) == 2
patterns2 = [{"label": "DATE", "pattern": "another time", "id": "ttime"}]
ruler.add_patterns(patterns2)
doc = ruler(
nlp.make_doc(
"I saw him last time we met, this time he brought some flowers, another time some chocolate."
)
)
assert len(ruler.patterns) == 3
assert len(doc.ents) == 3
if isinstance(ruler, EntityRuler):
ruler.remove("ttime")
else:
ruler.remove_by_id("ttime")
doc = ruler(
nlp.make_doc(
"I saw him last time we met, this time he brought some flowers, another time some chocolate."
)
)
assert len(ruler.patterns) == 1
assert len(doc.ents) == 1
| 25,896 | 36.916545 | 111 | py |
spaCy | spaCy-master/spacy/tests/pipeline/test_functions.py | import pytest
from spacy.language import Language
from spacy.pipeline.functions import merge_subtokens
from spacy.tokens import Doc, Span
from ..doc.test_underscore import clean_underscore # noqa: F401
@pytest.fixture
def doc(en_vocab):
# fmt: off
words = ["This", "is", "a", "sentence", ".", "This", "is", "another", "sentence", ".", "And", "a", "third", "."]
heads = [1, 1, 3, 1, 1, 6, 6, 8, 6, 6, 11, 12, 13, 13]
deps = ["nsubj", "ROOT", "subtok", "attr", "punct", "nsubj", "ROOT",
"subtok", "attr", "punct", "subtok", "subtok", "subtok", "ROOT"]
# fmt: on
return Doc(en_vocab, words=words, heads=heads, deps=deps)
@pytest.fixture
def doc2(en_vocab):
words = ["I", "like", "New", "York", "in", "Autumn", "."]
heads = [1, 1, 3, 1, 1, 4, 1]
tags = ["PRP", "IN", "NNP", "NNP", "IN", "NNP", "."]
pos = ["PRON", "VERB", "PROPN", "PROPN", "ADP", "PROPN", "PUNCT"]
deps = ["ROOT", "prep", "compound", "pobj", "prep", "pobj", "punct"]
doc = Doc(en_vocab, words=words, heads=heads, tags=tags, pos=pos, deps=deps)
doc.ents = [Span(doc, 2, 4, label="GPE")]
return doc
def test_merge_subtokens(doc):
doc = merge_subtokens(doc)
# Doc doesn't have spaces, so the result is "And a third ."
# fmt: off
assert [t.text for t in doc] == ["This", "is", "a sentence", ".", "This", "is", "another sentence", ".", "And a third ."]
# fmt: on
def test_factories_merge_noun_chunks(doc2):
assert len(doc2) == 7
nlp = Language()
merge_noun_chunks = nlp.create_pipe("merge_noun_chunks")
merge_noun_chunks(doc2)
assert len(doc2) == 6
assert doc2[2].text == "New York"
def test_factories_merge_ents(doc2):
assert len(doc2) == 7
assert len(list(doc2.ents)) == 1
nlp = Language()
merge_entities = nlp.create_pipe("merge_entities")
merge_entities(doc2)
assert len(doc2) == 6
assert len(list(doc2.ents)) == 1
assert doc2[2].text == "New York"
def test_token_splitter():
nlp = Language()
config = {"min_length": 20, "split_length": 5}
token_splitter = nlp.add_pipe("token_splitter", config=config)
doc = nlp("aaaaabbbbbcccccdddd e f g")
assert [t.text for t in doc] == ["aaaaabbbbbcccccdddd", "e", "f", "g"]
doc = nlp("aaaaabbbbbcccccdddddeeeeeff g h i")
assert [t.text for t in doc] == [
"aaaaa",
"bbbbb",
"ccccc",
"ddddd",
"eeeee",
"ff",
"g",
"h",
"i",
]
assert all(len(t.text) <= token_splitter.split_length for t in doc)
@pytest.mark.usefixtures("clean_underscore")
def test_factories_doc_cleaner():
nlp = Language()
nlp.add_pipe("doc_cleaner")
doc = nlp.make_doc("text")
doc.tensor = [1, 2, 3]
doc = nlp(doc)
assert doc.tensor is None
nlp = Language()
nlp.add_pipe("doc_cleaner", config={"silent": False})
with pytest.warns(UserWarning):
doc = nlp("text")
Doc.set_extension("test_attr", default=-1)
nlp = Language()
nlp.add_pipe("doc_cleaner", config={"attrs": {"_.test_attr": 0}})
doc = nlp.make_doc("text")
doc._.test_attr = 100
doc = nlp(doc)
assert doc._.test_attr == 0
| 3,189 | 29.970874 | 125 | py |
spaCy | spaCy-master/spacy/tests/pipeline/test_initialize.py | import pytest
from pydantic import StrictBool
from thinc.api import ConfigValidationError
from spacy.lang.en import English
from spacy.language import Language
from spacy.training import Example
def test_initialize_arguments():
name = "test_initialize_arguments"
class CustomTokenizer:
def __init__(self, tokenizer):
self.tokenizer = tokenizer
self.from_initialize = None
def __call__(self, text):
return self.tokenizer(text)
def initialize(self, get_examples, nlp, custom: int):
self.from_initialize = custom
class Component:
def __init__(self):
self.from_initialize = None
def initialize(
self, get_examples, nlp, custom1: str, custom2: StrictBool = False
):
self.from_initialize = (custom1, custom2)
Language.factory(name, func=lambda nlp, name: Component())
nlp = English()
nlp.tokenizer = CustomTokenizer(nlp.tokenizer)
example = Example.from_dict(nlp("x"), {})
get_examples = lambda: [example]
nlp.add_pipe(name)
# The settings here will typically come from the [initialize] block
init_cfg = {"tokenizer": {"custom": 1}, "components": {name: {}}}
nlp.config["initialize"].update(init_cfg)
with pytest.raises(ConfigValidationError) as e:
# Empty config for component, no required custom1 argument
nlp.initialize(get_examples)
errors = e.value.errors
assert len(errors) == 1
assert errors[0]["loc"] == ("custom1",)
assert errors[0]["type"] == "value_error.missing"
init_cfg = {
"tokenizer": {"custom": 1},
"components": {name: {"custom1": "x", "custom2": 1}},
}
nlp.config["initialize"].update(init_cfg)
with pytest.raises(ConfigValidationError) as e:
# Wrong type of custom 2
nlp.initialize(get_examples)
errors = e.value.errors
assert len(errors) == 1
assert errors[0]["loc"] == ("custom2",)
assert errors[0]["type"] == "value_error.strictbool"
init_cfg = {
"tokenizer": {"custom": 1},
"components": {name: {"custom1": "x"}},
}
nlp.config["initialize"].update(init_cfg)
nlp.initialize(get_examples)
assert nlp.tokenizer.from_initialize == 1
pipe = nlp.get_pipe(name)
assert pipe.from_initialize == ("x", False)
| 2,351 | 32.126761 | 78 | py |
spaCy | spaCy-master/spacy/tests/pipeline/test_lemmatizer.py | import pickle
import pytest
from spacy import registry, util
from spacy.lang.en import English
from spacy.lookups import Lookups
from ..util import make_tempdir
@pytest.fixture
def nlp():
@registry.misc("cope_lookups")
def cope_lookups():
lookups = Lookups()
lookups.add_table("lemma_lookup", {"cope": "cope", "coped": "cope"})
lookups.add_table("lemma_index", {"verb": ("cope", "cop")})
lookups.add_table("lemma_exc", {"verb": {"coping": ("cope",)}})
lookups.add_table("lemma_rules", {"verb": [["ing", ""]]})
return lookups
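    # expose the lookups to the lemmatizer through the [initialize] block below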
nlp = English()
nlp.config["initialize"]["components"]["lemmatizer"] = {
"lookups": {"@misc": "cope_lookups"}
}
return nlp
def test_lemmatizer_init(nlp):
lemmatizer = nlp.add_pipe("lemmatizer", config={"mode": "lookup"})
assert isinstance(lemmatizer.lookups, Lookups)
assert not lemmatizer.lookups.tables
assert lemmatizer.mode == "lookup"
with pytest.raises(ValueError):
nlp("test")
nlp.initialize()
assert lemmatizer.lookups.tables
assert nlp("cope")[0].lemma_ == "cope"
assert nlp("coped")[0].lemma_ == "cope"
# replace any tables from spacy-lookups-data
lemmatizer.lookups = Lookups()
# lookup with no tables sets text as lemma
assert nlp("cope")[0].lemma_ == "cope"
assert nlp("coped")[0].lemma_ == "coped"
nlp.remove_pipe("lemmatizer")
lemmatizer = nlp.add_pipe("lemmatizer", config={"mode": "lookup"})
with pytest.raises(ValueError):
# Can't initialize without required tables
lemmatizer.initialize(lookups=Lookups())
lookups = Lookups()
lookups.add_table("lemma_lookup", {})
lemmatizer.initialize(lookups=lookups)
def test_lemmatizer_config(nlp):
lemmatizer = nlp.add_pipe("lemmatizer", config={"mode": "rule"})
nlp.initialize()
# warning if no POS assigned
doc = nlp.make_doc("coping")
with pytest.warns(UserWarning):
doc = lemmatizer(doc)
# warns once by default
doc = lemmatizer(doc)
# works with POS
doc = nlp.make_doc("coping")
assert doc[0].lemma_ == ""
doc[0].pos_ = "VERB"
doc = lemmatizer(doc)
doc = lemmatizer(doc)
assert doc[0].text == "coping"
assert doc[0].lemma_ == "cope"
doc = nlp.make_doc("coping")
doc[0].pos_ = "VERB"
assert doc[0].lemma_ == ""
doc = lemmatizer(doc)
assert doc[0].text == "coping"
assert doc[0].lemma_ == "cope"
def test_lemmatizer_serialize(nlp):
lemmatizer = nlp.add_pipe("lemmatizer", config={"mode": "rule"})
nlp.initialize()
def cope_lookups():
lookups = Lookups()
lookups.add_table("lemma_lookup", {"cope": "cope", "coped": "cope"})
lookups.add_table("lemma_index", {"verb": ("cope", "cop")})
lookups.add_table("lemma_exc", {"verb": {"coping": ("cope",)}})
lookups.add_table("lemma_rules", {"verb": [["ing", ""]]})
return lookups
nlp2 = English()
lemmatizer2 = nlp2.add_pipe("lemmatizer", config={"mode": "rule"})
lemmatizer2.initialize(lookups=cope_lookups())
lemmatizer2.from_bytes(lemmatizer.to_bytes())
assert lemmatizer.to_bytes() == lemmatizer2.to_bytes()
assert lemmatizer.lookups.tables == lemmatizer2.lookups.tables
# Also test the results are still the same after IO
with make_tempdir() as tmp_dir:
nlp.to_disk(tmp_dir)
nlp2 = util.load_model_from_path(tmp_dir)
doc2 = nlp2.make_doc("coping")
doc2[0].pos_ = "VERB"
assert doc2[0].lemma_ == ""
doc2 = lemmatizer2(doc2)
assert doc2[0].text == "coping"
assert doc2[0].lemma_ == "cope"
# Make sure that lemmatizer cache can be pickled
pickle.dumps(lemmatizer2)
| 3,727 | 31.137931 | 76 | py |
spaCy | spaCy-master/spacy/tests/pipeline/test_models.py | from typing import List
import numpy
import pytest
from numpy.testing import assert_almost_equal
from thinc.api import Model, data_validation, get_current_ops
from thinc.types import Array2d, Ragged
from spacy.lang.en import English
from spacy.ml import FeatureExtractor, StaticVectors
from spacy.ml._character_embed import CharacterEmbed
from spacy.tokens import Doc
from spacy.vocab import Vocab
OPS = get_current_ops()
texts = ["These are 4 words", "Here just three"]
l0 = [[1, 2], [3, 4], [5, 6], [7, 8]]
l1 = [[9, 8], [7, 6], [5, 4]]
list_floats = [OPS.xp.asarray(l0, dtype="f"), OPS.xp.asarray(l1, dtype="f")]
list_ints = [OPS.xp.asarray(l0, dtype="i"), OPS.xp.asarray(l1, dtype="i")]
array = OPS.xp.asarray(l1, dtype="f")
ragged = Ragged(array, OPS.xp.asarray([2, 1], dtype="i"))
def get_docs():
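    # give every word a (random) vector so StaticVectors has embeddings to look up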
vocab = Vocab()
for t in texts:
for word in t.split():
hash_id = vocab.strings.add(word)
vector = numpy.random.uniform(-1, 1, (7,))
vocab.set_vector(hash_id, vector)
docs = [English(vocab)(t) for t in texts]
return docs
# Test components with a model of type Model[List[Doc], List[Floats2d]]
@pytest.mark.parametrize("name", ["tagger", "tok2vec", "morphologizer", "senter"])
def test_components_batching_list(name):
nlp = English()
proc = nlp.create_pipe(name)
util_batch_unbatch_docs_list(proc.model, get_docs(), list_floats)
# Test components with a model of type Model[List[Doc], Floats2d]
@pytest.mark.parametrize("name", ["textcat"])
def test_components_batching_array(name):
nlp = English()
proc = nlp.create_pipe(name)
util_batch_unbatch_docs_array(proc.model, get_docs(), array)
LAYERS = [
(CharacterEmbed(nM=5, nC=3), get_docs(), list_floats),
(FeatureExtractor([100, 200]), get_docs(), list_ints),
(StaticVectors(), get_docs(), ragged),
]
@pytest.mark.parametrize("model,in_data,out_data", LAYERS)
def test_layers_batching_all(model, in_data, out_data):
# In = List[Doc]
if isinstance(in_data, list) and isinstance(in_data[0], Doc):
if isinstance(out_data, OPS.xp.ndarray) and out_data.ndim == 2:
util_batch_unbatch_docs_array(model, in_data, out_data)
elif (
isinstance(out_data, list)
and isinstance(out_data[0], OPS.xp.ndarray)
and out_data[0].ndim == 2
):
util_batch_unbatch_docs_list(model, in_data, out_data)
elif isinstance(out_data, Ragged):
util_batch_unbatch_docs_ragged(model, in_data, out_data)
def util_batch_unbatch_docs_list(
model: Model[List[Doc], List[Array2d]], in_data: List[Doc], out_data: List[Array2d]
):
with data_validation(True):
model.initialize(in_data, out_data)
Y_batched = model.predict(in_data)
Y_not_batched = [model.predict([u])[0] for u in in_data]
for i in range(len(Y_batched)):
assert_almost_equal(
OPS.to_numpy(Y_batched[i]), OPS.to_numpy(Y_not_batched[i]), decimal=4
)
def util_batch_unbatch_docs_array(
model: Model[List[Doc], Array2d], in_data: List[Doc], out_data: Array2d
):
with data_validation(True):
model.initialize(in_data, out_data)
Y_batched = model.predict(in_data).tolist()
Y_not_batched = [model.predict([u])[0].tolist() for u in in_data]
assert_almost_equal(Y_batched, Y_not_batched, decimal=4)
def util_batch_unbatch_docs_ragged(
model: Model[List[Doc], Ragged], in_data: List[Doc], out_data: Ragged
):
with data_validation(True):
model.initialize(in_data, out_data)
Y_batched = model.predict(in_data).data.tolist()
Y_not_batched = []
for u in in_data:
Y_not_batched.extend(model.predict([u]).data.tolist())
assert_almost_equal(Y_batched, Y_not_batched, decimal=4)
| 3,842 | 34.256881 | 87 | py |
spaCy | spaCy-master/spacy/tests/pipeline/test_morphologizer.py | import pytest
from numpy.testing import assert_almost_equal, assert_equal
from thinc.api import get_current_ops
from spacy import util
from spacy.attrs import MORPH
from spacy.lang.en import English
from spacy.language import Language
from spacy.morphology import Morphology
from spacy.tests.util import make_tempdir
from spacy.tokens import Doc
from spacy.training import Example
def test_label_types():
nlp = Language()
morphologizer = nlp.add_pipe("morphologizer")
morphologizer.add_label("Feat=A")
with pytest.raises(ValueError):
morphologizer.add_label(9)
TAGS = ["Feat=N", "Feat=V", "Feat=J"]
TRAIN_DATA = [
(
"I like green eggs",
{
"morphs": ["Feat=N", "Feat=V", "Feat=J", "Feat=N"],
"pos": ["NOUN", "VERB", "ADJ", "NOUN"],
},
),
# test combinations of morph+POS
("Eat blue ham", {"morphs": ["Feat=V", "", ""], "pos": ["", "ADJ", ""]}),
]
def test_label_smoothing():
nlp = Language()
morph_no_ls = nlp.add_pipe("morphologizer", "no_label_smoothing")
morph_ls = nlp.add_pipe(
"morphologizer", "label_smoothing", config=dict(label_smoothing=0.05)
)
train_examples = []
losses = {}
for tag in TAGS:
morph_no_ls.add_label(tag)
morph_ls.add_label(tag)
for t in TRAIN_DATA:
train_examples.append(Example.from_dict(nlp.make_doc(t[0]), t[1]))
nlp.initialize(get_examples=lambda: train_examples)
tag_scores, bp_tag_scores = morph_ls.model.begin_update(
[eg.predicted for eg in train_examples]
)
ops = get_current_ops()
no_ls_grads = ops.to_numpy(morph_no_ls.get_loss(train_examples, tag_scores)[1][0])
ls_grads = ops.to_numpy(morph_ls.get_loss(train_examples, tag_scores)[1][0])
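    # with label smoothing enabled, the gradients should be scaled down relative to the unsmoothed pipe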
assert_almost_equal(ls_grads / no_ls_grads, 0.94285715)
def test_no_label():
nlp = Language()
nlp.add_pipe("morphologizer")
with pytest.raises(ValueError):
nlp.initialize()
def test_implicit_label():
nlp = Language()
nlp.add_pipe("morphologizer")
train_examples = []
for t in TRAIN_DATA:
train_examples.append(Example.from_dict(nlp.make_doc(t[0]), t[1]))
nlp.initialize(get_examples=lambda: train_examples)
def test_no_resize():
nlp = Language()
morphologizer = nlp.add_pipe("morphologizer")
morphologizer.add_label("POS" + Morphology.FIELD_SEP + "NOUN")
morphologizer.add_label("POS" + Morphology.FIELD_SEP + "VERB")
nlp.initialize()
# this throws an error because the morphologizer can't be resized after initialization
with pytest.raises(ValueError):
morphologizer.add_label("POS" + Morphology.FIELD_SEP + "ADJ")
def test_initialize_examples():
nlp = Language()
morphologizer = nlp.add_pipe("morphologizer")
morphologizer.add_label("POS" + Morphology.FIELD_SEP + "NOUN")
train_examples = []
for t in TRAIN_DATA:
train_examples.append(Example.from_dict(nlp.make_doc(t[0]), t[1]))
# you shouldn't really call this more than once, but for testing it should be fine
nlp.initialize()
nlp.initialize(get_examples=lambda: train_examples)
with pytest.raises(TypeError):
nlp.initialize(get_examples=lambda: None)
with pytest.raises(TypeError):
nlp.initialize(get_examples=train_examples)
def test_overfitting_IO():
# Simple test to try and quickly overfit the morphologizer - ensuring the ML models work correctly
nlp = English()
nlp.add_pipe("morphologizer")
train_examples = []
for inst in TRAIN_DATA:
train_examples.append(Example.from_dict(nlp.make_doc(inst[0]), inst[1]))
optimizer = nlp.initialize(get_examples=lambda: train_examples)
for i in range(50):
losses = {}
nlp.update(train_examples, sgd=optimizer, losses=losses)
assert losses["morphologizer"] < 0.00001
# test the trained model
test_text = "I like blue ham"
doc = nlp(test_text)
gold_morphs = ["Feat=N", "Feat=V", "", ""]
gold_pos_tags = ["NOUN", "VERB", "ADJ", ""]
assert [str(t.morph) for t in doc] == gold_morphs
assert [t.pos_ for t in doc] == gold_pos_tags
# Also test the results are still the same after IO
with make_tempdir() as tmp_dir:
nlp.to_disk(tmp_dir)
nlp2 = util.load_model_from_path(tmp_dir)
doc2 = nlp2(test_text)
assert [str(t.morph) for t in doc2] == gold_morphs
assert [t.pos_ for t in doc2] == gold_pos_tags
# Make sure that running pipe twice, or comparing to call, always amounts to the same predictions
texts = [
"Just a sentence.",
"Then one more sentence about London.",
"Here is another one.",
"I like London.",
]
batch_deps_1 = [doc.to_array([MORPH]) for doc in nlp.pipe(texts)]
batch_deps_2 = [doc.to_array([MORPH]) for doc in nlp.pipe(texts)]
no_batch_deps = [doc.to_array([MORPH]) for doc in [nlp(text) for text in texts]]
assert_equal(batch_deps_1, batch_deps_2)
assert_equal(batch_deps_1, no_batch_deps)
# Test without POS
nlp.remove_pipe("morphologizer")
nlp.add_pipe("morphologizer")
for example in train_examples:
for token in example.reference:
token.pos_ = ""
optimizer = nlp.initialize(get_examples=lambda: train_examples)
for i in range(50):
losses = {}
nlp.update(train_examples, sgd=optimizer, losses=losses)
assert losses["morphologizer"] < 0.00001
# Test the trained model
test_text = "I like blue ham"
doc = nlp(test_text)
gold_morphs = ["Feat=N", "Feat=V", "", ""]
gold_pos_tags = ["", "", "", ""]
assert [str(t.morph) for t in doc] == gold_morphs
assert [t.pos_ for t in doc] == gold_pos_tags
# Test overwrite+extend settings
# (note that "" is unset, "_" is set and empty)
morphs = ["Feat=V", "Feat=N", "_"]
doc = Doc(nlp.vocab, words=["blue", "ham", "like"], morphs=morphs)
orig_morphs = [str(t.morph) for t in doc]
orig_pos_tags = [t.pos_ for t in doc]
morphologizer = nlp.get_pipe("morphologizer")
# don't overwrite or extend
morphologizer.cfg["overwrite"] = False
doc = morphologizer(doc)
assert [str(t.morph) for t in doc] == orig_morphs
assert [t.pos_ for t in doc] == orig_pos_tags
# overwrite and extend
morphologizer.cfg["overwrite"] = True
morphologizer.cfg["extend"] = True
doc = Doc(nlp.vocab, words=["I", "like"], morphs=["Feat=A|That=A|This=A", ""])
doc = morphologizer(doc)
assert [str(t.morph) for t in doc] == ["Feat=N|That=A|This=A", "Feat=V"]
# extend without overwriting
morphologizer.cfg["overwrite"] = False
morphologizer.cfg["extend"] = True
doc = Doc(nlp.vocab, words=["I", "like"], morphs=["Feat=A|That=A|This=A", "That=B"])
doc = morphologizer(doc)
assert [str(t.morph) for t in doc] == ["Feat=A|That=A|This=A", "Feat=V|That=B"]
# overwrite without extending
morphologizer.cfg["overwrite"] = True
morphologizer.cfg["extend"] = False
doc = Doc(nlp.vocab, words=["I", "like"], morphs=["Feat=A|That=A|This=A", ""])
doc = morphologizer(doc)
assert [str(t.morph) for t in doc] == ["Feat=N", "Feat=V"]
# Test with unset morph and partial POS
nlp.remove_pipe("morphologizer")
nlp.add_pipe("morphologizer")
for example in train_examples:
for token in example.reference:
if token.text == "ham":
token.pos_ = "NOUN"
else:
token.pos_ = ""
token.set_morph(None)
optimizer = nlp.initialize(get_examples=lambda: train_examples)
assert nlp.get_pipe("morphologizer").labels is not None
for i in range(50):
losses = {}
nlp.update(train_examples, sgd=optimizer, losses=losses)
assert losses["morphologizer"] < 0.00001
# Test the trained model
test_text = "I like blue ham"
doc = nlp(test_text)
gold_morphs = ["", "", "", ""]
gold_pos_tags = ["NOUN", "NOUN", "NOUN", "NOUN"]
assert [str(t.morph) for t in doc] == gold_morphs
assert [t.pos_ for t in doc] == gold_pos_tags
| 8,094 | 34.660793 | 102 | py |
spaCy | spaCy-master/spacy/tests/pipeline/test_pipe_factories.py | import pytest
from pydantic import StrictInt, StrictStr
from thinc.api import ConfigValidationError, Linear, Model
import spacy
from spacy.lang.de import German
from spacy.lang.en import English
from spacy.language import Language
from spacy.pipeline.tok2vec import DEFAULT_TOK2VEC_MODEL
from spacy.tokens import Doc
from spacy.util import SimpleFrozenDict, combine_score_weights, registry
from ..util import make_tempdir
@pytest.mark.issue(5137)
def test_issue5137():
factory_name = "test_issue5137"
pipe_name = "my_component"
@Language.factory(factory_name)
class MyComponent:
def __init__(self, nlp, name=pipe_name, categories="all_categories"):
self.nlp = nlp
self.categories = categories
self.name = name
def __call__(self, doc):
pass
def to_disk(self, path, **kwargs):
pass
def from_disk(self, path, **cfg):
pass
nlp = English()
my_component = nlp.add_pipe(factory_name, name=pipe_name)
assert my_component.categories == "all_categories"
with make_tempdir() as tmpdir:
nlp.to_disk(tmpdir)
overrides = {"components": {pipe_name: {"categories": "my_categories"}}}
nlp2 = spacy.load(tmpdir, config=overrides)
assert nlp2.get_pipe(pipe_name).categories == "my_categories"
def test_pipe_function_component():
name = "test_component"
@Language.component(name)
def component(doc: Doc) -> Doc:
return doc
assert name in registry.factories
nlp = Language()
with pytest.raises(ValueError):
nlp.add_pipe(component)
nlp.add_pipe(name)
assert name in nlp.pipe_names
assert nlp.pipe_factories[name] == name
assert Language.get_factory_meta(name)
assert nlp.get_pipe_meta(name)
pipe = nlp.get_pipe(name)
assert pipe == component
pipe = nlp.create_pipe(name)
assert pipe == component
def test_pipe_class_component_init():
name1 = "test_class_component1"
name2 = "test_class_component2"
@Language.factory(name1)
class Component1:
def __init__(self, nlp: Language, name: str):
self.nlp = nlp
def __call__(self, doc: Doc) -> Doc:
return doc
class Component2:
def __init__(self, nlp: Language, name: str):
self.nlp = nlp
def __call__(self, doc: Doc) -> Doc:
return doc
@Language.factory(name2)
def factory(nlp: Language, name=name2):
return Component2(nlp, name)
nlp = Language()
for name, Component in [(name1, Component1), (name2, Component2)]:
assert name in registry.factories
with pytest.raises(ValueError):
nlp.add_pipe(Component(nlp, name))
nlp.add_pipe(name)
assert name in nlp.pipe_names
assert nlp.pipe_factories[name] == name
assert Language.get_factory_meta(name)
assert nlp.get_pipe_meta(name)
pipe = nlp.get_pipe(name)
assert isinstance(pipe, Component)
assert isinstance(pipe.nlp, Language)
pipe = nlp.create_pipe(name)
assert isinstance(pipe, Component)
assert isinstance(pipe.nlp, Language)
def test_pipe_class_component_config():
name = "test_class_component_config"
@Language.factory(name)
class Component:
def __init__(
self, nlp: Language, name: str, value1: StrictInt, value2: StrictStr
):
self.nlp = nlp
self.value1 = value1
self.value2 = value2
self.is_base = True
self.name = name
def __call__(self, doc: Doc) -> Doc:
return doc
@English.factory(name)
class ComponentEN:
def __init__(
self, nlp: Language, name: str, value1: StrictInt, value2: StrictStr
):
self.nlp = nlp
self.value1 = value1
self.value2 = value2
self.is_base = False
def __call__(self, doc: Doc) -> Doc:
return doc
nlp = Language()
with pytest.raises(ConfigValidationError): # no config provided
nlp.add_pipe(name)
with pytest.raises(ConfigValidationError): # invalid config
nlp.add_pipe(name, config={"value1": "10", "value2": "hello"})
with pytest.warns(UserWarning):
nlp.add_pipe(
name, config={"value1": 10, "value2": "hello", "name": "wrong_name"}
)
pipe = nlp.get_pipe(name)
assert isinstance(pipe.nlp, Language)
assert pipe.value1 == 10
assert pipe.value2 == "hello"
assert pipe.is_base is True
assert pipe.name == name
nlp_en = English()
with pytest.raises(ConfigValidationError): # invalid config
nlp_en.add_pipe(name, config={"value1": "10", "value2": "hello"})
nlp_en.add_pipe(name, config={"value1": 10, "value2": "hello"})
pipe = nlp_en.get_pipe(name)
assert isinstance(pipe.nlp, English)
assert pipe.value1 == 10
assert pipe.value2 == "hello"
assert pipe.is_base is False
def test_pipe_class_component_defaults():
name = "test_class_component_defaults"
@Language.factory(name)
class Component:
def __init__(
self,
nlp: Language,
name: str,
value1: StrictInt = StrictInt(10),
value2: StrictStr = StrictStr("hello"),
):
self.nlp = nlp
self.value1 = value1
self.value2 = value2
def __call__(self, doc: Doc) -> Doc:
return doc
nlp = Language()
nlp.add_pipe(name)
pipe = nlp.get_pipe(name)
assert isinstance(pipe.nlp, Language)
assert pipe.value1 == 10
assert pipe.value2 == "hello"
def test_pipe_class_component_model():
name = "test_class_component_model"
default_config = {
"model": {
"@architectures": "spacy.TextCatEnsemble.v2",
"tok2vec": DEFAULT_TOK2VEC_MODEL,
"linear_model": {
"@architectures": "spacy.TextCatBOW.v2",
"exclusive_classes": False,
"ngram_size": 1,
"no_output_layer": False,
},
},
"value1": 10,
}
@Language.factory(name, default_config=default_config)
class Component:
def __init__(self, nlp: Language, model: Model, name: str, value1: StrictInt):
self.nlp = nlp
self.model = model
self.value1 = value1
self.name = name
def __call__(self, doc: Doc) -> Doc:
return doc
nlp = Language()
nlp.add_pipe(name)
pipe = nlp.get_pipe(name)
assert isinstance(pipe.nlp, Language)
assert pipe.value1 == 10
assert isinstance(pipe.model, Model)
def test_pipe_class_component_model_custom():
name = "test_class_component_model_custom"
arch = f"{name}.arch"
default_config = {"value1": 1, "model": {"@architectures": arch, "nO": 0, "nI": 0}}
@Language.factory(name, default_config=default_config)
class Component:
def __init__(
self,
nlp: Language,
model: Model,
name: str,
value1: StrictInt = StrictInt(10),
):
self.nlp = nlp
self.model = model
self.value1 = value1
self.name = name
def __call__(self, doc: Doc) -> Doc:
return doc
@registry.architectures(arch)
def make_custom_arch(nO: StrictInt, nI: StrictInt):
return Linear(nO, nI)
nlp = Language()
config = {"value1": 20, "model": {"@architectures": arch, "nO": 1, "nI": 2}}
nlp.add_pipe(name, config=config)
pipe = nlp.get_pipe(name)
assert isinstance(pipe.nlp, Language)
assert pipe.value1 == 20
assert isinstance(pipe.model, Model)
assert pipe.model.name == "linear"
nlp = Language()
with pytest.raises(ConfigValidationError):
config = {"value1": "20", "model": {"@architectures": arch, "nO": 1, "nI": 2}}
nlp.add_pipe(name, config=config)
with pytest.raises(ConfigValidationError):
config = {"value1": 20, "model": {"@architectures": arch, "nO": 1.0, "nI": 2.0}}
nlp.add_pipe(name, config=config)
def test_pipe_factories_wrong_formats():
with pytest.raises(ValueError):
# Decorator is not called
@Language.component
def component(foo: int, bar: str):
...
with pytest.raises(ValueError):
# Decorator is not called
@Language.factory
def factory1(foo: int, bar: str):
...
with pytest.raises(ValueError):
# Factory function is missing "nlp" and "name" arguments
@Language.factory("test_pipe_factories_missing_args")
def factory2(foo: int, bar: str):
...
def test_pipe_factory_meta_config_cleanup():
"""Test that component-specific meta and config entries are represented
correctly and cleaned up when pipes are removed, replaced or renamed."""
nlp = Language()
nlp.add_pipe("ner", name="ner_component")
nlp.add_pipe("textcat")
assert nlp.get_factory_meta("ner")
assert nlp.get_pipe_meta("ner_component")
assert nlp.get_pipe_config("ner_component")
assert nlp.get_factory_meta("textcat")
assert nlp.get_pipe_meta("textcat")
assert nlp.get_pipe_config("textcat")
nlp.rename_pipe("textcat", "tc")
assert nlp.get_pipe_meta("tc")
assert nlp.get_pipe_config("tc")
with pytest.raises(ValueError):
nlp.remove_pipe("ner")
nlp.remove_pipe("ner_component")
assert "ner_component" not in nlp._pipe_meta
assert "ner_component" not in nlp._pipe_configs
with pytest.raises(ValueError):
nlp.replace_pipe("textcat", "parser")
nlp.replace_pipe("tc", "parser")
assert nlp.get_factory_meta("parser")
assert nlp.get_pipe_meta("tc").factory == "parser"
def test_pipe_factories_empty_dict_default():
"""Test that default config values can be empty dicts and that no config
validation error is raised."""
# TODO: fix this
name = "test_pipe_factories_empty_dict_default"
@Language.factory(name, default_config={"foo": {}})
def factory(nlp: Language, name: str, foo: dict):
...
nlp = Language()
nlp.create_pipe(name)
def test_pipe_factories_language_specific():
"""Test that language sub-classes can have their own factories, with
fallbacks to the base factories."""
name1 = "specific_component1"
name2 = "specific_component2"
Language.component(name1, func=lambda: "base")
English.component(name1, func=lambda: "en")
German.component(name2, func=lambda: "de")
assert Language.has_factory(name1)
assert not Language.has_factory(name2)
assert English.has_factory(name1)
assert not English.has_factory(name2)
assert German.has_factory(name1)
assert German.has_factory(name2)
nlp = Language()
assert nlp.create_pipe(name1)() == "base"
with pytest.raises(ValueError):
nlp.create_pipe(name2)
nlp_en = English()
assert nlp_en.create_pipe(name1)() == "en"
with pytest.raises(ValueError):
nlp_en.create_pipe(name2)
nlp_de = German()
assert nlp_de.create_pipe(name1)() == "base"
assert nlp_de.create_pipe(name2)() == "de"
def test_language_factories_invalid():
"""Test that assigning directly to Language.factories is now invalid and
raises a custom error."""
assert isinstance(Language.factories, SimpleFrozenDict)
with pytest.raises(NotImplementedError):
Language.factories["foo"] = "bar"
nlp = Language()
assert isinstance(nlp.factories, SimpleFrozenDict)
assert len(nlp.factories)
with pytest.raises(NotImplementedError):
nlp.factories["foo"] = "bar"
@pytest.mark.parametrize(
"weights,override,expected",
[
([{"a": 1.0}, {"b": 1.0}, {"c": 1.0}], {}, {"a": 0.33, "b": 0.33, "c": 0.33}),
([{"a": 1.0}, {"b": 50}, {"c": 100}], {}, {"a": 0.01, "b": 0.33, "c": 0.66}),
(
[{"a": 0.7, "b": 0.3}, {"c": 1.0}, {"d": 0.5, "e": 0.5}],
{},
{"a": 0.23, "b": 0.1, "c": 0.33, "d": 0.17, "e": 0.17},
),
(
[{"a": 100, "b": 300}, {"c": 50, "d": 50}],
{},
{"a": 0.2, "b": 0.6, "c": 0.1, "d": 0.1},
),
([{"a": 0.5, "b": 0.5}, {"b": 1.0}], {}, {"a": 0.33, "b": 0.67}),
([{"a": 0.5, "b": 0.0}], {}, {"a": 1.0, "b": 0.0}),
([{"a": 0.5, "b": 0.5}, {"b": 1.0}], {"a": 0.0}, {"a": 0.0, "b": 1.0}),
([{"a": 0.0, "b": 0.0}, {"c": 0.0}], {}, {"a": 0.0, "b": 0.0, "c": 0.0}),
([{"a": 0.0, "b": 0.0}, {"c": 1.0}], {}, {"a": 0.0, "b": 0.0, "c": 1.0}),
(
[{"a": 0.0, "b": 0.0}, {"c": 0.0}],
{"c": 0.2},
{"a": 0.0, "b": 0.0, "c": 1.0},
),
(
[{"a": 0.5, "b": 0.5, "c": 1.0, "d": 1.0}],
{"a": 0.0, "b": 0.0},
{"a": 0.0, "b": 0.0, "c": 0.5, "d": 0.5},
),
(
[{"a": 0.5, "b": 0.5, "c": 1.0, "d": 1.0}],
{"a": 0.0, "b": 0.0, "f": 0.0},
{"a": 0.0, "b": 0.0, "c": 0.5, "d": 0.5, "f": 0.0},
),
],
)
def test_language_factories_combine_score_weights(weights, override, expected):
result = combine_score_weights(weights, override)
assert sum(result.values()) in (0.99, 1.0, 0.0)
assert result == expected
def test_language_factories_scores():
name = "test_language_factories_scores"
func = lambda nlp, name: lambda doc: doc
weights1 = {"a1": 0.5, "a2": 0.5}
weights2 = {"b1": 0.2, "b2": 0.7, "b3": 0.1}
Language.factory(f"{name}1", default_score_weights=weights1, func=func)
Language.factory(f"{name}2", default_score_weights=weights2, func=func)
meta1 = Language.get_factory_meta(f"{name}1")
assert meta1.default_score_weights == weights1
meta2 = Language.get_factory_meta(f"{name}2")
assert meta2.default_score_weights == weights2
nlp = Language()
nlp._config["training"]["score_weights"] = {}
nlp.add_pipe(f"{name}1")
nlp.add_pipe(f"{name}2")
cfg = nlp.config["training"]
expected_weights = {"a1": 0.25, "a2": 0.25, "b1": 0.1, "b2": 0.35, "b3": 0.05}
assert cfg["score_weights"] == expected_weights
# Test with custom defaults
config = nlp.config.copy()
config["training"]["score_weights"]["a1"] = 0.0
config["training"]["score_weights"]["b3"] = 1.3
nlp = English.from_config(config)
score_weights = nlp.config["training"]["score_weights"]
expected = {"a1": 0.0, "a2": 0.12, "b1": 0.05, "b2": 0.17, "b3": 0.65}
assert score_weights == expected
# Test with null values
config = nlp.config.copy()
config["training"]["score_weights"]["a1"] = None
nlp = English.from_config(config)
score_weights = nlp.config["training"]["score_weights"]
expected = {"a1": None, "a2": 0.12, "b1": 0.05, "b2": 0.17, "b3": 0.66}
assert score_weights == expected
def test_pipe_factories_from_source():
"""Test adding components from a source model."""
source_nlp = English()
source_nlp.add_pipe("tagger", name="my_tagger")
nlp = English()
with pytest.raises(ValueError):
nlp.add_pipe("my_tagger", source="en_core_web_sm")
nlp.add_pipe("my_tagger", source=source_nlp)
assert "my_tagger" in nlp.pipe_names
with pytest.raises(KeyError):
nlp.add_pipe("custom", source=source_nlp)
def test_pipe_factories_from_source_language_subclass():
class CustomEnglishDefaults(English.Defaults):
stop_words = set(["custom", "stop"])
@registry.languages("custom_en")
class CustomEnglish(English):
lang = "custom_en"
Defaults = CustomEnglishDefaults
source_nlp = English()
source_nlp.add_pipe("tagger")
# custom subclass
nlp = CustomEnglish()
nlp.add_pipe("tagger", source=source_nlp)
assert "tagger" in nlp.pipe_names
# non-subclass
nlp = German()
nlp.add_pipe("tagger", source=source_nlp)
assert "tagger" in nlp.pipe_names
# mismatched vectors
nlp = English()
nlp.vocab.vectors.resize((1, 4))
nlp.vocab.vectors.add("cat", vector=[1, 2, 3, 4])
with pytest.warns(UserWarning):
nlp.add_pipe("tagger", source=source_nlp)
def test_pipe_factories_from_source_custom():
"""Test adding components from a source model with custom components."""
name = "test_pipe_factories_from_source_custom"
@Language.factory(name, default_config={"arg": "hello"})
def test_factory(nlp, name, arg: str):
return lambda doc: doc
source_nlp = English()
source_nlp.add_pipe("tagger")
source_nlp.add_pipe(name, config={"arg": "world"})
nlp = English()
nlp.add_pipe(name, source=source_nlp)
assert name in nlp.pipe_names
assert nlp.get_pipe_meta(name).default_config["arg"] == "hello"
config = nlp.config["components"][name]
assert config["factory"] == name
assert config["arg"] == "world"
def test_pipe_factories_from_source_config():
name = "test_pipe_factories_from_source_config"
@Language.factory(name, default_config={"arg": "hello"})
def test_factory(nlp, name, arg: str):
return lambda doc: doc
source_nlp = English()
source_nlp.add_pipe("tagger")
source_nlp.add_pipe(name, name="yolo", config={"arg": "world"})
dest_nlp_cfg = {"lang": "en", "pipeline": ["parser", "custom"]}
with make_tempdir() as tempdir:
source_nlp.to_disk(tempdir)
dest_components_cfg = {
"parser": {"factory": "parser"},
"custom": {"source": str(tempdir), "component": "yolo"},
}
dest_config = {"nlp": dest_nlp_cfg, "components": dest_components_cfg}
nlp = English.from_config(dest_config)
assert nlp.pipe_names == ["parser", "custom"]
assert nlp.pipe_factories == {"parser": "parser", "custom": name}
meta = nlp.get_pipe_meta("custom")
assert meta.factory == name
assert meta.default_config["arg"] == "hello"
config = nlp.config["components"]["custom"]
assert config["factory"] == name
assert config["arg"] == "world"
class PipeFactoriesIdempotent:
def __init__(self, nlp, name):
...
def __call__(self, doc):
...
@pytest.mark.parametrize(
"i,func,func2",
[
(0, lambda nlp, name: lambda doc: doc, lambda doc: doc),
(1, PipeFactoriesIdempotent, PipeFactoriesIdempotent(None, None)),
],
)
def test_pipe_factories_decorator_idempotent(i, func, func2):
"""Check that decorator can be run multiple times if the function is the
same. This is especially relevant for live reloading because we don't
want spaCy to raise an error if a module registering components is reloaded.
"""
name = f"test_pipe_factories_decorator_idempotent_{i}"
for i in range(5):
Language.factory(name, func=func)
nlp = Language()
nlp.add_pipe(name)
Language.factory(name, func=func)
# Make sure it also works for component decorator, which creates the
# factory function
name2 = f"{name}2"
for i in range(5):
Language.component(name2, func=func2)
nlp = Language()
nlp.add_pipe(name)
Language.component(name2, func=func2)
def test_pipe_factories_config_excludes_nlp():
"""Test that the extra values we temporarily add to component config
blocks/functions are removed and not copied around.
"""
name = "test_pipe_factories_config_excludes_nlp"
func = lambda nlp, name: lambda doc: doc
Language.factory(name, func=func)
config = {
"nlp": {"lang": "en", "pipeline": [name]},
"components": {name: {"factory": name}},
}
nlp = English.from_config(config)
assert nlp.pipe_names == [name]
pipe_cfg = nlp.get_pipe_config(name)
    assert pipe_cfg == {"factory": name}
assert nlp._pipe_configs[name] == {"factory": name}
| 19,895 | 32.270903 | 88 | py |