Dataset schema (one row per source file):

| column          | dtype   | range / classes  |
|-----------------|---------|------------------|
| repo            | string  | lengths 2–152    |
| file            | string  | lengths 15–239   |
| code            | string  | lengths 0–58.4M  |
| file_length     | int64   | 0–58.4M          |
| avg_line_length | float64 | 0–1.81M          |
| max_line_length | int64   | 0–12.7M          |
| extension_type  | string  | 364 classes      |
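Given that schema, rows like the ones below can be iterated without materializing the whole dump. This is a minimal sketch assuming the Hugging Face `datasets` library; the dataset id `"org/code-dump"` is a hypothetical placeholder, since the dump does not name the dataset.

```python
# Sketch: stream rows of a code dataset with the schema above.
# "org/code-dump" is a placeholder id; substitute the real dataset
# name or point load_dataset at local data files instead.
from datasets import load_dataset

ds = load_dataset("org/code-dump", split="train", streaming=True)
for row in ds.take(2):
    # Each row is one source file plus its size statistics.
    print(row["repo"], row["file"], row["file_length"], row["extension_type"])
```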
repo: spaCy | file: spaCy-master/spacy/tests/pipeline/test_pipe_methods.py
```python
import gc

import numpy
import pytest
from thinc.api import get_current_ops

import spacy
from spacy.lang.en import English
from spacy.lang.en.syntax_iterators import noun_chunks
from spacy.language import Language
from spacy.pipeline import TrainablePipe
from spacy.tokens import Doc
from spacy.training import Example
from spacy.util import SimpleFrozenList, get_arg_names, make_tempdir
from spacy.vocab import Vocab


@pytest.fixture
def nlp():
    return Language()


@Language.component("new_pipe")
def new_pipe(doc):
    return doc


@Language.component("other_pipe")
def other_pipe(doc):
    return doc


@pytest.mark.issue(1506)
def test_issue1506():
    def string_generator():
        for _ in range(10001):
            yield "It's sentence produced by that bug."
        for _ in range(10001):
            yield "I erase some hbdsaj lemmas."
        for _ in range(10001):
            yield "I erase lemmas."
        for _ in range(10001):
            yield "It's sentence produced by that bug."
        for _ in range(10001):
            yield "It's sentence produced by that bug."

    nlp = English()
    for i, d in enumerate(nlp.pipe(string_generator())):
        # Cleanup should run more than once to actually clean up the data.
        # On the first run, cleanup only marks strings as not hit.
        if i == 10000 or i == 20000 or i == 30000:
            gc.collect()
        for t in d:
            str(t.lemma_)


@pytest.mark.issue(1654)
def test_issue1654():
    nlp = Language(Vocab())
    assert not nlp.pipeline

    @Language.component("component")
    def component(doc):
        return doc

    nlp.add_pipe("component", name="1")
    nlp.add_pipe("component", name="2", after="1")
    nlp.add_pipe("component", name="3", after="2")
    assert nlp.pipe_names == ["1", "2", "3"]
    nlp2 = Language(Vocab())
    assert not nlp2.pipeline
    nlp2.add_pipe("component", name="3")
    nlp2.add_pipe("component", name="2", before="3")
    nlp2.add_pipe("component", name="1", before="2")
    assert nlp2.pipe_names == ["1", "2", "3"]


@pytest.mark.issue(3880)
def test_issue3880():
    """Test that `nlp.pipe()` works when an empty string ends the batch.

    Fixed in v7.0.5 of Thinc.
    """
    texts = ["hello", "world", "", ""]
    nlp = English()
    nlp.add_pipe("parser").add_label("dep")
    nlp.add_pipe("ner").add_label("PERSON")
    nlp.add_pipe("tagger").add_label("NN")
    nlp.initialize()
    for doc in nlp.pipe(texts):
        pass


@pytest.mark.issue(5082)
def test_issue5082():
    # Ensure the 'merge_entities' pipeline does something sensible for the vectors of the merged tokens
    nlp = English()
    vocab = nlp.vocab
    array1 = numpy.asarray([0.1, 0.5, 0.8], dtype=numpy.float32)
    array2 = numpy.asarray([-0.2, -0.6, -0.9], dtype=numpy.float32)
    array3 = numpy.asarray([0.3, -0.1, 0.7], dtype=numpy.float32)
    array4 = numpy.asarray([0.5, 0, 0.3], dtype=numpy.float32)
    array34 = numpy.asarray([0.4, -0.05, 0.5], dtype=numpy.float32)
    vocab.set_vector("I", array1)
    vocab.set_vector("like", array2)
    vocab.set_vector("David", array3)
    vocab.set_vector("Bowie", array4)
    text = "I like David Bowie"
    patterns = [
        {"label": "PERSON", "pattern": [{"LOWER": "david"}, {"LOWER": "bowie"}]}
    ]
    ruler = nlp.add_pipe("entity_ruler")
    ruler.add_patterns(patterns)
    parsed_vectors_1 = [t.vector for t in nlp(text)]
    assert len(parsed_vectors_1) == 4
    ops = get_current_ops()
    numpy.testing.assert_array_equal(ops.to_numpy(parsed_vectors_1[0]), array1)
    numpy.testing.assert_array_equal(ops.to_numpy(parsed_vectors_1[1]), array2)
    numpy.testing.assert_array_equal(ops.to_numpy(parsed_vectors_1[2]), array3)
    numpy.testing.assert_array_equal(ops.to_numpy(parsed_vectors_1[3]), array4)
    nlp.add_pipe("merge_entities")
    parsed_vectors_2 = [t.vector for t in nlp(text)]
    assert len(parsed_vectors_2) == 3
    numpy.testing.assert_array_equal(ops.to_numpy(parsed_vectors_2[0]), array1)
    numpy.testing.assert_array_equal(ops.to_numpy(parsed_vectors_2[1]), array2)
    numpy.testing.assert_array_equal(ops.to_numpy(parsed_vectors_2[2]), array34)


@pytest.mark.issue(5458)
def test_issue5458():
    # Test that the noun chunker does not generate overlapping spans
    # fmt: off
    words = ["In", "an", "era", "where", "markets", "have", "brought", "prosperity", "and", "empowerment", "."]
    vocab = Vocab(strings=words)
    deps = ["ROOT", "det", "pobj", "advmod", "nsubj", "aux", "relcl", "dobj", "cc", "conj", "punct"]
    pos = ["ADP", "DET", "NOUN", "ADV", "NOUN", "AUX", "VERB", "NOUN", "CCONJ", "NOUN", "PUNCT"]
    heads = [0, 2, 0, 9, 6, 6, 2, 6, 7, 7, 0]
    # fmt: on
    en_doc = Doc(vocab, words=words, pos=pos, heads=heads, deps=deps)
    en_doc.noun_chunks_iterator = noun_chunks

    # if there are overlapping spans, this will fail with an E102 error "Can't merge non-disjoint spans"
    nlp = English()
    merge_nps = nlp.create_pipe("merge_noun_chunks")
    merge_nps(en_doc)


def test_multiple_predictions():
    class DummyPipe(TrainablePipe):
        def __init__(self):
            self.model = "dummy_model"

        def predict(self, docs):
            return ([1, 2, 3], [4, 5, 6])

        def set_annotations(self, docs, scores):
            return docs

    nlp = Language()
    doc = nlp.make_doc("foo")
    dummy_pipe = DummyPipe()
    dummy_pipe(doc)


def test_add_pipe_no_name(nlp):
    nlp.add_pipe("new_pipe")
    assert "new_pipe" in nlp.pipe_names


def test_add_pipe_duplicate_name(nlp):
    nlp.add_pipe("new_pipe", name="duplicate_name")
    with pytest.raises(ValueError):
        nlp.add_pipe("new_pipe", name="duplicate_name")


@pytest.mark.parametrize("name", ["parser"])
def test_add_pipe_first(nlp, name):
    nlp.add_pipe("new_pipe", name=name, first=True)
    assert nlp.pipeline[0][0] == name


@pytest.mark.parametrize("name1,name2", [("parser", "lambda_pipe")])
def test_add_pipe_last(nlp, name1, name2):
    Language.component("new_pipe2", func=lambda doc: doc)
    nlp.add_pipe("new_pipe2", name=name2)
    nlp.add_pipe("new_pipe", name=name1, last=True)
    assert nlp.pipeline[0][0] != name1
    assert nlp.pipeline[-1][0] == name1


def test_cant_add_pipe_first_and_last(nlp):
    with pytest.raises(ValueError):
        nlp.add_pipe("new_pipe", first=True, last=True)


@pytest.mark.parametrize("name", ["test_get_pipe"])
def test_get_pipe(nlp, name):
    with pytest.raises(KeyError):
        nlp.get_pipe(name)
    nlp.add_pipe("new_pipe", name=name)
    assert nlp.get_pipe(name) == new_pipe


@pytest.mark.parametrize(
    "name,replacement,invalid_replacement",
    [("test_replace_pipe", "other_pipe", lambda doc: doc)],
)
def test_replace_pipe(nlp, name, replacement, invalid_replacement):
    with pytest.raises(ValueError):
        nlp.replace_pipe(name, new_pipe)
    nlp.add_pipe("new_pipe", name=name)
    with pytest.raises(ValueError):
        nlp.replace_pipe(name, invalid_replacement)
    nlp.replace_pipe(name, replacement)
    assert nlp.get_pipe(name) == nlp.create_pipe(replacement)


def test_replace_last_pipe(nlp):
    nlp.add_pipe("sentencizer")
    nlp.add_pipe("ner")
    assert nlp.pipe_names == ["sentencizer", "ner"]
    nlp.replace_pipe("ner", "ner")
    assert nlp.pipe_names == ["sentencizer", "ner"]


def test_replace_pipe_config(nlp):
    nlp.add_pipe("entity_linker")
    nlp.add_pipe("sentencizer")
    assert nlp.get_pipe("entity_linker").incl_prior is True
    nlp.replace_pipe("entity_linker", "entity_linker", config={"incl_prior": False})
    assert nlp.get_pipe("entity_linker").incl_prior is False


@pytest.mark.parametrize("old_name,new_name", [("old_pipe", "new_pipe")])
def test_rename_pipe(nlp, old_name, new_name):
    with pytest.raises(ValueError):
        nlp.rename_pipe(old_name, new_name)
    nlp.add_pipe("new_pipe", name=old_name)
    nlp.rename_pipe(old_name, new_name)
    assert nlp.pipeline[0][0] == new_name


@pytest.mark.parametrize("name", ["my_component"])
def test_remove_pipe(nlp, name):
    with pytest.raises(ValueError):
        nlp.remove_pipe(name)
    nlp.add_pipe("new_pipe", name=name)
    assert len(nlp.pipeline) == 1
    removed_name, removed_component = nlp.remove_pipe(name)
    assert not len(nlp.pipeline)
    assert removed_name == name
    assert removed_component == new_pipe


@pytest.mark.parametrize("name", ["my_component"])
def test_disable_pipes_method(nlp, name):
    nlp.add_pipe("new_pipe", name=name)
    assert nlp.has_pipe(name)
    disabled = nlp.select_pipes(disable=name)
    assert not nlp.has_pipe(name)
    disabled.restore()


@pytest.mark.parametrize("name", ["my_component"])
def test_enable_pipes_method(nlp, name):
    nlp.add_pipe("new_pipe", name=name)
    assert nlp.has_pipe(name)
    disabled = nlp.select_pipes(enable=[])
    assert not nlp.has_pipe(name)
    disabled.restore()


@pytest.mark.parametrize("name", ["my_component"])
def test_disable_pipes_context(nlp, name):
    """Test that an enabled component stays enabled after running the context manager."""
    nlp.add_pipe("new_pipe", name=name)
    assert nlp.has_pipe(name)
    with nlp.select_pipes(disable=name):
        assert not nlp.has_pipe(name)
    assert nlp.has_pipe(name)


@pytest.mark.parametrize("name", ["my_component"])
def test_disable_pipes_context_restore(nlp, name):
    """Test that a disabled component stays disabled after running the context manager."""
    nlp.add_pipe("new_pipe", name=name)
    assert nlp.has_pipe(name)
    nlp.disable_pipe(name)
    assert not nlp.has_pipe(name)
    with nlp.select_pipes(disable=name):
        assert not nlp.has_pipe(name)
    assert not nlp.has_pipe(name)


def test_select_pipes_list_arg(nlp):
    for name in ["c1", "c2", "c3"]:
        nlp.add_pipe("new_pipe", name=name)
        assert nlp.has_pipe(name)
    with nlp.select_pipes(disable=["c1", "c2"]):
        assert not nlp.has_pipe("c1")
        assert not nlp.has_pipe("c2")
        assert nlp.has_pipe("c3")
    with nlp.select_pipes(enable="c3"):
        assert not nlp.has_pipe("c1")
        assert not nlp.has_pipe("c2")
        assert nlp.has_pipe("c3")
    with nlp.select_pipes(enable=["c1", "c2"], disable="c3"):
        assert nlp.has_pipe("c1")
        assert nlp.has_pipe("c2")
        assert not nlp.has_pipe("c3")
    with nlp.select_pipes(enable=[]):
        assert not nlp.has_pipe("c1")
        assert not nlp.has_pipe("c2")
        assert not nlp.has_pipe("c3")
    with nlp.select_pipes(enable=["c1", "c2", "c3"], disable=[]):
        assert nlp.has_pipe("c1")
        assert nlp.has_pipe("c2")
        assert nlp.has_pipe("c3")
    with nlp.select_pipes(disable=["c1", "c2", "c3"], enable=[]):
        assert not nlp.has_pipe("c1")
        assert not nlp.has_pipe("c2")
        assert not nlp.has_pipe("c3")


def test_select_pipes_errors(nlp):
    for name in ["c1", "c2", "c3"]:
        nlp.add_pipe("new_pipe", name=name)
        assert nlp.has_pipe(name)
    with pytest.raises(ValueError):
        nlp.select_pipes()
    with pytest.raises(ValueError):
        nlp.select_pipes(enable=["c1", "c2"], disable=["c1"])
    with pytest.raises(ValueError):
        nlp.select_pipes(enable=["c1", "c2"], disable=[])
    with pytest.raises(ValueError):
        nlp.select_pipes(enable=[], disable=["c3"])
    disabled = nlp.select_pipes(disable=["c2"])
    nlp.remove_pipe("c2")
    with pytest.raises(ValueError):
        disabled.restore()


@pytest.mark.parametrize("n_pipes", [100])
def test_add_lots_of_pipes(nlp, n_pipes):
    Language.component("n_pipes", func=lambda doc: doc)
    for i in range(n_pipes):
        nlp.add_pipe("n_pipes", name=f"pipe_{i}")
    assert len(nlp.pipe_names) == n_pipes


@pytest.mark.parametrize("component", [lambda doc: doc, {"hello": "world"}])
def test_raise_for_invalid_components(nlp, component):
    with pytest.raises(ValueError):
        nlp.add_pipe(component)


@pytest.mark.parametrize("component", ["ner", "tagger", "parser", "textcat"])
def test_pipe_base_class_add_label(nlp, component):
    label = "TEST"
    pipe = nlp.create_pipe(component)
    pipe.add_label(label)
    if component == "tagger":
        # Tagger always has the default coarse-grained label scheme
        assert label in pipe.labels
    else:
        assert pipe.labels == (label,)


def test_pipe_labels(nlp):
    input_labels = {
        "ner": ["PERSON", "ORG", "GPE"],
        "textcat": ["POSITIVE", "NEGATIVE"],
    }
    for name, labels in input_labels.items():
        nlp.add_pipe(name)
        pipe = nlp.get_pipe(name)
        for label in labels:
            pipe.add_label(label)
        assert len(pipe.labels) == len(labels)
    assert len(nlp.pipe_labels) == len(input_labels)
    for name, labels in nlp.pipe_labels.items():
        assert sorted(input_labels[name]) == sorted(labels)


def test_add_pipe_before_after():
    """Test that before/after works with strings and ints."""
    nlp = Language()
    nlp.add_pipe("ner")
    with pytest.raises(ValueError):
        nlp.add_pipe("textcat", before="parser")
    nlp.add_pipe("textcat", before="ner")
    assert nlp.pipe_names == ["textcat", "ner"]
    with pytest.raises(ValueError):
        nlp.add_pipe("parser", before=3)
    with pytest.raises(ValueError):
        nlp.add_pipe("parser", after=3)
    nlp.add_pipe("parser", after=0)
    assert nlp.pipe_names == ["textcat", "parser", "ner"]
    nlp.add_pipe("tagger", before=2)
    assert nlp.pipe_names == ["textcat", "parser", "tagger", "ner"]
    with pytest.raises(ValueError):
        nlp.add_pipe("entity_ruler", after=1, first=True)
    with pytest.raises(ValueError):
        nlp.add_pipe("entity_ruler", before="ner", after=2)
    with pytest.raises(ValueError):
        nlp.add_pipe("entity_ruler", before=True)
    with pytest.raises(ValueError):
        nlp.add_pipe("entity_ruler", first=False)


def test_disable_enable_pipes():
    name = "test_disable_enable_pipes"
    results = {}

    def make_component(name):
        results[name] = ""

        def component(doc):
            nonlocal results
            results[name] = doc.text
            return doc

        return component

    c1 = Language.component(f"{name}1", func=make_component(f"{name}1"))
    c2 = Language.component(f"{name}2", func=make_component(f"{name}2"))
    nlp = Language()
    nlp.add_pipe(f"{name}1")
    nlp.add_pipe(f"{name}2")
    assert results[f"{name}1"] == ""
    assert results[f"{name}2"] == ""
    assert nlp.pipeline == [(f"{name}1", c1), (f"{name}2", c2)]
    assert nlp.pipe_names == [f"{name}1", f"{name}2"]
    nlp.disable_pipe(f"{name}1")
    assert nlp.disabled == [f"{name}1"]
    assert nlp.component_names == [f"{name}1", f"{name}2"]
    assert nlp.pipe_names == [f"{name}2"]
    assert nlp.config["nlp"]["disabled"] == [f"{name}1"]
    nlp("hello")
    assert results[f"{name}1"] == ""  # didn't run
    assert results[f"{name}2"] == "hello"  # ran
    nlp.enable_pipe(f"{name}1")
    assert nlp.disabled == []
    assert nlp.pipe_names == [f"{name}1", f"{name}2"]
    assert nlp.config["nlp"]["disabled"] == []
    nlp("world")
    assert results[f"{name}1"] == "world"
    assert results[f"{name}2"] == "world"
    nlp.disable_pipe(f"{name}2")
    nlp.remove_pipe(f"{name}2")
    assert nlp.components == [(f"{name}1", c1)]
    assert nlp.pipeline == [(f"{name}1", c1)]
    assert nlp.component_names == [f"{name}1"]
    assert nlp.pipe_names == [f"{name}1"]
    assert nlp.disabled == []
    assert nlp.config["nlp"]["disabled"] == []
    nlp.rename_pipe(f"{name}1", name)
    assert nlp.components == [(name, c1)]
    assert nlp.component_names == [name]
    nlp("!")
    assert results[f"{name}1"] == "!"
    assert results[f"{name}2"] == "world"
    with pytest.raises(ValueError):
        nlp.disable_pipe(f"{name}2")
    nlp.disable_pipe(name)
    assert nlp.component_names == [name]
    assert nlp.pipe_names == []
    assert nlp.config["nlp"]["disabled"] == [name]
    nlp("?")
    assert results[f"{name}1"] == "!"


def test_pipe_methods_frozen():
    """Test that spaCy raises custom error messages if "frozen" properties are
    accessed. We still want to use a list here to not break backwards
    compatibility, but users should see an error if they're trying to append
    to nlp.pipeline etc."""
    nlp = Language()
    ner = nlp.add_pipe("ner")
    assert nlp.pipe_names == ["ner"]
    for prop in [
        nlp.pipeline,
        nlp.pipe_names,
        nlp.components,
        nlp.component_names,
        nlp.disabled,
        nlp.factory_names,
    ]:
        assert isinstance(prop, list)
        assert isinstance(prop, SimpleFrozenList)
    with pytest.raises(NotImplementedError):
        nlp.pipeline.append(("ner2", ner))
    with pytest.raises(NotImplementedError):
        nlp.pipe_names.pop()
    with pytest.raises(NotImplementedError):
        nlp.components.sort()
    with pytest.raises(NotImplementedError):
        nlp.component_names.clear()


@pytest.mark.parametrize(
    "pipe", ["tagger", "parser", "ner", "textcat", "morphologizer"]
)
def test_pipe_label_data_exports_labels(pipe):
    nlp = Language()
    pipe = nlp.add_pipe(pipe)
    # Make sure pipe has pipe labels
    assert getattr(pipe, "label_data", None) is not None
    # Make sure pipe can be initialized with labels
    initialize = getattr(pipe, "initialize", None)
    assert initialize is not None
    assert "labels" in get_arg_names(initialize)


@pytest.mark.parametrize("pipe", ["senter", "entity_linker"])
def test_pipe_label_data_no_labels(pipe):
    nlp = Language()
    pipe = nlp.add_pipe(pipe)
    assert getattr(pipe, "label_data", None) is None
    initialize = getattr(pipe, "initialize", None)
    if initialize is not None:
        assert "labels" not in get_arg_names(initialize)


def test_warning_pipe_begin_training():
    with pytest.warns(UserWarning, match="begin_training"):

        class IncompatPipe(TrainablePipe):
            def __init__(self):
                ...

            def begin_training(*args, **kwargs):
                ...


def test_pipe_methods_initialize():
    """Test that the [initialize] config reflects the components correctly."""
    nlp = Language()
    nlp.add_pipe("tagger")
    assert "tagger" not in nlp.config["initialize"]["components"]
    nlp.config["initialize"]["components"]["tagger"] = {"labels": ["hello"]}
    assert nlp.config["initialize"]["components"]["tagger"] == {"labels": ["hello"]}
    nlp.remove_pipe("tagger")
    assert "tagger" not in nlp.config["initialize"]["components"]
    nlp.add_pipe("tagger")
    assert "tagger" not in nlp.config["initialize"]["components"]
    nlp.config["initialize"]["components"]["tagger"] = {"labels": ["hello"]}
    nlp.rename_pipe("tagger", "my_tagger")
    assert "tagger" not in nlp.config["initialize"]["components"]
    assert nlp.config["initialize"]["components"]["my_tagger"] == {"labels": ["hello"]}
    nlp.config["initialize"]["components"]["test"] = {"foo": "bar"}
    nlp.add_pipe("ner", name="test")
    assert "test" in nlp.config["initialize"]["components"]
    nlp.remove_pipe("test")
    assert "test" not in nlp.config["initialize"]["components"]


def test_update_with_annotates():
    name = "test_with_annotates"
    results = {}

    def make_component(name):
        results[name] = ""

        def component(doc):
            nonlocal results
            results[name] += doc.text
            return doc

        return component

    Language.component(f"{name}1", func=make_component(f"{name}1"))
    Language.component(f"{name}2", func=make_component(f"{name}2"))
    components = set([f"{name}1", f"{name}2"])
    nlp = English()
    texts = ["a", "bb", "ccc"]
    examples = []
    for text in texts:
        examples.append(Example(nlp.make_doc(text), nlp.make_doc(text)))
    for components_to_annotate in [
        [],
        [f"{name}1"],
        [f"{name}1", f"{name}2"],
        [f"{name}2", f"{name}1"],
    ]:
        for key in results:
            results[key] = ""
        nlp = English(vocab=nlp.vocab)
        nlp.add_pipe(f"{name}1")
        nlp.add_pipe(f"{name}2")
        nlp.update(examples, annotates=components_to_annotate)
        for component in components_to_annotate:
            assert results[component] == "".join(eg.predicted.text for eg in examples)
        for component in components - set(components_to_annotate):
            assert results[component] == ""


@pytest.mark.issue(11443)
def test_enable_disable_conflict_with_config():
    """Test conflict between enable/disable w.r.t. `nlp.disabled` set in the config."""
    nlp = English()
    nlp.add_pipe("tagger")
    nlp.add_pipe("senter")
    nlp.add_pipe("sentencizer")
    with make_tempdir() as tmp_dir:
        nlp.to_disk(tmp_dir)
        # Expected to succeed, as config and arguments do not conflict.
        assert spacy.load(
            tmp_dir, enable=["tagger"], config={"nlp": {"disabled": ["senter"]}}
        ).disabled == ["senter", "sentencizer"]
        # Expected to succeed without warning due to the lack of a conflicting config option.
        spacy.load(tmp_dir, enable=["tagger"])
        # Expected to fail due to conflict between enable and disabled.
        with pytest.raises(ValueError):
            spacy.load(
                tmp_dir,
                enable=["senter"],
                config={"nlp": {"disabled": ["senter", "tagger"]}},
            )


def test_load_disable_enable():
    """Tests spacy.load() with dis-/enabling components."""
    base_nlp = English()
    for pipe in ("sentencizer", "tagger", "parser"):
        base_nlp.add_pipe(pipe)
    with make_tempdir() as tmp_dir:
        base_nlp.to_disk(tmp_dir)
        to_disable = ["parser", "tagger"]
        to_enable = ["tagger", "parser"]
        single_str = "tagger"
        # Setting only `disable`.
        nlp = spacy.load(tmp_dir, disable=to_disable)
        assert all([comp_name in nlp.disabled for comp_name in to_disable])
        # Setting only `enable`.
        nlp = spacy.load(tmp_dir, enable=to_enable)
        assert all(
            [
                (comp_name in nlp.disabled) is (comp_name not in to_enable)
                for comp_name in nlp.component_names
            ]
        )
        # Loading with a string representing one component
        nlp = spacy.load(tmp_dir, exclude=single_str)
        assert single_str not in nlp.component_names
        nlp = spacy.load(tmp_dir, disable=single_str)
        assert single_str in nlp.component_names
        assert single_str not in nlp.pipe_names
        assert nlp._disabled == {single_str}
        assert nlp.disabled == [single_str]
        # Testing consistent enable/disable combination.
        nlp = spacy.load(
            tmp_dir,
            enable=to_enable,
            disable=[
                comp_name
                for comp_name in nlp.component_names
                if comp_name not in to_enable
            ],
        )
        assert all(
            [
                (comp_name in nlp.disabled) is (comp_name not in to_enable)
                for comp_name in nlp.component_names
            ]
        )
        # Inconsistent enable/disable combination.
        with pytest.raises(ValueError):
            spacy.load(tmp_dir, enable=to_enable, disable=["parser"])
```
file_length: 23,305 | avg_line_length: 32.825835 | max_line_length: 111 | extension_type: py
repo: spaCy | file: spaCy-master/spacy/tests/pipeline/test_sentencizer.py
```python
import pytest

import spacy
from spacy.lang.en import English
from spacy.pipeline import Sentencizer
from spacy.tokens import Doc


def test_sentencizer(en_vocab):
    doc = Doc(en_vocab, words=["Hello", "!", "This", "is", "a", "test", "."])
    sentencizer = Sentencizer(punct_chars=None)
    doc = sentencizer(doc)
    assert doc.has_annotation("SENT_START")
    sent_starts = [t.is_sent_start for t in doc]
    sent_ends = [t.is_sent_end for t in doc]
    assert sent_starts == [True, False, True, False, False, False, False]
    assert sent_ends == [False, True, False, False, False, False, True]
    assert len(list(doc.sents)) == 2


def test_sentencizer_pipe():
    texts = ["Hello! This is a test.", "Hi! This is a test."]
    nlp = English()
    nlp.add_pipe("sentencizer")
    for doc in nlp.pipe(texts):
        assert doc.has_annotation("SENT_START")
        sent_starts = [t.is_sent_start for t in doc]
        assert sent_starts == [True, False, True, False, False, False, False]
        assert len(list(doc.sents)) == 2
    for ex in nlp.pipe(texts):
        doc = ex.doc
        assert doc.has_annotation("SENT_START")
        sent_starts = [t.is_sent_start for t in doc]
        assert sent_starts == [True, False, True, False, False, False, False]
        assert len(list(doc.sents)) == 2


def test_sentencizer_empty_docs():
    one_empty_text = [""]
    many_empty_texts = ["", "", ""]
    some_empty_texts = ["hi", "", "This is a test. Here are two sentences.", ""]
    nlp = English()
    nlp.add_pipe("sentencizer")
    for texts in [one_empty_text, many_empty_texts, some_empty_texts]:
        for doc in nlp.pipe(texts):
            assert doc.has_annotation("SENT_START")
            sent_starts = [t.is_sent_start for t in doc]
            if len(doc) == 0:
                assert sent_starts == []
            else:
                assert len(sent_starts) > 0


@pytest.mark.parametrize(
    "words,sent_starts,sent_ends,n_sents",
    [
        # The expected result here is that the duplicate punctuation gets merged
        # onto the same sentence and no one-token sentence is created for them.
        (
            ["Hello", "!", ".", "Test", ".", ".", "ok"],
            [True, False, False, True, False, False, True],
            [False, False, True, False, False, True, True],
            3,
        ),
        # We also want to make sure ¡ and ¿ aren't treated as sentence end
        # markers, even though they're punctuation
        (
            ["¡", "Buen", "día", "!", "Hola", ",", "¿", "qué", "tal", "?"],
            [True, False, False, False, True, False, False, False, False, False],
            [False, False, False, True, False, False, False, False, False, True],
            2,
        ),
        # The Token.is_punct check ensures that quotes are handled as well
        (
            ['"', "Nice", "!", '"', "I", "am", "happy", "."],
            [True, False, False, False, True, False, False, False],
            [False, False, False, True, False, False, False, True],
            2,
        ),
    ],
)
def test_sentencizer_complex(en_vocab, words, sent_starts, sent_ends, n_sents):
    doc = Doc(en_vocab, words=words)
    sentencizer = Sentencizer(punct_chars=None)
    doc = sentencizer(doc)
    assert doc.has_annotation("SENT_START")
    assert [t.is_sent_start for t in doc] == sent_starts
    assert [t.is_sent_end for t in doc] == sent_ends
    assert len(list(doc.sents)) == n_sents


@pytest.mark.parametrize(
    "punct_chars,words,sent_starts,sent_ends,n_sents",
    [
        (
            ["~", "?"],
            ["Hello", "world", "~", "A", ".", "B", "."],
            [True, False, False, True, False, False, False],
            [False, False, True, False, False, False, True],
            2,
        ),
        # Even though it's not common, the punct_chars should be able to
        # handle any tokens
        (
            [".", "ö"],
            ["Hello", ".", "Test", "ö", "Ok", "."],
            [True, False, True, False, True, False],
            [False, True, False, True, False, True],
            3,
        ),
    ],
)
def test_sentencizer_custom_punct(
    en_vocab, punct_chars, words, sent_starts, sent_ends, n_sents
):
    doc = Doc(en_vocab, words=words)
    sentencizer = Sentencizer(punct_chars=punct_chars)
    doc = sentencizer(doc)
    assert doc.has_annotation("SENT_START")
    assert [t.is_sent_start for t in doc] == sent_starts
    assert [t.is_sent_end for t in doc] == sent_ends
    assert len(list(doc.sents)) == n_sents


def test_sentencizer_serialize_bytes(en_vocab):
    punct_chars = [".", "~", "+"]
    sentencizer = Sentencizer(punct_chars=punct_chars)
    assert sentencizer.punct_chars == set(punct_chars)
    bytes_data = sentencizer.to_bytes()
    new_sentencizer = Sentencizer(punct_chars=None).from_bytes(bytes_data)
    assert new_sentencizer.punct_chars == set(punct_chars)


@pytest.mark.parametrize(
    # fmt: off
    "lang,text",
    [
        ('bn', 'বাংলা ভাষা (বাঙলা, বাঙ্গলা, তথা বাঙ্গালা নামগুলোতেও পরিচিত) একটি ইন্দো-আর্য ভাষা, যা দক্ষিণ এশিয়ার বাঙালি জাতির প্রধান কথ্য ও লেখ্য ভাষা। মাতৃভাষীর সংখ্যায় বাংলা ইন্দো-ইউরোপীয় ভাষা পরিবারের চতুর্থ ও বিশ্বের ষষ্ঠ বৃহত্তম ভাষা।[৫] মোট ব্যবহারকারীর সংখ্যা অনুসারে বাংলা বিশ্বের সপ্তম বৃহত্তম ভাষা। বাংলা সার্বভৌম ভাষাভিত্তিক জাতিরাষ্ট্র বাংলাদেশের একমাত্র রাষ্ট্রভাষা তথা সরকারি ভাষা[৬] এবং ভারতের পশ্চিমবঙ্গ, ত্রিপুরা, আসামের বরাক উপত্যকার সরকারি ভাষা। বঙ্গোপসাগরে অবস্থিত আন্দামান দ্বীপপুঞ্জের প্রধান কথ্য ভাষা বাংলা। এছাড়া ভারতের ঝাড়খণ্ড, বিহার, মেঘালয়, মিজোরাম, উড়িষ্যা রাজ্যগুলোতে উল্লেখযোগ্য পরিমাণে বাংলাভাষী জনগণ রয়েছে। ভারতে হিন্দির পরেই সর্বাধিক প্রচলিত ভাষা বাংলা।[৭][৮] এছাড়াও মধ্য প্রাচ্য, আমেরিকা ও ইউরোপে উল্লেখযোগ্য পরিমাণে বাংলাভাষী অভিবাসী রয়েছে।[৯] সারা বিশ্বে সব মিলিয়ে ২৬ কোটির অধিক লোক দৈনন্দিন জীবনে বাংলা ব্যবহার করে।[২] বাংলাদেশের জাতীয় সঙ্গীত এবং ভারতের জাতীয় সঙ্গীত ও স্তোত্র বাংলাতে রচিত।'),
        ('de', 'Die deutsche Sprache bzw. Deutsch ([dɔʏ̯t͡ʃ]; abgekürzt dt. oder dtsch.) ist eine westgermanische Sprache. Ihr Sprachraum umfasst Deutschland, Österreich, die Deutschschweiz, Liechtenstein, Luxemburg, Ostbelgien, Südtirol, das Elsass und Lothringen sowie Nordschleswig. Außerdem ist sie eine Minderheitensprache in einigen europäischen und außereuropäischen Ländern, z. B. in Rumänien und Südafrika, sowie Nationalsprache im afrikanischen Namibia.'),
        ('hi', 'हिन्दी विश्व की एक प्रमुख भाषा है एवं भारत की राजभाषा है। केन्द्रीय स्तर पर भारत में दूसरी आधिकारिक भाषा अंग्रेजी है। यह हिंदुस्तानी भाषा की एक मानकीकृत रूप है जिसमें संस्कृत के तत्सम तथा तद्भव शब्दों का प्रयोग अधिक है और अरबी-फ़ारसी शब्द कम हैं। हिंदी संवैधानिक रूप से भारत की राजभाषा और भारत की सबसे अधिक बोली और समझी जाने वाली भाषा है। हालाँकि, हिन्दी भारत की राष्ट्रभाषा नहीं है,[3] क्योंकि भारत के संविधान में कोई भी भाषा को ऐसा दर्जा नहीं दिया गया था।[4][5] चीनी के बाद यह विश्व में सबसे अधिक बोली जाने वाली भाषा भी है। विश्व आर्थिक मंच की गणना के अनुसार यह विश्व की दस शक्तिशाली भाषाओं में से एक है।[6]'),
        ('kn', 'ದ್ರಾವಿಡ ಭಾಷೆಗಳಲ್ಲಿ ಪ್ರಾಮುಖ್ಯವುಳ್ಳ ಭಾಷೆಯೂ ಭಾರತದ ಪುರಾತನವಾದ ಭಾಷೆಗಳಲ್ಲಿ ಒಂದೂ ಆಗಿರುವ ಕನ್ನಡ ಭಾಷೆಯನ್ನು ಅದರ ವಿವಿಧ ರೂಪಗಳಲ್ಲಿ ಸುಮಾರು ೪೫ ದಶಲಕ್ಷ ಜನರು ಆಡು ನುಡಿಯಾಗಿ ಬಳಸುತ್ತಲಿದ್ದಾರೆ. ಕನ್ನಡ ಕರ್ನಾಟಕ ರಾಜ್ಯದ ಆಡಳಿತ ಭಾಷೆ.[೧೧] ಜಗತ್ತಿನಲ್ಲಿ ಅತ್ಯಂತ ಹೆಚ್ಚು ಮಂದಿ ಮಾತನಾಡುವ ಭಾಷೆಯೆಂಬ ನೆಲೆಯಲ್ಲಿ ಇಪ್ಪತೊಂಬತ್ತನೆಯ ಸ್ಥಾನ ಕನ್ನಡಕ್ಕಿದೆ. ೨೦೧೧ರ ಜನಗಣತಿಯ ಪ್ರಕಾರ ಜಗತ್ತಿನಲ್ಲಿ ೬.೪ ಕೋಟಿ ಜನಗಳು ಕನ್ನಡ ಮಾತನಾಡುತ್ತಾರೆ ಎಂದು ತಿಳಿದುಬಂದಿದೆ. ಇವರಲ್ಲಿ ೫.೫ ಕೋಟಿ ಜನಗಳ ಮಾತೃಭಾಷೆ ಕನ್ನಡವಾಗಿದೆ. ಬ್ರಾಹ್ಮಿ ಲಿಪಿಯಿಂದ ರೂಪುಗೊಂಡ ಕನ್ನಡ ಲಿಪಿಯನ್ನು ಉಪಯೋಗಿಸಿ ಕನ್ನಡ ಭಾಷೆಯನ್ನು ಬರೆಯಲಾಗುತ್ತದೆ. ಕನ್ನಡ ಬರಹದ ಮಾದರಿಗಳಿಗೆ ಸಾವಿರದ ಐನೂರು ವರುಷಗಳ ಚರಿತ್ರೆಯಿದೆ. ಕ್ರಿ.ಶ. ಆರನೆಯ ಶತಮಾನದ ಪಶ್ಚಿಮ ಗಂಗ ಸಾಮ್ರಾಜ್ಯದ ಕಾಲದಲ್ಲಿ [೧೨] ಮತ್ತು ಒಂಬತ್ತನೆಯ ಶತಮಾನದ ರಾಷ್ಟ್ರಕೂಟ ಸಾಮ್ರಾಜ್ಯದ ಕಾಲದಲ್ಲಿ ಹಳಗನ್ನಡ ಸಾಹಿತ್ಯ ಅತ್ಯಂತ ಹೆಚ್ಚಿನ ರಾಜಾಶ್ರಯ ಪಡೆಯಿತು.[೧೩][೧೪] ಅದಲ್ಲದೆ ಸಾವಿರ ವರುಷಗಳ ಸಾಹಿತ್ಯ ಪರಂಪರೆ ಕನ್ನಡಕ್ಕಿದೆ.[೧೫]ವಿನೋಬಾ ಭಾವೆ ಕನ್ನಡ ಲಿಪಿಯನ್ನು ಲಿಪಿಗಳ ರಾಣಿಯೆಂದು ಹೊಗಳಿದ್ದಾರೆ.[ಸೂಕ್ತ ಉಲ್ಲೇಖನ ಬೇಕು]'),
        ('si', 'ශ්‍රී ලංකාවේ ප්‍රධාන ජාතිය වන සිංහල ජනයාගේ මව් බස සිංහල වෙයි. අද වන විට මිලියන 20 කට අධික සිංහල සහ මිලියන 3කට අධික සිංහල නොවන ජනගහනයක් සිංහල භාෂාව භාවිත කරති. සිංහල‍ ඉන්දු-යුරෝපීය භාෂාවල උප ගණයක් වන ඉන්දු-ආර්ය භාෂා ගණයට අයිති වන අතර මාල දිවයින භාවිත කරන දිවෙහි භාෂාව සිංහලයෙන් පැවත එන්නකි. සිංහල ශ්‍රී ලංකාවේ නිල භාෂාවයි .'),
        ('ta', 'தமிழ் மொழி (Tamil language) தமிழர்களினதும், தமிழ் பேசும் பலரதும் தாய்மொழி ஆகும். தமிழ் திராவிட மொழிக் குடும்பத்தின் முதன்மையான மொழிகளில் ஒன்றும் செம்மொழியும் ஆகும். இந்தியா, இலங்கை, மலேசியா, சிங்கப்பூர் ஆகிய நாடுகளில் அதிக அளவிலும், ஐக்கிய அரபு அமீரகம், தென்னாப்பிரிக்கா, மொரிசியசு, பிஜி, ரீயூனியன், டிரினிடாட் போன்ற நாடுகளில் சிறிய அளவிலும் தமிழ் பேசப்படுகிறது. 1997ஆம் ஆண்டுப் புள்ளி விவரப்படி உலகம் முழுவதிலும் 8 கோடி (80 மில்லியன்) மக்களால் பேசப்படும் தமிழ்[13], ஒரு மொழியைத் தாய்மொழியாகக் கொண்டு பேசும் மக்களின் எண்ணிக்கை அடிப்படையில் பதினெட்டாவது இடத்தில் உள்ளது.[14] இணையத்தில் அதிகம் பயன்படுத்தப்படும் இந்திய மொழிகளில் தமிழ் முதன்மையாக உள்ளதாக 2017 ஆவது ஆண்டில் நடைபெற்ற கூகுள் கணக்கெடுப்பில் தெரிய வந்தது.[15]'),
        ('te', 'ఆంధ్ర ప్రదేశ్, తెలంగాణ రాష్ట్రాల అధికార భాష తెలుగు. భారత దేశంలో తెలుగు మాతృభాషగా మాట్లాడే 8.7 కోట్ల (2001) జనాభాతో [1] ప్రాంతీయ భాషలలో మొదటి స్థానంలో ఉంది. ప్రపంచంలోని ప్రజలు అత్యధికముగా మాట్లాడే భాషలలో 15 స్థానములోనూ, భారత దేశములో హిందీ, తర్వాత స్థానములోనూ నిలుస్తుంది. పాతవైన ప్రపంచ భాష గణాంకాల (ఎథ్నోలాగ్) ప్రకారం ప్రపంచవ్యాప్తంగా 7.4 కోట్లు మందికి మాతృభాషగా ఉంది.[2] మొదటి భాషగా మాట్లాడతారు. అతి ప్రాచీన దేశ భాషలలో సంస్కృతము తమిళముతో బాటు తెలుగు భాషను 2008 అక్టోబరు 31న భారత ప్రభుత్వము గుర్తించింది.'),
        ('ur', 'اُردُو لشکری زبان[8] (یا جدید معیاری اردو) برصغیر کی معیاری زبانوں میں سے ایک ہے۔ یہ پاکستان کی قومی اور رابطہ عامہ کی زبان ہے، جبکہ بھارت کی چھے ریاستوں کی دفتری زبان کا درجہ رکھتی ہے۔ آئین ہند کے مطابق اسے 22 دفتری شناخت زبانوں میں شامل کیا جاچکا ہے۔ 2001ء کی مردم شماری کے مطابق اردو کو بطور مادری زبان بھارت میں 5.01% فیصد لوگ بولتے ہیں اور اس لحاظ سے یہ بھارت کی چھٹی بڑی زبان ہے جبکہ پاکستان میں اسے بطور مادری زبان 7.59% فیصد لوگ استعمال کرتے ہیں، یہ پاکستان کی پانچویں بڑی زبان ہے۔ اردو تاریخی طور پر ہندوستان کی مسلم آبادی سے جڑی ہے۔[حوالہ درکار] بعض ذخیرہ الفاظ کے علاوہ یہ زبان معیاری ہندی سے قابل فہم ہے جو اس خطے کی ہندوؤں سے منسوب ہے۔[حوالہ درکار] زبانِ اردو کو پہچان و ترقی اس وقت ملی جب برطانوی دور میں انگریز حکمرانوں نے اسے فارسی کی بجائے انگریزی کے ساتھ شمالی ہندوستان کے علاقوں اور جموں و کشمیر میں اسے سنہ 1846ء اور پنجاب میں سنہ 1849ء میں بطور دفتری زبان نافذ کیا۔ اس کے علاوہ خلیجی، یورپی، ایشیائی اور امریکی علاقوں میں اردو بولنے والوں کی ایک بڑی تعداد آباد ہے جو بنیادی طور پر جنوبی ایشیاء سے کوچ کرنے والے اہلِ اردو ہیں۔ 1999ء کے اعداد وشمار کے مطابق اردو زبان کے مجموعی متکلمین کی تعداد دس کروڑ ساٹھ لاکھ کے لگ بھگ تھی۔ اس لحاظ سے یہ دنیا کی نویں بڑی زبان ہے۔'),
    ],
    # fmt: on
)
def test_sentencizer_across_scripts(lang, text):
    nlp = spacy.blank(lang)
    nlp.add_pipe("sentencizer")
    doc = nlp(text)
    assert len(list(doc.sents)) > 1
```
file_length: 10,882 | avg_line_length: 69.668831 | max_line_length: 1,205 | extension_type: py
repo: spaCy | file: spaCy-master/spacy/tests/pipeline/test_senter.py
```python
import pytest
from numpy.testing import assert_equal

from spacy import util
from spacy.attrs import SENT_START
from spacy.lang.en import English
from spacy.language import Language
from spacy.tests.util import make_tempdir
from spacy.training import Example


def test_label_types():
    nlp = Language()
    senter = nlp.add_pipe("senter")
    with pytest.raises(NotImplementedError):
        senter.add_label("A")


SENT_STARTS = [0] * 14
SENT_STARTS[0] = 1
SENT_STARTS[5] = 1
SENT_STARTS[9] = 1

TRAIN_DATA = [
    (
        "I like green eggs. Eat blue ham. I like purple eggs.",
        {"sent_starts": SENT_STARTS},
    ),
    (
        "She likes purple eggs. They hate ham. You like yellow eggs.",
        {"sent_starts": SENT_STARTS},
    ),
]


def test_initialize_examples():
    nlp = Language()
    nlp.add_pipe("senter")
    train_examples = []
    for t in TRAIN_DATA:
        train_examples.append(Example.from_dict(nlp.make_doc(t[0]), t[1]))
    # you shouldn't really call this more than once, but for testing it should be fine
    nlp.initialize()
    nlp.initialize(get_examples=lambda: train_examples)
    with pytest.raises(TypeError):
        nlp.initialize(get_examples=lambda: None)
    with pytest.raises(TypeError):
        nlp.initialize(get_examples=train_examples)


def test_overfitting_IO():
    # Simple test to try and quickly overfit the senter - ensuring the ML models work correctly
    nlp = English()
    train_examples = []
    for t in TRAIN_DATA:
        train_examples.append(Example.from_dict(nlp.make_doc(t[0]), t[1]))
    # add some cases where SENT_START == -1
    train_examples[0].reference[10].is_sent_start = False
    train_examples[1].reference[1].is_sent_start = False
    train_examples[1].reference[11].is_sent_start = False
    nlp.add_pipe("senter")
    optimizer = nlp.initialize()
    for i in range(200):
        losses = {}
        nlp.update(train_examples, sgd=optimizer, losses=losses)
    assert losses["senter"] < 0.001
    # test the trained model
    test_text = TRAIN_DATA[0][0]
    doc = nlp(test_text)
    gold_sent_starts = [0] * 14
    gold_sent_starts[0] = 1
    gold_sent_starts[5] = 1
    gold_sent_starts[9] = 1
    assert [int(t.is_sent_start) for t in doc] == gold_sent_starts
    # Also test the results are still the same after IO
    with make_tempdir() as tmp_dir:
        nlp.to_disk(tmp_dir)
        nlp2 = util.load_model_from_path(tmp_dir)
        doc2 = nlp2(test_text)
        assert [int(t.is_sent_start) for t in doc2] == gold_sent_starts
    # Make sure that running pipe twice, or comparing to call, always amounts to the same predictions
    texts = [
        "Just a sentence.",
        "Then one more sentence about London.",
        "Here is another one.",
        "I like London.",
    ]
    batch_deps_1 = [doc.to_array([SENT_START]) for doc in nlp.pipe(texts)]
    batch_deps_2 = [doc.to_array([SENT_START]) for doc in nlp.pipe(texts)]
    no_batch_deps = [
        doc.to_array([SENT_START]) for doc in [nlp(text) for text in texts]
    ]
    assert_equal(batch_deps_1, batch_deps_2)
    assert_equal(batch_deps_1, no_batch_deps)
    # test internal pipe labels vs. Language.pipe_labels with hidden labels
    assert nlp.get_pipe("senter").labels == ("I", "S")
    assert "senter" not in nlp.pipe_labels
```
file_length: 3,313 | avg_line_length: 30.865385 | max_line_length: 101 | extension_type: py
repo: spaCy | file: spaCy-master/spacy/tests/pipeline/test_span_finder.py
```python
import pytest
from thinc.api import Config

from spacy import util
from spacy.lang.en import English
from spacy.language import Language
from spacy.pipeline.span_finder import span_finder_default_config
from spacy.tokens import Doc
from spacy.training import Example
from spacy.util import fix_random_seed, make_tempdir, registry

SPANS_KEY = "pytest"

TRAIN_DATA = [
    ("Who is Shaka Khan?", {"spans": {SPANS_KEY: [(7, 17)]}}),
    (
        "I like London and Berlin.",
        {"spans": {SPANS_KEY: [(7, 13), (18, 24)]}},
    ),
]

TRAIN_DATA_OVERLAPPING = [
    ("Who is Shaka Khan?", {"spans": {SPANS_KEY: [(7, 17)]}}),
    (
        "I like London and Berlin",
        {"spans": {SPANS_KEY: [(7, 13), (18, 24), (7, 24)]}},
    ),
    ("", {"spans": {SPANS_KEY: []}}),
]


def make_examples(nlp, data=TRAIN_DATA):
    train_examples = []
    for t in data:
        eg = Example.from_dict(nlp.make_doc(t[0]), t[1])
        train_examples.append(eg)
    return train_examples


@pytest.mark.parametrize(
    "tokens_predicted, tokens_reference, reference_truths",
    [
        (
            ["Mon", ".", "-", "June", "16"],
            ["Mon.", "-", "June", "16"],
            [(0, 0), (0, 0), (0, 0), (1, 1), (0, 0)],
        ),
        (
            ["Mon.", "-", "J", "une", "16"],
            ["Mon.", "-", "June", "16"],
            [(0, 0), (0, 0), (1, 0), (0, 1), (0, 0)],
        ),
        (
            ["Mon", ".", "-", "June", "16"],
            ["Mon.", "-", "June", "1", "6"],
            [(0, 0), (0, 0), (0, 0), (1, 1), (0, 0)],
        ),
        (
            ["Mon.", "-J", "un", "e 16"],
            ["Mon.", "-", "June", "16"],
            [(0, 0), (0, 0), (0, 0), (0, 0)],
        ),
        pytest.param(
            ["Mon.-June", "16"],
            ["Mon.", "-", "June", "16"],
            [(0, 1), (0, 0)],
        ),
        pytest.param(
            ["Mon.-", "June", "16"],
            ["Mon.", "-", "J", "une", "16"],
            [(0, 0), (1, 1), (0, 0)],
        ),
        pytest.param(
            ["Mon.-", "June 16"],
            ["Mon.", "-", "June", "16"],
            [(0, 0), (1, 0)],
        ),
    ],
)
def test_loss_alignment_example(tokens_predicted, tokens_reference, reference_truths):
    nlp = Language()
    predicted = Doc(
        nlp.vocab, words=tokens_predicted, spaces=[False] * len(tokens_predicted)
    )
    reference = Doc(
        nlp.vocab, words=tokens_reference, spaces=[False] * len(tokens_reference)
    )
    example = Example(predicted, reference)
    example.reference.spans[SPANS_KEY] = [example.reference.char_span(5, 9)]
    span_finder = nlp.add_pipe("span_finder", config={"spans_key": SPANS_KEY})
    nlp.initialize()
    ops = span_finder.model.ops
    if predicted.text != reference.text:
        with pytest.raises(
            ValueError, match="must match between reference and predicted"
        ):
            span_finder._get_aligned_truth_scores([example], ops)
        return
    truth_scores, masks = span_finder._get_aligned_truth_scores([example], ops)
    assert len(truth_scores) == len(tokens_predicted)
    ops.xp.testing.assert_array_equal(truth_scores, ops.xp.asarray(reference_truths))


def test_span_finder_model():
    nlp = Language()
    docs = [nlp("This is an example."), nlp("This is the second example.")]
    docs[0].spans[SPANS_KEY] = [docs[0][3:4]]
    docs[1].spans[SPANS_KEY] = [docs[1][3:5]]
    total_tokens = 0
    for doc in docs:
        total_tokens += len(doc)
    config = Config().from_str(span_finder_default_config).interpolate()
    model = registry.resolve(config)["model"]
    model.initialize(X=docs)
    predictions = model.predict(docs)
    assert len(predictions) == total_tokens
    assert len(predictions[0]) == 2


def test_span_finder_component():
    nlp = Language()
    docs = [nlp("This is an example."), nlp("This is the second example.")]
    docs[0].spans[SPANS_KEY] = [docs[0][3:4]]
    docs[1].spans[SPANS_KEY] = [docs[1][3:5]]
    span_finder = nlp.add_pipe("span_finder", config={"spans_key": SPANS_KEY})
    nlp.initialize()
    docs = list(span_finder.pipe(docs))
    assert SPANS_KEY in docs[0].spans


@pytest.mark.parametrize(
    "min_length, max_length, span_count",
    [(0, 0, 0), (None, None, 8), (2, None, 6), (None, 1, 2), (2, 3, 2)],
)
def test_set_annotations_span_lengths(min_length, max_length, span_count):
    nlp = Language()
    doc = nlp("Me and Jenny goes together like peas and carrots.")
    if min_length == 0 and max_length == 0:
        with pytest.raises(ValueError, match="Both 'min_length' and 'max_length'"):
            span_finder = nlp.add_pipe(
                "span_finder",
                config={
                    "max_length": max_length,
                    "min_length": min_length,
                    "spans_key": SPANS_KEY,
                },
            )
        return
    span_finder = nlp.add_pipe(
        "span_finder",
        config={
            "max_length": max_length,
            "min_length": min_length,
            "spans_key": SPANS_KEY,
        },
    )
    nlp.initialize()
    # Starts [Me, Jenny, peas]
    # Ends [Jenny, peas, carrots]
    scores = [
        (1, 0),
        (0, 0),
        (1, 1),
        (0, 0),
        (0, 0),
        (0, 0),
        (1, 1),
        (0, 0),
        (0, 1),
        (0, 0),
    ]
    span_finder.set_annotations([doc], scores)
    assert doc.spans[SPANS_KEY]
    assert len(doc.spans[SPANS_KEY]) == span_count
    # Assert below will fail when max_length is set to 0
    if max_length is None:
        max_length = float("inf")
    if min_length is None:
        min_length = 1
    assert all(min_length <= len(span) <= max_length for span in doc.spans[SPANS_KEY])


def test_overfitting_IO():
    # Simple test to try and quickly overfit the span_finder component - ensuring the ML models work correctly
    fix_random_seed(0)
    nlp = English()
    span_finder = nlp.add_pipe("span_finder", config={"spans_key": SPANS_KEY})
    train_examples = make_examples(nlp)
    optimizer = nlp.initialize(get_examples=lambda: train_examples)
    assert span_finder.model.get_dim("nO") == 2
    for i in range(50):
        losses = {}
        nlp.update(train_examples, sgd=optimizer, losses=losses)
    assert losses["span_finder"] < 0.001
    # test the trained model
    test_text = "I like London and Berlin"
    doc = nlp(test_text)
    spans = doc.spans[SPANS_KEY]
    assert len(spans) == 3
    assert set([span.text for span in spans]) == {
        "London",
        "Berlin",
        "London and Berlin",
    }
    # Also test the results are still the same after IO
    with make_tempdir() as tmp_dir:
        nlp.to_disk(tmp_dir)
        nlp2 = util.load_model_from_path(tmp_dir)
        doc2 = nlp2(test_text)
        spans2 = doc2.spans[SPANS_KEY]
        assert len(spans2) == 3
        assert set([span.text for span in spans2]) == {
            "London",
            "Berlin",
            "London and Berlin",
        }
    # Test scoring
    scores = nlp.evaluate(train_examples)
    assert f"spans_{SPANS_KEY}_f" in scores
    # It's not perfect 1.0 F1 because it's designed to overgenerate for now.
    assert scores[f"spans_{SPANS_KEY}_p"] == 0.75
    assert scores[f"spans_{SPANS_KEY}_r"] == 1.0
    # also test that the spancat works for just a single entity in a sentence
    doc = nlp("London")
    assert len(doc.spans[SPANS_KEY]) == 1
```
file_length: 7,429 | avg_line_length: 29.829876 | max_line_length: 110 | extension_type: py
repo: spaCy | file: spaCy-master/spacy/tests/pipeline/test_span_ruler.py
```python
import pytest
from thinc.api import NumpyOps, get_current_ops

import spacy
from spacy import registry
from spacy.errors import MatchPatternError
from spacy.tests.util import make_tempdir
from spacy.tokens import Span
from spacy.training import Example


@pytest.fixture
@registry.misc("span_ruler_patterns")
def patterns():
    return [
        {"label": "HELLO", "pattern": "hello world", "id": "hello1"},
        {"label": "BYE", "pattern": [{"LOWER": "bye"}, {"LOWER": "bye"}]},
        {"label": "HELLO", "pattern": [{"ORTH": "HELLO"}], "id": "hello2"},
        {"label": "COMPLEX", "pattern": [{"ORTH": "foo", "OP": "*"}]},
        {"label": "TECH_ORG", "pattern": "Apple"},
        {"label": "TECH_ORG", "pattern": "Microsoft"},
    ]


@pytest.fixture
def overlapping_patterns():
    return [
        {"label": "FOOBAR", "pattern": "foo bar"},
        {"label": "BARBAZ", "pattern": "bar baz"},
    ]


@pytest.fixture
def person_org_patterns():
    return [
        {"label": "PERSON", "pattern": "Dina"},
        {"label": "ORG", "pattern": "ACME"},
        {"label": "ORG", "pattern": "ACM"},
    ]


@pytest.fixture
def person_org_date_patterns(person_org_patterns):
    return person_org_patterns + [{"label": "DATE", "pattern": "June 14th"}]


def test_span_ruler_add_empty(patterns):
    """Test that patterns don't get added excessively."""
    nlp = spacy.blank("xx")
    ruler = nlp.add_pipe("span_ruler", config={"validate": True})
    ruler.add_patterns(patterns)
    pattern_count = sum(len(mm) for mm in ruler.matcher._patterns.values())
    assert pattern_count > 0
    ruler.add_patterns([])
    after_count = sum(len(mm) for mm in ruler.matcher._patterns.values())
    assert after_count == pattern_count


def test_span_ruler_init(patterns):
    nlp = spacy.blank("xx")
    ruler = nlp.add_pipe("span_ruler")
    ruler.add_patterns(patterns)
    assert len(ruler) == len(patterns)
    assert len(ruler.labels) == 4
    assert "HELLO" in ruler
    assert "BYE" in ruler
    doc = nlp("hello world bye bye")
    assert len(doc.spans["ruler"]) == 2
    assert doc.spans["ruler"][0].label_ == "HELLO"
    assert doc.spans["ruler"][0].id_ == "hello1"
    assert doc.spans["ruler"][1].label_ == "BYE"
    assert doc.spans["ruler"][1].id_ == ""


def test_span_ruler_no_patterns_warns():
    nlp = spacy.blank("xx")
    ruler = nlp.add_pipe("span_ruler")
    assert len(ruler) == 0
    assert len(ruler.labels) == 0
    assert nlp.pipe_names == ["span_ruler"]
    with pytest.warns(UserWarning):
        doc = nlp("hello world bye bye")
    assert len(doc.spans["ruler"]) == 0


def test_span_ruler_init_patterns(patterns):
    # initialize with patterns
    nlp = spacy.blank("xx")
    ruler = nlp.add_pipe("span_ruler")
    assert len(ruler.labels) == 0
    ruler.initialize(lambda: [], patterns=patterns)
    assert len(ruler.labels) == 4
    doc = nlp("hello world bye bye")
    assert doc.spans["ruler"][0].label_ == "HELLO"
    assert doc.spans["ruler"][1].label_ == "BYE"
    nlp.remove_pipe("span_ruler")
    # initialize with patterns from misc registry
    nlp.config["initialize"]["components"]["span_ruler"] = {
        "patterns": {"@misc": "span_ruler_patterns"}
    }
    ruler = nlp.add_pipe("span_ruler")
    assert len(ruler.labels) == 0
    nlp.initialize()
    assert len(ruler.labels) == 4
    doc = nlp("hello world bye bye")
    assert doc.spans["ruler"][0].label_ == "HELLO"
    assert doc.spans["ruler"][1].label_ == "BYE"


def test_span_ruler_init_clear(patterns):
    """Test that initialization clears patterns."""
    nlp = spacy.blank("xx")
    ruler = nlp.add_pipe("span_ruler")
    ruler.add_patterns(patterns)
    assert len(ruler.labels) == 4
    ruler.initialize(lambda: [])
    assert len(ruler.labels) == 0


def test_span_ruler_clear(patterns):
    nlp = spacy.blank("xx")
    ruler = nlp.add_pipe("span_ruler")
    ruler.add_patterns(patterns)
    assert len(ruler.labels) == 4
    doc = nlp("hello world")
    assert len(doc.spans["ruler"]) == 1
    ruler.clear()
    assert len(ruler.labels) == 0
    with pytest.warns(UserWarning):
        doc = nlp("hello world")
    assert len(doc.spans["ruler"]) == 0


def test_span_ruler_existing(patterns):
    nlp = spacy.blank("xx")
    ruler = nlp.add_pipe("span_ruler", config={"overwrite": False})
    ruler.add_patterns(patterns)
    doc = nlp.make_doc("OH HELLO WORLD bye bye")
    doc.spans["ruler"] = [doc[0:2]]
    doc = nlp(doc)
    assert len(doc.spans["ruler"]) == 3
    assert doc.spans["ruler"][0] == doc[0:2]
    assert doc.spans["ruler"][1].label_ == "HELLO"
    assert doc.spans["ruler"][1].id_ == "hello2"
    assert doc.spans["ruler"][2].label_ == "BYE"
    assert doc.spans["ruler"][2].id_ == ""


def test_span_ruler_existing_overwrite(patterns):
    nlp = spacy.blank("xx")
    ruler = nlp.add_pipe("span_ruler", config={"overwrite": True})
    ruler.add_patterns(patterns)
    doc = nlp.make_doc("OH HELLO WORLD bye bye")
    doc.spans["ruler"] = [doc[0:2]]
    doc = nlp(doc)
    assert len(doc.spans["ruler"]) == 2
    assert doc.spans["ruler"][0].label_ == "HELLO"
    assert doc.spans["ruler"][0].text == "HELLO"
    assert doc.spans["ruler"][1].label_ == "BYE"


def test_span_ruler_serialize_bytes(patterns):
    nlp = spacy.blank("xx")
    ruler = nlp.add_pipe("span_ruler")
    ruler.add_patterns(patterns)
    assert len(ruler) == len(patterns)
    assert len(ruler.labels) == 4
    ruler_bytes = ruler.to_bytes()
    new_nlp = spacy.blank("xx")
    new_ruler = new_nlp.add_pipe("span_ruler")
    assert len(new_ruler) == 0
    assert len(new_ruler.labels) == 0
    new_ruler = new_ruler.from_bytes(ruler_bytes)
    assert len(new_ruler) == len(patterns)
    assert len(new_ruler.labels) == 4
    assert len(new_ruler.patterns) == len(ruler.patterns)
    for pattern in ruler.patterns:
        assert pattern in new_ruler.patterns
    assert sorted(new_ruler.labels) == sorted(ruler.labels)


def test_span_ruler_validate():
    nlp = spacy.blank("xx")
    ruler = nlp.add_pipe("span_ruler")
    validated_ruler = nlp.add_pipe(
        "span_ruler", name="validated_span_ruler", config={"validate": True}
    )
    valid_pattern = {"label": "HELLO", "pattern": [{"LOWER": "HELLO"}]}
    invalid_pattern = {"label": "HELLO", "pattern": [{"ASDF": "HELLO"}]}
    # invalid pattern raises error without validate
    with pytest.raises(ValueError):
        ruler.add_patterns([invalid_pattern])
    # valid pattern is added without errors with validate
    validated_ruler.add_patterns([valid_pattern])
    # invalid pattern raises error with validate
    with pytest.raises(MatchPatternError):
        validated_ruler.add_patterns([invalid_pattern])


def test_span_ruler_properties(patterns):
    nlp = spacy.blank("xx")
    ruler = nlp.add_pipe("span_ruler", config={"overwrite": True})
    ruler.add_patterns(patterns)
    assert sorted(ruler.labels) == sorted(set([p["label"] for p in patterns]))


def test_span_ruler_overlapping_spans(overlapping_patterns):
    nlp = spacy.blank("xx")
    ruler = nlp.add_pipe("span_ruler")
    ruler.add_patterns(overlapping_patterns)
    doc = ruler(nlp.make_doc("foo bar baz"))
    assert len(doc.spans["ruler"]) == 2
    assert doc.spans["ruler"][0].label_ == "FOOBAR"
    assert doc.spans["ruler"][1].label_ == "BARBAZ"


def test_span_ruler_scorer(overlapping_patterns):
    nlp = spacy.blank("xx")
    ruler = nlp.add_pipe("span_ruler")
    ruler.add_patterns(overlapping_patterns)
    text = "foo bar baz"
    pred_doc = ruler(nlp.make_doc(text))
    assert len(pred_doc.spans["ruler"]) == 2
    assert pred_doc.spans["ruler"][0].label_ == "FOOBAR"
    assert pred_doc.spans["ruler"][1].label_ == "BARBAZ"
    ref_doc = nlp.make_doc(text)
    ref_doc.spans["ruler"] = [Span(ref_doc, 0, 2, label="FOOBAR")]
    scores = nlp.evaluate([Example(pred_doc, ref_doc)])
    assert scores["spans_ruler_p"] == 0.5
    assert scores["spans_ruler_r"] == 1.0


@pytest.mark.parametrize("n_process", [1, 2])
def test_span_ruler_multiprocessing(n_process):
    # NOTE: the dump read `isinstance(get_current_ops, NumpyOps)`, which
    # compares the function object itself; the ops instance is what should
    # be checked, so the call parentheses are restored here.
    if isinstance(get_current_ops(), NumpyOps) or n_process < 2:
        texts = ["I enjoy eating Pizza Hut pizza."]
        patterns = [{"label": "FASTFOOD", "pattern": "Pizza Hut"}]
        nlp = spacy.blank("xx")
        ruler = nlp.add_pipe("span_ruler")
        ruler.add_patterns(patterns)
        for doc in nlp.pipe(texts, n_process=2):
            for ent in doc.spans["ruler"]:
                assert ent.label_ == "FASTFOOD"


def test_span_ruler_serialize_dir(patterns):
    nlp = spacy.blank("xx")
    ruler = nlp.add_pipe("span_ruler")
    ruler.add_patterns(patterns)
    with make_tempdir() as d:
        ruler.to_disk(d / "test_ruler")
        ruler.from_disk(d / "test_ruler")  # read from an existing directory
        with pytest.raises(ValueError):
            ruler.from_disk(d / "non_existing_dir")  # read from a bad directory


def test_span_ruler_remove_basic(person_org_patterns):
    nlp = spacy.blank("xx")
    ruler = nlp.add_pipe("span_ruler")
    ruler.add_patterns(person_org_patterns)
    doc = ruler(nlp.make_doc("Dina went to school"))
    assert len(ruler.patterns) == 3
    assert len(doc.spans["ruler"]) == 1
    assert doc.spans["ruler"][0].label_ == "PERSON"
    assert doc.spans["ruler"][0].text == "Dina"
    ruler.remove("PERSON")
    doc = ruler(nlp.make_doc("Dina went to school"))
    assert len(doc.spans["ruler"]) == 0
    assert len(ruler.patterns) == 2


def test_span_ruler_remove_nonexisting_pattern(person_org_patterns):
    nlp = spacy.blank("xx")
    ruler = nlp.add_pipe("span_ruler")
    ruler.add_patterns(person_org_patterns)
    assert len(ruler.patterns) == 3
    with pytest.raises(ValueError):
        ruler.remove("NE")
    with pytest.raises(ValueError):
        ruler.remove_by_id("NE")


def test_span_ruler_remove_several_patterns(person_org_patterns):
    nlp = spacy.blank("xx")
    ruler = nlp.add_pipe("span_ruler")
    ruler.add_patterns(person_org_patterns)
    doc = ruler(nlp.make_doc("Dina founded the company ACME."))
    assert len(ruler.patterns) == 3
    assert len(doc.spans["ruler"]) == 2
    assert doc.spans["ruler"][0].label_ == "PERSON"
    assert doc.spans["ruler"][0].text == "Dina"
    assert doc.spans["ruler"][1].label_ == "ORG"
    assert doc.spans["ruler"][1].text == "ACME"
    ruler.remove("PERSON")
    doc = ruler(nlp.make_doc("Dina founded the company ACME"))
    assert len(ruler.patterns) == 2
    assert len(doc.spans["ruler"]) == 1
    assert doc.spans["ruler"][0].label_ == "ORG"
    assert doc.spans["ruler"][0].text == "ACME"
    ruler.remove("ORG")
    with pytest.warns(UserWarning):
        doc = ruler(nlp.make_doc("Dina founded the company ACME"))
    assert len(ruler.patterns) == 0
    assert len(doc.spans["ruler"]) == 0


def test_span_ruler_remove_patterns_in_a_row(person_org_date_patterns):
    nlp = spacy.blank("xx")
    ruler = nlp.add_pipe("span_ruler")
    ruler.add_patterns(person_org_date_patterns)
    doc = ruler(nlp.make_doc("Dina founded the company ACME on June 14th"))
    assert len(doc.spans["ruler"]) == 3
    assert doc.spans["ruler"][0].label_ == "PERSON"
    assert doc.spans["ruler"][0].text == "Dina"
    assert doc.spans["ruler"][1].label_ == "ORG"
    assert doc.spans["ruler"][1].text == "ACME"
    assert doc.spans["ruler"][2].label_ == "DATE"
    assert doc.spans["ruler"][2].text == "June 14th"
    ruler.remove("ORG")
    ruler.remove("DATE")
    doc = ruler(nlp.make_doc("Dina went to school"))
    assert len(doc.spans["ruler"]) == 1


def test_span_ruler_remove_all_patterns(person_org_date_patterns):
    nlp = spacy.blank("xx")
    ruler = nlp.add_pipe("span_ruler")
    ruler.add_patterns(person_org_date_patterns)
    assert len(ruler.patterns) == 4
    ruler.remove("PERSON")
    assert len(ruler.patterns) == 3
    ruler.remove("ORG")
    assert len(ruler.patterns) == 1
    ruler.remove("DATE")
    assert len(ruler.patterns) == 0
    with pytest.warns(UserWarning):
        doc = ruler(nlp.make_doc("Dina founded the company ACME on June 14th"))
    assert len(doc.spans["ruler"]) == 0


def test_span_ruler_remove_and_add():
    nlp = spacy.blank("xx")
    ruler = nlp.add_pipe("span_ruler")
    patterns1 = [{"label": "DATE1", "pattern": "last time"}]
    ruler.add_patterns(patterns1)
    doc = ruler(
        nlp.make_doc("I saw him last time we met, this time he brought some flowers")
    )
    assert len(ruler.patterns) == 1
    assert len(doc.spans["ruler"]) == 1
    assert doc.spans["ruler"][0].label_ == "DATE1"
    assert doc.spans["ruler"][0].text == "last time"
    patterns2 = [{"label": "DATE2", "pattern": "this time"}]
    ruler.add_patterns(patterns2)
    doc = ruler(
        nlp.make_doc("I saw him last time we met, this time he brought some flowers")
    )
    assert len(ruler.patterns) == 2
    assert len(doc.spans["ruler"]) == 2
    assert doc.spans["ruler"][0].label_ == "DATE1"
    assert doc.spans["ruler"][0].text == "last time"
    assert doc.spans["ruler"][1].label_ == "DATE2"
    assert doc.spans["ruler"][1].text == "this time"
    ruler.remove("DATE1")
    doc = ruler(
        nlp.make_doc("I saw him last time we met, this time he brought some flowers")
    )
    assert len(ruler.patterns) == 1
    assert len(doc.spans["ruler"]) == 1
    assert doc.spans["ruler"][0].label_ == "DATE2"
    assert doc.spans["ruler"][0].text == "this time"
    ruler.add_patterns(patterns1)
    doc = ruler(
        nlp.make_doc("I saw him last time we met, this time he brought some flowers")
    )
    assert len(ruler.patterns) == 2
    assert len(doc.spans["ruler"]) == 2
    patterns3 = [{"label": "DATE3", "pattern": "another time"}]
    ruler.add_patterns(patterns3)
    doc = ruler(
        nlp.make_doc(
            "I saw him last time we met, this time he brought some flowers, another time some chocolate."
        )
    )
    assert len(ruler.patterns) == 3
    assert len(doc.spans["ruler"]) == 3
    ruler.remove("DATE3")
    doc = ruler(
        nlp.make_doc(
            "I saw him last time we met, this time he brought some flowers, another time some chocolate."
        )
    )
    assert len(ruler.patterns) == 2
    assert len(doc.spans["ruler"]) == 2


def test_span_ruler_spans_filter(overlapping_patterns):
    nlp = spacy.blank("xx")
    ruler = nlp.add_pipe(
        "span_ruler",
        config={"spans_filter": {"@misc": "spacy.first_longest_spans_filter.v1"}},
    )
    ruler.add_patterns(overlapping_patterns)
    doc = ruler(nlp.make_doc("foo bar baz"))
    assert len(doc.spans["ruler"]) == 1
    assert doc.spans["ruler"][0].label_ == "FOOBAR"


def test_span_ruler_ents_default_filter(overlapping_patterns):
    nlp = spacy.blank("xx")
    ruler = nlp.add_pipe("span_ruler", config={"annotate_ents": True})
    ruler.add_patterns(overlapping_patterns)
    doc = ruler(nlp.make_doc("foo bar baz"))
    assert len(doc.ents) == 1
    assert doc.ents[0].label_ == "FOOBAR"


def test_span_ruler_ents_overwrite_filter(overlapping_patterns):
    nlp = spacy.blank("xx")
    ruler = nlp.add_pipe(
        "span_ruler",
        config={
            "annotate_ents": True,
            "overwrite": False,
            "ents_filter": {"@misc": "spacy.prioritize_new_ents_filter.v1"},
        },
    )
    ruler.add_patterns(overlapping_patterns)
    # overlapping ents are clobbered, non-overlapping ents are preserved
    doc = nlp.make_doc("foo bar baz a b c")
    doc.ents = [Span(doc, 1, 3, label="BARBAZ"), Span(doc, 3, 6, label="ABC")]
    doc = ruler(doc)
    assert len(doc.ents) == 2
    assert doc.ents[0].label_ == "FOOBAR"
    assert doc.ents[1].label_ == "ABC"


def test_span_ruler_ents_bad_filter(overlapping_patterns):
    @registry.misc("test_pass_through_filter")
    def make_pass_through_filter():
        def pass_through_filter(spans1, spans2):
            return spans1 + spans2

        return pass_through_filter

    nlp = spacy.blank("xx")
    ruler = nlp.add_pipe(
        "span_ruler",
        config={
            "annotate_ents": True,
            "ents_filter": {"@misc": "test_pass_through_filter"},
        },
    )
    ruler.add_patterns(overlapping_patterns)
    with pytest.raises(ValueError):
        ruler(nlp.make_doc("foo bar baz"))
```
file_length: 16,220 | avg_line_length: 33.883871 | max_line_length: 105 | extension_type: py
repo: spaCy | file: spaCy-master/spacy/tests/pipeline/test_spancat.py
import numpy import pytest from numpy.testing import assert_almost_equal, assert_array_equal from thinc.api import NumpyOps, Ragged, get_current_ops from spacy import util from spacy.lang.en import English from spacy.language import Language from spacy.tokens import SpanGroup from spacy.tokens._dict_proxies import SpanGroups from spacy.training import Example from spacy.util import fix_random_seed, make_tempdir, registry OPS = get_current_ops() SPAN_KEY = "labeled_spans" SPANCAT_COMPONENTS = ["spancat", "spancat_singlelabel"] TRAIN_DATA = [ ("Who is Shaka Khan?", {"spans": {SPAN_KEY: [(7, 17, "PERSON")]}}), ( "I like London and Berlin.", {"spans": {SPAN_KEY: [(7, 13, "LOC"), (18, 24, "LOC")]}}, ), ] TRAIN_DATA_OVERLAPPING = [ ("Who is Shaka Khan?", {"spans": {SPAN_KEY: [(7, 17, "PERSON")]}}), ( "I like London and Berlin", {"spans": {SPAN_KEY: [(7, 13, "LOC"), (18, 24, "LOC"), (7, 24, "DOUBLE_LOC")]}}, ), ("", {"spans": {SPAN_KEY: []}}), ] def make_examples(nlp, data=TRAIN_DATA): train_examples = [] for t in data: eg = Example.from_dict(nlp.make_doc(t[0]), t[1]) train_examples.append(eg) return train_examples @pytest.mark.parametrize("name", SPANCAT_COMPONENTS) def test_no_label(name): nlp = Language() nlp.add_pipe(name, config={"spans_key": SPAN_KEY}) with pytest.raises(ValueError): nlp.initialize() @pytest.mark.parametrize("name", SPANCAT_COMPONENTS) def test_no_resize(name): nlp = Language() spancat = nlp.add_pipe(name, config={"spans_key": SPAN_KEY}) spancat.add_label("Thing") spancat.add_label("Phrase") assert spancat.labels == ("Thing", "Phrase") nlp.initialize() assert spancat.model.get_dim("nO") == spancat._n_labels # this throws an error because the spancat can't be resized after initialization with pytest.raises(ValueError): spancat.add_label("Stuff") @pytest.mark.parametrize("name", SPANCAT_COMPONENTS) def test_implicit_labels(name): nlp = Language() spancat = nlp.add_pipe(name, config={"spans_key": SPAN_KEY}) assert len(spancat.labels) == 0 train_examples = make_examples(nlp) nlp.initialize(get_examples=lambda: train_examples) assert spancat.labels == ("PERSON", "LOC") @pytest.mark.parametrize("name", SPANCAT_COMPONENTS) def test_explicit_labels(name): nlp = Language() spancat = nlp.add_pipe(name, config={"spans_key": SPAN_KEY}) assert len(spancat.labels) == 0 spancat.add_label("PERSON") spancat.add_label("LOC") nlp.initialize() assert spancat.labels == ("PERSON", "LOC") # TODO figure out why this is flaky @pytest.mark.skip(reason="Test is unreliable for unknown reason") def test_doc_gc(): # If the Doc object is garbage collected, the spans won't be functional afterwards nlp = Language() spancat = nlp.add_pipe("spancat", config={"spans_key": SPAN_KEY}) spancat.add_label("PERSON") nlp.initialize() texts = [ "Just a sentence.", "I like London and Berlin", "I like Berlin", "I eat ham.", ] all_spans = [doc.spans for doc in nlp.pipe(texts)] for text, spangroups in zip(texts, all_spans): assert isinstance(spangroups, SpanGroups) for key, spangroup in spangroups.items(): assert isinstance(spangroup, SpanGroup) # XXX This fails with length 0 sometimes assert len(spangroup) > 0 with pytest.raises(RuntimeError): spangroup[0] @pytest.mark.parametrize( "max_positive,nr_results", [(None, 4), (1, 2), (2, 3), (3, 4), (4, 4)] ) def test_make_spangroup_multilabel(max_positive, nr_results): fix_random_seed(0) nlp = Language() spancat = nlp.add_pipe( "spancat", config={"spans_key": SPAN_KEY, "threshold": 0.5, "max_positive": max_positive}, ) doc = nlp.make_doc("Greater London") ngram_suggester = 
registry.misc.get("spacy.ngram_suggester.v1")(sizes=[1, 2]) indices = ngram_suggester([doc])[0].dataXd assert_array_equal(OPS.to_numpy(indices), numpy.asarray([[0, 1], [1, 2], [0, 2]])) labels = ["Thing", "City", "Person", "GreatCity"] for label in labels: spancat.add_label(label) scores = numpy.asarray( [[0.2, 0.4, 0.3, 0.1], [0.1, 0.6, 0.2, 0.4], [0.8, 0.7, 0.3, 0.9]], dtype="f" ) spangroup = spancat._make_span_group_multilabel(doc, indices, scores) assert len(spangroup) == nr_results # first span is always the second token "London" assert spangroup[0].text == "London" assert spangroup[0].label_ == "City" assert_almost_equal(0.6, spangroup.attrs["scores"][0], 5) # second span depends on the number of positives that were allowed assert spangroup[1].text == "Greater London" if max_positive == 1: assert spangroup[1].label_ == "GreatCity" assert_almost_equal(0.9, spangroup.attrs["scores"][1], 5) else: assert spangroup[1].label_ == "Thing" assert_almost_equal(0.8, spangroup.attrs["scores"][1], 5) if nr_results > 2: assert spangroup[2].text == "Greater London" if max_positive == 2: assert spangroup[2].label_ == "GreatCity" assert_almost_equal(0.9, spangroup.attrs["scores"][2], 5) else: assert spangroup[2].label_ == "City" assert_almost_equal(0.7, spangroup.attrs["scores"][2], 5) assert spangroup[-1].text == "Greater London" assert spangroup[-1].label_ == "GreatCity" assert_almost_equal(0.9, spangroup.attrs["scores"][-1], 5) @pytest.mark.parametrize( "threshold,allow_overlap,nr_results", [(0.05, True, 3), (0.05, False, 1), (0.5, True, 2), (0.5, False, 1)], ) def test_make_spangroup_singlelabel(threshold, allow_overlap, nr_results): fix_random_seed(0) nlp = Language() spancat = nlp.add_pipe( "spancat", config={ "spans_key": SPAN_KEY, "threshold": threshold, "max_positive": 1, }, ) doc = nlp.make_doc("Greater London") ngram_suggester = registry.misc.get("spacy.ngram_suggester.v1")(sizes=[1, 2]) indices = ngram_suggester([doc])[0].dataXd assert_array_equal(OPS.to_numpy(indices), numpy.asarray([[0, 1], [1, 2], [0, 2]])) labels = ["Thing", "City", "Person", "GreatCity"] for label in labels: spancat.add_label(label) scores = numpy.asarray( [[0.2, 0.4, 0.3, 0.1], [0.1, 0.6, 0.2, 0.4], [0.8, 0.7, 0.3, 0.9]], dtype="f" ) spangroup = spancat._make_span_group_singlelabel( doc, indices, scores, allow_overlap ) if threshold > 0.4: if allow_overlap: assert spangroup[0].text == "London" assert spangroup[0].label_ == "City" assert_almost_equal(0.6, spangroup.attrs["scores"][0], 5) assert spangroup[1].text == "Greater London" assert spangroup[1].label_ == "GreatCity" assert spangroup.attrs["scores"][1] == 0.9 assert_almost_equal(0.9, spangroup.attrs["scores"][1], 5) else: assert spangroup[0].text == "Greater London" assert spangroup[0].label_ == "GreatCity" assert spangroup.attrs["scores"][0] == 0.9 else: if allow_overlap: assert spangroup[0].text == "Greater" assert spangroup[0].label_ == "City" assert spangroup[1].text == "London" assert spangroup[1].label_ == "City" assert spangroup[2].text == "Greater London" assert spangroup[2].label_ == "GreatCity" else: assert spangroup[0].text == "Greater London" def test_make_spangroup_negative_label(): fix_random_seed(0) nlp_single = Language() nlp_multi = Language() spancat_single = nlp_single.add_pipe( "spancat", config={ "spans_key": SPAN_KEY, "threshold": 0.1, "max_positive": 1, }, ) spancat_multi = nlp_multi.add_pipe( "spancat", config={ "spans_key": SPAN_KEY, "threshold": 0.1, "max_positive": 2, }, ) spancat_single.add_negative_label = True 
spancat_multi.add_negative_label = True doc = nlp_single.make_doc("Greater London") labels = ["Thing", "City", "Person", "GreatCity"] for label in labels: spancat_multi.add_label(label) spancat_single.add_label(label) ngram_suggester = registry.misc.get("spacy.ngram_suggester.v1")(sizes=[1, 2]) indices = ngram_suggester([doc])[0].dataXd assert_array_equal(OPS.to_numpy(indices), numpy.asarray([[0, 1], [1, 2], [0, 2]])) scores = numpy.asarray( [ [0.2, 0.4, 0.3, 0.1, 0.1], [0.1, 0.6, 0.2, 0.4, 0.9], [0.8, 0.7, 0.3, 0.9, 0.1], ], dtype="f", ) spangroup_multi = spancat_multi._make_span_group_multilabel(doc, indices, scores) spangroup_single = spancat_single._make_span_group_singlelabel(doc, indices, scores) assert len(spangroup_single) == 2 assert spangroup_single[0].text == "Greater" assert spangroup_single[0].label_ == "City" assert_almost_equal(0.4, spangroup_single.attrs["scores"][0], 5) assert spangroup_single[1].text == "Greater London" assert spangroup_single[1].label_ == "GreatCity" assert_almost_equal(0.9, spangroup_single.attrs["scores"][1], 5) assert len(spangroup_multi) == 6 assert spangroup_multi[0].text == "Greater" assert spangroup_multi[0].label_ == "City" assert_almost_equal(0.4, spangroup_multi.attrs["scores"][0], 5) assert spangroup_multi[1].text == "Greater" assert spangroup_multi[1].label_ == "Person" assert_almost_equal(0.3, spangroup_multi.attrs["scores"][1], 5) assert spangroup_multi[2].text == "London" assert spangroup_multi[2].label_ == "City" assert_almost_equal(0.6, spangroup_multi.attrs["scores"][2], 5) assert spangroup_multi[3].text == "London" assert spangroup_multi[3].label_ == "GreatCity" assert_almost_equal(0.4, spangroup_multi.attrs["scores"][3], 5) assert spangroup_multi[4].text == "Greater London" assert spangroup_multi[4].label_ == "Thing" assert_almost_equal(0.8, spangroup_multi.attrs["scores"][4], 5) assert spangroup_multi[5].text == "Greater London" assert spangroup_multi[5].label_ == "GreatCity" assert_almost_equal(0.9, spangroup_multi.attrs["scores"][5], 5) def test_ngram_suggester(en_tokenizer): # test different n-gram lengths for size in [1, 2, 3]: ngram_suggester = registry.misc.get("spacy.ngram_suggester.v1")(sizes=[size]) docs = [ en_tokenizer(text) for text in [ "a", "a b", "a b c", "a b c d", "a b c d e", "a " * 100, ] ] ngrams = ngram_suggester(docs) # span sizes are correct for s in ngrams.data: assert s[1] - s[0] == size # spans are within docs offset = 0 for i, doc in enumerate(docs): spans = ngrams.dataXd[offset : offset + ngrams.lengths[i]] spans_set = set() for span in spans: assert 0 <= span[0] < len(doc) assert 0 < span[1] <= len(doc) spans_set.add((int(span[0]), int(span[1]))) # spans are unique assert spans.shape[0] == len(spans_set) offset += ngrams.lengths[i] # the number of spans is correct assert_array_equal( OPS.to_numpy(ngrams.lengths), [max(0, len(doc) - (size - 1)) for doc in docs], ) # test 1-3-gram suggestions ngram_suggester = registry.misc.get("spacy.ngram_suggester.v1")(sizes=[1, 2, 3]) docs = [ en_tokenizer(text) for text in ["a", "a b", "a b c", "a b c d", "a b c d e"] ] ngrams = ngram_suggester(docs) assert_array_equal(OPS.to_numpy(ngrams.lengths), [1, 3, 6, 9, 12]) assert_array_equal( OPS.to_numpy(ngrams.data), [ # doc 0 [0, 1], # doc 1 [0, 1], [1, 2], [0, 2], # doc 2 [0, 1], [1, 2], [2, 3], [0, 2], [1, 3], [0, 3], # doc 3 [0, 1], [1, 2], [2, 3], [3, 4], [0, 2], [1, 3], [2, 4], [0, 3], [1, 4], # doc 4 [0, 1], [1, 2],
[2, 3], [3, 4], [4, 5], [0, 2], [1, 3], [2, 4], [3, 5], [0, 3], [1, 4], [2, 5], ], ) # test some empty docs ngram_suggester = registry.misc.get("spacy.ngram_suggester.v1")(sizes=[1]) docs = [en_tokenizer(text) for text in ["", "a", ""]] ngrams = ngram_suggester(docs) assert_array_equal(OPS.to_numpy(ngrams.lengths), [len(doc) for doc in docs]) # test all empty docs ngram_suggester = registry.misc.get("spacy.ngram_suggester.v1")(sizes=[1]) docs = [en_tokenizer(text) for text in ["", "", ""]] ngrams = ngram_suggester(docs) assert_array_equal(OPS.to_numpy(ngrams.lengths), [len(doc) for doc in docs]) def test_ngram_sizes(en_tokenizer): # test that the range suggester works well size_suggester = registry.misc.get("spacy.ngram_suggester.v1")(sizes=[1, 2, 3]) suggester_factory = registry.misc.get("spacy.ngram_range_suggester.v1") range_suggester = suggester_factory(min_size=1, max_size=3) docs = [ en_tokenizer(text) for text in ["a", "a b", "a b c", "a b c d", "a b c d e"] ] ngrams_1 = size_suggester(docs) ngrams_2 = range_suggester(docs) assert_array_equal(OPS.to_numpy(ngrams_1.lengths), [1, 3, 6, 9, 12]) assert_array_equal(OPS.to_numpy(ngrams_1.lengths), OPS.to_numpy(ngrams_2.lengths)) assert_array_equal(OPS.to_numpy(ngrams_1.data), OPS.to_numpy(ngrams_2.data)) # one more variation suggester_factory = registry.misc.get("spacy.ngram_range_suggester.v1") range_suggester = suggester_factory(min_size=2, max_size=4) ngrams_3 = range_suggester(docs) assert_array_equal(OPS.to_numpy(ngrams_3.lengths), [0, 1, 3, 6, 9]) def test_preset_spans_suggester(): nlp = Language() docs = [nlp("This is an example."), nlp("This is the second example.")] docs[0].spans[SPAN_KEY] = [docs[0][3:4]] docs[1].spans[SPAN_KEY] = [docs[1][0:4], docs[1][3:5]] suggester = registry.misc.get("spacy.preset_spans_suggester.v1")(spans_key=SPAN_KEY) candidates = suggester(docs) assert type(candidates) == Ragged assert len(candidates) == 2 assert list(candidates.dataXd[0]) == [3, 4] assert list(candidates.dataXd[1]) == [0, 4] assert list(candidates.dataXd[2]) == [3, 5] assert list(candidates.lengths) == [1, 2] def test_overfitting_IO(): # Simple test to try and quickly overfit the spancat component - ensuring the ML models work correctly fix_random_seed(0) nlp = English() spancat = nlp.add_pipe("spancat", config={"spans_key": SPAN_KEY}) train_examples = make_examples(nlp) optimizer = nlp.initialize(get_examples=lambda: train_examples) assert spancat.model.get_dim("nO") == 2 assert set(spancat.labels) == {"LOC", "PERSON"} for i in range(50): losses = {} nlp.update(train_examples, sgd=optimizer, losses=losses) assert losses["spancat"] < 0.01 # test the trained model test_text = "I like London and Berlin" doc = nlp(test_text) assert doc.spans[spancat.key] == doc.spans[SPAN_KEY] spans = doc.spans[SPAN_KEY] assert len(spans) == 2 assert len(spans.attrs["scores"]) == 2 assert min(spans.attrs["scores"]) > 0.8 assert set([span.text for span in spans]) == {"London", "Berlin"} assert set([span.label_ for span in spans]) == {"LOC"} # Also test the results are still the same after IO with make_tempdir() as tmp_dir: nlp.to_disk(tmp_dir) nlp2 = util.load_model_from_path(tmp_dir) doc2 = nlp2(test_text) spans2 = doc2.spans[SPAN_KEY] assert len(spans2) == 2 assert len(spans2.attrs["scores"]) == 2 assert min(spans2.attrs["scores"]) > 0.8 assert set([span.text for span in spans2]) == {"London", "Berlin"} assert set([span.label_ for span in spans2]) == {"LOC"} # Test scoring scores = nlp.evaluate(train_examples) assert f"spans_{SPAN_KEY}_f" in scores 
assert scores[f"spans_{SPAN_KEY}_p"] == 1.0 assert scores[f"spans_{SPAN_KEY}_r"] == 1.0 assert scores[f"spans_{SPAN_KEY}_f"] == 1.0 # also test that the spancat works for just a single entity in a sentence doc = nlp("London") assert len(doc.spans[spancat.key]) == 1 def test_overfitting_IO_overlapping(): # Test for overfitting on overlapping entities fix_random_seed(0) nlp = English() spancat = nlp.add_pipe("spancat", config={"spans_key": SPAN_KEY}) train_examples = make_examples(nlp, data=TRAIN_DATA_OVERLAPPING) optimizer = nlp.initialize(get_examples=lambda: train_examples) assert spancat.model.get_dim("nO") == 3 assert set(spancat.labels) == {"PERSON", "LOC", "DOUBLE_LOC"} for i in range(50): losses = {} nlp.update(train_examples, sgd=optimizer, losses=losses) assert losses["spancat"] < 0.01 # test the trained model test_text = "I like London and Berlin" doc = nlp(test_text) spans = doc.spans[SPAN_KEY] assert len(spans) == 3 assert len(spans.attrs["scores"]) == 3 assert min(spans.attrs["scores"]) > 0.9 assert set([span.text for span in spans]) == { "London", "Berlin", "London and Berlin", } assert set([span.label_ for span in spans]) == {"LOC", "DOUBLE_LOC"} # Also test the results are still the same after IO with make_tempdir() as tmp_dir: nlp.to_disk(tmp_dir) nlp2 = util.load_model_from_path(tmp_dir) doc2 = nlp2(test_text) spans2 = doc2.spans[SPAN_KEY] assert len(spans2) == 3 assert len(spans2.attrs["scores"]) == 3 assert min(spans2.attrs["scores"]) > 0.9 assert set([span.text for span in spans2]) == { "London", "Berlin", "London and Berlin", } assert set([span.label_ for span in spans2]) == {"LOC", "DOUBLE_LOC"} @pytest.mark.parametrize("name", SPANCAT_COMPONENTS) def test_zero_suggestions(name): # Test with a suggester that can return 0 suggestions @registry.misc("test_mixed_zero_suggester") def make_mixed_zero_suggester(): def mixed_zero_suggester(docs, *, ops=None): if ops is None: ops = get_current_ops() spans = [] lengths = [] for doc in docs: if len(doc) > 0 and len(doc) % 2 == 0: spans.append((0, 1)) lengths.append(1) else: lengths.append(0) spans = ops.asarray2i(spans) lengths_array = ops.asarray1i(lengths) if len(spans) > 0: output = Ragged(ops.xp.vstack(spans), lengths_array) else: output = Ragged(ops.xp.zeros((0, 0), dtype="i"), lengths_array) return output return mixed_zero_suggester fix_random_seed(0) nlp = English() spancat = nlp.add_pipe( name, config={ "suggester": {"@misc": "test_mixed_zero_suggester"}, "spans_key": SPAN_KEY, }, ) train_examples = make_examples(nlp) optimizer = nlp.initialize(get_examples=lambda: train_examples) assert spancat.model.get_dim("nO") == spancat._n_labels assert set(spancat.labels) == {"LOC", "PERSON"} nlp.update(train_examples, sgd=optimizer) # empty doc nlp("") # single doc with zero suggestions nlp("one") # single doc with one suggestion nlp("two two") # batch with mixed zero/one suggestions list(nlp.pipe(["one", "two two", "three three three", "", "four four four four"])) # batch with no suggestions list(nlp.pipe(["", "one", "three three three"])) @pytest.mark.parametrize("name", SPANCAT_COMPONENTS) def test_set_candidates(name): nlp = Language() spancat = nlp.add_pipe(name, config={"spans_key": SPAN_KEY}) train_examples = make_examples(nlp) nlp.initialize(get_examples=lambda: train_examples) texts = [ "Just a sentence.", "I like London and Berlin", "I like Berlin", "I eat ham.", ] docs = [nlp(text) for text in texts] spancat.set_candidates(docs) assert len(docs) == len(texts) assert type(docs[0].spans["candidates"]) == SpanGroup 
assert len(docs[0].spans["candidates"]) == 9 assert docs[0].spans["candidates"][0].text == "Just" assert docs[0].spans["candidates"][4].text == "Just a" @pytest.mark.parametrize("name", SPANCAT_COMPONENTS) @pytest.mark.parametrize("n_process", [1, 2]) def test_spancat_multiprocessing(name, n_process): if isinstance(get_current_ops, NumpyOps) or n_process < 2: nlp = Language() spancat = nlp.add_pipe(name, config={"spans_key": SPAN_KEY}) train_examples = make_examples(nlp) nlp.initialize(get_examples=lambda: train_examples) texts = [ "Just a sentence.", "I like London and Berlin", "I like Berlin", "I eat ham.", ] docs = list(nlp.pipe(texts, n_process=n_process)) assert len(docs) == len(texts)
21976
34.85155
106
py
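The spancat tests above all revolve around the registered suggester functions ("spacy.ngram_suggester.v1", "spacy.ngram_range_suggester.v1", "spacy.preset_spans_suggester.v1"), which propose candidate spans as a Ragged array of (start, end) token offsets that the component then scores per label. A minimal sketch of that pattern, assuming spaCy v3 is installed (the "sc" spans key and the labels are illustrative choices, not taken from the tests):

from spacy.lang.en import English
from spacy.util import registry

nlp = English()
# The spans_key decides where predicted spans land: doc.spans["sc"]
spancat = nlp.add_pipe("spancat", config={"spans_key": "sc"})
spancat.add_label("PERSON")
spancat.add_label("LOC")

# The suggester is independent of the model weights: it only proposes candidates.
suggester = registry.misc.get("spacy.ngram_suggester.v1")(sizes=[1, 2])
doc = nlp.make_doc("I like London")
candidates = suggester([doc])
# One (start, end) row per candidate, grouped by n-gram size; for a 3-token doc
# with sizes [1, 2] this should be [[0 1] [1 2] [2 3] [0 2] [1 3]], lengths [5]
print(candidates.dataXd, candidates.lengths)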
spaCy
spaCy-master/spacy/tests/pipeline/test_tagger.py
import pytest from numpy.testing import assert_almost_equal, assert_equal from thinc.api import compounding, get_current_ops from spacy import util from spacy.attrs import TAG from spacy.lang.en import English from spacy.language import Language from spacy.training import Example from ..util import make_tempdir @pytest.mark.issue(4348) def test_issue4348(): """Test that training the tagger with empty data doesn't throw errors""" nlp = English() example = Example.from_dict(nlp.make_doc(""), {"tags": []}) TRAIN_DATA = [example, example] tagger = nlp.add_pipe("tagger") tagger.add_label("A") optimizer = nlp.initialize() for i in range(5): losses = {} batches = util.minibatch(TRAIN_DATA, size=compounding(4.0, 32.0, 1.001)) for batch in batches: nlp.update(batch, sgd=optimizer, losses=losses) def test_label_types(): nlp = Language() tagger = nlp.add_pipe("tagger") tagger.add_label("A") with pytest.raises(ValueError): tagger.add_label(9) def test_tagger_initialize_tag_map(): """Test that Tagger.initialize() without gold tuples does not clobber the tag map.""" nlp = Language() tagger = nlp.add_pipe("tagger") orig_tag_count = len(tagger.labels) tagger.add_label("A") nlp.initialize() assert orig_tag_count + 1 == len(nlp.get_pipe("tagger").labels) TAGS = ("N", "V", "J") TRAIN_DATA = [ ("I like green eggs", {"tags": ["N", "V", "J", "N"]}), ("Eat blue ham", {"tags": ["V", "J", "N"]}), ] PARTIAL_DATA = [ # partial annotation ("I like green eggs", {"tags": ["", "V", "J", ""]}), # misaligned partial annotation ( "He hates green eggs", { "words": ["He", "hate", "s", "green", "eggs"], "tags": ["", "V", "S", "J", ""], }, ), ] def test_label_smoothing(): nlp = Language() tagger_no_ls = nlp.add_pipe("tagger", "no_label_smoothing") tagger_ls = nlp.add_pipe( "tagger", "label_smoothing", config=dict(label_smoothing=0.05) ) train_examples = [] losses = {} for tag in TAGS: tagger_no_ls.add_label(tag) tagger_ls.add_label(tag) for t in TRAIN_DATA: train_examples.append(Example.from_dict(nlp.make_doc(t[0]), t[1])) nlp.initialize(get_examples=lambda: train_examples) tag_scores, bp_tag_scores = tagger_ls.model.begin_update( [eg.predicted for eg in train_examples] ) ops = get_current_ops() no_ls_grads = ops.to_numpy(tagger_no_ls.get_loss(train_examples, tag_scores)[1][0]) ls_grads = ops.to_numpy(tagger_ls.get_loss(train_examples, tag_scores)[1][0]) assert_almost_equal(ls_grads / no_ls_grads, 0.925) def test_no_label(): nlp = Language() nlp.add_pipe("tagger") with pytest.raises(ValueError): nlp.initialize() def test_no_resize(): nlp = Language() tagger = nlp.add_pipe("tagger") tagger.add_label("N") tagger.add_label("V") assert tagger.labels == ("N", "V") nlp.initialize() assert tagger.model.get_dim("nO") == 2 # this throws an error because the tagger can't be resized after initialization with pytest.raises(ValueError): tagger.add_label("J") def test_implicit_label(): nlp = Language() nlp.add_pipe("tagger") train_examples = [] for t in TRAIN_DATA: train_examples.append(Example.from_dict(nlp.make_doc(t[0]), t[1])) nlp.initialize(get_examples=lambda: train_examples) def test_initialize_examples(): nlp = Language() tagger = nlp.add_pipe("tagger") train_examples = [] for tag in TAGS: tagger.add_label(tag) for t in TRAIN_DATA: train_examples.append(Example.from_dict(nlp.make_doc(t[0]), t[1])) # you shouldn't really call this more than once, but for testing it should be fine nlp.initialize() nlp.initialize(get_examples=lambda: train_examples) with pytest.raises(TypeError): nlp.initialize(get_examples=lambda: None) with
pytest.raises(TypeError): nlp.initialize(get_examples=lambda: train_examples[0]) with pytest.raises(TypeError): nlp.initialize(get_examples=lambda: []) with pytest.raises(TypeError): nlp.initialize(get_examples=train_examples) def test_no_data(): # Test that the tagger provides a nice error when there's no tagging data / labels TEXTCAT_DATA = [ ("I'm so happy.", {"cats": {"POSITIVE": 1.0, "NEGATIVE": 0.0}}), ("I'm so angry", {"cats": {"POSITIVE": 0.0, "NEGATIVE": 1.0}}), ] nlp = English() nlp.add_pipe("tagger") nlp.add_pipe("textcat") train_examples = [] for t in TEXTCAT_DATA: train_examples.append(Example.from_dict(nlp.make_doc(t[0]), t[1])) with pytest.raises(ValueError): nlp.initialize(get_examples=lambda: train_examples) def test_incomplete_data(): # Test that the tagger works with incomplete information nlp = English() nlp.add_pipe("tagger") train_examples = [] for t in PARTIAL_DATA: train_examples.append(Example.from_dict(nlp.make_doc(t[0]), t[1])) optimizer = nlp.initialize(get_examples=lambda: train_examples) for i in range(50): losses = {} nlp.update(train_examples, sgd=optimizer, losses=losses) assert losses["tagger"] < 0.00001 # test the trained model test_text = "I like blue eggs" doc = nlp(test_text) assert doc[1].tag_ == "V" assert doc[2].tag_ == "J" def test_overfitting_IO(): # Simple test to try and quickly overfit the tagger - ensuring the ML models work correctly nlp = English() tagger = nlp.add_pipe("tagger") train_examples = [] for t in TRAIN_DATA: train_examples.append(Example.from_dict(nlp.make_doc(t[0]), t[1])) optimizer = nlp.initialize(get_examples=lambda: train_examples) assert tagger.model.get_dim("nO") == len(TAGS) for i in range(50): losses = {} nlp.update(train_examples, sgd=optimizer, losses=losses) assert losses["tagger"] < 0.00001 # test the trained model test_text = "I like blue eggs" doc = nlp(test_text) assert doc[0].tag_ == "N" assert doc[1].tag_ == "V" assert doc[2].tag_ == "J" assert doc[3].tag_ == "N" # Also test the results are still the same after IO with make_tempdir() as tmp_dir: nlp.to_disk(tmp_dir) nlp2 = util.load_model_from_path(tmp_dir) doc2 = nlp2(test_text) assert doc2[0].tag_ == "N" assert doc2[1].tag_ == "V" assert doc2[2].tag_ == "J" assert doc2[3].tag_ == "N" # Make sure that running pipe twice, or comparing to call, always amounts to the same predictions texts = [ "Just a sentence.", "I like green eggs.", "Here is another one.", "I eat ham.", ] batch_deps_1 = [doc.to_array([TAG]) for doc in nlp.pipe(texts)] batch_deps_2 = [doc.to_array([TAG]) for doc in nlp.pipe(texts)] no_batch_deps = [doc.to_array([TAG]) for doc in [nlp(text) for text in texts]] assert_equal(batch_deps_1, batch_deps_2) assert_equal(batch_deps_1, no_batch_deps) # Try to unlearn the first 'N' tag with negative annotation neg_ex = Example.from_dict(nlp.make_doc(test_text), {"tags": ["!N", "V", "J", "N"]}) for i in range(20): losses = {} nlp.update([neg_ex], sgd=optimizer, losses=losses) # test the "untrained" tag doc3 = nlp(test_text) assert doc3[0].tag_ != "N" def test_tagger_requires_labels(): nlp = English() nlp.add_pipe("tagger") with pytest.raises(ValueError): nlp.initialize()
7653
30.497942
101
py
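Nearly every trainable-pipe test above follows the same overfit-then-roundtrip recipe: wrap raw text and a gold-standard dict into Example objects, initialize the pipeline from those examples so the labels are inferred, run nlp.update() until the loss collapses, then serialize to disk and check that the reloaded pipeline predicts identically. A condensed sketch of that recipe, assuming spaCy v3 (the single training sentence mirrors the TRAIN_DATA shape used in the tagger tests):

from spacy import util
from spacy.lang.en import English
from spacy.training import Example

nlp = English()
nlp.add_pipe("tagger")
data = [("I like green eggs", {"tags": ["N", "V", "J", "N"]})]
examples = [Example.from_dict(nlp.make_doc(text), ann) for text, ann in data]
optimizer = nlp.initialize(get_examples=lambda: examples)  # tag labels are inferred
for _ in range(50):
    losses = {}
    nlp.update(examples, sgd=optimizer, losses=losses)
assert losses["tagger"] < 0.01  # a one-sentence corpus should be memorized
with util.make_tempdir() as tmp_dir:  # IO roundtrip, as in test_overfitting_IO
    nlp.to_disk(tmp_dir)
    nlp2 = util.load_model_from_path(tmp_dir)
    assert [t.tag_ for t in nlp2("I like green eggs")] == ["N", "V", "J", "N"]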
spaCy
spaCy-master/spacy/tests/pipeline/test_textcat.py
import random import numpy.random import pytest from numpy.testing import assert_almost_equal from thinc.api import Config, compounding, fix_random_seed, get_current_ops from wasabi import msg import spacy from spacy import util from spacy.cli.evaluate import print_prf_per_type, print_textcats_auc_per_cat from spacy.lang.en import English from spacy.language import Language from spacy.pipeline import TextCategorizer from spacy.pipeline.textcat import ( single_label_bow_config, single_label_cnn_config, single_label_default_config, ) from spacy.pipeline.textcat_multilabel import ( multi_label_bow_config, multi_label_cnn_config, multi_label_default_config, ) from spacy.pipeline.tok2vec import DEFAULT_TOK2VEC_MODEL from spacy.scorer import Scorer from spacy.tokens import Doc, DocBin from spacy.training import Example from spacy.training.initialize import init_nlp from ..util import make_tempdir TRAIN_DATA_SINGLE_LABEL = [ ("I'm so happy.", {"cats": {"POSITIVE": 1.0, "NEGATIVE": 0.0}}), ("I'm so angry", {"cats": {"POSITIVE": 0.0, "NEGATIVE": 1.0}}), ] TRAIN_DATA_MULTI_LABEL = [ ("I'm angry and confused", {"cats": {"ANGRY": 1.0, "CONFUSED": 1.0, "HAPPY": 0.0}}), ("I'm confused but happy", {"cats": {"ANGRY": 0.0, "CONFUSED": 1.0, "HAPPY": 1.0}}), ] def make_get_examples_single_label(nlp): train_examples = [] for t in TRAIN_DATA_SINGLE_LABEL: train_examples.append(Example.from_dict(nlp.make_doc(t[0]), t[1])) def get_examples(): return train_examples return get_examples def make_get_examples_multi_label(nlp): train_examples = [] for t in TRAIN_DATA_MULTI_LABEL: train_examples.append(Example.from_dict(nlp.make_doc(t[0]), t[1])) def get_examples(): return train_examples return get_examples @pytest.mark.issue(3611) def test_issue3611(): """Test whether adding n-grams in the textcat works even when n > token length of some docs""" unique_classes = ["offensive", "inoffensive"] x_train = [ "This is an offensive text", "This is the second offensive text", "inoff", ] y_train = ["offensive", "offensive", "inoffensive"] nlp = spacy.blank("en") # preparing the data train_data = [] for text, train_instance in zip(x_train, y_train): cat_dict = {label: label == train_instance for label in unique_classes} train_data.append(Example.from_dict(nlp.make_doc(text), {"cats": cat_dict})) # add a text categorizer component model = { "@architectures": "spacy.TextCatBOW.v1", "exclusive_classes": True, "ngram_size": 2, "no_output_layer": False, } textcat = nlp.add_pipe("textcat", config={"model": model}, last=True) for label in unique_classes: textcat.add_label(label) # training the network with nlp.select_pipes(enable="textcat"): optimizer = nlp.initialize() for i in range(3): losses = {} batches = util.minibatch(train_data, size=compounding(4.0, 32.0, 1.001)) for batch in batches: nlp.update(examples=batch, sgd=optimizer, drop=0.1, losses=losses) @pytest.mark.issue(4030) def test_issue4030(): """Test whether textcat works fine with empty doc""" unique_classes = ["offensive", "inoffensive"] x_train = [ "This is an offensive text", "This is the second offensive text", "inoff", ] y_train = ["offensive", "offensive", "inoffensive"] nlp = spacy.blank("en") # preparing the data train_data = [] for text, train_instance in zip(x_train, y_train): cat_dict = {label: label == train_instance for label in unique_classes} train_data.append(Example.from_dict(nlp.make_doc(text), {"cats": cat_dict})) # add a text categorizer component model = { "@architectures": "spacy.TextCatBOW.v1", "exclusive_classes": True, "ngram_size": 2, 
"no_output_layer": False, } textcat = nlp.add_pipe("textcat", config={"model": model}, last=True) for label in unique_classes: textcat.add_label(label) # training the network with nlp.select_pipes(enable="textcat"): optimizer = nlp.initialize() for i in range(3): losses = {} batches = util.minibatch(train_data, size=compounding(4.0, 32.0, 1.001)) for batch in batches: nlp.update(examples=batch, sgd=optimizer, drop=0.1, losses=losses) # processing of an empty doc should result in 0.0 for all categories doc = nlp("") assert doc.cats["offensive"] == 0.0 assert doc.cats["inoffensive"] == 0.0 @pytest.mark.parametrize( "textcat_config", [ single_label_default_config, single_label_bow_config, single_label_cnn_config, multi_label_default_config, multi_label_bow_config, multi_label_cnn_config, ], ) @pytest.mark.issue(5551) def test_issue5551(textcat_config): """Test that after fixing the random seed, the results of the pipeline are truly identical""" component = "textcat" pipe_cfg = Config().from_str(textcat_config) results = [] for i in range(3): fix_random_seed(0) nlp = English() text = "Once hot, form ping-pong-ball-sized balls of the mixture, each weighing roughly 25 g." annots = {"cats": {"Labe1": 1.0, "Label2": 0.0, "Label3": 0.0}} pipe = nlp.add_pipe(component, config=pipe_cfg, last=True) for label in set(annots["cats"]): pipe.add_label(label) # Train nlp.initialize() doc = nlp.make_doc(text) nlp.update([Example.from_dict(doc, annots)]) # Store the result of each iteration result = pipe.model.predict([doc]) results.append(result[0]) # All results should be the same because of the fixed seed assert len(results) == 3 ops = get_current_ops() assert_almost_equal(ops.to_numpy(results[0]), ops.to_numpy(results[1]), decimal=5) assert_almost_equal(ops.to_numpy(results[0]), ops.to_numpy(results[2]), decimal=5) CONFIG_ISSUE_6908 = """ [paths] train = "TRAIN_PLACEHOLDER" raw = null init_tok2vec = null vectors = null [system] seed = 0 gpu_allocator = null [nlp] lang = "en" pipeline = ["textcat"] tokenizer = {"@tokenizers":"spacy.Tokenizer.v1"} disabled = [] before_creation = null after_creation = null after_pipeline_creation = null batch_size = 1000 [components] [components.textcat] factory = "TEXTCAT_PLACEHOLDER" [corpora] [corpora.train] @readers = "spacy.Corpus.v1" path = ${paths:train} [corpora.dev] @readers = "spacy.Corpus.v1" path = ${paths:train} [training] train_corpus = "corpora.train" dev_corpus = "corpora.dev" seed = ${system.seed} gpu_allocator = ${system.gpu_allocator} frozen_components = [] before_to_disk = null [pretraining] [initialize] vectors = ${paths.vectors} init_tok2vec = ${paths.init_tok2vec} vocab_data = null lookups = null before_init = null after_init = null [initialize.components] [initialize.components.textcat] labels = ['label1', 'label2'] [initialize.tokenizer] """ @pytest.mark.parametrize( "component_name", ["textcat", "textcat_multilabel"], ) @pytest.mark.issue(6908) def test_issue6908(component_name): """Test intializing textcat with labels in a list""" def create_data(out_file): nlp = spacy.blank("en") doc = nlp.make_doc("Some text") doc.cats = {"label1": 0, "label2": 1} out_data = DocBin(docs=[doc]).to_bytes() with out_file.open("wb") as file_: file_.write(out_data) with make_tempdir() as tmp_path: train_path = tmp_path / "train.spacy" create_data(train_path) config_str = CONFIG_ISSUE_6908.replace("TEXTCAT_PLACEHOLDER", component_name) config_str = config_str.replace("TRAIN_PLACEHOLDER", train_path.as_posix()) config = util.load_config_from_str(config_str) 
init_nlp(config) @pytest.mark.issue(7019) def test_issue7019(): scores = {"LABEL_A": 0.39829102, "LABEL_B": 0.938298329382, "LABEL_C": None} print_textcats_auc_per_cat(msg, scores) scores = { "LABEL_A": {"p": 0.3420302, "r": 0.3929020, "f": 0.49823928932}, "LABEL_B": {"p": None, "r": None, "f": None}, } print_prf_per_type(msg, scores, name="foo", type="bar") @pytest.mark.issue(9904) def test_issue9904(): nlp = Language() textcat = nlp.add_pipe("textcat") get_examples = make_get_examples_single_label(nlp) nlp.initialize(get_examples) examples = get_examples() scores = textcat.predict([eg.predicted for eg in examples]) loss = textcat.get_loss(examples, scores)[0] loss_double_bs = textcat.get_loss(examples * 2, scores.repeat(2, axis=0))[0] assert loss == pytest.approx(loss_double_bs) @pytest.mark.skip(reason="Test is flaky when run with others") def test_simple_train(): nlp = Language() textcat = nlp.add_pipe("textcat") textcat.add_label("answer") nlp.initialize() for i in range(5): for text, answer in [ ("aaaa", 1.0), ("bbbb", 0), ("aa", 1.0), ("bbbbbbbbb", 0.0), ("aaaaaa", 1), ]: nlp.update((text, {"cats": {"answer": answer}})) doc = nlp("aaa") assert "answer" in doc.cats assert doc.cats["answer"] >= 0.5 @pytest.mark.skip(reason="Test is flaky when run with others") def test_textcat_learns_multilabel(): random.seed(5) numpy.random.seed(5) docs = [] nlp = Language() letters = ["a", "b", "c"] for w1 in letters: for w2 in letters: cats = {letter: float(w2 == letter) for letter in letters} docs.append((Doc(nlp.vocab, words=["d"] * 3 + [w1, w2] + ["d"] * 3), cats)) random.shuffle(docs) textcat = TextCategorizer(nlp.vocab, width=8) for letter in letters: textcat.add_label(letter) optimizer = textcat.initialize(lambda: []) for i in range(30): losses = {} examples = [Example.from_dict(doc, {"cats": cat}) for doc, cat in docs] textcat.update(examples, sgd=optimizer, losses=losses) random.shuffle(docs) for w1 in letters: for w2 in letters: doc = Doc(nlp.vocab, words=["d"] * 3 + [w1, w2] + ["d"] * 3) truth = {letter: w2 == letter for letter in letters} textcat(doc) for cat, score in doc.cats.items(): if not truth[cat]: assert score < 0.5 else: assert score > 0.5 @pytest.mark.parametrize("name", ["textcat", "textcat_multilabel"]) def test_label_types(name): nlp = Language() textcat = nlp.add_pipe(name) textcat.add_label("answer") with pytest.raises(ValueError): textcat.add_label(9) # textcat requires at least two labels if name == "textcat": with pytest.raises(ValueError): nlp.initialize() else: nlp.initialize() @pytest.mark.parametrize( "name,get_examples", [ ("textcat", make_get_examples_single_label), ("textcat_multilabel", make_get_examples_multi_label), ], ) def test_invalid_label_value(name, get_examples): nlp = Language() textcat = nlp.add_pipe(name) example_getter = get_examples(nlp) def invalid_examples(): # make one example with an invalid score examples = example_getter() ref = examples[0].reference key = list(ref.cats.keys())[0] ref.cats[key] = 2.0 return examples with pytest.raises(ValueError): nlp.initialize(get_examples=invalid_examples) @pytest.mark.parametrize("name", ["textcat", "textcat_multilabel"]) def test_no_label(name): nlp = Language() nlp.add_pipe(name) with pytest.raises(ValueError): nlp.initialize() @pytest.mark.parametrize( "name,get_examples", [ ("textcat", make_get_examples_single_label), ("textcat_multilabel", make_get_examples_multi_label), ], ) def test_implicit_label(name, get_examples): nlp = Language() nlp.add_pipe(name)
nlp.initialize(get_examples=get_examples(nlp)) # fmt: off @pytest.mark.slow @pytest.mark.parametrize( "name,textcat_config", [ # BOW ("textcat", {"@architectures": "spacy.TextCatBOW.v1", "exclusive_classes": True, "no_output_layer": False, "ngram_size": 3}), ("textcat", {"@architectures": "spacy.TextCatBOW.v1", "exclusive_classes": True, "no_output_layer": True, "ngram_size": 3}), ("textcat_multilabel", {"@architectures": "spacy.TextCatBOW.v1", "exclusive_classes": False, "no_output_layer": False, "ngram_size": 3}), ("textcat_multilabel", {"@architectures": "spacy.TextCatBOW.v1", "exclusive_classes": False, "no_output_layer": True, "ngram_size": 3}), # ENSEMBLE V1 ("textcat", {"@architectures": "spacy.TextCatEnsemble.v1", "exclusive_classes": False, "pretrained_vectors": None, "width": 64, "embed_size": 2000, "conv_depth": 2, "window_size": 1, "ngram_size": 1, "dropout": None}), ("textcat_multilabel", {"@architectures": "spacy.TextCatEnsemble.v1", "exclusive_classes": False, "pretrained_vectors": None, "width": 64, "embed_size": 2000, "conv_depth": 2, "window_size": 1, "ngram_size": 1, "dropout": None}), # ENSEMBLE V2 ("textcat", {"@architectures": "spacy.TextCatEnsemble.v2", "tok2vec": DEFAULT_TOK2VEC_MODEL, "linear_model": {"@architectures": "spacy.TextCatBOW.v1", "exclusive_classes": True, "no_output_layer": False, "ngram_size": 3}}), ("textcat", {"@architectures": "spacy.TextCatEnsemble.v2", "tok2vec": DEFAULT_TOK2VEC_MODEL, "linear_model": {"@architectures": "spacy.TextCatBOW.v1", "exclusive_classes": True, "no_output_layer": True, "ngram_size": 3}}), ("textcat_multilabel", {"@architectures": "spacy.TextCatEnsemble.v2", "tok2vec": DEFAULT_TOK2VEC_MODEL, "linear_model": {"@architectures": "spacy.TextCatBOW.v1", "exclusive_classes": False, "no_output_layer": False, "ngram_size": 3}}), ("textcat_multilabel", {"@architectures": "spacy.TextCatEnsemble.v2", "tok2vec": DEFAULT_TOK2VEC_MODEL, "linear_model": {"@architectures": "spacy.TextCatBOW.v1", "exclusive_classes": False, "no_output_layer": True, "ngram_size": 3}}), # CNN ("textcat", {"@architectures": "spacy.TextCatCNN.v1", "tok2vec": DEFAULT_TOK2VEC_MODEL, "exclusive_classes": True}), ("textcat_multilabel", {"@architectures": "spacy.TextCatCNN.v1", "tok2vec": DEFAULT_TOK2VEC_MODEL, "exclusive_classes": False}), ], ) # fmt: on def test_no_resize(name, textcat_config): """The old textcat architectures weren't resizable""" nlp = Language() pipe_config = {"model": textcat_config} textcat = nlp.add_pipe(name, config=pipe_config) textcat.add_label("POSITIVE") textcat.add_label("NEGATIVE") nlp.initialize() assert textcat.model.maybe_get_dim("nO") in [2, None] # this throws an error because the textcat can't be resized after initialization with pytest.raises(ValueError): textcat.add_label("NEUTRAL") # fmt: off @pytest.mark.parametrize( "name,textcat_config", [ # BOW ("textcat", {"@architectures": "spacy.TextCatBOW.v2", "exclusive_classes": True, "no_output_layer": False, "ngram_size": 3}), ("textcat", {"@architectures": "spacy.TextCatBOW.v2", "exclusive_classes": True, "no_output_layer": True, "ngram_size": 3}), ("textcat_multilabel", {"@architectures": "spacy.TextCatBOW.v2", "exclusive_classes": False, "no_output_layer": False, "ngram_size": 3}), ("textcat_multilabel", {"@architectures": "spacy.TextCatBOW.v2", "exclusive_classes": False, "no_output_layer": True, "ngram_size": 3}), # CNN ("textcat", {"@architectures": "spacy.TextCatCNN.v2", "tok2vec": DEFAULT_TOK2VEC_MODEL, "exclusive_classes": True}), ("textcat_multilabel", {"@architectures": 
"spacy.TextCatCNN.v2", "tok2vec": DEFAULT_TOK2VEC_MODEL, "exclusive_classes": False}), ], ) # fmt: on def test_resize(name, textcat_config): """The new textcat architectures are resizable""" nlp = Language() pipe_config = {"model": textcat_config} textcat = nlp.add_pipe(name, config=pipe_config) textcat.add_label("POSITIVE") textcat.add_label("NEGATIVE") assert textcat.model.maybe_get_dim("nO") in [2, None] nlp.initialize() assert textcat.model.maybe_get_dim("nO") in [2, None] textcat.add_label("NEUTRAL") assert textcat.model.maybe_get_dim("nO") in [3, None] # fmt: off @pytest.mark.parametrize( "name,textcat_config", [ # BOW ("textcat", {"@architectures": "spacy.TextCatBOW.v2", "exclusive_classes": True, "no_output_layer": False, "ngram_size": 3}), ("textcat", {"@architectures": "spacy.TextCatBOW.v2", "exclusive_classes": True, "no_output_layer": True, "ngram_size": 3}), ("textcat_multilabel", {"@architectures": "spacy.TextCatBOW.v2", "exclusive_classes": False, "no_output_layer": False, "ngram_size": 3}), ("textcat_multilabel", {"@architectures": "spacy.TextCatBOW.v2", "exclusive_classes": False, "no_output_layer": True, "ngram_size": 3}), # CNN ("textcat", {"@architectures": "spacy.TextCatCNN.v2", "tok2vec": DEFAULT_TOK2VEC_MODEL, "exclusive_classes": True}), ("textcat_multilabel", {"@architectures": "spacy.TextCatCNN.v2", "tok2vec": DEFAULT_TOK2VEC_MODEL, "exclusive_classes": False}), ], ) # fmt: on def test_resize_same_results(name, textcat_config): # Ensure that the resized textcat classifiers still produce the same results for old labels fix_random_seed(0) nlp = English() pipe_config = {"model": textcat_config} textcat = nlp.add_pipe(name, config=pipe_config) train_examples = [] for text, annotations in TRAIN_DATA_SINGLE_LABEL: train_examples.append(Example.from_dict(nlp.make_doc(text), annotations)) optimizer = nlp.initialize(get_examples=lambda: train_examples) assert textcat.model.maybe_get_dim("nO") in [2, None] for i in range(5): losses = {} nlp.update(train_examples, sgd=optimizer, losses=losses) # test the trained model before resizing test_text = "I am happy." 
doc = nlp(test_text) assert len(doc.cats) == 2 pos_pred = doc.cats["POSITIVE"] neg_pred = doc.cats["NEGATIVE"] # test the trained model again after resizing textcat.add_label("NEUTRAL") doc = nlp(test_text) assert len(doc.cats) == 3 assert doc.cats["POSITIVE"] == pos_pred assert doc.cats["NEGATIVE"] == neg_pred assert doc.cats["NEUTRAL"] <= 1 for i in range(5): losses = {} nlp.update(train_examples, sgd=optimizer, losses=losses) # test the trained model again after training further with new label doc = nlp(test_text) assert len(doc.cats) == 3 assert doc.cats["POSITIVE"] != pos_pred assert doc.cats["NEGATIVE"] != neg_pred for cat in doc.cats: assert doc.cats[cat] <= 1 def test_error_with_multi_labels(): nlp = Language() nlp.add_pipe("textcat") train_examples = [] for text, annotations in TRAIN_DATA_MULTI_LABEL: train_examples.append(Example.from_dict(nlp.make_doc(text), annotations)) with pytest.raises(ValueError): nlp.initialize(get_examples=lambda: train_examples) @pytest.mark.parametrize( "name,get_examples, train_data", [ ("textcat", make_get_examples_single_label, TRAIN_DATA_SINGLE_LABEL), ("textcat_multilabel", make_get_examples_multi_label, TRAIN_DATA_MULTI_LABEL), ], ) def test_initialize_examples(name, get_examples, train_data): nlp = Language() textcat = nlp.add_pipe(name) for text, annotations in train_data: for label, value in annotations.get("cats").items(): textcat.add_label(label) # you shouldn't really call this more than once, but for testing it should be fine nlp.initialize() nlp.initialize(get_examples=get_examples(nlp)) with pytest.raises(TypeError): nlp.initialize(get_examples=lambda: None) with pytest.raises(TypeError): nlp.initialize(get_examples=get_examples()) def test_overfitting_IO(): # Simple test to try and quickly overfit the single-label textcat component - ensuring the ML models work correctly fix_random_seed(0) nlp = English() textcat = nlp.add_pipe("textcat") train_examples = [] for text, annotations in TRAIN_DATA_SINGLE_LABEL: train_examples.append(Example.from_dict(nlp.make_doc(text), annotations)) optimizer = nlp.initialize(get_examples=lambda: train_examples) assert textcat.model.get_dim("nO") == 2 for i in range(50): losses = {} nlp.update(train_examples, sgd=optimizer, losses=losses) assert losses["textcat"] < 0.01 # test the trained model test_text = "I am happy." 
doc = nlp(test_text) cats = doc.cats assert cats["POSITIVE"] > 0.9 assert cats["POSITIVE"] + cats["NEGATIVE"] == pytest.approx(1.0, 0.001) # Also test the results are still the same after IO with make_tempdir() as tmp_dir: nlp.to_disk(tmp_dir) nlp2 = util.load_model_from_path(tmp_dir) doc2 = nlp2(test_text) cats2 = doc2.cats assert cats2["POSITIVE"] > 0.9 assert cats2["POSITIVE"] + cats2["NEGATIVE"] == pytest.approx(1.0, 0.001) # Test scoring scores = nlp.evaluate(train_examples) assert scores["cats_micro_f"] == 1.0 assert scores["cats_macro_f"] == 1.0 assert scores["cats_macro_auc"] == 1.0 assert scores["cats_score"] == 1.0 assert "cats_score_desc" in scores # Make sure that running pipe twice, or comparing to call, always amounts to the same predictions texts = ["Just a sentence.", "I like green eggs.", "I am happy.", "I eat ham."] batch_cats_1 = [doc.cats for doc in nlp.pipe(texts)] batch_cats_2 = [doc.cats for doc in nlp.pipe(texts)] no_batch_cats = [doc.cats for doc in [nlp(text) for text in texts]] for cats_1, cats_2 in zip(batch_cats_1, batch_cats_2): for cat in cats_1: assert_almost_equal(cats_1[cat], cats_2[cat], decimal=5) for cats_1, cats_2 in zip(batch_cats_1, no_batch_cats): for cat in cats_1: assert_almost_equal(cats_1[cat], cats_2[cat], decimal=5) def test_overfitting_IO_multi(): # Simple test to try and quickly overfit the multi-label textcat component - ensuring the ML models work correctly fix_random_seed(0) nlp = English() textcat = nlp.add_pipe("textcat_multilabel") train_examples = [] for text, annotations in TRAIN_DATA_MULTI_LABEL: train_examples.append(Example.from_dict(nlp.make_doc(text), annotations)) optimizer = nlp.initialize(get_examples=lambda: train_examples) assert textcat.model.get_dim("nO") == 3 for i in range(100): losses = {} nlp.update(train_examples, sgd=optimizer, losses=losses) assert losses["textcat_multilabel"] < 0.01 # test the trained model test_text = "I am confused but happy." 
doc = nlp(test_text) cats = doc.cats assert cats["HAPPY"] > 0.9 assert cats["CONFUSED"] > 0.9 # Also test the results are still the same after IO with make_tempdir() as tmp_dir: nlp.to_disk(tmp_dir) nlp2 = util.load_model_from_path(tmp_dir) doc2 = nlp2(test_text) cats2 = doc2.cats assert cats2["HAPPY"] > 0.9 assert cats2["CONFUSED"] > 0.9 # Test scoring scores = nlp.evaluate(train_examples) assert scores["cats_micro_f"] == 1.0 assert scores["cats_macro_f"] == 1.0 assert "cats_score_desc" in scores # Make sure that running pipe twice, or comparing to call, always amounts to the same predictions texts = ["Just a sentence.", "I like green eggs.", "I am happy.", "I eat ham."] batch_deps_1 = [doc.cats for doc in nlp.pipe(texts)] batch_deps_2 = [doc.cats for doc in nlp.pipe(texts)] no_batch_deps = [doc.cats for doc in [nlp(text) for text in texts]] for cats_1, cats_2 in zip(batch_deps_1, batch_deps_2): for cat in cats_1: assert_almost_equal(cats_1[cat], cats_2[cat], decimal=5) for cats_1, cats_2 in zip(batch_deps_1, no_batch_deps): for cat in cats_1: assert_almost_equal(cats_1[cat], cats_2[cat], decimal=5) # fmt: off @pytest.mark.slow @pytest.mark.parametrize( "name,train_data,textcat_config", [ # BOW V1 ("textcat_multilabel", TRAIN_DATA_MULTI_LABEL, {"@architectures": "spacy.TextCatBOW.v1", "exclusive_classes": False, "ngram_size": 1, "no_output_layer": False}), ("textcat", TRAIN_DATA_SINGLE_LABEL, {"@architectures": "spacy.TextCatBOW.v1", "exclusive_classes": True, "ngram_size": 4, "no_output_layer": False}), # ENSEMBLE V1 ("textcat_multilabel", TRAIN_DATA_MULTI_LABEL, {"@architectures": "spacy.TextCatEnsemble.v1", "exclusive_classes": False, "pretrained_vectors": None, "width": 64, "embed_size": 2000, "conv_depth": 2, "window_size": 1, "ngram_size": 1, "dropout": None}), ("textcat", TRAIN_DATA_SINGLE_LABEL, {"@architectures": "spacy.TextCatEnsemble.v1", "exclusive_classes": False, "pretrained_vectors": None, "width": 64, "embed_size": 2000, "conv_depth": 2, "window_size": 1, "ngram_size": 1, "dropout": None}), # CNN V1 ("textcat", TRAIN_DATA_SINGLE_LABEL, {"@architectures": "spacy.TextCatCNN.v1", "tok2vec": DEFAULT_TOK2VEC_MODEL, "exclusive_classes": True}), ("textcat_multilabel", TRAIN_DATA_MULTI_LABEL, {"@architectures": "spacy.TextCatCNN.v1", "tok2vec": DEFAULT_TOK2VEC_MODEL, "exclusive_classes": False}), # BOW V2 ("textcat_multilabel", TRAIN_DATA_MULTI_LABEL, {"@architectures": "spacy.TextCatBOW.v2", "exclusive_classes": False, "ngram_size": 1, "no_output_layer": False}), ("textcat", TRAIN_DATA_SINGLE_LABEL, {"@architectures": "spacy.TextCatBOW.v2", "exclusive_classes": True, "ngram_size": 4, "no_output_layer": False}), ("textcat_multilabel", TRAIN_DATA_MULTI_LABEL, {"@architectures": "spacy.TextCatBOW.v2", "exclusive_classes": False, "ngram_size": 3, "no_output_layer": True}), ("textcat", TRAIN_DATA_SINGLE_LABEL, {"@architectures": "spacy.TextCatBOW.v2", "exclusive_classes": True, "ngram_size": 2, "no_output_layer": True}), # ENSEMBLE V2 ("textcat_multilabel", TRAIN_DATA_MULTI_LABEL, {"@architectures": "spacy.TextCatEnsemble.v2", "tok2vec": DEFAULT_TOK2VEC_MODEL, "linear_model": {"@architectures": "spacy.TextCatBOW.v2", "exclusive_classes": False, "ngram_size": 1, "no_output_layer": False}}), ("textcat", TRAIN_DATA_SINGLE_LABEL, {"@architectures": "spacy.TextCatEnsemble.v2", "tok2vec": DEFAULT_TOK2VEC_MODEL, "linear_model": {"@architectures": "spacy.TextCatBOW.v2", "exclusive_classes": True, "ngram_size": 5, "no_output_layer": False}}), # CNN V2 ("textcat", TRAIN_DATA_SINGLE_LABEL, 
{"@architectures": "spacy.TextCatCNN.v2", "tok2vec": DEFAULT_TOK2VEC_MODEL, "exclusive_classes": True}), ("textcat_multilabel", TRAIN_DATA_MULTI_LABEL, {"@architectures": "spacy.TextCatCNN.v2", "tok2vec": DEFAULT_TOK2VEC_MODEL, "exclusive_classes": False}), ], ) # fmt: on def test_textcat_configs(name, train_data, textcat_config): pipe_config = {"model": textcat_config} nlp = English() textcat = nlp.add_pipe(name, config=pipe_config) train_examples = [] for text, annotations in train_data: train_examples.append(Example.from_dict(nlp.make_doc(text), annotations)) for label, value in annotations.get("cats").items(): textcat.add_label(label) optimizer = nlp.initialize() for i in range(5): losses = {} nlp.update(train_examples, sgd=optimizer, losses=losses) def test_positive_class(): nlp = English() textcat = nlp.add_pipe("textcat") get_examples = make_get_examples_single_label(nlp) textcat.initialize(get_examples, labels=["POS", "NEG"], positive_label="POS") assert textcat.labels == ("POS", "NEG") assert textcat.cfg["positive_label"] == "POS" textcat_multilabel = nlp.add_pipe("textcat_multilabel") get_examples = make_get_examples_multi_label(nlp) with pytest.raises(TypeError): textcat_multilabel.initialize( get_examples, labels=["POS", "NEG"], positive_label="POS" ) textcat_multilabel.initialize(get_examples, labels=["FICTION", "DRAMA"]) assert textcat_multilabel.labels == ("FICTION", "DRAMA") assert "positive_label" not in textcat_multilabel.cfg def test_positive_class_not_present(): nlp = English() textcat = nlp.add_pipe("textcat") get_examples = make_get_examples_single_label(nlp) with pytest.raises(ValueError): textcat.initialize(get_examples, labels=["SOME", "THING"], positive_label="POS") def test_positive_class_not_binary(): nlp = English() textcat = nlp.add_pipe("textcat") get_examples = make_get_examples_multi_label(nlp) with pytest.raises(ValueError): textcat.initialize( get_examples, labels=["SOME", "THING", "POS"], positive_label="POS" ) def test_textcat_evaluation(): train_examples = [] nlp = English() ref1 = nlp("one") ref1.cats = {"winter": 1.0, "summer": 1.0, "spring": 1.0, "autumn": 1.0} pred1 = nlp("one") pred1.cats = {"winter": 1.0, "summer": 0.0, "spring": 1.0, "autumn": 1.0} train_examples.append(Example(pred1, ref1)) ref2 = nlp("two") ref2.cats = {"winter": 0.0, "summer": 0.0, "spring": 1.0, "autumn": 1.0} pred2 = nlp("two") pred2.cats = {"winter": 1.0, "summer": 0.0, "spring": 0.0, "autumn": 1.0} train_examples.append(Example(pred2, ref2)) scores = Scorer().score_cats( train_examples, "cats", labels=["winter", "summer", "spring", "autumn"] ) assert scores["cats_f_per_type"]["winter"]["p"] == 1 / 2 assert scores["cats_f_per_type"]["winter"]["r"] == 1 / 1 assert scores["cats_f_per_type"]["summer"]["p"] == 0 assert scores["cats_f_per_type"]["summer"]["r"] == 0 / 1 assert scores["cats_f_per_type"]["spring"]["p"] == 1 / 1 assert scores["cats_f_per_type"]["spring"]["r"] == 1 / 2 assert scores["cats_f_per_type"]["autumn"]["p"] == 2 / 2 assert scores["cats_f_per_type"]["autumn"]["r"] == 2 / 2 assert scores["cats_micro_p"] == 4 / 5 assert scores["cats_micro_r"] == 4 / 6 @pytest.mark.parametrize( "multi_label,spring_p", [(True, 1 / 1), (False, 1 / 2)], ) def test_textcat_eval_missing(multi_label: bool, spring_p: float): """ multi-label: the missing 'spring' in gold_doc_2 doesn't incur a penalty exclusive labels: the missing 'spring' in gold_doc_2 is interpreted as 0.0""" train_examples = [] nlp = English() ref1 = nlp("one") ref1.cats = {"winter": 0.0, "summer": 0.0, "autumn": 
0.0, "spring": 1.0} pred1 = nlp("one") pred1.cats = {"winter": 0.0, "summer": 0.0, "autumn": 0.0, "spring": 1.0} train_examples.append(Example(ref1, pred1)) ref2 = nlp("two") # reference 'spring' is missing, pred 'spring' is 1 ref2.cats = {"winter": 0.0, "summer": 0.0, "autumn": 1.0} pred2 = nlp("two") pred2.cats = {"winter": 0.0, "summer": 0.0, "autumn": 0.0, "spring": 1.0} train_examples.append(Example(pred2, ref2)) scores = Scorer().score_cats( train_examples, "cats", labels=["winter", "summer", "spring", "autumn"], multi_label=multi_label, ) assert scores["cats_f_per_type"]["spring"]["p"] == spring_p assert scores["cats_f_per_type"]["spring"]["r"] == 1 / 1 @pytest.mark.parametrize( "multi_label,expected_loss", [(True, 0), (False, 0.125)], ) def test_textcat_loss(multi_label: bool, expected_loss: float): """ multi-label: the missing 'spring' in gold_doc_2 doesn't incur an increase in loss exclusive labels: the missing 'spring' in gold_doc_2 is interpreted as 0.0 and adds to the loss""" train_examples = [] nlp = English() doc1 = nlp("one") cats1 = {"winter": 0.0, "summer": 0.0, "autumn": 0.0, "spring": 1.0} train_examples.append(Example.from_dict(doc1, {"cats": cats1})) doc2 = nlp("two") cats2 = {"winter": 0.0, "summer": 0.0, "autumn": 1.0} train_examples.append(Example.from_dict(doc2, {"cats": cats2})) if multi_label: textcat = nlp.add_pipe("textcat_multilabel") else: textcat = nlp.add_pipe("textcat") assert isinstance(textcat, TextCategorizer) textcat.initialize(lambda: train_examples) scores = textcat.model.ops.asarray( [[0.0, 0.0, 0.0, 1.0], [0.0, 0.0, 1.0, 1.0]], dtype="f" # type: ignore ) loss, d_scores = textcat.get_loss(train_examples, scores) assert loss == expected_loss def test_textcat_multilabel_threshold(): # Ensure the scorer can be called with a different threshold nlp = English() nlp.add_pipe("textcat_multilabel") train_examples = [] for text, annotations in TRAIN_DATA_SINGLE_LABEL: train_examples.append(Example.from_dict(nlp.make_doc(text), annotations)) nlp.initialize(get_examples=lambda: train_examples) # score the model (it's not actually trained but that doesn't matter) scores = nlp.evaluate(train_examples) assert 0 <= scores["cats_score"] <= 1 scores = nlp.evaluate(train_examples, scorer_cfg={"threshold": 1.0}) assert scores["cats_f_per_type"]["POSITIVE"]["r"] == 0 scores = nlp.evaluate(train_examples, scorer_cfg={"threshold": 0}) macro_f = scores["cats_score"] assert scores["cats_f_per_type"]["POSITIVE"]["r"] == 1.0 scores = nlp.evaluate( train_examples, scorer_cfg={"threshold": 0, "positive_label": "POSITIVE"} ) pos_f = scores["cats_score"] assert scores["cats_f_per_type"]["POSITIVE"]["r"] == 1.0 assert pos_f >= macro_f def test_textcat_multi_threshold(): # Ensure the scorer can be called with a different threshold nlp = English() nlp.add_pipe("textcat_multilabel") train_examples = [] for text, annotations in TRAIN_DATA_SINGLE_LABEL: train_examples.append(Example.from_dict(nlp.make_doc(text), annotations)) nlp.initialize(get_examples=lambda: train_examples) # score the model (it's not actually trained but that doesn't matter) scores = nlp.evaluate(train_examples) assert 0 <= scores["cats_score"] <= 1 scores = nlp.evaluate(train_examples, scorer_cfg={"threshold": 1.0}) assert scores["cats_f_per_type"]["POSITIVE"]["r"] == 0 scores = nlp.evaluate(train_examples, scorer_cfg={"threshold": 0}) assert scores["cats_f_per_type"]["POSITIVE"]["r"] == 1.0 @pytest.mark.parametrize( "component_name,scorer", [ ("textcat", "spacy.textcat_scorer.v1"), ("textcat_multilabel", 
"spacy.textcat_multilabel_scorer.v1"), ], ) def test_textcat_legacy_scorers(component_name, scorer): """Check that legacy scorers are registered and produce the expected score keys.""" nlp = English() nlp.add_pipe(component_name, config={"scorer": {"@scorers": scorer}}) train_examples = [] for text, annotations in TRAIN_DATA_SINGLE_LABEL: train_examples.append(Example.from_dict(nlp.make_doc(text), annotations)) nlp.initialize(get_examples=lambda: train_examples) # score the model (it's not actually trained but that doesn't matter) scores = nlp.evaluate(train_examples) assert 0 <= scores["cats_score"] <= 1
35613
37.501622
267
py
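One thing the parametrized textcat tests make explicit is that the classifier architecture is selected entirely through the component config: the test matrix swaps "@architectures" entries (TextCatBOW, TextCatEnsemble, TextCatCNN, in v1 and v2 flavors) while the surrounding training code stays identical. A minimal sketch of that mechanism, assuming spaCy v3 (the labels and text are illustrative):

from spacy.lang.en import English

bow_model = {
    "@architectures": "spacy.TextCatBOW.v2",
    "exclusive_classes": True,
    "ngram_size": 1,
    "no_output_layer": False,
}
nlp = English()
textcat = nlp.add_pipe("textcat", config={"model": bow_model})
textcat.add_label("POSITIVE")  # the exclusive textcat needs at least two labels
textcat.add_label("NEGATIVE")
nlp.initialize()
doc = nlp("I am happy.")
# Untrained scores are roughly uniform; exclusive classes should sum to ~1.0
assert abs(sum(doc.cats.values()) - 1.0) < 1e-3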
spaCy
spaCy-master/spacy/tests/pipeline/test_tok2vec.py
import pytest
from numpy.testing import assert_array_equal
from thinc.api import Config, get_current_ops

from spacy import util
from spacy.lang.en import English
from spacy.ml.models.tok2vec import (
    MaxoutWindowEncoder,
    MultiHashEmbed,
    build_Tok2Vec_model,
)
from spacy.pipeline.tok2vec import Tok2Vec, Tok2VecListener
from spacy.tokens import Doc
from spacy.training import Example
from spacy.util import registry
from spacy.vocab import Vocab

from ..util import add_vecs_to_vocab, get_batch, make_tempdir


def test_empty_doc():
    width = 128
    embed_size = 2000
    vocab = Vocab()
    doc = Doc(vocab, words=[])
    tok2vec = build_Tok2Vec_model(
        MultiHashEmbed(
            width=width,
            rows=[embed_size, embed_size, embed_size, embed_size],
            include_static_vectors=False,
            attrs=["NORM", "PREFIX", "SUFFIX", "SHAPE"],
        ),
        MaxoutWindowEncoder(width=width, depth=4, window_size=1, maxout_pieces=3),
    )
    tok2vec.initialize()
    vectors, backprop = tok2vec.begin_update([doc])
    assert len(vectors) == 1
    assert vectors[0].shape == (0, width)


@pytest.mark.parametrize(
    "batch_size,width,embed_size", [[1, 128, 2000], [2, 128, 2000], [3, 8, 63]]
)
def test_tok2vec_batch_sizes(batch_size, width, embed_size):
    batch = get_batch(batch_size)
    tok2vec = build_Tok2Vec_model(
        MultiHashEmbed(
            width=width,
            rows=[embed_size] * 4,
            include_static_vectors=False,
            attrs=["NORM", "PREFIX", "SUFFIX", "SHAPE"],
        ),
        MaxoutWindowEncoder(width=width, depth=4, window_size=1, maxout_pieces=3),
    )
    tok2vec.initialize()
    vectors, backprop = tok2vec.begin_update(batch)
    assert len(vectors) == len(batch)
    for doc_vec, doc in zip(vectors, batch):
        assert doc_vec.shape == (len(doc), width)


@pytest.mark.slow
@pytest.mark.parametrize("width", [8])
@pytest.mark.parametrize(
    "embed_arch,embed_config",
    # fmt: off
    [
        ("spacy.MultiHashEmbed.v1", {"rows": [100, 100], "attrs": ["SHAPE", "LOWER"], "include_static_vectors": False}),
        ("spacy.MultiHashEmbed.v1", {"rows": [100, 20], "attrs": ["ORTH", "PREFIX"], "include_static_vectors": False}),
        ("spacy.CharacterEmbed.v1", {"rows": 100, "nM": 64, "nC": 8, "include_static_vectors": False}),
        ("spacy.CharacterEmbed.v1", {"rows": 100, "nM": 16, "nC": 2, "include_static_vectors": False}),
    ],
    # fmt: on
)
@pytest.mark.parametrize(
    "tok2vec_arch,encode_arch,encode_config",
    # fmt: off
    [
        ("spacy.Tok2Vec.v1", "spacy.MaxoutWindowEncoder.v1", {"window_size": 1, "maxout_pieces": 3, "depth": 2}),
        ("spacy.Tok2Vec.v2", "spacy.MaxoutWindowEncoder.v2", {"window_size": 1, "maxout_pieces": 3, "depth": 2}),
        ("spacy.Tok2Vec.v1", "spacy.MishWindowEncoder.v1", {"window_size": 1, "depth": 6}),
        ("spacy.Tok2Vec.v2", "spacy.MishWindowEncoder.v2", {"window_size": 1, "depth": 6}),
    ],
    # fmt: on
)
def test_tok2vec_configs(
    width, tok2vec_arch, embed_arch, embed_config, encode_arch, encode_config
):
    embed = registry.get("architectures", embed_arch)
    encode = registry.get("architectures", encode_arch)
    tok2vec_model = registry.get("architectures", tok2vec_arch)
    embed_config["width"] = width
    encode_config["width"] = width
    docs = get_batch(3)
    tok2vec = tok2vec_model(embed(**embed_config), encode(**encode_config))
    tok2vec.initialize(docs)
    vectors, backprop = tok2vec.begin_update(docs)
    assert len(vectors) == len(docs)
    assert vectors[0].shape == (len(docs[0]), width)
    backprop(vectors)


def test_init_tok2vec():
    # Simple test to initialize the default tok2vec
    nlp = English()
    tok2vec = nlp.add_pipe("tok2vec")
    assert tok2vec.listeners == []
    nlp.initialize()
    assert tok2vec.model.get_dim("nO")


cfg_string = """
[nlp]
lang = "en"
pipeline = ["tok2vec","tagger"]

[components]

[components.tagger]
factory = "tagger"

[components.tagger.model]
@architectures = "spacy.Tagger.v2"
nO = null

[components.tagger.model.tok2vec]
@architectures = "spacy.Tok2VecListener.v1"
width = ${components.tok2vec.model.encode.width}

[components.tok2vec]
factory = "tok2vec"

[components.tok2vec.model]
@architectures = "spacy.Tok2Vec.v2"

[components.tok2vec.model.embed]
@architectures = "spacy.MultiHashEmbed.v1"
width = ${components.tok2vec.model.encode.width}
rows = [2000, 1000, 1000, 1000]
attrs = ["NORM", "PREFIX", "SUFFIX", "SHAPE"]
include_static_vectors = false

[components.tok2vec.model.encode]
@architectures = "spacy.MaxoutWindowEncoder.v2"
width = 96
depth = 4
window_size = 1
maxout_pieces = 3
"""

TRAIN_DATA = [
    (
        "I like green eggs",
        {"tags": ["N", "V", "J", "N"], "cats": {"preference": 1.0, "imperative": 0.0}},
    ),
    (
        "Eat blue ham",
        {"tags": ["V", "J", "N"], "cats": {"preference": 0.0, "imperative": 1.0}},
    ),
]


@pytest.mark.parametrize("with_vectors", (False, True))
def test_tok2vec_listener(with_vectors):
    orig_config = Config().from_str(cfg_string)
    orig_config["components"]["tok2vec"]["model"]["embed"][
        "include_static_vectors"
    ] = with_vectors
    nlp = util.load_model_from_config(orig_config, auto_fill=True, validate=True)

    if with_vectors:
        ops = get_current_ops()
        vectors = [
            ("apple", ops.asarray([1, 2, 3])),
            ("orange", ops.asarray([-1, -2, -3])),
            ("and", ops.asarray([-1, -1, -1])),
            ("juice", ops.asarray([5, 5, 10])),
            ("pie", ops.asarray([7, 6.3, 8.9])),
        ]
        add_vecs_to_vocab(nlp.vocab, vectors)

    assert nlp.pipe_names == ["tok2vec", "tagger"]
    tagger = nlp.get_pipe("tagger")
    tok2vec = nlp.get_pipe("tok2vec")
    tagger_tok2vec = tagger.model.get_ref("tok2vec")
    assert isinstance(tok2vec, Tok2Vec)
    assert isinstance(tagger_tok2vec, Tok2VecListener)
    train_examples = []
    for t in TRAIN_DATA:
        train_examples.append(Example.from_dict(nlp.make_doc(t[0]), t[1]))
        for tag in t[1]["tags"]:
            tagger.add_label(tag)

    # Check that the Tok2Vec component finds its listeners
    optimizer = nlp.initialize(lambda: train_examples)
    assert tok2vec.listeners == [tagger_tok2vec]

    for i in range(5):
        losses = {}
        nlp.update(train_examples, sgd=optimizer, losses=losses)

    doc = nlp("Running the pipeline as a whole.")
    doc_tensor = tagger_tok2vec.predict([doc])[0]
    ops = get_current_ops()
    assert_array_equal(ops.to_numpy(doc.tensor), ops.to_numpy(doc_tensor))

    # test with empty doc
    doc = nlp("")

    # TODO: should this warn or error?
    nlp.select_pipes(disable="tok2vec")
    assert nlp.pipe_names == ["tagger"]
    nlp("Running the pipeline with the Tok2Vec component disabled.")


def test_tok2vec_listener_callback():
    orig_config = Config().from_str(cfg_string)
    nlp = util.load_model_from_config(orig_config, auto_fill=True, validate=True)
    assert nlp.pipe_names == ["tok2vec", "tagger"]
    tagger = nlp.get_pipe("tagger")
    tok2vec = nlp.get_pipe("tok2vec")
    docs = [nlp.make_doc("A random sentence")]
    tok2vec.model.initialize(X=docs)
    gold_array = [[1.0 for tag in ["V", "Z"]] for word in docs]
    label_sample = [tagger.model.ops.asarray(gold_array, dtype="float32")]
    tagger.model.initialize(X=docs, Y=label_sample)
    docs = [nlp.make_doc("Another entirely random sentence")]
    tok2vec.update([Example.from_dict(x, {}) for x in docs])
    Y, get_dX = tagger.model.begin_update(docs)
    # ensure that the backprop call works (and doesn't hit a 'None' callback)
    assert get_dX(Y) is not None


def test_tok2vec_listener_overfitting():
    """Test that a pipeline with a listener properly overfits, even if
    'tok2vec' is in the annotating components."""
    orig_config = Config().from_str(cfg_string)
    nlp = util.load_model_from_config(orig_config, auto_fill=True, validate=True)
    train_examples = []
    for t in TRAIN_DATA:
        train_examples.append(Example.from_dict(nlp.make_doc(t[0]), t[1]))
    optimizer = nlp.initialize(get_examples=lambda: train_examples)
    for i in range(50):
        losses = {}
        nlp.update(train_examples, sgd=optimizer, losses=losses, annotates=["tok2vec"])
    assert losses["tagger"] < 0.00001

    # test the trained model
    test_text = "I like blue eggs"
    doc = nlp(test_text)
    assert doc[0].tag_ == "N"
    assert doc[1].tag_ == "V"
    assert doc[2].tag_ == "J"
    assert doc[3].tag_ == "N"

    # Also test the results are still the same after IO
    with make_tempdir() as tmp_dir:
        nlp.to_disk(tmp_dir)
        nlp2 = util.load_model_from_path(tmp_dir)
        doc2 = nlp2(test_text)
        assert doc2[0].tag_ == "N"
        assert doc2[1].tag_ == "V"
        assert doc2[2].tag_ == "J"
        assert doc2[3].tag_ == "N"


def test_tok2vec_frozen_not_annotating():
    """Test that a pipeline with a frozen tok2vec raises an error when the
    tok2vec is not annotating."""
    orig_config = Config().from_str(cfg_string)
    nlp = util.load_model_from_config(orig_config, auto_fill=True, validate=True)
    train_examples = []
    for t in TRAIN_DATA:
        train_examples.append(Example.from_dict(nlp.make_doc(t[0]), t[1]))
    optimizer = nlp.initialize(get_examples=lambda: train_examples)

    for i in range(2):
        losses = {}
        with pytest.raises(
            ValueError, match=r"the tok2vec embedding layer is not updated"
        ):
            nlp.update(
                train_examples, sgd=optimizer, losses=losses, exclude=["tok2vec"]
            )


def test_tok2vec_frozen_overfitting():
    """Test that a pipeline with a frozen & annotating tok2vec can still overfit."""
    orig_config = Config().from_str(cfg_string)
    nlp = util.load_model_from_config(orig_config, auto_fill=True, validate=True)
    train_examples = []
    for t in TRAIN_DATA:
        train_examples.append(Example.from_dict(nlp.make_doc(t[0]), t[1]))
    optimizer = nlp.initialize(get_examples=lambda: train_examples)

    for i in range(100):
        losses = {}
        nlp.update(
            train_examples,
            sgd=optimizer,
            losses=losses,
            exclude=["tok2vec"],
            annotates=["tok2vec"],
        )
    assert losses["tagger"] < 0.0001

    # test the trained model
    test_text = "I like blue eggs"
    doc = nlp(test_text)
    assert doc[0].tag_ == "N"
    assert doc[1].tag_ == "V"
    assert doc[2].tag_ == "J"
    assert doc[3].tag_ == "N"

    # Also test the results are still the same after IO
    with make_tempdir() as tmp_dir:
        nlp.to_disk(tmp_dir)
        nlp2 = util.load_model_from_path(tmp_dir)
        doc2 = nlp2(test_text)
        assert doc2[0].tag_ == "N"
        assert doc2[1].tag_ == "V"
        assert doc2[2].tag_ == "J"
        assert doc2[3].tag_ == "N"


def test_replace_listeners():
    orig_config = Config().from_str(cfg_string)
    nlp = util.load_model_from_config(orig_config, auto_fill=True, validate=True)
    examples = [Example.from_dict(nlp.make_doc("x y"), {"tags": ["V", "Z"]})]
    nlp.initialize(lambda: examples)
    tok2vec = nlp.get_pipe("tok2vec")
    tagger = nlp.get_pipe("tagger")
    assert isinstance(tagger.model.layers[0], Tok2VecListener)
    assert tok2vec.listener_map["tagger"][0] == tagger.model.layers[0]
    assert (
        nlp.config["components"]["tok2vec"]["model"]["@architectures"]
        == "spacy.Tok2Vec.v2"
    )
    assert (
        nlp.config["components"]["tagger"]["model"]["tok2vec"]["@architectures"]
        == "spacy.Tok2VecListener.v1"
    )
    nlp.replace_listeners("tok2vec", "tagger", ["model.tok2vec"])
    assert not isinstance(tagger.model.layers[0], Tok2VecListener)
    t2v_cfg = nlp.config["components"]["tok2vec"]["model"]
    assert t2v_cfg["@architectures"] == "spacy.Tok2Vec.v2"
    assert nlp.config["components"]["tagger"]["model"]["tok2vec"] == t2v_cfg
    with pytest.raises(ValueError):
        nlp.replace_listeners("invalid", "tagger", ["model.tok2vec"])
    with pytest.raises(ValueError):
        nlp.replace_listeners("tok2vec", "parser", ["model.tok2vec"])
    with pytest.raises(ValueError):
        nlp.replace_listeners("tok2vec", "tagger", ["model.yolo"])
    with pytest.raises(ValueError):
        nlp.replace_listeners("tok2vec", "tagger", ["model.tok2vec", "model.yolo"])
    # attempt training with the new pipeline
    optimizer = nlp.initialize(lambda: examples)
    for i in range(2):
        losses = {}
        nlp.update(examples, sgd=optimizer, losses=losses)
    assert losses["tok2vec"] == 0.0
    assert losses["tagger"] > 0.0


cfg_string_multi = """
[nlp]
lang = "en"
pipeline = ["tok2vec","tagger", "ner"]

[components]

[components.tagger]
factory = "tagger"

[components.tagger.model]
@architectures = "spacy.Tagger.v2"
nO = null

[components.tagger.model.tok2vec]
@architectures = "spacy.Tok2VecListener.v1"
width = ${components.tok2vec.model.encode.width}

[components.ner]
factory = "ner"

[components.ner.model]
@architectures = "spacy.TransitionBasedParser.v2"

[components.ner.model.tok2vec]
@architectures = "spacy.Tok2VecListener.v1"
width = ${components.tok2vec.model.encode.width}

[components.tok2vec]
factory = "tok2vec"

[components.tok2vec.model]
@architectures = "spacy.Tok2Vec.v2"

[components.tok2vec.model.embed]
@architectures = "spacy.MultiHashEmbed.v1"
width = ${components.tok2vec.model.encode.width}
rows = [2000, 1000, 1000, 1000]
attrs = ["NORM", "PREFIX", "SUFFIX", "SHAPE"]
include_static_vectors = false

[components.tok2vec.model.encode]
@architectures = "spacy.MaxoutWindowEncoder.v2"
width = 96
depth = 4
window_size = 1
maxout_pieces = 3
"""


def test_replace_listeners_from_config():
    orig_config = Config().from_str(cfg_string_multi)
    nlp = util.load_model_from_config(orig_config, auto_fill=True)
    annots = {"tags": ["V", "Z"], "entities": [(0, 1, "A"), (1, 2, "B")]}
    examples = [Example.from_dict(nlp.make_doc("x y"), annots)]
    nlp.initialize(lambda: examples)
    tok2vec = nlp.get_pipe("tok2vec")
    tagger = nlp.get_pipe("tagger")
    ner = nlp.get_pipe("ner")
    assert tok2vec.listening_components == ["tagger", "ner"]
    assert any(isinstance(node, Tok2VecListener) for node in ner.model.walk())
    assert any(isinstance(node, Tok2VecListener) for node in tagger.model.walk())
    with make_tempdir() as dir_path:
        nlp.to_disk(dir_path)
        base_model = str(dir_path)
        new_config = {
            "nlp": {
                "lang": "en",
                "pipeline": ["tok2vec", "tagger2", "ner3", "tagger4"],
            },
            "components": {
                "tok2vec": {"source": base_model},
                "tagger2": {
                    "source": base_model,
                    "component": "tagger",
                    "replace_listeners": ["model.tok2vec"],
                },
                "ner3": {
                    "source": base_model,
                    "component": "ner",
                },
                "tagger4": {
                    "source": base_model,
                    "component": "tagger",
                },
            },
        }
        new_nlp = util.load_model_from_config(new_config, auto_fill=True)
    new_nlp.initialize(lambda: examples)
    tok2vec = new_nlp.get_pipe("tok2vec")
    tagger = new_nlp.get_pipe("tagger2")
    ner = new_nlp.get_pipe("ner3")
    assert "ner" not in new_nlp.pipe_names
    assert "tagger" not in new_nlp.pipe_names
    assert tok2vec.listening_components == ["ner3", "tagger4"]
    assert any(isinstance(node, Tok2VecListener) for node in ner.model.walk())
    assert not any(isinstance(node, Tok2VecListener) for node in tagger.model.walk())
    t2v_cfg = new_nlp.config["components"]["tok2vec"]["model"]
    assert t2v_cfg["@architectures"] == "spacy.Tok2Vec.v2"
    assert new_nlp.config["components"]["tagger2"]["model"]["tok2vec"] == t2v_cfg
    assert (
        new_nlp.config["components"]["ner3"]["model"]["tok2vec"]["@architectures"]
        == "spacy.Tok2VecListener.v1"
    )
    assert (
        new_nlp.config["components"]["tagger4"]["model"]["tok2vec"]["@architectures"]
        == "spacy.Tok2VecListener.v1"
    )


cfg_string_multi_textcat = """
[nlp]
lang = "en"
pipeline = ["tok2vec","textcat_multilabel","tagger"]

[components]

[components.textcat_multilabel]
factory = "textcat_multilabel"

[components.textcat_multilabel.model]
@architectures = "spacy.TextCatEnsemble.v2"
nO = null

[components.textcat_multilabel.model.tok2vec]
@architectures = "spacy.Tok2VecListener.v1"
width = ${components.tok2vec.model.encode.width}

[components.textcat_multilabel.model.linear_model]
@architectures = "spacy.TextCatBOW.v1"
exclusive_classes = false
ngram_size = 1
no_output_layer = false

[components.tagger]
factory = "tagger"

[components.tagger.model]
@architectures = "spacy.Tagger.v2"
nO = null

[components.tagger.model.tok2vec]
@architectures = "spacy.Tok2VecListener.v1"
width = ${components.tok2vec.model.encode.width}

[components.tok2vec]
factory = "tok2vec"

[components.tok2vec.model]
@architectures = "spacy.Tok2Vec.v2"

[components.tok2vec.model.embed]
@architectures = "spacy.MultiHashEmbed.v1"
width = ${components.tok2vec.model.encode.width}
rows = [2000, 1000, 1000, 1000]
attrs = ["NORM", "PREFIX", "SUFFIX", "SHAPE"]
include_static_vectors = false

[components.tok2vec.model.encode]
@architectures = "spacy.MaxoutWindowEncoder.v2"
width = 96
depth = 4
window_size = 1
maxout_pieces = 3
"""


def test_tok2vec_listeners_textcat():
    orig_config = Config().from_str(cfg_string_multi_textcat)
    nlp = util.load_model_from_config(orig_config, auto_fill=True, validate=True)
    assert nlp.pipe_names == ["tok2vec", "textcat_multilabel", "tagger"]
    tagger = nlp.get_pipe("tagger")
    textcat = nlp.get_pipe("textcat_multilabel")
    tok2vec = nlp.get_pipe("tok2vec")
    tagger_tok2vec = tagger.model.get_ref("tok2vec")
    textcat_tok2vec = textcat.model.get_ref("tok2vec")
    assert isinstance(tok2vec, Tok2Vec)
    assert isinstance(tagger_tok2vec, Tok2VecListener)
    assert isinstance(textcat_tok2vec, Tok2VecListener)
    train_examples = []
    for t in TRAIN_DATA:
        train_examples.append(Example.from_dict(nlp.make_doc(t[0]), t[1]))
    optimizer = nlp.initialize(lambda: train_examples)
    for i in range(50):
        losses = {}
        nlp.update(train_examples, sgd=optimizer, losses=losses)

    docs = list(nlp.pipe(["Eat blue ham", "I like green eggs"]))
    cats0 = docs[0].cats
    assert cats0["preference"] < 0.1
    assert cats0["imperative"] > 0.9
    cats1 = docs[1].cats
    assert cats1["preference"] > 0.1
    assert cats1["imperative"] < 0.9
    assert [t.tag_ for t in docs[0]] == ["V", "J", "N"]
    assert [t.tag_ for t in docs[1]] == ["N", "V", "J", "N"]


def test_tok2vec_listener_source_link_name():
    """The component's internal name and the tok2vec listener map correspond
    to the most recently modified pipeline.
    """
    orig_config = Config().from_str(cfg_string_multi)
    nlp1 = util.load_model_from_config(orig_config, auto_fill=True, validate=True)
    assert nlp1.get_pipe("tok2vec").listening_components == ["tagger", "ner"]

    nlp2 = English()
    nlp2.add_pipe("tok2vec", source=nlp1)
    nlp2.add_pipe("tagger", name="tagger2", source=nlp1)

    # There is no way for the component to have the right name for both
    # pipelines; right now the most recently modified pipeline takes priority.
    assert nlp1.get_pipe("tagger").name == nlp2.get_pipe("tagger2").name == "tagger2"

    # There is no way for the tok2vec to have the right listener map for both
    # pipelines; right now the most recently modified pipeline takes priority.
    assert nlp2.get_pipe("tok2vec").listening_components == ["tagger2"]
    nlp2.add_pipe("ner", name="ner3", source=nlp1)
    assert nlp2.get_pipe("tok2vec").listening_components == ["tagger2", "ner3"]
    nlp2.remove_pipe("ner3")
    assert nlp2.get_pipe("tok2vec").listening_components == ["tagger2"]
    nlp2.remove_pipe("tagger2")
    assert nlp2.get_pipe("tok2vec").listening_components == []

    # At this point the tok2vec component corresponds to nlp2
    assert nlp1.get_pipe("tok2vec").listening_components == []

    # Modifying the nlp1 pipeline syncs the tok2vec listener map back to nlp1
    nlp1.add_pipe("sentencizer")
    assert nlp1.get_pipe("tok2vec").listening_components == ["tagger", "ner"]

    # Modifying nlp2 syncs it back to nlp2
    nlp2.add_pipe("sentencizer")
    assert nlp1.get_pipe("tok2vec").listening_components == []


def test_tok2vec_listener_source_replace_listeners():
    orig_config = Config().from_str(cfg_string_multi)
    nlp1 = util.load_model_from_config(orig_config, auto_fill=True, validate=True)
    assert nlp1.get_pipe("tok2vec").listening_components == ["tagger", "ner"]
    nlp1.replace_listeners("tok2vec", "tagger", ["model.tok2vec"])
    assert nlp1.get_pipe("tok2vec").listening_components == ["ner"]

    nlp2 = English()
    nlp2.add_pipe("tok2vec", source=nlp1)
    assert nlp2.get_pipe("tok2vec").listening_components == []
    nlp2.add_pipe("tagger", source=nlp1)
    assert nlp2.get_pipe("tok2vec").listening_components == []
    nlp2.add_pipe("ner", name="ner2", source=nlp1)
    assert nlp2.get_pipe("tok2vec").listening_components == ["ner2"]
21,819
34.422078
120
py
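The tests above construct the tok2vec layer directly from its embed and encode sublayers. A minimal, self-contained sketch of the same call pattern (the width and row counts here are illustrative, not defaults):

# Sketch: build and run a standalone tok2vec model, assuming spaCy v3.x.
from spacy.lang.en import English
from spacy.ml.models.tok2vec import (
    MaxoutWindowEncoder,
    MultiHashEmbed,
    build_Tok2Vec_model,
)

nlp = English()
model = build_Tok2Vec_model(
    MultiHashEmbed(
        width=32,
        rows=[500, 500, 500, 500],
        include_static_vectors=False,
        attrs=["NORM", "PREFIX", "SUFFIX", "SHAPE"],
    ),
    MaxoutWindowEncoder(width=32, depth=2, window_size=1, maxout_pieces=3),
)
model.initialize()
docs = [nlp.make_doc("hello world")]
# begin_update returns one (n_tokens, width) array per doc plus a backprop callback
vectors, backprop = model.begin_update(docs)
assert vectors[0].shape == (2, 32)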
spaCy
spaCy-master/spacy/tests/serialize/__init__.py
0
0
0
py
spaCy
spaCy-master/spacy/tests/serialize/test_resource_warning.py
import warnings
from unittest import TestCase

import pytest
import srsly
from numpy import zeros

from spacy.kb.kb_in_memory import InMemoryLookupKB, Writer
from spacy.language import Language
from spacy.pipeline import TrainablePipe
from spacy.vectors import Vectors
from spacy.vocab import Vocab

from ..util import make_tempdir


def nlp():
    return Language()


def vectors():
    data = zeros((3, 1), dtype="f")
    keys = ["cat", "dog", "rat"]
    return Vectors(data=data, keys=keys)


def custom_pipe():
    # create dummy pipe partially implementing interface -- only want to test to_disk
    class SerializableDummy:
        def __init__(self, **cfg):
            if cfg:
                self.cfg = cfg
            else:
                self.cfg = None
            super(SerializableDummy, self).__init__()

        def to_bytes(self, exclude=tuple(), disable=None, **kwargs):
            return srsly.msgpack_dumps({"dummy": srsly.json_dumps(None)})

        def from_bytes(self, bytes_data, exclude):
            return self

        def to_disk(self, path, exclude=tuple(), **kwargs):
            pass

        def from_disk(self, path, exclude=tuple(), **kwargs):
            return self

    class MyPipe(TrainablePipe):
        def __init__(self, vocab, model=True, **cfg):
            if cfg:
                self.cfg = cfg
            else:
                self.cfg = None
            self.model = SerializableDummy()
            self.vocab = vocab

    return MyPipe(Vocab())


def tagger():
    nlp = Language()
    tagger = nlp.add_pipe("tagger")
    # need to add model for two reasons:
    # 1. no model leads to error in serialization,
    # 2. the affected line is the one for model serialization
    tagger.add_label("A")
    nlp.initialize()
    return tagger


def entity_linker():
    nlp = Language()

    def create_kb(vocab):
        kb = InMemoryLookupKB(vocab, entity_vector_length=1)
        kb.add_entity("test", 0.0, zeros((1,), dtype="f"))
        return kb

    entity_linker = nlp.add_pipe("entity_linker")
    entity_linker.set_kb(create_kb)
    # need to add model for two reasons:
    # 1. no model leads to error in serialization,
    # 2. the affected line is the one for model serialization
    nlp.initialize()
    return entity_linker


objects_to_test = (
    [nlp(), vectors(), custom_pipe(), tagger(), entity_linker()],
    ["nlp", "vectors", "custom_pipe", "tagger", "entity_linker"],
)


def write_obj_and_catch_warnings(obj):
    with make_tempdir() as d:
        with warnings.catch_warnings(record=True) as warnings_list:
            warnings.filterwarnings("always", category=ResourceWarning)
            obj.to_disk(d)
            # in python3.5 it seems that deprecation warnings are not filtered by filterwarnings
            return list(filter(lambda x: isinstance(x, ResourceWarning), warnings_list))


@pytest.mark.parametrize("obj", objects_to_test[0], ids=objects_to_test[1])
def test_to_disk_resource_warning(obj):
    warnings_list = write_obj_and_catch_warnings(obj)
    assert len(warnings_list) == 0


def test_writer_with_path_py35():
    writer = None
    with make_tempdir() as d:
        path = d / "test"
        try:
            writer = Writer(path)
        except Exception as e:
            pytest.fail(str(e))
        finally:
            if writer:
                writer.close()


def test_save_and_load_knowledge_base():
    nlp = Language()
    kb = InMemoryLookupKB(nlp.vocab, entity_vector_length=1)
    with make_tempdir() as d:
        path = d / "kb"
        try:
            kb.to_disk(path)
        except Exception as e:
            pytest.fail(str(e))
        try:
            kb_loaded = InMemoryLookupKB(nlp.vocab, entity_vector_length=1)
            kb_loaded.from_disk(path)
        except Exception as e:
            pytest.fail(str(e))


class TestToDiskResourceWarningUnittest(TestCase):
    def test_resource_warning(self):
        scenarios = zip(*objects_to_test)
        for scenario in scenarios:
            with self.subTest(msg=scenario[1]):
                warnings_list = write_obj_and_catch_warnings(scenario[0])
                self.assertEqual(len(warnings_list), 0)
4,185
27.283784
96
py
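The warning-capture pattern in write_obj_and_catch_warnings can be reproduced with only the standard library; a minimal sketch (the blank Language object stands in for any serializable spaCy object):

import tempfile
import warnings

from spacy.language import Language

nlp = Language()
with tempfile.TemporaryDirectory() as d:
    with warnings.catch_warnings(record=True) as caught:
        # Record every ResourceWarning raised while writing to disk
        warnings.filterwarnings("always", category=ResourceWarning)
        nlp.to_disk(d)
# Each caught item is a WarningMessage whose .category is the warning class
assert not [w for w in caught if issubclass(w.category, ResourceWarning)]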
spaCy
spaCy-master/spacy/tests/serialize/test_serialize_config.py
import pytest
from catalogue import RegistryError
from thinc.api import Config, ConfigValidationError

import spacy
from spacy.lang.de import German
from spacy.lang.en import English
from spacy.language import DEFAULT_CONFIG, DEFAULT_CONFIG_PRETRAIN_PATH, Language
from spacy.ml.models import (
    MaxoutWindowEncoder,
    MultiHashEmbed,
    build_tb_parser_model,
    build_Tok2Vec_model,
)
from spacy.schemas import ConfigSchema, ConfigSchemaPretrain
from spacy.training import Example
from spacy.util import (
    load_config,
    load_config_from_str,
    load_model_from_config,
    registry,
)

from ..util import make_tempdir

nlp_config_string = """
[paths]
train = null
dev = null

[corpora]

[corpora.train]
@readers = "spacy.Corpus.v1"
path = ${paths.train}

[corpora.dev]
@readers = "spacy.Corpus.v1"
path = ${paths.dev}

[training]

[training.batcher]
@batchers = "spacy.batch_by_words.v1"
size = 666

[nlp]
lang = "en"
pipeline = ["tok2vec", "tagger"]

[components]

[components.tok2vec]
factory = "tok2vec"

[components.tok2vec.model]
@architectures = "spacy.HashEmbedCNN.v1"
pretrained_vectors = null
width = 342
depth = 4
window_size = 1
embed_size = 2000
maxout_pieces = 3
subword_features = true

[components.tagger]
factory = "tagger"

[components.tagger.model]
@architectures = "spacy.Tagger.v2"

[components.tagger.model.tok2vec]
@architectures = "spacy.Tok2VecListener.v1"
width = ${components.tok2vec.model.width}
"""

pretrain_config_string = """
[paths]
train = null
dev = null

[corpora]

[corpora.train]
@readers = "spacy.Corpus.v1"
path = ${paths.train}

[corpora.dev]
@readers = "spacy.Corpus.v1"
path = ${paths.dev}

[training]

[training.batcher]
@batchers = "spacy.batch_by_words.v1"
size = 666

[nlp]
lang = "en"
pipeline = ["tok2vec", "tagger"]

[components]

[components.tok2vec]
factory = "tok2vec"

[components.tok2vec.model]
@architectures = "spacy.HashEmbedCNN.v1"
pretrained_vectors = null
width = 342
depth = 4
window_size = 1
embed_size = 2000
maxout_pieces = 3
subword_features = true

[components.tagger]
factory = "tagger"

[components.tagger.model]
@architectures = "spacy.Tagger.v2"

[components.tagger.model.tok2vec]
@architectures = "spacy.Tok2VecListener.v1"
width = ${components.tok2vec.model.width}

[pretraining]
"""

parser_config_string_upper = """
[model]
@architectures = "spacy.TransitionBasedParser.v2"
state_type = "parser"
extra_state_tokens = false
hidden_width = 66
maxout_pieces = 2
use_upper = true

[model.tok2vec]
@architectures = "spacy.HashEmbedCNN.v1"
pretrained_vectors = null
width = 333
depth = 4
embed_size = 5555
window_size = 1
maxout_pieces = 7
subword_features = false
"""

parser_config_string_no_upper = """
[model]
@architectures = "spacy.TransitionBasedParser.v2"
state_type = "parser"
extra_state_tokens = false
hidden_width = 66
maxout_pieces = 2
use_upper = false

[model.tok2vec]
@architectures = "spacy.HashEmbedCNN.v1"
pretrained_vectors = null
width = 333
depth = 4
embed_size = 5555
window_size = 1
maxout_pieces = 7
subword_features = false
"""


@registry.architectures("my_test_parser")
def my_parser():
    tok2vec = build_Tok2Vec_model(
        MultiHashEmbed(
            width=321,
            attrs=["LOWER", "SHAPE"],
            rows=[5432, 5432],
            include_static_vectors=False,
        ),
        MaxoutWindowEncoder(width=321, window_size=3, maxout_pieces=4, depth=2),
    )
    parser = build_tb_parser_model(
        tok2vec=tok2vec,
        state_type="parser",
        extra_state_tokens=True,
        hidden_width=65,
        maxout_pieces=5,
        use_upper=True,
    )
    return parser


@pytest.mark.issue(8190)
def test_issue8190():
    """Test that config overrides are not lost after load is complete."""
    source_cfg = {
        "nlp": {
            "lang": "en",
        },
        "custom": {"key": "value"},
    }
    source_nlp = English.from_config(source_cfg)
    with make_tempdir() as dir_path:
        # We need to create a loadable source pipeline
        source_path = dir_path / "test_model"
        source_nlp.to_disk(source_path)
        nlp = spacy.load(source_path, config={"custom": {"key": "updated_value"}})
        assert nlp.config["custom"]["key"] == "updated_value"


def test_create_nlp_from_config():
    config = Config().from_str(nlp_config_string)
    with pytest.raises(ConfigValidationError):
        load_model_from_config(config, auto_fill=False)
    nlp = load_model_from_config(config, auto_fill=True)
    assert nlp.config["training"]["batcher"]["size"] == 666
    assert len(nlp.config["training"]) > 1
    assert nlp.pipe_names == ["tok2vec", "tagger"]
    assert len(nlp.config["components"]) == 2
    assert len(nlp.config["nlp"]["pipeline"]) == 2
    nlp.remove_pipe("tagger")
    assert len(nlp.config["components"]) == 1
    assert len(nlp.config["nlp"]["pipeline"]) == 1
    with pytest.raises(ValueError):
        bad_cfg = {"yolo": {}}
        load_model_from_config(Config(bad_cfg), auto_fill=True)
    with pytest.raises(ValueError):
        bad_cfg = {"pipeline": {"foo": "bar"}}
        load_model_from_config(Config(bad_cfg), auto_fill=True)


def test_create_nlp_from_pretraining_config():
    """Test that the default pretraining config validates properly"""
    config = Config().from_str(pretrain_config_string)
    pretrain_config = load_config(DEFAULT_CONFIG_PRETRAIN_PATH)
    filled = config.merge(pretrain_config)
    registry.resolve(filled["pretraining"], schema=ConfigSchemaPretrain)


def test_create_nlp_from_config_multiple_instances():
    """Test that the nlp object is created correctly for a config with multiple
    instances of the same component."""
    config = Config().from_str(nlp_config_string)
    config["components"] = {
        "t2v": config["components"]["tok2vec"],
        "tagger1": config["components"]["tagger"],
        "tagger2": config["components"]["tagger"],
    }
    config["nlp"]["pipeline"] = list(config["components"].keys())
    nlp = load_model_from_config(config, auto_fill=True)
    assert nlp.pipe_names == ["t2v", "tagger1", "tagger2"]
    assert nlp.get_pipe_meta("t2v").factory == "tok2vec"
    assert nlp.get_pipe_meta("tagger1").factory == "tagger"
    assert nlp.get_pipe_meta("tagger2").factory == "tagger"
    pipeline_config = nlp.config["components"]
    assert len(pipeline_config) == 3
    assert list(pipeline_config.keys()) == ["t2v", "tagger1", "tagger2"]
    assert nlp.config["nlp"]["pipeline"] == ["t2v", "tagger1", "tagger2"]


def test_serialize_nlp():
    """Create a custom nlp pipeline from config and ensure it serializes it correctly"""
    nlp_config = Config().from_str(nlp_config_string)
    nlp = load_model_from_config(nlp_config, auto_fill=True)
    nlp.get_pipe("tagger").add_label("A")
    nlp.initialize()
    assert "tok2vec" in nlp.pipe_names
    assert "tagger" in nlp.pipe_names
    assert "parser" not in nlp.pipe_names
    assert nlp.get_pipe("tagger").model.get_ref("tok2vec").get_dim("nO") == 342

    with make_tempdir() as d:
        nlp.to_disk(d)
        nlp2 = spacy.load(d)
        assert "tok2vec" in nlp2.pipe_names
        assert "tagger" in nlp2.pipe_names
        assert "parser" not in nlp2.pipe_names
        assert nlp2.get_pipe("tagger").model.get_ref("tok2vec").get_dim("nO") == 342


def test_serialize_custom_nlp():
    """Create a custom nlp pipeline and ensure it serializes it correctly"""
    nlp = English()
    parser_cfg = dict()
    parser_cfg["model"] = {"@architectures": "my_test_parser"}
    nlp.add_pipe("parser", config=parser_cfg)
    nlp.initialize()

    with make_tempdir() as d:
        nlp.to_disk(d)
        nlp2 = spacy.load(d)
        model = nlp2.get_pipe("parser").model
        model.get_ref("tok2vec")
        # check that we have the correct settings, not the default ones
        assert model.get_ref("upper").get_dim("nI") == 65
        assert model.get_ref("lower").get_dim("nI") == 65


@pytest.mark.parametrize(
    "parser_config_string", [parser_config_string_upper, parser_config_string_no_upper]
)
def test_serialize_parser(parser_config_string):
    """Create a non-default parser config to check nlp serializes it correctly"""
    nlp = English()
    model_config = Config().from_str(parser_config_string)
    parser = nlp.add_pipe("parser", config=model_config)
    parser.add_label("nsubj")
    nlp.initialize()

    with make_tempdir() as d:
        nlp.to_disk(d)
        nlp2 = spacy.load(d)
        model = nlp2.get_pipe("parser").model
        model.get_ref("tok2vec")
        # check that we have the correct settings, not the default ones
        if model.attrs["has_upper"]:
            assert model.get_ref("upper").get_dim("nI") == 66
        assert model.get_ref("lower").get_dim("nI") == 66


def test_config_nlp_roundtrip():
    """Test that a config produced by the nlp object passes training config
    validation."""
    nlp = English()
    nlp.add_pipe("entity_ruler")
    nlp.add_pipe("ner")
    new_nlp = load_model_from_config(nlp.config, auto_fill=False)
    assert new_nlp.config == nlp.config
    assert new_nlp.pipe_names == nlp.pipe_names
    assert new_nlp._pipe_configs == nlp._pipe_configs
    assert new_nlp._pipe_meta == nlp._pipe_meta
    assert new_nlp._factory_meta == nlp._factory_meta


def test_config_nlp_roundtrip_bytes_disk():
    """Test that the config is serialized correctly and not interpolated
    by mistake."""
    nlp = English()
    nlp_bytes = nlp.to_bytes()
    new_nlp = English().from_bytes(nlp_bytes)
    assert new_nlp.config == nlp.config
    nlp = English()
    with make_tempdir() as d:
        nlp.to_disk(d)
        new_nlp = spacy.load(d)
    assert new_nlp.config == nlp.config


def test_serialize_config_language_specific():
    """Test that config serialization works as expected with language-specific
    factories."""
    name = "test_serialize_config_language_specific"

    @English.factory(name, default_config={"foo": 20})
    def custom_factory(nlp: Language, name: str, foo: int):
        return lambda doc: doc

    nlp = Language()
    assert not nlp.has_factory(name)
    nlp = English()
    assert nlp.has_factory(name)
    nlp.add_pipe(name, config={"foo": 100}, name="bar")
    pipe_config = nlp.config["components"]["bar"]
    assert pipe_config["foo"] == 100
    assert pipe_config["factory"] == name

    with make_tempdir() as d:
        nlp.to_disk(d)
        nlp2 = spacy.load(d)
    assert nlp2.has_factory(name)
    assert nlp2.pipe_names == ["bar"]
    assert nlp2.get_pipe_meta("bar").factory == name
    pipe_config = nlp2.config["components"]["bar"]
    assert pipe_config["foo"] == 100
    assert pipe_config["factory"] == name

    config = Config().from_str(nlp2.config.to_str())
    config["nlp"]["lang"] = "de"
    with pytest.raises(ValueError):
        # German doesn't have a factory, only English does
        load_model_from_config(config)


def test_serialize_config_missing_pipes():
    config = Config().from_str(nlp_config_string)
    config["components"].pop("tok2vec")
    assert "tok2vec" in config["nlp"]["pipeline"]
    assert "tok2vec" not in config["components"]
    with pytest.raises(ValueError):
        load_model_from_config(config, auto_fill=True)


def test_config_overrides():
    overrides_nested = {"nlp": {"lang": "de", "pipeline": ["tagger"]}}
    overrides_dot = {"nlp.lang": "de", "nlp.pipeline": ["tagger"]}
    # load_model from config with overrides passed directly to Config
    config = Config().from_str(nlp_config_string, overrides=overrides_dot)
    nlp = load_model_from_config(config, auto_fill=True)
    assert isinstance(nlp, German)
    assert nlp.pipe_names == ["tagger"]
    # Serialized roundtrip with config passed in
    base_config = Config().from_str(nlp_config_string)
    base_nlp = load_model_from_config(base_config, auto_fill=True)
    assert isinstance(base_nlp, English)
    assert base_nlp.pipe_names == ["tok2vec", "tagger"]
    with make_tempdir() as d:
        base_nlp.to_disk(d)
        nlp = spacy.load(d, config=overrides_nested)
    assert isinstance(nlp, German)
    assert nlp.pipe_names == ["tagger"]
    with make_tempdir() as d:
        base_nlp.to_disk(d)
        nlp = spacy.load(d, config=overrides_dot)
    assert isinstance(nlp, German)
    assert nlp.pipe_names == ["tagger"]
    with make_tempdir() as d:
        base_nlp.to_disk(d)
        nlp = spacy.load(d)
    assert isinstance(nlp, English)
    assert nlp.pipe_names == ["tok2vec", "tagger"]


@pytest.mark.filterwarnings("ignore:\\[W036")
def test_config_overrides_registered_functions():
    nlp = spacy.blank("en")
    nlp.add_pipe("attribute_ruler")
    with make_tempdir() as d:
        nlp.to_disk(d)
        nlp_re1 = spacy.load(
            d,
            config={
                "components": {
                    "attribute_ruler": {
                        "scorer": {"@scorers": "spacy.tagger_scorer.v1"}
                    }
                }
            },
        )
        assert (
            nlp_re1.config["components"]["attribute_ruler"]["scorer"]["@scorers"]
            == "spacy.tagger_scorer.v1"
        )

        @registry.misc("test_some_other_key")
        def misc_some_other_key():
            return "some_other_key"

        nlp_re2 = spacy.load(
            d,
            config={
                "components": {
                    "attribute_ruler": {
                        "scorer": {
                            "@scorers": "spacy.overlapping_labeled_spans_scorer.v1",
                            "spans_key": {"@misc": "test_some_other_key"},
                        }
                    }
                }
            },
        )
        assert nlp_re2.config["components"]["attribute_ruler"]["scorer"][
            "spans_key"
        ] == {"@misc": "test_some_other_key"}
        # run dummy evaluation (will return None scores) in order to test that
        # the spans_key value in the nested override is working as intended in
        # the config
        example = Example.from_dict(nlp_re2.make_doc("a b c"), {})
        scores = nlp_re2.evaluate([example])
        assert "spans_some_other_key_f" in scores


def test_config_interpolation():
    config = Config().from_str(nlp_config_string, interpolate=False)
    assert config["corpora"]["train"]["path"] == "${paths.train}"
    interpolated = config.interpolate()
    assert interpolated["corpora"]["train"]["path"] is None
    nlp = English.from_config(config)
    assert nlp.config["corpora"]["train"]["path"] == "${paths.train}"
    # Ensure that variables are preserved in nlp config
    width = "${components.tok2vec.model.width}"
    assert config["components"]["tagger"]["model"]["tok2vec"]["width"] == width
    assert nlp.config["components"]["tagger"]["model"]["tok2vec"]["width"] == width
    interpolated2 = nlp.config.interpolate()
    assert interpolated2["corpora"]["train"]["path"] is None
    assert interpolated2["components"]["tagger"]["model"]["tok2vec"]["width"] == 342
    nlp2 = English.from_config(interpolated)
    assert nlp2.config["corpora"]["train"]["path"] is None
    assert nlp2.config["components"]["tagger"]["model"]["tok2vec"]["width"] == 342


def test_config_optional_sections():
    config = Config().from_str(nlp_config_string)
    config = DEFAULT_CONFIG.merge(config)
    assert "pretraining" not in config
    filled = registry.fill(config, schema=ConfigSchema, validate=False)
    # Make sure that optional "pretraining" block doesn't default to None,
    # which would (rightly) cause error because it'd result in a top-level
    # key that's not a section (dict). Note that the following roundtrip is
    # also how Config.interpolate works under the hood.
    new_config = Config().from_str(filled.to_str())
    assert new_config["pretraining"] == {}


def test_config_auto_fill_extra_fields():
    config = Config({"nlp": {"lang": "en"}, "training": {}})
    assert load_model_from_config(config, auto_fill=True)
    config = Config({"nlp": {"lang": "en"}, "training": {"extra": "hello"}})
    nlp = load_model_from_config(config, auto_fill=True, validate=False)
    assert "extra" not in nlp.config["training"]
    # Make sure the config generated is valid
    load_model_from_config(nlp.config)


@pytest.mark.parametrize(
    "parser_config_string", [parser_config_string_upper, parser_config_string_no_upper]
)
def test_config_validate_literal(parser_config_string):
    nlp = English()
    config = Config().from_str(parser_config_string)
    config["model"]["state_type"] = "nonsense"
    with pytest.raises(ConfigValidationError):
        nlp.add_pipe("parser", config=config)
    config["model"]["state_type"] = "ner"
    nlp.add_pipe("parser", config=config)


def test_config_only_resolve_relevant_blocks():
    """Test that only the relevant blocks are resolved in the different methods
    and that invalid blocks are ignored if needed. For instance, the [initialize]
    shouldn't be resolved at runtime.
    """
    nlp = English()
    config = nlp.config
    config["training"]["before_to_disk"] = {"@misc": "nonexistent"}
    config["initialize"]["lookups"] = {"@misc": "nonexistent"}
    # This shouldn't resolve [training] or [initialize]
    nlp = load_model_from_config(config, auto_fill=True)
    # This will raise for nonexistent value
    with pytest.raises(RegistryError):
        nlp.initialize()
    nlp.config["initialize"]["lookups"] = None
    nlp.initialize()


def test_hyphen_in_config():
    hyphen_config_str = """
    [nlp]
    lang = "en"
    pipeline = ["my_punctual_component"]

    [components]

    [components.my_punctual_component]
    factory = "my_punctual_component"
    punctuation = ["?","-"]
    """

    @spacy.Language.factory("my_punctual_component")
    class MyPunctualComponent(object):
        name = "my_punctual_component"

        def __init__(
            self,
            nlp,
            name,
            punctuation,
        ):
            self.punctuation = punctuation

    nlp = English.from_config(load_config_from_str(hyphen_config_str))
    assert nlp.get_pipe("my_punctual_component").punctuation == ["?", "-"]
18,108
30.493913
88
py
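test_config_overrides and test_issue8190 above rely on the fact that a config dict passed to spacy.load() is merged into the saved config before the pipeline is built. A minimal sketch of that mechanism (the sentencizer setting is just a convenient built-in option to override):

import tempfile

import spacy

nlp = spacy.blank("en")
nlp.add_pipe("sentencizer")
with tempfile.TemporaryDirectory() as d:
    nlp.to_disk(d)
    # The override is applied to components.sentencizer.punct_chars before
    # the component is constructed from the saved config.
    nlp2 = spacy.load(d, config={"components": {"sentencizer": {"punct_chars": ["!"]}}})
assert nlp2.get_pipe("sentencizer").punct_chars == {"!"}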
spaCy
spaCy-master/spacy/tests/serialize/test_serialize_doc.py
import copy
import pickle

import numpy
import pytest

from spacy.attrs import DEP, HEAD
from spacy.lang.en import English
from spacy.language import Language
from spacy.matcher import Matcher, PhraseMatcher
from spacy.tokens import Doc
from spacy.vectors import Vectors
from spacy.vocab import Vocab

from ..util import make_tempdir


@pytest.mark.issue(1727)
def test_issue1727():
    """Test that models with no pretrained vectors can be deserialized
    correctly after vectors are added."""
    nlp = Language(Vocab())
    data = numpy.ones((3, 300), dtype="f")
    vectors = Vectors(data=data, keys=["I", "am", "Matt"])
    tagger = nlp.create_pipe("tagger")
    tagger.add_label("PRP")
    assert tagger.cfg.get("pretrained_dims", 0) == 0
    tagger.vocab.vectors = vectors
    with make_tempdir() as path:
        tagger.to_disk(path)
        tagger = nlp.create_pipe("tagger").from_disk(path)
        assert tagger.cfg.get("pretrained_dims", 0) == 0


@pytest.mark.issue(1799)
def test_issue1799():
    """Test sentence boundaries are deserialized correctly, even for
    non-projective sentences."""
    heads_deps = numpy.asarray(
        [
            [1, 397],
            [4, 436],
            [2, 426],
            [1, 402],
            [0, 8206900633647566924],
            [18446744073709551615, 440],
            [18446744073709551614, 442],
        ],
        dtype="uint64",
    )
    doc = Doc(Vocab(), words="Just what I was looking for .".split())
    doc.vocab.strings.add("ROOT")
    doc = doc.from_array([HEAD, DEP], heads_deps)
    assert len(list(doc.sents)) == 1


@pytest.mark.issue(1834)
def test_issue1834():
    """Test that sentence boundaries & parse/tag flags are not lost
    during serialization."""
    words = ["This", "is", "a", "first", "sentence", ".", "And", "another", "one"]
    doc = Doc(Vocab(), words=words)
    doc[6].is_sent_start = True
    new_doc = Doc(doc.vocab).from_bytes(doc.to_bytes())
    assert new_doc[6].sent_start
    assert not new_doc.has_annotation("DEP")
    assert not new_doc.has_annotation("TAG")
    doc = Doc(
        Vocab(),
        words=words,
        tags=["TAG"] * len(words),
        heads=[0, 0, 0, 0, 0, 0, 6, 6, 6],
        deps=["dep"] * len(words),
    )
    new_doc = Doc(doc.vocab).from_bytes(doc.to_bytes())
    assert new_doc[6].sent_start
    assert new_doc.has_annotation("DEP")
    assert new_doc.has_annotation("TAG")


@pytest.mark.issue(1883)
def test_issue1883():
    matcher = Matcher(Vocab())
    matcher.add("pat1", [[{"orth": "hello"}]])
    doc = Doc(matcher.vocab, words=["hello"])
    assert len(matcher(doc)) == 1
    new_matcher = copy.deepcopy(matcher)
    new_doc = Doc(new_matcher.vocab, words=["hello"])
    assert len(new_matcher(new_doc)) == 1


@pytest.mark.issue(2564)
def test_issue2564():
    """Test the tagger sets has_annotation("TAG") correctly when used
    via Language.pipe."""
    nlp = Language()
    tagger = nlp.add_pipe("tagger")
    tagger.add_label("A")
    nlp.initialize()
    doc = nlp("hello world")
    assert doc.has_annotation("TAG")
    docs = nlp.pipe(["hello", "world"])
    piped_doc = next(docs)
    assert piped_doc.has_annotation("TAG")


@pytest.mark.issue(3248)
def test_issue3248_2():
    """Test that the PhraseMatcher can be pickled correctly."""
    nlp = English()
    matcher = PhraseMatcher(nlp.vocab)
    matcher.add("TEST1", [nlp("a"), nlp("b"), nlp("c")])
    matcher.add("TEST2", [nlp("d")])
    data = pickle.dumps(matcher)
    new_matcher = pickle.loads(data)
    assert len(new_matcher) == len(matcher)


@pytest.mark.issue(3289)
def test_issue3289():
    """Test that Language.to_bytes handles serializing a pipeline component
    with an uninitialized model."""
    nlp = English()
    nlp.add_pipe("textcat")
    bytes_data = nlp.to_bytes()
    new_nlp = English()
    new_nlp.add_pipe("textcat")
    new_nlp.from_bytes(bytes_data)


@pytest.mark.issue(3468)
def test_issue3468():
    """Test that sentence boundaries are set correctly so
    Doc.has_annotation("SENT_START") can be restored after serialization."""
    nlp = English()
    nlp.add_pipe("sentencizer")
    doc = nlp("Hello world")
    assert doc[0].is_sent_start
    assert doc.has_annotation("SENT_START")
    assert len(list(doc.sents)) == 1
    doc_bytes = doc.to_bytes()
    new_doc = Doc(nlp.vocab).from_bytes(doc_bytes)
    assert new_doc[0].is_sent_start
    assert new_doc.has_annotation("SENT_START")
    assert len(list(new_doc.sents)) == 1


@pytest.mark.issue(3959)
def test_issue3959():
    """Ensure that a modified pos attribute is serialized correctly."""
    nlp = English()
    doc = nlp(
        "displaCy uses JavaScript, SVG and CSS to show you how computers understand language"
    )
    assert doc[0].pos_ == ""
    doc[0].pos_ = "NOUN"
    assert doc[0].pos_ == "NOUN"
    # usually this is already True when starting from proper models instead of blank English
    with make_tempdir() as tmp_dir:
        file_path = tmp_dir / "my_doc"
        doc.to_disk(file_path)
        doc2 = nlp("")
        doc2.from_disk(file_path)
        assert doc2[0].pos_ == "NOUN"


def test_serialize_empty_doc(en_vocab):
    doc = Doc(en_vocab)
    data = doc.to_bytes()
    doc2 = Doc(en_vocab)
    doc2.from_bytes(data)
    assert len(doc) == len(doc2)
    for token1, token2 in zip(doc, doc2):
        assert token1.text == token2.text


def test_serialize_doc_roundtrip_bytes(en_vocab):
    doc = Doc(en_vocab, words=["hello", "world"])
    doc.cats = {"A": 0.5}
    doc_b = doc.to_bytes()
    new_doc = Doc(en_vocab).from_bytes(doc_b)
    assert new_doc.to_bytes() == doc_b


def test_serialize_doc_roundtrip_disk(en_vocab):
    doc = Doc(en_vocab, words=["hello", "world"])
    with make_tempdir() as d:
        file_path = d / "doc"
        doc.to_disk(file_path)
        doc_d = Doc(en_vocab).from_disk(file_path)
        assert doc.to_bytes() == doc_d.to_bytes()


def test_serialize_doc_roundtrip_disk_str_path(en_vocab):
    doc = Doc(en_vocab, words=["hello", "world"])
    with make_tempdir() as d:
        file_path = d / "doc"
        file_path = str(file_path)
        doc.to_disk(file_path)
        doc_d = Doc(en_vocab).from_disk(file_path)
        assert doc.to_bytes() == doc_d.to_bytes()


def test_serialize_doc_exclude(en_vocab):
    doc = Doc(en_vocab, words=["hello", "world"])
    doc.user_data["foo"] = "bar"
    new_doc = Doc(en_vocab).from_bytes(doc.to_bytes())
    assert new_doc.user_data["foo"] == "bar"
    new_doc = Doc(en_vocab).from_bytes(doc.to_bytes(), exclude=["user_data"])
    assert not new_doc.user_data
    new_doc = Doc(en_vocab).from_bytes(doc.to_bytes(exclude=["user_data"]))
    assert not new_doc.user_data


def test_serialize_doc_span_groups(en_vocab):
    doc = Doc(en_vocab, words=["hello", "world", "!"])
    span = doc[0:2]
    span.label_ = "test_serialize_doc_span_groups_label"
    span.id_ = "test_serialize_doc_span_groups_id"
    span.kb_id_ = "test_serialize_doc_span_groups_kb_id"
    doc.spans["content"] = [span]
    new_doc = Doc(en_vocab).from_bytes(doc.to_bytes())
    assert len(new_doc.spans["content"]) == 1
    assert new_doc.spans["content"][0].label_ == "test_serialize_doc_span_groups_label"
    assert new_doc.spans["content"][0].id_ == "test_serialize_doc_span_groups_id"
    assert new_doc.spans["content"][0].kb_id_ == "test_serialize_doc_span_groups_kb_id"
7,370
31.615044
94
py
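The round-trips above all reduce to Doc.to_bytes()/from_bytes(); in its smallest form:

from spacy.tokens import Doc
from spacy.vocab import Vocab

vocab = Vocab()
doc = Doc(vocab, words=["hello", "world"], spaces=[True, False])
data = doc.to_bytes()  # serialized bytes, including user_data by default
doc2 = Doc(vocab).from_bytes(data)
assert doc2.text == doc.text == "hello world"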
spaCy
spaCy-master/spacy/tests/serialize/test_serialize_docbin.py
import pytest

import spacy
from spacy.lang.en import English
from spacy.tokens import Doc, DocBin
from spacy.tokens.underscore import Underscore


@pytest.mark.issue(4367)
def test_issue4367():
    """Test that docbin init goes well"""
    DocBin()
    DocBin(attrs=["LEMMA"])
    DocBin(attrs=["LEMMA", "ENT_IOB", "ENT_TYPE"])


@pytest.mark.issue(4528)
def test_issue4528(en_vocab):
    """Test that user_data is correctly serialized in DocBin."""
    doc = Doc(en_vocab, words=["hello", "world"])
    doc.user_data["foo"] = "bar"
    # This is how extension attribute values are stored in the user data
    doc.user_data[("._.", "foo", None, None)] = "bar"
    doc_bin = DocBin(store_user_data=True)
    doc_bin.add(doc)
    doc_bin_bytes = doc_bin.to_bytes()
    new_doc_bin = DocBin(store_user_data=True).from_bytes(doc_bin_bytes)
    new_doc = list(new_doc_bin.get_docs(en_vocab))[0]
    assert new_doc.user_data["foo"] == "bar"
    assert new_doc.user_data[("._.", "foo", None, None)] == "bar"


@pytest.mark.issue(5141)
def test_issue5141(en_vocab):
    """Ensure an empty DocBin does not crash on serialization"""
    doc_bin = DocBin(attrs=["DEP", "HEAD"])
    assert list(doc_bin.get_docs(en_vocab)) == []
    doc_bin_bytes = doc_bin.to_bytes()
    doc_bin_2 = DocBin().from_bytes(doc_bin_bytes)
    assert list(doc_bin_2.get_docs(en_vocab)) == []


def test_serialize_doc_bin():
    doc_bin = DocBin(
        attrs=["LEMMA", "ENT_IOB", "ENT_TYPE", "NORM", "ENT_ID"], store_user_data=True
    )
    texts = ["Some text", "Lots of texts...", "..."]
    cats = {"A": 0.5}
    nlp = English()
    for doc in nlp.pipe(texts):
        doc.cats = cats
        span = doc[0:2]
        span.label_ = "UNUSUAL_SPAN_LABEL"
        span.id_ = "UNUSUAL_SPAN_ID"
        span.kb_id_ = "UNUSUAL_SPAN_KB_ID"
        doc.spans["start"] = [span]
        doc[0].norm_ = "UNUSUAL_TOKEN_NORM"
        doc[0].ent_id_ = "UNUSUAL_TOKEN_ENT_ID"
        doc_bin.add(doc)
    bytes_data = doc_bin.to_bytes()

    # Deserialize later, e.g. in a new process
    nlp = spacy.blank("en")
    doc_bin = DocBin().from_bytes(bytes_data)
    reloaded_docs = list(doc_bin.get_docs(nlp.vocab))
    for i, doc in enumerate(reloaded_docs):
        assert doc.text == texts[i]
        assert doc.cats == cats
        assert len(doc.spans) == 1
        assert doc.spans["start"][0].label_ == "UNUSUAL_SPAN_LABEL"
        assert doc.spans["start"][0].id_ == "UNUSUAL_SPAN_ID"
        assert doc.spans["start"][0].kb_id_ == "UNUSUAL_SPAN_KB_ID"
        assert doc[0].norm_ == "UNUSUAL_TOKEN_NORM"
        assert doc[0].ent_id_ == "UNUSUAL_TOKEN_ENT_ID"


def test_serialize_doc_bin_unknown_spaces(en_vocab):
    doc1 = Doc(en_vocab, words=["that", "'s"])
    assert doc1.has_unknown_spaces
    assert doc1.text == "that 's "
    doc2 = Doc(en_vocab, words=["that", "'s"], spaces=[False, False])
    assert not doc2.has_unknown_spaces
    assert doc2.text == "that's"

    doc_bin = DocBin().from_bytes(DocBin(docs=[doc1, doc2]).to_bytes())
    re_doc1, re_doc2 = doc_bin.get_docs(en_vocab)
    assert re_doc1.has_unknown_spaces
    assert re_doc1.text == "that 's "
    assert not re_doc2.has_unknown_spaces
    assert re_doc2.text == "that's"


@pytest.mark.parametrize(
    "writer_flag,reader_flag,reader_value",
    [
        (True, True, "bar"),
        (True, False, "bar"),
        (False, True, "nothing"),
        (False, False, "nothing"),
    ],
)
def test_serialize_custom_extension(en_vocab, writer_flag, reader_flag, reader_value):
    """Test that custom extensions are correctly serialized in DocBin."""
    Doc.set_extension("foo", default="nothing")
    doc = Doc(en_vocab, words=["hello", "world"])
    doc._.foo = "bar"
    doc_bin_1 = DocBin(store_user_data=writer_flag)
    doc_bin_1.add(doc)
    doc_bin_bytes = doc_bin_1.to_bytes()
    doc_bin_2 = DocBin(store_user_data=reader_flag).from_bytes(doc_bin_bytes)
    doc_2 = list(doc_bin_2.get_docs(en_vocab))[0]
    assert doc_2._.foo == reader_value
    Underscore.doc_extensions = {}
4,028
34.342105
86
py
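The DocBin round-trip these tests exercise, in its smallest form (the attrs list is illustrative; ORTH and SPACY are always stored):

import spacy
from spacy.tokens import DocBin

nlp = spacy.blank("en")
doc_bin = DocBin(attrs=["NORM"])
for doc in nlp.pipe(["Some text", "Lots of texts..."]):
    doc_bin.add(doc)
data = doc_bin.to_bytes()  # compact bytes, suitable for disk or another process

# Later, possibly elsewhere: only a compatible vocab is needed to rebuild the docs
docs = list(DocBin().from_bytes(data).get_docs(spacy.blank("en").vocab))
assert [d.text for d in docs] == ["Some text", "Lots of texts..."]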
spaCy
spaCy-master/spacy/tests/serialize/test_serialize_extension_attrs.py
import pytest

from spacy.tokens import Doc, Token
from spacy.vocab import Vocab


@pytest.fixture
def doc_w_attrs(en_tokenizer):
    Doc.set_extension("_test_attr", default=False)
    Doc.set_extension("_test_prop", getter=lambda doc: len(doc.text))
    Doc.set_extension("_test_method", method=lambda doc, arg: f"{len(doc.text)}{arg}")
    doc = en_tokenizer("This is a test.")
    doc._._test_attr = "test"
    Token.set_extension("_test_token", default="t0")
    doc[1]._._test_token = "t1"
    return doc


def test_serialize_ext_attrs_from_bytes(doc_w_attrs):
    doc_b = doc_w_attrs.to_bytes()
    doc = Doc(Vocab()).from_bytes(doc_b)
    assert doc._.has("_test_attr")
    assert doc._._test_attr == "test"
    assert doc._._test_prop == len(doc.text)
    assert doc._._test_method("test") == f"{len(doc.text)}test"
    assert doc[0]._._test_token == "t0"
    assert doc[1]._._test_token == "t1"
    assert doc[2]._._test_token == "t0"
946
29.548387
86
py
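The key detail the fixture above encodes: only the values of `default`-style extensions travel with the Doc bytes (via user_data), while getters and methods are resolved from the extension registered on the receiving side. A minimal sketch (the `flagged` name is illustrative):

from spacy.tokens import Doc
from spacy.vocab import Vocab

Doc.set_extension("flagged", default=False)
doc = Doc(Vocab(), words=["a", "b"])
doc._.flagged = True
# The value is carried in doc.user_data; the extension itself must be
# registered in the receiving process before it can be accessed.
doc2 = Doc(Vocab()).from_bytes(doc.to_bytes())
assert doc2._.flagged is True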
spaCy
spaCy-master/spacy/tests/serialize/test_serialize_kb.py
from pathlib import Path
from typing import Any, Callable, Dict, Iterable

import srsly
from numpy import zeros
from thinc.api import Config

from spacy import Errors, util
from spacy.kb.kb_in_memory import InMemoryLookupKB
from spacy.util import SimpleFrozenList, ensure_path, load_model_from_config, registry
from spacy.vocab import Vocab

from ..util import make_tempdir


def test_serialize_kb_disk(en_vocab):
    # baseline assertions
    kb1 = _get_dummy_kb(en_vocab)
    _check_kb(kb1)

    # dumping to file & loading back in
    with make_tempdir() as d:
        dir_path = ensure_path(d)
        if not dir_path.exists():
            dir_path.mkdir()
        file_path = dir_path / "kb"
        kb1.to_disk(str(file_path))
        kb2 = InMemoryLookupKB(vocab=en_vocab, entity_vector_length=3)
        kb2.from_disk(str(file_path))

    # final assertions
    _check_kb(kb2)


def _get_dummy_kb(vocab):
    kb = InMemoryLookupKB(vocab, entity_vector_length=3)
    kb.add_entity(entity="Q53", freq=33, entity_vector=[0, 5, 3])
    kb.add_entity(entity="Q17", freq=2, entity_vector=[7, 1, 0])
    kb.add_entity(entity="Q007", freq=7, entity_vector=[0, 0, 7])
    kb.add_entity(entity="Q44", freq=342, entity_vector=[4, 4, 4])

    kb.add_alias(alias="double07", entities=["Q17", "Q007"], probabilities=[0.1, 0.9])
    kb.add_alias(
        alias="guy",
        entities=["Q53", "Q007", "Q17", "Q44"],
        probabilities=[0.3, 0.3, 0.2, 0.1],
    )
    kb.add_alias(alias="random", entities=["Q007"], probabilities=[1.0])
    return kb


def _check_kb(kb):
    # check entities
    assert kb.get_size_entities() == 4
    for entity_string in ["Q53", "Q17", "Q007", "Q44"]:
        assert entity_string in kb.get_entity_strings()
    for entity_string in ["", "Q0"]:
        assert entity_string not in kb.get_entity_strings()

    # check aliases
    assert kb.get_size_aliases() == 3
    for alias_string in ["double07", "guy", "random"]:
        assert alias_string in kb.get_alias_strings()
    for alias_string in ["nothingness", "", "randomnoise"]:
        assert alias_string not in kb.get_alias_strings()

    # check candidates & probabilities
    candidates = sorted(kb.get_alias_candidates("double07"), key=lambda x: x.entity_)
    assert len(candidates) == 2

    assert candidates[0].entity_ == "Q007"
    assert 6.999 < candidates[0].entity_freq < 7.01
    assert candidates[0].entity_vector == [0, 0, 7]
    assert candidates[0].alias_ == "double07"
    assert 0.899 < candidates[0].prior_prob < 0.901

    assert candidates[1].entity_ == "Q17"
    assert 1.99 < candidates[1].entity_freq < 2.01
    assert candidates[1].entity_vector == [7, 1, 0]
    assert candidates[1].alias_ == "double07"
    assert 0.099 < candidates[1].prior_prob < 0.101


def test_serialize_subclassed_kb():
    """Check that IO of a custom KB works fine as part of an EL pipe."""

    config_string = """
    [nlp]
    lang = "en"
    pipeline = ["entity_linker"]

    [components]

    [components.entity_linker]
    factory = "entity_linker"

    [components.entity_linker.generate_empty_kb]
    @misc = "kb_test.CustomEmptyKB.v1"

    [initialize]

    [initialize.components]

    [initialize.components.entity_linker]

    [initialize.components.entity_linker.kb_loader]
    @misc = "kb_test.CustomKB.v1"
    entity_vector_length = 342
    custom_field = 666
    """

    class SubInMemoryLookupKB(InMemoryLookupKB):
        def __init__(self, vocab, entity_vector_length, custom_field):
            super().__init__(vocab, entity_vector_length)
            self.custom_field = custom_field

        def to_disk(self, path, exclude: Iterable[str] = SimpleFrozenList()):
            """We overwrite InMemoryLookupKB.to_disk() to ensure that
            self.custom_field is stored as well."""
            path = ensure_path(path)
            if not path.exists():
                path.mkdir(parents=True)
            if not path.is_dir():
                raise ValueError(Errors.E928.format(loc=path))

            def serialize_custom_fields(file_path: Path) -> None:
                srsly.write_json(file_path, {"custom_field": self.custom_field})

            serialize = {
                "contents": lambda p: self.write_contents(p),
                "strings.json": lambda p: self.vocab.strings.to_disk(p),
                "custom_fields": lambda p: serialize_custom_fields(p),
            }
            util.to_disk(path, serialize, exclude)

        def from_disk(self, path, exclude: Iterable[str] = SimpleFrozenList()):
            """We overwrite InMemoryLookupKB.from_disk() to ensure that
            self.custom_field is loaded as well."""
            path = ensure_path(path)
            if not path.exists():
                raise ValueError(Errors.E929.format(loc=path))
            if not path.is_dir():
                raise ValueError(Errors.E928.format(loc=path))

            def deserialize_custom_fields(file_path: Path) -> None:
                self.custom_field = srsly.read_json(file_path)["custom_field"]

            deserialize: Dict[str, Callable[[Any], Any]] = {
                "contents": lambda p: self.read_contents(p),
                "strings.json": lambda p: self.vocab.strings.from_disk(p),
                "custom_fields": lambda p: deserialize_custom_fields(p),
            }
            util.from_disk(path, deserialize, exclude)

    @registry.misc("kb_test.CustomEmptyKB.v1")
    def empty_custom_kb() -> Callable[[Vocab, int], SubInMemoryLookupKB]:
        def empty_kb_factory(vocab: Vocab, entity_vector_length: int):
            return SubInMemoryLookupKB(
                vocab=vocab,
                entity_vector_length=entity_vector_length,
                custom_field=0,
            )

        return empty_kb_factory

    @registry.misc("kb_test.CustomKB.v1")
    def custom_kb(
        entity_vector_length: int, custom_field: int
    ) -> Callable[[Vocab], SubInMemoryLookupKB]:
        def custom_kb_factory(vocab):
            kb = SubInMemoryLookupKB(
                vocab=vocab,
                entity_vector_length=entity_vector_length,
                custom_field=custom_field,
            )
            kb.add_entity("random_entity", 0.0, zeros(entity_vector_length))
            return kb

        return custom_kb_factory

    config = Config().from_str(config_string)
    nlp = load_model_from_config(config, auto_fill=True)
    nlp.initialize()

    entity_linker = nlp.get_pipe("entity_linker")
    assert type(entity_linker.kb) == SubInMemoryLookupKB
    assert entity_linker.kb.entity_vector_length == 342
    assert entity_linker.kb.custom_field == 666

    # Make sure the custom KB is serialized correctly
    with make_tempdir() as tmp_dir:
        nlp.to_disk(tmp_dir)
        nlp2 = util.load_model_from_path(tmp_dir)
        entity_linker2 = nlp2.get_pipe("entity_linker")
        # After IO, the KB is restored as the custom subclass because
        # generate_empty_kb points at the custom factory
        assert type(entity_linker2.kb) == SubInMemoryLookupKB
        assert entity_linker2.kb.entity_vector_length == 342
        assert entity_linker2.kb.custom_field == 666
7,066
34.691919
111
py
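The disk round-trip exercised by test_serialize_kb_disk, reduced to its core (entity and alias values are illustrative):

import tempfile
from pathlib import Path

from spacy.kb.kb_in_memory import InMemoryLookupKB
from spacy.vocab import Vocab

vocab = Vocab()
kb = InMemoryLookupKB(vocab, entity_vector_length=3)
kb.add_entity(entity="Q1", freq=10, entity_vector=[1, 2, 3])
kb.add_alias(alias="one", entities=["Q1"], probabilities=[1.0])
with tempfile.TemporaryDirectory() as d:
    path = Path(d) / "kb"
    kb.to_disk(path)
    # from_disk expects a KB constructed with the same entity_vector_length
    kb2 = InMemoryLookupKB(vocab, entity_vector_length=3)
    kb2.from_disk(path)
assert kb2.get_size_entities() == 1
assert "one" in kb2.get_alias_strings()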
spaCy
spaCy-master/spacy/tests/serialize/test_serialize_language.py
import pickle
import re

import pytest

from spacy.lang.en import English
from spacy.lang.it import Italian
from spacy.language import Language
from spacy.tokenizer import Tokenizer
from spacy.training import Example
from spacy.util import load_config_from_str

from ..util import make_tempdir


@pytest.fixture
def meta_data():
    return {
        "name": "name-in-fixture",
        "version": "version-in-fixture",
        "description": "description-in-fixture",
        "author": "author-in-fixture",
        "email": "email-in-fixture",
        "url": "url-in-fixture",
        "license": "license-in-fixture",
        "vectors": {"width": 0, "vectors": 0, "keys": 0, "name": None},
    }


@pytest.mark.issue(2482)
def test_issue2482():
    """Test we can serialize and deserialize a blank NER or parser model."""
    nlp = Italian()
    nlp.add_pipe("ner")
    b = nlp.to_bytes()
    Italian().from_bytes(b)


CONFIG_ISSUE_6950 = """
[nlp]
lang = "en"
pipeline = ["tok2vec", "tagger"]

[components]

[components.tok2vec]
factory = "tok2vec"

[components.tok2vec.model]
@architectures = "spacy.Tok2Vec.v1"

[components.tok2vec.model.embed]
@architectures = "spacy.MultiHashEmbed.v1"
width = ${components.tok2vec.model.encode:width}
attrs = ["NORM","PREFIX","SUFFIX","SHAPE"]
rows = [5000,2500,2500,2500]
include_static_vectors = false

[components.tok2vec.model.encode]
@architectures = "spacy.MaxoutWindowEncoder.v1"
width = 96
depth = 4
window_size = 1
maxout_pieces = 3

[components.ner]
factory = "ner"

[components.tagger]
factory = "tagger"

[components.tagger.model]
@architectures = "spacy.Tagger.v2"
nO = null

[components.tagger.model.tok2vec]
@architectures = "spacy.Tok2VecListener.v1"
width = ${components.tok2vec.model.encode:width}
upstream = "*"
"""


@pytest.mark.issue(6950)
def test_issue6950():
    """Test that the nlp object with initialized tok2vec with listeners pickles
    correctly (and doesn't have lambdas).
    """
    nlp = English.from_config(load_config_from_str(CONFIG_ISSUE_6950))
    nlp.initialize(lambda: [Example.from_dict(nlp.make_doc("hello"), {"tags": ["V"]})])
    pickle.dumps(nlp)
    nlp("hello")
    pickle.dumps(nlp)


def test_serialize_language_meta_disk(meta_data):
    language = Language(meta=meta_data)
    with make_tempdir() as d:
        language.to_disk(d)
        new_language = Language().from_disk(d)
    assert new_language.meta == language.meta


def test_serialize_with_custom_tokenizer():
    """Test that serialization with custom tokenizer works without token_match.
    See: https://support.prodi.gy/t/how-to-save-a-custom-tokenizer/661/2
    """
    prefix_re = re.compile(r"""1/|2/|:[0-9][0-9][A-K]:|:[0-9][0-9]:""")
    suffix_re = re.compile(r"""""")
    infix_re = re.compile(r"""[~]""")

    def custom_tokenizer(nlp):
        return Tokenizer(
            nlp.vocab,
            {},
            prefix_search=prefix_re.search,
            suffix_search=suffix_re.search,
            infix_finditer=infix_re.finditer,
        )

    nlp = Language()
    nlp.tokenizer = custom_tokenizer(nlp)
    with make_tempdir() as d:
        nlp.to_disk(d)


def test_serialize_language_exclude(meta_data):
    name = "name-in-fixture"
    nlp = Language(meta=meta_data)
    assert nlp.meta["name"] == name
    new_nlp = Language().from_bytes(nlp.to_bytes())
    assert new_nlp.meta["name"] == name
    new_nlp = Language().from_bytes(nlp.to_bytes(), exclude=["meta"])
    assert not new_nlp.meta["name"] == name
    new_nlp = Language().from_bytes(nlp.to_bytes(exclude=["meta"]))
    assert not new_nlp.meta["name"] == name
3,594
25.433824
87
py
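The exclude semantics checked at the end of this file, in isolation (the meta name is illustrative):

from spacy.language import Language

nlp = Language(meta={"name": "demo"})
nlp2 = Language().from_bytes(nlp.to_bytes())
assert nlp2.meta["name"] == "demo"
# Excluding "meta" on the writing side drops it from the round-trip,
# so the reader keeps its own default meta instead.
nlp3 = Language().from_bytes(nlp.to_bytes(exclude=["meta"]))
assert nlp3.meta["name"] != "demo"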
spaCy
spaCy-master/spacy/tests/serialize/test_serialize_pipeline.py
import pickle

import pytest
import srsly
from thinc.api import Linear

import spacy
from spacy import Vocab, load, registry
from spacy.lang.en import English
from spacy.language import Language
from spacy.pipeline import (
    DependencyParser,
    EntityRecognizer,
    EntityRuler,
    SentenceRecognizer,
    Tagger,
    TextCategorizer,
    TrainablePipe,
)
from spacy.pipeline.dep_parser import DEFAULT_PARSER_MODEL
from spacy.pipeline.senter import DEFAULT_SENTER_MODEL
from spacy.pipeline.tagger import DEFAULT_TAGGER_MODEL
from spacy.pipeline.textcat import DEFAULT_SINGLE_TEXTCAT_MODEL
from spacy.tokens import Span
from spacy.util import ensure_path, load_model

from ..util import make_tempdir

test_parsers = [DependencyParser, EntityRecognizer]


@pytest.fixture
def parser(en_vocab):
    config = {
        "learn_tokens": False,
        "min_action_freq": 30,
        "update_with_oracle_cut_size": 100,
        "beam_width": 1,
        "beam_update_prob": 1.0,
        "beam_density": 0.0,
    }
    cfg = {"model": DEFAULT_PARSER_MODEL}
    model = registry.resolve(cfg, validate=True)["model"]
    parser = DependencyParser(en_vocab, model, **config)
    parser.add_label("nsubj")
    return parser


@pytest.fixture
def blank_parser(en_vocab):
    config = {
        "learn_tokens": False,
        "min_action_freq": 30,
        "update_with_oracle_cut_size": 100,
        "beam_width": 1,
        "beam_update_prob": 1.0,
        "beam_density": 0.0,
    }
    cfg = {"model": DEFAULT_PARSER_MODEL}
    model = registry.resolve(cfg, validate=True)["model"]
    parser = DependencyParser(en_vocab, model, **config)
    return parser


@pytest.fixture
def taggers(en_vocab):
    cfg = {"model": DEFAULT_TAGGER_MODEL}
    model = registry.resolve(cfg, validate=True)["model"]
    tagger1 = Tagger(en_vocab, model)
    tagger2 = Tagger(en_vocab, model)
    return tagger1, tagger2


@pytest.mark.issue(3456)
def test_issue3456():
    # this crashed because of a padding error in layer.ops.unflatten in thinc
    nlp = English()
    tagger = nlp.add_pipe("tagger")
    tagger.add_label("A")
    nlp.initialize()
    list(nlp.pipe(["hi", ""]))


@pytest.mark.issue(3526)
def test_issue_3526_1(en_vocab):
    patterns = [
        {"label": "HELLO", "pattern": "hello world"},
        {"label": "BYE", "pattern": [{"LOWER": "bye"}, {"LOWER": "bye"}]},
        {"label": "HELLO", "pattern": [{"ORTH": "HELLO"}]},
        {"label": "COMPLEX", "pattern": [{"ORTH": "foo", "OP": "*"}]},
        {"label": "TECH_ORG", "pattern": "Apple", "id": "a1"},
    ]
    nlp = Language(vocab=en_vocab)
    ruler = EntityRuler(nlp, patterns=patterns, overwrite_ents=True)
    ruler_bytes = ruler.to_bytes()
    assert len(ruler) == len(patterns)
    assert len(ruler.labels) == 4
    assert ruler.overwrite
    new_ruler = EntityRuler(nlp)
    new_ruler = new_ruler.from_bytes(ruler_bytes)
    assert len(new_ruler) == len(ruler)
    assert len(new_ruler.labels) == 4
    assert new_ruler.overwrite == ruler.overwrite
    assert new_ruler.ent_id_sep == ruler.ent_id_sep


@pytest.mark.issue(3526)
def test_issue_3526_2(en_vocab):
    patterns = [
        {"label": "HELLO", "pattern": "hello world"},
        {"label": "BYE", "pattern": [{"LOWER": "bye"}, {"LOWER": "bye"}]},
        {"label": "HELLO", "pattern": [{"ORTH": "HELLO"}]},
        {"label": "COMPLEX", "pattern": [{"ORTH": "foo", "OP": "*"}]},
        {"label": "TECH_ORG", "pattern": "Apple", "id": "a1"},
    ]
    nlp = Language(vocab=en_vocab)
    ruler = EntityRuler(nlp, patterns=patterns, overwrite_ents=True)
    bytes_old_style = srsly.msgpack_dumps(ruler.patterns)
    new_ruler = EntityRuler(nlp)
    new_ruler = new_ruler.from_bytes(bytes_old_style)
    assert len(new_ruler) == len(ruler)
    for pattern in ruler.patterns:
        assert pattern in new_ruler.patterns
    assert new_ruler.overwrite is not ruler.overwrite


@pytest.mark.issue(3526)
def test_issue_3526_3(en_vocab):
    patterns = [
        {"label": "HELLO", "pattern": "hello world"},
        {"label": "BYE", "pattern": [{"LOWER": "bye"}, {"LOWER": "bye"}]},
        {"label": "HELLO", "pattern": [{"ORTH": "HELLO"}]},
        {"label": "COMPLEX", "pattern": [{"ORTH": "foo", "OP": "*"}]},
        {"label": "TECH_ORG", "pattern": "Apple", "id": "a1"},
    ]
    nlp = Language(vocab=en_vocab)
    ruler = EntityRuler(nlp, patterns=patterns, overwrite_ents=True)
    with make_tempdir() as tmpdir:
        out_file = tmpdir / "entity_ruler"
        srsly.write_jsonl(out_file.with_suffix(".jsonl"), ruler.patterns)
        new_ruler = EntityRuler(nlp).from_disk(out_file)
        for pattern in ruler.patterns:
            assert pattern in new_ruler.patterns
        assert len(new_ruler) == len(ruler)
        assert new_ruler.overwrite is not ruler.overwrite


@pytest.mark.issue(3526)
def test_issue_3526_4(en_vocab):
    nlp = Language(vocab=en_vocab)
    patterns = [{"label": "ORG", "pattern": "Apple"}]
    config = {"overwrite_ents": True}
    ruler = nlp.add_pipe("entity_ruler", config=config)
    ruler.add_patterns(patterns)
    with make_tempdir() as tmpdir:
        nlp.to_disk(tmpdir)
        ruler = nlp.get_pipe("entity_ruler")
        assert ruler.patterns == [{"label": "ORG", "pattern": "Apple"}]
        assert ruler.overwrite is True
        nlp2 = load(tmpdir)
        new_ruler = nlp2.get_pipe("entity_ruler")
        assert new_ruler.patterns == [{"label": "ORG", "pattern": "Apple"}]
        assert new_ruler.overwrite is True


@pytest.mark.issue(4042)
def test_issue4042():
    """Test that serialization of an EntityRuler before NER works fine."""
    nlp = English()
    # add ner pipe
    ner = nlp.add_pipe("ner")
    ner.add_label("SOME_LABEL")
    nlp.initialize()
    # Add entity ruler
    patterns = [
        {"label": "MY_ORG", "pattern": "Apple"},
        {"label": "MY_GPE", "pattern": [{"lower": "san"}, {"lower": "francisco"}]},
    ]
    # works fine with "after"
    ruler = nlp.add_pipe("entity_ruler", before="ner")
    ruler.add_patterns(patterns)
    doc1 = nlp("What do you think about Apple ?")
    assert doc1.ents[0].label_ == "MY_ORG"

    with make_tempdir() as d:
        output_dir = ensure_path(d)
        if not output_dir.exists():
            output_dir.mkdir()
        nlp.to_disk(output_dir)
        nlp2 = load_model(output_dir)
        doc2 = nlp2("What do you think about Apple ?")
        assert doc2.ents[0].label_ == "MY_ORG"


@pytest.mark.issue(4042)
def test_issue4042_bug2():
    """
    Test that serialization of an NER works fine when new labels were added.
    This is the second of two bugs underlying issue 4042.
    """
    nlp1 = English()
    # add ner pipe
    ner1 = nlp1.add_pipe("ner")
    ner1.add_label("SOME_LABEL")
    nlp1.initialize()
    # add a new label to the doc
    doc1 = nlp1("What do you think about Apple ?")
    assert len(ner1.labels) == 1
    assert "SOME_LABEL" in ner1.labels
    apple_ent = Span(doc1, 5, 6, label="MY_ORG")
    doc1.ents = list(doc1.ents) + [apple_ent]
    # Add the label explicitly. Previously we didn't require this.
    ner1.add_label("MY_ORG")
    ner1(doc1)
    assert len(ner1.labels) == 2
    assert "SOME_LABEL" in ner1.labels
    assert "MY_ORG" in ner1.labels
    with make_tempdir() as d:
        # assert IO goes fine
        output_dir = ensure_path(d)
        if not output_dir.exists():
            output_dir.mkdir()
        ner1.to_disk(output_dir)
        config = {}
        ner2 = nlp1.create_pipe("ner", config=config)
        ner2.from_disk(output_dir)
        assert len(ner2.labels) == 2


@pytest.mark.issue(4725)
def test_issue4725_1():
    """Ensure the pickling of the NER goes well"""
    vocab = Vocab(vectors_name="test_vocab_add_vector")
    nlp = English(vocab=vocab)
    config = {
        "update_with_oracle_cut_size": 111,
    }
    ner = nlp.create_pipe("ner", config=config)
    with make_tempdir() as tmp_path:
        with (tmp_path / "ner.pkl").open("wb") as file_:
            pickle.dump(ner, file_)
            assert ner.cfg["update_with_oracle_cut_size"] == 111

        with (tmp_path / "ner.pkl").open("rb") as file_:
            ner2 = pickle.load(file_)
            assert ner2.cfg["update_with_oracle_cut_size"] == 111


@pytest.mark.parametrize("Parser", test_parsers)
def test_serialize_parser_roundtrip_bytes(en_vocab, Parser):
    cfg = {"model": DEFAULT_PARSER_MODEL}
    model = registry.resolve(cfg, validate=True)["model"]
    parser = Parser(en_vocab, model)
    new_parser = Parser(en_vocab, model)
    new_parser = new_parser.from_bytes(parser.to_bytes(exclude=["vocab"]))
    bytes_2 = new_parser.to_bytes(exclude=["vocab"])
    bytes_3 = parser.to_bytes(exclude=["vocab"])
    assert len(bytes_2) == len(bytes_3)
    assert bytes_2 == bytes_3


@pytest.mark.parametrize("Parser", test_parsers)
def test_serialize_parser_strings(Parser):
    vocab1 = Vocab()
    label = "FunnyLabel"
    assert label not in vocab1.strings
    cfg = {"model": DEFAULT_PARSER_MODEL}
    model = registry.resolve(cfg, validate=True)["model"]
    parser1 = Parser(vocab1, model)
    parser1.add_label(label)
    assert label in parser1.vocab.strings
    vocab2 = Vocab()
    assert label not in vocab2.strings
    parser2 = Parser(vocab2, model)
    parser2 = parser2.from_bytes(parser1.to_bytes(exclude=["vocab"]))
    assert label in parser2.vocab.strings


@pytest.mark.parametrize("Parser", test_parsers)
def test_serialize_parser_roundtrip_disk(en_vocab, Parser):
    cfg = {"model": DEFAULT_PARSER_MODEL}
    model = registry.resolve(cfg, validate=True)["model"]
    parser = Parser(en_vocab, model)
    with make_tempdir() as d:
        file_path = d / "parser"
        parser.to_disk(file_path)
        parser_d = Parser(en_vocab, model)
        parser_d = parser_d.from_disk(file_path)
        parser_bytes = parser.to_bytes(exclude=["model", "vocab"])
        parser_d_bytes = parser_d.to_bytes(exclude=["model", "vocab"])
        assert len(parser_bytes) == len(parser_d_bytes)
        assert parser_bytes == parser_d_bytes


def test_to_from_bytes(parser, blank_parser):
    assert parser.model is not True
    assert blank_parser.model is not True
    assert blank_parser.moves.n_moves != parser.moves.n_moves
    bytes_data = parser.to_bytes(exclude=["vocab"])
    # the blank parser needs to be resized before we can call from_bytes
    blank_parser.model.attrs["resize_output"](blank_parser.model, parser.moves.n_moves)
    blank_parser.from_bytes(bytes_data)
    assert blank_parser.model is not True
    assert blank_parser.moves.n_moves == parser.moves.n_moves


def test_serialize_tagger_roundtrip_bytes(en_vocab, taggers):
    tagger1 = taggers[0]
    tagger1_b = tagger1.to_bytes()
    tagger1 = tagger1.from_bytes(tagger1_b)
    assert tagger1.to_bytes() == tagger1_b
    cfg = {"model": DEFAULT_TAGGER_MODEL}
    model = registry.resolve(cfg, validate=True)["model"]
    new_tagger1 = Tagger(en_vocab, model).from_bytes(tagger1_b)
    new_tagger1_b = new_tagger1.to_bytes()
    assert len(new_tagger1_b) == len(tagger1_b)
    assert new_tagger1_b == tagger1_b
def test_serialize_tagger_roundtrip_disk(en_vocab, taggers): tagger1, tagger2 = taggers with make_tempdir() as d: file_path1 = d / "tagger1" file_path2 = d / "tagger2" tagger1.to_disk(file_path1) tagger2.to_disk(file_path2) cfg = {"model": DEFAULT_TAGGER_MODEL} model = registry.resolve(cfg, validate=True)["model"] tagger1_d = Tagger(en_vocab, model).from_disk(file_path1) tagger2_d = Tagger(en_vocab, model).from_disk(file_path2) assert tagger1_d.to_bytes() == tagger2_d.to_bytes() def test_serialize_tagger_strings(en_vocab, de_vocab, taggers): label = "SomeWeirdLabel" assert label not in en_vocab.strings assert label not in de_vocab.strings tagger = taggers[0] assert label not in tagger.vocab.strings with make_tempdir() as d: # check that custom labels are serialized as part of the component's strings.jsonl tagger.add_label(label) assert label in tagger.vocab.strings file_path = d / "tagger1" tagger.to_disk(file_path) # ensure that the custom strings are loaded back in when using the tagger in another pipeline cfg = {"model": DEFAULT_TAGGER_MODEL} model = registry.resolve(cfg, validate=True)["model"] tagger2 = Tagger(de_vocab, model).from_disk(file_path) assert label in tagger2.vocab.strings @pytest.mark.issue(1105) def test_serialize_textcat_empty(en_vocab): # See issue #1105 cfg = {"model": DEFAULT_SINGLE_TEXTCAT_MODEL} model = registry.resolve(cfg, validate=True)["model"] textcat = TextCategorizer(en_vocab, model, threshold=0.5) textcat.to_bytes(exclude=["vocab"]) @pytest.mark.parametrize("Parser", test_parsers) def test_serialize_pipe_exclude(en_vocab, Parser): cfg = {"model": DEFAULT_PARSER_MODEL} model = registry.resolve(cfg, validate=True)["model"] def get_new_parser(): new_parser = Parser(en_vocab, model) return new_parser parser = Parser(en_vocab, model) parser.cfg["foo"] = "bar" new_parser = get_new_parser().from_bytes(parser.to_bytes(exclude=["vocab"])) assert "foo" in new_parser.cfg new_parser = get_new_parser().from_bytes( parser.to_bytes(exclude=["vocab"]), exclude=["cfg"] ) assert "foo" not in new_parser.cfg new_parser = get_new_parser().from_bytes( parser.to_bytes(exclude=["cfg"]), exclude=["vocab"] ) assert "foo" not in new_parser.cfg def test_serialize_sentencerecognizer(en_vocab): cfg = {"model": DEFAULT_SENTER_MODEL} model = registry.resolve(cfg, validate=True)["model"] sr = SentenceRecognizer(en_vocab, model) sr_b = sr.to_bytes() sr_d = SentenceRecognizer(en_vocab, model).from_bytes(sr_b) assert sr.to_bytes() == sr_d.to_bytes() def test_serialize_pipeline_disable_enable(): nlp = English() nlp.add_pipe("ner") nlp.add_pipe("tagger") nlp.disable_pipe("tagger") assert nlp.config["nlp"]["disabled"] == ["tagger"] config = nlp.config.copy() nlp2 = English.from_config(config) assert nlp2.pipe_names == ["ner"] assert nlp2.component_names == ["ner", "tagger"] assert nlp2.disabled == ["tagger"] assert nlp2.config["nlp"]["disabled"] == ["tagger"] with make_tempdir() as d: nlp2.to_disk(d) nlp3 = spacy.load(d) assert nlp3.pipe_names == ["ner"] assert nlp3.component_names == ["ner", "tagger"] with make_tempdir() as d: nlp3.to_disk(d) nlp4 = spacy.load(d, disable=["ner"]) assert nlp4.pipe_names == [] assert nlp4.component_names == ["ner", "tagger"] assert nlp4.disabled == ["ner", "tagger"] with make_tempdir() as d: nlp.to_disk(d) nlp5 = spacy.load(d, exclude=["tagger"]) assert nlp5.pipe_names == ["ner"] assert nlp5.component_names == ["ner"] assert nlp5.disabled == [] def test_serialize_custom_trainable_pipe(): class BadCustomPipe1(TrainablePipe): def __init__(self, vocab): pass class 
BadCustomPipe2(TrainablePipe): def __init__(self, vocab): self.vocab = vocab self.model = None class CustomPipe(TrainablePipe): def __init__(self, vocab, model): self.vocab = vocab self.model = model pipe = BadCustomPipe1(Vocab()) with pytest.raises(ValueError): pipe.to_bytes() with make_tempdir() as d: with pytest.raises(ValueError): pipe.to_disk(d) pipe = BadCustomPipe2(Vocab()) with pytest.raises(ValueError): pipe.to_bytes() with make_tempdir() as d: with pytest.raises(ValueError): pipe.to_disk(d) pipe = CustomPipe(Vocab(), Linear()) pipe_bytes = pipe.to_bytes() new_pipe = CustomPipe(Vocab(), Linear()).from_bytes(pipe_bytes) assert new_pipe.to_bytes() == pipe_bytes with make_tempdir() as d: pipe.to_disk(d) new_pipe = CustomPipe(Vocab(), Linear()).from_disk(d) assert new_pipe.to_bytes() == pipe_bytes def test_load_without_strings(): nlp = spacy.blank("en") orig_strings_length = len(nlp.vocab.strings) word = "unlikely_word_" * 20 nlp.vocab.strings.add(word) assert len(nlp.vocab.strings) == orig_strings_length + 1 with make_tempdir() as d: nlp.to_disk(d) # reload with strings reloaded_nlp = load(d) assert len(nlp.vocab.strings) == len(reloaded_nlp.vocab.strings) assert word in reloaded_nlp.vocab.strings # reload without strings reloaded_nlp = load(d, exclude=["strings"]) assert orig_strings_length == len(reloaded_nlp.vocab.strings) assert word not in reloaded_nlp.vocab.strings
file_length: 16,840 | avg_line_length: 34.232218 | max_line_length: 101 | extension_type: py
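Note: the to_bytes/from_bytes round-trip exercised throughout the file above reduces to a short, self-contained sketch. This is illustrative only — a blank English pipeline with a placeholder "NN" label, not the fixtures the tests use:

from spacy.lang.en import English

def make_tagger_nlp():
    # Small pipeline with a tagger that has a single placeholder label.
    nlp = English()
    tagger = nlp.add_pipe("tagger")
    tagger.add_label("NN")
    nlp.initialize()
    return nlp

nlp1 = make_tagger_nlp()
tagger_bytes = nlp1.get_pipe("tagger").to_bytes(exclude=["vocab"])

# Loading those bytes into an identically shaped tagger reproduces them.
nlp2 = make_tagger_nlp()
nlp2.get_pipe("tagger").from_bytes(tagger_bytes)
assert nlp2.get_pipe("tagger").to_bytes(exclude=["vocab"]) == tagger_bytes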
repo: spaCy | file: spaCy-master/spacy/tests/serialize/test_serialize_span_groups.py
import pytest from spacy.tokens import Span, SpanGroup from spacy.tokens._dict_proxies import SpanGroups @pytest.mark.issue(10685) def test_issue10685(en_tokenizer): """Test `SpanGroups` de/serialization""" # Start with a Doc with no SpanGroups doc = en_tokenizer("Will it blend?") # Test empty `SpanGroups` de/serialization: assert len(doc.spans) == 0 doc.spans.from_bytes(doc.spans.to_bytes()) assert len(doc.spans) == 0 # Test non-empty `SpanGroups` de/serialization: doc.spans["test"] = SpanGroup(doc, name="test", spans=[doc[0:1]]) doc.spans["test2"] = SpanGroup(doc, name="test", spans=[doc[1:2]]) def assert_spangroups(): assert len(doc.spans) == 2 assert doc.spans["test"].name == "test" assert doc.spans["test2"].name == "test" assert list(doc.spans["test"]) == [doc[0:1]] assert list(doc.spans["test2"]) == [doc[1:2]] # Sanity check the currently-expected behavior assert_spangroups() # Now test serialization/deserialization: doc.spans.from_bytes(doc.spans.to_bytes()) assert_spangroups() def test_span_groups_serialization_mismatches(en_tokenizer): """Test the serialization of multiple mismatching `SpanGroups` keys and `SpanGroup.name`s""" doc = en_tokenizer("How now, brown cow?") # Some variety: # 1 SpanGroup where its name matches its key # 2 SpanGroups that have the same name--which is not a key # 2 SpanGroups that have the same name--which is a key # 1 SpanGroup that is a value for 2 different keys (where its name is a key) # 1 SpanGroup that is a value for 2 different keys (where its name is not a key) groups = doc.spans groups["key1"] = SpanGroup(doc, name="key1", spans=[doc[0:1], doc[1:2]]) groups["key2"] = SpanGroup(doc, name="too", spans=[doc[3:4], doc[4:5]]) groups["key3"] = SpanGroup(doc, name="too", spans=[doc[1:2], doc[0:1]]) groups["key4"] = SpanGroup(doc, name="key4", spans=[doc[0:1]]) groups["key5"] = SpanGroup(doc, name="key4", spans=[doc[0:1]]) sg6 = SpanGroup(doc, name="key6", spans=[doc[0:1]]) groups["key6"] = sg6 groups["key7"] = sg6 sg8 = SpanGroup(doc, name="also", spans=[doc[1:2]]) groups["key8"] = sg8 groups["key9"] = sg8 regroups = SpanGroups(doc).from_bytes(groups.to_bytes()) # Assert regroups == groups assert regroups.keys() == groups.keys() for key, regroup in regroups.items(): # Assert regroup == groups[key] assert regroup.name == groups[key].name assert list(regroup) == list(groups[key]) @pytest.mark.parametrize( "spans_bytes,doc_text,expected_spangroups,expected_warning", # The bytestrings below were generated from an earlier version of spaCy # that serialized `SpanGroups` as a list of SpanGroup bytes (via SpanGroups.to_bytes). # Comments preceding the bytestrings indicate from what Doc they were created. 
[ # Empty SpanGroups: (b"\x90", "", {}, False), # doc = nlp("Will it blend?") # doc.spans['test'] = SpanGroup(doc, name='test', spans=[doc[0:1]]) ( b"\x91\xc4C\x83\xa4name\xa4test\xa5attrs\x80\xa5spans\x91\xc4(\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x04", "Will it blend?", {"test": {"name": "test", "spans": [(0, 1)]}}, False, ), # doc = nlp("Will it blend?") # doc.spans['test'] = SpanGroup(doc, name='test', spans=[doc[0:1]]) # doc.spans['test2'] = SpanGroup(doc, name='test', spans=[doc[1:2]]) ( b"\x92\xc4C\x83\xa4name\xa4test\xa5attrs\x80\xa5spans\x91\xc4(\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x04\xc4C\x83\xa4name\xa4test\xa5attrs\x80\xa5spans\x91\xc4(\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x02\x00\x00\x00\x05\x00\x00\x00\x07", "Will it blend?", # We expect only 1 SpanGroup to be in doc.spans in this example # because there are 2 `SpanGroup`s that have the same .name. See #10685. {"test": {"name": "test", "spans": [(1, 2)]}}, True, ), # doc = nlp('How now, brown cow?') # doc.spans['key1'] = SpanGroup(doc, name='key1', spans=[doc[0:1], doc[1:2]]) # doc.spans['key2'] = SpanGroup(doc, name='too', spans=[doc[3:4], doc[4:5]]) # doc.spans['key3'] = SpanGroup(doc, name='too', spans=[doc[1:2], doc[0:1]]) # doc.spans['key4'] = SpanGroup(doc, name='key4', spans=[doc[0:1]]) # doc.spans['key5'] = SpanGroup(doc, name='key4', spans=[doc[0:1]]) ( b"\x95\xc4m\x83\xa4name\xa4key1\xa5attrs\x80\xa5spans\x92\xc4(\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x03\xc4(\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x02\x00\x00\x00\x04\x00\x00\x00\x07\xc4l\x83\xa4name\xa3too\xa5attrs\x80\xa5spans\x92\xc4(\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x04\x00\x00\x00\t\x00\x00\x00\x0e\xc4(\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\x0f\x00\x00\x00\x12\xc4l\x83\xa4name\xa3too\xa5attrs\x80\xa5spans\x92\xc4(\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x02\x00\x00\x00\x04\x00\x00\x00\x07\xc4(\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x03\xc4C\x83\xa4name\xa4key4\xa5attrs\x80\xa5spans\x91\xc4(\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x03\xc4C\x83\xa4name\xa4key4\xa5attrs\x80\xa5spans\x91\xc4(\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x03", "How now, brown cow?", { "key1": {"name": "key1", "spans": [(0, 1), (1, 2)]}, "too": {"name": "too", "spans": [(1, 2), (0, 1)]}, "key4": {"name": "key4", "spans": [(0, 1)]}, }, True, ), ], ) def test_deserialize_span_groups_compat( en_tokenizer, spans_bytes, doc_text, expected_spangroups, 
expected_warning ): """Test backwards-compatibility of `SpanGroups` deserialization. This uses serializations (bytes) from a prior version of spaCy (before 3.3.1). spans_bytes (bytes): Serialized `SpanGroups` object. doc_text (str): Doc text. expected_spangroups (dict): Dict mapping every expected (after deserialization) `SpanGroups` key to a SpanGroup's "args", where a SpanGroup's args are given as a dict: {"name": span_group.name, "spans": [(span0.start, span0.end), ...]} expected_warning (bool): Whether a warning is to be expected from .from_bytes() --i.e. if more than 1 SpanGroup has the same .name within the `SpanGroups`. """ doc = en_tokenizer(doc_text) if expected_warning: with pytest.warns(UserWarning): doc.spans.from_bytes(spans_bytes) else: # TODO: explicitly check for lack of a warning doc.spans.from_bytes(spans_bytes) assert doc.spans.keys() == expected_spangroups.keys() for name, spangroup_args in expected_spangroups.items(): assert doc.spans[name].name == spangroup_args["name"] spans = [Span(doc, start, end) for start, end in spangroup_args["spans"]] assert list(doc.spans[name]) == spans def test_span_groups_serialization(en_tokenizer): doc = en_tokenizer("0 1 2 3 4 5 6") span_groups = SpanGroups(doc) spans = [doc[0:2], doc[1:3]] sg1 = SpanGroup(doc, spans=spans) span_groups["key1"] = sg1 span_groups["key2"] = sg1 span_groups["key3"] = [] reloaded_span_groups = SpanGroups(doc).from_bytes(span_groups.to_bytes()) assert span_groups.keys() == reloaded_span_groups.keys() for key, value in span_groups.items(): assert all( span == reloaded_span for span, reloaded_span in zip(span_groups[key], reloaded_span_groups[key]) )
file_length: 8,768 | avg_line_length: 53.12963 | max_line_length: 1,591 | extension_type: py
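Note: the SpanGroups round-trip under test boils down to the following sketch (a blank English pipeline is assumed; the group name "test" is arbitrary):

import spacy
from spacy.tokens import SpanGroup

nlp = spacy.blank("en")
doc = nlp("Will it blend?")
doc.spans["test"] = SpanGroup(doc, name="test", spans=[doc[0:1]])

# Serialize the whole container, then restore it onto a fresh Doc
# with the same tokenization.
spans_bytes = doc.spans.to_bytes()
doc2 = nlp("Will it blend?")
doc2.spans.from_bytes(spans_bytes)
assert list(doc2.spans["test"]) == [doc2[0:1]]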
repo: spaCy | file: spaCy-master/spacy/tests/serialize/test_serialize_tokenizer.py
import pickle
import re

import pytest

from spacy.attrs import ENT_IOB, ENT_TYPE
from spacy.lang.en import English
from spacy.tokenizer import Tokenizer
from spacy.tokens import Doc
from spacy.util import (
    compile_infix_regex,
    compile_prefix_regex,
    compile_suffix_regex,
    get_lang_class,
    load_model,
)

from ..util import assert_packed_msg_equal, make_tempdir


def load_tokenizer(b):
    tok = get_lang_class("en")().tokenizer
    tok.from_bytes(b)
    return tok


@pytest.mark.issue(2833)
def test_issue2833(en_vocab):
    """Test that a custom error is raised if a token or span is pickled."""
    doc = Doc(en_vocab, words=["Hello", "world"])
    with pytest.raises(NotImplementedError):
        pickle.dumps(doc[0])
    with pytest.raises(NotImplementedError):
        pickle.dumps(doc[0:2])


@pytest.mark.issue(3012)
def test_issue3012(en_vocab):
    """Test that the is_tagged attribute doesn't get overwritten when we
    from_array without tag information."""
    words = ["This", "is", "10", "%", "."]
    tags = ["DT", "VBZ", "CD", "NN", "."]
    pos = ["DET", "VERB", "NUM", "NOUN", "PUNCT"]
    ents = ["O", "O", "B-PERCENT", "I-PERCENT", "O"]
    doc = Doc(en_vocab, words=words, tags=tags, pos=pos, ents=ents)
    assert doc.has_annotation("TAG")
    expected = ("10", "NUM", "CD", "PERCENT")
    assert (doc[2].text, doc[2].pos_, doc[2].tag_, doc[2].ent_type_) == expected
    header = [ENT_IOB, ENT_TYPE]
    ent_array = doc.to_array(header)
    doc.from_array(header, ent_array)
    assert (doc[2].text, doc[2].pos_, doc[2].tag_, doc[2].ent_type_) == expected
    # Serializing then deserializing
    doc_bytes = doc.to_bytes()
    doc2 = Doc(en_vocab).from_bytes(doc_bytes)
    assert (doc2[2].text, doc2[2].pos_, doc2[2].tag_, doc2[2].ent_type_) == expected


@pytest.mark.issue(4190)
def test_issue4190():
    def customize_tokenizer(nlp):
        prefix_re = compile_prefix_regex(nlp.Defaults.prefixes)
        suffix_re = compile_suffix_regex(nlp.Defaults.suffixes)
        infix_re = compile_infix_regex(nlp.Defaults.infixes)
        # Remove all exceptions where a single letter is followed by a period (e.g. 'h.')
        exceptions = {
            k: v
            for k, v in dict(nlp.Defaults.tokenizer_exceptions).items()
            if not (len(k) == 2 and k[1] == ".")
        }
        new_tokenizer = Tokenizer(
            nlp.vocab,
            exceptions,
            prefix_search=prefix_re.search,
            suffix_search=suffix_re.search,
            infix_finditer=infix_re.finditer,
            token_match=nlp.tokenizer.token_match,
            faster_heuristics=False,
        )
        nlp.tokenizer = new_tokenizer

    test_string = "Test c."
    # Load default language
    nlp_1 = English()
    doc_1a = nlp_1(test_string)
    result_1a = [token.text for token in doc_1a]  # noqa: F841
    # Modify tokenizer
    customize_tokenizer(nlp_1)
    doc_1b = nlp_1(test_string)
    result_1b = [token.text for token in doc_1b]
    # Save and Reload
    with make_tempdir() as model_dir:
        nlp_1.to_disk(model_dir)
        nlp_2 = load_model(model_dir)
    # This should be the modified tokenizer
    doc_2 = nlp_2(test_string)
    result_2 = [token.text for token in doc_2]
    assert result_1b == result_2
    assert nlp_2.tokenizer.faster_heuristics is False


def test_serialize_custom_tokenizer(en_vocab, en_tokenizer):
    """Test that custom tokenizer with not all functions defined or empty
    properties can be serialized and deserialized correctly (see #2494,
    #4991)."""
    tokenizer = Tokenizer(en_vocab, suffix_search=en_tokenizer.suffix_search)
    tokenizer_bytes = tokenizer.to_bytes()
    Tokenizer(en_vocab).from_bytes(tokenizer_bytes)

    # test that empty/unset values are set correctly on deserialization
    tokenizer = get_lang_class("en")().tokenizer
    tokenizer.token_match = re.compile("test").match
    assert tokenizer.rules != {}
    assert tokenizer.token_match is not None
    assert tokenizer.url_match is not None
    assert tokenizer.prefix_search is not None
    assert tokenizer.infix_finditer is not None
    tokenizer.from_bytes(tokenizer_bytes)
    assert tokenizer.rules == {}
    assert tokenizer.token_match is None
    assert tokenizer.url_match is None
    assert tokenizer.prefix_search is None
    assert tokenizer.infix_finditer is None

    tokenizer = Tokenizer(en_vocab, rules={"ABC.": [{"ORTH": "ABC"}, {"ORTH": "."}]})
    tokenizer.rules = {}
    tokenizer_bytes = tokenizer.to_bytes()
    tokenizer_reloaded = Tokenizer(en_vocab).from_bytes(tokenizer_bytes)
    assert tokenizer_reloaded.rules == {}


@pytest.mark.parametrize("text", ["I💜you", "they’re", "“hello”"])
def test_serialize_tokenizer_roundtrip_bytes(en_tokenizer, text):
    tokenizer = en_tokenizer
    new_tokenizer = load_tokenizer(tokenizer.to_bytes())
    assert_packed_msg_equal(new_tokenizer.to_bytes(), tokenizer.to_bytes())
    assert new_tokenizer.to_bytes() == tokenizer.to_bytes()
    doc1 = tokenizer(text)
    doc2 = new_tokenizer(text)
    assert [token.text for token in doc1] == [token.text for token in doc2]


def test_serialize_tokenizer_roundtrip_disk(en_tokenizer):
    tokenizer = en_tokenizer
    with make_tempdir() as d:
        file_path = d / "tokenizer"
        tokenizer.to_disk(file_path)
        tokenizer_d = en_tokenizer.from_disk(file_path)
        assert tokenizer.to_bytes() == tokenizer_d.to_bytes()
file_length: 5,436 | avg_line_length: 35.246667 | max_line_length: 89 | extension_type: py
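Note: a minimal sketch of the custom-tokenizer round-trip tested above (a blank English vocab is assumed; the suffix pattern is arbitrary):

import re
from spacy.lang.en import English
from spacy.tokenizer import Tokenizer

nlp = English()
suffix_re = re.compile(r"[\.]$")
# Partially configured tokenizer: only suffix_search is set.
nlp.tokenizer = Tokenizer(nlp.vocab, suffix_search=suffix_re.search)

tok_bytes = nlp.tokenizer.to_bytes()
new_tokenizer = Tokenizer(nlp.vocab).from_bytes(tok_bytes)
assert new_tokenizer.to_bytes() == tok_bytes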
repo: spaCy | file: spaCy-master/spacy/tests/serialize/test_serialize_vocab_strings.py
import pickle import pytest from thinc.api import get_current_ops import spacy from spacy.lang.en import English from spacy.strings import StringStore from spacy.tokens import Doc from spacy.util import ensure_path, load_model from spacy.vectors import Vectors from spacy.vocab import Vocab from ..util import make_tempdir test_strings = [([], []), (["rats", "are", "cute"], ["i", "like", "rats"])] test_strings_attrs = [(["rats", "are", "cute"], "Hello")] @pytest.mark.issue(599) def test_issue599(en_vocab): doc = Doc(en_vocab) doc2 = Doc(doc.vocab) doc2.from_bytes(doc.to_bytes()) assert doc2.has_annotation("DEP") @pytest.mark.issue(4054) def test_issue4054(en_vocab): """Test that a new blank model can be made with a vocab from file, and that serialization does not drop the language at any point.""" nlp1 = English() vocab1 = nlp1.vocab with make_tempdir() as d: vocab_dir = ensure_path(d / "vocab") if not vocab_dir.exists(): vocab_dir.mkdir() vocab1.to_disk(vocab_dir) vocab2 = Vocab().from_disk(vocab_dir) nlp2 = spacy.blank("en", vocab=vocab2) nlp_dir = ensure_path(d / "nlp") if not nlp_dir.exists(): nlp_dir.mkdir() nlp2.to_disk(nlp_dir) nlp3 = load_model(nlp_dir) assert nlp3.lang == "en" @pytest.mark.issue(4133) def test_issue4133(en_vocab): nlp = English() vocab_bytes = nlp.vocab.to_bytes() words = ["Apple", "is", "looking", "at", "buying", "a", "startup"] pos = ["NOUN", "VERB", "ADP", "VERB", "PROPN", "NOUN", "ADP"] doc = Doc(en_vocab, words=words) for i, token in enumerate(doc): token.pos_ = pos[i] # usually this is already True when starting from proper models instead of blank English doc_bytes = doc.to_bytes() vocab = Vocab() vocab = vocab.from_bytes(vocab_bytes) doc = Doc(vocab).from_bytes(doc_bytes) actual = [] for token in doc: actual.append(token.pos_) assert actual == pos @pytest.mark.parametrize("text", ["rat"]) def test_serialize_vocab(en_vocab, text): text_hash = en_vocab.strings.add(text) vocab_bytes = en_vocab.to_bytes(exclude=["lookups"]) new_vocab = Vocab().from_bytes(vocab_bytes) assert new_vocab.strings[text_hash] == text assert new_vocab.to_bytes(exclude=["lookups"]) == vocab_bytes @pytest.mark.parametrize("strings1,strings2", test_strings) def test_serialize_vocab_roundtrip_bytes(strings1, strings2): vocab1 = Vocab(strings=strings1) vocab2 = Vocab(strings=strings2) vocab1_b = vocab1.to_bytes() vocab2_b = vocab2.to_bytes() if strings1 == strings2: assert vocab1_b == vocab2_b else: assert vocab1_b != vocab2_b vocab1 = vocab1.from_bytes(vocab1_b) assert vocab1.to_bytes() == vocab1_b new_vocab1 = Vocab().from_bytes(vocab1_b) assert new_vocab1.to_bytes() == vocab1_b assert len(new_vocab1.strings) == len(strings1) assert sorted([s for s in new_vocab1.strings]) == sorted(strings1) @pytest.mark.parametrize("strings1,strings2", test_strings) def test_serialize_vocab_roundtrip_disk(strings1, strings2): vocab1 = Vocab(strings=strings1) vocab2 = Vocab(strings=strings2) with make_tempdir() as d: file_path1 = d / "vocab1" file_path2 = d / "vocab2" vocab1.to_disk(file_path1) vocab2.to_disk(file_path2) vocab1_d = Vocab().from_disk(file_path1) vocab2_d = Vocab().from_disk(file_path2) # check strings rather than lexemes, which are only reloaded on demand assert set(strings1) == set([s for s in vocab1_d.strings]) assert set(strings2) == set([s for s in vocab2_d.strings]) if set(strings1) == set(strings2): assert [s for s in vocab1_d.strings] == [s for s in vocab2_d.strings] else: assert [s for s in vocab1_d.strings] != [s for s in vocab2_d.strings] 
@pytest.mark.parametrize("strings,lex_attr", test_strings_attrs) def test_serialize_vocab_lex_attrs_bytes(strings, lex_attr): vocab1 = Vocab(strings=strings) vocab2 = Vocab() vocab1[strings[0]].norm_ = lex_attr assert vocab1[strings[0]].norm_ == lex_attr assert vocab2[strings[0]].norm_ != lex_attr vocab2 = vocab2.from_bytes(vocab1.to_bytes()) assert vocab2[strings[0]].norm_ == lex_attr @pytest.mark.parametrize("strings,lex_attr", test_strings_attrs) def test_deserialize_vocab_seen_entries(strings, lex_attr): # Reported in #2153 vocab = Vocab(strings=strings) vocab.from_bytes(vocab.to_bytes()) assert len(vocab.strings) == len(strings) @pytest.mark.parametrize("strings,lex_attr", test_strings_attrs) def test_serialize_vocab_lex_attrs_disk(strings, lex_attr): vocab1 = Vocab(strings=strings) vocab2 = Vocab() vocab1[strings[0]].norm_ = lex_attr assert vocab1[strings[0]].norm_ == lex_attr assert vocab2[strings[0]].norm_ != lex_attr with make_tempdir() as d: file_path = d / "vocab" vocab1.to_disk(file_path) vocab2 = vocab2.from_disk(file_path) assert vocab2[strings[0]].norm_ == lex_attr @pytest.mark.parametrize("strings1,strings2", test_strings) def test_serialize_stringstore_roundtrip_bytes(strings1, strings2): sstore1 = StringStore(strings=strings1) sstore2 = StringStore(strings=strings2) sstore1_b = sstore1.to_bytes() sstore2_b = sstore2.to_bytes() if set(strings1) == set(strings2): assert sstore1_b == sstore2_b else: assert sstore1_b != sstore2_b sstore1 = sstore1.from_bytes(sstore1_b) assert sstore1.to_bytes() == sstore1_b new_sstore1 = StringStore().from_bytes(sstore1_b) assert new_sstore1.to_bytes() == sstore1_b assert set(new_sstore1) == set(strings1) @pytest.mark.parametrize("strings1,strings2", test_strings) def test_serialize_stringstore_roundtrip_disk(strings1, strings2): sstore1 = StringStore(strings=strings1) sstore2 = StringStore(strings=strings2) with make_tempdir() as d: file_path1 = d / "strings1" file_path2 = d / "strings2" sstore1.to_disk(file_path1) sstore2.to_disk(file_path2) sstore1_d = StringStore().from_disk(file_path1) sstore2_d = StringStore().from_disk(file_path2) assert set(sstore1_d) == set(sstore1) assert set(sstore2_d) == set(sstore2) if set(strings1) == set(strings2): assert set(sstore1_d) == set(sstore2_d) else: assert set(sstore1_d) != set(sstore2_d) @pytest.mark.parametrize("strings,lex_attr", test_strings_attrs) def test_pickle_vocab(strings, lex_attr): vocab = Vocab(strings=strings) ops = get_current_ops() vectors = Vectors(data=ops.xp.zeros((10, 10)), mode="floret", hash_count=1) vocab.vectors = vectors vocab[strings[0]].norm_ = lex_attr vocab_pickled = pickle.dumps(vocab) vocab_unpickled = pickle.loads(vocab_pickled) assert vocab.to_bytes() == vocab_unpickled.to_bytes() assert vocab_unpickled.vectors.mode == "floret"
file_length: 7,067 | avg_line_length: 35.061224 | max_line_length: 92 | extension_type: py
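Note: the StringStore round-trip asserted above, as a standalone sketch (the example strings come from the test data at the top of the file):

from spacy.strings import StringStore

sstore = StringStore(strings=["rats", "are", "cute"])
store_bytes = sstore.to_bytes()

# A fresh store loaded from those bytes contains exactly the same strings.
sstore2 = StringStore().from_bytes(store_bytes)
assert set(sstore2) == {"rats", "are", "cute"}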
repo: spaCy | file: spaCy-master/spacy/tests/tokenizer/__init__.py
file_length: 0 | avg_line_length: 0 | max_line_length: 0 | extension_type: py
repo: spaCy | file: spaCy-master/spacy/tests/tokenizer/test_exceptions.py
import sys

import pytest


def test_tokenizer_handles_emoticons(tokenizer):
    # Tweebo challenge (CMU)
    text = (
        """:o :/ :'( >:o (: :) >.< XD -__- o.O ;D :-) @_@ :P 8D :1 >:( :D =| :> ...."""
    )
    tokens = tokenizer(text)
    assert tokens[0].text == ":o"
    assert tokens[1].text == ":/"
    assert tokens[2].text == ":'("
    assert tokens[3].text == ">:o"
    assert tokens[4].text == "(:"
    assert tokens[5].text == ":)"
    assert tokens[6].text == ">.<"
    assert tokens[7].text == "XD"
    assert tokens[8].text == "-__-"
    assert tokens[9].text == "o.O"
    assert tokens[10].text == ";D"
    assert tokens[11].text == ":-)"
    assert tokens[12].text == "@_@"
    assert tokens[13].text == ":P"
    assert tokens[14].text == "8D"
    assert tokens[15].text == ":1"
    assert tokens[16].text == ">:("
    assert tokens[17].text == ":D"
    assert tokens[18].text == "=|"
    assert tokens[19].text == ":>"
    assert tokens[20].text == "...."


@pytest.mark.parametrize("text,length", [("108)", 2), ("XDN", 1)])
def test_tokenizer_excludes_false_pos_emoticons(tokenizer, text, length):
    tokens = tokenizer(text)
    assert len(tokens) == length


@pytest.mark.parametrize(
    "text,length", [("can you still dunk?🍕🍔😵LOL", 8), ("i💙you", 3), ("🤘🤘yay!", 4)]
)
def test_tokenizer_handles_emoji(tokenizer, text, length):
    # These break on narrow unicode builds, e.g. Windows
    if sys.maxunicode >= 1114111:
        tokens = tokenizer(text)
        assert len(tokens) == length


def test_tokenizer_degree(tokenizer):
    for u in "cfkCFK":
        assert [t.text for t in tokenizer(f"°{u}.")] == ["°", f"{u}", "."]
        assert [t[1] for t in tokenizer.explain(f"°{u}.")] == ["°", f"{u}", "."]
file_length: 1,735 | avg_line_length: 30.563636 | max_line_length: 87 | extension_type: py
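Note: a quick way to reproduce the emoticon behaviour outside the test fixtures. This sketch assumes the default English tokenizer, whose base exceptions include the emoticons asserted above:

import spacy

nlp = spacy.blank("en")
doc = nlp.tokenizer(":) >.< XD")
# Each emoticon should survive as a single token.
assert [t.text for t in doc] == [":)", ">.<", "XD"]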
repo: spaCy | file: spaCy-master/spacy/tests/tokenizer/test_explain.py
import re import string import hypothesis import hypothesis.strategies import pytest import spacy from spacy.tokenizer import Tokenizer from spacy.util import get_lang_class # Only include languages with no external dependencies # "is" seems to confuse importlib, so we're also excluding it for now # excluded: ja, ru, th, uk, vi, zh, is LANGUAGES = [ pytest.param("fr", marks=pytest.mark.slow()), pytest.param("af", marks=pytest.mark.slow()), pytest.param("ar", marks=pytest.mark.slow()), pytest.param("bg", marks=pytest.mark.slow()), "bn", pytest.param("ca", marks=pytest.mark.slow()), pytest.param("cs", marks=pytest.mark.slow()), pytest.param("da", marks=pytest.mark.slow()), pytest.param("de", marks=pytest.mark.slow()), "el", "en", pytest.param("es", marks=pytest.mark.slow()), pytest.param("et", marks=pytest.mark.slow()), pytest.param("fa", marks=pytest.mark.slow()), pytest.param("fi", marks=pytest.mark.slow()), "fr", pytest.param("ga", marks=pytest.mark.slow()), pytest.param("he", marks=pytest.mark.slow()), pytest.param("hi", marks=pytest.mark.slow()), pytest.param("hr", marks=pytest.mark.slow()), "hu", pytest.param("id", marks=pytest.mark.slow()), pytest.param("it", marks=pytest.mark.slow()), pytest.param("kn", marks=pytest.mark.slow()), pytest.param("lb", marks=pytest.mark.slow()), pytest.param("lt", marks=pytest.mark.slow()), pytest.param("lv", marks=pytest.mark.slow()), pytest.param("nb", marks=pytest.mark.slow()), pytest.param("nl", marks=pytest.mark.slow()), "pl", pytest.param("pt", marks=pytest.mark.slow()), pytest.param("ro", marks=pytest.mark.slow()), pytest.param("si", marks=pytest.mark.slow()), pytest.param("sk", marks=pytest.mark.slow()), pytest.param("sl", marks=pytest.mark.slow()), pytest.param("sq", marks=pytest.mark.slow()), pytest.param("sr", marks=pytest.mark.slow()), pytest.param("sv", marks=pytest.mark.slow()), pytest.param("ta", marks=pytest.mark.slow()), pytest.param("te", marks=pytest.mark.slow()), pytest.param("tl", marks=pytest.mark.slow()), pytest.param("tr", marks=pytest.mark.slow()), pytest.param("tt", marks=pytest.mark.slow()), pytest.param("ur", marks=pytest.mark.slow()), ] @pytest.mark.parametrize("lang", LANGUAGES) def test_tokenizer_explain(lang): tokenizer = get_lang_class(lang)().tokenizer examples = pytest.importorskip(f"spacy.lang.{lang}.examples") for sentence in examples.sentences: tokens = [t.text for t in tokenizer(sentence) if not t.is_space] debug_tokens = [t[1] for t in tokenizer.explain(sentence)] assert tokens == debug_tokens def test_tokenizer_explain_special_matcher(en_vocab): suffix_re = re.compile(r"[\.]$") infix_re = re.compile(r"[/]") rules = {"a.": [{"ORTH": "a."}]} tokenizer = Tokenizer( en_vocab, rules=rules, suffix_search=suffix_re.search, infix_finditer=infix_re.finditer, ) tokens = [t.text for t in tokenizer("a/a.")] explain_tokens = [t[1] for t in tokenizer.explain("a/a.")] assert tokens == explain_tokens @hypothesis.strategies.composite def sentence_strategy(draw: hypothesis.strategies.DrawFn, max_n_words: int = 4) -> str: """ Composite strategy for fuzzily generating sentence with varying interpunctation. draw (hypothesis.strategies.DrawFn): Protocol for drawing function allowing to fuzzily pick from hypothesis' strategies. max_n_words (int): Max. number of words in generated sentence. RETURNS (str): Fuzzily generated sentence. 
""" punctuation_and_space_regex = "|".join( [*[re.escape(p) for p in string.punctuation], r"\s"] ) sentence = [ [ draw(hypothesis.strategies.text(min_size=1)), draw(hypothesis.strategies.from_regex(punctuation_and_space_regex)), ] for _ in range( draw(hypothesis.strategies.integers(min_value=2, max_value=max_n_words)) ) ] return " ".join([token for token_pair in sentence for token in token_pair]) @pytest.mark.xfail @pytest.mark.parametrize("lang", LANGUAGES) @hypothesis.given(sentence=sentence_strategy()) def test_tokenizer_explain_fuzzy(lang: str, sentence: str) -> None: """ Tests whether output of tokenizer.explain() matches tokenizer output. Input generated by hypothesis. lang (str): Language to test. text (str): Fuzzily generated sentence to tokenize. """ tokenizer: Tokenizer = spacy.blank(lang).tokenizer tokens = [t.text for t in tokenizer(sentence) if not t.is_space] debug_tokens = [t[1] for t in tokenizer.explain(sentence)] assert tokens == debug_tokens, f"{tokens}, {debug_tokens}, {sentence}"
file_length: 4,850 | avg_line_length: 36.604651 | max_line_length: 112 | extension_type: py
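Note: tokenizer.explain() yields (rule, substring) pairs, so checking it against the actual tokenizer output looks like this sketch (blank English pipeline, arbitrary sentence):

import spacy

nlp = spacy.blank("en")
sentence = "Let's tokenize this, quickly."
tokens = [t.text for t in nlp.tokenizer(sentence) if not t.is_space]
debug_tokens = [t[1] for t in nlp.tokenizer.explain(sentence)]
assert tokens == debug_tokens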
repo: spaCy | file: spaCy-master/spacy/tests/tokenizer/test_naughty_strings.py
import pytest # Examples taken from the "Big List of Naughty Strings" # https://github.com/minimaxir/big-list-of-naughty-strings NAUGHTY_STRINGS = [ # ASCII punctuation r",./;'[]\-=", r'<>?:"{}|_+', r'!@#$%^&*()`~"', # Unicode additional control characters, byte order marks r"­؀؁؂؃؄؅؜۝܏᠎​‌‍‎‏‪", r"￾", # Unicode Symbols r"Ω≈ç√∫˜µ≤≥÷", r"åß∂ƒ©˙∆˚¬…æ", "œ∑´®†¥¨ˆøπ“‘", r"¡™£¢∞§¶•ªº–≠", r"¸˛Ç◊ı˜Â¯˘¿", r"ÅÍÎÏ˝ÓÔÒÚÆ☃", r"Œ„´‰ˇÁ¨ˆØ∏”’", r"`⁄€‹›fifl‡°·‚—±", r"⅛⅜⅝⅞", r"ЁЂЃЄЅІЇЈЉЊЋЌЍЎЏАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрстуфхцчшщъыьэюя", r"٠١٢٣٤٥٦٧٨٩", # Unicode Subscript/Superscript/Accents r"⁰⁴⁵", r"₀₁₂", r"⁰⁴⁵₀₁₂", r"ด้้้้้็็็็็้้้้้็็็็็้้้้้้้้็็็็็้้้้้็็็็็้้้้้้้้็็็็็้้้้้็็็็็้้้้้้้้็็็็็้้้้้็็็็ ด้้้้้็็็็็้้้้้็็็็็้้้้้้้้็็็็็้้้้้็็็็็้้้้้้้้็็็็็้้้้้็็็็็้้้้้้้้็็็็็้้้้้็็็็ ด้้้้้็็็็็้้้้้็็็็็้้้้้้้้็็็็็้้้้้็็็็็้้้้้้้้็็็็็้้้้้็็็็็้้้้้้้้็็็็็้้้้้็็็็", r" ̄ ̄", # Two-Byte Characters r"田中さんにあげて下さい", r"パーティーへ行かないか", r"和製漢語", r"部落格", r"사회과학원 어학연구소", r"찦차를 타고 온 펲시맨과 쑛다리 똠방각하", r"社會科學院語學研究所", r"울란바토르", r"𠜎𠜱𠝹𠱓𠱸𠲖𠳏", # Japanese Emoticons r"ヽ༼ຈل͜ຈ༽ノ ヽ༼ຈل͜ຈ༽ノ", r"(。◕ ∀ ◕。)", r"`ィ(´∀`∩", r"__ロ(,_,*)", r"・( ̄∀ ̄)・:*:", r"゚・✿ヾ╲(。◕‿◕。)╱✿・゚", r",。・:*:・゜’( ☻ ω ☻ )。・:*:・゜’", r"(╯°□°)╯︵ ┻━┻)" "(ノಥ益ಥ)ノ ┻━┻", r"┬─┬ノ( º _ ºノ)", r"( ͡° ͜ʖ ͡°)", # Emoji r"😍", r"👩🏽", r"👾 🙇 💁 🙅 🙆 🙋 🙎 🙍", r"🐵 🙈 🙉 🙊", r"❤️ 💔 💌 💕 💞 💓 💗 💖 💘 💝 💟 💜 💛 💚 💙", r"✋🏿 💪🏿 👐🏿 🙌🏿 👏🏿 🙏🏿", r"🚾 🆒 🆓 🆕 🆖 🆗 🆙 🏧", r"0️⃣ 1️⃣ 2️⃣ 3️⃣ 4️⃣ 5️⃣ 6️⃣ 7️⃣ 8️⃣ 9️⃣ 🔟", # Regional Indicator Symbols r"🇺🇸🇷🇺🇸 🇦🇫🇦🇲🇸", r"🇺🇸🇷🇺🇸🇦🇫🇦🇲", r"🇺🇸🇷🇺🇸🇦", # Unicode Numbers r"123", r"١٢٣", # Right-To-Left Strings r"ثم نفس سقطت وبالتحديد،, جزيرتي باستخدام أن دنو. إذ هنا؟ الستار وتنصيب كان. أهّل ايطاليا، بريطانيا-فرنسا قد أخذ. سليمان، إتفاقية بين ما, يذكر الحدود أي بعد, معاملة بولندا، الإطلاق عل إيو.", r"إيو.", r"בְּרֵאשִׁית, בָּרָא אֱלֹהִים, אֵת הַשָּׁמַיִם, וְאֵת הָאָרֶץ", r"הָיְתָהtestالصفحات التّحول", r"﷽", r"ﷺ", r"مُنَاقَشَةُ سُبُلِ اِسْتِخْدَامِ اللُّغَةِ فِي النُّظُمِ الْقَائِمَةِ وَفِيم يَخُصَّ التَّطْبِيقَاتُ الْحاسُوبِيَّةُ،", # Trick Unicode r"‪‪test‪", r"‫test", r"
test
", r"test⁠test", r"⁦test⁧", # Zalgo Text r"Ṱ̺̺̕o͞ ̷i̲̬͇̪͙n̝̗͕v̟̜̘̦͟o̶̙̰̠kè͚̮̺̪̹̱̤ ̖t̝͕̳̣̻̪͞h̼͓̲̦̳̘̲e͇̣̰̦̬͎ ̢̼̻̱̘h͚͎͙̜̣̲ͅi̦̲̣̰̤v̻͍e̺̭̳̪̰-m̢iͅn̖̺̞̲̯̰d̵̼̟͙̩̼̘̳ ̞̥̱̳̭r̛̗̘e͙p͠r̼̞̻̭̗e̺̠̣͟s̘͇̳͍̝͉e͉̥̯̞̲͚̬͜ǹ̬͎͎̟̖͇̤t͍̬̤͓̼̭͘ͅi̪̱n͠g̴͉ ͏͉ͅc̬̟h͡a̫̻̯͘o̫̟̖͍̙̝͉s̗̦̲.̨̹͈̣", r"̡͓̞ͅI̗̘̦͝n͇͇͙v̮̫ok̲̫̙͈i̖͙̭̹̠̞n̡̻̮̣̺g̲͈͙̭͙̬͎ ̰t͔̦h̞̲e̢̤ ͍̬̲͖f̴̘͕̣è͖ẹ̥̩l͖͔͚i͓͚̦͠n͖͍̗͓̳̮g͍ ̨o͚̪͡f̘̣̬ ̖̘͖̟͙̮c҉͔̫͖͓͇͖ͅh̵̤̣͚͔á̗̼͕ͅo̼̣̥s̱͈̺̖̦̻͢.̛̖̞̠̫̰", r"̗̺͖̹̯͓Ṯ̤͍̥͇͈h̲́e͏͓̼̗̙̼̣͔ ͇̜̱̠͓͍ͅN͕͠e̗̱z̘̝̜̺͙p̤̺̹͍̯͚e̠̻̠͜r̨̤͍̺̖͔̖̖d̠̟̭̬̝͟i̦͖̩͓͔̤a̠̗̬͉̙n͚͜ ̻̞̰͚ͅh̵͉i̳̞v̢͇ḙ͎͟-҉̭̩̼͔m̤̭̫i͕͇̝̦n̗͙ḍ̟ ̯̲͕͞ǫ̟̯̰̲͙̻̝f ̪̰̰̗̖̭̘͘c̦͍̲̞͍̩̙ḥ͚a̮͎̟̙͜ơ̩̹͎s̤.̝̝ ҉Z̡̖̜͖̰̣͉̜a͖̰͙̬͡l̲̫̳͍̩g̡̟̼̱͚̞̬ͅo̗͜.̟", r"̦H̬̤̗̤͝e͜ ̜̥̝̻͍̟́w̕h̖̯͓o̝͙̖͎̱̮ ҉̺̙̞̟͈W̷̼̭a̺̪͍į͈͕̭͙̯̜t̶̼̮s̘͙͖̕ ̠̫̠B̻͍͙͉̳ͅe̵h̵̬͇̫͙i̹͓̳̳̮͎̫̕n͟d̴̪̜̖ ̰͉̩͇͙̲͞ͅT͖̼͓̪͢h͏͓̮̻e̬̝̟ͅ ̤̹̝W͙̞̝͔͇͝ͅa͏͓͔̹̼̣l̴͔̰̤̟͔ḽ̫.͕", r"Z̮̞̠͙͔ͅḀ̗̞͈̻̗Ḷ͙͎̯̹̞͓G̻O̭̗̮", # Unicode Upsidedown r"˙ɐnbᴉlɐ ɐuƃɐɯ ǝɹolop ʇǝ ǝɹoqɐl ʇn ʇunpᴉpᴉɔuᴉ ɹodɯǝʇ poɯsnᴉǝ op pǝs 'ʇᴉlǝ ƃuᴉɔsᴉdᴉpɐ ɹnʇǝʇɔǝsuoɔ 'ʇǝɯɐ ʇᴉs ɹolop ɯnsdᴉ ɯǝɹo˥", r"00˙Ɩ$-", # Unicode font r"The quick brown fox jumps over the lazy dog", r"𝐓𝐡𝐞 𝐪𝐮𝐢𝐜𝐤 𝐛𝐫𝐨𝐰𝐧 𝐟𝐨𝐱 𝐣𝐮𝐦𝐩𝐬 𝐨𝐯𝐞𝐫 𝐭𝐡𝐞 𝐥𝐚𝐳𝐲 𝐝𝐨𝐠", r"𝕿𝖍𝖊 𝖖𝖚𝖎𝖈𝖐 𝖇𝖗𝖔𝖜𝖓 𝖋𝖔𝖝 𝖏𝖚𝖒𝖕𝖘 𝖔𝖛𝖊𝖗 𝖙𝖍𝖊 𝖑𝖆𝖟𝖞 𝖉𝖔𝖌", r"𝑻𝒉𝒆 𝒒𝒖𝒊𝒄𝒌 𝒃𝒓𝒐𝒘𝒏 𝒇𝒐𝒙 𝒋𝒖𝒎𝒑𝒔 𝒐𝒗𝒆𝒓 𝒕𝒉𝒆 𝒍𝒂𝒛𝒚 𝒅𝒐𝒈", r"𝓣𝓱𝓮 𝓺𝓾𝓲𝓬𝓴 𝓫𝓻𝓸𝔀𝓷 𝓯𝓸𝔁 𝓳𝓾𝓶𝓹𝓼 𝓸𝓿𝓮𝓻 𝓽𝓱𝓮 𝓵𝓪𝔃𝔂 𝓭𝓸𝓰", r"𝕋𝕙𝕖 𝕢𝕦𝕚𝕔𝕜 𝕓𝕣𝕠𝕨𝕟 𝕗𝕠𝕩 𝕛𝕦𝕞𝕡𝕤 𝕠𝕧𝕖𝕣 𝕥𝕙𝕖 𝕝𝕒𝕫𝕪 𝕕𝕠𝕘", r"𝚃𝚑𝚎 𝚚𝚞𝚒𝚌𝚔 𝚋𝚛𝚘𝚠𝚗 𝚏𝚘𝚡 𝚓𝚞𝚖𝚙𝚜 𝚘𝚟𝚎𝚛 𝚝𝚑𝚎 𝚕𝚊𝚣𝚢 𝚍𝚘𝚐", r"⒯⒣⒠ ⒬⒰⒤⒞⒦ ⒝⒭⒪⒲⒩ ⒡⒪⒳ ⒥⒰⒨⒫⒮ ⒪⒱⒠⒭ ⒯⒣⒠ ⒧⒜⒵⒴ ⒟⒪⒢", # File paths r"../../../../../../../../../../../etc/passwd%00", r"../../../../../../../../../../../etc/hosts", # iOS Vulnerabilities r"Powerلُلُصّبُلُلصّبُررً ॣ ॣh ॣ ॣ冗", r"🏳0🌈️", ] @pytest.mark.slow @pytest.mark.parametrize("text", NAUGHTY_STRINGS) def test_tokenizer_naughty_strings(tokenizer, text): tokens = tokenizer(text) assert tokens.text_with_ws == text
file_length: 4,201 | avg_line_length: 35.224138 | max_line_length: 277 | extension_type: py
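Note: the invariant behind this whole file is that tokenization is lossless. A sketch with one of the strings above (blank English pipeline assumed):

import spacy

nlp = spacy.blank("en")
text = "٠١٢٣٤٥٦٧٨٩"  # Arabic-Indic digits, from the list above
doc = nlp.tokenizer(text)
# Joining tokens with their trailing whitespace reproduces the input.
assert doc.text_with_ws == text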
repo: spaCy | file: spaCy-master/spacy/tests/tokenizer/test_tokenizer.py
import re import numpy import pytest from spacy.lang.de import German from spacy.lang.en import English from spacy.symbols import ORTH from spacy.tokenizer import Tokenizer from spacy.tokens import Doc from spacy.training import Example from spacy.util import ( compile_infix_regex, compile_prefix_regex, compile_suffix_regex, ensure_path, ) from spacy.vocab import Vocab @pytest.mark.issue(743) def test_issue743(): doc = Doc(Vocab(), ["hello", "world"]) token = doc[0] s = set([token]) items = list(s) assert items[0] is token @pytest.mark.issue(801) @pytest.mark.skip( reason="Can not be fixed unless with variable-width lookbehinds, cf. PR #3218" ) @pytest.mark.parametrize( "text,tokens", [ ('"deserve,"--and', ['"', "deserve", ',"--', "and"]), ("exception;--exclusive", ["exception", ";--", "exclusive"]), ("day.--Is", ["day", ".--", "Is"]), ("refinement:--just", ["refinement", ":--", "just"]), ("memories?--To", ["memories", "?--", "To"]), ("Useful.=--Therefore", ["Useful", ".=--", "Therefore"]), ("=Hope.=--Pandora", ["=", "Hope", ".=--", "Pandora"]), ], ) def test_issue801(en_tokenizer, text, tokens): """Test that special characters + hyphens are split correctly.""" doc = en_tokenizer(text) assert len(doc) == len(tokens) assert [t.text for t in doc] == tokens @pytest.mark.issue(1061) def test_issue1061(): """Test special-case works after tokenizing. Was caching problem.""" text = "I like _MATH_ even _MATH_ when _MATH_, except when _MATH_ is _MATH_! but not _MATH_." tokenizer = English().tokenizer doc = tokenizer(text) assert "MATH" in [w.text for w in doc] assert "_MATH_" not in [w.text for w in doc] tokenizer.add_special_case("_MATH_", [{ORTH: "_MATH_"}]) doc = tokenizer(text) assert "_MATH_" in [w.text for w in doc] assert "MATH" not in [w.text for w in doc] # For sanity, check it works when pipeline is clean. 
tokenizer = English().tokenizer tokenizer.add_special_case("_MATH_", [{ORTH: "_MATH_"}]) doc = tokenizer(text) assert "_MATH_" in [w.text for w in doc] assert "MATH" not in [w.text for w in doc] @pytest.mark.issue(1963) def test_issue1963(en_tokenizer): """Test that doc.merge() resizes doc.tensor""" doc = en_tokenizer("a b c d") doc.tensor = numpy.ones((len(doc), 128), dtype="f") with doc.retokenize() as retokenizer: retokenizer.merge(doc[0:2]) assert len(doc) == 3 assert doc.tensor.shape == (3, 128) @pytest.mark.skip( reason="Can not be fixed without variable-width look-behind (which we don't want)" ) @pytest.mark.issue(1235) def test_issue1235(): """Test that g is not split of if preceded by a number and a letter""" nlp = English() testwords = "e2g 2g 52g" doc = nlp(testwords) assert len(doc) == 5 assert doc[0].text == "e2g" assert doc[1].text == "2" assert doc[2].text == "g" assert doc[3].text == "52" assert doc[4].text == "g" @pytest.mark.issue(1242) def test_issue1242(): nlp = English() doc = nlp("") assert len(doc) == 0 docs = list(nlp.pipe(["", "hello"])) assert len(docs[0]) == 0 assert len(docs[1]) == 1 @pytest.mark.issue(1257) def test_issue1257(): """Test that tokens compare correctly.""" doc1 = Doc(Vocab(), words=["a", "b", "c"]) doc2 = Doc(Vocab(), words=["a", "c", "e"]) assert doc1[0] != doc2[0] assert not doc1[0] == doc2[0] @pytest.mark.issue(1375) def test_issue1375(): """Test that token.nbor() raises IndexError for out-of-bounds access.""" doc = Doc(Vocab(), words=["0", "1", "2"]) with pytest.raises(IndexError): assert doc[0].nbor(-1) assert doc[1].nbor(-1).text == "0" with pytest.raises(IndexError): assert doc[2].nbor(1) assert doc[1].nbor(1).text == "2" @pytest.mark.issue(1488) def test_issue1488(): """Test that tokenizer can parse DOT inside non-whitespace separators""" prefix_re = re.compile(r"""[\[\("']""") suffix_re = re.compile(r"""[\]\)"']""") infix_re = re.compile(r"""[-~\.]""") simple_url_re = re.compile(r"""^https?://""") def my_tokenizer(nlp): return Tokenizer( nlp.vocab, {}, prefix_search=prefix_re.search, suffix_search=suffix_re.search, infix_finditer=infix_re.finditer, token_match=simple_url_re.match, ) nlp = English() nlp.tokenizer = my_tokenizer(nlp) doc = nlp("This is a test.") for token in doc: assert token.text @pytest.mark.issue(1494) def test_issue1494(): """Test if infix_finditer works correctly""" infix_re = re.compile(r"""[^a-z]""") test_cases = [ ("token 123test", ["token", "1", "2", "3", "test"]), ("token 1test", ["token", "1test"]), ("hello...test", ["hello", ".", ".", ".", "test"]), ] def new_tokenizer(nlp): return Tokenizer(nlp.vocab, {}, infix_finditer=infix_re.finditer) nlp = English() nlp.tokenizer = new_tokenizer(nlp) for text, expected in test_cases: assert [token.text for token in nlp(text)] == expected @pytest.mark.skip( reason="Can not be fixed without iterative looping between prefix/suffix and infix" ) @pytest.mark.issue(2070) def test_issue2070(): """Test that checks that a dot followed by a quote is handled appropriately. """ # Problem: The dot is now properly split off, but the prefix/suffix rules # are not applied again afterwards. This means that the quote will still be # attached to the remaining token. nlp = English() doc = nlp('First sentence."A quoted sentence" he said ...') assert len(doc) == 11 @pytest.mark.issue(2926) def test_issue2926(fr_tokenizer): """Test that the tokenizer correctly splits tokens separated by a slash (/) ending in a digit. 
""" doc = fr_tokenizer("Learn html5/css3/javascript/jquery") assert len(doc) == 8 assert doc[0].text == "Learn" assert doc[1].text == "html5" assert doc[2].text == "/" assert doc[3].text == "css3" assert doc[4].text == "/" assert doc[5].text == "javascript" assert doc[6].text == "/" assert doc[7].text == "jquery" @pytest.mark.parametrize( "text", [ "ABLEItemColumn IAcceptance Limits of ErrorIn-Service Limits of ErrorColumn IIColumn IIIColumn IVColumn VComputed VolumeUnder Registration of\xa0VolumeOver Registration of\xa0VolumeUnder Registration of\xa0VolumeOver Registration of\xa0VolumeCubic FeetCubic FeetCubic FeetCubic FeetCubic Feet1Up to 10.0100.0050.0100.005220.0200.0100.0200.010350.0360.0180.0360.0184100.0500.0250.0500.0255Over 100.5% of computed volume0.25% of computed volume0.5% of computed volume0.25% of computed volume TABLE ItemColumn IAcceptance Limits of ErrorIn-Service Limits of ErrorColumn IIColumn IIIColumn IVColumn VComputed VolumeUnder Registration of\xa0VolumeOver Registration of\xa0VolumeUnder Registration of\xa0VolumeOver Registration of\xa0VolumeCubic FeetCubic FeetCubic FeetCubic FeetCubic Feet1Up to 10.0100.0050.0100.005220.0200.0100.0200.010350.0360.0180.0360.0184100.0500.0250.0500.0255Over 100.5% of computed volume0.25% of computed volume0.5% of computed volume0.25% of computed volume ItemColumn IAcceptance Limits of ErrorIn-Service Limits of ErrorColumn IIColumn IIIColumn IVColumn VComputed VolumeUnder Registration of\xa0VolumeOver Registration of\xa0VolumeUnder Registration of\xa0VolumeOver Registration of\xa0VolumeCubic FeetCubic FeetCubic FeetCubic FeetCubic Feet1Up to 10.0100.0050.0100.005220.0200.0100.0200.010350.0360.0180.0360.0184100.0500.0250.0500.0255Over 100.5% of computed volume0.25% of computed volume0.5% of computed volume0.25% of computed volume", "oow.jspsearch.eventoracleopenworldsearch.technologyoraclesolarissearch.technologystoragesearch.technologylinuxsearch.technologyserverssearch.technologyvirtualizationsearch.technologyengineeredsystemspcodewwmkmppscem:", ], ) @pytest.mark.issue(2626) def test_issue2626_2835(en_tokenizer, text): """Check that sentence doesn't cause an infinite loop in the tokenizer.""" doc = en_tokenizer(text) assert doc @pytest.mark.issue(2656) def test_issue2656(en_tokenizer): """Test that tokenizer correctly splits off punctuation after numbers with decimal points. """ doc = en_tokenizer("I went for 40.3, and got home by 10.0.") assert len(doc) == 11 assert doc[0].text == "I" assert doc[1].text == "went" assert doc[2].text == "for" assert doc[3].text == "40.3" assert doc[4].text == "," assert doc[5].text == "and" assert doc[6].text == "got" assert doc[7].text == "home" assert doc[8].text == "by" assert doc[9].text == "10.0" assert doc[10].text == "." @pytest.mark.issue(2754) def test_issue2754(en_tokenizer): """Test that words like 'a' and 'a.m.' don't get exceptional norm values.""" a = en_tokenizer("a") assert a[0].norm_ == "a" am = en_tokenizer("am") assert am[0].norm_ == "am" @pytest.mark.issue(3002) def test_issue3002(): """Test that the tokenizer doesn't hang on a long list of dots""" nlp = German() doc = nlp( "880.794.982.218.444.893.023.439.794.626.120.190.780.624.990.275.671 ist eine lange Zahl" ) assert len(doc) == 5 @pytest.mark.skip(reason="default suffix rules avoid one upper-case letter before dot") @pytest.mark.issue(3449) def test_issue3449(): nlp = English() nlp.add_pipe("sentencizer") text1 = "He gave the ball to I. Do you want to go to the movies with I?" text2 = "He gave the ball to I. 
Do you want to go to the movies with I?" text3 = "He gave the ball to I.\nDo you want to go to the movies with I?" t1 = nlp(text1) t2 = nlp(text2) t3 = nlp(text3) assert t1[5].text == "I" assert t2[5].text == "I" assert t3[5].text == "I" @pytest.mark.parametrize( "text,words", [("A'B C", ["A", "'", "B", "C"]), ("A-B", ["A-B"])] ) def test_gold_misaligned(en_tokenizer, text, words): doc = en_tokenizer(text) Example.from_dict(doc, {"words": words}) def test_tokenizer_handles_no_word(tokenizer): tokens = tokenizer("") assert len(tokens) == 0 @pytest.mark.parametrize("text", ["lorem"]) def test_tokenizer_handles_single_word(tokenizer, text): tokens = tokenizer(text) assert tokens[0].text == text def test_tokenizer_handles_punct(tokenizer): text = "Lorem, ipsum." tokens = tokenizer(text) assert len(tokens) == 4 assert tokens[0].text == "Lorem" assert tokens[1].text == "," assert tokens[2].text == "ipsum" assert tokens[1].text != "Lorem" def test_tokenizer_handles_punct_braces(tokenizer): text = "Lorem, (ipsum)." tokens = tokenizer(text) assert len(tokens) == 6 def test_tokenizer_handles_digits(tokenizer): exceptions = ["hu", "bn"] text = "Lorem ipsum: 1984." tokens = tokenizer(text) if tokens[0].lang_ not in exceptions: assert len(tokens) == 5 assert tokens[0].text == "Lorem" assert tokens[3].text == "1984" @pytest.mark.parametrize( "text", ["google.com", "python.org", "spacy.io", "explosion.ai", "http://www.google.com"], ) def test_tokenizer_keep_urls(tokenizer, text): tokens = tokenizer(text) assert len(tokens) == 1 @pytest.mark.parametrize("text", ["NASDAQ:GOOG"]) def test_tokenizer_colons(tokenizer, text): tokens = tokenizer(text) assert len(tokens) == 3 @pytest.mark.parametrize( "text", ["[email protected]", "[email protected]", "[email protected]"] ) def test_tokenizer_keeps_email(tokenizer, text): tokens = tokenizer(text) assert len(tokens) == 1 def test_tokenizer_handles_long_text(tokenizer): text = """Lorem ipsum dolor sit amet, consectetur adipiscing elit Cras egestas orci non porttitor maximus. Maecenas quis odio id dolor rhoncus dignissim. Curabitur sed velit at orci ultrices sagittis. Nulla commodo euismod arcu eget vulputate. Phasellus tincidunt, augue quis porta finibus, massa sapien consectetur augue, non lacinia enim nibh eget ipsum. Vestibulum in bibendum mauris. "Nullam porta fringilla enim, a dictum orci consequat in." Mauris nec malesuada justo.""" tokens = tokenizer(text) assert len(tokens) > 5 @pytest.mark.parametrize("file_name", ["sun.txt"]) def test_tokenizer_handle_text_from_file(tokenizer, file_name): loc = ensure_path(__file__).parent / file_name with loc.open("r", encoding="utf8") as infile: text = infile.read() assert len(text) != 0 tokens = tokenizer(text) assert len(tokens) > 100 def test_tokenizer_suspected_freeing_strings(tokenizer): text1 = "Lorem dolor sit amet, consectetur adipiscing elit." text2 = "Lorem ipsum dolor sit amet, consectetur adipiscing elit." 
tokens1 = tokenizer(text1) tokens2 = tokenizer(text2) assert tokens1[0].text == "Lorem" assert tokens2[0].text == "Lorem" @pytest.mark.parametrize("text,tokens", [("lorem", [{"orth": "lo"}, {"orth": "rem"}])]) def test_tokenizer_add_special_case(tokenizer, text, tokens): tokenizer.add_special_case(text, tokens) doc = tokenizer(text) assert doc[0].text == tokens[0]["orth"] assert doc[1].text == tokens[1]["orth"] @pytest.mark.parametrize( "text,tokens", [ ("lorem", [{"orth": "lo"}, {"orth": "re"}]), ("lorem", [{"orth": "lo", "tag": "A"}, {"orth": "rem"}]), ], ) def test_tokenizer_validate_special_case(tokenizer, text, tokens): with pytest.raises(ValueError): tokenizer.add_special_case(text, tokens) @pytest.mark.parametrize( "text,tokens", [("lorem", [{"orth": "lo", "norm": "LO"}, {"orth": "rem"}])] ) def test_tokenizer_add_special_case_tag(text, tokens): vocab = Vocab() tokenizer = Tokenizer(vocab, {}, None, None, None) tokenizer.add_special_case(text, tokens) doc = tokenizer(text) assert doc[0].text == tokens[0]["orth"] assert doc[0].norm_ == tokens[0]["norm"] assert doc[1].text == tokens[1]["orth"] def test_tokenizer_special_cases_with_affixes(tokenizer): text = '(((_SPECIAL_ A/B, A/B-A/B")' tokenizer.add_special_case("_SPECIAL_", [{"orth": "_SPECIAL_"}]) tokenizer.add_special_case("A/B", [{"orth": "A/B"}]) doc = tokenizer(text) assert [token.text for token in doc] == [ "(", "(", "(", "_SPECIAL_", "A/B", ",", "A/B", "-", "A/B", '"', ")", ] def test_tokenizer_special_cases_with_affixes_preserve_spacy(): tokenizer = English().tokenizer # reset all special cases tokenizer.rules = {} # in-place modification (only merges) text = "''a'' " tokenizer.add_special_case("''", [{"ORTH": "''"}]) assert tokenizer(text).text == text # not in-place (splits and merges) tokenizer.add_special_case("ab", [{"ORTH": "a"}, {"ORTH": "b"}]) text = "ab ab ab ''ab ab'' ab'' ''ab" assert tokenizer(text).text == text def test_tokenizer_special_cases_with_period(tokenizer): text = "_SPECIAL_." 
tokenizer.add_special_case("_SPECIAL_", [{"orth": "_SPECIAL_"}]) doc = tokenizer(text) assert [token.text for token in doc] == ["_SPECIAL_", "."] def test_tokenizer_special_cases_idx(tokenizer): text = "the _ID'X_" tokenizer.add_special_case("_ID'X_", [{"orth": "_ID"}, {"orth": "'X_"}]) doc = tokenizer(text) assert doc[1].idx == 4 assert doc[2].idx == 7 def test_tokenizer_special_cases_spaces(tokenizer): assert [t.text for t in tokenizer("a b c")] == ["a", "b", "c"] tokenizer.add_special_case("a b c", [{"ORTH": "a b c"}]) assert [t.text for t in tokenizer("a b c")] == ["a b c"] def test_tokenizer_flush_cache(en_vocab): suffix_re = re.compile(r"[\.]$") tokenizer = Tokenizer( en_vocab, suffix_search=suffix_re.search, ) assert [t.text for t in tokenizer("a.")] == ["a", "."] tokenizer.suffix_search = None assert [t.text for t in tokenizer("a.")] == ["a."] def test_tokenizer_flush_specials(en_vocab): suffix_re = re.compile(r"[\.]$") rules = {"a a": [{"ORTH": "a a"}]} tokenizer1 = Tokenizer( en_vocab, suffix_search=suffix_re.search, rules=rules, ) assert [t.text for t in tokenizer1("a a.")] == ["a a", "."] tokenizer1.rules = {} assert [t.text for t in tokenizer1("a a.")] == ["a", "a", "."] def test_tokenizer_prefix_suffix_overlap_lookbehind(en_vocab): # the prefix and suffix matches overlap in the suffix lookbehind prefixes = ["a(?=.)"] suffixes = [r"(?<=\w)\.", r"(?<=a)\d+\."] prefix_re = compile_prefix_regex(prefixes) suffix_re = compile_suffix_regex(suffixes) tokenizer = Tokenizer( en_vocab, prefix_search=prefix_re.search, suffix_search=suffix_re.search, ) tokens = [t.text for t in tokenizer("a10.")] assert tokens == ["a", "10", "."] explain_tokens = [t[1] for t in tokenizer.explain("a10.")] assert tokens == explain_tokens def test_tokenizer_infix_prefix(en_vocab): # the prefix and suffix matches overlap in the suffix lookbehind infixes = ["±"] suffixes = ["%"] infix_re = compile_infix_regex(infixes) suffix_re = compile_suffix_regex(suffixes) tokenizer = Tokenizer( en_vocab, infix_finditer=infix_re.finditer, suffix_search=suffix_re.search, ) tokens = [t.text for t in tokenizer("±10%")] assert tokens == ["±10", "%"] explain_tokens = [t[1] for t in tokenizer.explain("±10%")] assert tokens == explain_tokens @pytest.mark.issue(10086) def test_issue10086(en_tokenizer): """Test special case works when part of infix substring.""" text = "No--don't see" # without heuristics: do n't en_tokenizer.faster_heuristics = False doc = en_tokenizer(text) assert "n't" in [w.text for w in doc] assert "do" in [w.text for w in doc] # with (default) heuristics: don't en_tokenizer.faster_heuristics = True doc = en_tokenizer(text) assert "don't" in [w.text for w in doc] def test_tokenizer_initial_special_case_explain(en_vocab): tokenizer = Tokenizer( en_vocab, token_match=re.compile("^id$").match, rules={ "id": [{"ORTH": "i"}, {"ORTH": "d"}], }, ) tokens = [t.text for t in tokenizer("id")] explain_tokens = [t[1] for t in tokenizer.explain("id")] assert tokens == explain_tokens
spaCy-master/spacy/tests/tokenizer/test_urls.py
import pytest from spacy.lang.tokenizer_exceptions import BASE_EXCEPTIONS URLS_BASIC = [ "http://www.nytimes.com/2016/04/20/us/politics/new-york-primary-preview.html?hp&action=click&pgtype=Homepage&clickSource=story-heading&module=a-lede-package-region&region=top-news&WT.nav=top-news&_r=0", "www.red-stars.com", "mailto:[email protected]", ] URLS_FULL = URLS_BASIC + [ "mailto:[email protected]", "mailto:[email protected]?subject=hi", "www.google.com?q=google", "http://foo.com/blah_(wikipedia)#cite-1", ] # URL SHOULD_MATCH and SHOULD_NOT_MATCH patterns courtesy of https://mathiasbynens.be/demo/url-regex URLS_SHOULD_MATCH = [ "http://foo.com/blah_blah", "http://BlahBlah.com/Blah_Blah", "http://foo.com/blah_blah/", "http://www.example.com/wpstyle/?p=364", "https://www.example.com/foo/?bar=baz&inga=42&quux", "http://userid:[email protected]:8080", "http://userid:[email protected]:8080/", "http://[email protected]", "http://[email protected]/", "http://[email protected]:8080", "http://[email protected]:8080/", "http://userid:[email protected]", "http://userid:[email protected]/", "http://142.42.1.1/", "http://142.42.1.1:8080/", "http://foo.com/blah_(wikipedia)#cite-1", "http://foo.com/blah_(wikipedia)_blah#cite-1", "http://foo.com/unicode_(✪)_in_parens", "http://foo.com/(something)?after=parens", "http://code.google.com/events/#&product=browser", "http://j.mp", "ftp://foo.bar/baz", "http://foo.bar/?q=Test%20URL-encoded%20stuff", "http://-.~_!$&'()*+,;=:%40:80%2f::::::@example.com", "http://1337.net", "http://a.b-c.de", "http://223.255.255.254", "http://a.b--c.de/", # this is a legit domain name see: https://gist.github.com/dperini/729294 comment on 9/9/2014 "ssh://[email protected]:12345/repository.git", "svn+ssh://[email protected]/path", pytest.param( "chrome://extensions/?id=mhjfbmdgcfjbbpaeojofohoefgiehjai", marks=pytest.mark.xfail(), ), pytest.param( "chrome-extension://mhjfbmdgcfjbbpaeojofohoefgiehjai", marks=pytest.mark.xfail() ), "http://foo.com/blah_blah_(wikipedia)", "http://foo.com/blah_blah_(wikipedia)_(again)", "http://www.foo.co.uk", "http://www.foo.co.uk/", "http://www.foo.co.uk/blah/blah", "http://⌘.ws", "http://⌘.ws/", "http://☺.damowmow.com/", "http://✪df.ws/123", "http://➡.ws/䨹", "http://مثال.إختبار", "http://例子.测试", "http://उदाहरण.परीक्षा", ] URLS_SHOULD_NOT_MATCH = [ "http://", "http://.", "http://..", "http://../", "http://?", "http://??", "http://??/", "http://#", "http://##", "http://##/", "http://foo.bar?q=Spaces should be encoded", "//", "//a", "///a", "///", "http:///a", "rdar://1234", "h://test", "http:// shouldfail.com", ":// should fail", "http://foo.bar/foo(bar)baz quux", "http://-error-.invalid/", "http://a.b-.co", "http://0.0.0.0", "http://10.1.1.0", "http://10.1.1.255", "http://224.1.1.1", "http://123.123.123", "http://3628126748", "http://.www.foo.bar/", "http://.www.foo.bar./", "http://10.1.1.1", "NASDAQ:GOOG", "http://-a.b.co", pytest.param("foo.com", marks=pytest.mark.xfail()), "http://1.1.1.1.1", "http://www.foo.bar./", ] # Punctuation we want to check is split away before the URL PREFIXES = ["(", '"', ">"] # Punctuation we want to check is split away after the URL SUFFIXES = ['"', ":", ">"] @pytest.mark.parametrize("url", URLS_SHOULD_MATCH) def test_should_match(en_tokenizer, url): assert en_tokenizer.url_match(url) is not None @pytest.mark.parametrize("url", URLS_SHOULD_NOT_MATCH) def test_should_not_match(en_tokenizer, url): assert en_tokenizer.url_match(url) is None @pytest.mark.parametrize("url", URLS_BASIC) def 
test_tokenizer_handles_simple_url(tokenizer, url): tokens = tokenizer(url) assert len(tokens) == 1 assert tokens[0].text == url @pytest.mark.parametrize("url", URLS_BASIC) def test_tokenizer_handles_simple_surround_url(tokenizer, url): tokens = tokenizer("(" + url + ")") assert len(tokens) == 3 assert tokens[0].text == "(" assert tokens[1].text == url assert tokens[2].text == ")" @pytest.mark.slow @pytest.mark.parametrize("prefix", PREFIXES) @pytest.mark.parametrize("url", URLS_FULL) def test_tokenizer_handles_prefixed_url(tokenizer, prefix, url): tokens = tokenizer(prefix + url) assert len(tokens) == 2 assert tokens[0].text == prefix assert tokens[1].text == url @pytest.mark.slow @pytest.mark.parametrize("suffix", SUFFIXES) @pytest.mark.parametrize("url", URLS_FULL) def test_tokenizer_handles_suffixed_url(tokenizer, url, suffix): tokens = tokenizer(url + suffix) assert len(tokens) == 2 assert tokens[0].text == url assert tokens[1].text == suffix @pytest.mark.slow @pytest.mark.parametrize("prefix", PREFIXES) @pytest.mark.parametrize("suffix", SUFFIXES) @pytest.mark.parametrize("url", URLS_FULL) def test_tokenizer_handles_surround_url(tokenizer, prefix, suffix, url): tokens = tokenizer(prefix + url + suffix) assert len(tokens) == 3 assert tokens[0].text == prefix assert tokens[1].text == url assert tokens[2].text == suffix @pytest.mark.slow @pytest.mark.parametrize("prefix1", PREFIXES) @pytest.mark.parametrize("prefix2", PREFIXES) @pytest.mark.parametrize("url", URLS_FULL) def test_tokenizer_handles_two_prefix_url(tokenizer, prefix1, prefix2, url): tokens = tokenizer(prefix1 + prefix2 + url) assert len(tokens) == 3 assert tokens[0].text == prefix1 assert tokens[1].text == prefix2 assert tokens[2].text == url @pytest.mark.slow @pytest.mark.parametrize("suffix1", SUFFIXES) @pytest.mark.parametrize("suffix2", SUFFIXES) @pytest.mark.parametrize("url", URLS_FULL) def test_tokenizer_handles_two_suffix_url(tokenizer, suffix1, suffix2, url): tokens = tokenizer(url + suffix1 + suffix2) if suffix1 + suffix2 in BASE_EXCEPTIONS: assert len(tokens) == 2 assert tokens[0].text == url assert tokens[1].text == suffix1 + suffix2 else: assert len(tokens) == 3 assert tokens[0].text == url assert tokens[1].text == suffix1 assert tokens[2].text == suffix2
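# --- Illustrative sketch (not part of the original test suite) ---
# What the URL tests above boil down to: the default English tokenizer's
# `url_match` keeps a matching URL as a single token, while punctuation
# around it is still split off as prefixes and suffixes. The example string
# is made up; the expected token split follows the surround-URL tests above:

from spacy.lang.en import English


def demo_url_tokenization():
    tokenizer = English().tokenizer
    assert tokenizer.url_match("http://example.com") is not None
    tokens = [t.text for t in tokenizer('("http://example.com")')]
    assert tokens == ["(", '"', "http://example.com", '"', ")"]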
spaCy-master/spacy/tests/tokenizer/test_whitespace.py
import pytest @pytest.mark.parametrize("text", ["lorem ipsum"]) def test_tokenizer_splits_single_space(tokenizer, text): tokens = tokenizer(text) assert len(tokens) == 2 @pytest.mark.parametrize("text", ["lorem ipsum"]) def test_tokenizer_splits_double_space(tokenizer, text): tokens = tokenizer(text) assert len(tokens) == 3 assert tokens[1].text == " " @pytest.mark.parametrize("text", ["lorem ipsum "]) def test_tokenizer_handles_double_trailing_ws(tokenizer, text): tokens = tokenizer(text) assert repr(tokens.text_with_ws) == repr(text) @pytest.mark.parametrize("text", ["lorem\nipsum"]) def test_tokenizer_splits_newline(tokenizer, text): tokens = tokenizer(text) assert len(tokens) == 3 assert tokens[1].text == "\n" @pytest.mark.parametrize("text", ["lorem \nipsum"]) def test_tokenizer_splits_newline_space(tokenizer, text): tokens = tokenizer(text) assert len(tokens) == 3 @pytest.mark.parametrize("text", ["lorem \nipsum"]) def test_tokenizer_splits_newline_double_space(tokenizer, text): tokens = tokenizer(text) assert len(tokens) == 3 @pytest.mark.parametrize("text", ["lorem \n ipsum"]) def test_tokenizer_splits_newline_space_wrap(tokenizer, text): tokens = tokenizer(text) assert len(tokens) == 3
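# --- Illustrative sketch (not part of the original test suite) ---
# The whitespace behaviour tested above in one runnable snippet: a single
# space only separates tokens, while any extra whitespace (a second space,
# a newline) becomes a whitespace token of its own:

from spacy.lang.en import English


def demo_whitespace_tokens():
    tokenizer = English().tokenizer
    assert [t.text for t in tokenizer("lorem ipsum")] == ["lorem", "ipsum"]
    assert [t.text for t in tokenizer("lorem  ipsum")] == ["lorem", " ", "ipsum"]
    assert [t.text for t in tokenizer("lorem\nipsum")] == ["lorem", "\n", "ipsum"]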
spaCy-master/spacy/tests/training/__init__.py
spaCy-master/spacy/tests/training/test_augmenters.py
import random from contextlib import contextmanager import pytest from spacy.lang.en import English from spacy.pipeline._parser_internals.nonproj import contains_cycle from spacy.tokens import Doc, DocBin, Span from spacy.training import Corpus, Example from spacy.training.augment import ( create_lower_casing_augmenter, create_orth_variants_augmenter, make_whitespace_variant, ) from ..util import make_tempdir @contextmanager def make_docbin(docs, name="roundtrip.spacy"): with make_tempdir() as tmpdir: output_file = tmpdir / name DocBin(docs=docs).to_disk(output_file) yield output_file @pytest.fixture def nlp(): return English() @pytest.fixture def doc(nlp): # fmt: off words = ["Sarah", "'s", "sister", "flew", "to", "Silicon", "Valley", "via", "London", "."] tags = ["NNP", "POS", "NN", "VBD", "IN", "NNP", "NNP", "IN", "NNP", "."] pos = ["PROPN", "PART", "NOUN", "VERB", "ADP", "PROPN", "PROPN", "ADP", "PROPN", "PUNCT"] ents = ["B-PERSON", "I-PERSON", "O", "", "O", "B-LOC", "I-LOC", "O", "B-GPE", "O"] cats = {"TRAVEL": 1.0, "BAKING": 0.0} # fmt: on doc = Doc(nlp.vocab, words=words, tags=tags, pos=pos, ents=ents) doc.cats = cats return doc @pytest.mark.filterwarnings("ignore::UserWarning") def test_make_orth_variants(nlp): single = [ {"tags": ["NFP"], "variants": ["…", "..."]}, {"tags": [":"], "variants": ["-", "—", "–", "--", "---", "——"]}, ] # fmt: off words = ["\n\n", "A", "\t", "B", "a", "b", "…", "...", "-", "—", "–", "--", "---", "——"] tags = ["_SP", "NN", "\t", "NN", "NN", "NN", "NFP", "NFP", ":", ":", ":", ":", ":", ":"] # fmt: on spaces = [True] * len(words) spaces[0] = False spaces[2] = False doc = Doc(nlp.vocab, words=words, spaces=spaces, tags=tags) augmenter = create_orth_variants_augmenter( level=0.2, lower=0.5, orth_variants={"single": single} ) with make_docbin([doc] * 10) as output_file: reader = Corpus(output_file, augmenter=augmenter) # Due to randomness, only test that it works without errors list(reader(nlp)) # check that the following settings lowercase everything augmenter = create_orth_variants_augmenter( level=1.0, lower=1.0, orth_variants={"single": single} ) with make_docbin([doc] * 10) as output_file: reader = Corpus(output_file, augmenter=augmenter) for example in reader(nlp): for token in example.reference: assert token.text == token.text.lower() # check that lowercasing is applied without tags doc = Doc(nlp.vocab, words=words, spaces=[True] * len(words)) augmenter = create_orth_variants_augmenter( level=1.0, lower=1.0, orth_variants={"single": single} ) with make_docbin([doc] * 10) as output_file: reader = Corpus(output_file, augmenter=augmenter) for example in reader(nlp): for ex_token, doc_token in zip(example.reference, doc): assert ex_token.text == doc_token.text.lower() # check that no lowercasing is applied with lower=0.0 doc = Doc(nlp.vocab, words=words, spaces=[True] * len(words)) augmenter = create_orth_variants_augmenter( level=1.0, lower=0.0, orth_variants={"single": single} ) with make_docbin([doc] * 10) as output_file: reader = Corpus(output_file, augmenter=augmenter) for example in reader(nlp): for ex_token, doc_token in zip(example.reference, doc): assert ex_token.text == doc_token.text def test_lowercase_augmenter(nlp, doc): augmenter = create_lower_casing_augmenter(level=1.0) with make_docbin([doc]) as output_file: reader = Corpus(output_file, augmenter=augmenter) corpus = list(reader(nlp)) eg = corpus[0] assert eg.reference.text == doc.text.lower() assert eg.predicted.text == doc.text.lower() ents = [(e.start, e.end, e.label) for e in doc.ents] 
assert [(e.start, e.end, e.label) for e in eg.reference.ents] == ents for ref_ent, orig_ent in zip(eg.reference.ents, doc.ents): assert ref_ent.text == orig_ent.text.lower() assert [t.ent_iob for t in doc] == [t.ent_iob for t in eg.reference] assert [t.pos_ for t in eg.reference] == [t.pos_ for t in doc] # check that augmentation works when lowercasing leads to different # predicted tokenization words = ["A", "B", "CCC."] doc = Doc(nlp.vocab, words=words) with make_docbin([doc]) as output_file: reader = Corpus(output_file, augmenter=augmenter) corpus = list(reader(nlp)) eg = corpus[0] assert eg.reference.text == doc.text.lower() assert eg.predicted.text == doc.text.lower() assert [t.text for t in eg.reference] == [t.lower() for t in words] assert [t.text for t in eg.predicted] == [ t.text for t in nlp.make_doc(doc.text.lower()) ] @pytest.mark.filterwarnings("ignore::UserWarning") def test_custom_data_augmentation(nlp, doc): def create_spongebob_augmenter(randomize: bool = False): def augment(nlp, example): text = example.text if randomize: ch = [c.lower() if random.random() < 0.5 else c.upper() for c in text] else: ch = [c.lower() if i % 2 else c.upper() for i, c in enumerate(text)] example_dict = example.to_dict() doc = nlp.make_doc("".join(ch)) example_dict["token_annotation"]["ORTH"] = [t.text for t in doc] yield example yield example.from_dict(doc, example_dict) return augment with make_docbin([doc]) as output_file: reader = Corpus(output_file, augmenter=create_spongebob_augmenter()) corpus = list(reader(nlp)) orig_text = "Sarah 's sister flew to Silicon Valley via London . " augmented = "SaRaH 's sIsTeR FlEw tO SiLiCoN VaLlEy vIa lOnDoN . " assert corpus[0].text == orig_text assert corpus[0].reference.text == orig_text assert corpus[0].predicted.text == orig_text assert corpus[1].text == augmented assert corpus[1].reference.text == augmented assert corpus[1].predicted.text == augmented ents = [(e.start, e.end, e.label) for e in doc.ents] assert [(e.start, e.end, e.label) for e in corpus[0].reference.ents] == ents assert [(e.start, e.end, e.label) for e in corpus[1].reference.ents] == ents def test_make_whitespace_variant(nlp): # fmt: off text = "They flew to New York City.\nThen they drove to Washington, D.C." 
words = ["They", "flew", "to", "New", "York", "City", ".", "\n", "Then", "they", "drove", "to", "Washington", ",", "D.C."] spaces = [True, True, True, True, True, False, False, False, True, True, True, True, False, True, False] tags = ["PRP", "VBD", "IN", "NNP", "NNP", "NNP", ".", "_SP", "RB", "PRP", "VBD", "IN", "NNP", ",", "NNP"] lemmas = ["they", "fly", "to", "New", "York", "City", ".", "\n", "then", "they", "drive", "to", "Washington", ",", "D.C."] heads = [1, 1, 1, 4, 5, 2, 1, 10, 10, 10, 10, 10, 11, 12, 12] deps = ["nsubj", "ROOT", "prep", "compound", "compound", "pobj", "punct", "dep", "advmod", "nsubj", "ROOT", "prep", "pobj", "punct", "appos"] ents = ["O", "", "O", "B-GPE", "I-GPE", "I-GPE", "O", "O", "O", "O", "O", "O", "B-GPE", "O", "B-GPE"] # fmt: on doc = Doc( nlp.vocab, words=words, spaces=spaces, tags=tags, lemmas=lemmas, heads=heads, deps=deps, ents=ents, ) assert doc.text == text example = Example(nlp.make_doc(text), doc) # whitespace is only added internally in entity spans mod_ex = make_whitespace_variant(nlp, example, " ", 3) assert mod_ex.reference.ents[0].text == "New York City" mod_ex = make_whitespace_variant(nlp, example, " ", 4) assert mod_ex.reference.ents[0].text == "New York City" mod_ex = make_whitespace_variant(nlp, example, " ", 5) assert mod_ex.reference.ents[0].text == "New York City" mod_ex = make_whitespace_variant(nlp, example, " ", 6) assert mod_ex.reference.ents[0].text == "New York City" # add a space at every possible position for i in range(len(doc) + 1): mod_ex = make_whitespace_variant(nlp, example, " ", i) assert mod_ex.reference[i].is_space # adds annotation when the doc contains at least partial annotation assert [t.tag_ for t in mod_ex.reference] == tags[:i] + ["_SP"] + tags[i:] assert [t.lemma_ for t in mod_ex.reference] == lemmas[:i] + [" "] + lemmas[i:] assert [t.dep_ for t in mod_ex.reference] == deps[:i] + ["dep"] + deps[i:] # does not add partial annotation if doc does not contain this feature assert not mod_ex.reference.has_annotation("POS") assert not mod_ex.reference.has_annotation("MORPH") # produces well-formed trees assert not contains_cycle([t.head.i for t in mod_ex.reference]) assert len(list(doc.sents)) == 2 if i == 0: assert mod_ex.reference[i].head.i == 1 else: assert mod_ex.reference[i].head.i == i - 1 # adding another space also produces well-formed trees for j in (3, 8, 10): mod_ex2 = make_whitespace_variant(nlp, mod_ex, "\t\t\n", j) assert not contains_cycle([t.head.i for t in mod_ex2.reference]) assert len(list(doc.sents)) == 2 assert mod_ex2.reference[j].head.i == j - 1 # entities are well-formed assert len(doc.ents) == len(mod_ex.reference.ents) # there is one token with missing entity information assert any(t.ent_iob == 0 for t in mod_ex.reference) for ent in mod_ex.reference.ents: assert not ent[0].is_space assert not ent[-1].is_space # no modifications if: # partial dependencies example.reference[0].dep_ = "" mod_ex = make_whitespace_variant(nlp, example, " ", 5) assert mod_ex.text == example.reference.text example.reference[0].dep_ = "nsubj" # reset # spans example.reference.spans["spans"] = [example.reference[0:5]] mod_ex = make_whitespace_variant(nlp, example, " ", 5) assert mod_ex.text == example.reference.text del example.reference.spans["spans"] # reset # links example.reference.ents = [Span(doc, 0, 2, label="ENT", kb_id="Q123")] mod_ex = make_whitespace_variant(nlp, example, " ", 5) assert mod_ex.text == example.reference.text
spaCy-master/spacy/tests/training/test_corpus.py
import tempfile from contextlib import contextmanager from pathlib import Path from typing import IO, Generator, Iterable, List, TextIO, Tuple import pytest from spacy.lang.en import English from spacy.training import Example, PlainTextCorpus from spacy.util import make_tempdir # Intentional newlines to check that they are skipped. PLAIN_TEXT_DOC = """ This is a doc. It contains two sentences. This is another doc. A third doc. """ PLAIN_TEXT_DOC_TOKENIZED = [ [ "This", "is", "a", "doc", ".", "It", "contains", "two", "sentences", ".", ], ["This", "is", "another", "doc", "."], ["A", "third", "doc", "."], ] @pytest.mark.parametrize("min_length", [0, 5]) @pytest.mark.parametrize("max_length", [0, 5]) def test_plain_text_reader(min_length, max_length): nlp = English() with _string_to_tmp_file(PLAIN_TEXT_DOC) as file_path: corpus = PlainTextCorpus( file_path, min_length=min_length, max_length=max_length ) check = [ doc for doc in PLAIN_TEXT_DOC_TOKENIZED if len(doc) >= min_length and (max_length == 0 or len(doc) <= max_length) ] reference, predicted = _examples_to_tokens(corpus(nlp)) assert reference == check assert predicted == check @contextmanager def _string_to_tmp_file(s: str) -> Generator[Path, None, None]: with make_tempdir() as d: file_path = Path(d) / "string.txt" with open(file_path, "w", encoding="utf-8") as f: f.write(s) yield file_path def _examples_to_tokens( examples: Iterable[Example], ) -> Tuple[List[List[str]], List[List[str]]]: reference = [] predicted = [] for eg in examples: reference.append([t.text for t in eg.reference]) predicted.append([t.text for t in eg.predicted]) return reference, predicted
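# --- Illustrative sketch (not part of the original test suite) ---
# PlainTextCorpus in isolation: each non-blank line of the file becomes one
# Example, with both the reference and predicted docs produced by
# nlp.make_doc(). The file name and contents below are made up:

from pathlib import Path

from spacy.lang.en import English
from spacy.training import PlainTextCorpus
from spacy.util import make_tempdir


def demo_plain_text_corpus():
    nlp = English()
    with make_tempdir() as d:
        path = Path(d) / "corpus.txt"
        path.write_text("One doc.\n\nAnother doc.\n", encoding="utf-8")
        examples = list(PlainTextCorpus(path)(nlp))
    assert len(examples) == 2  # the blank line is skipped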
spaCy-master/spacy/tests/training/test_logger.py
import pytest import spacy from spacy.training import loggers @pytest.fixture() def nlp(): nlp = spacy.blank("en") nlp.add_pipe("ner") return nlp @pytest.fixture() def info(): return { "losses": {"ner": 100}, "other_scores": {"ENTS_F": 0.85, "ENTS_P": 0.90, "ENTS_R": 0.80}, "epoch": 100, "step": 125, "score": 85, } def test_console_logger(nlp, info): console_logger = loggers.console_logger( progress_bar=True, console_output=True, output_file=None ) log_step, finalize = console_logger(nlp) log_step(info)
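# --- Illustrative sketch (not part of the original test suite) ---
# The logger protocol behind the test above: a logger factory returns a
# callable that, given the nlp object, yields (log_step, finalize). log_step
# consumes one info dict per training step; finalize is called once at the
# end of training:

import spacy
from spacy.training import loggers


def demo_console_logger():
    nlp = spacy.blank("en")
    nlp.add_pipe("ner")
    log_step, finalize = loggers.console_logger(progress_bar=False)(nlp)
    info = {
        "losses": {"ner": 100},
        "other_scores": {"ENTS_F": 0.85, "ENTS_P": 0.90, "ENTS_R": 0.80},
        "epoch": 1,
        "step": 10,
        "score": 85,
    }
    log_step(info)
    finalize()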
spaCy-master/spacy/tests/training/test_new_example.py
import pytest from spacy.tokens import Doc from spacy.training.example import Example from spacy.util import to_ternary_int from spacy.vocab import Vocab def test_Example_init_requires_doc_objects(): vocab = Vocab() with pytest.raises(TypeError): Example(None, None) with pytest.raises(TypeError): Example(Doc(vocab, words=["hi"]), None) with pytest.raises(TypeError): Example(None, Doc(vocab, words=["hi"])) def test_Example_from_dict_basic(): example = Example.from_dict( Doc(Vocab(), words=["hello", "world"]), {"words": ["hello", "world"]} ) assert isinstance(example.x, Doc) assert isinstance(example.y, Doc) @pytest.mark.parametrize( "annots", [{"words": ["ice", "cream"], "weirdannots": ["something", "such"]}] ) def test_Example_from_dict_invalid(annots): vocab = Vocab() predicted = Doc(vocab, words=annots["words"]) with pytest.raises(KeyError): Example.from_dict(predicted, annots) @pytest.mark.parametrize( "pred_words", [["ice", "cream"], ["icecream"], ["i", "ce", "cream"]] ) @pytest.mark.parametrize("annots", [{"words": ["icecream"], "tags": ["NN"]}]) def test_Example_from_dict_with_tags(pred_words, annots): vocab = Vocab() predicted = Doc(vocab, words=pred_words) example = Example.from_dict(predicted, annots) for i, token in enumerate(example.reference): assert token.tag_ == annots["tags"][i] aligned_tags = example.get_aligned("TAG", as_string=True) assert aligned_tags == ["NN" for _ in predicted] @pytest.mark.filterwarnings("ignore::UserWarning") def test_aligned_tags(): pred_words = ["Apply", "some", "sunscreen", "unless", "you", "can", "not"] gold_words = ["Apply", "some", "sun", "screen", "unless", "you", "cannot"] gold_tags = ["VERB", "DET", "NOUN", "NOUN", "SCONJ", "PRON", "VERB"] annots = {"words": gold_words, "tags": gold_tags} vocab = Vocab() predicted = Doc(vocab, words=pred_words) example1 = Example.from_dict(predicted, annots) aligned_tags1 = example1.get_aligned("TAG", as_string=True) assert aligned_tags1 == ["VERB", "DET", "NOUN", "SCONJ", "PRON", "VERB", "VERB"] # ensure that to_dict works correctly example2 = Example.from_dict(predicted, example1.to_dict()) aligned_tags2 = example2.get_aligned("TAG", as_string=True) assert aligned_tags2 == ["VERB", "DET", "NOUN", "SCONJ", "PRON", "VERB", "VERB"] def test_aligned_tags_multi(): pred_words = ["Applysome", "sunscreen", "unless", "you", "can", "not"] gold_words = ["Apply", "somesun", "screen", "unless", "you", "cannot"] gold_tags = ["VERB", "DET", "NOUN", "SCONJ", "PRON", "VERB"] annots = {"words": gold_words, "tags": gold_tags} vocab = Vocab() predicted = Doc(vocab, words=pred_words) example = Example.from_dict(predicted, annots) aligned_tags = example.get_aligned("TAG", as_string=True) assert aligned_tags == [None, None, "SCONJ", "PRON", "VERB", "VERB"] @pytest.mark.parametrize( "annots", [ { "words": ["I", "like", "London", "and", "Berlin", "."], "deps": ["nsubj", "ROOT", "dobj", "cc", "conj", "punct"], "heads": [1, 1, 1, 2, 2, 1], } ], ) def test_Example_from_dict_with_parse(annots): vocab = Vocab() predicted = Doc(vocab, words=annots["words"]) example = Example.from_dict(predicted, annots) for i, token in enumerate(example.reference): assert token.dep_ == annots["deps"][i] assert token.head.i == annots["heads"][i] @pytest.mark.parametrize( "annots", [ { "words": ["Sarah", "'s", "sister", "flew"], "morphs": [ "NounType=prop|Number=sing", "Poss=yes", "Number=sing", "Tense=past|VerbForm=fin", ], } ], ) def test_Example_from_dict_with_morphology(annots): vocab = Vocab() predicted = Doc(vocab, words=annots["words"]) example = 
Example.from_dict(predicted, annots) for i, token in enumerate(example.reference): assert str(token.morph) == annots["morphs"][i] @pytest.mark.parametrize( "annots", [ { "words": ["This", "is", "one", "sentence", "this", "is", "another"], "sent_starts": [1, False, 0, None, True, -1, -5.7], } ], ) def test_Example_from_dict_with_sent_start(annots): vocab = Vocab() predicted = Doc(vocab, words=annots["words"]) example = Example.from_dict(predicted, annots) assert len(list(example.reference.sents)) == 2 for i, token in enumerate(example.reference): if to_ternary_int(annots["sent_starts"][i]) == 1: assert token.is_sent_start is True elif to_ternary_int(annots["sent_starts"][i]) == 0: assert token.is_sent_start is None else: assert token.is_sent_start is False @pytest.mark.parametrize( "annots", [ { "words": ["This", "is", "a", "sentence"], "cats": {"cat1": 1.0, "cat2": 0.0, "cat3": 0.5}, } ], ) def test_Example_from_dict_with_cats(annots): vocab = Vocab() predicted = Doc(vocab, words=annots["words"]) example = Example.from_dict(predicted, annots) assert len(list(example.reference.cats)) == 3 assert example.reference.cats["cat1"] == 1.0 assert example.reference.cats["cat2"] == 0.0 assert example.reference.cats["cat3"] == 0.5 @pytest.mark.parametrize( "annots", [ { "words": ["I", "like", "New", "York", "and", "Berlin", "."], "entities": [(7, 15, "LOC"), (20, 26, "LOC")], } ], ) def test_Example_from_dict_with_entities(annots): vocab = Vocab() predicted = Doc(vocab, words=annots["words"]) example = Example.from_dict(predicted, annots) assert len(list(example.reference.ents)) == 2 # fmt: off assert [example.reference[i].ent_iob_ for i in range(7)] == ["O", "O", "B", "I", "O", "B", "O"] assert example.get_aligned("ENT_IOB") == [2, 2, 3, 1, 2, 3, 2] # fmt: on assert example.reference[2].ent_type_ == "LOC" assert example.reference[3].ent_type_ == "LOC" assert example.reference[5].ent_type_ == "LOC" def test_Example_from_dict_with_empty_entities(): annots = { "words": ["I", "like", "New", "York", "and", "Berlin", "."], "entities": [], } vocab = Vocab() predicted = Doc(vocab, words=annots["words"]) example = Example.from_dict(predicted, annots) # entities as empty list sets everything to O assert example.reference.has_annotation("ENT_IOB") assert len(list(example.reference.ents)) == 0 assert all(token.ent_iob_ == "O" for token in example.reference) # various unset/missing entities leaves entities unset annots["entities"] = None example = Example.from_dict(predicted, annots) assert not example.reference.has_annotation("ENT_IOB") annots.pop("entities", None) example = Example.from_dict(predicted, annots) assert not example.reference.has_annotation("ENT_IOB") @pytest.mark.parametrize( "annots", [ { "words": ["I", "like", "New", "York", "and", "Berlin", "."], "entities": [ (0, 4, "LOC"), (21, 27, "LOC"), ], # not aligned to token boundaries } ], ) def test_Example_from_dict_with_entities_invalid(annots): vocab = Vocab() predicted = Doc(vocab, words=annots["words"]) with pytest.warns(UserWarning): example = Example.from_dict(predicted, annots) assert len(list(example.reference.ents)) == 0 @pytest.mark.parametrize( "annots", [ { "words": ["I", "like", "New", "York", "and", "Berlin", "."], "entities": [ (7, 15, "LOC"), (11, 15, "LOC"), (20, 26, "LOC"), ], # overlapping } ], ) def test_Example_from_dict_with_entities_overlapping(annots): vocab = Vocab() predicted = Doc(vocab, words=annots["words"]) with pytest.raises(ValueError): Example.from_dict(predicted, annots) @pytest.mark.parametrize( "annots", [ { 
"words": ["I", "like", "New", "York", "and", "Berlin", "."], "spans": { "cities": [(7, 15, "LOC"), (20, 26, "LOC")], "people": [(0, 1, "PERSON")], }, } ], ) def test_Example_from_dict_with_spans(annots): vocab = Vocab() predicted = Doc(vocab, words=annots["words"]) example = Example.from_dict(predicted, annots) assert len(list(example.reference.ents)) == 0 assert len(list(example.reference.spans["cities"])) == 2 assert len(list(example.reference.spans["people"])) == 1 for span in example.reference.spans["cities"]: assert span.label_ == "LOC" for span in example.reference.spans["people"]: assert span.label_ == "PERSON" @pytest.mark.parametrize( "annots", [ { "words": ["I", "like", "New", "York", "and", "Berlin", "."], "spans": { "cities": [(7, 15, "LOC"), (11, 15, "LOC"), (20, 26, "LOC")], "people": [(0, 1, "PERSON")], }, } ], ) def test_Example_from_dict_with_spans_overlapping(annots): vocab = Vocab() predicted = Doc(vocab, words=annots["words"]) example = Example.from_dict(predicted, annots) assert len(list(example.reference.ents)) == 0 assert len(list(example.reference.spans["cities"])) == 3 assert len(list(example.reference.spans["people"])) == 1 for span in example.reference.spans["cities"]: assert span.label_ == "LOC" for span in example.reference.spans["people"]: assert span.label_ == "PERSON" @pytest.mark.parametrize( "annots", [ { "words": ["I", "like", "New", "York", "and", "Berlin", "."], "spans": [(0, 1, "PERSON")], }, { "words": ["I", "like", "New", "York", "and", "Berlin", "."], "spans": {"cities": (7, 15, "LOC")}, }, { "words": ["I", "like", "New", "York", "and", "Berlin", "."], "spans": {"cities": [7, 11]}, }, { "words": ["I", "like", "New", "York", "and", "Berlin", "."], "spans": {"cities": [[7]]}, }, ], ) def test_Example_from_dict_with_spans_invalid(annots): vocab = Vocab() predicted = Doc(vocab, words=annots["words"]) with pytest.raises(ValueError): Example.from_dict(predicted, annots) @pytest.mark.parametrize( "annots", [ { "words": ["I", "like", "New", "York", "and", "Berlin", "."], "entities": [(7, 15, "LOC"), (20, 26, "LOC")], "links": { (7, 15): {"Q60": 1.0, "Q64": 0.0}, (20, 26): {"Q60": 0.0, "Q64": 1.0}, }, } ], ) def test_Example_from_dict_with_links(annots): vocab = Vocab() predicted = Doc(vocab, words=annots["words"]) example = Example.from_dict(predicted, annots) assert example.reference[0].ent_kb_id_ == "" assert example.reference[1].ent_kb_id_ == "" assert example.reference[2].ent_kb_id_ == "Q60" assert example.reference[3].ent_kb_id_ == "Q60" assert example.reference[4].ent_kb_id_ == "" assert example.reference[5].ent_kb_id_ == "Q64" assert example.reference[6].ent_kb_id_ == "" @pytest.mark.parametrize( "annots", [ { "words": ["I", "like", "New", "York", "and", "Berlin", "."], "links": {(7, 14): {"Q7381115": 1.0, "Q2146908": 0.0}}, } ], ) def test_Example_from_dict_with_links_invalid(annots): vocab = Vocab() predicted = Doc(vocab, words=annots["words"]) with pytest.raises(ValueError): Example.from_dict(predicted, annots) def test_Example_from_dict_sentences(): vocab = Vocab() predicted = Doc(vocab, words=["One", "sentence", ".", "one", "more"]) annots = {"sent_starts": [1, 0, 0, 1, 0]} ex = Example.from_dict(predicted, annots) assert len(list(ex.reference.sents)) == 2 # this currently throws an error - bug or feature? 
# predicted = Doc(vocab, words=["One", "sentence", "not", "one", "more"]) # annots = {"sent_starts": [1, 0, 0, 0, 0]} # ex = Example.from_dict(predicted, annots) # assert len(list(ex.reference.sents)) == 1 predicted = Doc(vocab, words=["One", "sentence", "not", "one", "more"]) annots = {"sent_starts": [1, -1, 0, 0, 0]} ex = Example.from_dict(predicted, annots) assert len(list(ex.reference.sents)) == 1 def test_Example_missing_deps(): vocab = Vocab() words = ["I", "like", "London", "and", "Berlin", "."] deps = ["nsubj", "ROOT", "dobj", "cc", "conj", "punct"] heads = [1, 1, 1, 2, 2, 1] annots_head_only = {"words": words, "heads": heads} annots_head_dep = {"words": words, "heads": heads, "deps": deps} predicted = Doc(vocab, words=words) # when not providing deps, the head information is considered to be missing # in this case, the token's heads refer to themselves example_1 = Example.from_dict(predicted, annots_head_only) assert [t.head.i for t in example_1.reference] == [0, 1, 2, 3, 4, 5] # when providing deps, the head information is actually used example_2 = Example.from_dict(predicted, annots_head_dep) assert [t.head.i for t in example_2.reference] == heads def test_Example_missing_heads(): vocab = Vocab() words = ["I", "like", "London", "and", "Berlin", "."] deps = ["nsubj", "ROOT", "dobj", None, "conj", "punct"] heads = [1, 1, 1, None, 2, 1] annots = {"words": words, "heads": heads, "deps": deps} predicted = Doc(vocab, words=words) example = Example.from_dict(predicted, annots) parsed_heads = [t.head.i for t in example.reference] assert parsed_heads[0] == heads[0] assert parsed_heads[1] == heads[1] assert parsed_heads[2] == heads[2] assert parsed_heads[4] == heads[4] assert parsed_heads[5] == heads[5] expected = [True, True, True, False, True, True] assert [t.has_head() for t in example.reference] == expected # Ensure that the missing head doesn't create an artificial new sentence start expected = [True, False, False, False, False, False] assert example.get_aligned_sent_starts() == expected def test_Example_aligned_whitespace(en_vocab): words = ["a", " ", "b"] tags = ["A", "SPACE", "B"] predicted = Doc(en_vocab, words=words) reference = Doc(en_vocab, words=words, tags=tags) example = Example(predicted, reference) assert example.get_aligned("TAG", as_string=True) == tags @pytest.mark.issue("11260") def test_issue11260(): annots = { "words": ["I", "like", "New", "York", "."], "spans": { "cities": [(7, 15, "LOC", "")], "people": [(0, 1, "PERSON", "")], }, } vocab = Vocab() predicted = Doc(vocab, words=annots["words"]) example = Example.from_dict(predicted, annots) assert len(example.reference.spans["cities"]) == 1 assert len(example.reference.spans["people"]) == 1 output_dict = example.to_dict() assert "spans" in output_dict["doc_annotation"] assert output_dict["doc_annotation"]["spans"]["cities"] == annots["spans"]["cities"] assert output_dict["doc_annotation"]["spans"]["people"] == annots["spans"]["people"] output_example = Example.from_dict(predicted, output_dict) assert len(output_example.reference.spans["cities"]) == len( example.reference.spans["cities"] ) assert len(output_example.reference.spans["people"]) == len( example.reference.spans["people"] ) for span in example.reference.spans["cities"]: assert span.label_ == "LOC" assert span.text == "New York" assert span.start_char == 7 for span in example.reference.spans["people"]: assert span.label_ == "PERSON" assert span.text == "I" assert span.start_char == 0
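# --- Illustrative sketch (not part of the original test suite) ---
# The core pattern behind most tests in this file: the predicted Doc fixes
# the tokenization, the annotation dict supplies the gold values, and
# get_aligned() projects the gold values back onto the predicted tokens:

from spacy.tokens import Doc
from spacy.training import Example
from spacy.vocab import Vocab


def demo_example_from_dict():
    vocab = Vocab()
    predicted = Doc(vocab, words=["New", "York", "is", "big"])
    annots = {
        "words": ["New", "York", "is", "big"],
        "tags": ["NNP", "NNP", "VBZ", "JJ"],
    }
    example = Example.from_dict(predicted, annots)
    assert example.get_aligned("TAG", as_string=True) == annots["tags"]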
spaCy-master/spacy/tests/training/test_pretraining.py
from pathlib import Path import numpy as np import pytest import srsly from thinc.api import Config, get_current_ops from spacy import util from spacy.lang.en import English from spacy.language import DEFAULT_CONFIG_PATH, DEFAULT_CONFIG_PRETRAIN_PATH from spacy.ml.models.multi_task import create_pretrain_vectors from spacy.tokens import Doc, DocBin from spacy.training.initialize import init_nlp from spacy.training.loop import train from spacy.training.pretrain import pretrain from spacy.vectors import Vectors from spacy.vocab import Vocab from ..util import make_tempdir pretrain_string_listener = """ [nlp] lang = "en" pipeline = ["tok2vec", "tagger"] [components] [components.tok2vec] factory = "tok2vec" [components.tok2vec.model] @architectures = "spacy.HashEmbedCNN.v1" pretrained_vectors = null width = 342 depth = 4 window_size = 1 embed_size = 2000 maxout_pieces = 3 subword_features = true [components.tagger] factory = "tagger" [components.tagger.model] @architectures = "spacy.Tagger.v2" [components.tagger.model.tok2vec] @architectures = "spacy.Tok2VecListener.v1" width = ${components.tok2vec.model.width} [pretraining] max_epochs = 5 [training] max_epochs = 5 """ pretrain_string_internal = """ [nlp] lang = "en" pipeline = ["tagger"] [components] [components.tagger] factory = "tagger" [components.tagger.model] @architectures = "spacy.Tagger.v2" [components.tagger.model.tok2vec] @architectures = "spacy.HashEmbedCNN.v1" pretrained_vectors = null width = 342 depth = 4 window_size = 1 embed_size = 2000 maxout_pieces = 3 subword_features = true [pretraining] max_epochs = 5 [training] max_epochs = 5 """ pretrain_string_vectors = """ [nlp] lang = "en" pipeline = ["tok2vec", "tagger"] [components] [components.tok2vec] factory = "tok2vec" [components.tok2vec.model] @architectures = "spacy.HashEmbedCNN.v1" pretrained_vectors = null width = 342 depth = 4 window_size = 1 embed_size = 2000 maxout_pieces = 3 subword_features = true [components.tagger] factory = "tagger" [components.tagger.model] @architectures = "spacy.Tagger.v2" [components.tagger.model.tok2vec] @architectures = "spacy.Tok2VecListener.v1" width = ${components.tok2vec.model.width} [pretraining] max_epochs = 5 [pretraining.objective] @architectures = spacy.PretrainVectors.v1 maxout_pieces = 3 hidden_size = 300 loss = cosine [training] max_epochs = 5 """ CHAR_OBJECTIVES = [ {}, {"@architectures": "spacy.PretrainCharacters.v1"}, { "@architectures": "spacy.PretrainCharacters.v1", "maxout_pieces": 5, "hidden_size": 42, "n_characters": 2, }, ] VECTOR_OBJECTIVES = [ { "@architectures": "spacy.PretrainVectors.v1", "maxout_pieces": 3, "hidden_size": 300, "loss": "cosine", }, { "@architectures": "spacy.PretrainVectors.v1", "maxout_pieces": 2, "hidden_size": 200, "loss": "L2", }, ] def test_pretraining_default(): """Test that pretraining defaults to a character objective""" config = Config().from_str(pretrain_string_internal) nlp = util.load_model_from_config(config, auto_fill=True, validate=False) filled = nlp.config pretrain_config = util.load_config(DEFAULT_CONFIG_PRETRAIN_PATH) filled = pretrain_config.merge(filled) assert "PretrainCharacters" in filled["pretraining"]["objective"]["@architectures"] @pytest.mark.parametrize("objective", CHAR_OBJECTIVES) @pytest.mark.parametrize("skip_last", (True, False)) def test_pretraining_tok2vec_characters(objective, skip_last): """Test that pretraining works with the character objective""" config = Config().from_str(pretrain_string_listener) config["pretraining"]["objective"] = objective nlp = 
util.load_model_from_config(config, auto_fill=True, validate=False)
    filled = nlp.config
    pretrain_config = util.load_config(DEFAULT_CONFIG_PRETRAIN_PATH)
    filled = pretrain_config.merge(filled)
    with make_tempdir() as tmp_dir:
        file_path = write_sample_jsonl(tmp_dir)
        filled["paths"]["raw_text"] = file_path
        filled = filled.interpolate()
        assert filled["pretraining"]["component"] == "tok2vec"
        pretrain(filled, tmp_dir, skip_last=skip_last)
        assert Path(tmp_dir / "model0.bin").exists()
        assert Path(tmp_dir / "model4.bin").exists()
        assert not Path(tmp_dir / "model5.bin").exists()
        if skip_last:
            assert not Path(tmp_dir / "model-last.bin").exists()
        else:
            assert Path(tmp_dir / "model-last.bin").exists()


@pytest.mark.parametrize("objective", VECTOR_OBJECTIVES)
def test_pretraining_tok2vec_vectors_fail(objective):
    """Test that pretraining doesn't work with the vectors objective if there are no static vectors"""
    config = Config().from_str(pretrain_string_listener)
    config["pretraining"]["objective"] = objective
    nlp = util.load_model_from_config(config, auto_fill=True, validate=False)
    filled = nlp.config
    pretrain_config = util.load_config(DEFAULT_CONFIG_PRETRAIN_PATH)
    filled = pretrain_config.merge(filled)
    with make_tempdir() as tmp_dir:
        file_path = write_sample_jsonl(tmp_dir)
        filled["paths"]["raw_text"] = file_path
        filled = filled.interpolate()
        assert filled["initialize"]["vectors"] is None
        with pytest.raises(ValueError):
            pretrain(filled, tmp_dir)


@pytest.mark.parametrize("objective", VECTOR_OBJECTIVES)
def test_pretraining_tok2vec_vectors(objective):
    """Test that pretraining works with the vectors objective and static vectors defined"""
    config = Config().from_str(pretrain_string_listener)
    config["pretraining"]["objective"] = objective
    nlp = util.load_model_from_config(config, auto_fill=True, validate=False)
    filled = nlp.config
    pretrain_config = util.load_config(DEFAULT_CONFIG_PRETRAIN_PATH)
    filled = pretrain_config.merge(filled)
    with make_tempdir() as tmp_dir:
        file_path = write_sample_jsonl(tmp_dir)
        filled["paths"]["raw_text"] = file_path
        nlp_path = write_vectors_model(tmp_dir)
        filled["initialize"]["vectors"] = nlp_path
        filled = filled.interpolate()
        pretrain(filled, tmp_dir)


@pytest.mark.parametrize("config", [pretrain_string_internal, pretrain_string_listener])
def test_pretraining_tagger_tok2vec(config):
    """Test pretraining of the tagger's tok2vec layer (internal or via a listener)"""
    config = Config().from_str(config)
    nlp = util.load_model_from_config(config, auto_fill=True, validate=False)
    filled = nlp.config
    pretrain_config = util.load_config(DEFAULT_CONFIG_PRETRAIN_PATH)
    filled = pretrain_config.merge(filled)
    with make_tempdir() as tmp_dir:
        file_path = write_sample_jsonl(tmp_dir)
        filled["paths"]["raw_text"] = file_path
        filled["pretraining"]["component"] = "tagger"
        filled["pretraining"]["layer"] = "tok2vec"
        filled = filled.interpolate()
        pretrain(filled, tmp_dir)
        assert Path(tmp_dir / "model0.bin").exists()
        assert Path(tmp_dir / "model4.bin").exists()
        assert Path(tmp_dir / "model-last.bin").exists()
        assert not Path(tmp_dir / "model5.bin").exists()


def test_pretraining_tagger():
    """Test that pretraining the tagger itself throws an error (it is not an appropriate tok2vec layer)"""
    config = Config().from_str(pretrain_string_internal)
    nlp = util.load_model_from_config(config, auto_fill=True, validate=False)
    filled = nlp.config
    pretrain_config = util.load_config(DEFAULT_CONFIG_PRETRAIN_PATH)
    filled = pretrain_config.merge(filled)
    with make_tempdir() as tmp_dir:
        file_path = write_sample_jsonl(tmp_dir)
filled["paths"]["raw_text"] = file_path filled["pretraining"]["component"] = "tagger" filled = filled.interpolate() with pytest.raises(ValueError): pretrain(filled, tmp_dir) def test_pretraining_training(): """Test that training can use a pretrained Tok2Vec model""" config = Config().from_str(pretrain_string_internal) nlp = util.load_model_from_config(config, auto_fill=True, validate=False) filled = nlp.config pretrain_config = util.load_config(DEFAULT_CONFIG_PRETRAIN_PATH) filled = pretrain_config.merge(filled) train_config = util.load_config(DEFAULT_CONFIG_PATH) filled = train_config.merge(filled) with make_tempdir() as tmp_dir: pretrain_dir = tmp_dir / "pretrain" pretrain_dir.mkdir() file_path = write_sample_jsonl(pretrain_dir) filled["paths"]["raw_text"] = file_path filled["pretraining"]["component"] = "tagger" filled["pretraining"]["layer"] = "tok2vec" train_dir = tmp_dir / "train" train_dir.mkdir() train_path, dev_path = write_sample_training(train_dir) filled["paths"]["train"] = train_path filled["paths"]["dev"] = dev_path filled = filled.interpolate() P = filled["pretraining"] nlp_base = init_nlp(filled) model_base = ( nlp_base.get_pipe(P["component"]).model.get_ref(P["layer"]).get_ref("embed") ) embed_base = None for node in model_base.walk(): if node.name == "hashembed": embed_base = node pretrain(filled, pretrain_dir) pretrained_model = Path(pretrain_dir / "model3.bin") assert pretrained_model.exists() filled["initialize"]["init_tok2vec"] = str(pretrained_model) nlp = init_nlp(filled) model = nlp.get_pipe(P["component"]).model.get_ref(P["layer"]).get_ref("embed") embed = None for node in model.walk(): if node.name == "hashembed": embed = node # ensure that the tok2vec weights are actually changed by the pretraining assert np.any(np.not_equal(embed.get_param("E"), embed_base.get_param("E"))) train(nlp, train_dir) def write_sample_jsonl(tmp_dir): data = [ { "meta": {"id": "1"}, "text": "This is the best TV you'll ever buy!", "cats": {"pos": 1, "neg": 0}, }, { "meta": {"id": "2"}, "text": "I wouldn't buy this again.", "cats": {"pos": 0, "neg": 1}, }, ] file_path = f"{tmp_dir}/text.jsonl" srsly.write_jsonl(file_path, data) return file_path def write_sample_training(tmp_dir): words = ["The", "players", "start", "."] tags = ["DT", "NN", "VBZ", "."] doc = Doc(English().vocab, words=words, tags=tags) doc_bin = DocBin() doc_bin.add(doc) train_path = f"{tmp_dir}/train.spacy" dev_path = f"{tmp_dir}/dev.spacy" doc_bin.to_disk(train_path) doc_bin.to_disk(dev_path) return train_path, dev_path def write_vectors_model(tmp_dir): import numpy vocab = Vocab() vector_data = { "dog": numpy.random.uniform(-1, 1, (300,)), "cat": numpy.random.uniform(-1, 1, (300,)), "orange": numpy.random.uniform(-1, 1, (300,)), } for word, vector in vector_data.items(): vocab.set_vector(word, vector) nlp_path = tmp_dir / "vectors_model" nlp = English(vocab) nlp.to_disk(nlp_path) return str(nlp_path) def test_pretrain_default_vectors(): nlp = English() nlp.add_pipe("tok2vec") nlp.initialize() # default vectors are supported nlp.vocab.vectors = Vectors(shape=(10, 10)) create_pretrain_vectors(1, 1, "cosine")(nlp.vocab, nlp.get_pipe("tok2vec").model) # floret vectors are supported nlp.vocab.vectors = Vectors( data=get_current_ops().xp.zeros((10, 10)), mode="floret", hash_count=1 ) create_pretrain_vectors(1, 1, "cosine")(nlp.vocab, nlp.get_pipe("tok2vec").model) # error for no vectors with pytest.raises(ValueError, match="E875"): nlp.vocab.vectors = Vectors() create_pretrain_vectors(1, 1, "cosine")( nlp.vocab, 
nlp.get_pipe("tok2vec").model )
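# --- Illustrative sketch (not part of the original test suite) ---
# The config plumbing these tests repeat: auto-fill a user config, then merge
# it into the default pretraining config so that [pretraining] receives its
# defaults (e.g. the character objective when none is given). The helper
# name below is made up:

from thinc.api import Config

from spacy import util
from spacy.language import DEFAULT_CONFIG_PRETRAIN_PATH


def demo_fill_pretrain_config(config_str: str) -> str:
    config = Config().from_str(config_str)
    nlp = util.load_model_from_config(config, auto_fill=True, validate=False)
    filled = util.load_config(DEFAULT_CONFIG_PRETRAIN_PATH).merge(nlp.config)
    # e.g. "spacy.PretrainCharacters.v1" when the config sets no objective
    return filled["pretraining"]["objective"]["@architectures"]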
spaCy-master/spacy/tests/training/test_readers.py
from typing import Callable, Dict, Iterable import pytest from thinc.api import Config, fix_random_seed from spacy import Language from spacy.schemas import ConfigSchemaTraining from spacy.training import Example from spacy.util import load_model_from_config, registry, resolve_dot_names def test_readers(): config_string = """ [training] [corpora] @readers = "myreader.v1" [nlp] lang = "en" pipeline = ["tok2vec", "textcat"] [components] [components.tok2vec] factory = "tok2vec" [components.textcat] factory = "textcat" """ @registry.readers("myreader.v1") def myreader() -> Dict[str, Callable[[Language], Iterable[Example]]]: annots = {"cats": {"POS": 1.0, "NEG": 0.0}} def reader(nlp: Language): doc = nlp.make_doc(f"This is an example") return [Example.from_dict(doc, annots)] return {"train": reader, "dev": reader, "extra": reader, "something": reader} config = Config().from_str(config_string) nlp = load_model_from_config(config, auto_fill=True) T = registry.resolve( nlp.config.interpolate()["training"], schema=ConfigSchemaTraining ) dot_names = [T["train_corpus"], T["dev_corpus"]] train_corpus, dev_corpus = resolve_dot_names(nlp.config, dot_names) assert isinstance(train_corpus, Callable) optimizer = T["optimizer"] # simulate a training loop nlp.initialize(lambda: train_corpus(nlp), sgd=optimizer) for example in train_corpus(nlp): nlp.update([example], sgd=optimizer) scores = nlp.evaluate(list(dev_corpus(nlp))) assert scores["cats_macro_auc"] == 0.0 # ensure the pipeline runs doc = nlp("Quick test") assert doc.cats corpora = {"corpora": nlp.config.interpolate()["corpora"]} extra_corpus = registry.resolve(corpora)["corpora"]["extra"] assert isinstance(extra_corpus, Callable) @pytest.mark.slow @pytest.mark.parametrize( "reader,additional_config", [ ("ml_datasets.imdb_sentiment.v1", {"train_limit": 10, "dev_limit": 10}), ("ml_datasets.dbpedia.v1", {"train_limit": 10, "dev_limit": 10}), ("ml_datasets.cmu_movies.v1", {"limit": 10, "freq_cutoff": 200, "split": 0.8}), ], ) def test_cat_readers(reader, additional_config): nlp_config_string = """ [training] seed = 0 [training.score_weights] cats_macro_auc = 1.0 [corpora] @readers = "PLACEHOLDER" [nlp] lang = "en" pipeline = ["tok2vec", "textcat_multilabel"] [components] [components.tok2vec] factory = "tok2vec" [components.textcat_multilabel] factory = "textcat_multilabel" """ config = Config().from_str(nlp_config_string) fix_random_seed(config["training"]["seed"]) config["corpora"]["@readers"] = reader config["corpora"].update(additional_config) nlp = load_model_from_config(config, auto_fill=True) T = registry.resolve(nlp.config["training"], schema=ConfigSchemaTraining) dot_names = [T["train_corpus"], T["dev_corpus"]] train_corpus, dev_corpus = resolve_dot_names(nlp.config, dot_names) optimizer = T["optimizer"] # simulate a training loop nlp.initialize(lambda: train_corpus(nlp), sgd=optimizer) for example in train_corpus(nlp): assert example.y.cats # this shouldn't fail if each training example has at least one positive label assert sorted(list(set(example.y.cats.values()))) == [0.0, 1.0] nlp.update([example], sgd=optimizer) # simulate performance benchmark on dev corpus dev_examples = list(dev_corpus(nlp)) for example in dev_examples: # this shouldn't fail if each dev example has at least one positive label assert sorted(list(set(example.y.cats.values()))) == [0.0, 1.0] scores = nlp.evaluate(dev_examples) assert scores["cats_score"] # ensure the pipeline runs doc = nlp("Quick test") assert doc.cats
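# --- Illustrative sketch (not part of the original test suite) ---
# The registry pattern tested above, reduced to its skeleton: a function
# registered under @readers returns a dict of named corpora, each a callable
# nlp -> iterable of Examples, which the [corpora] config block resolves.
# The registry name "toy_reader.v1" is made up:

from typing import Callable, Dict, Iterable

from spacy import Language
from spacy.training import Example
from spacy.util import registry


@registry.readers("toy_reader.v1")
def toy_reader() -> Dict[str, Callable[[Language], Iterable[Example]]]:
    def read(nlp: Language) -> Iterable[Example]:
        doc = nlp.make_doc("This is an example")
        yield Example.from_dict(doc, {"cats": {"POS": 1.0, "NEG": 0.0}})

    return {"train": read, "dev": read}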
spaCy-master/spacy/tests/training/test_rehearse.py
from typing import List import pytest import spacy from spacy.training import Example TRAIN_DATA = [ ( "Who is Kofi Annan?", { "entities": [(7, 18, "PERSON")], "tags": ["PRON", "AUX", "PROPN", "PRON", "PUNCT"], "heads": [1, 1, 3, 1, 1], "deps": ["attr", "ROOT", "compound", "nsubj", "punct"], "morphs": [ "", "Mood=Ind|Number=Sing|Person=3|Tense=Pres|VerbForm=Fin", "Number=Sing", "Number=Sing", "PunctType=Peri", ], "cats": {"question": 1.0}, }, ), ( "Who is Steve Jobs?", { "entities": [(7, 17, "PERSON")], "tags": ["PRON", "AUX", "PROPN", "PRON", "PUNCT"], "heads": [1, 1, 3, 1, 1], "deps": ["attr", "ROOT", "compound", "nsubj", "punct"], "morphs": [ "", "Mood=Ind|Number=Sing|Person=3|Tense=Pres|VerbForm=Fin", "Number=Sing", "Number=Sing", "PunctType=Peri", ], "cats": {"question": 1.0}, }, ), ( "Bob is a nice person.", { "entities": [(0, 3, "PERSON")], "tags": ["PROPN", "AUX", "DET", "ADJ", "NOUN", "PUNCT"], "heads": [1, 1, 4, 4, 1, 1], "deps": ["nsubj", "ROOT", "det", "amod", "attr", "punct"], "morphs": [ "Number=Sing", "Mood=Ind|Number=Sing|Person=3|Tense=Pres|VerbForm=Fin", "Definite=Ind|PronType=Art", "Degree=Pos", "Number=Sing", "PunctType=Peri", ], "cats": {"statement": 1.0}, }, ), ( "Hi Anil, how are you?", { "entities": [(3, 7, "PERSON")], "tags": ["INTJ", "PROPN", "PUNCT", "ADV", "AUX", "PRON", "PUNCT"], "deps": ["intj", "npadvmod", "punct", "advmod", "ROOT", "nsubj", "punct"], "heads": [4, 0, 4, 4, 4, 4, 4], "morphs": [ "", "Number=Sing", "PunctType=Comm", "", "Mood=Ind|Tense=Pres|VerbForm=Fin", "Case=Nom|Person=2|PronType=Prs", "PunctType=Peri", ], "cats": {"greeting": 1.0, "question": 1.0}, }, ), ( "I like London and Berlin.", { "entities": [(7, 13, "LOC"), (18, 24, "LOC")], "tags": ["PROPN", "VERB", "PROPN", "CCONJ", "PROPN", "PUNCT"], "deps": ["nsubj", "ROOT", "dobj", "cc", "conj", "punct"], "heads": [1, 1, 1, 2, 2, 1], "morphs": [ "Case=Nom|Number=Sing|Person=1|PronType=Prs", "Tense=Pres|VerbForm=Fin", "Number=Sing", "ConjType=Cmp", "Number=Sing", "PunctType=Peri", ], "cats": {"statement": 1.0}, }, ), ] REHEARSE_DATA = [ ( "Hi Anil", { "entities": [(3, 7, "PERSON")], "tags": ["INTJ", "PROPN"], "deps": ["ROOT", "npadvmod"], "heads": [0, 0], "morphs": ["", "Number=Sing"], "cats": {"greeting": 1.0}, }, ), ( "Hi Ravish, how you doing?", { "entities": [(3, 9, "PERSON")], "tags": ["INTJ", "PROPN", "PUNCT", "ADV", "AUX", "PRON", "PUNCT"], "deps": ["intj", "ROOT", "punct", "advmod", "nsubj", "advcl", "punct"], "heads": [1, 1, 1, 5, 5, 1, 1], "morphs": [ "", "VerbForm=Inf", "PunctType=Comm", "", "Case=Nom|Person=2|PronType=Prs", "Aspect=Prog|Tense=Pres|VerbForm=Part", "PunctType=Peri", ], "cats": {"greeting": 1.0, "question": 1.0}, }, ), # UTENSIL new label ( "Natasha bought new forks.", { "entities": [(0, 7, "PERSON"), (19, 24, "UTENSIL")], "tags": ["PROPN", "VERB", "ADJ", "NOUN", "PUNCT"], "deps": ["nsubj", "ROOT", "amod", "dobj", "punct"], "heads": [1, 1, 3, 1, 1], "morphs": [ "Number=Sing", "Tense=Past|VerbForm=Fin", "Degree=Pos", "Number=Plur", "PunctType=Peri", ], "cats": {"statement": 1.0}, }, ), ] def _add_ner_label(ner, data): for _, annotations in data: for ent in annotations["entities"]: ner.add_label(ent[2]) def _add_tagger_label(tagger, data): for _, annotations in data: for tag in annotations["tags"]: tagger.add_label(tag) def _add_parser_label(parser, data): for _, annotations in data: for dep in annotations["deps"]: parser.add_label(dep) def _add_textcat_label(textcat, data): for _, annotations in data: for cat in annotations["cats"]: textcat.add_label(cat) def 
_optimize(nlp, component: str, data: List, rehearse: bool): """Run either train or rehearse.""" pipe = nlp.get_pipe(component) if component == "ner": _add_ner_label(pipe, data) elif component == "tagger": _add_tagger_label(pipe, data) elif component == "parser": _add_parser_label(pipe, data) elif component == "textcat_multilabel": _add_textcat_label(pipe, data) else: raise NotImplementedError if rehearse: optimizer = nlp.resume_training() else: optimizer = nlp.initialize() for _ in range(5): for text, annotation in data: doc = nlp.make_doc(text) example = Example.from_dict(doc, annotation) if rehearse: nlp.rehearse([example], sgd=optimizer) else: nlp.update([example], sgd=optimizer) return nlp @pytest.mark.parametrize("component", ["ner", "tagger", "parser", "textcat_multilabel"]) def test_rehearse(component): nlp = spacy.blank("en") nlp.add_pipe(component) nlp = _optimize(nlp, component, TRAIN_DATA, False) _optimize(nlp, component, REHEARSE_DATA, True)
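# --- Illustrative sketch (not part of the original test suite) ---
# The rehearse workflow driven by _optimize() above, in miniature: train a
# component, then resume training and call nlp.rehearse() with examples from
# the original data to counter catastrophic forgetting while updating on new
# data:

import spacy
from spacy.training import Example


def demo_rehearse():
    nlp = spacy.blank("en")
    ner = nlp.add_pipe("ner")
    ner.add_label("PERSON")
    optimizer = nlp.initialize()
    old = Example.from_dict(
        nlp.make_doc("Who is Kofi Annan?"), {"entities": [(7, 17, "PERSON")]}
    )
    nlp.update([old], sgd=optimizer)
    # later: adapt to new data while rehearsing on the old examples
    optimizer = nlp.resume_training()
    new = Example.from_dict(
        nlp.make_doc("Hi Anil"), {"entities": [(3, 7, "PERSON")]}
    )
    nlp.update([new], sgd=optimizer)
    nlp.rehearse([old], sgd=optimizer)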
spaCy-master/spacy/tests/training/test_training.py
import random import numpy import pytest import srsly from thinc.api import Adam, compounding import spacy from spacy.lang.en import English from spacy.tokens import Doc, DocBin from spacy.training import ( Alignment, Corpus, Example, biluo_tags_to_offsets, biluo_tags_to_spans, docs_to_json, iob_to_biluo, offsets_to_biluo_tags, ) from spacy.training.align import get_alignments from spacy.training.alignment_array import AlignmentArray from spacy.training.converters import json_to_docs from spacy.training.loop import train_while_improving from spacy.util import ( get_words_and_spaces, load_config_from_str, load_model_from_path, minibatch, ) from ..util import make_tempdir @pytest.fixture def doc(): nlp = English() # make sure we get a new vocab every time # fmt: off words = ["Sarah", "'s", "sister", "flew", "to", "Silicon", "Valley", "via", "London", "."] tags = ["NNP", "POS", "NN", "VBD", "IN", "NNP", "NNP", "IN", "NNP", "."] pos = ["PROPN", "PART", "NOUN", "VERB", "ADP", "PROPN", "PROPN", "ADP", "PROPN", "PUNCT"] morphs = ["NounType=prop|Number=sing", "Poss=yes", "Number=sing", "Tense=past|VerbForm=fin", "", "NounType=prop|Number=sing", "NounType=prop|Number=sing", "", "NounType=prop|Number=sing", "PunctType=peri"] # head of '.' is intentionally nonprojective for testing heads = [2, 0, 3, 3, 3, 6, 4, 3, 7, 5] deps = ["poss", "case", "nsubj", "ROOT", "prep", "compound", "pobj", "prep", "pobj", "punct"] lemmas = ["Sarah", "'s", "sister", "fly", "to", "Silicon", "Valley", "via", "London", "."] ents = ["O"] * len(words) ents[0] = "B-PERSON" ents[1] = "I-PERSON" ents[5] = "B-LOC" ents[6] = "I-LOC" ents[8] = "B-GPE" cats = {"TRAVEL": 1.0, "BAKING": 0.0} # fmt: on doc = Doc( nlp.vocab, words=words, tags=tags, pos=pos, morphs=morphs, heads=heads, deps=deps, lemmas=lemmas, ents=ents, ) doc.cats = cats return doc @pytest.fixture() def merged_dict(): return { "ids": [1, 2, 3, 4, 5, 6, 7], "words": ["Hi", "there", "everyone", "It", "is", "just", "me"], "spaces": [True, True, True, True, True, True, False], "tags": ["INTJ", "ADV", "PRON", "PRON", "AUX", "ADV", "PRON"], "sent_starts": [1, 0, 0, 1, 0, 0, 0], } @pytest.fixture def vocab(): nlp = English() return nlp.vocab @pytest.mark.issue(999) def test_issue999(): """Test that adding entities and resuming training works passably OK. There are two issues here: 1) We have to re-add labels. This isn't very nice. 2) There's no way to set the learning rate for the weight update, so we end up out-of-scale, causing it to learn too fast. 
""" TRAIN_DATA = [ ["hey", []], ["howdy", []], ["hey there", []], ["hello", []], ["hi", []], ["i'm looking for a place to eat", []], ["i'm looking for a place in the north of town", [(31, 36, "LOCATION")]], ["show me chinese restaurants", [(8, 15, "CUISINE")]], ["show me chines restaurants", [(8, 14, "CUISINE")]], ] nlp = English() ner = nlp.add_pipe("ner") for _, offsets in TRAIN_DATA: for start, end, label in offsets: ner.add_label(label) nlp.initialize() for itn in range(20): random.shuffle(TRAIN_DATA) for raw_text, entity_offsets in TRAIN_DATA: example = Example.from_dict( nlp.make_doc(raw_text), {"entities": entity_offsets} ) nlp.update([example]) with make_tempdir() as model_dir: nlp.to_disk(model_dir) nlp2 = load_model_from_path(model_dir) for raw_text, entity_offsets in TRAIN_DATA: doc = nlp2(raw_text) ents = {(ent.start_char, ent.end_char): ent.label_ for ent in doc.ents} for start, end, label in entity_offsets: if (start, end) in ents: assert ents[(start, end)] == label break else: if entity_offsets: raise Exception(ents) @pytest.mark.issue(4402) def test_issue4402(): json_data = { "id": 0, "paragraphs": [ { "raw": "How should I cook bacon in an oven?\nI've heard of people cooking bacon in an oven.", "sentences": [ { "tokens": [ {"id": 0, "orth": "How", "ner": "O"}, {"id": 1, "orth": "should", "ner": "O"}, {"id": 2, "orth": "I", "ner": "O"}, {"id": 3, "orth": "cook", "ner": "O"}, {"id": 4, "orth": "bacon", "ner": "O"}, {"id": 5, "orth": "in", "ner": "O"}, {"id": 6, "orth": "an", "ner": "O"}, {"id": 7, "orth": "oven", "ner": "O"}, {"id": 8, "orth": "?", "ner": "O"}, ], "brackets": [], }, { "tokens": [ {"id": 9, "orth": "\n", "ner": "O"}, {"id": 10, "orth": "I", "ner": "O"}, {"id": 11, "orth": "'ve", "ner": "O"}, {"id": 12, "orth": "heard", "ner": "O"}, {"id": 13, "orth": "of", "ner": "O"}, {"id": 14, "orth": "people", "ner": "O"}, {"id": 15, "orth": "cooking", "ner": "O"}, {"id": 16, "orth": "bacon", "ner": "O"}, {"id": 17, "orth": "in", "ner": "O"}, {"id": 18, "orth": "an", "ner": "O"}, {"id": 19, "orth": "oven", "ner": "O"}, {"id": 20, "orth": ".", "ner": "O"}, ], "brackets": [], }, ], "cats": [ {"label": "baking", "value": 1.0}, {"label": "not_baking", "value": 0.0}, ], }, { "raw": "What is the difference between white and brown eggs?\n", "sentences": [ { "tokens": [ {"id": 0, "orth": "What", "ner": "O"}, {"id": 1, "orth": "is", "ner": "O"}, {"id": 2, "orth": "the", "ner": "O"}, {"id": 3, "orth": "difference", "ner": "O"}, {"id": 4, "orth": "between", "ner": "O"}, {"id": 5, "orth": "white", "ner": "O"}, {"id": 6, "orth": "and", "ner": "O"}, {"id": 7, "orth": "brown", "ner": "O"}, {"id": 8, "orth": "eggs", "ner": "O"}, {"id": 9, "orth": "?", "ner": "O"}, ], "brackets": [], }, {"tokens": [{"id": 10, "orth": "\n", "ner": "O"}], "brackets": []}, ], "cats": [ {"label": "baking", "value": 0.0}, {"label": "not_baking", "value": 1.0}, ], }, ], } nlp = English() attrs = ["ORTH", "SENT_START", "ENT_IOB", "ENT_TYPE"] with make_tempdir() as tmpdir: output_file = tmpdir / "test4402.spacy" docs = json_to_docs([json_data]) data = DocBin(docs=docs, attrs=attrs).to_bytes() with output_file.open("wb") as file_: file_.write(data) reader = Corpus(output_file) train_data = list(reader(nlp)) assert len(train_data) == 2 split_train_data = [] for eg in train_data: split_train_data.extend(eg.split_sents()) assert len(split_train_data) == 4 CONFIG_7029 = """ [nlp] lang = "en" pipeline = ["tok2vec", "tagger"] [components] [components.tok2vec] factory = "tok2vec" [components.tok2vec.model] @architectures 
= "spacy.Tok2Vec.v1" [components.tok2vec.model.embed] @architectures = "spacy.MultiHashEmbed.v1" width = ${components.tok2vec.model.encode:width} attrs = ["NORM","PREFIX","SUFFIX","SHAPE"] rows = [5000,2500,2500,2500] include_static_vectors = false [components.tok2vec.model.encode] @architectures = "spacy.MaxoutWindowEncoder.v1" width = 96 depth = 4 window_size = 1 maxout_pieces = 3 [components.tagger] factory = "tagger" [components.tagger.model] @architectures = "spacy.Tagger.v2" nO = null [components.tagger.model.tok2vec] @architectures = "spacy.Tok2VecListener.v1" width = ${components.tok2vec.model.encode:width} upstream = "*" """ @pytest.mark.issue(7029) def test_issue7029(): """Test that an empty document doesn't mess up an entire batch.""" TRAIN_DATA = [ ("I like green eggs", {"tags": ["N", "V", "J", "N"]}), ("Eat blue ham", {"tags": ["V", "J", "N"]}), ] nlp = English.from_config(load_config_from_str(CONFIG_7029)) train_examples = [] for t in TRAIN_DATA: train_examples.append(Example.from_dict(nlp.make_doc(t[0]), t[1])) optimizer = nlp.initialize(get_examples=lambda: train_examples) for i in range(50): losses = {} nlp.update(train_examples, sgd=optimizer, losses=losses) texts = ["first", "second", "third", "fourth", "and", "then", "some", ""] docs1 = list(nlp.pipe(texts, batch_size=1)) docs2 = list(nlp.pipe(texts, batch_size=4)) assert [doc[0].tag_ for doc in docs1[:-1]] == [doc[0].tag_ for doc in docs2[:-1]] def test_gold_biluo_U(en_vocab): words = ["I", "flew", "to", "London", "."] spaces = [True, True, True, False, True] doc = Doc(en_vocab, words=words, spaces=spaces) entities = [(len("I flew to "), len("I flew to London"), "LOC")] tags = offsets_to_biluo_tags(doc, entities) assert tags == ["O", "O", "O", "U-LOC", "O"] def test_gold_biluo_BL(en_vocab): words = ["I", "flew", "to", "San", "Francisco", "."] spaces = [True, True, True, True, False, True] doc = Doc(en_vocab, words=words, spaces=spaces) entities = [(len("I flew to "), len("I flew to San Francisco"), "LOC")] tags = offsets_to_biluo_tags(doc, entities) assert tags == ["O", "O", "O", "B-LOC", "L-LOC", "O"] def test_gold_biluo_BIL(en_vocab): words = ["I", "flew", "to", "San", "Francisco", "Valley", "."] spaces = [True, True, True, True, True, False, True] doc = Doc(en_vocab, words=words, spaces=spaces) entities = [(len("I flew to "), len("I flew to San Francisco Valley"), "LOC")] tags = offsets_to_biluo_tags(doc, entities) assert tags == ["O", "O", "O", "B-LOC", "I-LOC", "L-LOC", "O"] def test_gold_biluo_overlap(en_vocab): words = ["I", "flew", "to", "San", "Francisco", "Valley", "."] spaces = [True, True, True, True, True, False, True] doc = Doc(en_vocab, words=words, spaces=spaces) entities = [ (len("I flew to "), len("I flew to San Francisco Valley"), "LOC"), (len("I flew to "), len("I flew to San Francisco"), "LOC"), ] with pytest.raises(ValueError): offsets_to_biluo_tags(doc, entities) def test_gold_biluo_misalign(en_vocab): words = ["I", "flew", "to", "San", "Francisco", "Valley."] spaces = [True, True, True, True, True, False] doc = Doc(en_vocab, words=words, spaces=spaces) entities = [(len("I flew to "), len("I flew to San Francisco Valley"), "LOC")] with pytest.warns(UserWarning): tags = offsets_to_biluo_tags(doc, entities) assert tags == ["O", "O", "O", "-", "-", "-"] def test_example_constructor(en_vocab): words = ["I", "like", "stuff"] tags = ["NOUN", "VERB", "NOUN"] tag_ids = [en_vocab.strings.add(tag) for tag in tags] predicted = Doc(en_vocab, words=words) reference = Doc(en_vocab, words=words) reference = 
reference.from_array("TAG", numpy.array(tag_ids, dtype="uint64")) example = Example(predicted, reference) tags = example.get_aligned("TAG", as_string=True) assert tags == ["NOUN", "VERB", "NOUN"] def test_example_from_dict_tags(en_vocab): words = ["I", "like", "stuff"] tags = ["NOUN", "VERB", "NOUN"] predicted = Doc(en_vocab, words=words) example = Example.from_dict(predicted, {"TAGS": tags}) tags = example.get_aligned("TAG", as_string=True) assert tags == ["NOUN", "VERB", "NOUN"] def test_example_from_dict_no_ner(en_vocab): words = ["a", "b", "c", "d"] spaces = [True, True, False, True] predicted = Doc(en_vocab, words=words, spaces=spaces) example = Example.from_dict(predicted, {"words": words}) ner_tags = example.get_aligned_ner() assert ner_tags == [None, None, None, None] def test_example_from_dict_some_ner(en_vocab): words = ["a", "b", "c", "d"] spaces = [True, True, False, True] predicted = Doc(en_vocab, words=words, spaces=spaces) example = Example.from_dict( predicted, {"words": words, "entities": ["U-LOC", None, None, None]} ) ner_tags = example.get_aligned_ner() assert ner_tags == ["U-LOC", None, None, None] @pytest.mark.filterwarnings("ignore::UserWarning") def test_json_to_docs_no_ner(en_vocab): data = [ { "id": 1, "paragraphs": [ { "sentences": [ { "tokens": [ {"dep": "nn", "head": 1, "tag": "NNP", "orth": "Ms."}, { "dep": "nsubj", "head": 1, "tag": "NNP", "orth": "Haag", }, { "dep": "ROOT", "head": 0, "tag": "VBZ", "orth": "plays", }, { "dep": "dobj", "head": -1, "tag": "NNP", "orth": "Elianti", }, {"dep": "punct", "head": -2, "tag": ".", "orth": "."}, ] } ] } ], } ] docs = list(json_to_docs(data)) assert len(docs) == 1 for doc in docs: assert not doc.has_annotation("ENT_IOB") for token in doc: assert token.ent_iob == 0 eg = Example( Doc( doc.vocab, words=[w.text for w in doc], spaces=[bool(w.whitespace_) for w in doc], ), doc, ) ner_tags = eg.get_aligned_ner() assert ner_tags == [None, None, None, None, None] def test_split_sentences(en_vocab): # fmt: off words = ["I", "flew", "to", "San Francisco Valley", "had", "loads of fun"] gold_words = ["I", "flew", "to", "San", "Francisco", "Valley", "had", "loads", "of", "fun"] sent_starts = [True, False, False, False, False, False, True, False, False, False] # fmt: on doc = Doc(en_vocab, words=words) example = Example.from_dict(doc, {"words": gold_words, "sent_starts": sent_starts}) assert example.text == "I flew to San Francisco Valley had loads of fun " split_examples = example.split_sents() assert len(split_examples) == 2 assert split_examples[0].text == "I flew to San Francisco Valley " assert split_examples[1].text == "had loads of fun " # fmt: off words = ["I", "flew", "to", "San", "Francisco", "Valley", "had", "loads", "of fun"] gold_words = ["I", "flew", "to", "San Francisco", "Valley", "had", "loads of", "fun"] sent_starts = [True, False, False, False, False, True, False, False] # fmt: on doc = Doc(en_vocab, words=words) example = Example.from_dict(doc, {"words": gold_words, "sent_starts": sent_starts}) assert example.text == "I flew to San Francisco Valley had loads of fun " split_examples = example.split_sents() assert len(split_examples) == 2 assert split_examples[0].text == "I flew to San Francisco Valley " assert split_examples[1].text == "had loads of fun " def test_gold_biluo_one_to_many(en_vocab, en_tokenizer): words = ["Mr and ", "Mrs Smith", "flew to", "San Francisco Valley", "."] spaces = [True, True, True, False, False] doc = Doc(en_vocab, words=words, spaces=spaces) prefix = "Mr and Mrs Smith flew to " entities 
= [(len(prefix), len(prefix + "San Francisco Valley"), "LOC")] gold_words = ["Mr and Mrs Smith", "flew", "to", "San", "Francisco", "Valley", "."] example = Example.from_dict(doc, {"words": gold_words, "entities": entities}) ner_tags = example.get_aligned_ner() assert ner_tags == ["O", "O", "O", "U-LOC", "O"] entities = [ (len("Mr and "), len("Mr and Mrs Smith"), "PERSON"), # "Mrs Smith" is a PERSON (len(prefix), len(prefix + "San Francisco Valley"), "LOC"), ] # fmt: off gold_words = ["Mr and", "Mrs", "Smith", "flew", "to", "San", "Francisco", "Valley", "."] # fmt: on example = Example.from_dict(doc, {"words": gold_words, "entities": entities}) ner_tags = example.get_aligned_ner() assert ner_tags == ["O", "U-PERSON", "O", "U-LOC", "O"] entities = [ (len("Mr and "), len("Mr and Mrs"), "PERSON"), # "Mrs" is a Person (len(prefix), len(prefix + "San Francisco Valley"), "LOC"), ] # fmt: off gold_words = ["Mr and", "Mrs", "Smith", "flew", "to", "San", "Francisco", "Valley", "."] # fmt: on example = Example.from_dict(doc, {"words": gold_words, "entities": entities}) ner_tags = example.get_aligned_ner() assert ner_tags == ["O", None, "O", "U-LOC", "O"] def test_gold_biluo_many_to_one(en_vocab, en_tokenizer): words = ["Mr and", "Mrs", "Smith", "flew", "to", "San", "Francisco", "Valley", "."] spaces = [True, True, True, True, True, True, True, False, False] doc = Doc(en_vocab, words=words, spaces=spaces) prefix = "Mr and Mrs Smith flew to " entities = [(len(prefix), len(prefix + "San Francisco Valley"), "LOC")] gold_words = ["Mr and Mrs Smith", "flew to", "San Francisco Valley", "."] example = Example.from_dict(doc, {"words": gold_words, "entities": entities}) ner_tags = example.get_aligned_ner() assert ner_tags == ["O", "O", "O", "O", "O", "B-LOC", "I-LOC", "L-LOC", "O"] entities = [ (len("Mr and "), len("Mr and Mrs Smith"), "PERSON"), # "Mrs Smith" is a PERSON (len(prefix), len(prefix + "San Francisco Valley"), "LOC"), ] gold_words = ["Mr and", "Mrs Smith", "flew to", "San Francisco Valley", "."] example = Example.from_dict(doc, {"words": gold_words, "entities": entities}) ner_tags = example.get_aligned_ner() expected = ["O", "B-PERSON", "L-PERSON", "O", "O", "B-LOC", "I-LOC", "L-LOC", "O"] assert ner_tags == expected def test_gold_biluo_misaligned(en_vocab, en_tokenizer): words = ["Mr and Mrs", "Smith", "flew", "to", "San Francisco", "Valley", "."] spaces = [True, True, True, True, True, False, False] doc = Doc(en_vocab, words=words, spaces=spaces) prefix = "Mr and Mrs Smith flew to " entities = [(len(prefix), len(prefix + "San Francisco Valley"), "LOC")] gold_words = ["Mr", "and Mrs Smith", "flew to", "San", "Francisco Valley", "."] example = Example.from_dict(doc, {"words": gold_words, "entities": entities}) ner_tags = example.get_aligned_ner() assert ner_tags == ["O", "O", "O", "O", "B-LOC", "L-LOC", "O"] entities = [ (len("Mr and "), len("Mr and Mrs Smith"), "PERSON"), # "Mrs Smith" is a PERSON (len(prefix), len(prefix + "San Francisco Valley"), "LOC"), ] gold_words = ["Mr and", "Mrs Smith", "flew to", "San", "Francisco Valley", "."] example = Example.from_dict(doc, {"words": gold_words, "entities": entities}) ner_tags = example.get_aligned_ner() assert ner_tags == [None, None, "O", "O", "B-LOC", "L-LOC", "O"] def test_gold_biluo_additional_whitespace(en_vocab, en_tokenizer): # additional whitespace tokens in GoldParse words words, spaces = get_words_and_spaces( ["I", "flew", "to", "San Francisco", "Valley", "."], "I flew to San Francisco Valley.", ) doc = Doc(en_vocab, words=words, 
spaces=spaces) prefix = "I flew to " entities = [(len(prefix), len(prefix + "San Francisco Valley"), "LOC")] gold_words = ["I", "flew", " ", "to", "San Francisco Valley", "."] gold_spaces = [True, True, False, True, False, False] example = Example.from_dict( doc, {"words": gold_words, "spaces": gold_spaces, "entities": entities} ) ner_tags = example.get_aligned_ner() assert ner_tags == ["O", "O", "O", "O", "B-LOC", "L-LOC", "O"] def test_gold_biluo_4791(en_vocab, en_tokenizer): doc = en_tokenizer("I'll return the A54 amount") gold_words = ["I", "'ll", "return", "the", "A", "54", "amount"] gold_spaces = [False, True, True, True, False, True, False] entities = [(16, 19, "MONEY")] example = Example.from_dict( doc, {"words": gold_words, "spaces": gold_spaces, "entities": entities} ) ner_tags = example.get_aligned_ner() assert ner_tags == ["O", "O", "O", "O", "U-MONEY", "O"] doc = en_tokenizer("I'll return the $54 amount") gold_words = ["I", "'ll", "return", "the", "$", "54", "amount"] gold_spaces = [False, True, True, True, False, True, False] entities = [(16, 19, "MONEY")] example = Example.from_dict( doc, {"words": gold_words, "spaces": gold_spaces, "entities": entities} ) ner_tags = example.get_aligned_ner() assert ner_tags == ["O", "O", "O", "O", "B-MONEY", "L-MONEY", "O"] def test_roundtrip_offsets_biluo_conversion(en_tokenizer): text = "I flew to Silicon Valley via London." biluo_tags = ["O", "O", "O", "B-LOC", "L-LOC", "O", "U-GPE", "O"] offsets = [(10, 24, "LOC"), (29, 35, "GPE")] doc = en_tokenizer(text) biluo_tags_converted = offsets_to_biluo_tags(doc, offsets) assert biluo_tags_converted == biluo_tags offsets_converted = biluo_tags_to_offsets(doc, biluo_tags) offsets_converted = [ent for ent in offsets_converted if ent[2]] assert offsets_converted == offsets def test_biluo_spans(en_tokenizer): doc = en_tokenizer("I flew to Silicon Valley via London.") biluo_tags = ["O", "O", "O", "B-LOC", "L-LOC", "O", "U-GPE", "O"] spans = biluo_tags_to_spans(doc, biluo_tags) spans = [span for span in spans if span.label_] assert len(spans) == 2 assert spans[0].text == "Silicon Valley" assert spans[0].label_ == "LOC" assert spans[1].text == "London" assert spans[1].label_ == "GPE" def test_aligned_spans_y2x(en_vocab, en_tokenizer): words = ["Mr and Mrs Smith", "flew", "to", "San Francisco Valley", "."] spaces = [True, True, True, False, False] doc = Doc(en_vocab, words=words, spaces=spaces) prefix = "Mr and Mrs Smith flew to " entities = [ (0, len("Mr and Mrs Smith"), "PERSON"), (len(prefix), len(prefix + "San Francisco Valley"), "LOC"), ] # fmt: off tokens_ref = ["Mr", "and", "Mrs", "Smith", "flew", "to", "San", "Francisco", "Valley", "."] # fmt: on example = Example.from_dict(doc, {"words": tokens_ref, "entities": entities}) ents_ref = example.reference.ents assert [(ent.start, ent.end) for ent in ents_ref] == [(0, 4), (6, 9)] ents_y2x = example.get_aligned_spans_y2x(ents_ref) assert [(ent.start, ent.end) for ent in ents_y2x] == [(0, 1), (3, 4)] def test_aligned_spans_x2y(en_vocab, en_tokenizer): text = "Mr and Mrs Smith flew to San Francisco Valley" nlp = English() patterns = [ {"label": "PERSON", "pattern": "Mr and Mrs Smith"}, {"label": "LOC", "pattern": "San Francisco Valley"}, ] ruler = nlp.add_pipe("entity_ruler") ruler.add_patterns(patterns) doc = nlp(text) assert [(ent.start, ent.end) for ent in doc.ents] == [(0, 4), (6, 9)] prefix = "Mr and Mrs Smith flew to " entities = [ (0, len("Mr and Mrs Smith"), "PERSON"), (len(prefix), len(prefix + "San Francisco Valley"), "LOC"), ] tokens_ref = ["Mr and 
Mrs", "Smith", "flew", "to", "San Francisco", "Valley"] example = Example.from_dict(doc, {"words": tokens_ref, "entities": entities}) assert [(ent.start, ent.end) for ent in example.reference.ents] == [(0, 2), (4, 6)] # Ensure that 'get_aligned_spans_x2y' has the aligned entities correct ents_pred = example.predicted.ents assert [(ent.start, ent.end) for ent in ents_pred] == [(0, 4), (6, 9)] ents_x2y = example.get_aligned_spans_x2y(ents_pred) assert [(ent.start, ent.end) for ent in ents_x2y] == [(0, 2), (4, 6)] def test_aligned_spans_y2x_overlap(en_vocab, en_tokenizer): text = "I flew to San Francisco Valley" nlp = English() doc = nlp(text) # the reference doc has overlapping spans gold_doc = nlp.make_doc(text) spans = [] prefix = "I flew to " spans.append( gold_doc.char_span(len(prefix), len(prefix + "San Francisco"), label="CITY") ) spans.append( gold_doc.char_span( len(prefix), len(prefix + "San Francisco Valley"), label="VALLEY" ) ) spans_key = "overlap_ents" gold_doc.spans[spans_key] = spans example = Example(doc, gold_doc) spans_gold = example.reference.spans[spans_key] assert [(ent.start, ent.end) for ent in spans_gold] == [(3, 5), (3, 6)] # Ensure that 'get_aligned_spans_y2x' has the aligned entities correct spans_y2x_no_overlap = example.get_aligned_spans_y2x( spans_gold, allow_overlap=False ) assert [(ent.start, ent.end) for ent in spans_y2x_no_overlap] == [(3, 5)] spans_y2x_overlap = example.get_aligned_spans_y2x(spans_gold, allow_overlap=True) assert [(ent.start, ent.end) for ent in spans_y2x_overlap] == [(3, 5), (3, 6)] def test_gold_ner_missing_tags(en_tokenizer): doc = en_tokenizer("I flew to Silicon Valley via London.") biluo_tags = [None, "O", "O", "B-LOC", "L-LOC", "O", "U-GPE", "O"] example = Example.from_dict(doc, {"entities": biluo_tags}) assert example.get_aligned("ENT_IOB") == [0, 2, 2, 3, 1, 2, 3, 2] def test_projectivize(en_tokenizer): doc = en_tokenizer("He pretty quickly walks away") heads = [3, 2, 3, 3, 2] deps = ["dep"] * len(heads) example = Example.from_dict(doc, {"heads": heads, "deps": deps}) proj_heads, proj_labels = example.get_aligned_parse(projectivize=True) nonproj_heads, nonproj_labels = example.get_aligned_parse(projectivize=False) assert proj_heads == [3, 2, 3, 3, 3] assert nonproj_heads == [3, 2, 3, 3, 2] # Test single token documents doc = en_tokenizer("Conrail") heads = [0] deps = ["dep"] example = Example.from_dict(doc, {"heads": heads, "deps": deps}) proj_heads, proj_labels = example.get_aligned_parse(projectivize=True) assert proj_heads == heads assert proj_labels == deps # Test documents with no alignments doc_a = Doc( doc.vocab, words=["Double-Jointed"], spaces=[False], deps=["ROOT"], heads=[0] ) doc_b = Doc( doc.vocab, words=["Double", "-", "Jointed"], spaces=[True, True, True], deps=["amod", "punct", "ROOT"], heads=[2, 2, 2], ) example = Example(doc_a, doc_b) proj_heads, proj_deps = example.get_aligned_parse(projectivize=True) assert proj_heads == [None] assert proj_deps == [None] def test_iob_to_biluo(): good_iob = ["O", "O", "B-LOC", "I-LOC", "O", "B-PERSON"] good_biluo = ["O", "O", "B-LOC", "L-LOC", "O", "U-PERSON"] bad_iob = ["O", "O", '"', "B-LOC", "I-LOC"] converted_biluo = iob_to_biluo(good_iob) assert good_biluo == converted_biluo with pytest.raises(ValueError): iob_to_biluo(bad_iob) def test_roundtrip_docs_to_docbin(doc): text = doc.text idx = [t.idx for t in doc] tags = [t.tag_ for t in doc] pos = [t.pos_ for t in doc] morphs = [str(t.morph) for t in doc] lemmas = [t.lemma_ for t in doc] deps = [t.dep_ for t in doc] heads = 
[t.head.i for t in doc] cats = doc.cats ents = [(e.start_char, e.end_char, e.label_) for e in doc.ents] # roundtrip to DocBin with make_tempdir() as tmpdir: # use a separate vocab to test that all labels are added reloaded_nlp = English() json_file = tmpdir / "roundtrip.json" srsly.write_json(json_file, [docs_to_json(doc)]) output_file = tmpdir / "roundtrip.spacy" DocBin(docs=[doc]).to_disk(output_file) reader = Corpus(output_file) reloaded_examples = list(reader(reloaded_nlp)) assert len(doc) == sum(len(eg) for eg in reloaded_examples) reloaded_example = reloaded_examples[0] assert text == reloaded_example.reference.text assert idx == [t.idx for t in reloaded_example.reference] assert tags == [t.tag_ for t in reloaded_example.reference] assert pos == [t.pos_ for t in reloaded_example.reference] assert morphs == [str(t.morph) for t in reloaded_example.reference] assert lemmas == [t.lemma_ for t in reloaded_example.reference] assert deps == [t.dep_ for t in reloaded_example.reference] assert heads == [t.head.i for t in reloaded_example.reference] assert ents == [ (e.start_char, e.end_char, e.label_) for e in reloaded_example.reference.ents ] assert "TRAVEL" in reloaded_example.reference.cats assert "BAKING" in reloaded_example.reference.cats assert cats["TRAVEL"] == reloaded_example.reference.cats["TRAVEL"] assert cats["BAKING"] == reloaded_example.reference.cats["BAKING"] def test_docbin_user_data_serialized(doc): doc.user_data["check"] = True nlp = English() with make_tempdir() as tmpdir: output_file = tmpdir / "userdata.spacy" DocBin(docs=[doc], store_user_data=True).to_disk(output_file) reloaded_docs = DocBin().from_disk(output_file).get_docs(nlp.vocab) reloaded_doc = list(reloaded_docs)[0] assert reloaded_doc.user_data["check"] == True def test_docbin_user_data_not_serialized(doc): # this isn't serializable, but that shouldn't cause an error doc.user_data["check"] = set() nlp = English() with make_tempdir() as tmpdir: output_file = tmpdir / "userdata.spacy" DocBin(docs=[doc], store_user_data=False).to_disk(output_file) reloaded_docs = DocBin().from_disk(output_file).get_docs(nlp.vocab) reloaded_doc = list(reloaded_docs)[0] assert "check" not in reloaded_doc.user_data @pytest.mark.parametrize( "tokens_a,tokens_b,expected", [ (["a", "b", "c"], ["ab", "c"], ([[0], [0], [1]], [[0, 1], [2]])), ( ["a", "b", '"', "c"], ['ab"', "c"], ([[0], [0], [0], [1]], [[0, 1, 2], [3]]), ), (["a", "bc"], ["ab", "c"], ([[0], [0, 1]], [[0, 1], [1]])), ( ["ab", "c", "d"], ["a", "b", "cd"], ([[0, 1], [2], [2]], [[0], [0], [1, 2]]), ), ( ["a", "b", "cd"], ["a", "b", "c", "d"], ([[0], [1], [2, 3]], [[0], [1], [2], [2]]), ), ([" ", "a"], ["a"], ([[], [0]], [[1]])), ( ["a", "''", "'", ","], ["a'", "''", ","], ([[0], [0, 1], [1], [2]], [[0, 1], [1, 2], [3]]), ), ], ) def test_align(tokens_a, tokens_b, expected): # noqa a2b, b2a = get_alignments(tokens_a, tokens_b) assert (a2b, b2a) == expected # noqa # check symmetry a2b, b2a = get_alignments(tokens_b, tokens_a) # noqa assert (b2a, a2b) == expected # noqa def test_goldparse_startswith_space(en_tokenizer): text = " a" doc = en_tokenizer(text) gold_words = ["a"] entities = ["U-DATE"] deps = ["ROOT"] heads = [0] example = Example.from_dict( doc, {"words": gold_words, "entities": entities, "deps": deps, "heads": heads} ) ner_tags = example.get_aligned_ner() assert ner_tags == ["O", "U-DATE"] assert example.get_aligned("DEP", as_string=True) == [None, "ROOT"] def test_goldparse_endswith_space(en_tokenizer): text = "a\n" doc = en_tokenizer(text) gold_words = ["a"] 
entities = ["U-DATE"] deps = ["ROOT"] heads = [0] example = Example.from_dict( doc, {"words": gold_words, "entities": entities, "deps": deps, "heads": heads} ) ner_tags = example.get_aligned_ner() assert ner_tags == ["U-DATE", "O"] assert example.get_aligned("DEP", as_string=True) == ["ROOT", None] def test_gold_constructor(): """Test that the Example constructor works fine""" nlp = English() doc = nlp("This is a sentence") example = Example.from_dict(doc, {"cats": {"cat1": 1.0, "cat2": 0.0}}) assert example.get_aligned("ORTH", as_string=True) == [ "This", "is", "a", "sentence", ] assert example.reference.cats["cat1"] assert not example.reference.cats["cat2"] def test_tuple_format_implicit(): """Test tuple format""" train_data = [ ("Uber blew through $1 million a week", {"entities": [(0, 4, "ORG")]}), ( "Spotify steps up Asia expansion", {"entities": [(0, 7, "ORG"), (17, 21, "LOC")]}, ), ("Google rebrands its business apps", {"entities": [(0, 6, "ORG")]}), ] _train_tuples(train_data) def test_tuple_format_implicit_invalid(): """Test that an error is thrown for an implicit invalid field""" train_data = [ ("Uber blew through $1 million a week", {"frumble": [(0, 4, "ORG")]}), ( "Spotify steps up Asia expansion", {"entities": [(0, 7, "ORG"), (17, 21, "LOC")]}, ), ("Google rebrands its business apps", {"entities": [(0, 6, "ORG")]}), ] with pytest.raises(KeyError): _train_tuples(train_data) def _train_tuples(train_data): nlp = English() ner = nlp.add_pipe("ner") ner.add_label("ORG") ner.add_label("LOC") train_examples = [] for t in train_data: train_examples.append(Example.from_dict(nlp.make_doc(t[0]), t[1])) optimizer = nlp.initialize() for i in range(5): losses = {} batches = minibatch(train_examples, size=compounding(4.0, 32.0, 1.001)) for batch in batches: nlp.update(batch, sgd=optimizer, losses=losses) def test_split_sents(merged_dict): nlp = English() example = Example.from_dict( Doc(nlp.vocab, words=merged_dict["words"], spaces=merged_dict["spaces"]), merged_dict, ) assert example.text == "Hi there everyone It is just me" split_examples = example.split_sents() assert len(split_examples) == 2 assert split_examples[0].text == "Hi there everyone " assert split_examples[1].text == "It is just me" token_annotation_1 = split_examples[0].to_dict()["token_annotation"] assert token_annotation_1["ORTH"] == ["Hi", "there", "everyone"] assert token_annotation_1["TAG"] == ["INTJ", "ADV", "PRON"] assert token_annotation_1["SENT_START"] == [1, 0, 0] token_annotation_2 = split_examples[1].to_dict()["token_annotation"] assert token_annotation_2["ORTH"] == ["It", "is", "just", "me"] assert token_annotation_2["TAG"] == ["PRON", "AUX", "ADV", "PRON"] assert token_annotation_2["SENT_START"] == [1, 0, 0, 0] def test_alignment(): other_tokens = ["i", "listened", "to", "obama", "'", "s", "podcasts", "."] spacy_tokens = ["i", "listened", "to", "obama", "'s", "podcasts", "."] align = Alignment.from_strings(other_tokens, spacy_tokens) assert list(align.x2y.lengths) == [1, 1, 1, 1, 1, 1, 1, 1] assert list(align.x2y.data) == [0, 1, 2, 3, 4, 4, 5, 6] assert list(align.y2x.lengths) == [1, 1, 1, 1, 2, 1, 1] assert list(align.y2x.data) == [0, 1, 2, 3, 4, 5, 6, 7] def test_alignment_array(): a = AlignmentArray([[0, 1, 2], [3], [], [4, 5, 6, 7], [8, 9]]) assert list(a.data) == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] assert list(a.lengths) == [3, 1, 0, 4, 2] assert list(a[3]) == [4, 5, 6, 7] assert list(a[2]) == [] assert list(a[-2]) == [4, 5, 6, 7] assert list(a[1:4]) == [3, 4, 5, 6, 7] assert list(a[1:]) == [3, 4, 5, 6, 7, 8, 9] 
assert list(a[:3]) == [0, 1, 2, 3] assert list(a[:]) == list(a.data) assert list(a[0:0]) == [] assert list(a[3:3]) == [] assert list(a[-1:-1]) == [] with pytest.raises(ValueError, match=r"only supports slicing with a step of 1"): a[:4:-1] with pytest.raises( ValueError, match=r"only supports indexing using an int or a slice" ): a[[0, 1, 3]] a = AlignmentArray([[], [1, 2, 3], [4, 5]]) assert list(a[0]) == [] assert list(a[0:1]) == [] assert list(a[2]) == [4, 5] assert list(a[0:2]) == [1, 2, 3] a = AlignmentArray([[1, 2, 3], [4, 5], []]) assert list(a[-1]) == [] assert list(a[-2:]) == [4, 5] def test_alignment_case_insensitive(): other_tokens = ["I", "listened", "to", "obama", "'", "s", "podcasts", "."] spacy_tokens = ["i", "listened", "to", "Obama", "'s", "PODCASTS", "."] align = Alignment.from_strings(other_tokens, spacy_tokens) assert list(align.x2y.lengths) == [1, 1, 1, 1, 1, 1, 1, 1] assert list(align.x2y.data) == [0, 1, 2, 3, 4, 4, 5, 6] assert list(align.y2x.lengths) == [1, 1, 1, 1, 2, 1, 1] assert list(align.y2x.data) == [0, 1, 2, 3, 4, 5, 6, 7] def test_alignment_complex(): other_tokens = ["i listened to", "obama", "'", "s", "podcasts", "."] spacy_tokens = ["i", "listened", "to", "obama", "'s", "podcasts."] align = Alignment.from_strings(other_tokens, spacy_tokens) assert list(align.x2y.lengths) == [3, 1, 1, 1, 1, 1] assert list(align.x2y.data) == [0, 1, 2, 3, 4, 4, 5, 5] assert list(align.y2x.lengths) == [1, 1, 1, 1, 2, 2] assert list(align.y2x.data) == [0, 0, 0, 1, 2, 3, 4, 5] def test_alignment_complex_example(en_vocab): other_tokens = ["i listened to", "obama", "'", "s", "podcasts", "."] spacy_tokens = ["i", "listened", "to", "obama", "'s", "podcasts."] predicted = Doc( en_vocab, words=other_tokens, spaces=[True, False, False, True, False, False] ) reference = Doc( en_vocab, words=spacy_tokens, spaces=[True, True, True, False, True, False] ) assert predicted.text == "i listened to obama's podcasts." assert reference.text == "i listened to obama's podcasts." 
example = Example(predicted, reference) align = example.alignment assert list(align.x2y.lengths) == [3, 1, 1, 1, 1, 1] assert list(align.x2y.data) == [0, 1, 2, 3, 4, 4, 5, 5] assert list(align.y2x.lengths) == [1, 1, 1, 1, 2, 2] assert list(align.y2x.data) == [0, 0, 0, 1, 2, 3, 4, 5] def test_alignment_different_texts(): other_tokens = ["she", "listened", "to", "obama", "'s", "podcasts", "."] spacy_tokens = ["i", "listened", "to", "obama", "'s", "podcasts", "."] with pytest.raises(ValueError): Alignment.from_strings(other_tokens, spacy_tokens) def test_alignment_spaces(en_vocab): # single leading whitespace other_tokens = [" ", "i listened to", "obama", "'", "s", "podcasts", "."] spacy_tokens = ["i", "listened", "to", "obama", "'s", "podcasts."] align = Alignment.from_strings(other_tokens, spacy_tokens) assert list(align.x2y.lengths) == [0, 3, 1, 1, 1, 1, 1] assert list(align.x2y.data) == [0, 1, 2, 3, 4, 4, 5, 5] assert list(align.y2x.lengths) == [1, 1, 1, 1, 2, 2] assert list(align.y2x.data) == [1, 1, 1, 2, 3, 4, 5, 6] # multiple leading whitespace tokens other_tokens = [" ", " ", "i listened to", "obama", "'", "s", "podcasts", "."] spacy_tokens = ["i", "listened", "to", "obama", "'s", "podcasts."] align = Alignment.from_strings(other_tokens, spacy_tokens) assert list(align.x2y.lengths) == [0, 0, 3, 1, 1, 1, 1, 1] assert list(align.x2y.data) == [0, 1, 2, 3, 4, 4, 5, 5] assert list(align.y2x.lengths) == [1, 1, 1, 1, 2, 2] assert list(align.y2x.data) == [2, 2, 2, 3, 4, 5, 6, 7] # both with leading whitespace, not identical other_tokens = [" ", " ", "i listened to", "obama", "'", "s", "podcasts", "."] spacy_tokens = [" ", "i", "listened", "to", "obama", "'s", "podcasts."] align = Alignment.from_strings(other_tokens, spacy_tokens) assert list(align.x2y.lengths) == [1, 0, 3, 1, 1, 1, 1, 1] assert list(align.x2y.data) == [0, 1, 2, 3, 4, 5, 5, 6, 6] assert list(align.y2x.lengths) == [1, 1, 1, 1, 1, 2, 2] assert list(align.y2x.data) == [0, 2, 2, 2, 3, 4, 5, 6, 7] # same leading whitespace, different tokenization other_tokens = [" ", " ", "i listened to", "obama", "'", "s", "podcasts", "."] spacy_tokens = [" ", "i", "listened", "to", "obama", "'s", "podcasts."] align = Alignment.from_strings(other_tokens, spacy_tokens) assert list(align.x2y.lengths) == [1, 1, 3, 1, 1, 1, 1, 1] assert list(align.x2y.data) == [0, 0, 1, 2, 3, 4, 5, 5, 6, 6] assert list(align.y2x.lengths) == [2, 1, 1, 1, 1, 2, 2] assert list(align.y2x.data) == [0, 1, 2, 2, 2, 3, 4, 5, 6, 7] # only one with trailing whitespace other_tokens = ["i listened to", "obama", "'", "s", "podcasts", ".", " "] spacy_tokens = ["i", "listened", "to", "obama", "'s", "podcasts."] align = Alignment.from_strings(other_tokens, spacy_tokens) assert list(align.x2y.lengths) == [3, 1, 1, 1, 1, 1, 0] assert list(align.x2y.data) == [0, 1, 2, 3, 4, 4, 5, 5] assert list(align.y2x.lengths) == [1, 1, 1, 1, 2, 2] assert list(align.y2x.data) == [0, 0, 0, 1, 2, 3, 4, 5] # different trailing whitespace other_tokens = ["i listened to", "obama", "'", "s", "podcasts", ".", " ", " "] spacy_tokens = ["i", "listened", "to", "obama", "'s", "podcasts.", " "] align = Alignment.from_strings(other_tokens, spacy_tokens) assert list(align.x2y.lengths) == [3, 1, 1, 1, 1, 1, 1, 0] assert list(align.x2y.data) == [0, 1, 2, 3, 4, 4, 5, 5, 6] assert list(align.y2x.lengths) == [1, 1, 1, 1, 2, 2, 1] assert list(align.y2x.data) == [0, 0, 0, 1, 2, 3, 4, 5, 6] # same trailing whitespace, different tokenization other_tokens = ["i listened to", "obama", "'", "s", "podcasts", ".", " ", " "] 
spacy_tokens = ["i", "listened", "to", "obama", "'s", "podcasts.", " "] align = Alignment.from_strings(other_tokens, spacy_tokens) assert list(align.x2y.lengths) == [3, 1, 1, 1, 1, 1, 1, 1] assert list(align.x2y.data) == [0, 1, 2, 3, 4, 4, 5, 5, 6, 6] assert list(align.y2x.lengths) == [1, 1, 1, 1, 2, 2, 2] assert list(align.y2x.data) == [0, 0, 0, 1, 2, 3, 4, 5, 6, 7] # differing whitespace is allowed other_tokens = ["a", " \n ", "b", "c"] spacy_tokens = ["a", "b", " ", "c"] align = Alignment.from_strings(other_tokens, spacy_tokens) assert list(align.x2y.data) == [0, 1, 3] assert list(align.y2x.data) == [0, 2, 3] # other differences in whitespace are allowed other_tokens = [" ", "a"] spacy_tokens = [" ", "a", " "] align = Alignment.from_strings(other_tokens, spacy_tokens) other_tokens = ["a", " "] spacy_tokens = ["a", " "] align = Alignment.from_strings(other_tokens, spacy_tokens) def test_retokenized_docs(doc): a = doc.to_array(["TAG"]) doc1 = Doc(doc.vocab, words=[t.text for t in doc]).from_array(["TAG"], a) doc2 = Doc(doc.vocab, words=[t.text for t in doc]).from_array(["TAG"], a) example = Example(doc1, doc2) # fmt: off expected1 = ["Sarah", "'s", "sister", "flew", "to", "Silicon", "Valley", "via", "London", "."] expected2 = [None, "sister", "flew", "to", None, "via", "London", "."] # fmt: on assert example.get_aligned("ORTH", as_string=True) == expected1 with doc1.retokenize() as retokenizer: retokenizer.merge(doc1[0:2]) retokenizer.merge(doc1[5:7]) assert example.get_aligned("ORTH", as_string=True) == expected2 def test_training_before_update(doc): def before_update(nlp, args): assert args["step"] == 0 assert args["epoch"] == 1 # Raise an error here as the rest of the loop # will not run to completion due to uninitialized # models. raise ValueError("ran_before_update") def generate_batch(): yield 1, [Example(doc, doc)] nlp = spacy.blank("en") nlp.add_pipe("tagger") optimizer = Adam() generator = train_while_improving( nlp, optimizer, generate_batch(), lambda: None, dropout=0.1, eval_frequency=100, accumulate_gradient=10, patience=10, max_steps=100, exclude=[], annotating_components=[], before_update=before_update, ) with pytest.raises(ValueError, match="ran_before_update"): for _ in generator: pass
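# A minimal, self-contained sketch of the BILUO round-trip that the tests above
# exercise, using the public spacy.training helpers. The sentence and the
# (10, 16, "GPE") offset are illustrative values chosen for this sketch, not
# fixtures from this suite.
def _sketch_biluo_roundtrip():
    from spacy.lang.en import English
    from spacy.training import biluo_tags_to_spans, offsets_to_biluo_tags

    nlp = English()
    doc = nlp.make_doc("I flew to London")
    # "London" covers characters 10-16; a single-token entity becomes a U- tag
    tags = offsets_to_biluo_tags(doc, [(10, 16, "GPE")])
    assert tags == ["O", "O", "O", "U-GPE"]
    # and the tags convert back to a labelled Span on the same doc
    spans = biluo_tags_to_spans(doc, tags)
    assert (spans[0].text, spans[0].label_) == ("London", "GPE")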
45,549
38.098712
109
py
spaCy
spaCy-master/spacy/tests/vocab_vectors/__init__.py
0
0
0
py
spaCy
spaCy-master/spacy/tests/vocab_vectors/test_lexeme.py
import numpy import pytest from spacy.attrs import IS_ALPHA, IS_DIGIT from spacy.lookups import Lookups from spacy.tokens import Doc from spacy.util import OOV_RANK from spacy.vocab import Vocab @pytest.mark.issue(361) @pytest.mark.parametrize("text1,text2", [("cat", "dog")]) def test_issue361(en_vocab, text1, text2): """Test Issue #361: Equality of lexemes""" assert en_vocab[text1] == en_vocab[text1] assert en_vocab[text1] != en_vocab[text2] @pytest.mark.issue(600) def test_issue600(): vocab = Vocab(tag_map={"NN": {"pos": "NOUN"}}) doc = Doc(vocab, words=["hello"]) doc[0].tag_ = "NN" @pytest.mark.parametrize("text1,prob1,text2,prob2", [("NOUN", -1, "opera", -2)]) def test_vocab_lexeme_lt(en_vocab, text1, text2, prob1, prob2): """More frequent is l.t. less frequent""" lex1 = en_vocab[text1] lex1.prob = prob1 lex2 = en_vocab[text2] lex2.prob = prob2 assert lex1 < lex2 assert lex2 > lex1 @pytest.mark.parametrize("text1,text2", [("phantom", "opera")]) def test_vocab_lexeme_hash(en_vocab, text1, text2): """Test that lexemes are hashable.""" lex1 = en_vocab[text1] lex2 = en_vocab[text2] lexes = {lex1: lex1, lex2: lex2} assert lexes[lex1].orth_ == text1 assert lexes[lex2].orth_ == text2 def test_vocab_lexeme_is_alpha(en_vocab): assert en_vocab["the"].flags & (1 << IS_ALPHA) assert not en_vocab["1999"].flags & (1 << IS_ALPHA) assert not en_vocab["hello1"].flags & (1 << IS_ALPHA) def test_vocab_lexeme_is_digit(en_vocab): assert not en_vocab["the"].flags & (1 << IS_DIGIT) assert en_vocab["1999"].flags & (1 << IS_DIGIT) assert not en_vocab["hello1"].flags & (1 << IS_DIGIT) def test_vocab_lexeme_add_flag_auto_id(en_vocab): is_len4 = en_vocab.add_flag(lambda string: len(string) == 4) assert en_vocab["1999"].check_flag(is_len4) is True assert en_vocab["1999"].check_flag(IS_DIGIT) is True assert en_vocab["199"].check_flag(is_len4) is False assert en_vocab["199"].check_flag(IS_DIGIT) is True assert en_vocab["the"].check_flag(is_len4) is False assert en_vocab["dogs"].check_flag(is_len4) is True def test_vocab_lexeme_add_flag_provided_id(en_vocab): is_len4 = en_vocab.add_flag(lambda string: len(string) == 4, flag_id=IS_DIGIT) assert en_vocab["1999"].check_flag(is_len4) is True assert en_vocab["199"].check_flag(is_len4) is False assert en_vocab["199"].check_flag(IS_DIGIT) is False assert en_vocab["the"].check_flag(is_len4) is False assert en_vocab["dogs"].check_flag(is_len4) is True en_vocab.add_flag(lambda string: string.isdigit(), flag_id=IS_DIGIT) def test_vocab_lexeme_oov_rank(en_vocab): """Test that default rank is OOV_RANK.""" lex = en_vocab["word"] assert OOV_RANK == numpy.iinfo(numpy.uint64).max assert lex.rank == OOV_RANK
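# A minimal sketch of the custom-flag mechanism covered by the add_flag tests
# above, assuming a fresh vocab and an auto-assigned flag id; the "is_short"
# predicate is an illustrative example, not part of spaCy's built-in flags.
def _sketch_custom_lexeme_flag():
    vocab = Vocab()
    # add_flag registers the predicate and returns the bit used to store it
    is_short = vocab.add_flag(lambda string: len(string) < 3)
    assert vocab["an"].check_flag(is_short) is True
    assert vocab["word"].check_flag(is_short) is False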
2,853
32.576471
82
py
spaCy
spaCy-master/spacy/tests/vocab_vectors/test_lookups.py
import pytest from spacy.lookups import Lookups, Table from spacy.strings import get_string_id from spacy.vocab import Vocab from ..util import make_tempdir def test_lookups_api(): table_name = "test" data = {"foo": "bar", "hello": "world"} lookups = Lookups() lookups.add_table(table_name, data) assert len(lookups) == 1 assert table_name in lookups assert lookups.has_table(table_name) table = lookups.get_table(table_name) assert table.name == table_name assert len(table) == 2 assert table["hello"] == "world" table["a"] = "b" assert table["a"] == "b" table = lookups.get_table(table_name) assert len(table) == 3 with pytest.raises(KeyError): lookups.get_table("xyz") with pytest.raises(ValueError): lookups.add_table(table_name) table = lookups.remove_table(table_name) assert table.name == table_name assert len(lookups) == 0 assert table_name not in lookups with pytest.raises(KeyError): lookups.get_table(table_name) def test_table_api(): table = Table(name="table") assert table.name == "table" assert len(table) == 0 assert "abc" not in table data = {"foo": "bar", "hello": "world"} table = Table(name="table", data=data) assert len(table) == len(data) assert "foo" in table assert get_string_id("foo") in table assert table["foo"] == "bar" assert table[get_string_id("foo")] == "bar" assert table.get("foo") == "bar" assert table.get("abc") is None table["abc"] = 123 assert table["abc"] == 123 assert table[get_string_id("abc")] == 123 table.set("def", 456) assert table["def"] == 456 assert table[get_string_id("def")] == 456 def test_table_api_to_from_bytes(): data = {"foo": "bar", "hello": "world", "abc": 123} table = Table(name="table", data=data) table_bytes = table.to_bytes() new_table = Table().from_bytes(table_bytes) assert new_table.name == "table" assert len(new_table) == 3 assert new_table["foo"] == "bar" assert new_table[get_string_id("foo")] == "bar" new_table2 = Table(data={"def": 456}) new_table2.from_bytes(table_bytes) assert len(new_table2) == 3 assert "def" not in new_table2 def test_lookups_to_from_bytes(): lookups = Lookups() lookups.add_table("table1", {"foo": "bar", "hello": "world"}) lookups.add_table("table2", {"a": 1, "b": 2, "c": 3}) lookups_bytes = lookups.to_bytes() new_lookups = Lookups() new_lookups.from_bytes(lookups_bytes) assert len(new_lookups) == 2 assert "table1" in new_lookups assert "table2" in new_lookups table1 = new_lookups.get_table("table1") assert len(table1) == 2 assert table1["foo"] == "bar" table2 = new_lookups.get_table("table2") assert len(table2) == 3 assert table2["b"] == 2 assert new_lookups.to_bytes() == lookups_bytes def test_lookups_to_from_disk(): lookups = Lookups() lookups.add_table("table1", {"foo": "bar", "hello": "world"}) lookups.add_table("table2", {"a": 1, "b": 2, "c": 3}) with make_tempdir() as tmpdir: lookups.to_disk(tmpdir) new_lookups = Lookups() new_lookups.from_disk(tmpdir) assert len(new_lookups) == 2 assert "table1" in new_lookups assert "table2" in new_lookups table1 = new_lookups.get_table("table1") assert len(table1) == 2 assert table1["foo"] == "bar" table2 = new_lookups.get_table("table2") assert len(table2) == 3 assert table2["b"] == 2 def test_lookups_to_from_bytes_via_vocab(): table_name = "test" vocab = Vocab() vocab.lookups.add_table(table_name, {"foo": "bar", "hello": "world"}) assert table_name in vocab.lookups vocab_bytes = vocab.to_bytes() new_vocab = Vocab() new_vocab.from_bytes(vocab_bytes) assert len(new_vocab.lookups) == len(vocab.lookups) assert table_name in new_vocab.lookups table = 
new_vocab.lookups.get_table(table_name) assert len(table) == 2 assert table["hello"] == "world" assert new_vocab.to_bytes() == vocab_bytes def test_lookups_to_from_disk_via_vocab(): table_name = "test" vocab = Vocab() vocab.lookups.add_table(table_name, {"foo": "bar", "hello": "world"}) assert table_name in vocab.lookups with make_tempdir() as tmpdir: vocab.to_disk(tmpdir) new_vocab = Vocab() new_vocab.from_disk(tmpdir) assert len(new_vocab.lookups) == len(vocab.lookups) assert table_name in new_vocab.lookups table = new_vocab.lookups.get_table(table_name) assert len(table) == 2 assert table["hello"] == "world"
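# A minimal sketch of the hashed-key behavior the Table tests above rely on,
# assuming a fresh Lookups object; the table name and entry are illustrative
# only. Keys are stored by string hash, so an entry can be retrieved with
# either the raw string or its get_string_id() hash.
def _sketch_table_hashed_keys():
    lookups = Lookups()
    table = lookups.add_table("lemma_exc", {"wolves": "wolf"})
    assert table["wolves"] == "wolf"
    assert table[get_string_id("wolves")] == "wolf"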
4,652
31.538462
73
py
spaCy
spaCy-master/spacy/tests/vocab_vectors/test_similarity.py
import numpy import pytest from spacy.tokens import Doc from spacy.vocab import Vocab from ..util import add_vecs_to_vocab, get_cosine @pytest.fixture def vectors(): return [("apple", [1, 2, 3]), ("orange", [-1, -2, -3])] @pytest.fixture() def vocab(en_vocab, vectors): add_vecs_to_vocab(en_vocab, vectors) return en_vocab @pytest.mark.issue(2219) def test_issue2219(en_vocab): """Test if indexing issue still occurs during Token-Token similarity""" vectors = [("a", [1, 2, 3]), ("letter", [4, 5, 6])] add_vecs_to_vocab(en_vocab, vectors) [(word1, vec1), (word2, vec2)] = vectors doc = Doc(en_vocab, words=[word1, word2]) assert doc[0].similarity(doc[1]) == doc[1].similarity(doc[0]) def test_vectors_similarity_LL(vocab, vectors): [(word1, vec1), (word2, vec2)] = vectors lex1 = vocab[word1] lex2 = vocab[word2] assert lex1.has_vector assert lex2.has_vector assert lex1.vector_norm != 0 assert lex2.vector_norm != 0 assert lex1.vector[0] != lex2.vector[0] and lex1.vector[1] != lex2.vector[1] assert isinstance(lex1.similarity(lex2), float) assert numpy.isclose(lex1.similarity(lex2), get_cosine(vec1, vec2)) assert numpy.isclose(lex2.similarity(lex2), lex1.similarity(lex1)) def test_vectors_similarity_TT(vocab, vectors): [(word1, vec1), (word2, vec2)] = vectors doc = Doc(vocab, words=[word1, word2]) assert doc[0].has_vector assert doc[1].has_vector assert doc[0].vector_norm != 0 assert doc[1].vector_norm != 0 assert doc[0].vector[0] != doc[1].vector[0] and doc[0].vector[1] != doc[1].vector[1] assert isinstance(doc[0].similarity(doc[1]), float) assert numpy.isclose(doc[0].similarity(doc[1]), get_cosine(vec1, vec2)) assert numpy.isclose(doc[1].similarity(doc[0]), doc[0].similarity(doc[1])) def test_vectors_similarity_SS(vocab, vectors): [(word1, vec1), (word2, vec2)] = vectors doc = Doc(vocab, words=[word1, word2]) assert isinstance(doc[0:1].similarity(doc[0:2]), float) assert doc[0:1].similarity(doc[0:2]) == doc[0:2].similarity(doc[0:1]) def test_vectors_similarity_DD(vocab, vectors): [(word1, vec1), (word2, vec2)] = vectors doc1 = Doc(vocab, words=[word1, word2]) doc2 = Doc(vocab, words=[word2, word1]) assert isinstance(doc1.similarity(doc2), float) assert doc1.similarity(doc2) == doc2.similarity(doc1) def test_vectors_similarity_TD(vocab, vectors): [(word1, vec1), (word2, vec2)] = vectors doc = Doc(vocab, words=[word1, word2]) assert isinstance(doc.similarity(doc[0]), float) assert isinstance(doc[0].similarity(doc), float) assert doc.similarity(doc[0]) == doc[0].similarity(doc) def test_vectors_similarity_TS(vocab, vectors): [(word1, vec1), (word2, vec2)] = vectors doc = Doc(vocab, words=[word1, word2]) assert isinstance(doc[:2].similarity(doc[0]), float) assert isinstance(doc[0].similarity(doc[:2]), float) assert doc[:2].similarity(doc[0]) == doc[0].similarity(doc[:2]) def test_vectors_similarity_DS(vocab, vectors): [(word1, vec1), (word2, vec2)] = vectors doc = Doc(vocab, words=[word1, word2]) assert isinstance(doc.similarity(doc[:2]), float) assert doc.similarity(doc[:2]) == doc[:2].similarity(doc) def test_vectors_similarity_no_vectors(): vocab = Vocab() doc1 = Doc(vocab, words=["a", "b"]) doc2 = Doc(vocab, words=["c", "d", "e"]) with pytest.warns(UserWarning): doc1.similarity(doc2) with pytest.warns(UserWarning): doc1.similarity(doc2[1]) with pytest.warns(UserWarning): doc1.similarity(doc2[:2]) with pytest.warns(UserWarning): doc2.similarity(doc1) with pytest.warns(UserWarning): doc2[1].similarity(doc1) with pytest.warns(UserWarning): doc2[:2].similarity(doc1)
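# A minimal sketch of what .similarity() computes for objects with vectors:
# cosine similarity, spelled out here with plain numpy using the same
# illustrative "apple"/"orange" values as the fixture above.
def _sketch_manual_cosine():
    vec1 = numpy.asarray([1, 2, 3], dtype="f")
    vec2 = numpy.asarray([-1, -2, -3], dtype="f")
    cosine = numpy.dot(vec1, vec2) / (
        numpy.linalg.norm(vec1) * numpy.linalg.norm(vec2)
    )
    # exactly opposite directions give a cosine of -1
    assert numpy.isclose(cosine, -1.0)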
3,835
33.25
88
py
spaCy
spaCy-master/spacy/tests/vocab_vectors/test_stringstore.py
import pytest from spacy.strings import StringStore @pytest.fixture def stringstore(): return StringStore() def test_string_hash(stringstore): """Test that string hashing is stable across platforms""" assert stringstore.add("apple") == 8566208034543834098 heart = "\U0001f499" h = stringstore.add(heart) assert h == 11841826740069053588 def test_stringstore_from_api_docs(stringstore): apple_hash = stringstore.add("apple") assert apple_hash == 8566208034543834098 assert stringstore[apple_hash] == "apple" assert "apple" in stringstore assert "cherry" not in stringstore stringstore.add("orange") all_strings = [s for s in stringstore] assert all_strings == ["apple", "orange"] banana_hash = stringstore.add("banana") assert len(stringstore) == 3 assert banana_hash == 2525716904149915114 assert stringstore[banana_hash] == "banana" assert stringstore["banana"] == banana_hash @pytest.mark.parametrize("text1,text2,text3", [(b"Hello", b"goodbye", b"hello")]) def test_stringstore_save_bytes(stringstore, text1, text2, text3): key = stringstore.add(text1) assert stringstore[text1] == key assert stringstore[text2] != key assert stringstore[text3] != key @pytest.mark.parametrize("text1,text2,text3", [("Hello", "goodbye", "hello")]) def test_stringstore_save_unicode(stringstore, text1, text2, text3): key = stringstore.add(text1) assert stringstore[text1] == key assert stringstore[text2] != key assert stringstore[text3] != key @pytest.mark.parametrize("text", [b"A"]) def test_stringstore_retrieve_id(stringstore, text): key = stringstore.add(text) assert len(stringstore) == 1 assert stringstore[key] == text.decode("utf8") with pytest.raises(KeyError): stringstore[20000] @pytest.mark.parametrize("text1,text2", [(b"0123456789", b"A")]) def test_stringstore_med_string(stringstore, text1, text2): store = stringstore.add(text1) assert stringstore[store] == text1.decode("utf8") stringstore.add(text2) assert stringstore[text1] == store def test_stringstore_long_string(stringstore): text = "INFORMATIVE](http://www.google.com/search?as_q=RedditMonkey&amp;hl=en&amp;num=50&amp;btnG=Google+Search&amp;as_epq=&amp;as_oq=&amp;as_eq=&amp;lr=&amp;as_ft=i&amp;as_filetype=&amp;as_qdr=all&amp;as_nlo=&amp;as_nhi=&amp;as_occt=any&amp;as_dt=i&amp;as_sitesearch=&amp;as_rights=&amp;safe=off" store = stringstore.add(text) assert stringstore[store] == text @pytest.mark.parametrize("factor", [254, 255, 256]) def test_stringstore_multiply(stringstore, factor): text = "a" * factor store = stringstore.add(text) assert stringstore[store] == text def test_stringstore_massive_strings(stringstore): text = "a" * 511 store = stringstore.add(text) assert stringstore[store] == text text2 = "z" * 512 store = stringstore.add(text2) assert stringstore[store] == text2 text3 = "1" * 513 store = stringstore.add(text3) assert stringstore[store] == text3 @pytest.mark.parametrize("text", ["qqqqq"]) def test_stringstore_to_bytes(stringstore, text): store = stringstore.add(text) serialized = stringstore.to_bytes() new_stringstore = StringStore().from_bytes(serialized) assert new_stringstore[store] == text
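# A minimal sketch of the hash stability asserted in test_string_hash above:
# the string-to-key mapping is deterministic, so the same string maps to the
# same 64-bit key in any StringStore instance.
def _sketch_hash_is_stable():
    store_a = StringStore()
    store_b = StringStore()
    assert store_a.add("apple") == store_b.add("apple") == 8566208034543834098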
3,339
32.737374
301
py
spaCy
spaCy-master/spacy/tests/vocab_vectors/test_vectors.py
import numpy import pytest from numpy.testing import assert_allclose, assert_almost_equal, assert_equal from thinc.api import NumpyOps, get_current_ops from spacy.lang.en import English from spacy.strings import hash_string # type: ignore from spacy.tokenizer import Tokenizer from spacy.tokens import Doc from spacy.training.initialize import convert_vectors from spacy.vectors import Vectors from spacy.vocab import Vocab from ..util import add_vecs_to_vocab, get_cosine, make_tempdir OPS = get_current_ops() @pytest.fixture def strings(): return ["apple", "orange"] @pytest.fixture def vectors(): return [ ("apple", OPS.asarray([1, 2, 3])), ("orange", OPS.asarray([-1, -2, -3])), ("and", OPS.asarray([-1, -1, -1])), ("juice", OPS.asarray([5, 5, 10])), ("pie", OPS.asarray([7, 6.3, 8.9])), ] @pytest.fixture def data(): return numpy.asarray([[0.0, 1.0, 2.0], [3.0, -2.0, 4.0]], dtype="f") @pytest.fixture def most_similar_vectors_data(): return numpy.asarray( [[0.0, 1.0, 2.0], [1.0, -2.0, 4.0], [1.0, 1.0, -1.0], [2.0, 3.0, 1.0]], dtype="f", ) @pytest.fixture def most_similar_vectors_keys(): return ["a", "b", "c", "d"] @pytest.fixture def resize_data(): return numpy.asarray([[0.0, 1.0], [2.0, 3.0]], dtype="f") @pytest.fixture() def vocab(en_vocab, vectors): add_vecs_to_vocab(en_vocab, vectors) return en_vocab @pytest.fixture() def tokenizer_v(vocab): return Tokenizer(vocab, {}, None, None, None) @pytest.mark.issue(1518) def test_issue1518(): """Test vectors.resize() works.""" vectors = Vectors(shape=(10, 10)) vectors.add("hello", row=2) vectors.resize((5, 9)) @pytest.mark.issue(1539) def test_issue1539(): """Ensure vectors.resize() doesn't try to modify dictionary during iteration.""" v = Vectors(shape=(10, 10), keys=[5, 3, 98, 100]) v.resize((100, 100)) @pytest.mark.issue(1807) def test_issue1807(): """Test vocab.set_vector also adds the word to the vocab.""" vocab = Vocab(vectors_name="test_issue1807") assert "hello" not in vocab vocab.set_vector("hello", numpy.ones((50,), dtype="f")) assert "hello" in vocab @pytest.mark.issue(2871) def test_issue2871(): """Test that vectors recover the correct key for spaCy reserved words.""" words = ["dog", "cat", "SUFFIX"] vocab = Vocab(vectors_name="test_issue2871") vocab.vectors.resize(shape=(3, 10)) vector_data = numpy.zeros((3, 10), dtype="f") for word in words: _ = vocab[word] # noqa: F841 vocab.set_vector(word, vector_data[0]) vocab.vectors.name = "dummy_vectors" assert vocab["dog"].rank == 0 assert vocab["cat"].rank == 1 assert vocab["SUFFIX"].rank == 2 assert vocab.vectors.find(key="dog") == 0 assert vocab.vectors.find(key="cat") == 1 assert vocab.vectors.find(key="SUFFIX") == 2 @pytest.mark.issue(3412) def test_issue3412(): data = numpy.asarray([[0, 0, 0], [1, 2, 3], [9, 8, 7]], dtype="f") vectors = Vectors(data=data, keys=["A", "B", "C"]) keys, best_rows, scores = vectors.most_similar( numpy.asarray([[9, 8, 7], [0, 0, 0]], dtype="f") ) assert best_rows[0] == 2 @pytest.mark.issue(4725) def test_issue4725_2(): if isinstance(get_current_ops(), NumpyOps): # ensures that this runs correctly and doesn't hang or crash because of the global vectors # if it does crash, it's usually because of calling 'spawn' for multiprocessing (e.g. 
on Windows), # or because of issues with pickling the NER (cf test_issue4725_1) vocab = Vocab(vectors_name="test_vocab_add_vector") data = numpy.ndarray((5, 3), dtype="f") data[0] = 1.0 data[1] = 2.0 vocab.set_vector("cat", data[0]) vocab.set_vector("dog", data[1]) nlp = English(vocab=vocab) nlp.add_pipe("ner") nlp.initialize() docs = ["Kurt is in London."] * 10 for _ in nlp.pipe(docs, batch_size=2, n_process=2): pass def test_init_vectors_with_resize_shape(strings, resize_data): v = Vectors(shape=(len(strings), 3)) v.resize(shape=resize_data.shape) assert v.shape == resize_data.shape assert v.shape != (len(strings), 3) def test_init_vectors_with_resize_data(data, resize_data): v = Vectors(data=data) v.resize(shape=resize_data.shape) assert v.shape == resize_data.shape assert v.shape != data.shape def test_get_vector_resize(strings, data): strings = [hash_string(s) for s in strings] # decrease vector dimension (truncate) v = Vectors(data=data) resized_dim = v.shape[1] - 1 v.resize(shape=(v.shape[0], resized_dim)) for i, string in enumerate(strings): v.add(string, row=i) assert list(v[strings[0]]) == list(data[0, :resized_dim]) assert list(v[strings[1]]) == list(data[1, :resized_dim]) # increase vector dimension (pad with zeros) v = Vectors(data=data) resized_dim = v.shape[1] + 1 v.resize(shape=(v.shape[0], resized_dim)) for i, string in enumerate(strings): v.add(string, row=i) assert list(v[strings[0]]) == list(data[0]) + [0] assert list(v[strings[1]]) == list(data[1]) + [0] def test_init_vectors_with_data(strings, data): v = Vectors(data=data) assert v.shape == data.shape def test_init_vectors_with_shape(strings): v = Vectors(shape=(len(strings), 3)) assert v.shape == (len(strings), 3) assert v.is_full is False def test_get_vector(strings, data): v = Vectors(data=data) strings = [hash_string(s) for s in strings] for i, string in enumerate(strings): v.add(string, row=i) assert list(v[strings[0]]) == list(data[0]) assert list(v[strings[0]]) != list(data[1]) assert list(v[strings[1]]) != list(data[0]) def test_set_vector(strings, data): orig = data.copy() v = Vectors(data=data) strings = [hash_string(s) for s in strings] for i, string in enumerate(strings): v.add(string, row=i) assert list(v[strings[0]]) == list(orig[0]) assert list(v[strings[0]]) != list(orig[1]) v[strings[0]] = data[1] assert list(v[strings[0]]) == list(orig[1]) assert list(v[strings[0]]) != list(orig[0]) def test_vectors_most_similar(most_similar_vectors_data, most_similar_vectors_keys): v = Vectors(data=most_similar_vectors_data, keys=most_similar_vectors_keys) _, best_rows, _ = v.most_similar(v.data, batch_size=2, n=2, sort=True) assert all(row[0] == i for i, row in enumerate(best_rows)) with pytest.raises(ValueError): v.most_similar(v.data, batch_size=2, n=10, sort=True) def test_vectors_most_similar_identical(): """Test that most similar identical vectors are assigned a score of 1.0.""" data = numpy.asarray([[4, 2, 2, 2], [4, 2, 2, 2], [1, 1, 1, 1]], dtype="f") v = Vectors(data=data, keys=["A", "B", "C"]) keys, _, scores = v.most_similar(numpy.asarray([[4, 2, 2, 2]], dtype="f")) assert scores[0][0] == 1.0 # not 1.0000002 data = numpy.asarray([[1, 2, 3], [1, 2, 3], [1, 1, 1]], dtype="f") v = Vectors(data=data, keys=["A", "B", "C"]) keys, _, scores = v.most_similar(numpy.asarray([[1, 2, 3]], dtype="f")) assert scores[0][0] == 1.0 # not 0.9999999 @pytest.mark.parametrize("text", ["apple and orange"]) def test_vectors_token_vector(tokenizer_v, vectors, text): doc = tokenizer_v(text) assert vectors[0][0] == doc[0].text 
assert all([a == b for a, b in zip(vectors[0][1], doc[0].vector)]) assert vectors[1][0] == doc[2].text assert all([a == b for a, b in zip(vectors[1][1], doc[2].vector)]) @pytest.mark.parametrize("text", ["apple", "orange"]) def test_vectors_lexeme_vector(vocab, text): lex = vocab[text] assert list(lex.vector) assert lex.vector_norm @pytest.mark.parametrize("text", [["apple", "and", "orange"]]) def test_vectors_doc_vector(vocab, text): doc = Doc(vocab, words=text) assert list(doc.vector) assert doc.vector_norm @pytest.mark.parametrize("text", [["apple", "and", "orange"]]) def test_vectors_span_vector(vocab, text): span = Doc(vocab, words=text)[0:2] assert list(span.vector) assert span.vector_norm @pytest.mark.parametrize("text", ["apple orange"]) def test_vectors_token_token_similarity(tokenizer_v, text): doc = tokenizer_v(text) assert doc[0].similarity(doc[1]) == doc[1].similarity(doc[0]) assert -1.0 < doc[0].similarity(doc[1]) < 1.0 @pytest.mark.parametrize("text1,text2", [("apple", "orange")]) def test_vectors_token_lexeme_similarity(tokenizer_v, vocab, text1, text2): token = tokenizer_v(text1) lex = vocab[text2] assert token.similarity(lex) == lex.similarity(token) assert -1.0 < token.similarity(lex) < 1.0 @pytest.mark.parametrize("text", [["apple", "orange", "juice"]]) def test_vectors_token_span_similarity(vocab, text): doc = Doc(vocab, words=text) assert doc[0].similarity(doc[1:3]) == doc[1:3].similarity(doc[0]) assert -1.0 < doc[0].similarity(doc[1:3]) < 1.0 @pytest.mark.parametrize("text", [["apple", "orange", "juice"]]) def test_vectors_token_doc_similarity(vocab, text): doc = Doc(vocab, words=text) assert doc[0].similarity(doc) == doc.similarity(doc[0]) assert -1.0 < doc[0].similarity(doc) < 1.0 @pytest.mark.parametrize("text", [["apple", "orange", "juice"]]) def test_vectors_lexeme_span_similarity(vocab, text): doc = Doc(vocab, words=text) lex = vocab[text[0]] assert lex.similarity(doc[1:3]) == doc[1:3].similarity(lex) assert -1.0 < doc.similarity(doc[1:3]) < 1.0 @pytest.mark.parametrize("text1,text2", [("apple", "orange")]) def test_vectors_lexeme_lexeme_similarity(vocab, text1, text2): lex1 = vocab[text1] lex2 = vocab[text2] assert lex1.similarity(lex2) == lex2.similarity(lex1) assert -1.0 < lex1.similarity(lex2) < 1.0 @pytest.mark.parametrize("text", [["apple", "orange", "juice"]]) def test_vectors_lexeme_doc_similarity(vocab, text): doc = Doc(vocab, words=text) lex = vocab[text[0]] assert lex.similarity(doc) == doc.similarity(lex) assert -1.0 < lex.similarity(doc) < 1.0 @pytest.mark.parametrize("text", [["apple", "orange", "juice"]]) def test_vectors_span_span_similarity(vocab, text): doc = Doc(vocab, words=text) assert doc[0:2].similarity(doc[1:3]) == doc[1:3].similarity(doc[0:2]) assert -1.0 < doc[0:2].similarity(doc[1:3]) < 1.0 @pytest.mark.parametrize("text", [["apple", "orange", "juice"]]) def test_vectors_span_doc_similarity(vocab, text): doc = Doc(vocab, words=text) assert doc[0:2].similarity(doc) == doc.similarity(doc[0:2]) assert -1.0 < doc[0:2].similarity(doc) < 1.0 @pytest.mark.parametrize( "text1,text2", [(["apple", "and", "apple", "pie"], ["orange", "juice"])] ) def test_vectors_doc_doc_similarity(vocab, text1, text2): doc1 = Doc(vocab, words=text1) doc2 = Doc(vocab, words=text2) assert doc1.similarity(doc2) == doc2.similarity(doc1) assert -1.0 < doc1.similarity(doc2) < 1.0 def test_vocab_add_vector(): vocab = Vocab(vectors_name="test_vocab_add_vector") data = OPS.xp.ndarray((5, 3), dtype="f") data[0] = 1.0 data[1] = 2.0 vocab.set_vector("cat", data[0]) 
vocab.set_vector("dog", data[1]) cat = vocab["cat"] assert list(cat.vector) == [1.0, 1.0, 1.0] dog = vocab["dog"] assert list(dog.vector) == [2.0, 2.0, 2.0] with pytest.raises(ValueError): vocab.vectors.add(vocab["hamster"].orth, row=1000000) def test_vocab_prune_vectors(): vocab = Vocab(vectors_name="test_vocab_prune_vectors") _ = vocab["cat"] # noqa: F841 _ = vocab["dog"] # noqa: F841 _ = vocab["kitten"] # noqa: F841 data = OPS.xp.ndarray((5, 3), dtype="f") data[0] = OPS.asarray([1.0, 1.2, 1.1]) data[1] = OPS.asarray([0.3, 1.3, 1.0]) data[2] = OPS.asarray([0.9, 1.22, 1.05]) vocab.set_vector("cat", data[0]) vocab.set_vector("dog", data[1]) vocab.set_vector("kitten", data[2]) remap = vocab.prune_vectors(2, batch_size=2) assert list(remap.keys()) == ["kitten"] neighbour, similarity = list(remap.values())[0] assert neighbour == "cat", remap cosine = get_cosine(data[0], data[2]) assert_allclose(float(similarity), cosine, atol=1e-4, rtol=1e-3) def test_vectors_serialize(): data = OPS.asarray([[4, 2, 2, 2], [4, 2, 2, 2], [1, 1, 1, 1]], dtype="f") v = Vectors(data=data, keys=["A", "B", "C"]) b = v.to_bytes() v_r = Vectors() v_r.from_bytes(b) assert_equal(OPS.to_numpy(v.data), OPS.to_numpy(v_r.data)) assert v.key2row == v_r.key2row v.resize((5, 4)) v_r.resize((5, 4)) row = v.add("D", vector=OPS.asarray([1, 2, 3, 4], dtype="f")) row_r = v_r.add("D", vector=OPS.asarray([1, 2, 3, 4], dtype="f")) assert row == row_r assert_equal(OPS.to_numpy(v.data), OPS.to_numpy(v_r.data)) assert v.is_full == v_r.is_full with make_tempdir() as d: v.to_disk(d) v_r.from_disk(d) assert_equal(OPS.to_numpy(v.data), OPS.to_numpy(v_r.data)) assert v.key2row == v_r.key2row v.resize((5, 4)) v_r.resize((5, 4)) row = v.add("D", vector=OPS.asarray([10, 20, 30, 40], dtype="f")) row_r = v_r.add("D", vector=OPS.asarray([10, 20, 30, 40], dtype="f")) assert row == row_r assert_equal(OPS.to_numpy(v.data), OPS.to_numpy(v_r.data)) assert v.attr == v_r.attr def test_vector_is_oov(): vocab = Vocab(vectors_name="test_vocab_is_oov") data = OPS.xp.ndarray((5, 3), dtype="f") data[0] = 1.0 data[1] = 2.0 vocab.set_vector("cat", data[0]) vocab.set_vector("dog", data[1]) assert vocab["cat"].is_oov is False assert vocab["dog"].is_oov is False assert vocab["hamster"].is_oov is True def test_init_vectors_unset(): v = Vectors(shape=(10, 10)) assert v.is_full is False assert v.shape == (10, 10) with pytest.raises(ValueError): v = Vectors(shape=(10, 10), mode="floret") v = Vectors(data=OPS.xp.zeros((10, 10)), mode="floret", hash_count=1) assert v.is_full is True def test_vectors_clear(): data = OPS.asarray([[4, 2, 2, 2], [4, 2, 2, 2], [1, 1, 1, 1]], dtype="f") v = Vectors(data=data, keys=["A", "B", "C"]) assert v.is_full is True assert hash_string("A") in v v.clear() # no keys assert v.key2row == {} assert list(v) == [] assert v.is_full is False assert "A" not in v with pytest.raises(KeyError): v["A"] def test_vectors_get_batch(): data = OPS.asarray([[4, 2, 2, 2], [4, 2, 2, 2], [1, 1, 1, 1]], dtype="f") v = Vectors(data=data, keys=["A", "B", "C"]) # check with mixed int/str keys words = ["C", "B", "A", v.strings["B"]] rows = v.find(keys=words) vecs = OPS.as_contig(v.data[rows]) assert_equal(OPS.to_numpy(vecs), OPS.to_numpy(v.get_batch(words))) def test_vectors_deduplicate(): data = OPS.asarray([[1, 1], [2, 2], [3, 4], [1, 1], [3, 4]], dtype="f") v = Vectors(data=data, keys=["a1", "b1", "c1", "a2", "c2"]) vocab = Vocab() vocab.vectors = v # duplicate vectors do not use the same keys assert ( vocab.vectors.key2row[v.strings["a1"]] != 
vocab.vectors.key2row[v.strings["a2"]] ) assert ( vocab.vectors.key2row[v.strings["c1"]] != vocab.vectors.key2row[v.strings["c2"]] ) vocab.deduplicate_vectors() # there are three unique vectors assert vocab.vectors.shape[0] == 3 # the uniqued data is the same as the deduplicated data assert_equal( numpy.unique(OPS.to_numpy(vocab.vectors.data), axis=0), OPS.to_numpy(vocab.vectors.data), ) # duplicate vectors use the same keys now assert ( vocab.vectors.key2row[v.strings["a1"]] == vocab.vectors.key2row[v.strings["a2"]] ) assert ( vocab.vectors.key2row[v.strings["c1"]] == vocab.vectors.key2row[v.strings["c2"]] ) # deduplicating again makes no changes vocab_b = vocab.to_bytes() vocab.deduplicate_vectors() assert vocab_b == vocab.to_bytes() @pytest.fixture() def floret_vectors_hashvec_str(): """The full hashvec table from floret with the settings: bucket 10, dim 10, minn 2, maxn 3, hash count 2, hash seed 2166136261, bow <, eow >""" return """10 10 2 3 2 2166136261 < > 0 -2.2611 3.9302 2.6676 -11.233 0.093715 -10.52 -9.6463 -0.11853 2.101 -0.10145 1 -3.12 -1.7981 10.7 -6.171 4.4527 10.967 9.073 6.2056 -6.1199 -2.0402 2 9.5689 5.6721 -8.4832 -1.2249 2.1871 -3.0264 -2.391 -5.3308 -3.2847 -4.0382 3 3.6268 4.2759 -1.7007 1.5002 5.5266 1.8716 -12.063 0.26314 2.7645 2.4929 4 -11.683 -7.7068 2.1102 2.214 7.2202 0.69799 3.2173 -5.382 -2.0838 5.0314 5 -4.3024 8.0241 2.0714 -1.0174 -0.28369 1.7622 7.8797 -1.7795 6.7541 5.6703 6 8.3574 -5.225 8.6529 8.5605 -8.9465 3.767 -5.4636 -1.4635 -0.98947 -0.58025 7 -10.01 3.3894 -4.4487 1.1669 -11.904 6.5158 4.3681 0.79913 -6.9131 -8.687 8 -5.4576 7.1019 -8.8259 1.7189 4.955 -8.9157 -3.8905 -0.60086 -2.1233 5.892 9 8.0678 -4.4142 3.6236 4.5889 -2.7611 2.4455 0.67096 -4.2822 2.0875 4.6274 """ @pytest.fixture() def floret_vectors_vec_str(): """The top 10 rows from floret with the settings above, to verify that the spacy floret vectors are equivalent to the fasttext static vectors.""" return """10 10 , -5.7814 2.6918 0.57029 -3.6985 -2.7079 1.4406 1.0084 1.7463 -3.8625 -3.0565 . 
3.8016 -1.759 0.59118 3.3044 -0.72975 0.45221 -2.1412 -3.8933 -2.1238 -0.47409 der 0.08224 2.6601 -1.173 1.1549 -0.42821 -0.097268 -2.5589 -1.609 -0.16968 0.84687 die -2.8781 0.082576 1.9286 -0.33279 0.79488 3.36 3.5609 -0.64328 -2.4152 0.17266 und 2.1558 1.8606 -1.382 0.45424 -0.65889 1.2706 0.5929 -2.0592 -2.6949 -1.6015 " -1.1242 1.4588 -1.6263 1.0382 -2.7609 -0.99794 -0.83478 -1.5711 -1.2137 1.0239 in -0.87635 2.0958 4.0018 -2.2473 -1.2429 2.3474 1.8846 0.46521 -0.506 -0.26653 von -0.10589 1.196 1.1143 -0.40907 -1.0848 -0.054756 -2.5016 -1.0381 -0.41598 0.36982 ( 0.59263 2.1856 0.67346 1.0769 1.0701 1.2151 1.718 -3.0441 2.7291 3.719 ) 0.13812 3.3267 1.657 0.34729 -3.5459 0.72372 0.63034 -1.6145 1.2733 0.37798 """ def test_floret_vectors(floret_vectors_vec_str, floret_vectors_hashvec_str): nlp = English() nlp_plain = English() # load both vec and hashvec tables with make_tempdir() as tmpdir: p = tmpdir / "test.hashvec" with open(p, "w") as fileh: fileh.write(floret_vectors_hashvec_str) convert_vectors(nlp, p, truncate=0, prune=-1, mode="floret") p = tmpdir / "test.vec" with open(p, "w") as fileh: fileh.write(floret_vectors_vec_str) convert_vectors(nlp_plain, p, truncate=0, prune=-1) word = "der" # ngrams: full padded word + padded 2-grams + padded 3-grams ngrams = nlp.vocab.vectors._get_ngrams(word) assert ngrams == ["<der>", "<d", "de", "er", "r>", "<de", "der", "er>"] # rows: 2 rows per ngram rows = OPS.xp.asarray( [ h % nlp.vocab.vectors.shape[0] for ngram in ngrams for h in nlp.vocab.vectors._get_ngram_hashes(ngram) ], dtype="uint32", ) assert_equal( OPS.to_numpy(rows), numpy.asarray([5, 6, 7, 5, 8, 2, 8, 9, 3, 3, 4, 6, 7, 3, 0, 2]), ) assert len(rows) == len(ngrams) * nlp.vocab.vectors.hash_count # all vectors are equivalent for plain static table vs. 
hash ngrams for word in nlp_plain.vocab.vectors: word = nlp_plain.vocab.strings.as_string(word) assert_almost_equal( nlp.vocab[word].vector, nlp_plain.vocab[word].vector, decimal=3 ) # every word has a vector assert nlp.vocab[word * 5].has_vector # n_keys is -1 for floret assert nlp_plain.vocab.vectors.n_keys > 0 assert nlp.vocab.vectors.n_keys == -1 # check that single and batched vector lookups are identical words = [s for s in nlp_plain.vocab.vectors] single_vecs = OPS.to_numpy(OPS.asarray([nlp.vocab[word].vector for word in words])) batch_vecs = OPS.to_numpy(nlp.vocab.vectors.get_batch(words)) assert_equal(single_vecs, batch_vecs) # an empty key returns 0s assert_equal( OPS.to_numpy(nlp.vocab[""].vector), numpy.zeros((nlp.vocab.vectors.shape[0],)), ) # an empty batch returns 0s assert_equal( OPS.to_numpy(nlp.vocab.vectors.get_batch([""])), numpy.zeros((1, nlp.vocab.vectors.shape[0])), ) # an empty key within a batch returns 0s assert_equal( OPS.to_numpy(nlp.vocab.vectors.get_batch(["a", "", "b"])[1]), numpy.zeros((nlp.vocab.vectors.shape[0],)), ) # the loaded ngram vector table cannot be modified # except for clear: warning, then return without modifications vector = list(range(nlp.vocab.vectors.shape[1])) orig_bytes = nlp.vocab.vectors.to_bytes(exclude=["strings"]) with pytest.warns(UserWarning): nlp.vocab.set_vector("the", vector) assert orig_bytes == nlp.vocab.vectors.to_bytes(exclude=["strings"]) with pytest.warns(UserWarning): nlp.vocab[word].vector = vector assert orig_bytes == nlp.vocab.vectors.to_bytes(exclude=["strings"]) with pytest.warns(UserWarning): nlp.vocab.vectors.add("the", row=6) assert orig_bytes == nlp.vocab.vectors.to_bytes(exclude=["strings"]) with pytest.warns(UserWarning): nlp.vocab.vectors.resize(shape=(100, 10)) assert orig_bytes == nlp.vocab.vectors.to_bytes(exclude=["strings"]) with pytest.raises(ValueError): nlp.vocab.vectors.clear() # data and settings are serialized correctly with make_tempdir() as d: nlp.vocab.to_disk(d) vocab_r = Vocab() vocab_r.from_disk(d) assert nlp.vocab.vectors.to_bytes() == vocab_r.vectors.to_bytes() assert_equal( OPS.to_numpy(nlp.vocab.vectors.data), OPS.to_numpy(vocab_r.vectors.data) ) assert_equal(nlp.vocab.vectors._get_cfg(), vocab_r.vectors._get_cfg()) assert_almost_equal( OPS.to_numpy(nlp.vocab[word].vector), OPS.to_numpy(vocab_r[word].vector), decimal=6, ) def test_equality(): vectors1 = Vectors(shape=(10, 10)) vectors2 = Vectors(shape=(10, 8)) assert vectors1 != vectors2 vectors2 = Vectors(shape=(10, 10)) assert vectors1 == vectors2 vectors1.add("hello", row=2) assert vectors1 != vectors2 vectors2.add("hello", row=2) assert vectors1 == vectors2 vectors1.resize((5, 9)) vectors2.resize((5, 9)) assert vectors1 == vectors2 def test_vectors_attr(): data = numpy.asarray([[0, 0, 0], [1, 2, 3], [9, 8, 7]], dtype="f") # default ORTH nlp = English() nlp.vocab.vectors = Vectors(data=data, keys=["A", "B", "C"]) assert nlp.vocab.strings["A"] in nlp.vocab.vectors.key2row assert nlp.vocab.strings["a"] not in nlp.vocab.vectors.key2row assert nlp.vocab["A"].has_vector is True assert nlp.vocab["a"].has_vector is False assert nlp("A")[0].has_vector is True assert nlp("a")[0].has_vector is False # custom LOWER nlp = English() nlp.vocab.vectors = Vectors(data=data, keys=["a", "b", "c"], attr="LOWER") assert nlp.vocab.strings["A"] not in nlp.vocab.vectors.key2row assert nlp.vocab.strings["a"] in nlp.vocab.vectors.key2row assert nlp.vocab["A"].has_vector is True assert nlp.vocab["a"].has_vector is True assert nlp("A")[0].has_vector is True 
assert nlp("a")[0].has_vector is True # add a new vectors entry assert nlp.vocab["D"].has_vector is False assert nlp.vocab["d"].has_vector is False nlp.vocab.set_vector("D", numpy.asarray([4, 5, 6])) assert nlp.vocab["D"].has_vector is True assert nlp.vocab["d"].has_vector is True
23,814
34.073638
106
py
spaCy
spaCy-master/spacy/tests/vocab_vectors/test_vocab_api.py
import os

import pytest

from spacy.attrs import IS_ALPHA, LEMMA, ORTH
from spacy.lang.en import English
from spacy.parts_of_speech import NOUN, VERB
from spacy.vocab import Vocab

from ..util import make_tempdir


@pytest.mark.issue(1868)
def test_issue1868():
    """Test Vocab.__contains__ works with int keys."""
    vocab = Vocab()
    lex = vocab["hello"]
    assert lex.orth in vocab
    assert lex.orth_ in vocab
    assert "some string" not in vocab
    int_id = vocab.strings.add("some string")
    assert int_id not in vocab


@pytest.mark.parametrize(
    "text1,text2", [("Hello", "bye"), ("Hello", "hello"), ("Hello", "Hello,")]
)
def test_vocab_api_neq(en_vocab, text1, text2):
    assert en_vocab[text1].orth != en_vocab[text2].orth


# Parametrize over a one-element list: a bare string here would run the test
# once per character rather than once for the word.
@pytest.mark.parametrize("text", ["Hello"])
def test_vocab_api_eq(en_vocab, text):
    lex = en_vocab[text]
    assert en_vocab[text].orth == lex.orth


@pytest.mark.parametrize("text", ["example"])
def test_vocab_api_shape_attr(en_vocab, text):
    lex = en_vocab[text]
    assert lex.orth != lex.shape


@pytest.mark.parametrize(
    "string,symbol",
    [
        ("IS_ALPHA", IS_ALPHA),
        ("NOUN", NOUN),
        ("VERB", VERB),
        ("LEMMA", LEMMA),
        ("ORTH", ORTH),
    ],
)
def test_vocab_api_symbols(en_vocab, string, symbol):
    assert en_vocab.strings[string] == symbol


@pytest.mark.parametrize("text", ["Hello"])
def test_vocab_api_contains(en_vocab, text):
    _ = en_vocab[text]  # noqa: F841
    assert text in en_vocab
    assert "LKsdjvlsakdvlaksdvlkasjdvljasdlkfvm" not in en_vocab


def test_vocab_writing_system(en_vocab):
    assert en_vocab.writing_system["direction"] == "ltr"
    assert en_vocab.writing_system["has_case"] is True


def test_to_disk():
    nlp = English()
    with make_tempdir() as d:
        nlp.vocab.to_disk(d)
        assert "vectors" in os.listdir(d)
        assert "lookups.bin" in os.listdir(d)


def test_to_disk_exclude():
    nlp = English()
    with make_tempdir() as d:
        nlp.vocab.to_disk(d, exclude=("vectors", "lookups"))
        assert "vectors" not in os.listdir(d)
        assert "lookups.bin" not in os.listdir(d)
2,148
24.583333
78
py
spaCy
spaCy-master/spacy/tokens/__init__.py
from ._serialize import DocBin
from .doc import Doc
from .morphanalysis import MorphAnalysis
from .span import Span
from .span_group import SpanGroup
from .token import Token

__all__ = ["Doc", "Token", "Span", "SpanGroup", "DocBin", "MorphAnalysis"]
251
27
74
py
spaCy
spaCy-master/spacy/tokens/_dict_proxies.py
import warnings
import weakref
from collections import UserDict
from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Tuple, Union

import srsly

from ..errors import Errors, Warnings
from .span_group import SpanGroup

if TYPE_CHECKING:
    # This lets us add type hints for mypy etc. without causing circular imports
    from .doc import Doc  # noqa: F401
    from .span import Span  # noqa: F401


# Why inherit from UserDict instead of dict here?
# Well, the 'dict' class doesn't necessarily delegate everything nicely,
# for performance reasons. The UserDict is slower but better behaved.
# See https://treyhunner.com/2019/04/why-you-shouldnt-inherit-from-list-and-dict-in-python/
class SpanGroups(UserDict):
    """A dict-like proxy held by the Doc, to control access to span groups."""

    _EMPTY_BYTES = srsly.msgpack_dumps([])

    def __init__(
        self, doc: "Doc", items: Iterable[Tuple[str, SpanGroup]] = tuple()
    ) -> None:
        self.doc_ref = weakref.ref(doc)
        UserDict.__init__(self, items)  # type: ignore[arg-type]

    def __setitem__(self, key: str, value: Union[SpanGroup, Iterable["Span"]]) -> None:
        if not isinstance(value, SpanGroup):
            value = self._make_span_group(key, value)
        assert value.doc is self.doc_ref()
        UserDict.__setitem__(self, key, value)

    def _make_span_group(self, name: str, spans: Iterable["Span"]) -> SpanGroup:
        doc = self._ensure_doc()
        return SpanGroup(doc, name=name, spans=spans)

    def copy(self, doc: Optional["Doc"] = None) -> "SpanGroups":
        if doc is None:
            doc = self._ensure_doc()
        data_copy = ((k, v.copy(doc=doc)) for k, v in self.items())
        return SpanGroups(doc, items=data_copy)

    def setdefault(self, key, default=None):
        if not isinstance(default, SpanGroup):
            if default is None:
                spans = []
            else:
                spans = default
            default = self._make_span_group(key, spans)
        return super().setdefault(key, default=default)

    def to_bytes(self) -> bytes:
        # We serialize this as a dict in order to track the key(s) a SpanGroup
        # is a value of (in a backward- and forward-compatible way), since
        # a SpanGroup can have a key that doesn't match its `.name` (See #10685)
        if len(self) == 0:
            return self._EMPTY_BYTES
        msg: Dict[bytes, List[str]] = {}
        for key, value in self.items():
            msg.setdefault(value.to_bytes(), []).append(key)
        return srsly.msgpack_dumps(msg)

    def from_bytes(self, bytes_data: bytes) -> "SpanGroups":
        # backwards-compatibility: bytes_data may be one of:
        # b'', a serialized empty list, a serialized list of SpanGroup bytes
        # or a serialized dict of SpanGroup bytes -> keys
        msg = (
            []
            if not bytes_data or bytes_data == self._EMPTY_BYTES
            else srsly.msgpack_loads(bytes_data)
        )
        self.clear()
        doc = self._ensure_doc()
        if isinstance(msg, list):
            # This is either the 1st version of `SpanGroups` serialization
            # or there were no SpanGroups serialized
            for value_bytes in msg:
                group = SpanGroup(doc).from_bytes(value_bytes)
                if group.name in self:
                    # Display a warning if `msg` contains `SpanGroup`s
                    # that have the same .name (attribute).
                    # Because, for `SpanGroups` serialized as lists,
                    # only 1 SpanGroup per .name is loaded. (See #10685)
                    warnings.warn(
                        Warnings.W120.format(
                            group_name=group.name, group_values=self[group.name]
                        )
                    )
                self[group.name] = group
        else:
            for value_bytes, keys in msg.items():
                group = SpanGroup(doc).from_bytes(value_bytes)
                # Deserialize `SpanGroup`s as copies because it's possible for two
                # different `SpanGroup`s (pre-serialization) to have the same bytes
                # (since they can have the same `.name`).
                self[keys[0]] = group
                for key in keys[1:]:
                    self[key] = group.copy()
        return self

    def _ensure_doc(self) -> "Doc":
        doc = self.doc_ref()
        if doc is None:
            raise ValueError(Errors.E866)
        return doc
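# A minimal round-trip sketch for the SpanGroups proxy above, assuming a
# blank vocab; the words and group key are illustrative.
if __name__ == "__main__":
    from spacy.tokens import Doc
    from spacy.vocab import Vocab

    doc = Doc(Vocab(), words=["New", "York", "City"])
    # Assigning a plain list of Spans wraps it in a SpanGroup via __setitem__.
    doc.spans["cities"] = [doc[0:2]]
    data = doc.spans.to_bytes()  # serialized as {group bytes: [keys]}
    doc.spans.from_bytes(data)
    print([span.text for span in doc.spans["cities"]])  # ['New York']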
4,515
39.684685
91
py
spaCy
spaCy-master/spacy/tokens/_serialize.py
import zlib from pathlib import Path from typing import Dict, Iterable, Iterator, List, Optional, Set, Union import numpy import srsly from numpy import ndarray from thinc.api import NumpyOps from ..attrs import IDS, ORTH, SPACY, intify_attr from ..compat import copy_reg from ..errors import Errors from ..util import SimpleFrozenList, ensure_path from ..vocab import Vocab from ._dict_proxies import SpanGroups from .doc import DOCBIN_ALL_ATTRS as ALL_ATTRS from .doc import Doc class DocBin: """Pack Doc objects for binary serialization. The DocBin class lets you efficiently serialize the information from a collection of Doc objects. You can control which information is serialized by passing a list of attribute IDs, and optionally also specify whether the user data is serialized. The DocBin is faster and produces smaller data sizes than pickle, and allows you to deserialize without executing arbitrary Python code. The serialization format is gzipped msgpack, where the msgpack object has the following structure: { "attrs": List[uint64], # e.g. [TAG, HEAD, ENT_IOB, ENT_TYPE] "tokens": bytes, # Serialized numpy uint64 array with the token data "spans": List[Dict[str, bytes]], # SpanGroups data for each doc "spaces": bytes, # Serialized numpy boolean array with spaces data "lengths": bytes, # Serialized numpy int32 array with the doc lengths "strings": List[str] # List of unique strings in the token data "version": str, # DocBin version number } Strings for the words, tags, labels etc are represented by 64-bit hashes in the token data, and every string that occurs at least once is passed via the strings object. This means the storage is more efficient if you pack more documents together, because you have less duplication in the strings. A notable downside to this format is that you can't easily extract just one document from the DocBin. """ def __init__( self, attrs: Iterable[str] = ALL_ATTRS, store_user_data: bool = False, docs: Iterable[Doc] = SimpleFrozenList(), ) -> None: """Create a DocBin object to hold serialized annotations. attrs (Iterable[str]): List of attributes to serialize. 'orth' and 'spacy' are always serialized, so they're not required. store_user_data (bool): Whether to write the `Doc.user_data` to bytes/file. docs (Iterable[Doc]): Docs to add. DOCS: https://spacy.io/api/docbin#init """ int_attrs = [intify_attr(attr) for attr in attrs] if None in int_attrs: non_valid = [attr for attr in attrs if intify_attr(attr) is None] raise KeyError( Errors.E983.format(dict="attrs", key=non_valid, keys=IDS.keys()) ) from None attrs = sorted(int_attrs) self.version = "0.1" self.attrs = [attr for attr in attrs if attr != ORTH and attr != SPACY] self.attrs.insert(0, ORTH) # Ensure ORTH is always attrs[0] self.tokens: List[ndarray] = [] self.spaces: List[ndarray] = [] self.cats: List[Dict] = [] self.span_groups: List[bytes] = [] self.user_data: List[Optional[bytes]] = [] self.flags: List[Dict] = [] self.strings: Set[str] = set() self.store_user_data = store_user_data for doc in docs: self.add(doc) def __len__(self) -> int: """RETURNS: The number of Doc objects added to the DocBin.""" return len(self.tokens) def add(self, doc: Doc) -> None: """Add a Doc's annotations to the DocBin for serialization. doc (Doc): The Doc object to add. 
DOCS: https://spacy.io/api/docbin#add """ array = doc.to_array(self.attrs) if len(array.shape) == 1: array = array.reshape((array.shape[0], 1)) self.tokens.append(array) spaces = doc.to_array(SPACY) assert array.shape[0] == spaces.shape[0] # this should never happen spaces = spaces.reshape((spaces.shape[0], 1)) self.spaces.append(numpy.asarray(spaces, dtype=bool)) self.flags.append({"has_unknown_spaces": doc.has_unknown_spaces}) for token in doc: self.strings.add(token.text) self.strings.add(token.tag_) self.strings.add(token.lemma_) self.strings.add(token.norm_) self.strings.add(str(token.morph)) self.strings.add(token.dep_) self.strings.add(token.ent_type_) self.strings.add(token.ent_kb_id_) self.strings.add(token.ent_id_) self.cats.append(doc.cats) if self.store_user_data: self.user_data.append(srsly.msgpack_dumps(doc.user_data)) self.span_groups.append(doc.spans.to_bytes()) for key, group in doc.spans.items(): for span in group: self.strings.add(span.label_) if span.kb_id in span.doc.vocab.strings: self.strings.add(span.kb_id_) if span.id in span.doc.vocab.strings: self.strings.add(span.id_) def get_docs(self, vocab: Vocab) -> Iterator[Doc]: """Recover Doc objects from the annotations, using the given vocab. Note that the user data of each doc will be read (if available) and returned, regardless of the setting of 'self.store_user_data'. vocab (Vocab): The shared vocab. YIELDS (Doc): The Doc objects. DOCS: https://spacy.io/api/docbin#get_docs """ for string in self.strings: vocab[string] orth_col = self.attrs.index(ORTH) for i in range(len(self.tokens)): flags = self.flags[i] tokens = self.tokens[i] spaces: Optional[ndarray] = self.spaces[i] if flags.get("has_unknown_spaces"): spaces = None doc = Doc(vocab, words=tokens[:, orth_col], spaces=spaces) # type: ignore doc = doc.from_array(self.attrs, tokens) # type: ignore doc.cats = self.cats[i] # backwards-compatibility: may be b'' or serialized empty list if self.span_groups[i] and self.span_groups[i] != SpanGroups._EMPTY_BYTES: doc.spans.from_bytes(self.span_groups[i]) else: doc.spans.clear() if i < len(self.user_data) and self.user_data[i] is not None: user_data = srsly.msgpack_loads(self.user_data[i], use_list=False) doc.user_data.update(user_data) yield doc def merge(self, other: "DocBin") -> None: """Extend the annotations of this DocBin with the annotations from another. Will raise an error if the pre-defined attrs of the two DocBins don't match, or if they differ in whether or not to store user data. other (DocBin): The DocBin to merge into the current bin. DOCS: https://spacy.io/api/docbin#merge """ if self.attrs != other.attrs: raise ValueError( Errors.E166.format(param="attrs", current=self.attrs, other=other.attrs) ) if self.store_user_data != other.store_user_data: raise ValueError( Errors.E166.format( param="store_user_data", current=self.store_user_data, other=other.store_user_data, ) ) self.tokens.extend(other.tokens) self.spaces.extend(other.spaces) self.strings.update(other.strings) self.cats.extend(other.cats) self.span_groups.extend(other.span_groups) self.flags.extend(other.flags) self.user_data.extend(other.user_data) def to_bytes(self) -> bytes: """Serialize the DocBin's annotations to a bytestring. RETURNS (bytes): The serialized DocBin. 
DOCS: https://spacy.io/api/docbin#to_bytes """ for tokens in self.tokens: assert len(tokens.shape) == 2, tokens.shape # this should never happen lengths = [len(tokens) for tokens in self.tokens] tokens = numpy.vstack(self.tokens) if self.tokens else numpy.asarray([]) spaces = numpy.vstack(self.spaces) if self.spaces else numpy.asarray([]) msg = { "version": self.version, "attrs": self.attrs, "tokens": tokens.tobytes("C"), "spaces": spaces.tobytes("C"), "lengths": numpy.asarray(lengths, dtype="int32").tobytes("C"), "strings": list(sorted(self.strings)), "cats": self.cats, "flags": self.flags, "span_groups": self.span_groups, } if self.store_user_data: msg["user_data"] = self.user_data return zlib.compress(srsly.msgpack_dumps(msg)) def from_bytes(self, bytes_data: bytes) -> "DocBin": """Deserialize the DocBin's annotations from a bytestring. bytes_data (bytes): The data to load from. RETURNS (DocBin): The loaded DocBin. DOCS: https://spacy.io/api/docbin#from_bytes """ try: msg = srsly.msgpack_loads(zlib.decompress(bytes_data)) except zlib.error: raise ValueError(Errors.E1014) self.attrs = msg["attrs"] self.strings = set(msg["strings"]) lengths = numpy.frombuffer(msg["lengths"], dtype="int32") flat_spaces = numpy.frombuffer(msg["spaces"], dtype=bool) flat_tokens = numpy.frombuffer(msg["tokens"], dtype="uint64") shape = (flat_tokens.size // len(self.attrs), len(self.attrs)) flat_tokens = flat_tokens.reshape(shape) flat_spaces = flat_spaces.reshape((flat_spaces.size, 1)) self.tokens = NumpyOps().unflatten(flat_tokens, lengths) self.spaces = NumpyOps().unflatten(flat_spaces, lengths) self.cats = msg["cats"] self.span_groups = msg.get("span_groups", [b"" for _ in lengths]) self.flags = msg.get("flags", [{} for _ in lengths]) if "user_data" in msg: self.user_data = list(msg["user_data"]) else: self.user_data = [None] * len(self) for tokens in self.tokens: assert len(tokens.shape) == 2, tokens.shape # this should never happen return self def to_disk(self, path: Union[str, Path]) -> None: """Save the DocBin to a file (typically called .spacy). path (str / Path): The file path. DOCS: https://spacy.io/api/docbin#to_disk """ path = ensure_path(path) with path.open("wb") as file_: try: file_.write(self.to_bytes()) except ValueError: raise ValueError(Errors.E870) def from_disk(self, path: Union[str, Path]) -> "DocBin": """Load the DocBin from a file (typically called .spacy). path (str / Path): The file path. RETURNS (DocBin): The loaded DocBin. DOCS: https://spacy.io/api/docbin#to_disk """ path = ensure_path(path) with path.open("rb") as file_: self.from_bytes(file_.read()) return self def merge_bins(bins): merged = None for byte_string in bins: if byte_string is not None: doc_bin = DocBin(store_user_data=True).from_bytes(byte_string) if merged is None: merged = doc_bin else: merged.merge(doc_bin) if merged is not None: return merged.to_bytes() else: return b"" def pickle_bin(doc_bin): return (unpickle_bin, (doc_bin.to_bytes(),)) def unpickle_bin(byte_string): return DocBin().from_bytes(byte_string) copy_reg.pickle(DocBin, pickle_bin, unpickle_bin) # Compatibility, as we had named it this previously. Binder = DocBin __all__ = ["DocBin"]
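# A minimal round-trip sketch for the DocBin API above, assuming a blank
# English pipeline; the texts are illustrative.
if __name__ == "__main__":
    import spacy

    nlp = spacy.blank("en")
    doc_bin = DocBin(store_user_data=True)
    for text in ["Serialize me.", "Me too."]:
        doc_bin.add(nlp(text))
    data = doc_bin.to_bytes()  # zlib-compressed msgpack (see to_bytes above)
    restored = DocBin().from_bytes(data)
    print([doc.text for doc in restored.get_docs(nlp.vocab)])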
11,991
37.935065
88
py
spaCy
spaCy-master/spacy/tokens/underscore.py
import copy
import functools
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union

from ..errors import Errors

if TYPE_CHECKING:
    from .doc import Doc
    from .span import Span
    from .token import Token


class Underscore:
    mutable_types = (dict, list, set)
    doc_extensions: Dict[Any, Any] = {}
    span_extensions: Dict[Any, Any] = {}
    token_extensions: Dict[Any, Any] = {}
    _extensions: Dict[str, Any]
    _obj: Union["Doc", "Span", "Token"]
    _start: Optional[int]
    _end: Optional[int]

    def __init__(
        self,
        extensions: Dict[str, Any],
        obj: Union["Doc", "Span", "Token"],
        start: Optional[int] = None,
        end: Optional[int] = None,
    ):
        object.__setattr__(self, "_extensions", extensions)
        object.__setattr__(self, "_obj", obj)
        # Assumption is that for doc values, _start and _end will both be None
        # Span will set non-None values for _start and _end
        # Token will have _start be non-None, _end be None
        # This lets us key everything into the doc.user_data dictionary,
        # (see _get_key), and lets us use a single Underscore class.
        object.__setattr__(self, "_doc", obj.doc)
        object.__setattr__(self, "_start", start)
        object.__setattr__(self, "_end", end)

    def __dir__(self) -> List[str]:
        # Hack to enable autocomplete on custom extensions
        extensions = list(self._extensions.keys())
        return ["set", "get", "has"] + extensions

    def __getattr__(self, name: str) -> Any:
        if name not in self._extensions:
            raise AttributeError(Errors.E046.format(name=name))
        default, method, getter, setter = self._extensions[name]
        if getter is not None:
            return getter(self._obj)
        elif method is not None:
            method_partial = functools.partial(method, self._obj)
            # Hack to port over docstrings of the original function
            # See https://stackoverflow.com/q/27362727/6400719
            method_docstring = method.__doc__ or ""
            method_docstring_prefix = (
                "This method is a partial function and its first argument "
                "(the object it's called on) will be filled automatically. "
            )
            method_partial.__doc__ = method_docstring_prefix + method_docstring
            return method_partial
        else:
            key = self._get_key(name)
            if key in self._doc.user_data:
                return self._doc.user_data[key]
            elif isinstance(default, self.mutable_types):
                # Handle mutable default arguments (see #2581)
                new_default = copy.copy(default)
                self.__setattr__(name, new_default)
                return new_default
            return default

    def __setattr__(self, name: str, value: Any):
        if name not in self._extensions:
            raise AttributeError(Errors.E047.format(name=name))
        default, method, getter, setter = self._extensions[name]
        if setter is not None:
            return setter(self._obj, value)
        else:
            self._doc.user_data[self._get_key(name)] = value

    def set(self, name: str, value: Any):
        return self.__setattr__(name, value)

    def get(self, name: str) -> Any:
        return self.__getattr__(name)

    def has(self, name: str) -> bool:
        return name in self._extensions

    def _get_key(self, name: str) -> Tuple[str, str, Optional[int], Optional[int]]:
        return ("._.", name, self._start, self._end)

    @classmethod
    def get_state(cls) -> Tuple[Dict[Any, Any], Dict[Any, Any], Dict[Any, Any]]:
        return cls.token_extensions, cls.span_extensions, cls.doc_extensions

    @classmethod
    def load_state(
        cls, state: Tuple[Dict[Any, Any], Dict[Any, Any], Dict[Any, Any]]
    ) -> None:
        cls.token_extensions, cls.span_extensions, cls.doc_extensions = state


def get_ext_args(**kwargs: Any):
    """Validate and convert arguments. Reused in Doc, Token and Span."""
    default = kwargs.get("default")
    getter = kwargs.get("getter")
    setter = kwargs.get("setter")
    method = kwargs.get("method")
    if getter is None and setter is not None:
        raise ValueError(Errors.E089)
    valid_opts = ("default" in kwargs, method is not None, getter is not None)
    nr_defined = sum(t is True for t in valid_opts)
    if nr_defined != 1:
        raise ValueError(Errors.E083.format(nr_defined=nr_defined))
    if setter is not None and not hasattr(setter, "__call__"):
        raise ValueError(Errors.E091.format(name="setter", value=repr(setter)))
    if getter is not None and not hasattr(getter, "__call__"):
        raise ValueError(Errors.E091.format(name="getter", value=repr(getter)))
    if method is not None and not hasattr(method, "__call__"):
        raise ValueError(Errors.E091.format(name="method", value=repr(method)))
    return (default, method, getter, setter)


def is_writable_attr(ext):
    """Check if an extension attribute is writable.

    ext (tuple): The (default, getter, setter, method) tuple available via
        {Doc,Span,Token}.get_extension.
    RETURNS (bool): Whether the attribute is writable.
    """
    default, method, getter, setter = ext
    # Extension is writable if it has a setter (getter + setter), if it has a
    # default value (or, if its default value is none, none of the other values
    # should be set).
    if setter is not None or default is not None or all(e is None for e in ext):
        return True
    return False
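# A minimal sketch of the extension mechanics validated by get_ext_args above:
# exactly one of `default`, `method` or `getter` may be supplied per extension.
# Assumes a blank English pipeline; the extension names are illustrative.
if __name__ == "__main__":
    import spacy
    from spacy.tokens import Doc

    Doc.set_extension("source", default=None)  # default-backed, so writable
    Doc.set_extension("n_chars", getter=lambda doc: len(doc.text))  # read-only
    doc = spacy.blank("en")("Hello world")
    doc._.source = "demo"
    # Values for default-backed extensions live in doc.user_data under the
    # key ("._.", name, None, None) built by Underscore._get_key.
    print(doc._.source, doc._.n_chars)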
5,590
38.935714
83
py
spaCy
spaCy-master/spacy/training/__init__.py
from .alignment import Alignment  # noqa: F401
from .augment import dont_augment, orth_variants_augmenter  # noqa: F401
from .batchers import minibatch_by_padded_size, minibatch_by_words  # noqa: F401
from .callbacks import create_copy_from_base_model  # noqa: F401
from .corpus import Corpus, JsonlCorpus, PlainTextCorpus  # noqa: F401
from .example import Example, validate_examples, validate_get_examples  # noqa: F401
from .gold_io import docs_to_json, read_json_file  # noqa: F401
from .iob_utils import (  # noqa: F401
    biluo_tags_to_offsets,
    biluo_tags_to_spans,
    biluo_to_iob,
    iob_to_biluo,
    offsets_to_biluo_tags,
    remove_bilu_prefix,
    split_bilu_label,
    tags_to_entities,
)
from .loggers import console_logger  # noqa: F401
760
39.052632
84
py
spaCy
spaCy-master/spacy/training/alignment.py
from dataclasses import dataclass
from typing import List

from .align import get_alignments
from .alignment_array import AlignmentArray


@dataclass
class Alignment:
    x2y: AlignmentArray
    y2x: AlignmentArray

    @classmethod
    def from_indices(cls, x2y: List[List[int]], y2x: List[List[int]]) -> "Alignment":
        x2y = AlignmentArray(x2y)
        y2x = AlignmentArray(y2x)
        return Alignment(x2y=x2y, y2x=y2x)

    @classmethod
    def from_strings(cls, A: List[str], B: List[str]) -> "Alignment":
        x2y, y2x = get_alignments(A, B)
        return Alignment.from_indices(x2y=x2y, y2x=y2x)
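# A minimal sketch of aligning two tokenizations of the same string with the
# class above; the token lists are illustrative.
if __name__ == "__main__":
    other_tokens = ["i", "listened", "to", "obama", "'", "s", "podcasts", "."]
    spacy_tokens = ["i", "listened", "to", "obama", "'s", "podcasts", "."]
    align = Alignment.from_strings(other_tokens, spacy_tokens)
    # Tokens 4 ("'") and 5 ("s") on the left both map to token 4 ("'s") on
    # the right, so the flat mapping reads [0, 1, 2, 3, 4, 4, 5, 6].
    print(align.x2y.data)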
614
25.73913
85
py
spaCy
spaCy-master/spacy/training/augment.py
import itertools import random from functools import partial from typing import TYPE_CHECKING, Callable, Dict, Iterator, List, Optional, Tuple from ..util import registry from .example import Example from .iob_utils import _doc_to_biluo_tags_with_partial, split_bilu_label if TYPE_CHECKING: from ..language import Language # noqa: F401 @registry.augmenters("spacy.combined_augmenter.v1") def create_combined_augmenter( lower_level: float, orth_level: float, orth_variants: Optional[Dict[str, List[Dict]]], whitespace_level: float, whitespace_per_token: float, whitespace_variants: Optional[List[str]], ) -> Callable[["Language", Example], Iterator[Example]]: """Create a data augmentation callback that uses orth-variant replacement. The callback can be added to a corpus or other data iterator during training. lower_level (float): The percentage of texts that will be lowercased. orth_level (float): The percentage of texts that will be augmented. orth_variants (Optional[Dict[str, List[Dict]]]): A dictionary containing the single and paired orth variants. Typically loaded from a JSON file. whitespace_level (float): The percentage of texts that will have whitespace tokens inserted. whitespace_per_token (float): The number of whitespace tokens to insert in the modified doc as a percentage of the doc length. whitespace_variants (Optional[List[str]]): The whitespace token texts. RETURNS (Callable[[Language, Example], Iterator[Example]]): The augmenter. """ return partial( combined_augmenter, lower_level=lower_level, orth_level=orth_level, orth_variants=orth_variants, whitespace_level=whitespace_level, whitespace_per_token=whitespace_per_token, whitespace_variants=whitespace_variants, ) def combined_augmenter( nlp: "Language", example: Example, *, lower_level: float = 0.0, orth_level: float = 0.0, orth_variants: Optional[Dict[str, List[Dict]]] = None, whitespace_level: float = 0.0, whitespace_per_token: float = 0.0, whitespace_variants: Optional[List[str]] = None, ) -> Iterator[Example]: if random.random() < lower_level: example = make_lowercase_variant(nlp, example) if orth_variants and random.random() < orth_level: raw_text = example.text orig_dict = example.to_dict() orig_dict["doc_annotation"]["entities"] = _doc_to_biluo_tags_with_partial( example.reference ) variant_text, variant_token_annot = make_orth_variants( nlp, raw_text, orig_dict["token_annotation"], orth_variants, lower=False, ) orig_dict["token_annotation"] = variant_token_annot example = example.from_dict(nlp.make_doc(variant_text), orig_dict) if whitespace_variants and random.random() < whitespace_level: for _ in range(int(len(example.reference) * whitespace_per_token)): example = make_whitespace_variant( nlp, example, random.choice(whitespace_variants), random.randrange(0, len(example.reference)), ) yield example @registry.augmenters("spacy.orth_variants.v1") def create_orth_variants_augmenter( level: float, lower: float, orth_variants: Dict[str, List[Dict]] ) -> Callable[["Language", Example], Iterator[Example]]: """Create a data augmentation callback that uses orth-variant replacement. The callback can be added to a corpus or other data iterator during training. level (float): The percentage of texts that will be augmented. lower (float): The percentage of texts that will be lowercased. orth_variants (Dict[str, List[Dict]]): A dictionary containing the single and paired orth variants. Typically loaded from a JSON file. RETURNS (Callable[[Language, Example], Iterator[Example]]): The augmenter. 
""" return partial( orth_variants_augmenter, orth_variants=orth_variants, level=level, lower=lower ) @registry.augmenters("spacy.lower_case.v1") def create_lower_casing_augmenter( level: float, ) -> Callable[["Language", Example], Iterator[Example]]: """Create a data augmentation callback that converts documents to lowercase. The callback can be added to a corpus or other data iterator during training. level (float): The percentage of texts that will be augmented. RETURNS (Callable[[Language, Example], Iterator[Example]]): The augmenter. """ return partial(lower_casing_augmenter, level=level) def dont_augment(nlp: "Language", example: Example) -> Iterator[Example]: yield example def lower_casing_augmenter( nlp: "Language", example: Example, *, level: float ) -> Iterator[Example]: if random.random() >= level: yield example else: yield make_lowercase_variant(nlp, example) def make_lowercase_variant(nlp: "Language", example: Example): example_dict = example.to_dict() example_dict["doc_annotation"]["entities"] = _doc_to_biluo_tags_with_partial( example.reference ) doc = nlp.make_doc(example.text.lower()) example_dict["token_annotation"]["ORTH"] = [t.lower_ for t in example.reference] return example.from_dict(doc, example_dict) def orth_variants_augmenter( nlp: "Language", example: Example, orth_variants: Dict[str, List[Dict]], *, level: float = 0.0, lower: float = 0.0, ) -> Iterator[Example]: if random.random() >= level: yield example else: raw_text = example.text orig_dict = example.to_dict() orig_dict["doc_annotation"]["entities"] = _doc_to_biluo_tags_with_partial( example.reference ) variant_text, variant_token_annot = make_orth_variants( nlp, raw_text, orig_dict["token_annotation"], orth_variants, lower=raw_text is not None and random.random() < lower, ) orig_dict["token_annotation"] = variant_token_annot yield example.from_dict(nlp.make_doc(variant_text), orig_dict) def make_orth_variants( nlp: "Language", raw: str, token_dict: Dict[str, List[str]], orth_variants: Dict[str, List[Dict[str, List[str]]]], *, lower: bool = False, ) -> Tuple[str, Dict[str, List[str]]]: words = token_dict.get("ORTH", []) tags = token_dict.get("TAG", []) # keep unmodified if words are not defined if not words: return raw, token_dict if lower: words = [w.lower() for w in words] raw = raw.lower() # if no tags, only lowercase if not tags: token_dict["ORTH"] = words return raw, token_dict # single variants ndsv = orth_variants.get("single", []) punct_choices = [random.choice(x["variants"]) for x in ndsv] for word_idx in range(len(words)): for punct_idx in range(len(ndsv)): if ( tags[word_idx] in ndsv[punct_idx]["tags"] and words[word_idx] in ndsv[punct_idx]["variants"] ): words[word_idx] = punct_choices[punct_idx] # paired variants ndpv = orth_variants.get("paired", []) punct_choices = [random.choice(x["variants"]) for x in ndpv] for word_idx in range(len(words)): for punct_idx in range(len(ndpv)): if tags[word_idx] in ndpv[punct_idx]["tags"] and words[ word_idx ] in itertools.chain.from_iterable(ndpv[punct_idx]["variants"]): # backup option: random left vs. 
right from pair pair_idx = random.choice([0, 1]) # best option: rely on paired POS tags like `` / '' if len(ndpv[punct_idx]["tags"]) == 2: pair_idx = ndpv[punct_idx]["tags"].index(tags[word_idx]) # next best option: rely on position in variants # (may not be unambiguous, so order of variants matters) else: for pair in ndpv[punct_idx]["variants"]: if words[word_idx] in pair: pair_idx = pair.index(words[word_idx]) words[word_idx] = punct_choices[punct_idx][pair_idx] token_dict["ORTH"] = words raw = construct_modified_raw_text(token_dict) return raw, token_dict def make_whitespace_variant( nlp: "Language", example: Example, whitespace: str, position: int, ) -> Example: """Insert the whitespace token at the specified token offset in the doc. This is primarily intended for v2-compatible training data that doesn't include links or spans. If the document includes links, spans, or partial dependency annotation, it is returned without modifications. The augmentation follows the basics of the v2 space attachment policy, but without a distinction between "real" and other tokens, so space tokens may be attached to space tokens: - at the beginning of a sentence attach the space token to the following token - otherwise attach the space token to the preceding token The augmenter does not attempt to consolidate adjacent whitespace in the same way that the tokenizer would. The following annotation is used for the space token: TAG: "_SP" MORPH: "" POS: "SPACE" LEMMA: ORTH DEP: "dep" SENT_START: False The annotation for each attribute is only set for the space token if there is already at least partial annotation for that attribute in the original example. RETURNS (Example): Example with one additional space token. """ example_dict = example.to_dict() example_dict["doc_annotation"]["entities"] = _doc_to_biluo_tags_with_partial( example.reference ) doc_dict = example_dict.get("doc_annotation", {}) token_dict = example_dict.get("token_annotation", {}) # returned unmodified if: # - doc is empty # - words are not defined # - links are defined (only character-based offsets, which is more a quirk # of Example.to_dict than a technical constraint) # - spans are defined # - there are partial dependencies if ( len(example.reference) == 0 or "ORTH" not in token_dict or len(doc_dict.get("links", [])) > 0 or len(example.reference.spans) > 0 or ( example.reference.has_annotation("DEP") and not example.reference.has_annotation("DEP", require_complete=True) ) ): return example words = token_dict.get("ORTH", []) length = len(words) assert 0 <= position <= length if example.reference.has_annotation("ENT_TYPE"): # I-ENTITY if between B/I-ENTITY and I/L-ENTITY otherwise O entity = "O" if position > 1 and position < length: ent_prev = doc_dict["entities"][position - 1] ent_next = doc_dict["entities"][position] if "-" in ent_prev and "-" in ent_next: ent_iob_prev, ent_type_prev = split_bilu_label(ent_prev) ent_iob_next, ent_type_next = split_bilu_label(ent_next) if ( ent_iob_prev in ("B", "I") and ent_iob_next in ("I", "L") and ent_type_prev == ent_type_next ): entity = f"I-{ent_type_prev}" doc_dict["entities"].insert(position, entity) else: del doc_dict["entities"] token_dict["ORTH"].insert(position, whitespace) token_dict["SPACY"].insert(position, False) if example.reference.has_annotation("TAG"): token_dict["TAG"].insert(position, "_SP") else: del token_dict["TAG"] if example.reference.has_annotation("LEMMA"): token_dict["LEMMA"].insert(position, whitespace) else: del token_dict["LEMMA"] if 
example.reference.has_annotation("POS"): token_dict["POS"].insert(position, "SPACE") else: del token_dict["POS"] if example.reference.has_annotation("MORPH"): token_dict["MORPH"].insert(position, "") else: del token_dict["MORPH"] if example.reference.has_annotation("DEP", require_complete=True): if position == 0: token_dict["HEAD"].insert(position, 0) else: token_dict["HEAD"].insert(position, position - 1) for i in range(len(token_dict["HEAD"])): if token_dict["HEAD"][i] >= position: token_dict["HEAD"][i] += 1 token_dict["DEP"].insert(position, "dep") else: del token_dict["HEAD"] del token_dict["DEP"] if example.reference.has_annotation("SENT_START"): token_dict["SENT_START"].insert(position, False) else: del token_dict["SENT_START"] raw = construct_modified_raw_text(token_dict) return Example.from_dict(nlp.make_doc(raw), example_dict) def construct_modified_raw_text(token_dict): """Construct modified raw text from words and spaces.""" raw = "" for orth, spacy in zip(token_dict["ORTH"], token_dict["SPACY"]): raw += orth if spacy: raw += " " return raw
13,261
37
86
py
spaCy
spaCy-master/spacy/training/batchers.py
import itertools from functools import partial from typing import ( Any, Callable, Iterable, Iterator, List, Optional, Sequence, TypeVar, Union, ) from ..util import minibatch, registry Sizing = Union[Sequence[int], int] ItemT = TypeVar("ItemT") BatcherT = Callable[[Iterable[ItemT]], Iterable[List[ItemT]]] @registry.batchers("spacy.batch_by_padded.v1") def configure_minibatch_by_padded_size( *, size: Sizing, buffer: int, discard_oversize: bool, get_length: Optional[Callable[[ItemT], int]] = None ) -> BatcherT: """Create a batcher that uses the `batch_by_padded_size` strategy. The padded size is defined as the maximum length of sequences within the batch multiplied by the number of sequences in the batch. size (int or Sequence[int]): The largest padded size to batch sequences into. Can be a single integer, or a sequence, allowing for variable batch sizes. buffer (int): The number of sequences to accumulate before sorting by length. A larger buffer will result in more even sizing, but if the buffer is very large, the iteration order will be less random, which can result in suboptimal training. discard_oversize (bool): Whether to discard sequences that are by themselves longer than the largest padded batch size. get_length (Callable or None): Function to get the length of a sequence item. The `len` function is used by default. """ # Avoid displacing optional values from the underlying function. optionals = {"get_length": get_length} if get_length is not None else {} return partial( minibatch_by_padded_size, size=size, buffer=buffer, discard_oversize=discard_oversize, **optionals ) @registry.batchers("spacy.batch_by_words.v1") def configure_minibatch_by_words( *, size: Sizing, tolerance: float, discard_oversize: bool, get_length: Optional[Callable[[ItemT], int]] = None ) -> BatcherT: """Create a batcher that uses the "minibatch by words" strategy. size (int or Sequence[int]): The target number of words per batch. Can be a single integer, or a sequence, allowing for variable batch sizes. tolerance (float): What percentage of the size to allow batches to exceed. discard_oversize (bool): Whether to discard sequences that by themselves exceed the tolerated size. get_length (Callable or None): Function to get the length of a sequence item. The `len` function is used by default. """ optionals = {"get_length": get_length} if get_length is not None else {} return partial( minibatch_by_words, size=size, tolerance=tolerance, discard_oversize=discard_oversize, **optionals ) @registry.batchers("spacy.batch_by_sequence.v1") def configure_minibatch( size: Sizing, get_length: Optional[Callable[[ItemT], int]] = None ) -> BatcherT: """Create a batcher that creates batches of the specified size. size (int or Sequence[int]): The target number of items per batch. Can be a single integer, or a sequence, allowing for variable batch sizes. """ optionals = {"get_length": get_length} if get_length is not None else {} return partial(minibatch, size=size, **optionals) def minibatch_by_padded_size( seqs: Iterable[ItemT], size: Sizing, buffer: int = 256, discard_oversize: bool = False, get_length: Callable = len, ) -> Iterable[List[ItemT]]: """Minibatch a sequence by the size of padded batches that would result, with sequences binned by length within a window. The padded size is defined as the maximum length of sequences within the batch multiplied by the number of sequences in the batch. size (int or Sequence[int]): The largest padded size to batch sequences into. 
buffer (int): The number of sequences to accumulate before sorting by length. A larger buffer will result in more even sizing, but if the buffer is very large, the iteration order will be less random, which can result in suboptimal training. discard_oversize (bool): Whether to discard sequences that are by themselves longer than the largest padded batch size. get_length (Callable or None): Function to get the length of a sequence item. The `len` function is used by default. """ if isinstance(size, int): size_ = itertools.repeat(size) # type: Iterator[int] else: size_ = iter(size) for outer_batch in minibatch(seqs, size=buffer): outer_batch = list(outer_batch) target_size = next(size_) for indices in _batch_by_length(outer_batch, target_size, get_length): subbatch = [outer_batch[i] for i in indices] padded_size = max(len(seq) for seq in subbatch) * len(subbatch) if discard_oversize and padded_size >= target_size: pass else: yield subbatch def minibatch_by_words( seqs: Iterable[ItemT], size: Sizing, tolerance=0.2, discard_oversize=False, get_length=len, ) -> Iterable[List[ItemT]]: """Create minibatches of roughly a given number of words. If any examples are longer than the specified batch length, they will appear in a batch by themselves, or be discarded if discard_oversize=True. seqs (Iterable[Sequence]): The sequences to minibatch. size (int or Sequence[int]): The target number of words per batch. Can be a single integer, or a sequence, allowing for variable batch sizes. tolerance (float): What percentage of the size to allow batches to exceed. discard_oversize (bool): Whether to discard sequences that by themselves exceed the tolerated size. get_length (Callable or None): Function to get the length of a sequence item. The `len` function is used by default. """ if isinstance(size, int): size_ = itertools.repeat(size) # type: Iterator[int] else: size_ = iter(size) target_size = next(size_) tol_size = target_size * tolerance batch = [] overflow = [] batch_size = 0 overflow_size = 0 for seq in seqs: n_words = get_length(seq) # if the current example exceeds the maximum batch size, it is returned separately # but only if discard_oversize=False. if n_words > target_size + tol_size: if not discard_oversize: yield [seq] # add the example to the current batch if there's no overflow yet and it still fits elif overflow_size == 0 and (batch_size + n_words) <= target_size: batch.append(seq) batch_size += n_words # add the example to the overflow buffer if it fits in the tolerance margin elif (batch_size + overflow_size + n_words) <= (target_size + tol_size): overflow.append(seq) overflow_size += n_words # yield the previous batch and start a new one. The new one gets the overflow examples. 
else: if batch: yield batch target_size = next(size_) tol_size = target_size * tolerance batch = overflow batch_size = overflow_size overflow = [] overflow_size = 0 # this example still fits if (batch_size + n_words) <= target_size: batch.append(seq) batch_size += n_words # this example fits in overflow elif (batch_size + n_words) <= (target_size + tol_size): overflow.append(seq) overflow_size += n_words # this example does not fit with the previous overflow: start another new batch else: if batch: yield batch target_size = next(size_) tol_size = target_size * tolerance batch = [seq] batch_size = n_words batch.extend(overflow) if batch: yield batch def _batch_by_length( seqs: Sequence[Any], max_words: int, get_length=len ) -> List[List[Any]]: """Given a list of sequences, return a batched list of indices into the list, where the batches are grouped by length, in descending order. Batches may be at most max_words in size, defined as max sequence length * size. """ # Use negative index so we can get sort by position ascending. lengths_indices = [(get_length(seq), i) for i, seq in enumerate(seqs)] lengths_indices.sort() batches = [] batch: List[int] = [] for length, i in lengths_indices: if not batch: batch.append(i) elif length * (len(batch) + 1) <= max_words: batch.append(i) else: batches.append(batch) batch = [i] if batch: batches.append(batch) # Check lengths match assert sum(len(b) for b in batches) == len(seqs) batches = [list(sorted(batch)) for batch in batches] batches.reverse() return batches
9,132
36.896266
95
py
spaCy
spaCy-master/spacy/training/callbacks.py
from typing import TYPE_CHECKING, Callable, Optional from ..errors import Errors from ..util import load_model, logger, registry if TYPE_CHECKING: from ..language import Language @registry.callbacks("spacy.copy_from_base_model.v1") def create_copy_from_base_model( tokenizer: Optional[str] = None, vocab: Optional[str] = None, ) -> Callable[["Language"], "Language"]: def copy_from_base_model(nlp): if tokenizer: logger.info("Copying tokenizer from: %s", tokenizer) base_nlp = load_model(tokenizer) if nlp.config["nlp"]["tokenizer"] == base_nlp.config["nlp"]["tokenizer"]: nlp.tokenizer.from_bytes(base_nlp.tokenizer.to_bytes(exclude=["vocab"])) else: raise ValueError( Errors.E872.format( curr_config=nlp.config["nlp"]["tokenizer"], base_config=base_nlp.config["nlp"]["tokenizer"], ) ) if vocab: logger.info("Copying vocab from: %s", vocab) # only reload if the vocab is from a different model if tokenizer != vocab: base_nlp = load_model(vocab) nlp.vocab.from_bytes(base_nlp.vocab.to_bytes()) return copy_from_base_model
1,312
35.472222
88
py
spaCy
spaCy-master/spacy/training/corpus.py
import random import warnings from pathlib import Path from typing import TYPE_CHECKING, Callable, Iterable, Iterator, List, Optional, Union import srsly from .. import util from ..errors import Errors, Warnings from ..tokens import Doc, DocBin from ..vocab import Vocab from .augment import dont_augment from .example import Example if TYPE_CHECKING: # This lets us add type hints for mypy etc. without causing circular imports from ..language import Language # noqa: F401 FILE_TYPE = ".spacy" @util.registry.readers("spacy.Corpus.v1") def create_docbin_reader( path: Optional[Path], gold_preproc: bool, max_length: int = 0, limit: int = 0, augmenter: Optional[Callable] = None, ) -> Callable[["Language"], Iterable[Example]]: if path is None: raise ValueError(Errors.E913) util.logger.debug("Loading corpus from path: %s", path) return Corpus( path, gold_preproc=gold_preproc, max_length=max_length, limit=limit, augmenter=augmenter, ) @util.registry.readers("spacy.JsonlCorpus.v1") def create_jsonl_reader( path: Optional[Union[str, Path]], min_length: int = 0, max_length: int = 0, limit: int = 0, ) -> Callable[["Language"], Iterable[Example]]: return JsonlCorpus(path, min_length=min_length, max_length=max_length, limit=limit) @util.registry.readers("spacy.read_labels.v1") def read_labels(path: Path, *, require: bool = False): # I decided not to give this a generic name, because I don't want people to # use it for arbitrary stuff, as I want this require arg with default False. if not require and not path.exists(): return None return srsly.read_json(path) @util.registry.readers("spacy.PlainTextCorpus.v1") def create_plain_text_reader( path: Optional[Path], min_length: int = 0, max_length: int = 0, ) -> Callable[["Language"], Iterable[Doc]]: """Iterate Example objects from a file or directory of plain text UTF-8 files with one line per doc. path (Path): The directory or filename to read from. min_length (int): Minimum document length (in tokens). Shorter documents will be skipped. Defaults to 0, which indicates no limit. max_length (int): Maximum document length (in tokens). Longer documents will be skipped. Defaults to 0, which indicates no limit. DOCS: https://spacy.io/api/corpus#plaintextcorpus """ if path is None: raise ValueError(Errors.E913) return PlainTextCorpus(path, min_length=min_length, max_length=max_length) def walk_corpus(path: Union[str, Path], file_type) -> List[Path]: path = util.ensure_path(path) if not path.is_dir() and path.parts[-1].endswith(file_type): return [path] orig_path = path paths = [path] locs = [] seen = set() for path in paths: if str(path) in seen: continue seen.add(str(path)) if path.parts and path.parts[-1].startswith("."): continue elif path.is_dir(): paths.extend(path.iterdir()) elif path.parts[-1].endswith(file_type): locs.append(path) if len(locs) == 0: warnings.warn(Warnings.W090.format(path=orig_path, format=file_type)) # It's good to sort these, in case the ordering messes up a cache. locs.sort() return locs class Corpus: """Iterate Example objects from a file or directory of DocBin (.spacy) formatted data files. path (Path): The directory or filename to read from. gold_preproc (bool): Whether to set up the Example object with gold-standard sentences and tokens for the predictions. Gold preprocessing helps the annotations align to the tokenization, and may result in sequences of more consistent length. However, it may reduce run-time accuracy due to train/test skew. Defaults to False. max_length (int): Maximum document length. 
Longer documents will be split into sentences, if sentence boundaries are available. Defaults to 0, which indicates no limit. limit (int): Limit corpus to a subset of examples, e.g. for debugging. Defaults to 0, which indicates no limit. augment (Callable[Example, Iterable[Example]]): Optional data augmentation function, to extrapolate additional examples from your annotations. shuffle (bool): Whether to shuffle the examples. DOCS: https://spacy.io/api/corpus """ def __init__( self, path: Union[str, Path], *, limit: int = 0, gold_preproc: bool = False, max_length: int = 0, augmenter: Optional[Callable] = None, shuffle: bool = False, ) -> None: self.path = util.ensure_path(path) self.gold_preproc = gold_preproc self.max_length = max_length self.limit = limit self.augmenter = augmenter if augmenter is not None else dont_augment self.shuffle = shuffle def __call__(self, nlp: "Language") -> Iterator[Example]: """Yield examples from the data. nlp (Language): The current nlp object. YIELDS (Example): The examples. DOCS: https://spacy.io/api/corpus#call """ ref_docs = self.read_docbin(nlp.vocab, walk_corpus(self.path, FILE_TYPE)) if self.shuffle: ref_docs = list(ref_docs) # type: ignore random.shuffle(ref_docs) # type: ignore if self.gold_preproc: examples = self.make_examples_gold_preproc(nlp, ref_docs) else: examples = self.make_examples(nlp, ref_docs) for real_eg in examples: for augmented_eg in self.augmenter(nlp, real_eg): # type: ignore[operator] yield augmented_eg def _make_example( self, nlp: "Language", reference: Doc, gold_preproc: bool ) -> Example: if gold_preproc or reference.has_unknown_spaces: return Example( Doc( nlp.vocab, words=[word.text for word in reference], spaces=[bool(word.whitespace_) for word in reference], ), reference, ) else: return Example(nlp.make_doc(reference.text), reference) def make_examples( self, nlp: "Language", reference_docs: Iterable[Doc] ) -> Iterator[Example]: for reference in reference_docs: if len(reference) == 0: continue elif self.max_length == 0 or len(reference) < self.max_length: yield self._make_example(nlp, reference, False) elif reference.has_annotation("SENT_START"): for ref_sent in reference.sents: if len(ref_sent) == 0: continue elif self.max_length == 0 or len(ref_sent) < self.max_length: yield self._make_example(nlp, ref_sent.as_doc(), False) def make_examples_gold_preproc( self, nlp: "Language", reference_docs: Iterable[Doc] ) -> Iterator[Example]: for reference in reference_docs: if reference.has_annotation("SENT_START"): ref_sents = [sent.as_doc() for sent in reference.sents] else: ref_sents = [reference] for ref_sent in ref_sents: eg = self._make_example(nlp, ref_sent, True) if len(eg.x): yield eg def read_docbin( self, vocab: Vocab, locs: Iterable[Union[str, Path]] ) -> Iterator[Doc]: """Yield training examples as example dicts""" i = 0 for loc in locs: loc = util.ensure_path(loc) if loc.parts[-1].endswith(FILE_TYPE): # type: ignore[union-attr] doc_bin = DocBin().from_disk(loc) docs = doc_bin.get_docs(vocab) for doc in docs: if len(doc): yield doc i += 1 if self.limit >= 1 and i >= self.limit: break class JsonlCorpus: """Iterate Example objects from a file or directory of jsonl formatted raw text files. path (Path): The directory or filename to read from. min_length (int): Minimum document length (in tokens). Shorter documents will be skipped. Defaults to 0, which indicates no limit. max_length (int): Maximum document length (in tokens). Longer documents will be skipped. Defaults to 0, which indicates no limit. 
limit (int): Limit corpus to a subset of examples, e.g. for debugging. Defaults to 0, which indicates no limit. DOCS: https://spacy.io/api/corpus#jsonlcorpus """ file_type = "jsonl" def __init__( self, path: Optional[Union[str, Path]], *, limit: int = 0, min_length: int = 0, max_length: int = 0, ) -> None: self.path = util.ensure_path(path) self.min_length = min_length self.max_length = max_length self.limit = limit def __call__(self, nlp: "Language") -> Iterator[Example]: """Yield examples from the data. nlp (Language): The current nlp object. YIELDS (Example): The example objects. DOCS: https://spacy.io/api/corpus#jsonlcorpus-call """ for loc in walk_corpus(self.path, ".jsonl"): records = srsly.read_jsonl(loc) for record in records: doc = nlp.make_doc(record["text"]) if self.min_length >= 1 and len(doc) < self.min_length: continue elif self.max_length >= 1 and len(doc) >= self.max_length: continue else: words = [w.text for w in doc] spaces = [bool(w.whitespace_) for w in doc] # We don't *need* an example here, but it seems nice to # make it match the Corpus signature. yield Example(doc, Doc(nlp.vocab, words=words, spaces=spaces)) class PlainTextCorpus: """Iterate Example objects from a file or directory of plain text UTF-8 files with one line per doc. path (Path): The directory or filename to read from. min_length (int): Minimum document length (in tokens). Shorter documents will be skipped. Defaults to 0, which indicates no limit. max_length (int): Maximum document length (in tokens). Longer documents will be skipped. Defaults to 0, which indicates no limit. DOCS: https://spacy.io/api/corpus#plaintextcorpus """ file_type = "txt" def __init__( self, path: Optional[Union[str, Path]], *, min_length: int = 0, max_length: int = 0, ) -> None: self.path = util.ensure_path(path) self.min_length = min_length self.max_length = max_length def __call__(self, nlp: "Language") -> Iterator[Example]: """Yield examples from the data. nlp (Language): The current nlp object. YIELDS (Example): The example objects. DOCS: https://spacy.io/api/corpus#plaintextcorpus-call """ for loc in walk_corpus(self.path, ".txt"): with open(loc, encoding="utf-8") as f: for text in f: text = text.rstrip("\r\n") if len(text): doc = nlp.make_doc(text) if self.min_length >= 1 and len(doc) < self.min_length: continue elif self.max_length >= 1 and len(doc) > self.max_length: continue # We don't *need* an example here, but it seems nice to # make it match the Corpus signature. yield Example(doc, doc.copy())
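# ----------------------------------------------------------------------
# A minimal usage sketch for the readers above. The temp directory, file
# name and sample texts are invented, and the import path is assumed to
# be spacy.training.corpus (the same class the registered
# "spacy.PlainTextCorpus.v1" reader returns).
import tempfile
from pathlib import Path

from spacy.lang.en import English
from spacy.training.corpus import PlainTextCorpus

with tempfile.TemporaryDirectory() as tmp_dir:
    txt_path = Path(tmp_dir) / "sample.txt"  # hypothetical corpus file
    txt_path.write_text("One short doc.\nAnother doc here.\n", encoding="utf-8")
    nlp = English()
    reader = PlainTextCorpus(txt_path, min_length=1, max_length=100)
    for example in reader(nlp):
        # Each non-empty line becomes one Example; x is the doc, y a copy.
        print(example.x.text)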
11974
35.178248
87
py
spaCy
spaCy-master/spacy/training/initialize.py
import gzip import tarfile import warnings import zipfile from itertools import islice from pathlib import Path from typing import IO, TYPE_CHECKING, Any, Dict, Optional, Union import numpy import srsly import tqdm from thinc.api import Config, ConfigValidationError, fix_random_seed, set_gpu_allocator from ..errors import Errors, Warnings from ..lookups import Lookups from ..schemas import ConfigSchemaTraining from ..util import ( DEFAULT_OOV_PROB, OOV_RANK, ensure_path, get_sourced_components, load_model, load_model_from_config, logger, registry, resolve_dot_names, ) from ..vectors import Mode as VectorsMode from ..vectors import Vectors from .pretrain import get_tok2vec_ref if TYPE_CHECKING: from ..language import Language # noqa: F401 def init_nlp(config: Config, *, use_gpu: int = -1) -> "Language": raw_config = config config = raw_config.interpolate() if "seed" not in config["training"]: raise ValueError(Errors.E1015.format(value="[training] seed")) if "gpu_allocator" not in config["training"]: raise ValueError(Errors.E1015.format(value="[training] gpu_allocator")) if config["training"]["seed"] is not None: fix_random_seed(config["training"]["seed"]) allocator = config["training"]["gpu_allocator"] if use_gpu >= 0 and allocator: set_gpu_allocator(allocator) # Use original config here before it's resolved to functions sourced = get_sourced_components(config) nlp = load_model_from_config(raw_config, auto_fill=True) logger.info("Set up nlp object from config") config = nlp.config.interpolate() # Resolve all training-relevant sections using the filled nlp config T = registry.resolve(config["training"], schema=ConfigSchemaTraining) dot_names = [T["train_corpus"], T["dev_corpus"]] if not isinstance(T["train_corpus"], str): raise ConfigValidationError( desc=Errors.E897.format( field="training.train_corpus", type=type(T["train_corpus"]) ) ) if not isinstance(T["dev_corpus"], str): raise ConfigValidationError( desc=Errors.E897.format( field="training.dev_corpus", type=type(T["dev_corpus"]) ) ) train_corpus, dev_corpus = resolve_dot_names(config, dot_names) optimizer = T["optimizer"] # Components that shouldn't be updated during training frozen_components = T["frozen_components"] # Sourced components that require resume_training resume_components = [p for p in sourced if p not in frozen_components] logger.info("Pipeline: %s", nlp.pipe_names) if resume_components: with nlp.select_pipes(enable=resume_components): logger.info("Resuming training for: %s", resume_components) nlp.resume_training(sgd=optimizer) # Make sure that internal component names are synced and listeners are # defined before initializing further nlp._link_components() with nlp.select_pipes(disable=[*frozen_components, *resume_components]): if T["max_epochs"] == -1: sample_size = 100 logger.debug( "Due to streamed train corpus, using only first %s examples for initialization. " "If necessary, provide all labels in [initialize]. " "More info: https://spacy.io/api/cli#init_labels", sample_size, ) nlp.initialize( lambda: islice(train_corpus(nlp), sample_size), sgd=optimizer ) else: nlp.initialize(lambda: train_corpus(nlp), sgd=optimizer) logger.info("Initialized pipeline components: %s", nlp.pipe_names) # Detect components with listeners that are not frozen consistently for name, proc in nlp.pipeline: for listener in getattr( proc, "listening_components", [] ): # e.g. 
tok2vec/transformer # Don't warn about components not in the pipeline if listener not in nlp.pipe_names: continue if listener in frozen_components and name not in frozen_components: logger.warning(Warnings.W087.format(name=name, listener=listener)) # We always check this regardless, in case user freezes tok2vec if listener not in frozen_components and name in frozen_components: if name not in T["annotating_components"]: logger.warning(Warnings.W086.format(name=name, listener=listener)) return nlp def init_vocab( nlp: "Language", *, data: Optional[Path] = None, lookups: Optional[Lookups] = None, vectors: Optional[str] = None, ) -> None: if lookups: nlp.vocab.lookups = lookups logger.info("Added vocab lookups: %s", ", ".join(lookups.tables)) data_path = ensure_path(data) if data_path is not None: lex_attrs = srsly.read_jsonl(data_path) for lexeme in nlp.vocab: lexeme.rank = OOV_RANK for attrs in lex_attrs: if "settings" in attrs: continue lexeme = nlp.vocab[attrs["orth"]] lexeme.set_attrs(**attrs) if len(nlp.vocab): oov_prob = min(lex.prob for lex in nlp.vocab) - 1 else: oov_prob = DEFAULT_OOV_PROB nlp.vocab.cfg.update({"oov_prob": oov_prob}) logger.info("Added %d lexical entries to the vocab", len(nlp.vocab)) logger.info("Created vocabulary") if vectors is not None: load_vectors_into_model(nlp, vectors) logger.info("Added vectors: %s", vectors) # warn if source model vectors are not identical sourced_vectors_hashes = nlp.meta.pop("_sourced_vectors_hashes", {}) if len(sourced_vectors_hashes) > 0: vectors_hash = hash(nlp.vocab.vectors.to_bytes(exclude=["strings"])) for sourced_component, sourced_vectors_hash in sourced_vectors_hashes.items(): if vectors_hash != sourced_vectors_hash: warnings.warn(Warnings.W113.format(name=sourced_component)) logger.info("Finished initializing nlp object") def load_vectors_into_model( nlp: "Language", name: Union[str, Path], *, add_strings: bool = True ) -> None: """Load word vectors from an installed model or path into a model instance.""" try: # Load with the same vocab, which automatically adds the vectors to # the current nlp object. Exclude lookups so they are not modified. exclude = ["lookups"] if not add_strings: exclude.append("strings") vectors_nlp = load_model(name, vocab=nlp.vocab, exclude=exclude) except ConfigValidationError as e: title = f"Config validation error for vectors {name}" desc = ( "This typically means that there's a problem in the config.cfg included " "with the packaged vectors. Make sure that the vectors package you're " "loading is compatible with the current version of spaCy." ) err = ConfigValidationError.from_error(e, title=title, desc=desc) raise err from None if ( len(vectors_nlp.vocab.vectors.keys()) == 0 and vectors_nlp.vocab.vectors.mode != VectorsMode.floret ) or ( vectors_nlp.vocab.vectors.shape[0] == 0 and vectors_nlp.vocab.vectors.mode == VectorsMode.floret ): logger.warning(Warnings.W112.format(name=name)) for lex in nlp.vocab: lex.rank = nlp.vocab.vectors.key2row.get(lex.orth, OOV_RANK) # type: ignore[attr-defined] def init_tok2vec( nlp: "Language", pretrain_config: Dict[str, Any], init_config: Dict[str, Any] ) -> bool: # Load pretrained tok2vec weights - cf. 
CLI command 'pretrain' P = pretrain_config I = init_config weights_data = None init_tok2vec = ensure_path(I["init_tok2vec"]) if init_tok2vec is not None: if not init_tok2vec.exists(): err = f"can't find pretrained tok2vec: {init_tok2vec}" errors = [{"loc": ["initialize", "init_tok2vec"], "msg": err}] raise ConfigValidationError(config=nlp.config, errors=errors) with init_tok2vec.open("rb") as file_: weights_data = file_.read() if weights_data is not None: layer = get_tok2vec_ref(nlp, P) layer.from_bytes(weights_data) logger.info("Loaded pretrained weights from %s", init_tok2vec) return True return False def convert_vectors( nlp: "Language", vectors_loc: Optional[Path], *, truncate: int, prune: int, name: Optional[str] = None, mode: str = VectorsMode.default, attr: str = "ORTH", ) -> None: vectors_loc = ensure_path(vectors_loc) if vectors_loc and vectors_loc.parts[-1].endswith(".npz"): if attr != "ORTH": raise ValueError( "ORTH is the only attribute supported for vectors in .npz format." ) nlp.vocab.vectors = Vectors( strings=nlp.vocab.strings, data=numpy.load(vectors_loc.open("rb")) ) for lex in nlp.vocab: if lex.rank and lex.rank != OOV_RANK: nlp.vocab.vectors.add(lex.orth, row=lex.rank) # type: ignore[attr-defined] nlp.vocab.deduplicate_vectors() else: if vectors_loc: logger.info("Reading vectors from %s", vectors_loc) vectors_data, vector_keys, floret_settings = read_vectors( vectors_loc, truncate, mode=mode, ) logger.info("Loaded vectors from %s", vectors_loc) else: vectors_data, vector_keys = (None, None) if vector_keys is not None and mode != VectorsMode.floret: for word in vector_keys: if word not in nlp.vocab: nlp.vocab[word] if vectors_data is not None: if mode == VectorsMode.floret: nlp.vocab.vectors = Vectors( strings=nlp.vocab.strings, data=vectors_data, attr=attr, **floret_settings, ) else: nlp.vocab.vectors = Vectors( strings=nlp.vocab.strings, data=vectors_data, keys=vector_keys, attr=attr, ) nlp.vocab.deduplicate_vectors() if name is None: # TODO: Is this correct? Does this matter? nlp.vocab.vectors.name = f"{nlp.meta['lang']}_{nlp.meta['name']}.vectors" else: nlp.vocab.vectors.name = name nlp.meta["vectors"]["name"] = nlp.vocab.vectors.name if prune >= 1 and mode != VectorsMode.floret: nlp.vocab.prune_vectors(prune) def read_vectors( vectors_loc: Path, truncate_vectors: int, *, mode: str = VectorsMode.default ): f = ensure_shape(vectors_loc) header_parts = next(f).split() shape = tuple(int(size) for size in header_parts[:2]) floret_settings = {} if mode == VectorsMode.floret: if len(header_parts) != 8: raise ValueError( "Invalid header for floret vectors. 
" "Expected: bucket dim minn maxn hash_count hash_seed BOW EOW" ) floret_settings = { "mode": "floret", "minn": int(header_parts[2]), "maxn": int(header_parts[3]), "hash_count": int(header_parts[4]), "hash_seed": int(header_parts[5]), "bow": header_parts[6], "eow": header_parts[7], } if truncate_vectors >= 1: raise ValueError(Errors.E860) else: assert len(header_parts) == 2 if truncate_vectors >= 1: shape = (truncate_vectors, shape[1]) vectors_data = numpy.zeros(shape=shape, dtype="f") vectors_keys = [] for i, line in enumerate(tqdm.tqdm(f)): line = line.rstrip() pieces = line.rsplit(" ", vectors_data.shape[1]) word = pieces.pop(0) if len(pieces) != vectors_data.shape[1]: raise ValueError(Errors.E094.format(line_num=i, loc=vectors_loc)) vectors_data[i] = numpy.asarray(pieces, dtype="f") vectors_keys.append(word) if i == truncate_vectors - 1: break return vectors_data, vectors_keys, floret_settings def open_file(loc: Union[str, Path]) -> IO: """Handle .gz, .tar.gz or unzipped files""" loc = ensure_path(loc) if tarfile.is_tarfile(str(loc)): return tarfile.open(str(loc), "r:gz") # type: ignore[return-value] elif loc.parts[-1].endswith("gz"): return (line.decode("utf8") for line in gzip.open(str(loc), "r")) # type: ignore[return-value] elif loc.parts[-1].endswith("zip"): zip_file = zipfile.ZipFile(str(loc)) names = zip_file.namelist() file_ = zip_file.open(names[0]) return (line.decode("utf8") for line in file_) # type: ignore[return-value] else: return loc.open("r", encoding="utf8") def ensure_shape(vectors_loc): """Ensure that the first line of the data is the vectors shape. If it's not, we read in the data and output the shape as the first result, so that the reader doesn't have to deal with the problem. """ lines = open_file(vectors_loc) first_line = next(lines) try: shape = tuple(int(size) for size in first_line.split()[:2]) except ValueError: shape = None if shape is not None: # All good, give the data yield first_line yield from lines else: # Figure out the shape, make it the first value, and then give the # rest of the data. width = len(first_line.split()) - 1 length = 1 for _ in lines: length += 1 yield f"{length} {width}" # Reading the lines in again from file. This to avoid having to # store all the results in a list in memory lines2 = open_file(vectors_loc) yield from lines2 lines2.close() lines.close()
14130
37.928375
103
py
spaCy
spaCy-master/spacy/training/iob_utils.py
import warnings from typing import Dict, Iterable, Iterator, List, Tuple, Union, cast from ..errors import Errors, Warnings from ..tokens import Doc, Span def iob_to_biluo(tags: Iterable[str]) -> List[str]: out: List[str] = [] tags = list(tags) while tags: out.extend(_consume_os(tags)) out.extend(_consume_ent(tags)) return out def biluo_to_iob(tags: Iterable[str]) -> List[str]: out = [] for tag in tags: if tag is None: out.append(tag) else: tag = tag.replace("U-", "B-", 1).replace("L-", "I-", 1) out.append(tag) return out def _consume_os(tags: List[str]) -> Iterator[str]: while tags and tags[0] == "O": yield tags.pop(0) def _consume_ent(tags: List[str]) -> List[str]: if not tags: return [] tag = tags.pop(0) target_in = "I" + tag[1:] target_last = "L" + tag[1:] length = 1 while tags and tags[0] in {target_in, target_last}: length += 1 tags.pop(0) label = tag[2:] if length == 1: if len(label) == 0: raise ValueError(Errors.E177.format(tag=tag)) return ["U-" + label] else: start = "B-" + label end = "L-" + label middle = [f"I-{label}" for _ in range(1, length - 1)] return [start] + middle + [end] def doc_to_biluo_tags(doc: Doc, missing: str = "O"): return offsets_to_biluo_tags( doc, [(ent.start_char, ent.end_char, ent.label_) for ent in doc.ents], missing=missing, ) def _doc_to_biluo_tags_with_partial(doc: Doc) -> List[str]: ents = doc_to_biluo_tags(doc, missing="-") for i, token in enumerate(doc): if token.ent_iob == 2: ents[i] = "O" return ents def offsets_to_biluo_tags( doc: Doc, entities: Iterable[Tuple[int, int, Union[str, int]]], missing: str = "O" ) -> List[str]: """Encode labelled spans into per-token tags, using the Begin/In/Last/Unit/Out scheme (BILUO). doc (Doc): The document that the entity offsets refer to. The output tags will refer to the token boundaries within the document. entities (iterable): A sequence of `(start, end, label)` triples. `start` and `end` should be character-offset integers denoting the slice into the original string. missing (str): The label used for missing values, e.g. if tokenization doesn’t align with the entity offsets. Defaults to "O". RETURNS (list): A list of unicode strings, describing the tags. Each tag string will be of the form either "", "O" or "{action}-{label}", where action is one of "B", "I", "L", "U". The missing label is used where the entity offsets don't align with the tokenization in the `Doc` object. The training algorithm will view these as missing values. "O" denotes a non-entity token. "B" denotes the beginning of a multi-token entity, "I" the inside of an entity of three or more tokens, and "L" the end of an entity of two or more tokens. "U" denotes a single-token entity. EXAMPLE: >>> text = 'I like London.' 
>>> entities = [(len('I like '), len('I like London'), 'LOC')] >>> doc = nlp.tokenizer(text) >>> tags = offsets_to_biluo_tags(doc, entities) >>> assert tags == ["O", "O", 'U-LOC', "O"] """ # Ensure no overlapping entity labels exist tokens_in_ents: Dict[int, Tuple[int, int, Union[str, int]]] = {} starts = {token.idx: token.i for token in doc} ends = {token.idx + len(token): token.i for token in doc} biluo = ["-" for _ in doc] # Handle entity cases for start_char, end_char, label in entities: if not label: for s in starts: # account for many-to-one if s >= start_char and s < end_char: biluo[starts[s]] = "O" else: for token_index in range(start_char, end_char): if token_index in tokens_in_ents.keys(): raise ValueError( Errors.E103.format( span1=( tokens_in_ents[token_index][0], tokens_in_ents[token_index][1], tokens_in_ents[token_index][2], ), span2=(start_char, end_char, label), ) ) tokens_in_ents[token_index] = (start_char, end_char, label) start_token = starts.get(start_char) end_token = ends.get(end_char) # Only interested if the tokenization is correct if start_token is not None and end_token is not None: if start_token == end_token: biluo[start_token] = f"U-{label}" else: biluo[start_token] = f"B-{label}" for i in range(start_token + 1, end_token): biluo[i] = f"I-{label}" biluo[end_token] = f"L-{label}" # Now distinguish the O cases from ones where we miss the tokenization entity_chars = set() for start_char, end_char, label in entities: for i in range(start_char, end_char): entity_chars.add(i) for token in doc: for i in range(token.idx, token.idx + len(token)): if i in entity_chars: break else: biluo[token.i] = missing if "-" in biluo and missing != "-": ent_str = str(entities) warnings.warn( Warnings.W030.format( text=doc.text[:50] + "..." if len(doc.text) > 50 else doc.text, entities=ent_str[:50] + "..." if len(ent_str) > 50 else ent_str, ) ) return biluo def biluo_tags_to_spans(doc: Doc, tags: Iterable[str]) -> List[Span]: """Encode per-token tags following the BILUO scheme into Span object, e.g. to overwrite the doc.ents. doc (Doc): The document that the BILUO tags refer to. tags (iterable): A sequence of BILUO tags with each tag describing one token. Each tag string will be of the form of either "", "O" or "{action}-{label}", where action is one of "B", "I", "L", "U". RETURNS (list): A sequence of Span objects. Each token with a missing IOB tag is returned as a Span with an empty label. """ token_offsets = tags_to_entities(tags) spans = [] for label, start_idx, end_idx in token_offsets: span = Span(doc, start_idx, end_idx + 1, label=label) spans.append(span) return spans def biluo_tags_to_offsets( doc: Doc, tags: Iterable[str] ) -> List[Tuple[int, int, Union[str, int]]]: """Encode per-token tags following the BILUO scheme into entity offsets. doc (Doc): The document that the BILUO tags refer to. tags (iterable): A sequence of BILUO tags with each tag describing one token. Each tags string will be of the form of either "", "O" or "{action}-{label}", where action is one of "B", "I", "L", "U". RETURNS (list): A sequence of `(start, end, label)` triples. `start` and `end` will be character-offset integers denoting the slice into the original string. """ spans = biluo_tags_to_spans(doc, tags) return [(span.start_char, span.end_char, span.label_) for span in spans] def tags_to_entities(tags: Iterable[str]) -> List[Tuple[str, int, int]]: """Note that the end index returned by this function is inclusive. 
To use it for Span creation, increment the end by 1.""" entities = [] start = None for i, tag in enumerate(tags): if tag is None or tag.startswith("-"): # TODO: We shouldn't be getting these malformed inputs. Fix this. if start is not None: start = None else: entities.append(("", i, i)) elif tag.startswith("O"): pass elif tag.startswith("I"): if start is None: raise ValueError( Errors.E067.format(start="I", tags=list(tags)[: i + 1]) ) elif tag.startswith("U"): entities.append((tag[2:], i, i)) elif tag.startswith("B"): start = i elif tag.startswith("L"): if start is None: raise ValueError( Errors.E067.format(start="L", tags=list(tags)[: i + 1]) ) entities.append((tag[2:], start, i)) start = None else: raise ValueError(Errors.E068.format(tag=tag)) return entities def split_bilu_label(label: str) -> Tuple[str, str]: return cast(Tuple[str, str], label.split("-", 1)) def remove_bilu_prefix(label: str) -> str: return label.split("-", 1)[1] # Fallbacks to make backwards-compat easier offsets_from_biluo_tags = biluo_tags_to_offsets spans_from_biluo_tags = biluo_tags_to_spans biluo_tags_from_offsets = offsets_to_biluo_tags
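# ----------------------------------------------------------------------
# A minimal round-trip sketch for the tag helpers above; the tag sequence
# and the LOC label are invented. These helpers are re-exported from
# spacy.training.
from spacy.training import biluo_to_iob, iob_to_biluo, tags_to_entities

iob = ["O", "B-LOC", "I-LOC", "O"]
biluo = iob_to_biluo(iob)
assert biluo == ["O", "B-LOC", "L-LOC", "O"]
assert biluo_to_iob(biluo) == iob
# tags_to_entities() returns inclusive token spans as (label, start, end).
assert tags_to_entities(biluo) == [("LOC", 1, 2)]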
9073
36.651452
86
py
spaCy
spaCy-master/spacy/training/loggers.py
import sys from pathlib import Path from typing import IO, TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union import srsly import tqdm from wasabi import Printer from .. import util from ..errors import Errors from ..util import registry if TYPE_CHECKING: from ..language import Language # noqa: F401 def setup_table( *, cols: List[str], widths: List[int], max_width: int = 13 ) -> Tuple[List[str], List[int], List[str]]: final_cols = [] final_widths = [] for col, width in zip(cols, widths): if len(col) > max_width: col = col[: max_width - 3] + "..." # shorten column if too long final_cols.append(col.upper()) final_widths.append(max(len(col), width)) return final_cols, final_widths, ["r" for _ in final_widths] # We cannot rename this method as it's directly imported # and used by external packages such as spacy-loggers. @registry.loggers("spacy.ConsoleLogger.v2") def console_logger( progress_bar: bool = False, console_output: bool = True, output_file: Optional[Union[str, Path]] = None, ): """The ConsoleLogger.v2 prints out training logs in the console and/or saves them to a jsonl file. progress_bar (bool): Whether the logger should print a progress bar tracking the steps till the next evaluation pass. console_output (bool): Whether the logger should print the logs on the console. output_file (Optional[Union[str, Path]]): The file to save the training logs to. """ return console_logger_v3( progress_bar=None if progress_bar is False else "eval", console_output=console_output, output_file=output_file, ) @registry.loggers("spacy.ConsoleLogger.v3") def console_logger_v3( progress_bar: Optional[str] = None, console_output: bool = True, output_file: Optional[Union[str, Path]] = None, ): """The ConsoleLogger.v3 prints out training logs in the console and/or saves them to a jsonl file. progress_bar (Optional[str]): Type of progress bar to show in the console. Allowed values: train - Tracks the number of steps from the beginning of training until the full training run is complete (training.max_steps is reached). eval - Tracks the number of steps between the previous and next evaluation (training.eval_frequency is reached). console_output (bool): Whether the logger should print the logs on the console. output_file (Optional[Union[str, Path]]): The file to save the training logs to. """ _log_exist = False if output_file: output_file = util.ensure_path(output_file) # type: ignore if output_file.exists(): # type: ignore _log_exist = True if not output_file.parents[0].exists(): # type: ignore output_file.parents[0].mkdir(parents=True) # type: ignore def setup_printer( nlp: "Language", stdout: IO = sys.stdout, stderr: IO = sys.stderr ) -> Tuple[Callable[[Optional[Dict[str, Any]]], None], Callable[[], None]]: write = lambda text: print(text, file=stdout, flush=True) msg = Printer(no_print=True) nonlocal output_file output_stream = None if _log_exist: write( msg.warn( f"Saving logs is disabled because {output_file} already exists." 
) ) output_file = None elif output_file: write(msg.info(f"Saving results to {output_file}")) output_stream = open(output_file, "w", encoding="utf-8") # ensure that only trainable components are logged logged_pipes = [ name for name, proc in nlp.pipeline if hasattr(proc, "is_trainable") and proc.is_trainable ] max_steps = nlp.config["training"]["max_steps"] eval_frequency = nlp.config["training"]["eval_frequency"] score_weights = nlp.config["training"]["score_weights"] score_cols = [col for col, value in score_weights.items() if value is not None] loss_cols = [f"Loss {pipe}" for pipe in logged_pipes] if console_output: spacing = 2 table_header, table_widths, table_aligns = setup_table( cols=["E", "#"] + loss_cols + score_cols + ["Score"], widths=[3, 6] + [8 for _ in loss_cols] + [6 for _ in score_cols] + [6], ) write(msg.row(table_header, widths=table_widths, spacing=spacing)) write(msg.row(["-" * width for width in table_widths], spacing=spacing)) progress = None expected_progress_types = ("train", "eval") if progress_bar is not None and progress_bar not in expected_progress_types: raise ValueError( Errors.E1048.format( unexpected=progress_bar, expected=expected_progress_types ) ) def log_step(info: Optional[Dict[str, Any]]) -> None: nonlocal progress if info is None: # If we don't have a new checkpoint, just return. if progress is not None: progress.update(1) return losses = [] log_losses = {} for pipe_name in logged_pipes: losses.append("{0:.2f}".format(float(info["losses"][pipe_name]))) log_losses[pipe_name] = float(info["losses"][pipe_name]) scores = [] log_scores = {} for col in score_cols: score = info["other_scores"].get(col, 0.0) try: score = float(score) except TypeError: err = Errors.E916.format(name=col, score_type=type(score)) raise ValueError(err) from None if col != "speed": score *= 100 scores.append("{0:.2f}".format(score)) log_scores[str(col)] = score data = ( [info["epoch"], info["step"]] + losses + scores + ["{0:.2f}".format(float(info["score"]))] ) if output_stream: # Write to log file per log_step log_data = { "epoch": info["epoch"], "step": info["step"], "losses": log_losses, "scores": log_scores, "score": float(info["score"]), } output_stream.write(srsly.json_dumps(log_data) + "\n") if progress is not None: progress.close() if console_output: write( msg.row( data, widths=table_widths, aligns=table_aligns, spacing=spacing ) ) if progress_bar: if progress_bar == "train": total = max_steps desc = f"Last Eval Epoch: {info['epoch']}" initial = info["step"] else: total = eval_frequency desc = f"Epoch {info['epoch']+1}" initial = 0 # Set disable=None, so that it disables on non-TTY progress = tqdm.tqdm( total=total, disable=None, leave=False, file=stderr, initial=initial, ) progress.set_description(desc) def finalize() -> None: if output_stream: output_stream.close() return log_step, finalize return setup_printer
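# ----------------------------------------------------------------------
# A minimal dry-run sketch for the console logger above: resolve the
# registered factory and feed it one fabricated checkpoint dict. This
# assumes a freshly created pipeline carries the default [training]
# config block (empty score_weights, default max_steps/eval_frequency).
import io

from spacy.lang.en import English
from spacy.util import registry

factory = registry.loggers.get("spacy.ConsoleLogger.v2")
setup_printer = factory(progress_bar=False, console_output=True)

nlp = English()
out, err = io.StringIO(), io.StringIO()
log_step, finalize = setup_printer(nlp, stdout=out, stderr=err)
log_step({"epoch": 0, "step": 0, "losses": {}, "other_scores": {}, "score": 0.0})
finalize()
assert "SCORE" in out.getvalue()  # the upper-cased table header was printed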
7820
38.105
146
py
spaCy
spaCy-master/spacy/training/loop.py
import random import shutil import sys from pathlib import Path from timeit import default_timer as timer from typing import ( IO, TYPE_CHECKING, Any, Callable, Dict, Iterable, List, Optional, Tuple, Union, ) from thinc.api import Config, Optimizer, constant, fix_random_seed, set_gpu_allocator from wasabi import Printer from ..errors import Errors from ..schemas import ConfigSchemaTraining from ..util import logger, registry, resolve_dot_names from .example import Example if TYPE_CHECKING: from ..language import Language # noqa: F401 DIR_MODEL_BEST = "model-best" DIR_MODEL_LAST = "model-last" def train( nlp: "Language", output_path: Optional[Path] = None, *, use_gpu: int = -1, stdout: IO = sys.stdout, stderr: IO = sys.stderr, ) -> Tuple["Language", Optional[Path]]: """Train a pipeline. nlp (Language): The initialized nlp object with the full config. output_path (Optional[Path]): Optional output path to save trained model to. use_gpu (int): Whether to train on GPU. Make sure to call require_gpu before calling this function. stdout (file): A file-like object to write output messages. To disable printing, set to io.StringIO. stderr (file): A second file-like object to write output messages. To disable printing, set to io.StringIO. RETURNS (tuple): The final nlp object and the path to the exported model. """ # We use no_print here so we can respect the stdout/stderr options. msg = Printer(no_print=True) # Create iterator, which yields out info after each optimization step. config = nlp.config.interpolate() if config["training"]["seed"] is not None: fix_random_seed(config["training"]["seed"]) allocator = config["training"]["gpu_allocator"] if use_gpu >= 0 and allocator: set_gpu_allocator(allocator) T = registry.resolve(config["training"], schema=ConfigSchemaTraining) dot_names = [T["train_corpus"], T["dev_corpus"]] train_corpus, dev_corpus = resolve_dot_names(config, dot_names) optimizer = T["optimizer"] score_weights = T["score_weights"] batcher = T["batcher"] train_logger = T["logger"] before_to_disk = create_before_to_disk_callback(T["before_to_disk"]) before_update = T["before_update"] # Helper function to save checkpoints. This is a closure for convenience, # to avoid passing in all the args all the time. def save_checkpoint(is_best): with nlp.use_params(optimizer.averages): before_to_disk(nlp).to_disk(output_path / DIR_MODEL_LAST) if is_best: # Avoid saving twice (saving will be more expensive than # the dir copy) if (output_path / DIR_MODEL_BEST).exists(): shutil.rmtree(output_path / DIR_MODEL_BEST) shutil.copytree(output_path / DIR_MODEL_LAST, output_path / DIR_MODEL_BEST) # Components that shouldn't be updated during training frozen_components = T["frozen_components"] # Components that should set annotations on update annotating_components = T["annotating_components"] # Create iterator, which yields out info after each optimization step. 
training_step_iterator = train_while_improving( nlp, optimizer, create_train_batches(nlp, train_corpus, batcher, T["max_epochs"]), create_evaluation_callback(nlp, dev_corpus, score_weights), dropout=T["dropout"], accumulate_gradient=T["accumulate_gradient"], patience=T["patience"], max_steps=T["max_steps"], eval_frequency=T["eval_frequency"], exclude=frozen_components, annotating_components=annotating_components, before_update=before_update, ) clean_output_dir(output_path) stdout.write(msg.info(f"Pipeline: {nlp.pipe_names}") + "\n") if frozen_components: stdout.write(msg.info(f"Frozen components: {frozen_components}") + "\n") if annotating_components: stdout.write( msg.info(f"Set annotations on update for: {annotating_components}") + "\n" ) stdout.write(msg.info(f"Initial learn rate: {optimizer.learn_rate}") + "\n") with nlp.select_pipes(disable=frozen_components): log_step, finalize_logger = train_logger(nlp, stdout, stderr) try: for batch, info, is_best_checkpoint in training_step_iterator: if is_best_checkpoint is not None: with nlp.select_pipes(disable=frozen_components): update_meta(T, nlp, info) if output_path is not None: save_checkpoint(is_best_checkpoint) info["output_path"] = str(output_path / DIR_MODEL_LAST) log_step(info if is_best_checkpoint is not None else None) except Exception as e: if output_path is not None: stdout.write( msg.warn( f"Aborting and saving the final best model. " f"Encountered exception: {repr(e)}" ) + "\n" ) raise e finally: finalize_logger() if output_path is not None: save_checkpoint(False) # This will only run if we did't hit an error if optimizer.averages: nlp.use_params(optimizer.averages) if output_path is not None: stdout.write( msg.good("Saved pipeline to output directory", output_path / DIR_MODEL_LAST) + "\n" ) return (nlp, output_path / DIR_MODEL_LAST) else: return (nlp, None) def train_while_improving( nlp: "Language", optimizer: Optimizer, train_data, evaluate, *, dropout: float, eval_frequency: int, accumulate_gradient: int, patience: int, max_steps: int, exclude: List[str], annotating_components: List[str], before_update: Optional[Callable[["Language", Dict[str, Any]], None]], ): """Train until an evaluation stops improving. Works as a generator, with each iteration yielding a tuple `(batch, info, is_best_checkpoint)`, where info is a dict, and is_best_checkpoint is in [True, False, None] -- None indicating that the iteration was not evaluated as a checkpoint. The evaluation is conducted by calling the evaluate callback. Positional arguments: nlp: The spaCy pipeline to evaluate. optimizer: The optimizer callable. train_data (Iterable[Batch]): A generator of batches, with the training data. Each batch should be a Sized[Tuple[Input, Annot]]. The training data iterable needs to take care of iterating over the epochs and shuffling. evaluate (Callable[[], Tuple[float, Any]]): A callback to perform evaluation. The callback should take no arguments and return a tuple `(main_score, other_scores)`. The main_score should be a float where higher is better. other_scores can be any object. Every iteration, the function yields out a tuple with: * batch: A list of Example objects. * info: A dict with various information about the last update (see below). * is_best_checkpoint: A value in None, False, True, indicating whether this was the best evaluation so far. You should use this to save the model checkpoints during training. If None, evaluation was not conducted on that iteration. False means evaluation was conducted, but a previous evaluation was better. 
The info dict provides the following information: epoch (int): How many passes over the data have been completed. step (int): How many steps have been completed. score (float): The main score from the last evaluation. other_scores: : The other scores from the last evaluation. losses: The accumulated losses throughout training. checkpoints: A list of previous results, where each result is a (score, step, epoch) tuple. """ if isinstance(dropout, float): dropouts = constant(dropout) else: dropouts = dropout results = [] losses: Dict[str, float] = {} words_seen = 0 start_time = timer() for step, (epoch, batch) in enumerate(train_data): if before_update: before_update_args = {"step": step, "epoch": epoch} before_update(nlp, before_update_args) dropout = next(dropouts) # type: ignore for subbatch in subdivide_batch(batch, accumulate_gradient): nlp.update( subbatch, drop=dropout, losses=losses, sgd=False, # type: ignore[arg-type] exclude=exclude, annotates=annotating_components, ) # TODO: refactor this so we don't have to run it separately in here for name, proc in nlp.pipeline: if ( name not in exclude and hasattr(proc, "is_trainable") and proc.is_trainable and proc.model not in (True, False, None) # type: ignore[attr-defined] ): proc.finish_update(optimizer) # type: ignore[attr-defined] optimizer.step_schedules() if not (step % eval_frequency): if optimizer.averages: with nlp.use_params(optimizer.averages): score, other_scores = evaluate() else: score, other_scores = evaluate() results.append((score, step)) is_best_checkpoint = score == max(results)[0] else: score, other_scores = (None, None) is_best_checkpoint = None words_seen += sum(len(eg) for eg in batch) info = { "epoch": epoch, "step": step, "score": score, "other_scores": other_scores, "losses": losses, "checkpoints": results, "seconds": int(timer() - start_time), "words": words_seen, } yield batch, info, is_best_checkpoint if is_best_checkpoint is not None: losses = {} # Stop if no improvement in `patience` updates (if specified) # Negate step value so that the earliest best step is chosen for the # same score, i.e. (1.0, 100) is chosen over (1.0, 200) best_result = max((r_score, -r_step) for r_score, r_step in results) best_step = -best_result[1] if patience and (step - best_step) >= patience: break # Stop if we've exhausted our max steps (if specified) if max_steps and step >= max_steps: break def subdivide_batch(batch, accumulate_gradient): batch = list(batch) batch.sort(key=lambda eg: len(eg.predicted)) sub_len = len(batch) // accumulate_gradient start = 0 for i in range(accumulate_gradient): subbatch = batch[start : start + sub_len] if subbatch: yield subbatch start += len(subbatch) subbatch = batch[start:] if subbatch: yield subbatch def create_evaluation_callback( nlp: "Language", dev_corpus: Callable, weights: Dict[str, float] ) -> Callable[[], Tuple[float, Dict[str, float]]]: weights = {key: value for key, value in weights.items() if value is not None} def evaluate() -> Tuple[float, Dict[str, float]]: nonlocal weights try: scores = nlp.evaluate(dev_corpus(nlp)) except KeyError as e: raise KeyError(Errors.E900.format(pipeline=nlp.pipe_names)) from e # Calculate a weighted sum based on score_weights for the main score. # We can only consider scores that are ints/floats, not dicts like # entity scores per type etc. 
scores = {key: value for key, value in scores.items() if value is not None} weights = {key: value for key, value in weights.items() if key in scores} for key, value in scores.items(): if key in weights and not isinstance(value, (int, float)): raise ValueError(Errors.E915.format(name=key, score_type=type(value))) try: weighted_score = sum( scores.get(s, 0.0) * weights.get(s, 0.0) for s in weights ) except KeyError as e: keys = list(scores.keys()) err = Errors.E983.format(dict="score_weights", key=str(e), keys=keys) raise KeyError(err) from None return weighted_score, scores return evaluate def create_train_batches( nlp: "Language", corpus: Callable[["Language"], Iterable[Example]], batcher: Callable[[Iterable[Example]], Iterable[Example]], max_epochs: int, ): epoch = 0 if max_epochs >= 0: examples = list(corpus(nlp)) # type: Iterable[Example] if not examples: # Raise error if no data raise ValueError(Errors.E986) while max_epochs < 1 or epoch != max_epochs: if max_epochs >= 0: random.shuffle(examples) # type: ignore else: examples = corpus(nlp) for batch in batcher(examples): yield epoch, batch epoch += 1 def update_meta( training: Union[Dict[str, Any], Config], nlp: "Language", info: Dict[str, Any] ) -> None: nlp.meta["performance"] = {} for metric in training["score_weights"]: if metric is not None: nlp.meta["performance"][metric] = info["other_scores"].get(metric, 0.0) for pipe_name in nlp.pipe_names: if pipe_name in info["losses"]: nlp.meta["performance"][f"{pipe_name}_loss"] = info["losses"][pipe_name] def create_before_to_disk_callback( callback: Optional[Callable[["Language"], "Language"]] ) -> Callable[["Language"], "Language"]: from ..language import Language # noqa: F811 def before_to_disk(nlp: Language) -> Language: if not callback: return nlp modified_nlp = callback(nlp) if not isinstance(modified_nlp, Language): err = Errors.E914.format(name="before_to_disk", value=type(modified_nlp)) raise ValueError(err) return modified_nlp return before_to_disk def clean_output_dir(path: Optional[Path]) -> None: """Remove an existing output directory. Typically used to ensure that that a directory like model-best and its contents aren't just being overwritten by nlp.to_disk, which could preserve existing subdirectories (e.g. components that don't exist anymore). """ if path is not None and path.exists(): for subdir in [path / DIR_MODEL_BEST, path / DIR_MODEL_LAST]: if subdir.exists(): try: shutil.rmtree(str(subdir)) logger.debug("Removed existing output directory: %s", subdir) except Exception as e: raise IOError(Errors.E901.format(path=path)) from e
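# ----------------------------------------------------------------------
# A minimal sketch for subdivide_batch() above: it sorts the batch by
# predicted-doc length and yields `accumulate_gradient` roughly equal
# sub-batches. The toy texts are invented; the import path is assumed to
# be spacy.training.loop.
from spacy.lang.en import English
from spacy.training import Example
from spacy.training.loop import subdivide_batch

nlp = English()
texts = ["a", "a b c", "a b", "a b c d"]
batch = [Example(nlp.make_doc(t), nlp.make_doc(t)) for t in texts]
subbatches = list(subdivide_batch(batch, accumulate_gradient=2))
assert [len(sb) for sb in subbatches] == [2, 2]
# After the in-place sort, the shortest docs land in the first sub-batch.
assert [len(sb[0].predicted) for sb in subbatches] == [1, 3]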
15029
37.837209
88
py
spaCy
spaCy-master/spacy/training/pretrain.py
import re import time from collections import Counter from pathlib import Path from typing import Callable, Iterable, List, Optional, Union import srsly from thinc.api import ( Config, Model, Optimizer, fix_random_seed, set_dropout_rate, set_gpu_allocator, ) from thinc.config import ConfigValidationError from wasabi import Printer from ..errors import Errors from ..schemas import ConfigSchemaPretrain from ..tokens import Doc from ..util import dot_to_object, load_model_from_config, registry from .example import Example def pretrain( config: Config, output_dir: Path, resume_path: Optional[Path] = None, epoch_resume: Optional[int] = None, use_gpu: int = -1, silent: bool = True, skip_last: bool = False, ): msg = Printer(no_print=silent) if config["training"]["seed"] is not None: fix_random_seed(config["training"]["seed"]) allocator = config["training"]["gpu_allocator"] if use_gpu >= 0 and allocator: set_gpu_allocator(allocator) # ignore in pretraining because we're creating it now config["initialize"]["init_tok2vec"] = None nlp = load_model_from_config(config) _config = nlp.config.interpolate() P = registry.resolve(_config["pretraining"], schema=ConfigSchemaPretrain) corpus = dot_to_object(_config, P["corpus"]) corpus = registry.resolve({"corpus": corpus})["corpus"] batcher = P["batcher"] model = create_pretraining_model(nlp, P) optimizer = P["optimizer"] # Load in pretrained weights to resume from if resume_path is not None: epoch_resume = _resume_model(model, resume_path, epoch_resume, silent=silent) else: # Without '--resume-path' the '--epoch-resume' argument is ignored epoch_resume = 0 objective = model.attrs["loss"] # TODO: move this to logger function? tracker = ProgressTracker(frequency=10000) if P["n_save_epoch"]: msg.divider( f"Pre-training tok2vec layer - starting at epoch {epoch_resume} - saving every {P['n_save_epoch']} epoch" ) else: msg.divider(f"Pre-training tok2vec layer - starting at epoch {epoch_resume}") row_settings = {"widths": (3, 10, 10, 6, 4), "aligns": ("r", "r", "r", "r", "r")} msg.row(("#", "# Words", "Total Loss", "Loss", "w/s"), **row_settings) def _save_model(epoch, is_temp=False, is_last=False): is_temp_str = ".temp" if is_temp else "" with model.use_params(optimizer.averages): if is_last: save_path = output_dir / f"model-last.bin" else: save_path = output_dir / f"model{epoch}{is_temp_str}.bin" with (save_path).open("wb") as file_: file_.write(model.get_ref("tok2vec").to_bytes()) log = { "nr_word": tracker.nr_word, "loss": tracker.loss, "epoch_loss": tracker.epoch_loss, "epoch": epoch, } with (output_dir / "log.jsonl").open("a") as file_: file_.write(srsly.json_dumps(log) + "\n") # TODO: I think we probably want this to look more like the # 'create_train_batches' function? 
try: for epoch in range(epoch_resume, P["max_epochs"]): for batch_id, batch in enumerate(batcher(corpus(nlp))): docs = ensure_docs(batch) loss = make_update(model, docs, optimizer, objective) progress = tracker.update(epoch, loss, docs) if progress: msg.row(progress, **row_settings) if P["n_save_every"] and (batch_id % P["n_save_every"] == 0): _save_model(epoch, is_temp=True) if P["n_save_epoch"]: if epoch % P["n_save_epoch"] == 0 or epoch == P["max_epochs"] - 1: _save_model(epoch) else: _save_model(epoch) tracker.epoch_loss = 0.0 finally: if not skip_last: _save_model(P["max_epochs"], is_last=True) def ensure_docs(examples_or_docs: Iterable[Union[Doc, Example]]) -> List[Doc]: docs = [] for eg_or_doc in examples_or_docs: if isinstance(eg_or_doc, Doc): docs.append(eg_or_doc) else: docs.append(eg_or_doc.reference) return docs def _resume_model( model: Model, resume_path: Path, epoch_resume: Optional[int], silent: bool = True ) -> int: msg = Printer(no_print=silent) msg.info(f"Resume training tok2vec from: {resume_path}") with resume_path.open("rb") as file_: weights_data = file_.read() model.get_ref("tok2vec").from_bytes(weights_data) if epoch_resume is None: # Parse the epoch number from the given weight file model_name = re.search(r"model\d+\.bin", str(resume_path)) if model_name: # Default weight file name so read epoch_start from it by cutting off 'model' and '.bin' epoch_resume = int(model_name.group(0)[5:][:-4]) + 1 else: # No epoch given and couldn't infer it raise ValueError(Errors.E1020) msg.info(f"Resuming from epoch: {epoch_resume}") return epoch_resume def make_update( model: Model, docs: Iterable[Doc], optimizer: Optimizer, objective_func: Callable ) -> float: """Perform an update over a single batch of documents. docs (iterable): A batch of `Doc` objects. optimizer (callable): An optimizer. RETURNS loss: A float for the loss. """ predictions, backprop = model.begin_update(docs) loss, gradients = objective_func(model.ops, docs, predictions) backprop(gradients) model.finish_update(optimizer) # Don't want to return a cupy object here # The gradients are modified in-place by the BERT MLM, # so we get an accurate loss return float(loss) def create_pretraining_model(nlp, pretrain_config): """Define a network for the pretraining. We simply add an output layer onto the tok2vec input model. The tok2vec input model needs to be a model that takes a batch of Doc objects (as a list), and returns a list of arrays. Each array in the output needs to have one row per token in the doc. The actual tok2vec layer is stored as a reference, and only this bit will be serialized to file and read back in when calling the 'train' command. 
""" with nlp.select_pipes(enable=[]): nlp.initialize() tok2vec = get_tok2vec_ref(nlp, pretrain_config) # If the config referred to a Tok2VecListener, grab the original model instead if type(tok2vec).__name__ == "Tok2VecListener": original_tok2vec = ( tok2vec.upstream_name if tok2vec.upstream_name != "*" else "tok2vec" ) tok2vec = nlp.get_pipe(original_tok2vec).model try: tok2vec.initialize(X=[nlp.make_doc("Give it a doc to infer shapes")]) except ValueError: component = pretrain_config["component"] layer = pretrain_config["layer"] raise ValueError(Errors.E874.format(component=component, layer=layer)) create_function = pretrain_config["objective"] model = create_function(nlp.vocab, tok2vec) model.initialize(X=[nlp.make_doc("Give it a doc to infer shapes")]) set_dropout_rate(model, pretrain_config["dropout"]) return model def get_tok2vec_ref(nlp, pretrain_config): tok2vec_component = pretrain_config["component"] if tok2vec_component is None: desc = ( f"To use pretrained tok2vec weights, [pretraining.component] " f"needs to specify the component that should load them." ) err = "component can't be null" errors = [{"loc": ["pretraining", "component"], "msg": err}] raise ConfigValidationError( config=nlp.config["pretraining"], errors=errors, desc=desc ) layer = nlp.get_pipe(tok2vec_component).model if pretrain_config["layer"]: layer = layer.get_ref(pretrain_config["layer"]) return layer class ProgressTracker: def __init__(self, frequency=1000000): self.loss = 0.0 self.prev_loss = 0.0 self.nr_word = 0 self.words_per_epoch = Counter() self.frequency = frequency self.last_time = time.time() self.last_update = 0 self.epoch_loss = 0.0 def update(self, epoch, loss, docs): self.loss += loss self.epoch_loss += loss words_in_batch = sum(len(doc) for doc in docs) self.words_per_epoch[epoch] += words_in_batch self.nr_word += words_in_batch words_since_update = self.nr_word - self.last_update if words_since_update >= self.frequency: wps = words_since_update / (time.time() - self.last_time) self.last_update = self.nr_word self.last_time = time.time() loss_per_word = self.loss - self.prev_loss status = ( epoch, self.nr_word, _smart_round(self.loss, width=10), _smart_round(loss_per_word, width=6), int(wps), ) self.prev_loss = float(self.loss) return status else: return None def _smart_round( figure: Union[float, int], width: int = 10, max_decimal: int = 4 ) -> str: """Round large numbers as integers, smaller numbers as decimals.""" n_digits = len(str(int(figure))) n_decimal = width - (n_digits + 1) if n_decimal <= 1: return str(int(figure)) else: n_decimal = min(n_decimal, max_decimal) format_str = "%." + str(n_decimal) + "f" return format_str % figure
9710
36.206897
117
py
spaCy
spaCy-master/spacy/training/converters/__init__.py
from .conll_ner_to_docs import conll_ner_to_docs # noqa: F401 from .conllu_to_docs import conllu_to_docs # noqa: F401 from .iob_to_docs import iob_to_docs # noqa: F401 from .json_to_docs import json_to_docs # noqa: F401
224
44
62
py
spaCy
spaCy-master/spacy/training/converters/conll_ner_to_docs.py
from wasabi import Printer from ...errors import Errors from ...tokens import Doc, Span from ...training import iob_to_biluo from ...util import get_lang_class, load_model from .. import tags_to_entities def conll_ner_to_docs( input_data, n_sents=10, seg_sents=False, model=None, no_print=False, **kwargs ): """ Convert files in the CoNLL-2003 NER format and similar whitespace-separated columns into Doc objects. The first column is the tokens, the final column is the IOB tags. If an additional second column is present, the second column is the tags. Sentences are separated with whitespace and documents can be separated using the line "-DOCSTART- -X- O O". Sample format: -DOCSTART- -X- O O I O like O London B-GPE and O New B-GPE York I-GPE City I-GPE . O """ msg = Printer(no_print=no_print) doc_delimiter = "-DOCSTART- -X- O O" # check for existing delimiters, which should be preserved if "\n\n" in input_data and seg_sents: msg.warn( "Sentence boundaries found, automatic sentence segmentation with " "`-s` disabled." ) seg_sents = False if doc_delimiter in input_data and n_sents: msg.warn( "Document delimiters found, automatic document segmentation with " "`-n` disabled." ) n_sents = 0 # do document segmentation with existing sentences if "\n\n" in input_data and doc_delimiter not in input_data and n_sents: n_sents_info(msg, n_sents) input_data = segment_docs(input_data, n_sents, doc_delimiter) # do sentence segmentation with existing documents if "\n\n" not in input_data and doc_delimiter in input_data and seg_sents: input_data = segment_sents_and_docs(input_data, 0, "", model=model, msg=msg) # do both sentence segmentation and document segmentation according # to options if "\n\n" not in input_data and doc_delimiter not in input_data: # sentence segmentation required for document segmentation if n_sents > 0 and not seg_sents: msg.warn( f"No sentence boundaries found to use with option `-n {n_sents}`. " f"Use `-s` to automatically segment sentences or `-n 0` " f"to disable." ) else: n_sents_info(msg, n_sents) input_data = segment_sents_and_docs( input_data, n_sents, doc_delimiter, model=model, msg=msg ) # provide warnings for problematic data if "\n\n" not in input_data: msg.warn( "No sentence boundaries found. Use `-s` to automatically segment " "sentences." ) if doc_delimiter not in input_data: msg.warn( "No document delimiters found. Use `-n` to automatically group " "sentences into documents." 
) if model: nlp = load_model(model) else: nlp = get_lang_class("xx")() for conll_doc in input_data.strip().split(doc_delimiter): conll_doc = conll_doc.strip() if not conll_doc: continue words = [] sent_starts = [] pos_tags = [] biluo_tags = [] for conll_sent in conll_doc.split("\n\n"): conll_sent = conll_sent.strip() if not conll_sent: continue lines = [line.strip() for line in conll_sent.split("\n") if line.strip()] cols = list(zip(*[line.split() for line in lines])) if len(cols) < 2: raise ValueError(Errors.E903) length = len(cols[0]) words.extend(cols[0]) sent_starts.extend([True] + [False] * (length - 1)) biluo_tags.extend(iob_to_biluo(cols[-1])) pos_tags.extend(cols[1] if len(cols) > 2 else ["-"] * length) doc = Doc(nlp.vocab, words=words) for i, token in enumerate(doc): token.tag_ = pos_tags[i] token.is_sent_start = sent_starts[i] entities = tags_to_entities(biluo_tags) doc.ents = [Span(doc, start=s, end=e + 1, label=L) for L, s, e in entities] yield doc def segment_sents_and_docs(doc, n_sents, doc_delimiter, model=None, msg=None): sentencizer = None if model: nlp = load_model(model) if "parser" in nlp.pipe_names: msg.info(f"Segmenting sentences with parser from model '{model}'.") for name, proc in nlp.pipeline: if "parser" in getattr(proc, "listening_components", []): nlp.replace_listeners(name, "parser", ["model.tok2vec"]) sentencizer = nlp.get_pipe("parser") if not sentencizer: msg.info( "Segmenting sentences with sentencizer. (Use `-b model` for " "improved parser-based sentence segmentation.)" ) nlp = get_lang_class("xx")() sentencizer = nlp.create_pipe("sentencizer") lines = doc.strip().split("\n") words = [line.strip().split()[0] for line in lines] nlpdoc = Doc(nlp.vocab, words=words) sentencizer(nlpdoc) lines_with_segs = [] sent_count = 0 for i, token in enumerate(nlpdoc): if token.is_sent_start: if n_sents and sent_count % n_sents == 0: lines_with_segs.append(doc_delimiter) lines_with_segs.append("") sent_count += 1 lines_with_segs.append(lines[i]) return "\n".join(lines_with_segs) def segment_docs(input_data, n_sents, doc_delimiter): sent_delimiter = "\n\n" sents = input_data.split(sent_delimiter) docs = [sents[i : i + n_sents] for i in range(0, len(sents), n_sents)] input_data = "" for doc in docs: input_data += sent_delimiter + doc_delimiter input_data += sent_delimiter.join(doc) return input_data def n_sents_info(msg, n_sents): msg.info(f"Grouping every {n_sents} sentences into a document.") if n_sents == 1: msg.warn( "To generate better training data, you may want to group " "sentences into documents with `-n 10`." )
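# ----------------------------------------------------------------------
# A minimal sketch for conll_ner_to_docs() above, converting a tiny
# invented CoNLL-2003-style snippet. The snippet already contains the
# document delimiter and sentence boundaries, so none of the automatic
# segmentation paths are triggered.
from spacy.training.converters import conll_ner_to_docs

conll_data = """-DOCSTART- -X- O O

I O
like O
London B-GPE
. O"""
docs = list(conll_ner_to_docs(conll_data, n_sents=0, no_print=True))
assert docs[0].ents[0].text == "London"
assert docs[0].ents[0].label_ == "GPE"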
6177
34.918605
85
py
spaCy
spaCy-master/spacy/training/converters/conllu_to_docs.py
import re

from wasabi import Printer

from ...tokens import Doc, Span, Token
from ...training import biluo_tags_to_spans, iob_to_biluo
from ...vocab import Vocab
from .conll_ner_to_docs import n_sents_info


def conllu_to_docs(
    input_data,
    n_sents=10,
    append_morphology=False,
    ner_map=None,
    merge_subtokens=False,
    no_print=False,
    **_
):
    """
    Convert CoNLL-U files into Doc objects for use with the train cli.
    The append_morphology parameter enables appending morphology to tags,
    which is useful for languages such as Spanish, where UD tags are not
    so rich.

    Extract NER tags if available and convert them so that they follow
    BILUO and the Wikipedia scheme.
    """
    MISC_NER_PATTERN = "^((?:name|NE)=)?([BILU])-([A-Z_]+)|O$"
    msg = Printer(no_print=no_print)
    n_sents_info(msg, n_sents)
    sent_docs = read_conllx(
        input_data,
        append_morphology=append_morphology,
        ner_tag_pattern=MISC_NER_PATTERN,
        ner_map=ner_map,
        merge_subtokens=merge_subtokens,
    )
    sent_docs_to_merge = []
    for sent_doc in sent_docs:
        sent_docs_to_merge.append(sent_doc)
        if len(sent_docs_to_merge) % n_sents == 0:
            yield Doc.from_docs(sent_docs_to_merge)
            sent_docs_to_merge = []
    if sent_docs_to_merge:
        yield Doc.from_docs(sent_docs_to_merge)


def has_ner(input_data, ner_tag_pattern):
    """
    Check the MISC column for NER tags.
    """
    for sent in input_data.strip().split("\n\n"):
        lines = sent.strip().split("\n")
        if lines:
            while lines[0].startswith("#"):
                lines.pop(0)
            for line in lines:
                parts = line.split("\t")
                id_, word, lemma, pos, tag, morph, head, dep, _1, misc = parts
                for misc_part in misc.split("|"):
                    if re.match(ner_tag_pattern, misc_part):
                        return True
    return False


def read_conllx(
    input_data,
    append_morphology=False,
    merge_subtokens=False,
    ner_tag_pattern="",
    ner_map=None,
):
    """Yield docs, one for each sentence"""
    vocab = Vocab()  # need vocab to make a minimal Doc
    set_ents = has_ner(input_data, ner_tag_pattern)
    for sent in input_data.strip().split("\n\n"):
        lines = sent.strip().split("\n")
        if lines:
            while lines[0].startswith("#"):
                lines.pop(0)
            doc = conllu_sentence_to_doc(
                vocab,
                lines,
                ner_tag_pattern,
                merge_subtokens=merge_subtokens,
                append_morphology=append_morphology,
                ner_map=ner_map,
                set_ents=set_ents,
            )
            yield doc


def get_entities(lines, tag_pattern, ner_map=None):
    """Find entities in the MISC column according to the pattern and map to
    final entity type with `ner_map` if mapping present. Entity tag is 'O' if
    the pattern is not matched.

    lines (list): CoNLL-U lines for one sentence
    tag_pattern (str): Regex pattern for entity tag
    ner_map (dict): Map old NER tag names to new ones, '' maps to O.
    RETURNS (list): List of BILUO entity tags
    """
    miscs = []
    for line in lines:
        parts = line.split("\t")
        id_, word, lemma, pos, tag, morph, head, dep, _1, misc = parts
        if "-" in id_ or "." in id_:
            continue
        miscs.append(misc)

    iob = []
    for misc in miscs:
        iob_tag = "O"
        for misc_part in misc.split("|"):
            tag_match = re.match(tag_pattern, misc_part)
            if tag_match:
                prefix = tag_match.group(2)
                suffix = tag_match.group(3)
                if prefix and suffix:
                    iob_tag = prefix + "-" + suffix
                    if ner_map:
                        suffix = ner_map.get(suffix, suffix)
                        if suffix == "":
                            iob_tag = "O"
                        else:
                            iob_tag = prefix + "-" + suffix
                break
        iob.append(iob_tag)
    return iob_to_biluo(iob)


def conllu_sentence_to_doc(
    vocab,
    lines,
    ner_tag_pattern,
    merge_subtokens=False,
    append_morphology=False,
    ner_map=None,
    set_ents=False,
):
    """Create a Doc from the lines for one CoNLL-U sentence, merging subtokens
    and appending morphology to tags if required.

    lines (list): The non-comment lines for a CoNLL-U sentence
    ner_tag_pattern (str): The regex pattern for matching NER in MISC col
    RETURNS (Doc): A Doc containing the annotation
    """
    # create a Doc with each subtoken as its own token
    # if merging subtokens, each subtoken orth is the merged subtoken form
    if not Token.has_extension("merged_orth"):
        Token.set_extension("merged_orth", default="")
    if not Token.has_extension("merged_lemma"):
        Token.set_extension("merged_lemma", default="")
    if not Token.has_extension("merged_morph"):
        Token.set_extension("merged_morph", default="")
    if not Token.has_extension("merged_spaceafter"):
        Token.set_extension("merged_spaceafter", default="")
    words, spaces, tags, poses, morphs, lemmas = [], [], [], [], [], []
    heads, deps = [], []
    subtok_word = ""
    in_subtok = False
    for i in range(len(lines)):
        line = lines[i]
        parts = line.split("\t")
        id_, word, lemma, pos, tag, morph, head, dep, _1, misc = parts
        if "." in id_:
            continue
        if "-" in id_:
            in_subtok = True
            subtok_word = word
            subtok_start, subtok_end = id_.split("-")
            subtok_spaceafter = "SpaceAfter=No" not in misc
            continue
        if merge_subtokens and in_subtok:
            words.append(subtok_word)
        else:
            words.append(word)
        if in_subtok:
            if id_ == subtok_end:
                spaces.append(subtok_spaceafter)
            else:
                spaces.append(False)
        elif "SpaceAfter=No" in misc:
            spaces.append(False)
        else:
            spaces.append(True)
        if in_subtok and id_ == subtok_end:
            subtok_word = ""
            in_subtok = False
        id_ = int(id_) - 1
        head = (int(head) - 1) if head not in ("0", "_") else id_
        tag = pos if tag == "_" else tag
        pos = pos if pos != "_" else ""
        morph = morph if morph != "_" else ""
        dep = "ROOT" if dep == "root" else dep
        lemmas.append(lemma)
        poses.append(pos)
        tags.append(tag)
        morphs.append(morph)
        heads.append(head)
        deps.append(dep)

    doc = Doc(
        vocab,
        words=words,
        spaces=spaces,
        tags=tags,
        pos=poses,
        deps=deps,
        lemmas=lemmas,
        morphs=morphs,
        heads=heads,
    )
    for i in range(len(doc)):
        doc[i]._.merged_orth = words[i]
        doc[i]._.merged_morph = morphs[i]
        doc[i]._.merged_lemma = lemmas[i]
        doc[i]._.merged_spaceafter = spaces[i]
    ents = None
    if set_ents:
        ents = get_entities(lines, ner_tag_pattern, ner_map)
        doc.ents = biluo_tags_to_spans(doc, ents)

    if merge_subtokens:
        doc = merge_conllu_subtokens(lines, doc)

    # create final Doc from custom Doc annotation
    words, spaces, tags, morphs, lemmas, poses = [], [], [], [], [], []
    heads, deps = [], []
    for i, t in enumerate(doc):
        words.append(t._.merged_orth)
        lemmas.append(t._.merged_lemma)
        spaces.append(t._.merged_spaceafter)
        morphs.append(t._.merged_morph)
        if append_morphology and t._.merged_morph:
            tags.append(t.tag_ + "__" + t._.merged_morph)
        else:
            tags.append(t.tag_)
        poses.append(t.pos_)
        heads.append(t.head.i)
        deps.append(t.dep_)

    doc_x = Doc(
        vocab,
        words=words,
        spaces=spaces,
        tags=tags,
        morphs=morphs,
        lemmas=lemmas,
        pos=poses,
        deps=deps,
        heads=heads,
    )
    if set_ents:
        doc_x.ents = [
            Span(doc_x, ent.start, ent.end, label=ent.label) for ent in doc.ents
        ]

    return doc_x


def merge_conllu_subtokens(lines, doc):
    # identify and process all subtoken spans to prepare attrs for merging
    subtok_spans = []
    for line in lines:
        parts = line.split("\t")
        id_, word, lemma, pos, tag, morph, head, dep, _1, misc = parts
        if "-" in id_:
            subtok_start, subtok_end = id_.split("-")
            subtok_span = doc[int(subtok_start) - 1 : int(subtok_end)]
            subtok_spans.append(subtok_span)
            # create merged tag, morph, and lemma values
            tags = []
            morphs = {}
            lemmas = []
            for token in subtok_span:
                tags.append(token.tag_)
                lemmas.append(token.lemma_)
                if token._.merged_morph:
                    for feature in token._.merged_morph.split("|"):
                        field, values = feature.split("=", 1)
                        if field not in morphs:
                            morphs[field] = set()
                        for value in values.split(","):
                            morphs[field].add(value)
            # create merged features for each morph field
            for field, values in morphs.items():
                morphs[field] = field + "=" + ",".join(sorted(values))
            # set the same attrs on all subtok tokens so that whatever head the
            # retokenizer chooses, the final attrs are available on that token
            for token in subtok_span:
                token._.merged_orth = token.orth_
                token._.merged_lemma = " ".join(lemmas)
                token.tag_ = "_".join(tags)
                token._.merged_morph = "|".join(sorted(morphs.values()))
                token._.merged_spaceafter = bool(subtok_span[-1].whitespace_)

    with doc.retokenize() as retokenizer:
        for span in subtok_spans:
            retokenizer.merge(span)

    return doc
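
A minimal usage sketch for the converter above (not part of the module; the two-token CoNLL-U sentence is invented for illustration):

# One sentence with ten tab-separated CoNLL-U columns per line.
conllu_sample = (
    "1\tShe\tshe\tPRON\tPRP\t_\t2\tnsubj\t_\t_\n"
    "2\tleft\tleave\tVERB\tVBD\t_\t0\troot\t_\tSpaceAfter=No"
)
docs = list(conllu_to_docs(conllu_sample, n_sents=1, no_print=True))
# Each yielded Doc carries the words, tags, POS, heads, deps and lemmas
# parsed from the columns above.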
10,276
32.47557
80
py
spaCy
spaCy-master/spacy/training/converters/iob_to_docs.py
from wasabi import Printer

from ...errors import Errors
from ...tokens import Doc, Span
from ...training import iob_to_biluo, tags_to_entities
from ...util import minibatch
from ...vocab import Vocab
from .conll_ner_to_docs import n_sents_info


def iob_to_docs(input_data, n_sents=10, no_print=False, *args, **kwargs):
    """
    Convert IOB files with one sentence per line and tags separated with '|'
    into Doc objects so they can be saved. IOB and IOB2 are accepted.

    Sample formats:

    I|O like|O London|I-GPE and|O New|B-GPE York|I-GPE City|I-GPE .|O
    I|O like|O London|B-GPE and|O New|B-GPE York|I-GPE City|I-GPE .|O
    I|PRP|O like|VBP|O London|NNP|I-GPE and|CC|O New|NNP|B-GPE York|NNP|I-GPE City|NNP|I-GPE .|.|O
    I|PRP|O like|VBP|O London|NNP|B-GPE and|CC|O New|NNP|B-GPE York|NNP|I-GPE City|NNP|I-GPE .|.|O
    """
    vocab = Vocab()  # need vocab to make a minimal Doc
    msg = Printer(no_print=no_print)
    if n_sents > 0:
        n_sents_info(msg, n_sents)
    yield from read_iob(input_data.split("\n"), vocab, n_sents)


def read_iob(raw_sents, vocab, n_sents):
    for group in minibatch(raw_sents, size=n_sents):
        tokens = []
        words = []
        tags = []
        iob = []
        sent_starts = []
        for line in group:
            if not line.strip():
                continue
            sent_tokens = [t.split("|") for t in line.split()]
            if len(sent_tokens[0]) == 3:
                sent_words, sent_tags, sent_iob = zip(*sent_tokens)
            elif len(sent_tokens[0]) == 2:
                sent_words, sent_iob = zip(*sent_tokens)
                sent_tags = ["-"] * len(sent_words)
            else:
                raise ValueError(Errors.E902)
            words.extend(sent_words)
            tags.extend(sent_tags)
            iob.extend(sent_iob)
            tokens.extend(sent_tokens)
            sent_starts.append(True)
            sent_starts.extend([False for _ in sent_words[1:]])
        doc = Doc(vocab, words=words)
        for i, tag in enumerate(tags):
            doc[i].tag_ = tag
        for i, sent_start in enumerate(sent_starts):
            doc[i].is_sent_start = sent_start
        biluo = iob_to_biluo(iob)
        entities = tags_to_entities(biluo)
        doc.ents = [Span(doc, start=s, end=e + 1, label=L) for (L, s, e) in entities]
        yield doc
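
A minimal usage sketch (not part of the module), reusing one of the sample lines from the docstring:

sample = "I|PRP|O like|VBP|O London|NNP|B-GPE .|.|O"
docs = list(iob_to_docs(sample, n_sents=10, no_print=True))
# Yields one Doc whose middle column becomes the token tags and whose final
# column becomes a single GPE entity over "London".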
2,356
36.412698
98
py
spaCy
spaCy-master/spacy/training/converters/json_to_docs.py
import srsly

from ...lang.xx import MultiLanguage
from ...util import load_model
from ..example import (
    _fix_legacy_dict_data,
    _parse_example_dict_data,
    annotations_to_doc,
)
from ..gold_io import json_iterate, json_to_annotations


def json_to_docs(input_data, model=None, **kwargs):
    nlp = load_model(model) if model is not None else MultiLanguage()
    if not isinstance(input_data, bytes):
        if not isinstance(input_data, str):
            input_data = srsly.json_dumps(input_data)
        input_data = input_data.encode("utf8")
    for json_doc in json_iterate(input_data):
        for json_para in json_to_annotations(json_doc):
            example_dict = _fix_legacy_dict_data(json_para)
            tok_dict, doc_dict = _parse_example_dict_data(example_dict)
            doc = annotations_to_doc(nlp.vocab, tok_dict, doc_dict)
            yield doc
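
A hedged usage sketch (not part of the module): the dict below imitates spaCy's legacy v2 JSON training format; the exact fields are an assumption based on that schema and may need adjusting.

json_data = [
    {
        "id": 0,
        "paragraphs": [
            {
                "raw": "Hi there",
                "sentences": [
                    {
                        "tokens": [
                            # head offsets are relative, per the v2 schema
                            {"id": 0, "orth": "Hi", "tag": "UH", "head": 0, "dep": "ROOT", "ner": "O"},
                            {"id": 1, "orth": "there", "tag": "RB", "head": -1, "dep": "advmod", "ner": "O"},
                        ],
                    }
                ],
            }
        ],
    }
]
docs = list(json_to_docs(json_data))  # non-bytes input is JSON-encoded first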
880
34.24
71
py
spaCy
spaCy-master/website/README.md
# spacy.io website and docs

![Netlify Status](https://api.netlify.com/api/v1/badges/d65fe97d-99ab-47f8-a339-1d8987251da0/deploy-status)

The styleguide for the spaCy website is available at
[spacy.io/styleguide](https://spacy.io/styleguide).

## Setup and installation

```bash
# Clone the repository
git clone https://github.com/explosion/spaCy
cd spaCy/website

# Switch to the correct Node version
#
# If you don't have NVM and don't want to use it, you can manually switch to the Node version
# stated in /.nvmrc and skip this step
nvm use

# Install the dependencies
npm install

# Start the development server
npm run dev
```

If you are planning on making edits to the site, you should also set up the
[Prettier](https://prettier.io/) code formatter. It takes care of formatting
Markdown and other files automatically.
[See here](https://prettier.io/docs/en/editors.html) for the available
extensions for your code editor. The
[`.prettierrc`](https://github.com/explosion/spaCy/tree/master/website/.prettierrc)
file in the root defines the settings used in this codebase.

## Building & developing the site with Docker

While it shouldn't be necessary and is not recommended, you can run this site
in a Docker container. If you'd like to do this, **be sure you do _not_ include
your local `node_modules` folder**, since there are some dependencies that need
to be built for the image system. Rename it before using.

First build the Docker image. This only needs to be done on the first run or
when changes are made to `Dockerfile` or the website dependencies:

```bash
docker build -t spacy-io .
```

You can then build and run the website with:

```bash
docker run -it \
  --rm \
  -v $(pwd):/home/node/website \
  -p 3000:3000 \
  spacy-io \
  npm run dev -- -H 0.0.0.0
```

This will allow you to access the built website at http://0.0.0.0:3000/ in your
browser, and still edit code in your editor while having the site reflect those
changes.

## Project structure

```yaml
├── docs                 # the actual markdown content
├── meta                 # JSON-formatted site metadata
|   ├── dynamicMeta.js          # meta data generated at build time
|   ├── languages.json          # supported languages and statistical models
|   ├── sidebars.json           # sidebar navigations for different sections
|   ├── site.json               # general site metadata
|   ├── type-annotations.json   # Type annotations
|   └── universe.json           # data for the spaCy universe section
├── pages                # Next router pages
├── public               # static images and other assets
├── setup                # Jinja setup
├── src                  # source
|   ├── components       # React components
|   ├── fonts            # webfonts
|   ├── images           # images used in the layout
|   ├── plugins          # custom plugins to transform Markdown
|   ├── styles           # CSS modules and global styles
|   ├── templates        # page layouts
|   |   ├── docs.js      # layout template for documentation pages
|   |   ├── index.js     # global layout template
|   |   ├── models.js    # layout template for model pages
|   |   └── universe.js  # layout templates for universe
|   └── widgets          # non-reusable components with content, e.g. changelog
├── .eslintrc.json       # ESLint config file
├── .nvmrc               # NVM config file
|                        # (to support "nvm use" to switch to correct Node version)
├── .prettierrc          # Prettier config file
├── next.config.mjs      # Next config file
├── package.json         # package settings and dependencies
└── tsconfig.json        # TypeScript config file
```
3,624
34.891089
107
md
spaCy
spaCy-master/website/UNIVERSE.md
<a href="https://explosion.ai"><img src="https://explosion.ai/assets/img/logo.svg" width="125" height="125" align="right" /></a>

# spaCy Universe

The [spaCy Universe](https://spacy.io/universe) collects the many great
resources developed with or for spaCy. It includes standalone packages,
plugins, extensions, educational materials, operational utilities and bindings
for other languages.

If you have a project that you want the spaCy community to make use of, you can
suggest it by submitting a pull request to this repository. The Universe
database is open-source and collected in a simple JSON file.

Looking for inspiration for your own spaCy plugin or extension? Check out the
[`project ideas`](https://github.com/explosion/spaCy/discussions?discussions_q=category%3A%22New+Features+%26+Project+Ideas%22)
discussion forum.

## Checklist

### Projects

✅ Libraries and packages should be **open-source** (with a user-friendly
license) and at least somewhat **documented** (e.g. a simple `README` with
usage instructions).

✅ We're happy to include work in progress and prereleases, but we'd like to
keep the emphasis on projects that should be useful to the community **right
away**.

✅ Demos and visualizers should be available via a **public URL**.

### Educational Materials

✅ Books should be **available for purchase or download** (not just pre-order).
Ebooks and self-published books are fine, too, if they include enough
substantial content.

✅ The `"url"` of book entries should either point to the publisher's website
or a reseller of your choice (ideally one that ships worldwide or as close as
possible).

✅ If an online course is only available behind a paywall, it should at least
have a **free excerpt** or chapter available, so users know what to expect.

## JSON format

To add a project, fork this repository, edit the
[`universe.json`](meta/universe.json) and add an object of the following format
to the list of `"resources"`. Before you submit your pull request, make sure to
use a linter to verify that your markup is correct.

```json
{
    "id": "unique-project-id",
    "title": "Project title",
    "slogan": "A short summary",
    "description": "A longer description – *Markdown allowed!*",
    "github": "user/repo",
    "pip": "package-name",
    "code_example": [
        "import spacy",
        "import package_name",
        "",
        "nlp = spacy.load('en')",
        "nlp.add_pipe(package_name)"
    ],
    "code_language": "python",
    "url": "https://example.com",
    "thumb": "https://example.com/thumb.jpg",
    "image": "https://example.com/image.jpg",
    "author": "Your Name",
    "author_links": {
        "twitter": "username",
        "github": "username",
        "website": "https://example.com"
    },
    "category": ["pipeline", "standalone"],
    "tags": ["some-tag", "etc"]
}
```

| Field           | Type   | Description                                                                                                                             |
| --------------- | ------ | --------------------------------------------------------------------------------------------------------------------------------------- |
| `id`            | string | Unique ID of the project.                                                                                                                |
| `title`         | string | Project title. If not set, the `id` will be used as the display title.                                                                   |
| `slogan`        | string | A short description of the project. Displayed in the overview and under the title.                                                       |
| `description`   | string | A longer description of the project. Markdown is allowed, but should be limited to basic formatting like bold, italics, code or links.   |
| `github`        | string | Associated GitHub repo in the format `user/repo`. Will be displayed as a link and used for release, license and star badges.             |
| `pip`           | string | Package name on pip. If available, the installation command will be displayed.                                                           |
| `cran`          | string | For R packages: package name on CRAN. If available, the installation command will be displayed.                                          |
| `code_example`  | array  | Short example that shows how to use the project. Formatted as an array with one string per line.                                         |
| `code_language` | string | Defaults to `'python'`. Optional code language used for syntax highlighting with [Prism](http://prismjs.com/).                           |
| `url`           | string | Optional project link to display as button.                                                                                              |
| `thumb`         | string | Optional URL to project thumbnail to display in overview and project header. Recommended size is 100x100px.                              |
| `image`         | string | Optional URL to project image to display with description.                                                                               |
| `author`        | string | Name(s) of project author(s).                                                                                                            |
| `author_links`  | object | Usernames and links to display as icons to author info. Currently supports `twitter` and `github` usernames, as well as `website` link.  |
| `category`      | list   | One or more categories to assign to project. Must be one of the available options.                                                       |
| `tags`          | list   | Still experimental and not used for filtering: one or more tags to assign to project.                                                    |

To separate them from the projects, educational materials also specify
`"type": "education"`. Books can also set a `"cover"` field containing a URL to
a cover image. If available, it's used in the overview and displayed on the
individual book page.
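As a quick local sanity check before opening a pull request, a small script along these lines (hypothetical, not part of the repo) can flag entries with missing display fields:

```python
import json

# Load the Universe database from the website metadata folder.
with open("meta/universe.json", encoding="utf8") as f:
    universe = json.load(f)

for resource in universe["resources"]:
    # "id" is the one field every entry needs; the rest are display fields.
    assert "id" in resource, "every entry needs a unique id"
    for field in ("title", "slogan", "github"):
        if field not in resource:
            print(f"{resource['id']}: missing field '{field}'")
```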
6,100
57.104762
166
md
spaCy
spaCy-master/website/meta/languageSorted.tsx
import models from './languages.json'

export const languagesSorted = models.languages
    .filter(({ models }) => models && models.length)
    .sort((a, b) => a.name.localeCompare(b.name))
190
30.833333
52
tsx
spaCy
spaCy-master/website/meta/recordLanguages.tsx
import models from './languages.json'

const recordLanguages = Object.fromEntries(
    models.languages.map((language, index) => [language.code, language])
)

export default recordLanguages
190
22.875
72
tsx
spaCy
spaCy-master/website/meta/recordSections.tsx
import siteMetadata from './site.json'

const recordSections = Object.fromEntries(siteMetadata.sections.map((s) => [s.id, s]))

export default recordSections
158
25.5
86
tsx
spaCy
spaCy-master/website/meta/recordUniverse.tsx
import universe from './universe.json'

export const recordUniverseCategories = Object.fromEntries(
    universe.categories.flatMap((category) => category.items.map((item) => [item.id, item]))
)

export const recordUniverseResources = Object.fromEntries(
    universe.resources.map((resource) => [resource.id, resource])
)
323
31.4
92
tsx
spaCy
spaCy-master/website/meta/sidebarFlat.tsx
import sidebars from './sidebars.json'

export const sidebarUsageFlat = sidebars
    .find((sidebar) => sidebar.section === 'usage')
    .items.flatMap((item) => item.items)
174
28.166667
51
tsx
spaCy
spaCy-master/website/pages/[...listPathPage].tsx
import type { GetStaticPaths, GetStaticProps } from 'next'
import { serialize } from 'next-mdx-remote/serialize'
import fs from 'fs'
import { MDXRemote, MDXRemoteSerializeResult } from 'next-mdx-remote'
import path from 'path'
import Layout from '../src/templates'
import remarkPlugins from '../plugins/index.mjs'
import recordSection from '../meta/recordSections'
import { sidebarUsageFlat } from '../meta/sidebarFlat'

type ApiDetails = {
    stringName: string | null
    baseClass: {
        title: string
        slug: string
    } | null
    trainable: string | null
}

export type PropsPageBase = {
    /**
     * TODO: This is only here for legacy support of the old code base
     * It should be refactored to pass the file path and page path instead.
     */
    slug: string
    sectionTitle: string | null
    theme: string | null
    section: string
    isIndex: boolean
}

export type PropsPage = PropsPageBase & {
    mdx: MDXRemoteSerializeResult
    apiDetails: ApiDetails
}

const PostPage = ({ mdx: mdx, ...props }: PropsPage) => {
    return (
        <Layout {...props}>
            <MDXRemote {...mdx} />
        </Layout>
    )
}

export default PostPage

type ParsedUrlQuery = {
    listPathPage: Array<string>
}

export const getStaticPaths: GetStaticPaths<ParsedUrlQuery> = async () => {
    // This function needs to be defined inside `getStaticPath` to be executed in the correct context
    const loadFolder = (pathBase: Array<string> = []): Array<{ params: ParsedUrlQuery }> =>
        fs
            .readdirSync(path.join('docs', ...pathBase), { withFileTypes: true })
            .flatMap((dirent: fs.Dirent) => {
                if (dirent.isDirectory()) {
                    return loadFolder([...pathBase, dirent.name])
                }

                if (!dirent.name.includes('.mdx') || dirent.name[0] === '_') {
                    return []
                }

                return {
                    params: {
                        listPathPage:
                            dirent.name === 'index.mdx'
                                ? pathBase
                                : [...pathBase, dirent.name.replace('.mdx', '')],
                    },
                }
            })

    return {
        paths: loadFolder(),
        fallback: false,
    }
}

const getPathFileWithExtension = (listPathFile: ReadonlyArray<string>) =>
    `${path.join(...listPathFile)}.mdx`

export const getStaticProps: GetStaticProps<PropsPage, ParsedUrlQuery> = async (args) => {
    if (!args.params) {
        return { notFound: true }
    }

    const listPathFile = ['docs', ...args.params.listPathPage]
    const isIndex = fs.existsSync(getPathFileWithExtension(listPathFile)) !== true
    const listPathFileWithIndex = isIndex ? [...listPathFile, 'index'] : listPathFile
    const pathFileWithIndexAndExtension = getPathFileWithExtension(listPathFileWithIndex)

    const mdx = await serialize(fs.readFileSync(pathFileWithIndexAndExtension, 'utf-8'), {
        parseFrontmatter: true,
        mdxOptions: { remarkPlugins },
    })

    if (!mdx.frontmatter) {
        throw new Error(`Frontmatter missing for ${pathFileWithIndexAndExtension}`)
    }

    const parentFolder =
        listPathFileWithIndex.length > 1
            ? listPathFileWithIndex[listPathFileWithIndex.length - 2]
            : null
    const section = mdx.frontmatter.section ?? parentFolder
    const sectionMeta = section ? recordSection[section] ?? null : null
    const baseClass = null
    const apiDetails: ApiDetails = {
        stringName: mdx.frontmatter.api_string_name ?? null,
        baseClass: baseClass
            ? {
                  title: mdx.frontmatter.title,
                  slug: mdx.frontmatter.api_base_class,
              }
            : null,
        trainable: mdx.frontmatter.api_trainable ?? null,
    }

    const slug = `/${args.params.listPathPage.join('/')}`
    const next =
        section === 'usage'
            ? sidebarUsageFlat.find((item, index) => {
                  return (
                      index > 0 && sidebarUsageFlat[index - 1].url === slug && item.url[0] === '/'
                  )
              })
            : undefined

    return {
        props: {
            ...mdx.frontmatter,
            slug,
            mdx,
            sectionTitle: sectionMeta?.title ?? null,
            theme: sectionMeta?.theme ?? null,
            section: section,
            apiDetails: apiDetails,
            isIndex,
            next: next
                ? {
                      slug: next.url,
                      title: next.text,
                  }
                : null,
        },
    }
}
4,683
30.019868
113
tsx
spaCy
spaCy-master/website/pages/_app.tsx
import '../src/styles/layout.sass'
import '../src/styles/search.sass'

import type { AppProps } from 'next/app'
import Head from 'next/head'
import PlausibleProvider from 'next-plausible'
import { MDXProvider } from '@mdx-js/react'

import { remarkComponents } from '../src/remark'
import { domain } from '../meta/dynamicMeta.mjs'

export default function App({ Component, pageProps }: AppProps) {
    return (
        <PlausibleProvider domain={domain} enabled>
            <Head>
                <link rel="sitemap" type="application/xml" href="/sitemap.xml" />
                <link rel="shortcut icon" href="/icons/icon-192x192.png" />
                <link rel="manifest" href="/manifest.webmanifest" />
                <meta
                    name="viewport"
                    content="width=device-width, initial-scale=1.0, minimum-scale=1, maximum-scale=5.0, shrink-to-fit=no, viewport-fit=cover"
                />
                <meta name="theme-color" content="#09a3d5" />
                <link rel="apple-touch-icon" sizes="192x192" href="/icons/icon-192x192.png" />
                <link rel="apple-touch-icon" sizes="256x256" href="/icons/icon-256x256.png" />
                <link rel="apple-touch-icon" sizes="384x384" href="/icons/icon-384x384.png" />
                <link rel="apple-touch-icon" sizes="512x512" href="/icons/icon-512x512.png" />
            </Head>
            <MDXProvider components={remarkComponents}>
                <Component {...pageProps} />
            </MDXProvider>
        </PlausibleProvider>
    )
}
1,554
44.735294
141
tsx
spaCy
spaCy-master/website/pages/_document.tsx
import { Html, Head, Main, NextScript } from 'next/document'

export default function Document() {
    return (
        <Html lang="en">
            <Head />
            <body className="theme-blue">
                <Main />
                <NextScript />
            </body>
        </Html>
    )
}
300
20.5
60
tsx
spaCy
spaCy-master/website/pages/index.tsx
import React from 'react' import PropTypes from 'prop-types' import { LandingHeader, LandingTitle, LandingSubtitle, LandingGrid, LandingCard, LandingCol, LandingDemo, LandingBannerGrid, LandingBanner, } from '../src/components/landing' import { H2 } from '../src/components/typography' import { InlineCode } from '../src/components/inlineCode' import { Ul, Li } from '../src/components/list' import Button from '../src/components/button' import Link from '../src/components/link' import QuickstartTraining from '../src/widgets/quickstart-training' import Project from '../src/widgets/project' import Features from '../src/widgets/features' import Layout from '../src/templates' import courseImage from '../public/images/course.jpg' import prodigyImage from '../public/images/prodigy_overview.jpg' import projectsImage from '../public/images/projects.png' import tailoredPipelinesImage from '../public/images/spacy-tailored-pipelines_wide.png' import { nightly, legacy } from '../meta/dynamicMeta.mjs' import Benchmarks from '../docs/usage/_benchmarks-models.mdx' import { ImageFill } from '../src/components/embed' function getCodeExample(nightly) { return `# pip install -U ${nightly ? 'spacy-nightly --pre' : 'spacy'} # python -m spacy download en_core_web_sm import spacy # Load English tokenizer, tagger, parser and NER nlp = spacy.load("en_core_web_sm") # Process whole documents text = ("When Sebastian Thrun started working on self-driving cars at " "Google in 2007, few people outside of the company took him " "seriously. “I can tell you very senior CEOs of major American " "car companies would shake my hand and turn away because I wasn’t " "worth talking to,” said Thrun, in an interview with Recode earlier " "this week.") doc = nlp(text) # Analyze syntax print("Noun phrases:", [chunk.text for chunk in doc.noun_chunks]) print("Verbs:", [token.lemma_ for token in doc if token.pos_ == "VERB"]) # Find named entities, phrases and concepts for entity in doc.ents: print(entity.text, entity.label_) ` } const Landing = () => { const codeExample = getCodeExample(nightly) return ( <Layout> <LandingHeader nightly={nightly} legacy={legacy}> <LandingTitle> Industrial-Strength <br /> Natural Language <br /> Processing </LandingTitle> <LandingSubtitle>in Python</LandingSubtitle> </LandingHeader> <LandingGrid blocks> <LandingCard title="Get things done" url="/usage/spacy-101" button="Get started"> spaCy is designed to help you do real work — to build real products, or gather real insights. The library respects your time, and tries to avoid wasting it. It&apos;s easy to install, and its API is simple and productive. </LandingCard> <LandingCard title="Blazing fast" url="/usage/facts-figures" button="Facts &amp; Figures" > spaCy excels at large-scale information extraction tasks. It&apos;s written from the ground up in carefully memory-managed Cython. If your application needs to process entire web dumps, spaCy is the library you want to be using. </LandingCard> <LandingCard title="Awesome ecosystem" url="/usage/projects" button="Read more"> Since its release in 2015, spaCy has become an industry standard with a huge ecosystem. Choose from a variety of plugins, integrate with your machine learning stack and build custom components and workflows. 
</LandingCard> </LandingGrid> <LandingGrid> <LandingDemo title="Edit the code &amp; try spaCy">{codeExample}</LandingDemo> <LandingCol> <H2>Features</H2> <Features /> </LandingCol> </LandingGrid> <LandingBannerGrid> <LandingBanner to="https://explosion.ai/custom-solutions" button="Learn more" background="#E4F4F9" color="#1e1935" small > <p> <Link to="https://explosion.ai/custom-solutions" hidden> <ImageFill image={tailoredPipelinesImage} alt="spaCy Tailored Pipelines" /> </Link> </p> <p> <strong> Get a custom spaCy pipeline, tailor-made for your NLP problem by spaCy&apos;s core developers. </strong> </p> <Ul> <Li emoji="🔥"> <strong>Streamlined.</strong> Nobody knows spaCy better than we do. Send us your pipeline requirements and we&apos;ll be ready to start producing your solution in no time at all. </Li> <Li emoji="🐿 "> <strong>Production ready.</strong> spaCy pipelines are robust and easy to deploy. You&apos;ll get a complete spaCy project folder which is ready to <InlineCode>spacy project run</InlineCode>. </Li> <Li emoji="🔮"> <strong>Predictable.</strong> You&apos;ll know exactly what you&apos;re going to get and what it&apos;s going to cost. We quote fees up-front, let you try before you buy, and don&apos;t charge for over-runs at our end — all the risk is on us. </Li> <Li emoji="🛠"> <strong>Maintainable.</strong> spaCy is an industry standard, and we&apos;ll deliver your pipeline with full code, data, tests and documentation, so your team can retrain, update and extend the solution as your requirements change. </Li> </Ul> </LandingBanner> <LandingBanner title="Prodigy: Radically efficient machine teaching" label="From the makers of spaCy" to="https://prodi.gy" button="Try it out" background="#f6f6f6" color="#000" small > <p> <Link to="https://prodi.gy" noLinkLayout> <ImageFill image={prodigyImage} alt="Prodigy: Radically efficient machine teaching" /> </Link> </p> <p> Prodigy is an <strong>annotation tool</strong> so efficient that data scientists can do the annotation themselves, enabling a new level of rapid iteration. Whether you&apos;re working on entity recognition, intent detection or image classification, Prodigy can help you{' '} <strong>train and evaluate</strong> your models faster. </p> </LandingBanner> </LandingBannerGrid> <LandingGrid cols={2} style={{ gridTemplateColumns: '1fr calc(80ch + 14rem)' }}> <LandingCol> <H2>Reproducible training for custom pipelines</H2> <p> spaCy v3.0 introduces a comprehensive and extensible system for{' '} <strong>configuring your training runs</strong>. Your configuration file will describe every detail of your training run, with no hidden defaults, making it easy to <strong>rerun your experiments</strong> and track changes. You can use the quickstart widget or the{' '} <Link to="/api/cli#init-config"> <InlineCode>init config</InlineCode> </Link>{' '} command to get started, or clone a project template for an end-to-end workflow. 
</p> <p> <Button to="/usage/training">Get started</Button> </p> </LandingCol> <LandingCol> <QuickstartTraining /> </LandingCol> </LandingGrid> <LandingGrid cols={2}> <LandingCol> <Link to="/usage/projects" hidden> <ImageFill image={projectsImage} alt="Illustration of project workflow and commands" /> </Link> <br /> <br /> <br /> <Project id="pipelines/tagger_parser_ud" title="Get started"> The easiest way to get started is to clone a project template and run it – for example, this template for training a{' '} <strong>part-of-speech tagger</strong> and{' '} <strong>dependency parser</strong> on a Universal Dependencies treebank. </Project> </LandingCol> <LandingCol> <H2>End-to-end workflows from prototype to production</H2> <p> spaCy&apos;s new project system gives you a smooth path from prototype to production. It lets you keep track of all those{' '} <strong>data transformation</strong>, preprocessing and{' '} <strong>training steps</strong>, so you can make sure your project is always ready to hand over for automation. It features source asset download, command execution, checksum verification, and caching with a variety of backends and integrations. </p> <p> <Button to="/usage/projects">Try it out</Button> </p> </LandingCol> </LandingGrid> <LandingBannerGrid> <LandingBanner label="New in v3.0" title="Transformer-based pipelines, new training system, project templates &amp; more" to="/usage/v3" button="See what's new" small > <p> spaCy v3.0 features all new <strong>transformer-based pipelines</strong>{' '} that bring spaCy&apos;s accuracy right up to the current{' '} <strong>state-of-the-art</strong>. You can use any pretrained transformer to train your own pipelines, and even share one transformer between multiple components with <strong>multi-task learning</strong>. Training is now fully configurable and extensible, and you can define your own custom models using{' '} <strong>PyTorch</strong>, <strong>TensorFlow</strong> and other frameworks. </p> </LandingBanner> <LandingBanner to="https://course.spacy.io" button="Start the course" background="#f6f6f6" color="#252a33" small > <p> <Link to="https://course.spacy.io" hidden> <ImageFill image={courseImage} alt="Advanced NLP with spaCy: A free online course" /> </Link> </p> <p> In this <strong>free and interactive online course</strong> you’ll learn how to use spaCy to build advanced natural language understanding systems, using both rule-based and machine learning approaches. It includes{' '} <strong>55 exercises</strong> featuring videos, slide decks, multiple-choice questions and interactive coding practice in the browser. </p> </LandingBanner> </LandingBannerGrid> <LandingGrid cols={2} style={{ gridTemplateColumns: '1fr 60%' }}> <LandingCol> <H2>Benchmarks</H2> <p> spaCy v3.0 introduces transformer-based pipelines that bring spaCy&apos;s accuracy right up to the current <strong>state-of-the-art</strong>. You can also use a CPU-optimized pipeline, which is less accurate but much cheaper to run. </p> <p> <Button to="/usage/facts-figures#benchmarks">More results</Button> </p> </LandingCol> <LandingCol> <Benchmarks /> </LandingCol> </LandingGrid> </Layout> ) } export default Landing
14,310
45.615635
106
tsx
spaCy
spaCy-master/website/pages/models/[slug].tsx
import type { GetStaticPaths, GetStaticProps } from 'next'
import models from '../../meta/languages.json'
import recordSection from '../../meta/recordSections'
import recordLanguages from '../../meta/recordLanguages'
import Layout from '../../src/templates'
import { PropsPageBase } from '../[...listPathPage]'
import { languagesSorted } from '../../meta/languageSorted'

type PropsPageModel = PropsPageBase & {
    next: { title: string; slug: string } | null
    meta: { models?: ReadonlyArray<string>; example?: string; hasExamples?: boolean }
}

const PostPageModel = (props: PropsPageModel) => {
    return <Layout {...props} />
}

export default PostPageModel

export const getStaticPaths: GetStaticPaths<{ slug: string }> = async () => {
    return {
        paths: models.languages
            .filter(({ models }) => models && models.length)
            .map((language) => `/models/${language.code}`),
        fallback: false,
    }
}

export const getStaticProps: GetStaticProps<PropsPageModel, { slug: string }> = async (args) => {
    const getSlug = (languageCode: string) => `/${['models', languageCode].join('/')}`

    if (args.params === undefined) {
        return { notFound: true }
    }

    const language = recordLanguages[args.params.slug]
    const nextLanguage = languagesSorted.find(
        (item, index) => index > 0 && languagesSorted[index - 1].code === language.code
    )

    return {
        props: {
            id: language.code,
            slug: getSlug(language.code),
            isIndex: false,
            title: language.name,
            section: 'models',
            sectionTitle: recordSection.models.title,
            theme: recordSection.models.theme,
            next: nextLanguage
                ? { title: nextLanguage.name, slug: getSlug(nextLanguage.code) }
                : null,
            meta: {
                models: language.models || null,
                example: language.example || null,
                hasExamples: language.has_examples || null,
            },
        },
    }
}
2,076
30
87
tsx
spaCy
spaCy-master/website/pages/universe/index.tsx
import recordSections from '../../meta/recordSections'
import Layout from '../../src/templates'

const Universe = () => {
    return (
        <Layout
            slug={'/universe'}
            section="universe"
            sectionTitle={recordSections.universe.title}
            theme={recordSections.universe.theme}
            isIndex
            title="Overview"
        />
    )
}

export default Universe
413
22
56
tsx
spaCy
spaCy-master/website/pages/universe/category/[slug].tsx
import { GetStaticPaths, GetStaticProps } from 'next'
import recordSections from '../../../meta/recordSections'
import { recordUniverseCategories } from '../../../meta/recordUniverse'
import universe from '../../../meta/universe.json'
import Layout from '../../../src/templates'
import { PropsPageBase } from '../../[...listPathPage]'

type ParsedUrlQuery = {
    slug: string
}

export default Layout

export const getStaticPaths: GetStaticPaths<ParsedUrlQuery> = async () => {
    return {
        paths: universe.categories.flatMap((category) =>
            category.items.map((item) => `/universe/category/${item.id}`)
        ),
        fallback: false,
    }
}

export const getStaticProps: GetStaticProps<PropsPageBase, ParsedUrlQuery> = async (args) => {
    if (!args.params) {
        return { notFound: true }
    }

    const item = recordUniverseCategories[args.params.slug]

    return {
        props: {
            id: item.id,
            title: item.title,
            teaser: item.description,
            slug: `/universe/category/${args.params.slug}`,
            isIndex: false,
            data: { ...item, isCategory: true },
            section: 'universe',
            sectionTitle: recordSections.universe.title,
            theme: recordSections.universe.theme,
        },
    }
}
1,309
28.772727
94
tsx
spaCy
spaCy-master/website/pages/universe/project/[slug].tsx
import { GetStaticPaths, GetStaticProps } from 'next'
import recordSections from '../../../meta/recordSections'
import { recordUniverseResources } from '../../../meta/recordUniverse'
import universe from '../../../meta/universe.json'
import Layout from '../../../src/templates'
import { PropsPageBase } from '../../[...listPathPage]'

type ParsedUrlQuery = {
    slug: string
}

export default Layout

export const getStaticPaths: GetStaticPaths<ParsedUrlQuery> = async () => {
    return {
        paths: universe.resources.flatMap((resource) => `/universe/project/${resource.id}`),
        fallback: false,
    }
}

export const getStaticProps: GetStaticProps<PropsPageBase, ParsedUrlQuery> = async (args) => {
    if (!args.params) {
        return { notFound: true }
    }

    const resource = recordUniverseResources[args.params.slug]

    return {
        props: {
            id: resource.id,
            title: resource.title || resource.id,
            teaser: resource.slogan || null,
            slug: `/universe/project/${args.params.slug}`,
            isIndex: false,
            data: { ...resource, isProject: true },
            section: 'universe',
            sectionTitle: recordSections.universe.title,
            theme: recordSections.universe.theme,
        },
    }
}
1,294
29.833333
94
tsx
spaCy
spaCy-master/website/public/images/displacy-dep-founded.html
<svg xmlns="http://www.w3.org/2000/svg" xlink="http://www.w3.org/1999/xlink" xml:lang="en" id="c3124cc3e661444cb9d4175a5b7c09d1-0" class="displacy" width="925" height="399.5" direction="ltr" style=" max-width: none; height: 399.5px; color: #000000; background: #ffffff; font-family: Arial; direction: ltr; " > <text class="displacy-token" fill="currentColor" text-anchor="middle" y="309.5"> <tspan class="displacy-word" fill="currentColor" x="50">Smith</tspan> <tspan class="displacy-tag" dy="2em" fill="currentColor" x="50"></tspan> </text> <text class="displacy-token" fill="currentColor" text-anchor="middle" y="309.5"> <tspan class="displacy-word" fill="currentColor" x="225">founded</tspan> <tspan class="displacy-tag" dy="2em" fill="currentColor" x="225"></tspan> </text> <text class="displacy-token" fill="currentColor" text-anchor="middle" y="309.5"> <tspan class="displacy-word" fill="currentColor" x="400">a</tspan> <tspan class="displacy-tag" dy="2em" fill="currentColor" x="400"></tspan> </text> <text class="displacy-token" fill="currentColor" text-anchor="middle" y="309.5"> <tspan class="displacy-word" fill="currentColor" x="575">healthcare</tspan> <tspan class="displacy-tag" dy="2em" fill="currentColor" x="575"></tspan> </text> <text class="displacy-token" fill="currentColor" text-anchor="middle" y="309.5"> <tspan class="displacy-word" fill="currentColor" x="750">company</tspan> <tspan class="displacy-tag" dy="2em" fill="currentColor" x="750"></tspan> </text> <g class="displacy-arrow"> <path class="displacy-arc" id="arrow-c3124cc3e661444cb9d4175a5b7c09d1-0-0" stroke-width="2px" d="M70,264.5 C70,177.0 215.0,177.0 215.0,264.5" fill="none" stroke="currentColor" ></path> <text dy="1.25em" style="font-size: 0.8em; letter-spacing: 1px"> <textPath xlink:href="#arrow-c3124cc3e661444cb9d4175a5b7c09d1-0-0" class="displacy-label" startOffset="50%" side="left" fill="currentColor" text-anchor="middle" > nsubj </textPath> </text> <path class="displacy-arrowhead" d="M70,266.5 L62,254.5 78,254.5" fill="currentColor" ></path> </g> <g class="displacy-arrow"> <path class="displacy-arc" id="arrow-c3124cc3e661444cb9d4175a5b7c09d1-0-1" stroke-width="2px" d="M420,264.5 C420,89.5 745.0,89.5 745.0,264.5" fill="none" stroke="currentColor" ></path> <text dy="1.25em" style="font-size: 0.8em; letter-spacing: 1px"> <textPath xlink:href="#arrow-c3124cc3e661444cb9d4175a5b7c09d1-0-1" class="displacy-label" startOffset="50%" side="left" fill="currentColor" text-anchor="middle" > det </textPath> </text> <path class="displacy-arrowhead" d="M420,266.5 L412,254.5 428,254.5" fill="currentColor" ></path> </g> <g class="displacy-arrow"> <path class="displacy-arc" id="arrow-c3124cc3e661444cb9d4175a5b7c09d1-0-2" stroke-width="2px" d="M595,264.5 C595,177.0 740.0,177.0 740.0,264.5" fill="none" stroke="currentColor" ></path> <text dy="1.25em" style="font-size: 0.8em; letter-spacing: 1px"> <textPath xlink:href="#arrow-c3124cc3e661444cb9d4175a5b7c09d1-0-2" class="displacy-label" startOffset="50%" side="left" fill="currentColor" text-anchor="middle" > compound </textPath> </text> <path class="displacy-arrowhead" d="M595,266.5 L587,254.5 603,254.5" fill="currentColor" ></path> </g> <g class="displacy-arrow"> <path class="displacy-arc" id="arrow-c3124cc3e661444cb9d4175a5b7c09d1-0-3" stroke-width="2px" d="M245,264.5 C245,2.0 750.0,2.0 750.0,264.5" fill="none" stroke="currentColor" ></path> <text dy="1.25em" style="font-size: 0.8em; letter-spacing: 1px"> <textPath xlink:href="#arrow-c3124cc3e661444cb9d4175a5b7c09d1-0-3" class="displacy-label" 
startOffset="50%" side="left" fill="currentColor" text-anchor="middle" > dobj </textPath> </text> <path class="displacy-arrowhead" d="M750.0,266.5 L758.0,254.5 742.0,254.5" fill="currentColor" ></path> </g> </svg>
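
Markup like the SVG above is what spaCy's displacy visualizer produces for dependency parses; a minimal sketch, assuming the `en_core_web_sm` pipeline is installed:

import spacy
from spacy import displacy

nlp = spacy.load("en_core_web_sm")  # assumes the small English pipeline
doc = nlp("Smith founded a healthcare company")
svg = displacy.render(doc, style="dep")  # returns SVG markup like the file above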
5,233
32.551282
84
html
spaCy
spaCy-master/website/public/images/displacy-ent-custom.html
<div class="entities" style=" line-height: 2.5; font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Helvetica, Arial, sans-serif, 'Apple Color Emoji', 'Segoe UI Emoji', 'Segoe UI Symbol'; font-size: 18px; " >But <mark class="entity" style=" background: linear-gradient(90deg, #aa9cfc, #fc9ce7); padding: 0.45em 0.6em; margin: 0 0.25em; line-height: 1; border-radius: 0.35em; " >Google <span style=" font-size: 0.8em; font-weight: bold; line-height: 1; border-radius: 0.35em; text-transform: uppercase; vertical-align: middle; margin-left: 0.5rem; " >ORG</span ></mark >is starting from behind. The company made a late push into hardware, and <mark class="entity" style=" background: linear-gradient(90deg, #aa9cfc, #fc9ce7); padding: 0.45em 0.6em; margin: 0 0.25em; line-height: 1; border-radius: 0.35em; " >Apple <span style=" font-size: 0.8em; font-weight: bold; line-height: 1; border-radius: 0.35em; text-transform: uppercase; vertical-align: middle; margin-left: 0.5rem; " >ORG</span ></mark >’s Siri, available on iPhones, and <mark class="entity" style=" background: linear-gradient(90deg, #aa9cfc, #fc9ce7); padding: 0.45em 0.6em; margin: 0 0.25em; line-height: 1; border-radius: 0.35em; " >Amazon <span style=" font-size: 0.8em; font-weight: bold; line-height: 1; border-radius: 0.35em; text-transform: uppercase; vertical-align: middle; margin-left: 0.5rem; " >ORG</span ></mark >’s Alexa software, which runs on its Echo and Dot devices, have clear leads in consumer adoption.</div >
2,351
28.037037
97
html
spaCy
spaCy-master/website/public/images/displacy-ent-snek.html
<div class="entities" style=" line-height: 2.5; font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Helvetica, Arial, sans-serif, 'Apple Color Emoji', 'Segoe UI Emoji', 'Segoe UI Symbol'; font-size: 16px; " > 🌱🌿 <mark class="entity" style=" background: #3dff74; padding: 0.45em 0.6em; margin: 0 0.25em; line-height: 1; border-radius: 0.35em; " >🐍 <span style=" font-size: 0.8em; font-weight: bold; line-height: 1; border-radius: 0.35em; text-transform: uppercase; vertical-align: middle; margin-left: 0.5rem; " >SNEK</span ></mark > ____ 🌳🌲 ____ <mark class="entity" style=" background: #cfc5ff; padding: 0.45em 0.6em; margin: 0 0.25em; line-height: 1; border-radius: 0.35em; " >👨‍🌾 <span style=" font-size: 0.8em; font-weight: bold; line-height: 1; border-radius: 0.35em; text-transform: uppercase; vertical-align: middle; margin-left: 0.5rem; " >HUMAN</span ></mark > 🏘️ </div>
1,476
23.616667
97
html
spaCy
spaCy-master/website/public/images/displacy-ent1.html
<div class="entities" style=" line-height: 2.5; font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Helvetica, Arial, sans-serif, 'Apple Color Emoji', 'Segoe UI Emoji', 'Segoe UI Symbol'; font-size: 16px; " > <mark class="entity" style=" background: #7aecec; padding: 0.45em 0.6em; margin: 0 0.25em; line-height: 1; border-radius: 0.35em; " > Apple <span style=" font-size: 0.8em; font-weight: bold; line-height: 1; border-radius: 0.35em; text-transform: uppercase; vertical-align: middle; margin-left: 0.5rem; " >ORG</span > </mark> is looking at buying <mark class="entity" style=" background: #feca74; padding: 0.45em 0.6em; margin: 0 0.25em; line-height: 1; border-radius: 0.35em; " > U.K. <span style=" font-size: 0.8em; font-weight: bold; line-height: 1; border-radius: 0.35em; text-transform: uppercase; vertical-align: middle; margin-left: 0.5rem; " >GPE</span > </mark> startup for <mark class="entity" style=" background: #e4e7d2; padding: 0.45em 0.6em; margin: 0 0.25em; line-height: 1; border-radius: 0.35em; " > $1 billion <span style=" font-size: 0.8em; font-weight: bold; line-height: 1; border-radius: 0.35em; text-transform: uppercase; vertical-align: middle; margin-left: 0.5rem; " >MONEY</span > </mark> </div>
2,098
23.694118
97
html
spaCy
spaCy-master/website/public/images/displacy-ent2.html
<div class="entities" style=" line-height: 2.5; font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Helvetica, Arial, sans-serif, 'Apple Color Emoji', 'Segoe UI Emoji', 'Segoe UI Symbol'; font-size: 18px; " > When <mark class="entity" style=" background: #aa9cfc; padding: 0.45em 0.6em; margin: 0 0.25em; line-height: 1; border-radius: 0.35em; " > Sebastian Thrun <span style=" font-size: 0.8em; font-weight: bold; line-height: 1; border-radius: 0.35em; text-transform: uppercase; vertical-align: middle; margin-left: 0.5rem; " >PERSON</span > </mark> started working on self-driving cars at <mark class="entity" style=" background: #7aecec; padding: 0.45em 0.6em; margin: 0 0.25em; line-height: 1; border-radius: 0.35em; " > Google <span style=" font-size: 0.8em; font-weight: bold; line-height: 1; border-radius: 0.35em; text-transform: uppercase; vertical-align: middle; margin-left: 0.5rem; " >ORG</span > </mark> in <mark class="entity" style=" background: #bfe1d9; padding: 0.45em 0.6em; margin: 0 0.25em; line-height: 1; border-radius: 0.35em; " > 2007 <span style=" font-size: 0.8em; font-weight: bold; line-height: 1; border-radius: 0.35em; text-transform: uppercase; vertical-align: middle; margin-left: 0.5rem; " >DATE</span > </mark> , few people outside of the company took him seriously. </div>
2,185
24.126437
97
html
spaCy
spaCy-master/website/public/images/displacy-long.html
<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" id="e109581593f245ce9c4ac12f78e0c74e-0" class="displacy" width="1975" height="399.5" style=" max-width: none; height: 399.5px; color: #000000; background: #ffffff; font-family: Arial; " > <text class="displacy-token" fill="currentColor" text-anchor="middle" y="309.5"> <tspan class="displacy-word" fill="currentColor" x="50">Apple</tspan> <tspan class="displacy-tag" dy="2em" fill="currentColor" x="50">PROPN</tspan> </text> <text class="displacy-token" fill="currentColor" text-anchor="middle" y="309.5"> <tspan class="displacy-word" fill="currentColor" x="225">is</tspan> <tspan class="displacy-tag" dy="2em" fill="currentColor" x="225">VERB</tspan> </text> <text class="displacy-token" fill="currentColor" text-anchor="middle" y="309.5"> <tspan class="displacy-word" fill="currentColor" x="400">looking</tspan> <tspan class="displacy-tag" dy="2em" fill="currentColor" x="400">VERB</tspan> </text> <text class="displacy-token" fill="currentColor" text-anchor="middle" y="309.5"> <tspan class="displacy-word" fill="currentColor" x="575">at</tspan> <tspan class="displacy-tag" dy="2em" fill="currentColor" x="575">ADP</tspan> </text> <text class="displacy-token" fill="currentColor" text-anchor="middle" y="309.5"> <tspan class="displacy-word" fill="currentColor" x="750">buying</tspan> <tspan class="displacy-tag" dy="2em" fill="currentColor" x="750">VERB</tspan> </text> <text class="displacy-token" fill="currentColor" text-anchor="middle" y="309.5"> <tspan class="displacy-word" fill="currentColor" x="925">U.K.</tspan> <tspan class="displacy-tag" dy="2em" fill="currentColor" x="925">PROPN</tspan> </text> <text class="displacy-token" fill="currentColor" text-anchor="middle" y="309.5"> <tspan class="displacy-word" fill="currentColor" x="1100">startup</tspan> <tspan class="displacy-tag" dy="2em" fill="currentColor" x="1100">NOUN</tspan> </text> <text class="displacy-token" fill="currentColor" text-anchor="middle" y="309.5"> <tspan class="displacy-word" fill="currentColor" x="1275">for</tspan> <tspan class="displacy-tag" dy="2em" fill="currentColor" x="1275">ADP</tspan> </text> <text class="displacy-token" fill="currentColor" text-anchor="middle" y="309.5"> <tspan class="displacy-word" fill="currentColor" x="1450">$</tspan> <tspan class="displacy-tag" dy="2em" fill="currentColor" x="1450">SYM</tspan> </text> <text class="displacy-token" fill="currentColor" text-anchor="middle" y="309.5"> <tspan class="displacy-word" fill="currentColor" x="1625">1</tspan> <tspan class="displacy-tag" dy="2em" fill="currentColor" x="1625">NUM</tspan> </text> <text class="displacy-token" fill="currentColor" text-anchor="middle" y="309.5"> <tspan class="displacy-word" fill="currentColor" x="1800">billion</tspan> <tspan class="displacy-tag" dy="2em" fill="currentColor" x="1800">NUM</tspan> </text> <g class="displacy-arrow"> <path class="displacy-arc" id="arrow-e109581593f245ce9c4ac12f78e0c74e-0-0" stroke-width="2px" d="M70,264.5 C70,89.5 395.0,89.5 395.0,264.5" fill="none" stroke="currentColor" /> <text dy="1.25em" style="font-size: 0.8em; letter-spacing: 1px"> <textPath xlink:href="#arrow-e109581593f245ce9c4ac12f78e0c74e-0-0" class="displacy-label" startOffset="50%" fill="currentColor" text-anchor="middle" > nsubj </textPath> </text> <path class="displacy-arrowhead" d="M70,266.5 L62,254.5 78,254.5" fill="currentColor" /> </g> <g class="displacy-arrow"> <path class="displacy-arc" id="arrow-e109581593f245ce9c4ac12f78e0c74e-0-1" stroke-width="2px" 
d="M245,264.5 C245,177.0 390.0,177.0 390.0,264.5" fill="none" stroke="currentColor" /> <text dy="1.25em" style="font-size: 0.8em; letter-spacing: 1px"> <textPath xlink:href="#arrow-e109581593f245ce9c4ac12f78e0c74e-0-1" class="displacy-label" startOffset="50%" fill="currentColor" text-anchor="middle" > aux </textPath> </text> <path class="displacy-arrowhead" d="M245,266.5 L237,254.5 253,254.5" fill="currentColor" /> </g> <g class="displacy-arrow"> <path class="displacy-arc" id="arrow-e109581593f245ce9c4ac12f78e0c74e-0-2" stroke-width="2px" d="M420,264.5 C420,177.0 565.0,177.0 565.0,264.5" fill="none" stroke="currentColor" /> <text dy="1.25em" style="font-size: 0.8em; letter-spacing: 1px"> <textPath xlink:href="#arrow-e109581593f245ce9c4ac12f78e0c74e-0-2" class="displacy-label" startOffset="50%" fill="currentColor" text-anchor="middle" > prep </textPath> </text> <path class="displacy-arrowhead" d="M565.0,266.5 L573.0,254.5 557.0,254.5" fill="currentColor" /> </g> <g class="displacy-arrow"> <path class="displacy-arc" id="arrow-e109581593f245ce9c4ac12f78e0c74e-0-3" stroke-width="2px" d="M595,264.5 C595,177.0 740.0,177.0 740.0,264.5" fill="none" stroke="currentColor" /> <text dy="1.25em" style="font-size: 0.8em; letter-spacing: 1px"> <textPath xlink:href="#arrow-e109581593f245ce9c4ac12f78e0c74e-0-3" class="displacy-label" startOffset="50%" fill="currentColor" text-anchor="middle" > pcomp </textPath> </text> <path class="displacy-arrowhead" d="M740.0,266.5 L748.0,254.5 732.0,254.5" fill="currentColor" /> </g> <g class="displacy-arrow"> <path class="displacy-arc" id="arrow-e109581593f245ce9c4ac12f78e0c74e-0-4" stroke-width="2px" d="M945,264.5 C945,177.0 1090.0,177.0 1090.0,264.5" fill="none" stroke="currentColor" /> <text dy="1.25em" style="font-size: 0.8em; letter-spacing: 1px"> <textPath xlink:href="#arrow-e109581593f245ce9c4ac12f78e0c74e-0-4" class="displacy-label" startOffset="50%" fill="currentColor" text-anchor="middle" > compound </textPath> </text> <path class="displacy-arrowhead" d="M945,266.5 L937,254.5 953,254.5" fill="currentColor" /> </g> <g class="displacy-arrow"> <path class="displacy-arc" id="arrow-e109581593f245ce9c4ac12f78e0c74e-0-5" stroke-width="2px" d="M770,264.5 C770,89.5 1095.0,89.5 1095.0,264.5" fill="none" stroke="currentColor" /> <text dy="1.25em" style="font-size: 0.8em; letter-spacing: 1px"> <textPath xlink:href="#arrow-e109581593f245ce9c4ac12f78e0c74e-0-5" class="displacy-label" startOffset="50%" fill="currentColor" text-anchor="middle" > dobj </textPath> </text> <path class="displacy-arrowhead" d="M1095.0,266.5 L1103.0,254.5 1087.0,254.5" fill="currentColor" /> </g> <g class="displacy-arrow"> <path class="displacy-arc" id="arrow-e109581593f245ce9c4ac12f78e0c74e-0-6" stroke-width="2px" d="M770,264.5 C770,2.0 1275.0,2.0 1275.0,264.5" fill="none" stroke="currentColor" /> <text dy="1.25em" style="font-size: 0.8em; letter-spacing: 1px"> <textPath xlink:href="#arrow-e109581593f245ce9c4ac12f78e0c74e-0-6" class="displacy-label" startOffset="50%" fill="currentColor" text-anchor="middle" > prep </textPath> </text> <path class="displacy-arrowhead" d="M1275.0,266.5 L1283.0,254.5 1267.0,254.5" fill="currentColor" /> </g> <g class="displacy-arrow"> <path class="displacy-arc" id="arrow-e109581593f245ce9c4ac12f78e0c74e-0-7" stroke-width="2px" d="M1470,264.5 C1470,89.5 1795.0,89.5 1795.0,264.5" fill="none" stroke="currentColor" /> <text dy="1.25em" style="font-size: 0.8em; letter-spacing: 1px"> <textPath xlink:href="#arrow-e109581593f245ce9c4ac12f78e0c74e-0-7" class="displacy-label" 
startOffset="50%" fill="currentColor" text-anchor="middle" > quantmod </textPath> </text> <path class="displacy-arrowhead" d="M1470,266.5 L1462,254.5 1478,254.5" fill="currentColor" /> </g> <g class="displacy-arrow"> <path class="displacy-arc" id="arrow-e109581593f245ce9c4ac12f78e0c74e-0-8" stroke-width="2px" d="M1645,264.5 C1645,177.0 1790.0,177.0 1790.0,264.5" fill="none" stroke="currentColor" /> <text dy="1.25em" style="font-size: 0.8em; letter-spacing: 1px"> <textPath xlink:href="#arrow-e109581593f245ce9c4ac12f78e0c74e-0-8" class="displacy-label" startOffset="50%" fill="currentColor" text-anchor="middle" > compound </textPath> </text> <path class="displacy-arrowhead" d="M1645,266.5 L1637,254.5 1653,254.5" fill="currentColor" /> </g> <g class="displacy-arrow"> <path class="displacy-arc" id="arrow-e109581593f245ce9c4ac12f78e0c74e-0-9" stroke-width="2px" d="M1295,264.5 C1295,2.0 1800.0,2.0 1800.0,264.5" fill="none" stroke="currentColor" /> <text dy="1.25em" style="font-size: 0.8em; letter-spacing: 1px"> <textPath xlink:href="#arrow-e109581593f245ce9c4ac12f78e0c74e-0-9" class="displacy-label" startOffset="50%" fill="currentColor" text-anchor="middle" > pobj </textPath> </text> <path class="displacy-arrowhead" d="M1800.0,266.5 L1808.0,254.5 1792.0,254.5" fill="currentColor" /> </g> </svg>
11,592
34.237082
99
html
spaCy
spaCy-master/website/public/images/displacy-long2.html
<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" id="0" class="displacy" width="1275" height="399.5" style=" max-width: none; height: 399.5px; color: #000000; background: #ffffff; font-family: Arial; " > <text class="displacy-token" fill="currentColor" text-anchor="middle" y="309.5"> <tspan class="displacy-word" fill="currentColor" x="50">Autonomous</tspan> <tspan class="displacy-tag" dy="2em" fill="currentColor" x="50">ADJ</tspan> </text> <text class="displacy-token" fill="currentColor" text-anchor="middle" y="309.5"> <tspan class="displacy-word" fill="currentColor" x="225">cars</tspan> <tspan class="displacy-tag" dy="2em" fill="currentColor" x="225">NOUN</tspan> </text> <text class="displacy-token" fill="currentColor" text-anchor="middle" y="309.5"> <tspan class="displacy-word" fill="currentColor" x="400">shift</tspan> <tspan class="displacy-tag" dy="2em" fill="currentColor" x="400">VERB</tspan> </text> <text class="displacy-token" fill="currentColor" text-anchor="middle" y="309.5"> <tspan class="displacy-word" fill="currentColor" x="575">insurance</tspan> <tspan class="displacy-tag" dy="2em" fill="currentColor" x="575">NOUN</tspan> </text> <text class="displacy-token" fill="currentColor" text-anchor="middle" y="309.5"> <tspan class="displacy-word" fill="currentColor" x="750">liability</tspan> <tspan class="displacy-tag" dy="2em" fill="currentColor" x="750">NOUN</tspan> </text> <text class="displacy-token" fill="currentColor" text-anchor="middle" y="309.5"> <tspan class="displacy-word" fill="currentColor" x="925">toward</tspan> <tspan class="displacy-tag" dy="2em" fill="currentColor" x="925">ADP</tspan> </text> <text class="displacy-token" fill="currentColor" text-anchor="middle" y="309.5"> <tspan class="displacy-word" fill="currentColor" x="1100">manufacturers</tspan> <tspan class="displacy-tag" dy="2em" fill="currentColor" x="1100">NOUN</tspan> </text> <g class="displacy-arrow"> <path class="displacy-arc" id="arrow-0-0" stroke-width="2px" d="M70,264.5 C70,177.0 215.0,177.0 215.0,264.5" fill="none" stroke="currentColor" ></path> <text dy="1.25em" style="font-size: 0.8em; letter-spacing: 1px"> <textpath xlink:href="#arrow-0-0" class="displacy-label" startOffset="50%" fill="currentColor" text-anchor="middle" > amod </textpath> </text> <path class="displacy-arrowhead" d="M70,266.5 L62,254.5 78,254.5" fill="currentColor" ></path> </g> <g class="displacy-arrow"> <path class="displacy-arc" id="arrow-0-1" stroke-width="2px" d="M245,264.5 C245,177.0 390.0,177.0 390.0,264.5" fill="none" stroke="currentColor" ></path> <text dy="1.25em" style="font-size: 0.8em; letter-spacing: 1px"> <textpath xlink:href="#arrow-0-1" class="displacy-label" startOffset="50%" fill="currentColor" text-anchor="middle" > nsubj </textpath> </text> <path class="displacy-arrowhead" d="M245,266.5 L237,254.5 253,254.5" fill="currentColor" ></path> </g> <g class="displacy-arrow"> <path class="displacy-arc" id="arrow-0-2" stroke-width="2px" d="M595,264.5 C595,177.0 740.0,177.0 740.0,264.5" fill="none" stroke="currentColor" ></path> <text dy="1.25em" style="font-size: 0.8em; letter-spacing: 1px"> <textpath xlink:href="#arrow-0-2" class="displacy-label" startOffset="50%" fill="currentColor" text-anchor="middle" > compound </textpath> </text> <path class="displacy-arrowhead" d="M595,266.5 L587,254.5 603,254.5" fill="currentColor" ></path> </g> <g class="displacy-arrow"> <path class="displacy-arc" id="arrow-0-3" stroke-width="2px" d="M420,264.5 C420,89.5 745.0,89.5 745.0,264.5" fill="none" 
stroke="currentColor" ></path> <text dy="1.25em" style="font-size: 0.8em; letter-spacing: 1px"> <textpath xlink:href="#arrow-0-3" class="displacy-label" startOffset="50%" fill="currentColor" text-anchor="middle" > dobj </textpath> </text> <path class="displacy-arrowhead" d="M745.0,266.5 L753.0,254.5 737.0,254.5" fill="currentColor" ></path> </g> <g class="displacy-arrow"> <path class="displacy-arc" id="arrow-0-4" stroke-width="2px" d="M420,264.5 C420,2.0 925.0,2.0 925.0,264.5" fill="none" stroke="currentColor" ></path> <text dy="1.25em" style="font-size: 0.8em; letter-spacing: 1px"> <textpath xlink:href="#arrow-0-4" class="displacy-label" startOffset="50%" fill="currentColor" text-anchor="middle" > prep </textpath> </text> <path class="displacy-arrowhead" d="M925.0,266.5 L933.0,254.5 917.0,254.5" fill="currentColor" ></path> </g> <g class="displacy-arrow"> <path class="displacy-arc" id="arrow-0-5" stroke-width="2px" d="M945,264.5 C945,177.0 1090.0,177.0 1090.0,264.5" fill="none" stroke="currentColor" ></path> <text dy="1.25em" style="font-size: 0.8em; letter-spacing: 1px"> <textpath xlink:href="#arrow-0-5" class="displacy-label" startOffset="50%" fill="currentColor" text-anchor="middle" > pobj </textpath> </text> <path class="displacy-arrowhead" d="M1090.0,266.5 L1098.0,254.5 1082.0,254.5" fill="currentColor" ></path> </g> </svg>
6,927
31.525822
87
html
spaCy
spaCy-master/website/public/images/displacy-span-custom.html
<div class="spans" style=" line-height: 2.5; font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Helvetica, Arial, sans-serif, 'Apple Color Emoji', 'Segoe UI Emoji', 'Segoe UI Symbol'; font-size: 18px; direction: ltr; " > Welcome to the <span style="font-weight: bold; display: inline-block; position: relative"> Bank <span style=" background: #ddd; top: 40px; height: 4px; left: -1px; width: calc(100% + 2px); position: absolute; " > </span> <span style=" background: #ddd; top: 40px; height: 4px; border-top-left-radius: 3px; border-bottom-left-radius: 3px; left: -1px; width: calc(100% + 2px); position: absolute; " > <span style=" background: #ddd; color: #000; top: -0.5em; padding: 2px 3px; position: absolute; font-size: 0.6em; font-weight: bold; line-height: 1; border-radius: 3px; " > BANK </span> </span> </span> <span style="font-weight: bold; display: inline-block; position: relative"> of <span style=" background: #ddd; top: 40px; height: 4px; left: -1px; width: calc(100% + 2px); position: absolute; " > </span> </span> <span style="font-weight: bold; display: inline-block; position: relative"> China <span style=" background: #ddd; top: 40px; height: 4px; left: -1px; width: calc(100% + 2px); position: absolute; " > </span> </span> . </div>
2,252
25.505882
97
html
spaCy
spaCy-master/website/public/images/displacy-span.html
<div class="spans" style=" line-height: 2.5; direction: ltr; font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Helvetica, Arial, sans-serif, 'Apple Color Emoji', 'Segoe UI Emoji', 'Segoe UI Symbol'; font-size: 18px; " > Welcome to the <span style="font-weight: bold; display: inline-block; position: relative"> Bank <span style=" background: #7aecec; top: 40px; height: 4px; left: -1px; width: calc(100% + 2px); position: absolute; " > </span> <span style=" background: #7aecec; top: 40px; height: 4px; border-top-left-radius: 3px; border-bottom-left-radius: 3px; left: -1px; width: calc(100% + 2px); position: absolute; " > <span style=" background: #7aecec; color: #000; top: -0.5em; padding: 2px 3px; position: absolute; font-size: 0.6em; font-weight: bold; line-height: 1; border-radius: 3px; " > ORG </span> </span> </span> <span style="font-weight: bold; display: inline-block; position: relative"> of <span style=" background: #7aecec; top: 40px; height: 4px; left: -1px; width: calc(100% + 2px); position: absolute; " > </span> </span> <span style="font-weight: bold; display: inline-block; position: relative"> China <span style=" background: #7aecec; top: 40px; height: 4px; left: -1px; width: calc(100% + 2px); position: absolute; " > </span> <span style=" background: #feca74; top: 57px; height: 4px; left: -1px; width: calc(100% + 2px); position: absolute; " > </span> <span style=" background: #feca74; top: 57px; height: 4px; border-top-left-radius: 3px; border-bottom-left-radius: 3px; left: -1px; width: calc(100% + 2px); position: absolute; " > <span style=" background: #feca74; color: #000; top: -0.5em; padding: 2px 3px; position: absolute; font-size: 0.6em; font-weight: bold; line-height: 1; border-radius: 3px; " > GPE </span> </span> </span> . </div>
3,355
26.064516
97
html
spaCy
spaCy-master/website/setup/jinja_to_js.py
# Forked from: https://github.com/jonbretman/jinja-to-js
# With additional functionality: in/not in, replace, pprint, round, + for lists,
# rendering empty dicts
# This script is mostly used to generate the JavaScript function for the
# training quickstart widget.
import contextlib
import json
import re
import os
from os import path
from io import StringIO
from jinja2 import Environment, FileSystemLoader, nodes
from pathlib import Path
import srsly
import sys

OPERANDS = {
    "eq": "===",
    "ne": "!==",
    "lt": " < ",
    "gt": " > ",
    "lteq": " <= ",
    "gteq": " >= ",
}

DICT_ITER_METHODS = ("iteritems", "items", "values", "keys")

STATE_DEFAULT = 0
STATE_EXECUTING = 1
STATE_INTERPOLATING = 2

LOOP_HELPER_INDEX = "index"
LOOP_HELPER_INDEX_0 = "index0"
LOOP_HELPER_FIRST = "first"
LOOP_HELPER_LAST = "last"
LOOP_HELPER_LENGTH = "length"
LOOP_HELPERS = (
    LOOP_HELPER_INDEX,
    LOOP_HELPER_INDEX_0,
    LOOP_HELPER_FIRST,
    LOOP_HELPER_LAST,
    LOOP_HELPER_LENGTH,
)


def amd_format(dependencies, template_function):
    result = "define(["
    result += ",".join('"{0}"'.format(x[0]) for x in dependencies)
    result += "], function ("
    result += ",".join(x[1] for x in dependencies)
    result += ") { return "
    result += template_function
    result += "; });"
    return result


def commonjs_format(dependencies, template_function):
    result = "".join('var {0} = require("{1}");'.format(y, x) for x, y in dependencies)
    result += "module.exports = {0};".format(template_function)
    return result


def es6_format(dependencies, template_function):
    result = "".join('import {0} from "{1}";'.format(y, x) for x, y in dependencies)
    result += "export default {0}".format(template_function)
    return result


JS_MODULE_FORMATS = {
    None: lambda dependencies, template_function: template_function,
    "amd": amd_format,
    "commonjs": commonjs_format,
    "es6": es6_format,
}

# This string has to double all the '{' and '}' due to Python's string formatting.
# See - https://docs.python.org/2/library/string.html#formatstrings
TEMPLATE_WRAPPER = """
function {function_name}(ctx) {{
    var __result = "";
    var __tmp;
    var __runtime = jinjaToJS.runtime;
    var __filters = jinjaToJS.filters;
    var __globals = jinjaToJS.globals;
    var context = jinjaToJS.createContext(ctx);
    {template_code}
    return __result;
}}
"""


class ExtendsException(Exception):
    """
    Raised when an {% extends %} is encountered. At this point the parent template
    is loaded and all blocks defined in the current template passed to it.
    """

    pass


@contextlib.contextmanager
def option(current_kwargs, **kwargs):
    """
    Context manager for temporarily setting a keyword argument and
    then restoring it to whatever it was before.
    """
    tmp_kwargs = dict((key, current_kwargs.get(key)) for key, value in kwargs.items())
    current_kwargs.update(kwargs)
    yield
    current_kwargs.update(tmp_kwargs)


def is_method_call(node, method_name):
    """
    Returns True if `node` is a method call for `method_name`. `method_name`
    can be either a string or an iterable of strings.
    """
    if not isinstance(node, nodes.Call):
        return False
    if isinstance(node.node, nodes.Getattr):
        # e.g. foo.bar()
        method = node.node.attr
    elif isinstance(node.node, nodes.Name):
        # e.g. bar()
        method = node.node.name
    elif isinstance(node.node, nodes.Getitem):
        # e.g. foo["bar"]()
        method = node.node.arg.value
    else:
        return False
    if isinstance(method_name, (list, tuple)):
        return method in method_name
    return method == method_name


def is_loop_helper(node):
    """
    Returns True if `node` is a loop helper, e.g. {{ loop.index }} or {{ loop.first }}.
    """
    return (
        hasattr(node, "node")
        and isinstance(node.node, nodes.Name)
        and node.node.name == "loop"
    )


def temp_var_names_generator():
    x = 0
    while True:
        yield "__$%s" % x
        x += 1


class JinjaToJS(object):
    def __init__(
        self,
        template_root,
        template_name,
        js_module_format=None,
        runtime_path="jinja-to-js",
        include_prefix="",
        include_ext="",
        child_blocks=None,
        dependencies=None,
        custom_filters=None,
    ):
        """
        Args:
            template_root (str): The path to where templates should be loaded from.
            template_name (str): The name of the template to compile (relative to
                `template_root`).
            js_module_format (str, optional): The JavaScript module format to use.
                One of ('amd', 'commonjs', 'es6')
            runtime_path (str, optional): If `js_module_format` is specified then
                the JavaScript runtime will be imported using the appropriate method.
                It defaults to assuming it will be imported from `node_modules` but
                you can change it using this option.
            include_prefix (str, optional): If using the `amd` module format you can
                use this option to add a prefix to every include path as AMD imports
                are generally relative to the main file, not the module importing.
            include_ext (str, optional): By default any includes will be referenced
                without an extension, as neither AMD, commonJS or ES6 require the
                '.js' extension. If you want to use an extension, say '.template'
                then set this option to a string including the leading '.'
            child_blocks (dict, optional): Used internally when handling templates
                that extend other templates.
            dependencies (list of tuple, optional): Used internally when handling
                templates that extend other templates.
            custom_filters (list of str, optional): List of custom filters which
                should be allowed. These may be filters supported by Jinja but not
                supported by jinja-to-js. These filters MUST be registered with the
                jinja-to-js JS runtime.
        """
        self.environment = Environment(
            loader=FileSystemLoader(template_root),
            autoescape=True,
        )
        self.output = StringIO()
        self.stored_names = set()
        self.temp_var_names = temp_var_names_generator()
        self.state = STATE_DEFAULT
        self.child_blocks = child_blocks or {}
        self.dependencies = dependencies or []
        self._runtime_function_cache = []
        self.js_module_format = js_module_format
        self.runtime_path = runtime_path
        self.include_prefix = include_prefix
        self.include_ext = include_ext
        self.template_root = template_root
        self.template_name = template_name
        self.custom_filters = custom_filters or []
        # The name of the JavaScript function that will output this template. By using a named
        # function the template can call itself which is required to support recursive includes.
        self.js_function_name = "template" + "".join(
            x.title()
            for x in re.split(r"[^\w]|_", path.splitext(self.template_name)[0])
        )
        self.context_name = "context"
        self._add_dependency(self.runtime_path, "jinjaToJS")
        # Jinja2 doesn't accept Windows filepaths
        if os.name == "nt":
            self.template_name = self.template_name.replace(os.pathsep, "/")
        template_string, template_path, _ = self.environment.loader.get_source(
            self.environment, self.template_name
        )
        # It is assumed that this will be the absolute path to the template. It is
        # used to work out related paths for includes.
        self.template_path = template_path
        if self.js_module_format not in JS_MODULE_FORMATS.keys():
            raise ValueError(
                "The js_module_format option must be one of: %s"
                % JS_MODULE_FORMATS.keys()
            )
        self.ast = self.environment.parse(template_string)
        try:
            for node in self.ast.body:
                self._process_node(node)
        except ExtendsException:
            pass

    def get_output(self):
        """
        Returns the generated JavaScript code.

        Returns:
            str
        """
        # generate the JS function string
        template_function = TEMPLATE_WRAPPER.format(
            function_name=self.js_function_name,
            template_code=self.output.getvalue(),
        ).strip()
        # get the correct module format template
        module_format = JS_MODULE_FORMATS[self.js_module_format]
        # generate the module code
        return module_format(self.dependencies, template_function)

    def _get_dependency_var_name(self, dependency):
        """
        Returns the variable name assigned to the given dependency or None if the
        dependency has not yet been registered.

        Args:
            dependency (str): The dependency that needs to be imported.

        Returns:
            str or None
        """
        for dep_path, var_name in self.dependencies:
            if dep_path == dependency:
                return var_name

    def _add_dependency(self, dependency, var_name=None):
        """
        Adds the given dependency and returns the variable name to use to access it.
        If `var_name` is not given then a random one will be created.

        Args:
            dependency (str):
            var_name (str, optional):

        Returns:
            str
        """
        if var_name is None:
            var_name = next(self.temp_var_names)
        # Don't add duplicate dependencies
        if (dependency, var_name) not in self.dependencies:
            self.dependencies.append((dependency, var_name))
        return var_name

    def _process_node(self, node, **kwargs):
        node_name = node.__class__.__name__.lower()
        handler = getattr(self, "_process_" + node_name, None)
        if callable(handler):
            handler(node, **kwargs)
        else:
            raise Exception(f"Unknown node {node} ({node_name})")

    def _process_extends(self, node, **kwargs):
        """
        Processes an extends block e.g. `{% extends "some/template.jinja" %}`
        """
        # find all the blocks in this template
        for b in self.ast.find_all(nodes.Block):
            # if not already in `child_blocks` then this is the first time a
            # block with this name has been encountered.
            if b.name not in self.child_blocks:
                self.child_blocks[b.name] = b
            else:
                # otherwise we have seen this block before, so we need to find the last
                # super_block and add the block from this template to the end.
                block = self.child_blocks.get(b.name)
                while hasattr(block, "super_block"):
                    block = block.super_block
                block.super_block = b
        # load the parent template
        parent_template = JinjaToJS(
            template_root=self.template_root,
            template_name=node.template.value,
            js_module_format=self.js_module_format,
            runtime_path=self.runtime_path,
            include_prefix=self.include_prefix,
            include_ext=self.include_ext,
            child_blocks=self.child_blocks,
            dependencies=self.dependencies,
        )
        # add the parent template's output to the current output
        self.output.write(parent_template.output.getvalue())
        # Raise an exception so we stop parsing this template
        raise ExtendsException

    def _process_block(self, node, **kwargs):
        """
        Processes a block e.g. `{% block my_block %}{% endblock %}`
        """
        # check if this node already has a 'super_block' attribute
        if not hasattr(node, "super_block"):
            # since it doesn't it must be the last block in the inheritance chain
            node.super_block = None
            # see if there has been a child block defined - if there is this
            # will be the first block in the inheritance chain
            child_block = self.child_blocks.get(node.name)
            if child_block:
                # we have child nodes so we need to set `node` as the
                # super of the last one in the chain
                last_block = child_block
                while hasattr(last_block, "super_block"):
                    last_block = child_block.super_block
                # once we have found it, set this node as its super block
                last_block.super_block = node
                # this is the node we want to process as it's the first in the inheritance chain
                node = child_block
        # process the block passing its super along, if this block
        # calls super() it will be handled by `_process_call`
        for n in node.body:
            self._process_node(n, super_block=node.super_block, **kwargs)

    def _process_output(self, node, **kwargs):
        """
        Processes an output node, which will contain things like
        `Name` and `TemplateData` nodes.
        """
        for n in node.nodes:
            self._process_node(n, **kwargs)

    def _process_templatedata(self, node, **_):
        """
        Processes a `TemplateData` node, this is just a bit of as-is text
        to be written to the output.
        """
        # escape double quotes
        value = re.sub('"', r'\\"', node.data)
        # escape new lines
        value = re.sub("\n", r"\\n", value)
        # append value to the result
        self.output.write('__result += "' + value + '";')

    def _process_name(self, node, **kwargs):
        """
        Processes a `Name` node. Some examples of `Name` nodes:
            {{ foo }} -> 'foo' is a Name
            {% if foo %} -> 'foo' is a Name
        """
        with self._interpolation():
            with self._python_bool_wrapper(**kwargs):
                if node.name not in self.stored_names and node.ctx != "store":
                    self.output.write(self.context_name)
                    self.output.write(".")
                if node.ctx == "store":
                    self.stored_names.add(node.name)
                self.output.write(node.name)

    def _process_dict(self, node, **kwargs):
        with self._interpolation():
            with self._python_bool_wrapper(**kwargs):
                if node.items:
                    err = f"Can't process non-empty dict in expression: {node}"
                    raise ValueError(err)
                self.output.write("{}")

    def _process_getattr(self, node, **kwargs):
        """
        Processes a `Getattr` node e.g. {{ foo.bar }}
        """
        with self._interpolation():
            with self._python_bool_wrapper(**kwargs) as new_kwargs:
                if is_loop_helper(node):
                    self._process_loop_helper(node, **new_kwargs)
                else:
                    self._process_node(node.node, **new_kwargs)
                    self.output.write(".")
                    self.output.write(node.attr)

    def _process_getitem(self, node, **kwargs):
        """
        Processes a `Getitem` node e.g. {{ foo["bar"] }}
        """
        with self._interpolation():
            with self._python_bool_wrapper(**kwargs) as new_kwargs:
                self._process_node(node.node, **new_kwargs)
                if isinstance(node.arg, nodes.Slice):
                    self.output.write(".slice(")
                    if node.arg.step is not None:
                        raise Exception("The step argument is not supported when slicing.")
                    if node.arg.start is None:
                        self.output.write("0")
                    else:
                        self._process_node(node.arg.start, **new_kwargs)
                    if node.arg.stop is None:
                        self.output.write(")")
                    else:
                        self.output.write(",")
                        self._process_node(node.arg.stop, **new_kwargs)
                        self.output.write(")")
                else:
                    self.output.write("[")
                    self._process_node(node.arg, **new_kwargs)
                    self.output.write("]")

    def _process_for(self, node, **kwargs):
        """
        Processes a for loop. e.g.
            {% for number in numbers %}
                {{ number }}
            {% endfor %}
            {% for key, value in somemap.items() %}
                {{ key }} -> {{ value }}
            {% endfor %}
        """
        # since a for loop can introduce new names into the context
        # we need to remember the ones that existed outside the loop
        previous_stored_names = self.stored_names.copy()
        with self._execution():
            self.output.write("__runtime.each(")
            if is_method_call(node.iter, dict.keys.__name__):
                self.output.write("Object.keys(")
            self._process_node(node.iter, **kwargs)
            if is_method_call(node.iter, dict.keys.__name__):
                self.output.write(")")
            self.output.write(",")
            self.output.write("function")
            self.output.write("(")
            # javascript iterations put the value first, then the key
            if isinstance(node.target, nodes.Tuple):
                if len(node.target.items) > 2:
                    raise Exception("De-structuring more than 2 items is not supported.")
                for i, item in enumerate(reversed(node.target.items)):
                    self._process_node(item, **kwargs)
                    if i < len(node.target.items) - 1:
                        self.output.write(",")
            else:
                self._process_node(node.target, **kwargs)
            self.output.write(")")
            self.output.write("{")
            if node.test:
                self.output.write("if (!(")
                self._process_node(node.test, **kwargs)
                self.output.write(")) { return; }")
        assigns = (
            node.target.items if isinstance(node.target, nodes.Tuple) else [node.target]
        )
        with self._scoped_variables(assigns, **kwargs):
            for n in node.body:
                self._process_node(n, **kwargs)
        with self._execution():
            self.output.write("}")
            self.output.write(")")
            self.output.write(";")
        # restore the stored names
        self.stored_names = previous_stored_names

    def _process_if(self, node, execute_end=None, **kwargs):
        """
        Processes an if block e.g. `{% if foo %} do something {% endif %}`
        """
        with self._execution():
            self.output.write("if")
            self.output.write("(")
            with option(kwargs, use_python_bool_wrapper=True):
                self._process_node(node.test, **kwargs)
            self.output.write(")")
            self.output.write("{")
        # We accept an `execute_end` function as a keyword argument as this function is
        # recursive in the case of something like if-elif-elif-else. In these cases this
        # invocation of this function may have to close execution opened by a previous
        # invocation of this function.
        if execute_end:
            execute_end()
        # body
        for n in node.body:
            self._process_node(n, **kwargs)
        if not node.else_ and not node.elif_:
            # no else - just close the if
            with self._execution():
                self.output.write("}")
        else:
            # either an else or an elif
            with self._execution() as execute_end:
                self.output.write("}")
                self.output.write(" else ")
                # check for elif
                for n in node.elif_:
                    self._process_node(n, execute_end=execute_end, **kwargs)
                if node.elif_ and node.else_:
                    self.output.write(" else ")
                # open up the body
                self.output.write("{")
            # process the body of the else
            for n in node.else_:
                self._process_node(n, **kwargs)
            # close the body
            with self._execution():
                self.output.write("}")

    def _process_condexpr(self, node, **kwargs):
        with self._interpolation():
            self.output.write("(")
            with self._python_bool_wrapper(**kwargs) as new_kwargs:
                self._process_node(node.test, **new_kwargs)
            self.output.write(" ? ")
            self._process_node(node.expr1, **kwargs)
            self.output.write(" : ")
            self._process_node(node.expr2, **kwargs)
            self.output.write(")")

    def _process_not(self, node, **kwargs):
        self.output.write("!")
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self._process_node(node.node, **new_kwargs)

    def _process_or(self, node, **kwargs):
        self._process_node(node.left, **kwargs)
        self.output.write(" || ")
        self._process_node(node.right, **kwargs)

    def _process_and(self, node, **kwargs):
        self._process_node(node.left, **kwargs)
        self.output.write(" && ")
        self._process_node(node.right, **kwargs)

    def _process_tuple(self, node, **kwargs):
        self.output.write("[")
        for i, item in enumerate(node.items):
            self._process_node(item, **kwargs)
            if i < len(node.items) - 1:
                self.output.write(",")
        self.output.write("]")

    def _process_call(self, node, super_block=None, **kwargs):
        if is_method_call(node, DICT_ITER_METHODS):
            # special case for dict methods
            self._process_node(node.node.node, **kwargs)
        elif is_method_call(node, "super"):
            # special case for the super() method which is available inside blocks
            if not super_block:
                raise Exception("super() called outside of a block with a parent.")
            self._process_node(super_block, **kwargs)
        else:
            # just a normal function call on a context variable
            with self._interpolation():
                with self._python_bool_wrapper(**kwargs) as new_kwargs:
                    self._process_node(node.node, **new_kwargs)
                    self.output.write("(")
                    self._process_args(node, **new_kwargs)
                    self.output.write(")")
                    # only output the semi-colon if we are not interpolating
                    if self.state != STATE_INTERPOLATING:
                        self.output.write("")

    def _process_filter(self, node, **kwargs):
        method_name = getattr(self, "_process_filter_%s" % node.name, None)
        if callable(method_name):
            method_name(node, **kwargs)
        elif node.name in self.custom_filters:
            with self._interpolation(safe=True):
                with self._python_bool_wrapper(**kwargs) as new_kwargs:
                    self.output.write("__filters.%s(" % node.name)
                    self._process_node(node.node, **new_kwargs)
                    if getattr(node, "args", None):
                        self.output.write(",")
                        self._process_args(node, **new_kwargs)
                    self.output.write(")")
        else:
            raise Exception("Unsupported filter: %s" % node.name)

    def _process_filter_safe(self, node, **kwargs):
        with self._interpolation(safe=True):
            with self._python_bool_wrapper(**kwargs) as new_kwargs:
                self._process_node(node.node, **new_kwargs)

    def _process_filter_capitalize(self, node, **kwargs):
        with self._interpolation():
            with self._python_bool_wrapper(**kwargs) as new_kwargs:
                self.output.write("__filters.capitalize(")
                self._process_node(node.node, **new_kwargs)
                self.output.write(")")

    def _process_filter_abs(self, node, **kwargs):
        with self._interpolation():
            with self._python_bool_wrapper(**kwargs) as new_kwargs:
                self.output.write("Math.abs(")
                self._process_node(node.node, **new_kwargs)
                self.output.write(")")

    def _process_filter_replace(self, node, **kwargs):
        # We're getting a quoted string from Python/Jinja as the pattern to
        # replace, but to replace all occurrences in JS, we typically need a
        # regex, which would be annoying to convert. So we're using split/join
        # instead here.
        with self._interpolation():
            with self._python_bool_wrapper(**kwargs) as new_kwargs:
                self._process_node(node.node, **new_kwargs)
                self.output.write(".split(")
                self._process_node(node.args[0], **new_kwargs)
                self.output.write(").join(")
                self._process_node(node.args[1], **new_kwargs)
                self.output.write(")")

    def _process_filter_pprint(self, node, **kwargs):
        with self._interpolation():
            with self._python_bool_wrapper(**kwargs) as new_kwargs:
                self.output.write("JSON.stringify(")
                self._process_node(node.node, **new_kwargs)
                self.output.write(")")

    def _process_filter_attr(self, node, **kwargs):
        with self._interpolation():
            with self._python_bool_wrapper(**kwargs) as new_kwargs:
                self._process_node(node.node, **new_kwargs)
                self.output.write("[")
                self._process_node(node.args[0], **new_kwargs)
                self.output.write("]")

    def _process_filter_batch(self, node, **kwargs):
        with self._interpolation():
            with self._python_bool_wrapper(**kwargs) as new_kwargs:
                self.output.write("__filters.batch(")
                self._process_node(node.node, **new_kwargs)
                self.output.write(",")
                self._process_args(node, **new_kwargs)
                self.output.write(")")

    def _process_filter_default(self, node, **kwargs):
        with self._interpolation():
            with self._python_bool_wrapper(**kwargs) as new_kwargs:
                self.output.write("__filters.default(")
                self._process_node(node.node, **new_kwargs)
                if node.args:
                    self.output.write(",")
                    self._process_args(node, **new_kwargs)
                self.output.write(")")

    def _process_filter_first(self, node, **kwargs):
        with self._interpolation():
            with self._python_bool_wrapper(**kwargs) as new_kwargs:
                self.output.write("__filters.first(")
                self._process_node(node.node, **new_kwargs)
                self.output.write(")")

    def _process_filter_int(self, node, **kwargs):
        with self._interpolation():
            with self._python_bool_wrapper(**kwargs) as new_kwargs:
                self.output.write("__filters.int(")
                self._process_node(node.node, **new_kwargs)
                if node.args:
                    self.output.write(",")
                    self._process_args(node, **new_kwargs)
                self.output.write(")")

    def _process_filter_round(self, node, **kwargs):
        with self._interpolation():
            with self._python_bool_wrapper(**kwargs) as new_kwargs:
                self.output.write("Math.round((")
                self._process_node(node.node, **new_kwargs)
                self.output.write("+ Number.EPSILON) * 10**")
                self._process_node(node.args[0], **new_kwargs)
                self.output.write(") / 10**")
                self._process_node(node.args[0], **new_kwargs)

    def _process_filter_last(self, node, **kwargs):
        with self._interpolation():
            with self._python_bool_wrapper(**kwargs) as new_kwargs:
                self.output.write("__filters.last(")
                self._process_node(node.node, **new_kwargs)
                self.output.write(")")

    def _process_filter_length(self, node, **kwargs):
        with self._interpolation():
            with self._python_bool_wrapper(**kwargs) as new_kwargs:
                self.output.write("__filters.size(")
                self._process_node(node.node, **new_kwargs)
                self.output.write(")")

    def _process_filter_lower(self, node, **kwargs):
        with self._interpolation():
            with self._python_bool_wrapper(**kwargs) as new_kwargs:
                self.output.write("(")
                self._process_node(node.node, **new_kwargs)
                self.output.write(' + "").toLowerCase()')

    def _process_filter_slice(self, node, **kwargs):
        with self._interpolation():
            with self._python_bool_wrapper(**kwargs) as new_kwargs:
                self.output.write("__filters.slice(")
                self._process_node(node.node, **new_kwargs)
                self.output.write(",")
                self._process_args(node, **new_kwargs)
                self.output.write(")")

    def _process_filter_title(self, node, **kwargs):
        with self._interpolation():
            with self._python_bool_wrapper(**kwargs) as new_kwargs:
                self.output.write("__filters.title(")
                self._process_node(node.node, **new_kwargs)
                self.output.write(")")

    def _process_filter_trim(self, node, **kwargs):
        with self._interpolation():
            with self._python_bool_wrapper(**kwargs) as new_kwargs:
                self.output.write("(")
                self._process_node(node.node, **new_kwargs)
                self.output.write(' + "").trim()')

    def _process_filter_upper(self, node, **kwargs):
        with self._interpolation():
            with self._python_bool_wrapper(**kwargs) as new_kwargs:
                self.output.write("(")
                self._process_node(node.node, **new_kwargs)
                self.output.write(' + "").toUpperCase()')

    def _process_filter_truncate(self, node, **kwargs):
        with self._interpolation():
            with self._python_bool_wrapper(**kwargs) as new_kwargs:
                self.output.write("__filters.truncate(")
                self._process_node(node.node, **new_kwargs)
                self.output.write(",")
                self._process_args(node, **new_kwargs)
                self.output.write(")")

    def _process_assign(self, node, **kwargs):
        with self._execution():
            self.output.write("var ")
            self._process_node(node.target, **kwargs)
            self.output.write(" = ")
            self._process_node(node.node, **kwargs)
            self.output.write(";")

    def _process_with(self, node, **kwargs):
        # keep a copy of the stored names before the scope
        previous_stored_names = self.stored_names.copy()
        # assigns in the with tag
        # e.g. {% with var = "something" %}
        assigns_in_tag = [nodes.Assign(t, v) for t, v in zip(node.targets, node.values)]
        # assigns in the with body
        # e.g. {% set name = 'John' %}
        assigns_in_body = [x for x in node.body if isinstance(x, nodes.Assign)]
        # remove assigns from the body
        node.body = [x for x in node.body if not isinstance(x, nodes.Assign)]
        # get a list of all the assigns in this with block
        # both on the tag, and within the body of the block
        all_assigns = assigns_in_tag + assigns_in_body
        with self._execution():
            self.output.write("(function () {")
        with self._scoped_variables(all_assigns, **kwargs):
            for node in node.body:
                self._process_node(node, **kwargs)
        with self._execution():
            self.output.write("})();")
        # restore previous stored names
        self.stored_names = previous_stored_names

    def _process_compare(self, node, **kwargs):
        if len(node.ops) > 1:
            raise Exception("Multiple operands are not supported.")
        operand = node.ops[0]
        is_equality = operand.op in ("eq", "ne")
        left_hand_is_const = isinstance(node.expr, nodes.Const)
        right_hand_is_const = isinstance(operand.expr, nodes.Const)
        # If the operand is equality and neither the left or right hand side are
        # constants then we will need to use the JavaScript deep equals function.
        # Ideally we want to avoid using this as it is quite a big function.
        use_is_equal_function = is_equality and not (
            left_hand_is_const or right_hand_is_const
        )
        with option(kwargs, use_python_bool_wrapper=False):
            if operand.op == "in" or operand.op == "notin":
                # Special case for "in" operator
                if operand.op == "notin":
                    self.output.write("!")
                self._process_node(operand.expr, **kwargs)
                self.output.write(".includes(")
                self._process_node(node.expr, **kwargs)
                self.output.write(")")
            else:
                if use_is_equal_function:
                    if operand.op == "ne":
                        self.output.write("!")
                    self.output.write("__runtime.isEqual(")
                self._process_node(node.expr, **kwargs)
                if use_is_equal_function:
                    self.output.write(",")
                else:
                    self.output.write(OPERANDS.get(operand.op))
                self._process_node(operand.expr, **kwargs)
                if use_is_equal_function:
                    self.output.write(")")

    def _process_operand(self, node, **kwargs):
        self.output.write(OPERANDS.get(node.op))
        self._process_node(node.expr, **kwargs)

    def _process_const(self, node, **_):
        with self._interpolation():
            self.output.write(json.dumps(node.value))

    def _process_nonetype(self, node, **_):
        with self._interpolation():
            self.output.write("null")

    def _process_neg(self, node, **kwargs):
        with self._interpolation():
            self.output.write("-")
            self._process_node(node.node, **kwargs)

    def _process_list(self, node, **kwargs):
        self.output.write("[")
        for i, item in enumerate(node.items):
            self._process_node(item, **kwargs)
            if i < len(node.items) - 1:
                self.output.write(",")
        self.output.write("]")

    def _process_test(self, node, **kwargs):
        with option(kwargs, use_python_bool_wrapper=False):
            method_name = getattr(self, "_process_test_%s" % node.name, None)
            if callable(method_name):
                method_name(node, **kwargs)
            else:
                raise Exception("Unsupported test: %s" % node.name)

    def _process_test_defined(self, node, **kwargs):
        self.output.write("(typeof ")
        self._process_node(node.node, **kwargs)
        self.output.write(' !== "undefined")')

    def _process_test_undefined(self, node, **kwargs):
        self._process_node(node.node, **kwargs)
        self.output.write(" === undefined")

    def _process_test_callable(self, node, **kwargs):
        self.output.write("__runtime.type(")
        self._process_node(node.node, **kwargs)
        self.output.write(') === "Function"')

    def _process_test_divisibleby(self, node, **kwargs):
        self._process_node(node.node, **kwargs)
        self.output.write(" % ")
        self._process_node(node.args[0], **kwargs)
        self.output.write(" === 0")

    def _process_test_even(self, node, **kwargs):
        self._process_node(node.node, **kwargs)
        self.output.write(" % 2 === 0")

    def _process_test_odd(self, node, **kwargs):
        self._process_node(node.node, **kwargs)
        self.output.write(" % 2 === 1")

    def _process_test_none(self, node, **kwargs):
        self._process_node(node.node, **kwargs)
        self.output.write(" === null")

    def _process_test_upper(self, node, **kwargs):
        self._process_node(node.node, **kwargs)
        self.output.write(".toUpperCase() === ")
        self._process_node(node.node, **kwargs)

    def _process_test_lower(self, node, **kwargs):
        self._process_node(node.node, **kwargs)
        self.output.write(".toLowerCase() === ")
        self._process_node(node.node, **kwargs)

    def _process_test_string(self, node, **kwargs):
        self.output.write("__runtime.type(")
        self._process_node(node.node, **kwargs)
        self.output.write(') === "String"')

    def _process_test_mapping(self, node, **kwargs):
        self.output.write("__runtime.type(")
        self._process_node(node.node, **kwargs)
        self.output.write(') === "Object"')

    def _process_test_number(self, node, **kwargs):
        self.output.write("(__runtime.type(")
        self._process_node(node.node, **kwargs)
        self.output.write(') === "Number" && !isNaN(')
        self._process_node(node.node, **kwargs)
        self.output.write("))")

    def _process_include(self, node, **kwargs):
        with self._interpolation(safe=True):
            include_path = node.template.value
            if include_path == self.template_name:
                # template is including itself
                include_var_name = self.js_function_name
            else:
                if self.include_prefix:
                    include_path = self.include_prefix + node.template.value
                elif (
                    self.js_module_format in ("es6", "commonjs")
                    and self.template_name
                ):
                    _, absolute_include_path, _ = self.environment.loader.get_source(
                        self.environment, node.template.value
                    )
                    include_path = os.path.relpath(
                        absolute_include_path, os.path.dirname(self.template_path)
                    )
                    if not include_path.startswith("."):
                        include_path = "./" + include_path
                    # Jinja2 doesn't accept Windows filepaths (but does output them!)
                    if os.name == "nt":
                        include_path = include_path.replace(os.pathsep, "/")
                include_path = path.splitext(include_path)[0] + self.include_ext
                include_var_name = self._get_dependency_var_name(include_path)
                if not include_var_name:
                    include_var_name = self._add_dependency(include_path)
            if self.js_module_format is None:
                self.output.write('jinjaToJS.include("')
                self.output.write(include_path)
                self.output.write('");')
            else:
                self.output.write(include_var_name)
                self.output.write("(")
                self.output.write(self.context_name)
                self.output.write(")")

    def _process_add(self, node, **kwargs):
        # Handle + operator for lists, which behaves differently in JS. Currently
        # only works if we have an explicit list node on either side (in which
        # case we assume both are lists).
        if isinstance(node.left, nodes.List) or isinstance(node.right, nodes.List):
            with self._interpolation():
                with self._python_bool_wrapper(**kwargs) as new_kwargs:
                    self._process_node(node.left, **new_kwargs)
                    self.output.write(".concat(")
                    self._process_node(node.right, **new_kwargs)
                    self.output.write(")")
        else:
            self._process_math(node, math_operator=" + ", **kwargs)

    def _process_sub(self, node, **kwargs):
        self._process_math(node, math_operator=" - ", **kwargs)

    def _process_div(self, node, **kwargs):
        self._process_math(node, math_operator=" / ", **kwargs)

    def _process_floordiv(self, node, **kwargs):
        self._process_math(node, math_operator=" / ", function="Math.floor", **kwargs)

    def _process_mul(self, node, **kwargs):
        self._process_math(node, math_operator=" * ", **kwargs)

    def _process_mod(self, node, **kwargs):
        self._process_math(node, math_operator=" % ", **kwargs)

    def _process_math(self, node, math_operator=None, function=None, **kwargs):
        """
        Processes a math node e.g. `Div`, `Sub`, `Add`, `Mul` etc...
        If `function` is provided the expression is wrapped in a call to that function.
        """
        with self._interpolation():
            if function:
                self.output.write(function)
                self.output.write("(")
            self._process_node(node.left, **kwargs)
            self.output.write(math_operator)
            self._process_node(node.right, **kwargs)
            if function:
                self.output.write(")")

    def _process_loop_helper(self, node, **kwargs):
        """
        Processes a loop helper e.g. {{ loop.first }} or {{ loop.index }}
        """
        if node.attr == LOOP_HELPER_INDEX:
            self.output.write("(arguments[1] + 1)")
        elif node.attr == LOOP_HELPER_INDEX_0:
            self.output.write("arguments[1]")
        elif node.attr == LOOP_HELPER_FIRST:
            self.output.write("(arguments[1] == 0)")
        elif node.attr == LOOP_HELPER_LAST:
            self.output.write("(arguments[1] == arguments[2].length - 1)")
        elif node.attr == LOOP_HELPER_LENGTH:
            self.output.write("arguments[2].length")

    def _process_args(self, node, **kwargs):
        args = getattr(node, "args", None)
        if not args:
            return
        for i, item in enumerate(args):
            self._process_node(item, **kwargs)
            if i < len(node.args) - 1:
                self.output.write(",")

    @contextlib.contextmanager
    def _execution(self):
        """
        Context manager for executing some JavaScript inside a template.
        """
        did_start_executing = False
        if self.state == STATE_DEFAULT:
            did_start_executing = True
            self.state = STATE_EXECUTING

        def close():
            if did_start_executing and self.state == STATE_EXECUTING:
                self.state = STATE_DEFAULT

        yield close
        close()

    @contextlib.contextmanager
    def _interpolation(self, safe=False):
        did_start_interpolating = False
        if self.state == STATE_DEFAULT:
            did_start_interpolating = True
            self.output.write('__result += "" + ')
            if safe is not True:
                self.output.write("__runtime.escape")
            self.output.write("((__tmp = (")
            self.state = STATE_INTERPOLATING

        def close():
            if did_start_interpolating and self.state == STATE_INTERPOLATING:
                self.output.write(')) == null ? "" : __tmp);')
                self.state = STATE_DEFAULT

        yield close
        close()

    @contextlib.contextmanager
    def _scoped_variables(self, nodes_list, **kwargs):
        """
        Context manager for creating scoped variables defined by the nodes in
        `nodes_list`. These variables will be added to the context, and when the
        context manager exits the context object will be restored to its
        previous state.
        """
        tmp_vars = []
        for node in nodes_list:
            is_assign_node = isinstance(node, nodes.Assign)
            name = node.target.name if is_assign_node else node.name
            # create a temp variable name
            tmp_var = next(self.temp_var_names)
            # save previous context value
            with self._execution():
                # save the current value of this name
                self.output.write("var %s = %s.%s;" % (tmp_var, self.context_name, name))
                # add new value to context
                self.output.write("%s.%s = " % (self.context_name, name))
                if is_assign_node:
                    self._process_node(node.node, **kwargs)
                else:
                    self.output.write(node.name)
                self.output.write(";")
            tmp_vars.append((tmp_var, name))
        yield
        # restore context
        for tmp_var, name in tmp_vars:
            with self._execution():
                self.output.write("%s.%s = %s;" % (self.context_name, name, tmp_var))

    @contextlib.contextmanager
    def _python_bool_wrapper(self, **kwargs):
        use_python_bool_wrapper = kwargs.get("use_python_bool_wrapper")
        if use_python_bool_wrapper:
            self.output.write("__runtime.boolean(")
        with option(kwargs, use_python_bool_wrapper=False):
            yield kwargs
        if use_python_bool_wrapper:
            self.output.write(")")


def main(template_path, output=None, data_path=None):
    """Convert a jinja2 template to a JavaScript module.

    template_path (Path): Path to the .jinja file.
    output (Optional[Path]): Path to output .js module (stdout if unset).
    data_path (Optional[Path]): Optional JSON or YAML file with additional data
        to be included in the JS module as the exported variable DATA.
    """
    data = "{}"
    if data_path is not None:
        if data_path.suffix in (".yml", ".yaml"):
            data = srsly.read_yaml(data_path)
        else:
            data = srsly.read_json(data_path)
        data = srsly.json_dumps(data)  # dump and load for compactness
    template_path = Path(template_path)
    tpl_file = template_path.parts[-1]
    compiler = JinjaToJS(template_path.parent, tpl_file, js_module_format="es6")
    header = f"// This file was auto-generated by {__file__} based on {tpl_file}"
    data_str = f"export const DATA = {data}"
    result = compiler.get_output()
    if output is not None:
        with output.open("w", encoding="utf8") as f:
            f.write(f"{header}\n{result}\n{data_str}")
        print(f"Updated {output.parts[-1]}")
    else:
        print(result)


if __name__ == "__main__":
    args = sys.argv[1:]
    if not len(args):
        raise ValueError("Need at least one argument: path to .jinja template")
    template_path = Path(args[0])
    output = Path(args[1]) if len(args) > 1 else None
    data_path = Path(args[2]) if len(args) > 2 else None
    main(template_path, output, data_path)
47,269
36.132757
100
py
spaCy
spaCy-master/website/setup/setup.sh
python setup/jinja_to_js.py ../spacy/cli/templates/quickstart_training.jinja src/widgets/quickstart-training-generator.js ../spacy/cli/templates/quickstart_training_recommendations.yml
185
92
184
sh
null
CIRCLe-main/main.py
import torch
import random
from torch import nn, optim
import argparse
import os, importlib
from tqdm import tqdm
import numpy as np
from torch.utils import data
from util import AverageMeter
from dataset import get_fitz_dataloaders

parser = argparse.ArgumentParser(description='DG')
parser.add_argument('--dataset', type=str, default='FitzPatrick17k')
parser.add_argument('--hidden_dim', type=int, default=256)
parser.add_argument('--batch_size', type=int, default=32)
parser.add_argument('--lr', type=float, default=0.001)
parser.add_argument('--weight_decay', type=float, default=0.001)
parser.add_argument('--alpha', type=float, default=0.1)
parser.add_argument('--num_classes', type=int, default=114)
parser.add_argument('--epochs', type=int, default=100)
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--data_dir', type=str, default='../data/fitz17k/images/all/')
parser.add_argument('--gan_path', type=str, default='saved/stargan/')
parser.add_argument('--model', type=str, default='circle')
parser.add_argument('--base', type=str, default='vgg16')
parser.add_argument('--model_save_dir', type=str, default='saved/model/')
parser.add_argument('--use_reg_loss', type=bool, default=True)
flags = parser.parse_args()

if flags.dataset == 'FitzPatrick17k':
    flags.num_classes = 114

# print setup
print('Flags:')
for k, v in sorted(vars(flags).items()):
    print("\t{}: {}".format(k, v))

device = 'cuda'

# set seed
random.seed(flags.seed)
np.random.seed(flags.seed)
torch.manual_seed(flags.seed)
torch.cuda.manual_seed(flags.seed)
torch.cuda.manual_seed_all(flags.seed)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True

# Data loader.
train_loader, val_loader, _ = get_fitz_dataloaders(
    root='../data/fitz17k/images/all/',
    holdout_mode='random_holdout',
    batch_size=flags.batch_size,
    shuffle=False,
    partial_skin_types=[],
    partial_ratio=1.0,
)

# load models
model = importlib.import_module('models.' + flags.model) \
    .Model(flags, flags.hidden_dim, flags.base, use_reg=flags.use_reg_loss).to(device)
optim = torch.optim.SGD(model.parameters(), lr=flags.lr, weight_decay=flags.weight_decay, momentum=0.9)


def to_device(data):
    for i in range(len(data)):
        data[i] = data[i].to(device)
    return data


best_by_val = 0
best_val_acc = 0.0
best_val_loss = float('inf')
best_by_test = 0
best_test_loss = float('inf')

for epoch in range(flags.epochs):
    print('Epoch {}: Best val loss {}, Best val acc {}'.format(epoch, best_val_loss, best_val_acc))
    lossMeter = AverageMeter()
    regMeter = AverageMeter()
    correctMeter = AverageMeter()
    model.train()
    for data in tqdm(train_loader, ncols=75, leave=False):
        data = to_device(data)
        loss, reg, correct = model(*data)
        optim.zero_grad()
        if flags.use_reg_loss:
            (loss + reg).backward()
        else:
            loss.backward()
        optim.step()
        lossMeter.update(loss.detach().item(), data[0].shape[0])
        regMeter.update(reg.detach().item(), data[0].shape[0])
        correctMeter.update(correct.detach().item(), data[0].shape[0])
        del loss, reg, correct
    print('>>> Training: Loss ', lossMeter, ', Reg ', regMeter, ', Acc ', correctMeter)

    vallossMeter = AverageMeter()
    valregMeter = AverageMeter()
    valcorrectMeter = AverageMeter()
    model.eval()
    with torch.no_grad():
        for x, y, d in tqdm(val_loader, ncols=75, leave=False):
            x, y, d = x.to(device), y.to(device), d.to(device)
            loss, reg, correct = model(x, y)
            vallossMeter.update(loss.detach().item(), x.shape[0])
            valregMeter.update(reg.detach().item(), x.shape[0])
            valcorrectMeter.update(correct.detach().item(), x.shape[0])
            del loss, reg, correct
    print('>>> Val: Loss ', vallossMeter, ', Reg ', valregMeter, ', Acc ', valcorrectMeter)

    if valcorrectMeter.float() > best_val_acc:
        best_val_acc = valcorrectMeter.float()
        save_path = os.path.join(flags.model_save_dir, 'epoch{}_acc_{:.3f}.ckpt'.format(epoch, best_val_acc))
        torch.save(model.state_dict(), save_path)
        print('Saved model with highest acc ...')
    torch.cuda.empty_cache()
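One caveat worth flagging in the flags above: argparse's `type=bool` converts any non-empty string to `True`, so passing `--use_reg_loss False` on the command line still enables the regularization loss. A common workaround (a sketch, not part of this repository) is an explicit string-to-bool converter:

```python
import argparse

def str2bool(value):
    # Explicitly map common spellings; note that bool("False") would be True.
    if isinstance(value, bool):
        return value
    if value.lower() in ("yes", "true", "t", "1"):
        return True
    if value.lower() in ("no", "false", "f", "0"):
        return False
    raise argparse.ArgumentTypeError("Boolean value expected.")

# parser.add_argument('--use_reg_loss', type=str2bool, default=True)
```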
4,661
37.528926
109
py
null
CIRCLe-main/solver_stargan.py
from models.stargan import Generator
from models.stargan import Discriminator
import torch
import torch.nn.functional as F
import numpy as np
import os
import time
import datetime
from logger import Logger


class Solver(object):
    """Solver for training and testing StarGAN."""

    def __init__(self, loader, config):
        """Initialize configurations."""
        # Data loader.
        self.loader = loader

        # Model configurations.
        self.c_dim = config.c_dim
        self.image_size = config.image_size
        self.g_conv_dim = config.g_conv_dim
        self.d_conv_dim = config.d_conv_dim
        self.g_repeat_num = config.g_repeat_num
        self.d_repeat_num = config.d_repeat_num
        self.lambda_cls = config.lambda_cls
        self.lambda_rec = config.lambda_rec
        self.lambda_gp = config.lambda_gp

        # Training configurations.
        self.batch_size = config.batch_size
        self.num_iters = config.num_iters
        self.num_iters_decay = config.num_iters_decay
        self.g_lr = config.g_lr
        self.d_lr = config.d_lr
        self.n_critic = config.n_critic
        self.beta1 = config.beta1
        self.beta2 = config.beta2
        self.resume_iters = config.resume_iters

        # Test configurations.
        self.test_iters = config.test_iters

        # Miscellaneous.
        self.use_tensorboard = config.use_tensorboard
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

        # Directories.
        self.model_save_dir = config.model_save_dir
        self.dataset = config.dataset
        if self.use_tensorboard:
            # build_tensorboard() reads self.log_dir, which was otherwise never
            # set; assumed to be present on `config` as in upstream StarGAN.
            self.log_dir = config.log_dir

        # Step size.
        self.log_step = config.log_step
        self.sample_step = config.sample_step
        self.model_save_step = config.model_save_step
        self.lr_update_step = config.lr_update_step

        # Build the model and tensorboard.
        self.build_model()
        if self.use_tensorboard:
            self.build_tensorboard()

    def build_model(self):
        """Create a generator and a discriminator."""
        self.G = Generator(self.g_conv_dim, self.c_dim, self.g_repeat_num)
        self.D = Discriminator(self.image_size, self.d_conv_dim, self.c_dim, self.d_repeat_num)
        self.g_optimizer = torch.optim.Adam(self.G.parameters(), self.g_lr, [self.beta1, self.beta2])
        self.d_optimizer = torch.optim.Adam(self.D.parameters(), self.d_lr, [self.beta1, self.beta2])
        self.print_network(self.G, 'G')
        self.print_network(self.D, 'D')
        self.G.to(self.device)
        self.D.to(self.device)

    def print_network(self, model, name):
        """Print out the network information."""
        num_params = 0
        for p in model.parameters():
            num_params += p.numel()
        print(model)
        print(name)
        print("The number of parameters: {}".format(num_params))

    def restore_model(self, resume_iters=-1):
        """Restore the trained generator and discriminator."""
        print('Loading the trained models from step {}...'.format(resume_iters))
        if resume_iters != -1:
            G_path = os.path.join(self.model_save_dir, 'stargan_{}-G.ckpt'.format(resume_iters))
            D_path = os.path.join(self.model_save_dir, 'stargan_{}-D.ckpt'.format(resume_iters))
        else:
            G_path = os.path.join(self.model_save_dir, 'stargan_last-G.ckpt')
            D_path = os.path.join(self.model_save_dir, 'stargan_last-D.ckpt')
        self.G.load_state_dict(torch.load(G_path, map_location=lambda storage, loc: storage))
        self.D.load_state_dict(torch.load(D_path, map_location=lambda storage, loc: storage))

    def build_tensorboard(self):
        """Build a tensorboard logger."""
        self.logger = Logger(self.log_dir)

    def update_lr(self, g_lr, d_lr):
        """Decay learning rates of the generator and discriminator."""
        for param_group in self.g_optimizer.param_groups:
            param_group['lr'] = g_lr
        for param_group in self.d_optimizer.param_groups:
            param_group['lr'] = d_lr

    def reset_grad(self):
        """Reset the gradient buffers."""
        self.g_optimizer.zero_grad()
        self.d_optimizer.zero_grad()

    def denorm(self, x):
        """Convert the range from [-1, 1] to [0, 1]."""
        out = (x + 1) / 2
        return out.clamp_(0, 1)

    def gradient_penalty(self, y, x):
        """Compute gradient penalty: (L2_norm(dy/dx) - 1)**2."""
        weight = torch.ones(y.size()).to(self.device)
        dydx = torch.autograd.grad(outputs=y,
                                   inputs=x,
                                   grad_outputs=weight,
                                   retain_graph=True,
                                   create_graph=True,
                                   only_inputs=True)[0]
        dydx = dydx.view(dydx.size(0), -1)
        dydx_l2norm = torch.sqrt(torch.sum(dydx ** 2, dim=1))
        return torch.mean((dydx_l2norm - 1) ** 2)

    def label2onehot(self, labels, dim):
        """Convert label indices to one-hot vectors."""
        batch_size = labels.size(0)
        out = torch.zeros(batch_size, dim)
        out[np.arange(batch_size), labels.long()] = 1
        return out

    def classification_loss(self, logit, target):
        """Compute binary or softmax cross entropy loss."""
        return F.cross_entropy(logit, target)

    def train(self):
        """Train StarGAN within a single dataset."""
        # Set data loader.
        data_loader = self.loader

        # Learning rate cache for decaying.
        g_lr = self.g_lr
        d_lr = self.d_lr

        # Start training from scratch or resume training.
        start_iters = 0
        if self.resume_iters:
            start_iters = self.resume_iters
            self.restore_model(self.resume_iters)

        # Start training.
        print('Start training...')
        start_time = time.time()
        for i in range(start_iters, self.num_iters):

            # =================================================================================== #
            #                             1. Preprocess input data                                #
            # =================================================================================== #

            # Fetch real images and labels. `data_iter` is created lazily on the
            # first iteration and re-created whenever the loader is exhausted.
            try:
                x_real, _, label_org = next(data_iter)
            except:
                data_iter = iter(data_loader)
                x_real, _, label_org = next(data_iter)

            # Generate target domain labels randomly.
            rand_idx = torch.randperm(label_org.size(0))
            label_trg = label_org[rand_idx]

            c_org = self.label2onehot(label_org, self.c_dim)
            c_trg = self.label2onehot(label_trg, self.c_dim)

            x_real = x_real.to(self.device)        # Input images.
            c_org = c_org.to(self.device)          # Original domain labels.
            c_trg = c_trg.to(self.device)          # Target domain labels.
            label_org = label_org.to(self.device)  # Labels for computing classification loss.
            label_trg = label_trg.to(self.device)  # Labels for computing classification loss.

            # =================================================================================== #
            #                             2. Train the discriminator                              #
            # =================================================================================== #

            # Compute loss with real images.
            out_src, out_cls = self.D(x_real)
            d_loss_real = - torch.mean(out_src)
            d_loss_cls = self.classification_loss(out_cls, label_org)

            # Compute loss with fake images.
            x_fake = self.G(x_real, c_org, c_trg)
            out_src, out_cls = self.D(x_fake.detach())
            d_loss_fake = torch.mean(out_src)

            # Compute loss for gradient penalty.
            alpha = torch.rand(x_real.size(0), 1, 1, 1).to(self.device)
            x_hat = (alpha * x_real.data + (1 - alpha) * x_fake.data).requires_grad_(True)
            out_src, _ = self.D(x_hat)
            d_loss_gp = self.gradient_penalty(out_src, x_hat)

            # Backward and optimize.
            d_loss = d_loss_real + d_loss_fake + self.lambda_cls * d_loss_cls + self.lambda_gp * d_loss_gp
            self.reset_grad()
            d_loss.backward()
            self.d_optimizer.step()

            # Logging.
            loss = {}
            loss['D/loss_real'] = d_loss_real.item()
            loss['D/loss_fake'] = d_loss_fake.item()
            loss['D/loss_cls'] = d_loss_cls.item()
            loss['D/loss_gp'] = d_loss_gp.item()

            # =================================================================================== #
            #                               3. Train the generator                                #
            # =================================================================================== #

            if (i + 1) % self.n_critic == 0:
                # Original-to-target domain.
                x_fake = self.G(x_real, c_org, c_trg)
                out_src, out_cls = self.D(x_fake)
                g_loss_fake = - torch.mean(out_src)
                g_loss_cls = self.classification_loss(out_cls, label_trg)

                # Target-to-original domain.
                x_reconst = self.G(x_fake, c_trg, c_org)
                g_loss_rec = torch.mean(torch.abs(x_real - x_reconst))

                # Backward and optimize.
                g_loss = g_loss_fake + self.lambda_rec * g_loss_rec + self.lambda_cls * g_loss_cls
                self.reset_grad()
                g_loss.backward()
                self.g_optimizer.step()

                # Logging.
                loss['G/loss_fake'] = g_loss_fake.item()
                loss['G/loss_rec'] = g_loss_rec.item()
                loss['G/loss_cls'] = g_loss_cls.item()

            # =================================================================================== #
            #                                 4. Miscellaneous                                    #
            # =================================================================================== #

            # Print out training information.
            if (i + 1) % self.log_step == 0:
                et = time.time() - start_time
                et = str(datetime.timedelta(seconds=et))[:-7]
                log = "Elapsed [{}], Iteration [{}/{}]".format(et, i + 1, self.num_iters)
                for tag, value in loss.items():
                    log += ", {}: {:.4f}".format(tag, value)
                print(log)
                if self.use_tensorboard:
                    for tag, value in loss.items():
                        self.logger.scalar_summary(tag, value, i + 1)

            # Save model checkpoints.
            if (i + 1) % self.model_save_step == 0:
                G_path = os.path.join(self.model_save_dir, 'stargan_{}-G.ckpt'.format(i + 1))
                D_path = os.path.join(self.model_save_dir, 'stargan_{}-D.ckpt'.format(i + 1))
                torch.save(self.G.state_dict(), G_path)
                torch.save(self.D.state_dict(), D_path)
                G_path = os.path.join(self.model_save_dir, 'stargan_last-G.ckpt')
                D_path = os.path.join(self.model_save_dir, 'stargan_last-D.ckpt')
                torch.save(self.G.state_dict(), G_path)
                torch.save(self.D.state_dict(), D_path)
                print('Saved model checkpoints into {}...'.format(self.model_save_dir))

            # Decay learning rates.
            if (i + 1) % self.lr_update_step == 0 and (i + 1) > (self.num_iters - self.num_iters_decay):
                g_lr -= (self.g_lr / float(self.num_iters_decay))
                d_lr -= (self.d_lr / float(self.num_iters_decay))
                self.update_lr(g_lr, d_lr)
                print('Decayed learning rates, g_lr: {}, d_lr: {}.'.format(g_lr, d_lr))
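The `gradient_penalty` method above implements the WGAN-GP term `(||dy/dx||_2 - 1)^2` on interpolated samples. A self-contained numerical sanity check of the formula (toy shapes, not part of the training code): for `f(x) = 2x` with a single feature, the per-sample gradient norm is 2, so the penalty is `(2 - 1)^2 = 1`:

```python
import torch

x = torch.randn(4, 1, requires_grad=True)
y = (2 * x).sum(dim=1)  # df/dx = 2 for every sample
grads = torch.autograd.grad(
    outputs=y, inputs=x, grad_outputs=torch.ones_like(y), create_graph=True
)[0]
gp = ((grads.view(4, -1).norm(2, dim=1) - 1) ** 2).mean()
print(gp.item())  # 1.0
```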
12,090
40.982639
106
py
null
CIRCLe-main/logger.py
import tensorflow as tf


class Logger(object):
    """Tensorboard logger.

    Note: this uses the TensorFlow 1.x summary API (`tf.summary.FileWriter`),
    which was removed in TensorFlow 2.x (there it is only available as
    `tf.compat.v1.summary.FileWriter`).
    """

    def __init__(self, log_dir):
        """Initialize summary writer."""
        self.writer = tf.summary.FileWriter(log_dir)

    def scalar_summary(self, tag, value, step):
        """Add scalar summary."""
        summary = tf.Summary(value=[tf.Summary.Value(tag=tag, simple_value=value)])
        self.writer.add_summary(summary, step)
419
29
83
py
null
CIRCLe-main/dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import os
import torch
from PIL import Image, ImageFile
from torchvision import transforms
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split


class SkinDataset():
    def __init__(self, df, root_dir, transform=None):
        """
        Args:
            df (DataFrame): DataFrame with the image annotations.
            root_dir (string): Directory with all the images.
            transform (callable, optional): Optional transform to be applied
                on a sample.
        """
        self.df = df
        self.root_dir = root_dir
        self.transform = transform

    def __len__(self):
        return len(self.df)

    def __getitem__(self, idx):
        if torch.is_tensor(idx):
            idx = idx.tolist()
        img_name = os.path.join(self.root_dir,
                                self.df.loc[self.df.index[idx], 'hasher'] + ".jpg")
        image = Image.open(img_name)
        label = self.df.loc[self.df.index[idx], 'low']
        fitzpatrick = self.df.loc[self.df.index[idx], 'fitzpatrick'] - 1
        if self.transform:
            image = self.transform(image)
        return image, label, fitzpatrick


def get_fitz_dataloaders(root, holdout_mode, batch_size, shuffle,
                         partial_skin_types=[], partial_ratio=1.0):
    all_domains = [1, 2, 3, 4, 5, 6]
    train_dir = root + 'fitz17k_train_' + holdout_mode + '.csv'
    val_dir = root + 'fitz17k_val_' + holdout_mode + '.csv'
    test_dir = root + 'fitz17k_test_' + holdout_mode + '.csv'
    val = pd.read_csv(val_dir)
    train = pd.read_csv(train_dir)
    test = pd.read_csv(test_dir)
    for s in all_domains:
        print("\ttrain: skin type", s, ":", len(train[train['fitzpatrick'] == s]))
    train = train.loc[train['fitzpatrick'] != -1]
    val = val.loc[val['fitzpatrick'] != -1]
    test = test.loc[test['fitzpatrick'] != -1]
    if len(partial_skin_types) > 0:
        train_1 = train.loc[~train['fitzpatrick'].isin(partial_skin_types)]
        train_2 = train.loc[train['fitzpatrick'].isin(partial_skin_types)]
        if partial_ratio > 0:
            try:
                train_2_partial, _, _, _ = train_test_split(
                    train_2,
                    train_2.low,
                    train_size=partial_ratio,
                    random_state=None,  # 4242
                    stratify=train_2.low)
            except:
                print("Unable to stratify -> skipped the stratification")
                train_2_partial, _, _, _ = train_test_split(
                    train_2,
                    train_2.low,
                    train_size=partial_ratio,
                    random_state=None,  # 4242
                )
            train = pd.concat([train_1, train_2_partial])
            # drop_duplicates returns a new frame; assign it back so the
            # de-duplication actually takes effect.
            train = train.drop_duplicates(subset=['hasher'])
            train.reset_index(drop=True, inplace=True)
        else:
            train = train_1
        print("After partial skin type edit:")
        for s in all_domains:
            print("\ttrain: skin type", s, ":", len(train[train['fitzpatrick'] == s]))
    print("train size:", len(train))
    print("val size:", len(val))
    print("train skin types:", train.fitzpatrick.unique())
    print("val skin types:", val.fitzpatrick.unique())
    label_codes = sorted(list(train['label'].unique()))
    print("train skin conditions:", len(label_codes))
    label_codes1 = sorted(list(val['label'].unique()))
    print("val skin conditions:", len(label_codes1))
    transformed_train = SkinDataset(
        df=train,
        root_dir=root,
        transform=transforms.Compose([
            transforms.RandomRotation(degrees=15),
            transforms.RandomHorizontalFlip(),
            transforms.Resize(size=(128, 128)),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])
    )
    transformed_val = SkinDataset(
        df=val,
        root_dir=root,
        transform=transforms.Compose([
            transforms.Resize(size=(128, 128)),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])
    )
    transformed_test = SkinDataset(
        df=test,
        root_dir=root,
        transform=transforms.Compose([
            transforms.ToPILImage(),
            transforms.Resize(size=(128, 128)),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])
    )
    train_loader = torch.utils.data.DataLoader(
        transformed_train,
        batch_size=batch_size,
        drop_last=True)
    val_loader = torch.utils.data.DataLoader(
        transformed_val,
        batch_size=batch_size,
        shuffle=shuffle,
        drop_last=True)
    test_loader = torch.utils.data.DataLoader(
        transformed_test,
        batch_size=batch_size,
        shuffle=False,
        drop_last=False)
    return train_loader, val_loader, test_loader
file_length: 5,072 | avg_line_length: 32.375 | max_line_length: 108 | extension_type: py
null
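A minimal usage sketch for the loaders above, assuming the Fitzpatrick17k CSV splits and images sit under the same paths that train_stargan.py uses (both paths are assumptions about the local setup):

```python
# Build the three loaders and peek at one training batch.
from dataset import get_fitz_dataloaders

train_loader, val_loader, test_loader = get_fitz_dataloaders(
    root='../data/fitz17k/images/all/',
    holdout_mode='random_holdout',
    batch_size=16,
    shuffle=True)

images, labels, fitzpatrick = next(iter(train_loader))
print(images.shape)      # torch.Size([16, 3, 128, 128])
print(fitzpatrick[:5])   # skin types shifted to the 0-5 range
```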
CIRCLe-main/util.py
class AverageMeter(object):
    """Keeps a running (weighted) average of a scalar metric."""

    def __init__(self):
        self.reset()

    def reset(self):
        self.count = 0
        self.sum = 0

    def update(self, val, n=1):
        # `val` is assumed to be a per-sample average over `n` samples.
        self.count += n
        self.sum += val * n

    def float(self):
        return self.sum / self.count

    def __repr__(self):
        return '%.3f' % (self.sum / self.count)
file_length: 356 | avg_line_length: 18.833333 | max_line_length: 47 | extension_type: py
null
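Typical epoch-level bookkeeping with the meter above; the batch accuracies and sizes below are invented for illustration:

```python
from util import AverageMeter

acc = AverageMeter()
for batch_acc, batch_size in [(0.75, 16), (0.50, 16), (1.00, 8)]:
    acc.update(batch_acc, n=batch_size)
print(acc)          # 0.700 (average weighted by batch size)
print(acc.float())  # 0.7 as a plain float
```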
CIRCLe-main/README.md
# CIRCLe: Color Invariant Representation Learning for Unbiased Classification of Skin Lesions

This repository holds the source code accompanying our [ECCV ISIC Workshop 2022 paper](https://www2.cs.sfu.ca/~hamarneh/ecopy/eccv_isic2022a.pdf).

[Paper](https://link.springer.com/chapter/10.1007/978-3-031-25069-9_14) | [arXiv](https://arxiv.org/abs/2208.13528) | [DOI](https://doi.org/10.1007/978-3-031-25069-9_14) | [Video](https://www.youtube.com/watch?v=7v1YWy7biWI) | [Slides](https://workshop2022.isic-archive.com/slides_pakzad.pdf)

![model_fig](./images/model_fig.png)
<p align="center">
Overview of CIRCLe. (a) The skin lesion image x with skin type z and diagnosis label y is passed through the feature extractor. The learned representation r goes through the classifier to obtain the predicted label. The classification loss enforces the correct classification objective. (b) The skin color transformer (G) transforms x with skin type z into x' with the new skin type z'. The generated image x' is fed into the feature extractor to get the representation r'. The regularization loss enforces r and r' to be similar. (c) The skin color transformer's schematic view with the possible transformed images, where one of the possible transformations is randomly chosen for generating x'.
</p>

# Abstract

While deep learning based approaches have demonstrated expert-level performance in dermatological diagnosis tasks, they have also been shown to exhibit biases toward certain demographic attributes, particularly skin types (e.g., light versus dark), a fairness concern that must be addressed. We propose `CIRCLe`, a skin color invariant deep representation learning method for improving fairness in skin lesion classification. CIRCLe is trained to classify images by utilizing a regularization loss that encourages images with the same diagnosis but different skin types to have similar latent representations.

## Keywords

Fair AI, Skin Type Bias, Dermatology, Classification, Representation Learning.

# Cite

If you use our code, please cite our paper: [CIRCLe: Color Invariant Representation Learning for Unbiased Classification of Skin Lesions](https://www2.cs.sfu.ca/~hamarneh/ecopy/eccv_isic2022a.pdf)

The corresponding bibtex entry is:

```bibtex
@inproceedings{pakzad2022circle,
    title = {{CIRCLe}: Color Invariant Representation Learning for Unbiased Classification of Skin Lesions},
    author = {Pakzad, Arezou and Abhishek, Kumar and Hamarneh, Ghassan},
    booktitle = {Proceedings of the 17th European Conference on Computer Vision (ECCV) - ISIC Skin Image Analysis Workshop},
    year = {2022},
    pages = {203-219},
    doi = {10.1007/978-3-031-25069-9_14}
}
```

<!-- # Code
Code for StarGAN is modified from https://github.com/yunjey/stargan -->

# Requirements

Install the requirements:

```bash
conda create -n circle-env python=3.8
conda activate circle-env
pip install -r requirements.txt
```

# Data

The `Fitzpatrick17K` dataset is available [here](https://github.com/mattgroh/fitzpatrick17k).

# Training

1) Train StarGAN:

```bash
python train_stargan.py --model_save_dir ./gan-path
```

2) Train `CIRCLe` (with or without the regularization loss):

```bash
python main.py --gan_path ./gan-path --use_reg_loss True
# or
python main.py --gan_path ./gan-path --use_reg_loss False
```

- Train `CIRCLe` with different backbones:

```bash
python main.py --base vgg16
python main.py --base densenet121
python main.py --base resnet18
python main.py --base resnet50
python main.py --base mobilenetv3l
python main.py --base mobilenetv2
```
file_length: 3,603 | avg_line_length: 45.205128 | max_line_length: 609 | extension_type: md
null
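The regularization loss the README describes admits a compact sketch. The function and variable names below are illustrative only; the repository's actual implementation is in models/circle.py further down:

```python
import torch.nn.functional as F

def circle_objective(logits, y, r, r_new, alpha=0.1):
    # Standard classification loss on the original image.
    cls_loss = F.cross_entropy(logits, y)
    # Pull together the representation r of the original image and r_new of
    # its skin-color-transformed counterpart.
    reg_loss = alpha * F.mse_loss(r_new, r)
    return cls_loss + reg_loss
```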
CIRCLe-main/train_stargan.py
import os
import argparse

from solver_stargan import Solver
from torch.backends import cudnn
from dataset import get_fitz_dataloaders


def str2bool(v):
    # NOTE: the original `v.lower() in ('true')` tested substring membership
    # in the string 'true' (so e.g. 'rue' would pass); the one-element tuple
    # restores the intended exact match.
    return v.lower() in ('true',)


def main(config):
    # For fast training.
    cudnn.benchmark = True

    # Create directories if not exist.
    if not os.path.exists(config.model_save_dir):
        os.makedirs(config.model_save_dir)

    # Data loader.
    # NOTE: the original call also passed test_envs=[], num_workers=8 and
    # use_all=True, which the get_fitz_dataloaders() defined in dataset.py
    # does not accept; the call is trimmed here to match that signature.
    train_loader, _, _ = get_fitz_dataloaders(root='../data/fitz17k/images/all/',
                                              holdout_mode='random_holdout',
                                              batch_size=config.batch_size,
                                              shuffle=True)

    # Solver for training and testing StarGAN.
    solver = Solver(train_loader, config)
    solver.train()


if __name__ == '__main__':
    parser = argparse.ArgumentParser()

    # Model configuration.
    parser.add_argument('--c_dim', type=int, default=6, help='dimension of domain labels (1st dataset)')
    parser.add_argument('--image_size', type=int, default=128, help='image resolution')
    parser.add_argument('--g_conv_dim', type=int, default=64, help='number of conv filters in the first layer of G')
    parser.add_argument('--d_conv_dim', type=int, default=64, help='number of conv filters in the first layer of D')
    parser.add_argument('--g_repeat_num', type=int, default=6, help='number of residual blocks in G')
    parser.add_argument('--d_repeat_num', type=int, default=6, help='number of strided conv layers in D')
    parser.add_argument('--lambda_cls', type=float, default=1, help='weight for domain classification loss')
    parser.add_argument('--lambda_rec', type=float, default=10, help='weight for reconstruction loss')
    parser.add_argument('--lambda_gp', type=float, default=10, help='weight for gradient penalty')

    # Training configuration.
    parser.add_argument('--batch_size', type=int, default=16, help='mini-batch size')
    parser.add_argument('--num_iters', type=int, default=1000000, help='number of total iterations for training D')
    parser.add_argument('--num_iters_decay', type=int, default=200000, help='number of iterations for decaying lr')
    parser.add_argument('--g_lr', type=float, default=0.0001, help='learning rate for G')
    parser.add_argument('--d_lr', type=float, default=0.0001, help='learning rate for D')
    parser.add_argument('--n_critic', type=int, default=5, help='number of D updates per each G update')
    parser.add_argument('--beta1', type=float, default=0.5, help='beta1 for Adam optimizer')
    parser.add_argument('--beta2', type=float, default=0.999, help='beta2 for Adam optimizer')
    parser.add_argument('--resume_iters', type=int, default=None, help='resume training from this step')

    # Test configuration.
    parser.add_argument('--test_iters', type=int, default=20000, help='test model from this step')

    # Miscellaneous.
    parser.add_argument('--num_workers', type=int, default=1)
    parser.add_argument('--mode', type=str, default='train', choices=['train', 'test'])
    parser.add_argument('--use_tensorboard', type=str2bool, default=False)

    # Directories.
    parser.add_argument('--data_dir', type=str, default='../data/fitz17k/images/all/')
    parser.add_argument('--dataset', type=str, default='FitzPatrick17k')
    parser.add_argument('--log_dir', type=str, default='stargan/logs')
    parser.add_argument('--model_save_dir', type=str, default='saved/stargan_new2')

    # Step size.
    parser.add_argument('--log_step', type=int, default=10)
    parser.add_argument('--sample_step', type=int, default=1000)
    parser.add_argument('--model_save_step', type=int, default=10000)
    parser.add_argument('--lr_update_step', type=int, default=1000)

    config = parser.parse_args()
    print(config)
    main(config)
file_length: 3,841 | avg_line_length: 47.025 | max_line_length: 116 | extension_type: py
null
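An aside on the `str2bool` helper above: argparse's `type=bool` is a common pitfall, because `bool('False')` is `True`. A self-contained illustration (the flag names here are made up):

```python
import argparse

def str2bool(v):
    return v.lower() in ('true',)

parser = argparse.ArgumentParser()
parser.add_argument('--flag_naive', type=bool, default=False)
parser.add_argument('--flag_fixed', type=str2bool, default=False)
args = parser.parse_args(['--flag_naive', 'False', '--flag_fixed', 'False'])
print(args.flag_naive)  # True: bool('False') is truthy
print(args.flag_fixed)  # False
```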
CIRCLe-main/models/base.py
import torchvision.models as models
from torch import nn


class BaseModel(nn.Module):
    """Feature extractor: a pretrained backbone whose final classification
    layer is replaced by a linear projection to `hidden_dim` features."""

    def __init__(self, hidden_dim=256, base='resnet50'):
        super(BaseModel, self).__init__()
        if base == 'alexnet':
            self.base = models.alexnet(pretrained=True)
            self.base.classifier[6] = nn.Linear(self.base.classifier[6].in_features, hidden_dim)
        elif base == 'resnet50':
            self.base = models.resnet50(pretrained=True)
            self.base.fc = nn.Linear(self.base.fc.in_features, hidden_dim)
        elif base == 'resnet18':
            self.base = models.resnet18(pretrained=True)
            self.base.fc = nn.Linear(self.base.fc.in_features, hidden_dim)
        elif base == 'vgg16':
            self.base = models.vgg16(pretrained=True)
            self.base.classifier[6] = nn.Linear(self.base.classifier[6].in_features, hidden_dim)
        elif base == 'densenet121':
            self.base = models.densenet121(pretrained=True)
            self.base.classifier = nn.Linear(in_features=self.base.classifier.in_features,
                                             out_features=hidden_dim)
        elif base == 'mobilenetv2':
            self.base = models.mobilenet_v2(pretrained=True)
            self.base.classifier[1] = nn.Linear(in_features=self.base.classifier[1].in_features,
                                                out_features=hidden_dim)
        elif base == 'mobilenetv3l':
            self.base = models.mobilenet_v3_large(pretrained=True)
            self.base.classifier[3] = nn.Linear(in_features=self.base.classifier[3].in_features,
                                                out_features=hidden_dim)
        else:
            # NOTE: the original fell through silently here, leaving
            # self.base undefined; failing fast is easier to diagnose.
            raise ValueError('Unsupported base model: %s' % base)
file_length: 1,569 | avg_line_length: 51.333333 | max_line_length: 121 | extension_type: py
null
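A quick shape check for the backbone wrapper above (`resnet18` is an arbitrary choice, and `pretrained=True` downloads ImageNet weights on first use). Note that `BaseModel` defines no `forward`, so callers go through `self.base`:

```python
import torch
from models.base import BaseModel

model = BaseModel(hidden_dim=256, base='resnet18')
x = torch.randn(2, 3, 128, 128)   # dummy image batch
features = model.base(x)          # BaseModel itself has no forward()
print(features.shape)             # torch.Size([2, 256])
```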
CIRCLe-main/models/circle.py
import torch
from torch import nn
import torch.nn.functional as F

from models.base import BaseModel
from models.stargan import load_stargan


class Model(BaseModel):
    def __init__(self, config, hidden_dim=256, base='vgg16', use_reg=True):
        super(Model, self).__init__(hidden_dim, base)
        self.out_layer = nn.Linear(hidden_dim, config.num_classes)
        # Frozen skin color transformer: a pretrained StarGAN generator.
        self.trans = load_stargan(config.gan_path + 'stargan_last_G.ckpt')
        self.trans.eval()
        self.alpha = config.alpha
        self.use_reg = use_reg

    def forward(self, x, y, d=None):
        z = F.relu(self.base(x))
        logits = self.out_layer(z)
        loss = F.cross_entropy(logits, y)
        correct = (torch.argmax(logits, 1) == y).sum().float() / x.shape[0]
        reg = loss.new_zeros([1])
        if self.training and self.use_reg:
            with torch.no_grad():
                # Pick a random target skin type per sample and translate
                # the batch; no gradients flow through the generator.
                d_new = torch.randint(0, 6, (d.size(0),)).to(d.device)
                d_onehot = d.new_zeros([d.shape[0], 6])
                d_onehot.scatter_(1, d[:, None], 1)
                d_new_onehot = d.new_zeros([d.shape[0], 6])
                d_new_onehot.scatter_(1, d_new[:, None], 1)
                x_new = self.trans(x, d_onehot, d_new_onehot)
            # Kept outside the no_grad block so the regularizer contributes
            # gradients to the feature extractor.
            z_new = F.relu(self.base(x_new))
            reg = self.alpha * F.mse_loss(z_new, z)
        return loss, reg, correct
file_length: 1,422 | avg_line_length: 34.575 | max_line_length: 75 | extension_type: py
null
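A hedged sketch of one training step with the model above. The `config` fields and their values (`num_classes=114`, `gan_path`, `alpha=0.1`) are assumptions modeled on the README's CLI flags, and the sketch presumes a trained StarGAN checkpoint already exists at `./gan-path/stargan_last_G.ckpt`:

```python
# Illustrative only: config values and the checkpoint path are assumptions.
import torch
from types import SimpleNamespace
from models.circle import Model

config = SimpleNamespace(num_classes=114, gan_path='./gan-path/', alpha=0.1)
model = Model(config, hidden_dim=256, base='vgg16', use_reg=True)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)

model.train()
x = torch.randn(4, 3, 128, 128)                  # image batch
y = torch.randint(0, config.num_classes, (4,))   # diagnosis labels
d = torch.randint(0, 6, (4,))                    # skin types, already 0-5

loss, reg, correct = model(x, y, d)
(loss + reg).backward()   # classification loss + representation regularizer
optimizer.step()
optimizer.zero_grad()
```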
CIRCLe-main/models/stargan.py
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np


class ResidualBlock(nn.Module):
    """Residual Block with instance normalization."""
    def __init__(self, dim_in, dim_out):
        super(ResidualBlock, self).__init__()
        self.main = nn.Sequential(
            nn.Conv2d(dim_in, dim_out, kernel_size=3, stride=1, padding=1, bias=False),
            nn.InstanceNorm2d(dim_out, affine=True, track_running_stats=True),
            nn.ReLU(inplace=True),
            nn.Conv2d(dim_out, dim_out, kernel_size=3, stride=1, padding=1, bias=False),
            nn.InstanceNorm2d(dim_out, affine=True, track_running_stats=True))

    def forward(self, x):
        return x + self.main(x)


class Generator(nn.Module):
    """Generator network."""
    def __init__(self, conv_dim=64, c_dim=6, repeat_num=6, img_channels=3):
        super(Generator, self).__init__()
        layers = []
        layers.append(nn.Conv2d(img_channels + 2 * c_dim, conv_dim, kernel_size=7, stride=1, padding=3, bias=False))
        layers.append(nn.InstanceNorm2d(conv_dim, affine=True, track_running_stats=True))
        layers.append(nn.ReLU(inplace=True))

        # Down-sampling layers.
        curr_dim = conv_dim
        for i in range(2):
            layers.append(nn.Conv2d(curr_dim, curr_dim * 2, kernel_size=4, stride=2, padding=1, bias=False))
            layers.append(nn.InstanceNorm2d(curr_dim * 2, affine=True, track_running_stats=True))
            layers.append(nn.ReLU(inplace=True))
            curr_dim = curr_dim * 2

        # Bottleneck layers.
        for i in range(repeat_num):
            layers.append(ResidualBlock(dim_in=curr_dim, dim_out=curr_dim))

        # Up-sampling layers.
        for i in range(2):
            layers.append(nn.ConvTranspose2d(curr_dim, curr_dim // 2, kernel_size=4, stride=2, padding=1, bias=False))
            layers.append(nn.InstanceNorm2d(curr_dim // 2, affine=True, track_running_stats=True))
            layers.append(nn.ReLU(inplace=True))
            curr_dim = curr_dim // 2

        layers.append(nn.Conv2d(curr_dim, img_channels, kernel_size=7, stride=1, padding=3, bias=False))
        layers.append(nn.Tanh())
        self.main = nn.Sequential(*layers)

    def forward(self, x, c_org, c_trg):
        # Replicate spatially and concatenate domain information.
        # Note that this type of label conditioning does not work at all if
        # we use reflection padding in Conv2d. This is because instance
        # normalization ignores the shifting (or bias) effect.
        c_org = c_org.view(c_org.size(0), c_org.size(1), 1, 1)
        c_org = c_org.repeat(1, 1, x.size(2), x.size(3))
        c_trg = c_trg.view(c_trg.size(0), c_trg.size(1), 1, 1)
        c_trg = c_trg.repeat(1, 1, x.size(2), x.size(3))
        x = torch.cat([x, c_org, c_trg], dim=1)
        return self.main(x)


class Discriminator(nn.Module):
    """Discriminator network with PatchGAN."""
    def __init__(self, image_size=128, conv_dim=64, c_dim=6, repeat_num=6, img_channels=3):
        super(Discriminator, self).__init__()
        layers = []
        layers.append(nn.Conv2d(img_channels, conv_dim, kernel_size=4, stride=2, padding=1))
        layers.append(nn.LeakyReLU(0.01))

        curr_dim = conv_dim
        for i in range(1, repeat_num):
            layers.append(nn.Conv2d(curr_dim, curr_dim * 2, kernel_size=4, stride=2, padding=1))
            layers.append(nn.LeakyReLU(0.01))
            curr_dim = curr_dim * 2

        kernel_size = int(image_size / np.power(2, repeat_num))
        self.main = nn.Sequential(*layers)
        self.conv1 = nn.Conv2d(curr_dim, 1, kernel_size=3, stride=1, padding=1, bias=False)
        self.conv2 = nn.Conv2d(curr_dim, c_dim, kernel_size=kernel_size, bias=False)

    def forward(self, x):
        h = self.main(x)
        out_src = self.conv1(h)
        out_cls = self.conv2(h)
        return out_src, out_cls.view(out_cls.size(0), out_cls.size(1))


def load_stargan(ckpt='saved/stargan.pt'):
    g = Generator(64, 6, 6)
    g.load_state_dict(torch.load(ckpt))
    return g
file_length: 4,086 | avg_line_length: 40.282828 | max_line_length: 118 | extension_type: py
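A sketch of skin-type translation with the generator above. The checkpoint path is an assumption (train_stargan.py saves under `model_save_dir`), and the domain codes are one-hot vectors over the six Fitzpatrick types:

```python
import torch
from models.stargan import load_stargan

g = load_stargan('saved/stargan_new2/stargan_last_G.ckpt')  # assumed path
g.eval()

x = torch.randn(4, 3, 128, 128)                    # images, Tanh range [-1, 1]
c_org = torch.eye(6)[torch.tensor([0, 1, 2, 3])]   # one-hot source skin types
c_trg = torch.eye(6)[torch.tensor([5, 5, 5, 5])]   # one-hot target skin types
with torch.no_grad():
    x_new = g(x, c_org, c_trg)
print(x_new.shape)  # torch.Size([4, 3, 128, 128])
```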