Dataset columns:
    repo             string   (length 2 to 152)
    file             string   (length 15 to 239)
    code             string   (length 0 to 58.4M)
    file_length      int64    (values 0 to 58.4M)
    avg_line_length  float64  (values 0 to 1.81M)
    max_line_length  int64    (values 0 to 12.7M)
    extension_type   string   (364 distinct values)
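For orientation, rows of this dump can be consumed like any columnar code dataset. The sketch below is illustrative only: "user/code-dump" is a placeholder identifier, not the real dataset name, and it assumes the dump is hosted in a form the Hugging Face datasets library can stream.

# Illustrative only: the dataset identifier is a placeholder.
from datasets import load_dataset

ds = load_dataset("user/code-dump", split="train", streaming=True)
for row in ds:
    if row["repo"] == "spaCy" and row["extension_type"] == "py":
        # Each row carries the full file contents plus simple length statistics.
        print(row["file"], row["file_length"], row["max_line_length"])
        break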
repo: spaCy | file: spaCy-master/spacy/pipeline/spancat.py
from dataclasses import dataclass from functools import partial from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union, cast import numpy from thinc.api import Config, Model, Ops, Optimizer, get_current_ops, set_dropout_rate from thinc.types import Floats2d, Ints1d, Ints2d, Ragged from ..compat import Protocol, runtime_checkable from ..errors import Errors from ..language import Language from ..scorer import Scorer from ..tokens import Doc, Span, SpanGroup from ..training import Example, validate_examples from ..util import registry from ..vocab import Vocab from .trainable_pipe import TrainablePipe spancat_default_config = """ [model] @architectures = "spacy.SpanCategorizer.v1" scorer = {"@layers": "spacy.LinearLogistic.v1"} [model.reducer] @layers = spacy.mean_max_reducer.v1 hidden_size = 128 [model.tok2vec] @architectures = "spacy.Tok2Vec.v2" [model.tok2vec.embed] @architectures = "spacy.MultiHashEmbed.v2" width = 96 rows = [5000, 1000, 2500, 1000] attrs = ["NORM", "PREFIX", "SUFFIX", "SHAPE"] include_static_vectors = false [model.tok2vec.encode] @architectures = "spacy.MaxoutWindowEncoder.v2" width = ${model.tok2vec.embed.width} window_size = 1 maxout_pieces = 3 depth = 4 """ spancat_singlelabel_default_config = """ [model] @architectures = "spacy.SpanCategorizer.v1" scorer = {"@layers": "Softmax.v2"} [model.reducer] @layers = spacy.mean_max_reducer.v1 hidden_size = 128 [model.tok2vec] @architectures = "spacy.Tok2Vec.v2" [model.tok2vec.embed] @architectures = "spacy.MultiHashEmbed.v1" width = 96 rows = [5000, 1000, 2500, 1000] attrs = ["NORM", "PREFIX", "SUFFIX", "SHAPE"] include_static_vectors = false [model.tok2vec.encode] @architectures = "spacy.MaxoutWindowEncoder.v2" width = ${model.tok2vec.embed.width} window_size = 1 maxout_pieces = 3 depth = 4 """ DEFAULT_SPANS_KEY = "sc" DEFAULT_SPANCAT_MODEL = Config().from_str(spancat_default_config)["model"] DEFAULT_SPANCAT_SINGLELABEL_MODEL = Config().from_str( spancat_singlelabel_default_config )["model"] @runtime_checkable class Suggester(Protocol): def __call__(self, docs: Iterable[Doc], *, ops: Optional[Ops] = None) -> Ragged: ... 
def ngram_suggester( docs: Iterable[Doc], sizes: List[int], *, ops: Optional[Ops] = None ) -> Ragged: if ops is None: ops = get_current_ops() spans = [] lengths = [] for doc in docs: starts = ops.xp.arange(len(doc), dtype="i") starts = starts.reshape((-1, 1)) length = 0 for size in sizes: if size <= len(doc): starts_size = starts[: len(doc) - (size - 1)] spans.append(ops.xp.hstack((starts_size, starts_size + size))) length += spans[-1].shape[0] if spans: assert spans[-1].ndim == 2, spans[-1].shape lengths.append(length) lengths_array = ops.asarray1i(lengths) if len(spans) > 0: output = Ragged(ops.xp.vstack(spans), lengths_array) else: output = Ragged(ops.xp.zeros((0, 0), dtype="i"), lengths_array) assert output.dataXd.ndim == 2 return output def preset_spans_suggester( docs: Iterable[Doc], spans_key: str, *, ops: Optional[Ops] = None ) -> Ragged: if ops is None: ops = get_current_ops() spans = [] lengths = [] for doc in docs: length = 0 if doc.spans[spans_key]: for span in doc.spans[spans_key]: spans.append([span.start, span.end]) length += 1 lengths.append(length) lengths_array = cast(Ints1d, ops.asarray(lengths, dtype="i")) if len(spans) > 0: output = Ragged(ops.asarray(spans, dtype="i"), lengths_array) else: output = Ragged(ops.xp.zeros((0, 0), dtype="i"), lengths_array) return output @registry.misc("spacy.ngram_suggester.v1") def build_ngram_suggester(sizes: List[int]) -> Suggester: """Suggest all spans of the given lengths. Spans are returned as a ragged array of integers. The array has two columns, indicating the start and end position.""" return partial(ngram_suggester, sizes=sizes) @registry.misc("spacy.ngram_range_suggester.v1") def build_ngram_range_suggester(min_size: int, max_size: int) -> Suggester: """Suggest all spans of the given lengths between a given min and max value - both inclusive. Spans are returned as a ragged array of integers. The array has two columns, indicating the start and end position.""" sizes = list(range(min_size, max_size + 1)) return build_ngram_suggester(sizes) @registry.misc("spacy.preset_spans_suggester.v1") def build_preset_spans_suggester(spans_key: str) -> Suggester: """Suggest all spans that are already stored in doc.spans[spans_key]. This is useful when an upstream component is used to set the spans on the Doc such as a SpanRuler or SpanFinder.""" return partial(preset_spans_suggester, spans_key=spans_key) @Language.factory( "spancat", assigns=["doc.spans"], default_config={ "threshold": 0.5, "spans_key": DEFAULT_SPANS_KEY, "max_positive": None, "model": DEFAULT_SPANCAT_MODEL, "suggester": {"@misc": "spacy.ngram_suggester.v1", "sizes": [1, 2, 3]}, "scorer": {"@scorers": "spacy.spancat_scorer.v1"}, }, default_score_weights={"spans_sc_f": 1.0, "spans_sc_p": 0.0, "spans_sc_r": 0.0}, ) def make_spancat( nlp: Language, name: str, suggester: Suggester, model: Model[Tuple[List[Doc], Ragged], Floats2d], spans_key: str, scorer: Optional[Callable], threshold: float, max_positive: Optional[int], ) -> "SpanCategorizer": """Create a SpanCategorizer component and configure it for multi-label classification to be able to assign multiple labels for each span. The span categorizer consists of two parts: a suggester function that proposes candidate spans, and a labeller model that predicts one or more labels for each span. name (str): The component instance name, used to add entries to the losses during training. suggester (Callable[[Iterable[Doc], Optional[Ops]], Ragged]): A function that suggests spans. 
Spans are returned as a ragged array with two integer columns, for the start and end positions. model (Model[Tuple[List[Doc], Ragged], Floats2d]): A model instance that is given a list of documents and (start, end) indices representing candidate span offsets. The model predicts a probability for each category for each span. spans_key (str): Key of the doc.spans dict to save the spans under. During initialization and training, the component will look for spans on the reference document under the same key. scorer (Optional[Callable]): The scoring method. Defaults to Scorer.score_spans for the Doc.spans[spans_key] with overlapping spans allowed. threshold (float): Minimum probability to consider a prediction positive. Spans with a positive prediction will be saved on the Doc. Defaults to 0.5. max_positive (Optional[int]): Maximum number of labels to consider positive per span. Defaults to None, indicating no limit. """ return SpanCategorizer( nlp.vocab, model=model, suggester=suggester, name=name, spans_key=spans_key, negative_weight=None, allow_overlap=True, max_positive=max_positive, threshold=threshold, scorer=scorer, add_negative_label=False, ) @Language.factory( "spancat_singlelabel", assigns=["doc.spans"], default_config={ "spans_key": DEFAULT_SPANS_KEY, "model": DEFAULT_SPANCAT_SINGLELABEL_MODEL, "negative_weight": 1.0, "suggester": {"@misc": "spacy.ngram_suggester.v1", "sizes": [1, 2, 3]}, "scorer": {"@scorers": "spacy.spancat_scorer.v1"}, "allow_overlap": True, }, default_score_weights={"spans_sc_f": 1.0, "spans_sc_p": 0.0, "spans_sc_r": 0.0}, ) def make_spancat_singlelabel( nlp: Language, name: str, suggester: Suggester, model: Model[Tuple[List[Doc], Ragged], Floats2d], spans_key: str, negative_weight: float, allow_overlap: bool, scorer: Optional[Callable], ) -> "SpanCategorizer": """Create a SpanCategorizer component and configure it for multi-class classification. With this configuration each span can get at most one label. The span categorizer consists of two parts: a suggester function that proposes candidate spans, and a labeller model that predicts one or more labels for each span. name (str): The component instance name, used to add entries to the losses during training. suggester (Callable[[Iterable[Doc], Optional[Ops]], Ragged]): A function that suggests spans. Spans are returned as a ragged array with two integer columns, for the start and end positions. model (Model[Tuple[List[Doc], Ragged], Floats2d]): A model instance that is given a list of documents and (start, end) indices representing candidate span offsets. The model predicts a probability for each category for each span. spans_key (str): Key of the doc.spans dict to save the spans under. During initialization and training, the component will look for spans on the reference document under the same key. scorer (Optional[Callable]): The scoring method. Defaults to Scorer.score_spans for the Doc.spans[spans_key] with overlapping spans allowed. negative_weight (float): Multiplier for the loss terms. Can be used to downweight the negative samples if there are too many. allow_overlap (bool): If True the data is assumed to contain overlapping spans. Otherwise it produces non-overlapping spans greedily prioritizing higher assigned label scores. 
""" return SpanCategorizer( nlp.vocab, model=model, suggester=suggester, name=name, spans_key=spans_key, negative_weight=negative_weight, allow_overlap=allow_overlap, max_positive=1, add_negative_label=True, threshold=None, scorer=scorer, ) def spancat_score(examples: Iterable[Example], **kwargs) -> Dict[str, Any]: kwargs = dict(kwargs) attr_prefix = "spans_" key = kwargs["spans_key"] kwargs.setdefault("attr", f"{attr_prefix}{key}") kwargs.setdefault("allow_overlap", True) kwargs.setdefault( "getter", lambda doc, key: doc.spans.get(key[len(attr_prefix) :], []) ) kwargs.setdefault("has_annotation", lambda doc: key in doc.spans) return Scorer.score_spans(examples, **kwargs) @registry.scorers("spacy.spancat_scorer.v1") def make_spancat_scorer(): return spancat_score @dataclass class _Intervals: """ Helper class to avoid storing overlapping spans. """ def __init__(self): self.ranges = set() def add(self, i, j): for e in range(i, j): self.ranges.add(e) def __contains__(self, rang): i, j = rang for e in range(i, j): if e in self.ranges: return True return False class SpanCategorizer(TrainablePipe): """Pipeline component to label spans of text. DOCS: https://spacy.io/api/spancategorizer """ def __init__( self, vocab: Vocab, model: Model[Tuple[List[Doc], Ragged], Floats2d], suggester: Suggester, name: str = "spancat", *, add_negative_label: bool = False, spans_key: str = "spans", negative_weight: Optional[float] = 1.0, allow_overlap: Optional[bool] = True, max_positive: Optional[int] = None, threshold: Optional[float] = 0.5, scorer: Optional[Callable] = spancat_score, ) -> None: """Initialize the multi-label or multi-class span categorizer. vocab (Vocab): The shared vocabulary. model (thinc.api.Model): The Thinc Model powering the pipeline component. For multi-class classification (single label per span) we recommend using a Softmax classifier as a the final layer, while for multi-label classification (multiple possible labels per span) we recommend Logistic. suggester (Callable[[Iterable[Doc], Optional[Ops]], Ragged]): A function that suggests spans. Spans are returned as a ragged array with two integer columns, for the start and end positions. name (str): The component instance name, used to add entries to the losses during training. spans_key (str): Key of the Doc.spans dict to save the spans under. During initialization and training, the component will look for spans on the reference document under the same key. Defaults to `"spans"`. add_negative_label (bool): Learn to predict a special 'negative_label' when a Span is not annotated. threshold (Optional[float]): Minimum probability to consider a prediction positive. Defaults to 0.5. Spans with a positive prediction will be saved on the Doc. max_positive (Optional[int]): Maximum number of labels to consider positive per span. Defaults to None, indicating no limit. negative_weight (float): Multiplier for the loss terms. Can be used to downweight the negative samples if there are too many when add_negative_label is True. Otherwise its unused. allow_overlap (bool): If True the data is assumed to contain overlapping spans. Otherwise it produces non-overlapping spans greedily prioritizing higher assigned label scores. Only used when max_positive is 1. scorer (Optional[Callable]): The scoring method. Defaults to Scorer.score_spans for the Doc.spans[spans_key] with overlapping spans allowed. 
DOCS: https://spacy.io/api/spancategorizer#init """ self.cfg = { "labels": [], "spans_key": spans_key, "threshold": threshold, "max_positive": max_positive, "negative_weight": negative_weight, "allow_overlap": allow_overlap, } self.vocab = vocab self.suggester = suggester self.model = model self.name = name self.scorer = scorer self.add_negative_label = add_negative_label if not allow_overlap and max_positive is not None and max_positive > 1: raise ValueError(Errors.E1051.format(max_positive=max_positive)) @property def key(self) -> str: """Key of the doc.spans dict to save the spans under. During initialization and training, the component will look for spans on the reference document under the same key. """ return str(self.cfg["spans_key"]) def _allow_extra_label(self) -> None: """Raise an error if the component can not add any more labels.""" nO = None if self.model.has_dim("nO"): nO = self.model.get_dim("nO") elif self.model.has_ref("output_layer") and self.model.get_ref( "output_layer" ).has_dim("nO"): nO = self.model.get_ref("output_layer").get_dim("nO") if nO is not None and nO == self._n_labels: if not self.is_resizable: raise ValueError( Errors.E922.format(name=self.name, nO=self.model.get_dim("nO")) ) def add_label(self, label: str) -> int: """Add a new label to the pipe. label (str): The label to add. RETURNS (int): 0 if label is already present, otherwise 1. DOCS: https://spacy.io/api/spancategorizer#add_label """ if not isinstance(label, str): raise ValueError(Errors.E187) if label in self.labels: return 0 self._allow_extra_label() self.cfg["labels"].append(label) # type: ignore self.vocab.strings.add(label) return 1 @property def labels(self) -> Tuple[str]: """RETURNS (Tuple[str]): The labels currently added to the component. DOCS: https://spacy.io/api/spancategorizer#labels """ return tuple(self.cfg["labels"]) # type: ignore @property def label_data(self) -> List[str]: """RETURNS (List[str]): Information about the component's labels. DOCS: https://spacy.io/api/spancategorizer#label_data """ return list(self.labels) @property def _label_map(self) -> Dict[str, int]: """RETURNS (Dict[str, int]): The label map.""" return {label: i for i, label in enumerate(self.labels)} @property def _n_labels(self) -> int: """RETURNS (int): Number of labels.""" if self.add_negative_label: return len(self.labels) + 1 else: return len(self.labels) @property def _negative_label_i(self) -> Union[int, None]: """RETURNS (Union[int, None]): Index of the negative label.""" if self.add_negative_label: return len(self.label_data) else: return None def predict(self, docs: Iterable[Doc]): """Apply the pipeline's model to a batch of docs, without modifying them. docs (Iterable[Doc]): The documents to predict. RETURNS: The models prediction for each document. DOCS: https://spacy.io/api/spancategorizer#predict """ indices = self.suggester(docs, ops=self.model.ops) if indices.lengths.sum() == 0: scores = self.model.ops.alloc2f(0, 0) else: scores = self.model.predict((docs, indices)) # type: ignore return indices, scores def set_candidates( self, docs: Iterable[Doc], *, candidates_key: str = "candidates" ) -> None: """Use the spancat suggester to add a list of span candidates to a list of docs. This method is intended to be used for debugging purposes. docs (Iterable[Doc]): The documents to modify. candidates_key (str): Key of the Doc.spans dict to save the candidate spans under. 
DOCS: https://spacy.io/api/spancategorizer#set_candidates """ suggester_output = self.suggester(docs, ops=self.model.ops) for candidates, doc in zip(suggester_output, docs): # type: ignore doc.spans[candidates_key] = [] for index in candidates.dataXd: doc.spans[candidates_key].append(doc[index[0] : index[1]]) def set_annotations(self, docs: Iterable[Doc], indices_scores) -> None: """Modify a batch of Doc objects, using pre-computed scores. docs (Iterable[Doc]): The documents to modify. scores: The scores to set, produced by SpanCategorizer.predict. DOCS: https://spacy.io/api/spancategorizer#set_annotations """ indices, scores = indices_scores offset = 0 for i, doc in enumerate(docs): indices_i = indices[i].dataXd allow_overlap = cast(bool, self.cfg["allow_overlap"]) if self.cfg["max_positive"] == 1: doc.spans[self.key] = self._make_span_group_singlelabel( doc, indices_i, scores[offset : offset + indices.lengths[i]], allow_overlap, ) else: doc.spans[self.key] = self._make_span_group_multilabel( doc, indices_i, scores[offset : offset + indices.lengths[i]], ) offset += indices.lengths[i] def update( self, examples: Iterable[Example], *, drop: float = 0.0, sgd: Optional[Optimizer] = None, losses: Optional[Dict[str, float]] = None, ) -> Dict[str, float]: """Learn from a batch of documents and gold-standard information, updating the pipe's model. Delegates to predict and get_loss. examples (Iterable[Example]): A batch of Example objects. drop (float): The dropout rate. sgd (thinc.api.Optimizer): The optimizer. losses (Dict[str, float]): Optional record of the loss during training. Updated using the component name as the key. RETURNS (Dict[str, float]): The updated losses dictionary. DOCS: https://spacy.io/api/spancategorizer#update """ if losses is None: losses = {} losses.setdefault(self.name, 0.0) validate_examples(examples, "SpanCategorizer.update") self._validate_categories(examples) if not any(len(eg.predicted) if eg.predicted else 0 for eg in examples): # Handle cases where there are no tokens in any docs. return losses docs = [eg.predicted for eg in examples] spans = self.suggester(docs, ops=self.model.ops) if spans.lengths.sum() == 0: return losses set_dropout_rate(self.model, drop) scores, backprop_scores = self.model.begin_update((docs, spans)) loss, d_scores = self.get_loss(examples, (spans, scores)) backprop_scores(d_scores) # type: ignore if sgd is not None: self.finish_update(sgd) losses[self.name] += loss return losses def get_loss( self, examples: Iterable[Example], spans_scores: Tuple[Ragged, Floats2d] ) -> Tuple[float, float]: """Find the loss and gradient of loss for the batch of documents and their predicted scores. examples (Iterable[Examples]): The batch of examples. spans_scores: Scores representing the model's predictions. RETURNS (Tuple[float, float]): The loss and the gradient. DOCS: https://spacy.io/api/spancategorizer#get_loss """ spans, scores = spans_scores spans = Ragged( self.model.ops.to_numpy(spans.data), self.model.ops.to_numpy(spans.lengths) ) target = numpy.zeros(scores.shape, dtype=scores.dtype) if self.add_negative_label: negative_spans = numpy.ones((scores.shape[0])) offset = 0 label_map = self._label_map for i, eg in enumerate(examples): # Map (start, end) offset of spans to the row in the d_scores array, # so that we can adjust the gradient for predictions that were # in the gold standard. 
spans_index = {} spans_i = spans[i].dataXd for j in range(spans.lengths[i]): start = int(spans_i[j, 0]) # type: ignore end = int(spans_i[j, 1]) # type: ignore spans_index[(start, end)] = offset + j for gold_span in self._get_aligned_spans(eg): key = (gold_span.start, gold_span.end) if key in spans_index: row = spans_index[key] k = label_map[gold_span.label_] target[row, k] = 1.0 if self.add_negative_label: # delete negative label target. negative_spans[row] = 0.0 # The target is a flat array for all docs. Track the position # we're at within the flat array. offset += spans.lengths[i] target = self.model.ops.asarray(target, dtype="f") # type: ignore if self.add_negative_label: negative_samples = numpy.nonzero(negative_spans)[0] target[negative_samples, self._negative_label_i] = 1.0 # type: ignore # The target will have the values 0 (for untrue predictions) or 1 # (for true predictions). # The scores should be in the range [0, 1]. # If the prediction is 0.9 and it's true, the gradient # will be -0.1 (0.9 - 1.0). # If the prediction is 0.9 and it's false, the gradient will be # 0.9 (0.9 - 0.0) d_scores = scores - target if self.add_negative_label: neg_weight = cast(float, self.cfg["negative_weight"]) if neg_weight != 1.0: d_scores[negative_samples] *= neg_weight loss = float((d_scores**2).sum()) return loss, d_scores def initialize( self, get_examples: Callable[[], Iterable[Example]], *, nlp: Optional[Language] = None, labels: Optional[List[str]] = None, ) -> None: """Initialize the pipe for training, using a representative set of data examples. get_examples (Callable[[], Iterable[Example]]): Function that returns a representative sample of gold-standard Example objects. nlp (Optional[Language]): The current nlp object the component is part of. labels (Optional[List[str]]): The labels to add to the component, typically generated by the `init labels` command. If no labels are provided, the get_examples callback is used to extract the labels from the data. 
DOCS: https://spacy.io/api/spancategorizer#initialize """ subbatch: List[Example] = [] if labels is not None: for label in labels: self.add_label(label) for eg in get_examples(): if labels is None: for span in eg.reference.spans.get(self.key, []): self.add_label(span.label_) if len(subbatch) < 10: subbatch.append(eg) self._require_labels() if subbatch: docs = [eg.x for eg in subbatch] spans = build_ngram_suggester(sizes=[1])(docs) Y = self.model.ops.alloc2f(spans.dataXd.shape[0], self._n_labels) self.model.initialize(X=(docs, spans), Y=Y) else: self.model.initialize() def _validate_categories(self, examples: Iterable[Example]): # TODO pass def _get_aligned_spans(self, eg: Example): return eg.get_aligned_spans_y2x( eg.reference.spans.get(self.key, []), allow_overlap=True ) def _make_span_group_multilabel( self, doc: Doc, indices: Ints2d, scores: Floats2d, ) -> SpanGroup: """Find the top-k labels for each span (k=max_positive).""" spans = SpanGroup(doc, name=self.key) if scores.size == 0: return spans scores = self.model.ops.to_numpy(scores) indices = self.model.ops.to_numpy(indices) threshold = self.cfg["threshold"] max_positive = self.cfg["max_positive"] keeps = scores >= threshold if max_positive is not None: assert isinstance(max_positive, int) if self.add_negative_label: negative_scores = numpy.copy(scores[:, self._negative_label_i]) scores[:, self._negative_label_i] = -numpy.inf ranked = (scores * -1).argsort() # type: ignore scores[:, self._negative_label_i] = negative_scores else: ranked = (scores * -1).argsort() # type: ignore span_filter = ranked[:, max_positive:] for i, row in enumerate(span_filter): keeps[i, row] = False attrs_scores = [] for i in range(indices.shape[0]): start = indices[i, 0] end = indices[i, 1] for j, keep in enumerate(keeps[i]): if keep: if j != self._negative_label_i: spans.append(Span(doc, start, end, label=self.labels[j])) attrs_scores.append(scores[i, j]) spans.attrs["scores"] = numpy.array(attrs_scores) return spans def _make_span_group_singlelabel( self, doc: Doc, indices: Ints2d, scores: Floats2d, allow_overlap: bool = True, ) -> SpanGroup: """Find the argmax label for each span.""" # Handle cases when there are zero suggestions if scores.size == 0: return SpanGroup(doc, name=self.key) scores = self.model.ops.to_numpy(scores) indices = self.model.ops.to_numpy(indices) predicted = scores.argmax(axis=1) argmax_scores = numpy.take_along_axis( scores, numpy.expand_dims(predicted, 1), axis=1 ) keeps = numpy.ones(predicted.shape, dtype=bool) # Remove samples where the negative label is the argmax. if self.add_negative_label: keeps = numpy.logical_and(keeps, predicted != self._negative_label_i) # Filter samples according to threshold. 
threshold = self.cfg["threshold"] if threshold is not None: keeps = numpy.logical_and(keeps, (argmax_scores >= threshold).squeeze()) # Sort spans according to argmax probability if not allow_overlap: # Get the probabilities sort_idx = (argmax_scores.squeeze() * -1).argsort() argmax_scores = argmax_scores[sort_idx] predicted = predicted[sort_idx] indices = indices[sort_idx] keeps = keeps[sort_idx] seen = _Intervals() spans = SpanGroup(doc, name=self.key) attrs_scores = [] for i in range(indices.shape[0]): if not keeps[i]: continue label = predicted[i] start = indices[i, 0] end = indices[i, 1] if not allow_overlap: if (start, end) in seen: continue else: seen.add(start, end) attrs_scores.append(argmax_scores[i]) spans.append(Span(doc, start, end, label=self.labels[label])) spans.attrs["scores"] = numpy.array(attrs_scores) return spans
file_length: 30,032 | avg_line_length: 37.112944 | max_line_length: 101 | extension_type: py
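The row above contains the full span categorizer implementation. As a quick orientation, the sketch below wires the "spancat" factory into a blank pipeline using the registered n-gram suggester. It is a minimal sketch assuming only a standard spaCy installation: the labels are invented and the model is left untrained, so any predicted spans are arbitrary.

import spacy

nlp = spacy.blank("en")
# Mirror the factory defaults shown above: spans are stored under doc.spans["sc"]
# and candidates come from the registered n-gram suggester (sizes 1-3).
spancat = nlp.add_pipe(
    "spancat",
    config={
        "spans_key": "sc",
        "suggester": {"@misc": "spacy.ngram_suggester.v1", "sizes": [1, 2, 3]},
    },
)
spancat.add_label("PERSON")  # invented label
spancat.add_label("ORG")     # invented label
nlp.initialize()

doc = nlp("Alice founded Acme Corp.")
# Untrained weights, so the spans (and their attrs["scores"]) are arbitrary here.
print(doc.spans["sc"], doc.spans["sc"].attrs.get("scores"))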
repo: spaCy | file: spaCy-master/spacy/pipeline/textcat.py
from itertools import islice from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple import numpy from thinc.api import Config, Model, Optimizer, get_array_module, set_dropout_rate from thinc.types import Floats2d from ..errors import Errors from ..language import Language from ..scorer import Scorer from ..tokens import Doc from ..training import Example, validate_examples, validate_get_examples from ..util import registry from ..vocab import Vocab from .trainable_pipe import TrainablePipe single_label_default_config = """ [model] @architectures = "spacy.TextCatEnsemble.v2" [model.tok2vec] @architectures = "spacy.Tok2Vec.v2" [model.tok2vec.embed] @architectures = "spacy.MultiHashEmbed.v2" width = 64 rows = [2000, 2000, 500, 1000, 500] attrs = ["NORM", "LOWER", "PREFIX", "SUFFIX", "SHAPE"] include_static_vectors = false [model.tok2vec.encode] @architectures = "spacy.MaxoutWindowEncoder.v2" width = ${model.tok2vec.embed.width} window_size = 1 maxout_pieces = 3 depth = 2 [model.linear_model] @architectures = "spacy.TextCatBOW.v2" exclusive_classes = true ngram_size = 1 no_output_layer = false """ DEFAULT_SINGLE_TEXTCAT_MODEL = Config().from_str(single_label_default_config)["model"] single_label_bow_config = """ [model] @architectures = "spacy.TextCatBOW.v2" exclusive_classes = true ngram_size = 1 no_output_layer = false """ single_label_cnn_config = """ [model] @architectures = "spacy.TextCatCNN.v2" exclusive_classes = true [model.tok2vec] @architectures = "spacy.HashEmbedCNN.v2" pretrained_vectors = null width = 96 depth = 4 embed_size = 2000 window_size = 1 maxout_pieces = 3 subword_features = true """ @Language.factory( "textcat", assigns=["doc.cats"], default_config={ "threshold": 0.0, "model": DEFAULT_SINGLE_TEXTCAT_MODEL, "scorer": {"@scorers": "spacy.textcat_scorer.v2"}, }, default_score_weights={ "cats_score": 1.0, "cats_score_desc": None, "cats_micro_p": None, "cats_micro_r": None, "cats_micro_f": None, "cats_macro_p": None, "cats_macro_r": None, "cats_macro_f": None, "cats_macro_auc": None, "cats_f_per_type": None, }, ) def make_textcat( nlp: Language, name: str, model: Model[List[Doc], List[Floats2d]], threshold: float, scorer: Optional[Callable], ) -> "TextCategorizer": """Create a TextCategorizer component. The text categorizer predicts categories over a whole document. It can learn one or more labels, and the labels are considered to be mutually exclusive (i.e. one true label per doc). model (Model[List[Doc], List[Floats2d]]): A model instance that predicts scores for each category. threshold (float): Cutoff to consider a prediction "positive". scorer (Optional[Callable]): The scoring method. """ return TextCategorizer(nlp.vocab, model, name, threshold=threshold, scorer=scorer) def textcat_score(examples: Iterable[Example], **kwargs) -> Dict[str, Any]: return Scorer.score_cats( examples, "cats", multi_label=False, **kwargs, ) @registry.scorers("spacy.textcat_scorer.v2") def make_textcat_scorer(): return textcat_score class TextCategorizer(TrainablePipe): """Pipeline component for single-label text classification. DOCS: https://spacy.io/api/textcategorizer """ def __init__( self, vocab: Vocab, model: Model, name: str = "textcat", *, threshold: float, scorer: Optional[Callable] = textcat_score, ) -> None: """Initialize a text categorizer for single-label classification. vocab (Vocab): The shared vocabulary. model (thinc.api.Model): The Thinc Model powering the pipeline component. 
name (str): The component instance name, used to add entries to the losses during training. threshold (float): Unused, not needed for single-label (exclusive classes) classification. scorer (Optional[Callable]): The scoring method. Defaults to Scorer.score_cats for the attribute "cats". DOCS: https://spacy.io/api/textcategorizer#init """ self.vocab = vocab self.model = model self.name = name self._rehearsal_model = None cfg: Dict[str, Any] = { "labels": [], "threshold": threshold, "positive_label": None, } self.cfg = dict(cfg) self.scorer = scorer @property def support_missing_values(self): # There are no missing values as the textcat should always # predict exactly one label. All other labels are 0.0 # Subclasses may override this property to change internal behaviour. return False @property def labels(self) -> Tuple[str]: """RETURNS (Tuple[str]): The labels currently added to the component. DOCS: https://spacy.io/api/textcategorizer#labels """ return tuple(self.cfg["labels"]) # type: ignore[arg-type, return-value] @property def label_data(self) -> List[str]: """RETURNS (List[str]): Information about the component's labels. DOCS: https://spacy.io/api/textcategorizer#label_data """ return self.labels # type: ignore[return-value] def predict(self, docs: Iterable[Doc]): """Apply the pipeline's model to a batch of docs, without modifying them. docs (Iterable[Doc]): The documents to predict. RETURNS: The models prediction for each document. DOCS: https://spacy.io/api/textcategorizer#predict """ if not any(len(doc) for doc in docs): # Handle cases where there are no tokens in any docs. tensors = [doc.tensor for doc in docs] xp = self.model.ops.xp scores = xp.zeros((len(list(docs)), len(self.labels))) return scores scores = self.model.predict(docs) scores = self.model.ops.asarray(scores) return scores def set_annotations(self, docs: Iterable[Doc], scores) -> None: """Modify a batch of Doc objects, using pre-computed scores. docs (Iterable[Doc]): The documents to modify. scores: The scores to set, produced by TextCategorizer.predict. DOCS: https://spacy.io/api/textcategorizer#set_annotations """ for i, doc in enumerate(docs): for j, label in enumerate(self.labels): doc.cats[label] = float(scores[i, j]) def update( self, examples: Iterable[Example], *, drop: float = 0.0, sgd: Optional[Optimizer] = None, losses: Optional[Dict[str, float]] = None, ) -> Dict[str, float]: """Learn from a batch of documents and gold-standard information, updating the pipe's model. Delegates to predict and get_loss. examples (Iterable[Example]): A batch of Example objects. drop (float): The dropout rate. sgd (thinc.api.Optimizer): The optimizer. losses (Dict[str, float]): Optional record of the loss during training. Updated using the component name as the key. RETURNS (Dict[str, float]): The updated losses dictionary. DOCS: https://spacy.io/api/textcategorizer#update """ if losses is None: losses = {} losses.setdefault(self.name, 0.0) validate_examples(examples, "TextCategorizer.update") self._validate_categories(examples) if not any(len(eg.predicted) if eg.predicted else 0 for eg in examples): # Handle cases where there are no tokens in any docs. 
return losses set_dropout_rate(self.model, drop) scores, bp_scores = self.model.begin_update([eg.predicted for eg in examples]) loss, d_scores = self.get_loss(examples, scores) bp_scores(d_scores) if sgd is not None: self.finish_update(sgd) losses[self.name] += loss return losses def rehearse( self, examples: Iterable[Example], *, drop: float = 0.0, sgd: Optional[Optimizer] = None, losses: Optional[Dict[str, float]] = None, ) -> Dict[str, float]: """Perform a "rehearsal" update from a batch of data. Rehearsal updates teach the current model to make predictions similar to an initial model, to try to address the "catastrophic forgetting" problem. This feature is experimental. examples (Iterable[Example]): A batch of Example objects. drop (float): The dropout rate. sgd (thinc.api.Optimizer): The optimizer. losses (Dict[str, float]): Optional record of the loss during training. Updated using the component name as the key. RETURNS (Dict[str, float]): The updated losses dictionary. DOCS: https://spacy.io/api/textcategorizer#rehearse """ if losses is None: losses = {} losses.setdefault(self.name, 0.0) if self._rehearsal_model is None: return losses validate_examples(examples, "TextCategorizer.rehearse") self._validate_categories(examples) docs = [eg.predicted for eg in examples] if not any(len(doc) for doc in docs): # Handle cases where there are no tokens in any docs. return losses set_dropout_rate(self.model, drop) scores, bp_scores = self.model.begin_update(docs) target, _ = self._rehearsal_model.begin_update(docs) gradient = scores - target bp_scores(gradient) if sgd is not None: self.finish_update(sgd) losses[self.name] += (gradient**2).sum() return losses def _examples_to_truth( self, examples: Iterable[Example] ) -> Tuple[numpy.ndarray, numpy.ndarray]: nr_examples = len(list(examples)) truths = numpy.zeros((nr_examples, len(self.labels)), dtype="f") not_missing = numpy.ones((nr_examples, len(self.labels)), dtype="f") for i, eg in enumerate(examples): for j, label in enumerate(self.labels): if label in eg.reference.cats: truths[i, j] = eg.reference.cats[label] elif self.support_missing_values: not_missing[i, j] = 0.0 truths = self.model.ops.asarray(truths) # type: ignore return truths, not_missing # type: ignore def get_loss(self, examples: Iterable[Example], scores) -> Tuple[float, float]: """Find the loss and gradient of loss for the batch of documents and their predicted scores. examples (Iterable[Examples]): The batch of examples. scores: Scores representing the model's predictions. RETURNS (Tuple[float, float]): The loss and the gradient. DOCS: https://spacy.io/api/textcategorizer#get_loss """ validate_examples(examples, "TextCategorizer.get_loss") self._validate_categories(examples) truths, not_missing = self._examples_to_truth(examples) not_missing = self.model.ops.asarray(not_missing) # type: ignore d_scores = scores - truths d_scores *= not_missing mean_square_error = (d_scores**2).mean() return float(mean_square_error), d_scores def add_label(self, label: str) -> int: """Add a new label to the pipe. label (str): The label to add. RETURNS (int): 0 if label is already present, otherwise 1. 
DOCS: https://spacy.io/api/textcategorizer#add_label """ if not isinstance(label, str): raise ValueError(Errors.E187) if label in self.labels: return 0 self._allow_extra_label() self.cfg["labels"].append(label) # type: ignore[attr-defined] if self.model and "resize_output" in self.model.attrs: self.model = self.model.attrs["resize_output"](self.model, len(self.labels)) self.vocab.strings.add(label) return 1 def initialize( self, get_examples: Callable[[], Iterable[Example]], *, nlp: Optional[Language] = None, labels: Optional[Iterable[str]] = None, positive_label: Optional[str] = None, ) -> None: """Initialize the pipe for training, using a representative set of data examples. get_examples (Callable[[], Iterable[Example]]): Function that returns a representative sample of gold-standard Example objects. nlp (Language): The current nlp object the component is part of. labels (Optional[Iterable[str]]): The labels to add to the component, typically generated by the `init labels` command. If no labels are provided, the get_examples callback is used to extract the labels from the data. positive_label (Optional[str]): The positive label for a binary task with exclusive classes, `None` otherwise and by default. DOCS: https://spacy.io/api/textcategorizer#initialize """ validate_get_examples(get_examples, "TextCategorizer.initialize") self._validate_categories(get_examples()) if labels is None: for example in get_examples(): for cat in example.y.cats: self.add_label(cat) else: for label in labels: self.add_label(label) if len(self.labels) < 2: raise ValueError(Errors.E867) if positive_label is not None: if positive_label not in self.labels: err = Errors.E920.format(pos_label=positive_label, labels=self.labels) raise ValueError(err) if len(self.labels) != 2: err = Errors.E919.format(pos_label=positive_label, labels=self.labels) raise ValueError(err) self.cfg["positive_label"] = positive_label subbatch = list(islice(get_examples(), 10)) doc_sample = [eg.reference for eg in subbatch] label_sample, _ = self._examples_to_truth(subbatch) self._require_labels() assert len(doc_sample) > 0, Errors.E923.format(name=self.name) assert len(label_sample) > 0, Errors.E923.format(name=self.name) self.model.initialize(X=doc_sample, Y=label_sample) def _validate_categories(self, examples: Iterable[Example]): """Check whether the provided examples all have single-label cats annotations.""" for ex in examples: vals = list(ex.reference.cats.values()) if vals.count(1.0) > 1: raise ValueError(Errors.E895.format(value=ex.reference.cats)) for val in vals: if not (val == 1.0 or val == 0.0): raise ValueError(Errors.E851.format(val=val))
file_length: 14,943 | avg_line_length: 35.537897 | max_line_length: 104 | extension_type: py
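As with the span categorizer, the single-label "textcat" factory defined above can be exercised end to end with a couple of toy examples. A minimal sketch, assuming only a standard spaCy installation; the labels and training sentences are invented.

import spacy
from spacy.training import Example

nlp = spacy.blank("en")
textcat = nlp.add_pipe("textcat")  # exclusive classes: one true label per doc
textcat.add_label("POSITIVE")
textcat.add_label("NEGATIVE")

train = [
    ("I loved this film", {"cats": {"POSITIVE": 1.0, "NEGATIVE": 0.0}}),
    ("Terrible, do not watch", {"cats": {"POSITIVE": 0.0, "NEGATIVE": 1.0}}),
]
examples = [Example.from_dict(nlp.make_doc(text), ann) for text, ann in train]
nlp.initialize(lambda: examples)

losses = {}
for _ in range(20):
    nlp.update(examples, losses=losses)

# doc.cats maps each label to a score in [0, 1].
print(nlp("A wonderful movie").cats, losses)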
repo: spaCy | file: spaCy-master/spacy/pipeline/textcat_multilabel.py
from itertools import islice from typing import Any, Callable, Dict, Iterable, List, Optional from thinc.api import Config, Model from thinc.types import Floats2d from ..errors import Errors from ..language import Language from ..scorer import Scorer from ..tokens import Doc from ..training import Example, validate_get_examples from ..util import registry from ..vocab import Vocab from .textcat import TextCategorizer multi_label_default_config = """ [model] @architectures = "spacy.TextCatEnsemble.v2" [model.tok2vec] @architectures = "spacy.Tok2Vec.v2" [model.tok2vec.embed] @architectures = "spacy.MultiHashEmbed.v2" width = 64 rows = [2000, 2000, 500, 1000, 500] attrs = ["NORM", "LOWER", "PREFIX", "SUFFIX", "SHAPE"] include_static_vectors = false [model.tok2vec.encode] @architectures = "spacy.MaxoutWindowEncoder.v2" width = ${model.tok2vec.embed.width} window_size = 1 maxout_pieces = 3 depth = 2 [model.linear_model] @architectures = "spacy.TextCatBOW.v2" exclusive_classes = false ngram_size = 1 no_output_layer = false """ DEFAULT_MULTI_TEXTCAT_MODEL = Config().from_str(multi_label_default_config)["model"] multi_label_bow_config = """ [model] @architectures = "spacy.TextCatBOW.v2" exclusive_classes = false ngram_size = 1 no_output_layer = false """ multi_label_cnn_config = """ [model] @architectures = "spacy.TextCatCNN.v2" exclusive_classes = false [model.tok2vec] @architectures = "spacy.HashEmbedCNN.v2" pretrained_vectors = null width = 96 depth = 4 embed_size = 2000 window_size = 1 maxout_pieces = 3 subword_features = true """ @Language.factory( "textcat_multilabel", assigns=["doc.cats"], default_config={ "threshold": 0.5, "model": DEFAULT_MULTI_TEXTCAT_MODEL, "scorer": {"@scorers": "spacy.textcat_multilabel_scorer.v2"}, }, default_score_weights={ "cats_score": 1.0, "cats_score_desc": None, "cats_micro_p": None, "cats_micro_r": None, "cats_micro_f": None, "cats_macro_p": None, "cats_macro_r": None, "cats_macro_f": None, "cats_macro_auc": None, "cats_f_per_type": None, }, ) def make_multilabel_textcat( nlp: Language, name: str, model: Model[List[Doc], List[Floats2d]], threshold: float, scorer: Optional[Callable], ) -> "MultiLabel_TextCategorizer": """Create a MultiLabel_TextCategorizer component. The text categorizer predicts categories over a whole document. It can learn one or more labels, and the labels are considered to be non-mutually exclusive, which means that there can be zero or more labels per doc). model (Model[List[Doc], List[Floats2d]]): A model instance that predicts scores for each category. threshold (float): Cutoff to consider a prediction "positive". scorer (Optional[Callable]): The scoring method. """ return MultiLabel_TextCategorizer( nlp.vocab, model, name, threshold=threshold, scorer=scorer ) def textcat_multilabel_score(examples: Iterable[Example], **kwargs) -> Dict[str, Any]: return Scorer.score_cats( examples, "cats", multi_label=True, **kwargs, ) @registry.scorers("spacy.textcat_multilabel_scorer.v2") def make_textcat_multilabel_scorer(): return textcat_multilabel_score class MultiLabel_TextCategorizer(TextCategorizer): """Pipeline component for multi-label text classification. DOCS: https://spacy.io/api/textcategorizer """ def __init__( self, vocab: Vocab, model: Model, name: str = "textcat_multilabel", *, threshold: float, scorer: Optional[Callable] = textcat_multilabel_score, ) -> None: """Initialize a text categorizer for multi-label classification. vocab (Vocab): The shared vocabulary. model (thinc.api.Model): The Thinc Model powering the pipeline component. 
name (str): The component instance name, used to add entries to the losses during training. threshold (float): Cutoff to consider a prediction "positive". scorer (Optional[Callable]): The scoring method. DOCS: https://spacy.io/api/textcategorizer#init """ self.vocab = vocab self.model = model self.name = name self._rehearsal_model = None cfg = {"labels": [], "threshold": threshold} self.cfg = dict(cfg) self.scorer = scorer @property def support_missing_values(self): return True def initialize( # type: ignore[override] self, get_examples: Callable[[], Iterable[Example]], *, nlp: Optional[Language] = None, labels: Optional[Iterable[str]] = None, ): """Initialize the pipe for training, using a representative set of data examples. get_examples (Callable[[], Iterable[Example]]): Function that returns a representative sample of gold-standard Example objects. nlp (Language): The current nlp object the component is part of. labels: The labels to add to the component, typically generated by the `init labels` command. If no labels are provided, the get_examples callback is used to extract the labels from the data. DOCS: https://spacy.io/api/textcategorizer#initialize """ validate_get_examples(get_examples, "MultiLabel_TextCategorizer.initialize") if labels is None: for example in get_examples(): for cat in example.y.cats: self.add_label(cat) else: for label in labels: self.add_label(label) subbatch = list(islice(get_examples(), 10)) self._validate_categories(subbatch) doc_sample = [eg.reference for eg in subbatch] label_sample, _ = self._examples_to_truth(subbatch) self._require_labels() assert len(doc_sample) > 0, Errors.E923.format(name=self.name) assert len(label_sample) > 0, Errors.E923.format(name=self.name) self.model.initialize(X=doc_sample, Y=label_sample) def _validate_categories(self, examples: Iterable[Example]): """This component allows any type of single- or multi-label annotations. This method overwrites the more strict one from 'textcat'.""" # check that annotation values are valid for ex in examples: for val in ex.reference.cats.values(): if not (val == 1.0 or val == 0.0): raise ValueError(Errors.E851.format(val=val))
file_length: 6,605 | avg_line_length: 30.457143 | max_line_length: 94 | extension_type: py
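The multi-label variant above differs from "textcat" in that labels are not mutually exclusive and a threshold decides which ones count as positive. A hedged sketch along the same lines, with invented labels and data:

import spacy
from spacy.training import Example

nlp = spacy.blank("en")
textcat = nlp.add_pipe("textcat_multilabel", config={"threshold": 0.5})
for label in ("SPORTS", "POLITICS"):  # invented labels
    textcat.add_label(label)

examples = [
    Example.from_dict(
        nlp.make_doc("The match was decided by a late penalty"),
        {"cats": {"SPORTS": 1.0, "POLITICS": 0.0}},
    ),
    Example.from_dict(
        nlp.make_doc("The minister resigned after the vote"),
        {"cats": {"SPORTS": 0.0, "POLITICS": 1.0}},
    ),
]
nlp.initialize(lambda: examples)

doc = nlp("Parliament debated the new sports funding bill")
# Each label gets an independent score; zero, one, or both can exceed the threshold.
print(doc.cats)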
repo: spaCy | file: spaCy-master/spacy/pipeline/tok2vec.py
from itertools import islice from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence from thinc.api import Config, Model, Optimizer, set_dropout_rate from ..errors import Errors from ..language import Language from ..tokens import Doc from ..training import Example, validate_examples, validate_get_examples from ..vocab import Vocab from .trainable_pipe import TrainablePipe default_model_config = """ [model] @architectures = "spacy.HashEmbedCNN.v2" pretrained_vectors = null width = 96 depth = 4 embed_size = 2000 window_size = 1 maxout_pieces = 3 subword_features = true """ DEFAULT_TOK2VEC_MODEL = Config().from_str(default_model_config)["model"] @Language.factory( "tok2vec", assigns=["doc.tensor"], default_config={"model": DEFAULT_TOK2VEC_MODEL} ) def make_tok2vec(nlp: Language, name: str, model: Model) -> "Tok2Vec": return Tok2Vec(nlp.vocab, model, name) class Tok2Vec(TrainablePipe): """Apply a "token-to-vector" model and set its outputs in the doc.tensor attribute. This is mostly useful to share a single subnetwork between multiple components, e.g. to have one embedding and CNN network shared between a parser, tagger and NER. In order to use the `Tok2Vec` predictions, subsequent components should use the `Tok2VecListener` layer as the tok2vec subnetwork of their model. This layer will read data from the `doc.tensor` attribute during prediction. During training, the `Tok2Vec` component will save its prediction and backprop callback for each batch, so that the subsequent components can backpropagate to the shared weights. This implementation is used because it allows us to avoid relying on object identity within the models to achieve the parameter sharing. """ def __init__(self, vocab: Vocab, model: Model, name: str = "tok2vec") -> None: """Initialize a tok2vec component. vocab (Vocab): The shared vocabulary. model (thinc.api.Model[List[Doc], List[Floats2d]]): The Thinc Model powering the pipeline component. It should take a list of Doc objects as input, and output a list of 2d float arrays. name (str): The component instance name. DOCS: https://spacy.io/api/tok2vec#init """ self.vocab = vocab self.model = model self.name = name self.listener_map: Dict[str, List["Tok2VecListener"]] = {} self.cfg: Dict[str, Any] = {} @property def listeners(self) -> List["Tok2VecListener"]: """RETURNS (List[Tok2VecListener]): The listener models listening to this component. Usually internals. """ return [m for c in self.listening_components for m in self.listener_map[c]] @property def listening_components(self) -> List[str]: """RETURNS (List[str]): The downstream components listening to this component. Usually internals. """ return list(self.listener_map.keys()) def add_listener(self, listener: "Tok2VecListener", component_name: str) -> None: """Add a listener for a downstream component. Usually internals.""" self.listener_map.setdefault(component_name, []) if listener not in self.listener_map[component_name]: self.listener_map[component_name].append(listener) def remove_listener(self, listener: "Tok2VecListener", component_name: str) -> bool: """Remove a listener for a downstream component. 
Usually internals.""" if component_name in self.listener_map: if listener in self.listener_map[component_name]: self.listener_map[component_name].remove(listener) # If no listeners are left, remove entry if not self.listener_map[component_name]: del self.listener_map[component_name] return True return False def find_listeners(self, component) -> None: """Walk over a model of a processing component, looking for layers that are Tok2vecListener subclasses that have an upstream_name that matches this component. Listeners can also set their upstream_name attribute to the wildcard string '*' to match any `Tok2Vec`. You're unlikely to ever need multiple `Tok2Vec` components, so it's fine to leave your listeners upstream_name on '*'. """ names = ("*", self.name) if isinstance(getattr(component, "model", None), Model): for node in component.model.walk(): if isinstance(node, Tok2VecListener) and node.upstream_name in names: self.add_listener(node, component.name) def predict(self, docs: Iterable[Doc]): """Apply the pipeline's model to a batch of docs, without modifying them. Returns a single tensor for a batch of documents. docs (Iterable[Doc]): The documents to predict. RETURNS: Vector representations for each token in the documents. DOCS: https://spacy.io/api/tok2vec#predict """ if not any(len(doc) for doc in docs): # Handle cases where there are no tokens in any docs. width = self.model.get_dim("nO") return [self.model.ops.alloc((0, width)) for doc in docs] tokvecs = self.model.predict(docs) return tokvecs def set_annotations(self, docs: Sequence[Doc], tokvecses) -> None: """Modify a batch of documents, using pre-computed scores. docs (Iterable[Doc]): The documents to modify. tokvecses: The tensors to set, produced by Tok2Vec.predict. DOCS: https://spacy.io/api/tok2vec#set_annotations """ for doc, tokvecs in zip(docs, tokvecses): assert tokvecs.shape[0] == len(doc) doc.tensor = tokvecs def update( self, examples: Iterable[Example], *, drop: float = 0.0, sgd: Optional[Optimizer] = None, losses: Optional[Dict[str, float]] = None, ): """Learn from a batch of documents and gold-standard information, updating the pipe's model. examples (Iterable[Example]): A batch of Example objects. drop (float): The dropout rate. sgd (thinc.api.Optimizer): The optimizer. losses (Dict[str, float]): Optional record of the loss during training. Updated using the component name as the key. RETURNS (Dict[str, float]): The updated losses dictionary. DOCS: https://spacy.io/api/tok2vec#update """ if losses is None: losses = {} validate_examples(examples, "Tok2Vec.update") docs = [eg.predicted for eg in examples] set_dropout_rate(self.model, drop) tokvecs, bp_tokvecs = self.model.begin_update(docs) d_tokvecs = [self.model.ops.alloc2f(*t2v.shape) for t2v in tokvecs] losses.setdefault(self.name, 0.0) def accumulate_gradient(one_d_tokvecs): """Accumulate tok2vec loss and gradient. This is passed as a callback to all but the last listener. Only the last one does the backprop. """ nonlocal d_tokvecs for i in range(len(one_d_tokvecs)): d_tokvecs[i] += one_d_tokvecs[i] losses[self.name] += float((one_d_tokvecs[i] ** 2).sum()) return [self.model.ops.alloc2f(*t2v.shape) for t2v in tokvecs] def backprop(one_d_tokvecs): """Callback to actually do the backprop. 
Passed to last listener.""" accumulate_gradient(one_d_tokvecs) d_docs = bp_tokvecs(d_tokvecs) if sgd is not None: self.finish_update(sgd) return d_docs batch_id = Tok2VecListener.get_batch_id(docs) for listener in self.listeners[:-1]: listener.receive(batch_id, tokvecs, accumulate_gradient) if self.listeners: self.listeners[-1].receive(batch_id, tokvecs, backprop) return losses def get_loss(self, examples, scores) -> None: pass def initialize( self, get_examples: Callable[[], Iterable[Example]], *, nlp: Optional[Language] = None, ): """Initialize the pipe for training, using a representative set of data examples. get_examples (Callable[[], Iterable[Example]]): Function that returns a representative sample of gold-standard Example objects. nlp (Language): The current nlp object the component is part of. DOCS: https://spacy.io/api/tok2vec#initialize """ validate_get_examples(get_examples, "Tok2Vec.initialize") doc_sample = [] for example in islice(get_examples(), 10): doc_sample.append(example.x) assert doc_sample, Errors.E923.format(name=self.name) self.model.initialize(X=doc_sample) def add_label(self, label): raise NotImplementedError class Tok2VecListener(Model): """A layer that gets fed its answers from an upstream connection, for instance from a component earlier in the pipeline. The Tok2VecListener layer is used as a sublayer within a component such as a parser, NER or text categorizer. Usually you'll have multiple listeners connecting to a single upstream Tok2Vec component, that's earlier in the pipeline. The Tok2VecListener layers act as proxies, passing the predictions from the Tok2Vec component into downstream components, and communicating gradients back upstream. """ name = "tok2vec-listener" def __init__(self, upstream_name: str, width: int) -> None: """ upstream_name (str): A string to identify the 'upstream' Tok2Vec component to communicate with. The upstream name should either be the wildcard string '*', or the name of the `Tok2Vec` component. You'll almost never have multiple upstream Tok2Vec components, so the wildcard string will almost always be fine. width (int): The width of the vectors produced by the upstream tok2vec component. """ Model.__init__(self, name=self.name, forward=forward, dims={"nO": width}) self.upstream_name = upstream_name self._batch_id: Optional[int] = None self._outputs = None self._backprop = None @classmethod def get_batch_id(cls, inputs: Iterable[Doc]) -> int: """Calculate a content-sensitive hash of the batch of documents, to check whether the next batch of documents is unexpected. """ return sum(sum(token.orth for token in doc) for doc in inputs) def receive(self, batch_id: int, outputs, backprop) -> None: """Store a batch of training predictions and a backprop callback. The predictions and callback are produced by the upstream Tok2Vec component, and later will be used when the listener's component's model is called. """ self._batch_id = batch_id self._outputs = outputs self._backprop = backprop def verify_inputs(self, inputs) -> bool: """Check that the batch of Doc objects matches the ones we have a prediction for. 
""" if self._batch_id is None and self._outputs is None: raise ValueError(Errors.E954) else: batch_id = self.get_batch_id(inputs) if batch_id != self._batch_id: raise ValueError(Errors.E953.format(id1=batch_id, id2=self._batch_id)) else: return True def forward(model: Tok2VecListener, inputs, is_train: bool): """Supply the outputs from the upstream Tok2Vec component.""" if is_train: # This might occur during training when the tok2vec layer is frozen / hasn't been updated. # In that case, it should be set to "annotating" so we can retrieve the embeddings from the doc. if model._batch_id is None: outputs = [] for doc in inputs: if doc.tensor.size == 0: raise ValueError(Errors.E203.format(name="tok2vec")) else: outputs.append(doc.tensor) return outputs, _empty_backprop else: model.verify_inputs(inputs) return model._outputs, model._backprop else: # This is pretty grim, but it's hard to do better :(. # It's hard to avoid relying on the doc.tensor attribute, because the # pipeline components can batch the data differently during prediction. # That doesn't happen in update, where the nlp object works on batches # of data. # When the components batch differently, we don't receive a matching # prediction from the upstream, so we can't predict. outputs = [] width = model.get_dim("nO") for doc in inputs: if doc.tensor.size == 0: # But we do need to do *something* if the tensor hasn't been set. # The compromise is to at least return data of the right shape, # so the output is valid. outputs.append(model.ops.alloc2f(len(doc), width)) else: outputs.append(doc.tensor) return outputs, _empty_backprop def _empty_backprop(dX): # for pickling return []
file_length: 13,390 | avg_line_length: 40.458204 | max_line_length: 104 | extension_type: py
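The tok2vec component above exists to be shared: it writes token vectors to doc.tensor, and downstream components read them through Tok2VecListener layers. The sketch below only demonstrates the standalone component with its default HashEmbedCNN model (width 96); wiring a listening component into a tagger or parser is omitted. It assumes a standard spaCy installation.

import spacy

nlp = spacy.blank("en")
nlp.add_pipe("tok2vec")  # uses the default HashEmbedCNN config shown above
nlp.initialize()

doc = nlp("shared token-to-vector representations")
# One vector per token; downstream components would consume these through a
# Tok2VecListener layer rather than reading doc.tensor directly.
print(doc.tensor.shape)  # (number of tokens, 96) with the default width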
repo: spaCy | file: spaCy-master/spacy/pipeline/_edit_tree_internals/__init__.py (empty file)
file_length: 0 | avg_line_length: 0 | max_line_length: 0 | extension_type: py
repo: spaCy | file: spaCy-master/spacy/pipeline/_edit_tree_internals/schemas.py
from collections import defaultdict
from typing import Any, Dict, List, Union

from pydantic import BaseModel, Field, ValidationError
from pydantic.types import StrictBool, StrictInt, StrictStr


class MatchNodeSchema(BaseModel):
    prefix_len: StrictInt = Field(..., title="Prefix length")
    suffix_len: StrictInt = Field(..., title="Suffix length")
    prefix_tree: StrictInt = Field(..., title="Prefix tree")
    suffix_tree: StrictInt = Field(..., title="Suffix tree")

    class Config:
        extra = "forbid"


class SubstNodeSchema(BaseModel):
    orig: Union[int, StrictStr] = Field(..., title="Original substring")
    subst: Union[int, StrictStr] = Field(..., title="Replacement substring")

    class Config:
        extra = "forbid"


class EditTreeSchema(BaseModel):
    __root__: Union[MatchNodeSchema, SubstNodeSchema]


def validate_edit_tree(obj: Dict[str, Any]) -> List[str]:
    """Validate edit tree.

    obj (Dict[str, Any]): JSON-serializable data to validate.
    RETURNS (List[str]): A list of error messages, if available.
    """
    try:
        EditTreeSchema.parse_obj(obj)
        return []
    except ValidationError as e:
        errors = e.errors()
        data = defaultdict(list)
        for error in errors:
            err_loc = " -> ".join([str(p) for p in error.get("loc", [])])
            data[err_loc].append(error.get("msg"))
        return [f"[{loc}] {', '.join(msg)}" for loc, msg in data.items()]  # type: ignore[arg-type]
file_length: 1,475 | avg_line_length: 31.086957 | max_line_length: 99 | extension_type: py
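validate_edit_tree above returns an empty list for well-formed nodes and human-readable messages otherwise. A small sketch using invented dictionaries; it imports from the internal module path shown in the row, so it assumes the spaCy version at hand still exposes that (internal APIs may change between releases).

from spacy.pipeline._edit_tree_internals.schemas import validate_edit_tree

# A well-formed match node: all four strict-integer fields are present.
good = {"prefix_len": 1, "suffix_len": 2, "prefix_tree": 0, "suffix_tree": 3}
# Malformed: wrong type and missing fields, so neither schema in the union matches.
bad = {"prefix_len": "one", "suffix_len": 2}

print(validate_edit_tree(good))  # [] -> no validation errors
print(validate_edit_tree(bad))   # list of "[location] message" strings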
repo: spaCy | file: spaCy-master/spacy/pipeline/_parser_internals/__init__.py (empty file)
file_length: 0 | avg_line_length: 0 | max_line_length: 0 | extension_type: py
repo: spaCy | file: spaCy-master/spacy/pipeline/legacy/__init__.py
from .entity_linker import EntityLinker_v1

__all__ = ["EntityLinker_v1"]
file_length: 74 | avg_line_length: 17.75 | max_line_length: 42 | extension_type: py
repo: spaCy | file: spaCy-master/spacy/pipeline/legacy/entity_linker.py
# This file is present to provide a prior version of the EntityLinker component # for backwards compatability. For details see #9669. import random import warnings from itertools import islice from pathlib import Path from typing import Any, Callable, Dict, Iterable, List, Optional, Union import srsly from thinc.api import CosineDistance, Model, Optimizer, set_dropout_rate from thinc.types import Floats2d from ... import util from ...errors import Errors, Warnings from ...kb import Candidate, KnowledgeBase from ...language import Language from ...ml import empty_kb from ...scorer import Scorer from ...tokens import Doc, Span from ...training import Example, validate_examples, validate_get_examples from ...util import SimpleFrozenList from ...vocab import Vocab from ..pipe import deserialize_config from ..trainable_pipe import TrainablePipe # See #9050 BACKWARD_OVERWRITE = True def entity_linker_score(examples, **kwargs): return Scorer.score_links(examples, negative_labels=[EntityLinker_v1.NIL], **kwargs) class EntityLinker_v1(TrainablePipe): """Pipeline component for named entity linking. DOCS: https://spacy.io/api/entitylinker """ NIL = "NIL" # string used to refer to a non-existing link def __init__( self, vocab: Vocab, model: Model, name: str = "entity_linker", *, labels_discard: Iterable[str], n_sents: int, incl_prior: bool, incl_context: bool, entity_vector_length: int, get_candidates: Callable[[KnowledgeBase, Span], Iterable[Candidate]], overwrite: bool = BACKWARD_OVERWRITE, scorer: Optional[Callable] = entity_linker_score, ) -> None: """Initialize an entity linker. vocab (Vocab): The shared vocabulary. model (thinc.api.Model): The Thinc Model powering the pipeline component. name (str): The component instance name, used to add entries to the losses during training. labels_discard (Iterable[str]): NER labels that will automatically get a "NIL" prediction. n_sents (int): The number of neighbouring sentences to take into account. incl_prior (bool): Whether or not to include prior probabilities from the KB in the model. incl_context (bool): Whether or not to include the local context in the model. entity_vector_length (int): Size of encoding vectors in the KB. get_candidates (Callable[[KnowledgeBase, Span], Iterable[Candidate]]): Function that produces a list of candidates, given a certain knowledge base and a textual mention. scorer (Optional[Callable]): The scoring method. Defaults to Scorer.score_links. DOCS: https://spacy.io/api/entitylinker#init """ self.vocab = vocab self.model = model self.name = name self.labels_discard = list(labels_discard) self.n_sents = n_sents self.incl_prior = incl_prior self.incl_context = incl_context self.get_candidates = get_candidates self.cfg: Dict[str, Any] = {"overwrite": overwrite} self.distance = CosineDistance(normalize=False) # how many neighbour sentences to take into account # create an empty KB by default. If you want to load a predefined one, specify it in 'initialize'. self.kb = empty_kb(entity_vector_length)(self.vocab) self.scorer = scorer def set_kb(self, kb_loader: Callable[[Vocab], KnowledgeBase]): """Define the KB of this pipe by providing a function that will create it using this object's vocab.""" if not callable(kb_loader): raise ValueError(Errors.E885.format(arg_type=type(kb_loader))) self.kb = kb_loader(self.vocab) def validate_kb(self) -> None: # Raise an error if the knowledge base is not initialized. 
if self.kb is None: raise ValueError(Errors.E1018.format(name=self.name)) if len(self.kb) == 0: raise ValueError(Errors.E139.format(name=self.name)) def initialize( self, get_examples: Callable[[], Iterable[Example]], *, nlp: Optional[Language] = None, kb_loader: Optional[Callable[[Vocab], KnowledgeBase]] = None, ): """Initialize the pipe for training, using a representative set of data examples. get_examples (Callable[[], Iterable[Example]]): Function that returns a representative sample of gold-standard Example objects. nlp (Language): The current nlp object the component is part of. kb_loader (Callable[[Vocab], KnowledgeBase]): A function that creates an InMemoryLookupKB from a Vocab instance. Note that providing this argument, will overwrite all data accumulated in the current KB. Use this only when loading a KB as-such from file. DOCS: https://spacy.io/api/entitylinker#initialize """ validate_get_examples(get_examples, "EntityLinker_v1.initialize") if kb_loader is not None: self.set_kb(kb_loader) self.validate_kb() nO = self.kb.entity_vector_length doc_sample = [] vector_sample = [] for example in islice(get_examples(), 10): doc_sample.append(example.x) vector_sample.append(self.model.ops.alloc1f(nO)) assert len(doc_sample) > 0, Errors.E923.format(name=self.name) assert len(vector_sample) > 0, Errors.E923.format(name=self.name) self.model.initialize( X=doc_sample, Y=self.model.ops.asarray(vector_sample, dtype="float32") ) def update( self, examples: Iterable[Example], *, drop: float = 0.0, sgd: Optional[Optimizer] = None, losses: Optional[Dict[str, float]] = None, ) -> Dict[str, float]: """Learn from a batch of documents and gold-standard information, updating the pipe's model. Delegates to predict and get_loss. examples (Iterable[Example]): A batch of Example objects. drop (float): The dropout rate. sgd (thinc.api.Optimizer): The optimizer. losses (Dict[str, float]): Optional record of the loss during training. Updated using the component name as the key. RETURNS (Dict[str, float]): The updated losses dictionary. DOCS: https://spacy.io/api/entitylinker#update """ self.validate_kb() if losses is None: losses = {} losses.setdefault(self.name, 0.0) if not examples: return losses validate_examples(examples, "EntityLinker_v1.update") sentence_docs = [] for eg in examples: sentences = [s for s in eg.reference.sents] kb_ids = eg.get_aligned("ENT_KB_ID", as_string=True) for ent in eg.reference.ents: # KB ID of the first token is the same as the whole span kb_id = kb_ids[ent.start] if kb_id: try: # find the sentence in the list of sentences. 
sent_index = sentences.index(ent.sent) except AttributeError: # Catch the exception when ent.sent is None and provide a user-friendly warning raise RuntimeError(Errors.E030) from None # get n previous sentences, if there are any start_sentence = max(0, sent_index - self.n_sents) # get n posterior sentences, or as many < n as there are end_sentence = min(len(sentences) - 1, sent_index + self.n_sents) # get token positions start_token = sentences[start_sentence].start end_token = sentences[end_sentence].end # append that span as a doc to training sent_doc = eg.predicted[start_token:end_token].as_doc() sentence_docs.append(sent_doc) set_dropout_rate(self.model, drop) if not sentence_docs: warnings.warn(Warnings.W093.format(name="Entity Linker")) return losses sentence_encodings, bp_context = self.model.begin_update(sentence_docs) loss, d_scores = self.get_loss( sentence_encodings=sentence_encodings, examples=examples ) bp_context(d_scores) if sgd is not None: self.finish_update(sgd) losses[self.name] += loss return losses def get_loss(self, examples: Iterable[Example], sentence_encodings: Floats2d): validate_examples(examples, "EntityLinker_v1.get_loss") entity_encodings = [] for eg in examples: kb_ids = eg.get_aligned("ENT_KB_ID", as_string=True) for ent in eg.reference.ents: kb_id = kb_ids[ent.start] if kb_id: entity_encoding = self.kb.get_vector(kb_id) entity_encodings.append(entity_encoding) entity_encodings = self.model.ops.asarray2f(entity_encodings) if sentence_encodings.shape != entity_encodings.shape: err = Errors.E147.format( method="get_loss", msg="gold entities do not match up" ) raise RuntimeError(err) gradients = self.distance.get_grad(sentence_encodings, entity_encodings) loss = self.distance.get_loss(sentence_encodings, entity_encodings) loss = loss / len(entity_encodings) return float(loss), gradients def predict(self, docs: Iterable[Doc]) -> List[str]: """Apply the pipeline's model to a batch of docs, without modifying them. Returns the KB IDs for each entity in each doc, including NIL if there is no prediction. docs (Iterable[Doc]): The documents to predict. RETURNS (List[str]): The models prediction for each document. 
DOCS: https://spacy.io/api/entitylinker#predict """ self.validate_kb() entity_count = 0 final_kb_ids: List[str] = [] if not docs: return final_kb_ids if isinstance(docs, Doc): docs = [docs] for i, doc in enumerate(docs): sentences = [s for s in doc.sents] if len(doc) > 0: # Looping through each entity (TODO: rewrite) for ent in doc.ents: sent = ent.sent sent_index = sentences.index(sent) assert sent_index >= 0 # get n_neighbour sentences, clipped to the length of the document start_sentence = max(0, sent_index - self.n_sents) end_sentence = min(len(sentences) - 1, sent_index + self.n_sents) start_token = sentences[start_sentence].start end_token = sentences[end_sentence].end sent_doc = doc[start_token:end_token].as_doc() # currently, the context is the same for each entity in a sentence (should be refined) xp = self.model.ops.xp if self.incl_context: sentence_encoding = self.model.predict([sent_doc])[0] sentence_encoding_t = sentence_encoding.T sentence_norm = xp.linalg.norm(sentence_encoding_t) entity_count += 1 if ent.label_ in self.labels_discard: # ignoring this entity - setting to NIL final_kb_ids.append(self.NIL) else: candidates = list(self.get_candidates(self.kb, ent)) if not candidates: # no prediction possible for this entity - setting to NIL final_kb_ids.append(self.NIL) elif len(candidates) == 1: # shortcut for efficiency reasons: take the 1 candidate final_kb_ids.append(candidates[0].entity_) else: random.shuffle(candidates) # set all prior probabilities to 0 if incl_prior=False prior_probs = xp.asarray([c.prior_prob for c in candidates]) if not self.incl_prior: prior_probs = xp.asarray([0.0 for _ in candidates]) scores = prior_probs # add in similarity from the context if self.incl_context: entity_encodings = xp.asarray( [c.entity_vector for c in candidates] ) entity_norm = xp.linalg.norm(entity_encodings, axis=1) if len(entity_encodings) != len(prior_probs): raise RuntimeError( Errors.E147.format( method="predict", msg="vectors not of equal length", ) ) # cosine similarity sims = xp.dot(entity_encodings, sentence_encoding_t) / ( sentence_norm * entity_norm ) if sims.shape != prior_probs.shape: raise ValueError(Errors.E161) scores = prior_probs + sims - (prior_probs * sims) best_index = scores.argmax().item() best_candidate = candidates[best_index] final_kb_ids.append(best_candidate.entity_) if not (len(final_kb_ids) == entity_count): err = Errors.E147.format( method="predict", msg="result variables not of equal length" ) raise RuntimeError(err) return final_kb_ids def set_annotations(self, docs: Iterable[Doc], kb_ids: List[str]) -> None: """Modify a batch of documents, using pre-computed scores. docs (Iterable[Doc]): The documents to modify. kb_ids (List[str]): The IDs to set, produced by EntityLinker.predict. DOCS: https://spacy.io/api/entitylinker#set_annotations """ count_ents = len([ent for doc in docs for ent in doc.ents]) if count_ents != len(kb_ids): raise ValueError(Errors.E148.format(ents=count_ents, ids=len(kb_ids))) i = 0 overwrite = self.cfg["overwrite"] for doc in docs: for ent in doc.ents: kb_id = kb_ids[i] i += 1 for token in ent: if token.ent_kb_id == 0 or overwrite: token.ent_kb_id_ = kb_id def to_bytes(self, *, exclude=tuple()): """Serialize the pipe to a bytestring. exclude (Iterable[str]): String names of serialization fields to exclude. RETURNS (bytes): The serialized object. 
DOCS: https://spacy.io/api/entitylinker#to_bytes """ self._validate_serialization_attrs() serialize = {} if hasattr(self, "cfg") and self.cfg is not None: serialize["cfg"] = lambda: srsly.json_dumps(self.cfg) serialize["vocab"] = lambda: self.vocab.to_bytes(exclude=exclude) serialize["kb"] = self.kb.to_bytes serialize["model"] = self.model.to_bytes return util.to_bytes(serialize, exclude) def from_bytes(self, bytes_data, *, exclude=tuple()): """Load the pipe from a bytestring. exclude (Iterable[str]): String names of serialization fields to exclude. RETURNS (TrainablePipe): The loaded object. DOCS: https://spacy.io/api/entitylinker#from_bytes """ self._validate_serialization_attrs() def load_model(b): try: self.model.from_bytes(b) except AttributeError: raise ValueError(Errors.E149) from None deserialize = {} if hasattr(self, "cfg") and self.cfg is not None: deserialize["cfg"] = lambda b: self.cfg.update(srsly.json_loads(b)) deserialize["vocab"] = lambda b: self.vocab.from_bytes(b, exclude=exclude) deserialize["kb"] = lambda b: self.kb.from_bytes(b) deserialize["model"] = load_model util.from_bytes(bytes_data, deserialize, exclude) return self def to_disk( self, path: Union[str, Path], *, exclude: Iterable[str] = SimpleFrozenList() ) -> None: """Serialize the pipe to disk. path (str / Path): Path to a directory. exclude (Iterable[str]): String names of serialization fields to exclude. DOCS: https://spacy.io/api/entitylinker#to_disk """ serialize = {} serialize["vocab"] = lambda p: self.vocab.to_disk(p, exclude=exclude) serialize["cfg"] = lambda p: srsly.write_json(p, self.cfg) serialize["kb"] = lambda p: self.kb.to_disk(p) serialize["model"] = lambda p: self.model.to_disk(p) util.to_disk(path, serialize, exclude) def from_disk( self, path: Union[str, Path], *, exclude: Iterable[str] = SimpleFrozenList() ) -> "EntityLinker_v1": """Load the pipe from disk. Modifies the object in place and returns it. path (str / Path): Path to a directory. exclude (Iterable[str]): String names of serialization fields to exclude. RETURNS (EntityLinker): The modified EntityLinker object. DOCS: https://spacy.io/api/entitylinker#from_disk """ def load_model(p): try: with p.open("rb") as infile: self.model.from_bytes(infile.read()) except AttributeError: raise ValueError(Errors.E149) from None deserialize: Dict[str, Callable[[Any], Any]] = {} deserialize["cfg"] = lambda p: self.cfg.update(deserialize_config(p)) deserialize["vocab"] = lambda p: self.vocab.from_disk(p, exclude=exclude) deserialize["kb"] = lambda p: self.kb.from_disk(p) deserialize["model"] = load_model util.from_disk(path, deserialize, exclude) return self def rehearse(self, examples, *, sgd=None, losses=None, **config): raise NotImplementedError def add_label(self, label): raise NotImplementedError
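The `set_kb`/`initialize` docstrings above expect a `kb_loader` callable that builds a knowledge base from a `Vocab`. Below is a minimal, hypothetical sketch of such a loader, assuming the `InMemoryLookupKB` API mentioned in the `initialize` docstring; the entity ID, alias and the 64-dimensional vector length are made up for illustration.

```python
from spacy.kb import InMemoryLookupKB
from spacy.vocab import Vocab


def kb_loader(vocab: Vocab) -> InMemoryLookupKB:
    # The vector length must match the component's entity_vector_length;
    # 64 is an arbitrary example value.
    kb = InMemoryLookupKB(vocab, entity_vector_length=64)
    kb.add_entity(entity="Q42", freq=12, entity_vector=[0.0] * 64)
    kb.add_alias(alias="Douglas Adams", entities=["Q42"], probabilities=[1.0])
    return kb


# Passed to the pipe when initializing, e.g.:
# entity_linker.initialize(get_examples, kb_loader=kb_loader)
```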
file_length: 18,788 | avg_line_length: 43.41844 | max_line_length: 120 | extension_type: py

repo: spaCy | file: spaCy-master/spacy/tests/README.md
<a href="https://explosion.ai"><img src="https://explosion.ai/assets/img/logo.svg" width="125" height="125" align="right" /></a>

# spaCy tests

spaCy uses the [pytest](http://doc.pytest.org/) framework for testing. For more info on this, see the [pytest documentation](http://docs.pytest.org/en/latest/contents.html).

Tests for spaCy modules and classes live in their own directories of the same name. For example, tests for the `Tokenizer` can be found in [`/tests/tokenizer`](tokenizer). All test modules (i.e. directories) also need to be listed in spaCy's [`setup.py`](../setup.py). To be interpreted and run, all test files and test functions need to be prefixed with `test_`.

> ⚠️ **Important note:** As part of our new model training infrastructure, we've moved all model tests to the [`spacy-models`](https://github.com/explosion/spacy-models) repository. This allows us to test the models separately from the core library functionality.

## Table of contents

1. [Running the tests](#running-the-tests)
2. [Dos and don'ts](#dos-and-donts)
3. [Parameters](#parameters)
4. [Fixtures](#fixtures)
5. [Helpers and utilities](#helpers-and-utilities)
6. [Contributing to the tests](#contributing-to-the-tests)

## Running the tests

To show print statements, run the tests with `py.test -s`. To abort after the first failure, run them with `py.test -x`.

```bash
py.test spacy          # run basic tests
py.test spacy --slow   # run basic and slow tests
```

You can also run tests in a specific file or directory, or even only one specific test:

```bash
py.test spacy/tests/tokenizer                                                    # run all tests in directory
py.test spacy/tests/tokenizer/test_exceptions.py                                 # run all tests in file
py.test spacy/tests/tokenizer/test_exceptions.py::test_tokenizer_handles_emoji   # run specific test
```

## Dos and don'ts

To keep the behavior of the tests consistent and predictable, we try to follow a few basic conventions:

- **Test names** should follow a pattern of `test_[module]_[tested behaviour]`. For example: `test_tokenizer_keeps_email` or `test_spans_override_sentiment`.
- If you're testing for a bug reported in a specific issue, always create a **regression test**. Regression tests should be named `test_issue[ISSUE NUMBER]` and live in the [`regression`](regression) directory.
- Only use `@pytest.mark.xfail` for tests that **should pass, but currently fail**. To test for desired negative behavior, use `assert not` in your test.
- Very **extensive tests** that take a long time to run should be marked with `@pytest.mark.slow`. If your slow test is testing important behavior, consider adding an additional simpler version.
- If tests require **loading the models**, they should be added to the [`spacy-models`](https://github.com/explosion/spacy-models) tests.
- Before requiring the models, always make sure there is no other way to test the particular behavior. In a lot of cases, it's sufficient to simply create a `Doc` object manually. See the section on [helpers and utility functions](#helpers-and-utilities) for more info on this.
- **Avoid unnecessary imports.** There should never be a need to explicitly import spaCy at the top of a file, and many components are available as [fixtures](#fixtures). You should also avoid wildcard imports (`from module import *`).
- If you're importing from spaCy, **always use absolute imports**. For example: `from spacy.language import Language`.
- Try to keep the tests **readable and concise**. Use clear and descriptive variable names (`doc`, `tokens` and `text` are great), keep it short and only test for one behavior at a time.

## Parameters

If the test cases can be extracted from the test, always `parametrize` them instead of hard-coding them into the test:

```python
@pytest.mark.parametrize('text', ["google.com", "spacy.io"])
def test_tokenizer_keep_urls(tokenizer, text):
    tokens = tokenizer(text)
    assert len(tokens) == 1
```

This will run the test once for each `text` value. Even if you're only testing one example, it's usually best to specify it as a parameter. This will later make it easier for others to quickly add additional test cases without having to modify the test.

You can also specify parameters as tuples to test with multiple values per test:

```python
@pytest.mark.parametrize('text,length', [("U.S.", 1), ("us.", 2), ("(U.S.", 2)])
```

To test for combinations of parameters, you can add several `parametrize` markers:

```python
@pytest.mark.parametrize('text', ["A test sentence", "Another sentence"])
@pytest.mark.parametrize('punct', ['.', '!', '?'])
```

This will run the test with all combinations of the two parameters `text` and `punct`. **Use this feature sparingly**, though, as it can easily cause unnecessary or undesired test bloat.

## Fixtures

Fixtures to create instances of spaCy objects and other components should only be defined once in the global [`conftest.py`](conftest.py). We avoid having per-directory conftest files, as this can easily lead to confusion.

These are the main fixtures that are currently available:

| Fixture                             | Description                                                                   |
| ----------------------------------- | ----------------------------------------------------------------------------- |
| `tokenizer`                         | Basic, language-independent tokenizer. Identical to the `xx` language class.  |
| `en_tokenizer`, `de_tokenizer`, ... | Creates an English, German etc. tokenizer.                                    |
| `en_vocab`                          | Creates an instance of the English `Vocab`.                                   |

The fixtures can be used in all tests by simply setting them as an argument, like this:

```python
def test_module_do_something(en_tokenizer):
    tokens = en_tokenizer("Some text here")
```

If all tests in a file require a specific configuration, or use the same complex example, it can be helpful to create a separate fixture. This fixture should be added at the top of each file. Make sure to use descriptive names for these fixtures and don't override any of the global fixtures listed above. **From looking at a test, it should immediately be clear which fixtures are used, and where they are coming from.**

## Helpers and utilities

Our new test setup comes with a few handy utility functions that can be imported from [`util.py`](util.py).

### Constructing a `Doc` object manually

Loading the models is expensive and not necessary if you're not actually testing the model performance. If all you need is a `Doc` object with annotations like heads, POS tags or the dependency parse, you can construct it manually.

```python
def test_doc_token_api_strings(en_vocab):
    words = ["Give", "it", "back", "!", "He", "pleaded", "."]
    pos = ['VERB', 'PRON', 'PART', 'PUNCT', 'PRON', 'VERB', 'PUNCT']
    heads = [0, 0, 0, 0, 5, 5, 5]
    deps = ['ROOT', 'dobj', 'prt', 'punct', 'nsubj', 'ROOT', 'punct']
    doc = Doc(en_vocab, words=words, pos=pos, heads=heads, deps=deps)
    assert doc[0].text == 'Give'
    assert doc[0].lower_ == 'give'
    assert doc[0].pos_ == 'VERB'
    assert doc[0].dep_ == 'ROOT'
```

### Other utilities

| Name                                               | Description                                                                                                     |
| -------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------- |
| `apply_transition_sequence(parser, doc, sequence)` | Perform a series of pre-specified transitions, to put the parser in a desired state.                            |
| `add_vecs_to_vocab(vocab, vectors)`                | Add list of vector tuples (`[("text", [1, 2, 3])]`) to given vocab. All vectors need to have the same length.   |
| `get_cosine(vec1, vec2)`                           | Get cosine for two given vectors.                                                                               |
| `assert_docs_equal(doc1, doc2)`                    | Compare two `Doc` objects and `assert` that they're equal. Tests for tokens, tags, dependencies and entities.   |

## Contributing to the tests

There's still a long way to go to finally reach **100% test coverage** – and we'd appreciate your help! 🙌 You can open an issue on our [issue tracker](https://github.com/explosion/spaCy/issues) and label it `tests`, or make a [pull request](https://github.com/explosion/spaCy/pulls) to this repository.

📖 **For more information on contributing to spaCy in general, check out our [contribution guidelines](https://github.com/explosion/spaCy/blob/master/CONTRIBUTING.md).**
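To complement the "Other utilities" table above, here is a minimal sketch of how those helpers might be combined in a single test. The test name, the vector values and the relative `from .util import ...` path are illustrative assumptions; only the helper signatures come from the table.

```python
import numpy

from spacy.tokens import Doc
from spacy.vocab import Vocab

from .util import add_vecs_to_vocab, assert_docs_equal, get_cosine  # assumed relative import


def test_util_helpers_sketch():
    # Use a fresh Vocab so the shared en_vocab fixture isn't mutated.
    vocab = Vocab()
    # All vectors need to have the same length.
    add_vecs_to_vocab(vocab, [("apple", [1.0, 0.0, 0.0]), ("orange", [0.0, 1.0, 0.0])])
    # Cosine similarity of a vector with itself is 1.0.
    vec = numpy.asarray([1.0, 2.0, 3.0])
    assert abs(get_cosine(vec, vec) - 1.0) < 1e-6
    # Two identically constructed Docs compare as equal (tokens, tags, deps, ents).
    doc1 = Doc(vocab, words=["hello", "world"])
    doc2 = Doc(vocab, words=["hello", "world"])
    assert_docs_equal(doc1, doc2)
```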
file_length: 8,728 | avg_line_length: 62.253623 | max_line_length: 421 | extension_type: md

repo: spaCy | file: spaCy-master/spacy/tests/__init__.py
file_length: 0 | avg_line_length: 0 | max_line_length: 0 | extension_type: py

repo: spaCy | file: spaCy-master/spacy/tests/conftest.py
import pytest from hypothesis import settings from spacy.util import get_lang_class # Functionally disable deadline settings for tests # to prevent spurious test failures in CI builds. settings.register_profile("no_deadlines", deadline=2 * 60 * 1000) # in ms settings.load_profile("no_deadlines") def pytest_addoption(parser): try: parser.addoption("--slow", action="store_true", help="include slow tests") parser.addoption("--issue", action="store", help="test specific issues") # Options are already added, e.g. if conftest is copied in a build pipeline # and runs twice except ValueError: pass def pytest_runtest_setup(item): def getopt(opt): # When using 'pytest --pyargs spacy' to test an installed copy of # spacy, pytest skips running our pytest_addoption() hook. Later, when # we call getoption(), pytest raises an error, because it doesn't # recognize the option we're asking about. To avoid this, we need to # pass a default value. We default to False, i.e., we act like all the # options weren't given. return item.config.getoption(f"--{opt}", False) # Integration of boolean flags for opt in ["slow"]: if opt in item.keywords and not getopt(opt): pytest.skip(f"need --{opt} option to run") # Special integration to mark tests with issue numbers issues = getopt("issue") if isinstance(issues, str): if "issue" in item.keywords: # Convert issues provided on the CLI to list of ints issue_nos = [int(issue.strip()) for issue in issues.split(",")] # Get all issues specified by decorators and check if they're provided issue_refs = [mark.args[0] for mark in item.iter_markers(name="issue")] if not any([ref in issue_nos for ref in issue_refs]): pytest.skip(f"not referencing specified issues: {issue_nos}") else: pytest.skip("not referencing any issues") # Fixtures for language tokenizers (languages sorted alphabetically) @pytest.fixture(scope="module") def tokenizer(): return get_lang_class("xx")().tokenizer @pytest.fixture(scope="session") def af_tokenizer(): return get_lang_class("af")().tokenizer @pytest.fixture(scope="session") def am_tokenizer(): return get_lang_class("am")().tokenizer @pytest.fixture(scope="session") def ar_tokenizer(): return get_lang_class("ar")().tokenizer @pytest.fixture(scope="session") def bg_tokenizer(): return get_lang_class("bg")().tokenizer @pytest.fixture(scope="session") def bn_tokenizer(): return get_lang_class("bn")().tokenizer @pytest.fixture(scope="session") def ca_tokenizer(): return get_lang_class("ca")().tokenizer @pytest.fixture(scope="session") def cs_tokenizer(): return get_lang_class("cs")().tokenizer @pytest.fixture(scope="session") def da_tokenizer(): return get_lang_class("da")().tokenizer @pytest.fixture(scope="session") def de_tokenizer(): return get_lang_class("de")().tokenizer @pytest.fixture(scope="session") def de_vocab(): return get_lang_class("de")().vocab @pytest.fixture(scope="session") def dsb_tokenizer(): return get_lang_class("dsb")().tokenizer @pytest.fixture(scope="session") def el_tokenizer(): return get_lang_class("el")().tokenizer @pytest.fixture(scope="session") def en_tokenizer(): return get_lang_class("en")().tokenizer @pytest.fixture(scope="session") def en_vocab(): return get_lang_class("en")().vocab @pytest.fixture(scope="session") def en_parser(en_vocab): nlp = get_lang_class("en")(en_vocab) return nlp.create_pipe("parser") @pytest.fixture(scope="session") def es_tokenizer(): return get_lang_class("es")().tokenizer @pytest.fixture(scope="session") def es_vocab(): return get_lang_class("es")().vocab @pytest.fixture(scope="session") def 
et_tokenizer(): return get_lang_class("et")().tokenizer @pytest.fixture(scope="session") def eu_tokenizer(): return get_lang_class("eu")().tokenizer @pytest.fixture(scope="session") def fa_tokenizer(): return get_lang_class("fa")().tokenizer @pytest.fixture(scope="session") def fi_tokenizer(): return get_lang_class("fi")().tokenizer @pytest.fixture(scope="session") def fr_tokenizer(): return get_lang_class("fr")().tokenizer @pytest.fixture(scope="session") def fr_vocab(): return get_lang_class("fr")().vocab @pytest.fixture(scope="session") def ga_tokenizer(): return get_lang_class("ga")().tokenizer @pytest.fixture(scope="session") def grc_tokenizer(): return get_lang_class("grc")().tokenizer @pytest.fixture(scope="session") def gu_tokenizer(): return get_lang_class("gu")().tokenizer @pytest.fixture(scope="session") def he_tokenizer(): return get_lang_class("he")().tokenizer @pytest.fixture(scope="session") def hi_tokenizer(): return get_lang_class("hi")().tokenizer @pytest.fixture(scope="session") def hr_tokenizer(): return get_lang_class("hr")().tokenizer @pytest.fixture def hu_tokenizer(): return get_lang_class("hu")().tokenizer @pytest.fixture(scope="session") def id_tokenizer(): return get_lang_class("id")().tokenizer @pytest.fixture(scope="session") def is_tokenizer(): return get_lang_class("is")().tokenizer @pytest.fixture(scope="session") def it_tokenizer(): return get_lang_class("it")().tokenizer @pytest.fixture(scope="session") def it_vocab(): return get_lang_class("it")().vocab @pytest.fixture(scope="session") def ja_tokenizer(): pytest.importorskip("sudachipy") return get_lang_class("ja")().tokenizer @pytest.fixture(scope="session") def hsb_tokenizer(): return get_lang_class("hsb")().tokenizer @pytest.fixture(scope="session") def ko_tokenizer(): pytest.importorskip("natto") return get_lang_class("ko")().tokenizer @pytest.fixture(scope="session") def ko_tokenizer_tokenizer(): config = { "nlp": { "tokenizer": { "@tokenizers": "spacy.Tokenizer.v1", } } } nlp = get_lang_class("ko").from_config(config) return nlp.tokenizer @pytest.fixture(scope="module") def la_tokenizer(): return get_lang_class("la")().tokenizer @pytest.fixture(scope="session") def lb_tokenizer(): return get_lang_class("lb")().tokenizer @pytest.fixture(scope="session") def lg_tokenizer(): return get_lang_class("lg")().tokenizer @pytest.fixture(scope="session") def lt_tokenizer(): return get_lang_class("lt")().tokenizer @pytest.fixture(scope="session") def lv_tokenizer(): return get_lang_class("lv")().tokenizer @pytest.fixture(scope="session") def mk_tokenizer(): return get_lang_class("mk")().tokenizer @pytest.fixture(scope="session") def ml_tokenizer(): return get_lang_class("ml")().tokenizer @pytest.fixture(scope="session") def ms_tokenizer(): return get_lang_class("ms")().tokenizer @pytest.fixture(scope="session") def nb_tokenizer(): return get_lang_class("nb")().tokenizer @pytest.fixture(scope="session") def ne_tokenizer(): return get_lang_class("ne")().tokenizer @pytest.fixture(scope="session") def nl_vocab(): return get_lang_class("nl")().vocab @pytest.fixture(scope="session") def nl_tokenizer(): return get_lang_class("nl")().tokenizer @pytest.fixture(scope="session") def pl_tokenizer(): return get_lang_class("pl")().tokenizer @pytest.fixture(scope="session") def pt_tokenizer(): return get_lang_class("pt")().tokenizer @pytest.fixture(scope="session") def pt_vocab(): return get_lang_class("pt")().vocab @pytest.fixture(scope="session") def ro_tokenizer(): return get_lang_class("ro")().tokenizer 
@pytest.fixture(scope="session") def ru_tokenizer(): pytest.importorskip("pymorphy3") return get_lang_class("ru")().tokenizer @pytest.fixture(scope="session") def ru_lemmatizer(): pytest.importorskip("pymorphy3") return get_lang_class("ru")().add_pipe("lemmatizer") @pytest.fixture(scope="session") def ru_lookup_lemmatizer(): pytest.importorskip("pymorphy3") return get_lang_class("ru")().add_pipe( "lemmatizer", config={"mode": "pymorphy3_lookup"} ) @pytest.fixture(scope="session") def sa_tokenizer(): return get_lang_class("sa")().tokenizer @pytest.fixture(scope="session") def sk_tokenizer(): return get_lang_class("sk")().tokenizer @pytest.fixture(scope="session") def sl_tokenizer(): return get_lang_class("sl")().tokenizer @pytest.fixture(scope="session") def sr_tokenizer(): return get_lang_class("sr")().tokenizer @pytest.fixture(scope="session") def sq_tokenizer(): return get_lang_class("sq")().tokenizer @pytest.fixture(scope="session") def sv_tokenizer(): return get_lang_class("sv")().tokenizer @pytest.fixture(scope="session") def ta_tokenizer(): return get_lang_class("ta")().tokenizer @pytest.fixture(scope="session") def th_tokenizer(): pytest.importorskip("pythainlp") return get_lang_class("th")().tokenizer @pytest.fixture(scope="session") def ti_tokenizer(): return get_lang_class("ti")().tokenizer @pytest.fixture(scope="session") def tl_tokenizer(): return get_lang_class("tl")().tokenizer @pytest.fixture(scope="session") def tr_tokenizer(): return get_lang_class("tr")().tokenizer @pytest.fixture(scope="session") def tt_tokenizer(): return get_lang_class("tt")().tokenizer @pytest.fixture(scope="session") def ky_tokenizer(): return get_lang_class("ky")().tokenizer @pytest.fixture(scope="session") def uk_tokenizer(): pytest.importorskip("pymorphy3") return get_lang_class("uk")().tokenizer @pytest.fixture(scope="session") def uk_lemmatizer(): pytest.importorskip("pymorphy3") pytest.importorskip("pymorphy3_dicts_uk") return get_lang_class("uk")().add_pipe("lemmatizer") @pytest.fixture(scope="session") def uk_lookup_lemmatizer(): pytest.importorskip("pymorphy3") pytest.importorskip("pymorphy3_dicts_uk") return get_lang_class("uk")().add_pipe( "lemmatizer", config={"mode": "pymorphy3_lookup"} ) @pytest.fixture(scope="session") def ur_tokenizer(): return get_lang_class("ur")().tokenizer @pytest.fixture(scope="session") def vi_tokenizer(): pytest.importorskip("pyvi") return get_lang_class("vi")().tokenizer @pytest.fixture(scope="session") def xx_tokenizer(): return get_lang_class("xx")().tokenizer @pytest.fixture(scope="session") def yo_tokenizer(): return get_lang_class("yo")().tokenizer @pytest.fixture(scope="session") def zh_tokenizer_char(): nlp = get_lang_class("zh")() return nlp.tokenizer @pytest.fixture(scope="session") def zh_tokenizer_jieba(): pytest.importorskip("jieba") config = { "nlp": { "tokenizer": { "@tokenizers": "spacy.zh.ChineseTokenizer", "segmenter": "jieba", } } } nlp = get_lang_class("zh").from_config(config) return nlp.tokenizer @pytest.fixture(scope="session") def zh_tokenizer_pkuseg(): pytest.importorskip("spacy_pkuseg") config = { "nlp": { "tokenizer": { "@tokenizers": "spacy.zh.ChineseTokenizer", "segmenter": "pkuseg", } }, "initialize": {"tokenizer": {"pkuseg_model": "web"}}, } nlp = get_lang_class("zh").from_config(config) nlp.initialize() return nlp.tokenizer @pytest.fixture(scope="session") def hy_tokenizer(): return get_lang_class("hy")().tokenizer
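The `pytest_runtest_setup` hook above is driven by markers on the tests themselves. Below is a minimal sketch of a test that opts into both the `--slow` and `--issue` mechanisms; the test name and issue number are hypothetical, and the body mirrors the URL example from the tests README.

```python
import pytest


@pytest.mark.slow         # skipped unless the suite is run with: py.test spacy --slow
@pytest.mark.issue(1234)  # hypothetical issue number; selected via: py.test spacy --issue 1234
def test_example_issue_regression(tokenizer):
    # Uses the basic, language-independent tokenizer fixture defined above.
    tokens = tokenizer("spacy.io")
    assert len(tokens) == 1
```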
file_length: 11,609 | avg_line_length: 21.764706 | max_line_length: 83 | extension_type: py

repo: spaCy | file: spaCy-master/spacy/tests/enable_gpu.py
from spacy import require_gpu

require_gpu()
file_length: 45 | avg_line_length: 10.5 | max_line_length: 29 | extension_type: py

repo: spaCy | file: spaCy-master/spacy/tests/test_architectures.py
import pytest
from catalogue import RegistryError
from thinc.api import Linear

from spacy import registry


def test_get_architecture():
    @registry.architectures("my_test_function")
    def create_model(nr_in, nr_out):
        return Linear(nr_in, nr_out)

    arch = registry.architectures.get("my_test_function")
    assert arch is create_model
    with pytest.raises(RegistryError):
        registry.architectures.get("not_an_existing_key")
file_length: 448 | avg_line_length: 25.411765 | max_line_length: 57 | extension_type: py

repo: spaCy | file: spaCy-master/spacy/tests/test_cli.py
import math import os import time from collections import Counter from pathlib import Path from typing import Any, Dict, List, Tuple import numpy import pytest import srsly from click import NoSuchOption from packaging.specifiers import SpecifierSet from thinc.api import Config, ConfigValidationError import spacy from spacy import about from spacy.cli import info from spacy.cli._util import ( download_file, is_subpath_of, load_project_config, parse_config_overrides, string_to_list, substitute_project_variables, upload_file, validate_project_commands, walk_directory, ) from spacy.cli.apply import apply from spacy.cli.debug_data import ( _compile_gold, _get_distribution, _get_kl_divergence, _get_labels_from_model, _get_labels_from_spancat, _get_span_characteristics, _get_spans_length_freq_dist, _print_span_characteristics, ) from spacy.cli.download import get_compatibility, get_version from spacy.cli.evaluate import render_parses from spacy.cli.find_threshold import find_threshold from spacy.cli.init_config import RECOMMENDATIONS, fill_config, init_config from spacy.cli.init_pipeline import _init_labels from spacy.cli.package import _is_permitted_package_name, get_third_party_dependencies from spacy.cli.project.remote_storage import RemoteStorage from spacy.cli.project.run import _check_requirements from spacy.cli.validate import get_model_pkgs from spacy.lang.en import English from spacy.lang.nl import Dutch from spacy.language import Language from spacy.schemas import ProjectConfigSchema, RecommendationSchema, validate from spacy.tokens import Doc, DocBin from spacy.tokens.span import Span from spacy.training import Example, docs_to_json, offsets_to_biluo_tags from spacy.training.converters import conll_ner_to_docs, conllu_to_docs, iob_to_docs from spacy.util import ENV_VARS, get_minor_version, load_config, load_model_from_config from .util import make_tempdir @pytest.mark.issue(4665) def test_cli_converters_conllu_empty_heads_ner(): """ conllu_to_docs should not raise an exception if the HEAD column contains an underscore """ input_data = """ 1 [ _ PUNCT -LRB- _ _ punct _ _ 2 This _ DET DT _ _ det _ _ 3 killing _ NOUN NN _ _ nsubj _ _ 4 of _ ADP IN _ _ case _ _ 5 a _ DET DT _ _ det _ _ 6 respected _ ADJ JJ _ _ amod _ _ 7 cleric _ NOUN NN _ _ nmod _ _ 8 will _ AUX MD _ _ aux _ _ 9 be _ AUX VB _ _ aux _ _ 10 causing _ VERB VBG _ _ root _ _ 11 us _ PRON PRP _ _ iobj _ _ 12 trouble _ NOUN NN _ _ dobj _ _ 13 for _ ADP IN _ _ case _ _ 14 years _ NOUN NNS _ _ nmod _ _ 15 to _ PART TO _ _ mark _ _ 16 come _ VERB VB _ _ acl _ _ 17 . _ PUNCT . 
_ _ punct _ _ 18 ] _ PUNCT -RRB- _ _ punct _ _ """ docs = list(conllu_to_docs(input_data)) # heads are all 0 assert not all([t.head.i for t in docs[0]]) # NER is unset assert not docs[0].has_annotation("ENT_IOB") @pytest.mark.issue(4924) def test_issue4924(): nlp = Language() example = Example.from_dict(nlp.make_doc(""), {}) nlp.evaluate([example]) @pytest.mark.issue(7055) def test_issue7055(): """Test that fill-config doesn't turn sourced components into factories.""" source_cfg = { "nlp": {"lang": "en", "pipeline": ["tok2vec", "tagger"]}, "components": { "tok2vec": {"factory": "tok2vec"}, "tagger": {"factory": "tagger"}, }, } source_nlp = English.from_config(source_cfg) with make_tempdir() as dir_path: # We need to create a loadable source pipeline source_path = dir_path / "test_model" source_nlp.to_disk(source_path) base_cfg = { "nlp": {"lang": "en", "pipeline": ["tok2vec", "tagger", "ner"]}, "components": { "tok2vec": {"source": str(source_path)}, "tagger": {"source": str(source_path)}, "ner": {"factory": "ner"}, }, } base_cfg = Config(base_cfg) base_path = dir_path / "base.cfg" base_cfg.to_disk(base_path) output_path = dir_path / "config.cfg" fill_config(output_path, base_path, silent=True) filled_cfg = load_config(output_path) assert filled_cfg["components"]["tok2vec"]["source"] == str(source_path) assert filled_cfg["components"]["tagger"]["source"] == str(source_path) assert filled_cfg["components"]["ner"]["factory"] == "ner" assert "model" in filled_cfg["components"]["ner"] @pytest.mark.issue(11235) def test_issue11235(): """ Test that the cli handles interpolation in the directory names correctly when loading project config. """ lang_var = "en" variables = {"lang": lang_var} commands = [{"name": "x", "script": ["hello ${vars.lang}"]}] directories = ["cfg", "${vars.lang}_model"] project = {"commands": commands, "vars": variables, "directories": directories} with make_tempdir() as d: srsly.write_yaml(d / "project.yml", project) cfg = load_project_config(d) # Check that the directories are interpolated and created correctly assert os.path.exists(d / "cfg") assert os.path.exists(d / f"{lang_var}_model") assert cfg["commands"][0]["script"][0] == f"hello {lang_var}" @pytest.mark.issue(12566) @pytest.mark.parametrize( "factory,output_file", [("deps", "parses.html"), ("ents", "entities.html"), ("spans", "spans.html")], ) def test_issue12566(factory: str, output_file: str): """ Test if all displaCy types (ents, dep, spans) produce an HTML file """ with make_tempdir() as tmp_dir: # Create sample spaCy file doc_json = { "ents": [ {"end": 54, "label": "nam_adj_country", "start": 44}, {"end": 83, "label": "nam_liv_person", "start": 69}, {"end": 100, "label": "nam_pro_title_book", "start": 86}, ], "spans": { "sc": [ {"end": 54, "kb_id": "", "label": "nam_adj_country", "start": 44}, {"end": 83, "kb_id": "", "label": "nam_liv_person", "start": 69}, { "end": 100, "kb_id": "", "label": "nam_pro_title_book", "start": 86, }, ] }, "text": "Niedawno czytał em nową książkę znakomitego szkockiego medioznawcy , " "Briana McNaira - Cultural Chaos .", "tokens": [ # fmt: off {"id": 0, "start": 0, "end": 8, "tag": "ADV", "pos": "ADV", "morph": "Degree=Pos", "lemma": "niedawno", "dep": "advmod", "head": 1, }, {"id": 1, "start": 9, "end": 15, "tag": "PRAET", "pos": "VERB", "morph": "Animacy=Hum|Aspect=Imp|Gender=Masc|Mood=Ind|Number=Sing|Tense=Past|VerbForm=Fin|Voice=Act", "lemma": "czytać", "dep": "ROOT", "head": 1, }, {"id": 2, "start": 16, "end": 18, "tag": "AGLT", "pos": "NOUN", "morph": 
"Animacy=Inan|Case=Ins|Gender=Masc|Number=Sing", "lemma": "em", "dep": "iobj", "head": 1, }, {"id": 3, "start": 19, "end": 23, "tag": "ADJ", "pos": "ADJ", "morph": "Case=Acc|Degree=Pos|Gender=Fem|Number=Sing", "lemma": "nowy", "dep": "amod", "head": 4, }, {"id": 4, "start": 24, "end": 31, "tag": "SUBST", "pos": "NOUN", "morph": "Case=Acc|Gender=Fem|Number=Sing", "lemma": "książka", "dep": "obj", "head": 1, }, {"id": 5, "start": 32, "end": 43, "tag": "ADJ", "pos": "ADJ", "morph": "Animacy=Nhum|Case=Gen|Degree=Pos|Gender=Masc|Number=Sing", "lemma": "znakomit", "dep": "acl", "head": 4, }, {"id": 6, "start": 44, "end": 54, "tag": "ADJ", "pos": "ADJ", "morph": "Animacy=Hum|Case=Gen|Degree=Pos|Gender=Masc|Number=Sing", "lemma": "szkockiy", "dep": "amod", "head": 7, }, {"id": 7, "start": 55, "end": 66, "tag": "SUBST", "pos": "NOUN", "morph": "Animacy=Hum|Case=Gen|Gender=Masc|Number=Sing", "lemma": "medioznawca", "dep": "iobj", "head": 5, }, {"id": 8, "start": 67, "end": 68, "tag": "INTERP", "pos": "PUNCT", "morph": "PunctType=Comm", "lemma": ",", "dep": "punct", "head": 9, }, {"id": 9, "start": 69, "end": 75, "tag": "SUBST", "pos": "PROPN", "morph": "Animacy=Hum|Case=Gen|Gender=Masc|Number=Sing", "lemma": "Brian", "dep": "nmod", "head": 4, }, {"id": 10, "start": 76, "end": 83, "tag": "SUBST", "pos": "PROPN", "morph": "Animacy=Hum|Case=Gen|Gender=Masc|Number=Sing", "lemma": "McNair", "dep": "flat", "head": 9, }, {"id": 11, "start": 84, "end": 85, "tag": "INTERP", "pos": "PUNCT", "morph": "PunctType=Dash", "lemma": "-", "dep": "punct", "head": 12, }, {"id": 12, "start": 86, "end": 94, "tag": "SUBST", "pos": "PROPN", "morph": "Animacy=Inan|Case=Nom|Gender=Masc|Number=Sing", "lemma": "Cultural", "dep": "conj", "head": 4, }, {"id": 13, "start": 95, "end": 100, "tag": "SUBST", "pos": "NOUN", "morph": "Animacy=Inan|Case=Nom|Gender=Masc|Number=Sing", "lemma": "Chaos", "dep": "flat", "head": 12, }, {"id": 14, "start": 101, "end": 102, "tag": "INTERP", "pos": "PUNCT", "morph": "PunctType=Peri", "lemma": ".", "dep": "punct", "head": 1, }, # fmt: on ], } # Create a .spacy file nlp = spacy.blank("pl") doc = Doc(nlp.vocab).from_json(doc_json) # Run the evaluate command and check if the html files exist render_parses( docs=[doc], output_path=tmp_dir, model_name="", limit=1, **{factory: True} ) assert (tmp_dir / output_file).is_file() def test_cli_info(): nlp = Dutch() nlp.add_pipe("textcat") with make_tempdir() as tmp_dir: nlp.to_disk(tmp_dir) raw_data = info(tmp_dir, exclude=[""]) assert raw_data["lang"] == "nl" assert raw_data["components"] == ["textcat"] def test_cli_converters_conllu_to_docs(): # from NorNE: https://github.com/ltgoslo/norne/blob/3d23274965f513f23aa48455b28b1878dad23c05/ud/nob/no_bokmaal-ud-dev.conllu lines = [ "1\tDommer\tdommer\tNOUN\t_\tDefinite=Ind|Gender=Masc|Number=Sing\t2\tappos\t_\tO", "2\tFinn\tFinn\tPROPN\t_\tGender=Masc\t4\tnsubj\t_\tB-PER", "3\tEilertsen\tEilertsen\tPROPN\t_\t_\t2\tname\t_\tI-PER", "4\tavstår\tavstå\tVERB\t_\tMood=Ind|Tense=Pres|VerbForm=Fin\t0\troot\t_\tO", ] input_data = "\n".join(lines) converted_docs = list(conllu_to_docs(input_data, n_sents=1)) assert len(converted_docs) == 1 converted = [docs_to_json(converted_docs)] assert converted[0]["id"] == 0 assert len(converted[0]["paragraphs"]) == 1 assert len(converted[0]["paragraphs"][0]["sentences"]) == 1 sent = converted[0]["paragraphs"][0]["sentences"][0] assert len(sent["tokens"]) == 4 tokens = sent["tokens"] assert [t["orth"] for t in tokens] == ["Dommer", "Finn", "Eilertsen", "avstår"] assert [t["tag"] for t 
in tokens] == ["NOUN", "PROPN", "PROPN", "VERB"] assert [t["head"] for t in tokens] == [1, 2, -1, 0] assert [t["dep"] for t in tokens] == ["appos", "nsubj", "name", "ROOT"] ent_offsets = [ (e[0], e[1], e[2]) for e in converted[0]["paragraphs"][0]["entities"] ] biluo_tags = offsets_to_biluo_tags(converted_docs[0], ent_offsets, missing="O") assert biluo_tags == ["O", "B-PER", "L-PER", "O"] @pytest.mark.parametrize( "lines", [ ( "1\tDommer\tdommer\tNOUN\t_\tDefinite=Ind|Gender=Masc|Number=Sing\t2\tappos\t_\tname=O", "2\tFinn\tFinn\tPROPN\t_\tGender=Masc\t4\tnsubj\t_\tSpaceAfter=No|name=B-PER", "3\tEilertsen\tEilertsen\tPROPN\t_\t_\t2\tname\t_\tname=I-PER", "4\tavstår\tavstå\tVERB\t_\tMood=Ind|Tense=Pres|VerbForm=Fin\t0\troot\t_\tSpaceAfter=No|name=O", "5\t.\t$.\tPUNCT\t_\t_\t4\tpunct\t_\tname=B-BAD", ), ( "1\tDommer\tdommer\tNOUN\t_\tDefinite=Ind|Gender=Masc|Number=Sing\t2\tappos\t_\t_", "2\tFinn\tFinn\tPROPN\t_\tGender=Masc\t4\tnsubj\t_\tSpaceAfter=No|NE=B-PER", "3\tEilertsen\tEilertsen\tPROPN\t_\t_\t2\tname\t_\tNE=L-PER", "4\tavstår\tavstå\tVERB\t_\tMood=Ind|Tense=Pres|VerbForm=Fin\t0\troot\t_\tSpaceAfter=No", "5\t.\t$.\tPUNCT\t_\t_\t4\tpunct\t_\tNE=B-BAD", ), ], ) def test_cli_converters_conllu_to_docs_name_ner_map(lines): input_data = "\n".join(lines) converted_docs = list( conllu_to_docs(input_data, n_sents=1, ner_map={"PER": "PERSON", "BAD": ""}) ) assert len(converted_docs) == 1 converted = [docs_to_json(converted_docs)] assert converted[0]["id"] == 0 assert len(converted[0]["paragraphs"]) == 1 assert converted[0]["paragraphs"][0]["raw"] == "Dommer FinnEilertsen avstår. " assert len(converted[0]["paragraphs"][0]["sentences"]) == 1 sent = converted[0]["paragraphs"][0]["sentences"][0] assert len(sent["tokens"]) == 5 tokens = sent["tokens"] assert [t["orth"] for t in tokens] == ["Dommer", "Finn", "Eilertsen", "avstår", "."] assert [t["tag"] for t in tokens] == ["NOUN", "PROPN", "PROPN", "VERB", "PUNCT"] assert [t["head"] for t in tokens] == [1, 2, -1, 0, -1] assert [t["dep"] for t in tokens] == ["appos", "nsubj", "name", "ROOT", "punct"] ent_offsets = [ (e[0], e[1], e[2]) for e in converted[0]["paragraphs"][0]["entities"] ] biluo_tags = offsets_to_biluo_tags(converted_docs[0], ent_offsets, missing="O") assert biluo_tags == ["O", "B-PERSON", "L-PERSON", "O", "O"] def test_cli_converters_conllu_to_docs_subtokens(): # https://raw.githubusercontent.com/ohenrik/nb_news_ud_sm/master/original_data/no-ud-dev-ner.conllu lines = [ "1\tDommer\tdommer\tNOUN\t_\tDefinite=Ind|Gender=Masc|Number=Sing\t2\tappos\t_\tname=O", "2-3\tFE\t_\t_\t_\t_\t_\t_\t_\t_", "2\tFinn\tFinn\tPROPN\t_\tGender=Masc\t4\tnsubj\t_\tname=B-PER", "3\tEilertsen\tEilertsen\tX\t_\tGender=Fem|Tense=past\t2\tname\t_\tname=I-PER", "4\tavstår\tavstå\tVERB\t_\tMood=Ind|Tense=Pres|VerbForm=Fin\t0\troot\t_\tSpaceAfter=No|name=O", "5\t.\t$.\tPUNCT\t_\t_\t4\tpunct\t_\tname=O", ] input_data = "\n".join(lines) converted_docs = list( conllu_to_docs( input_data, n_sents=1, merge_subtokens=True, append_morphology=True ) ) assert len(converted_docs) == 1 converted = [docs_to_json(converted_docs)] assert converted[0]["id"] == 0 assert len(converted[0]["paragraphs"]) == 1 assert converted[0]["paragraphs"][0]["raw"] == "Dommer FE avstår. 
" assert len(converted[0]["paragraphs"][0]["sentences"]) == 1 sent = converted[0]["paragraphs"][0]["sentences"][0] assert len(sent["tokens"]) == 4 tokens = sent["tokens"] assert [t["orth"] for t in tokens] == ["Dommer", "FE", "avstår", "."] assert [t["tag"] for t in tokens] == [ "NOUN__Definite=Ind|Gender=Masc|Number=Sing", "PROPN_X__Gender=Fem,Masc|Tense=past", "VERB__Mood=Ind|Tense=Pres|VerbForm=Fin", "PUNCT", ] assert [t["pos"] for t in tokens] == ["NOUN", "PROPN", "VERB", "PUNCT"] assert [t["morph"] for t in tokens] == [ "Definite=Ind|Gender=Masc|Number=Sing", "Gender=Fem,Masc|Tense=past", "Mood=Ind|Tense=Pres|VerbForm=Fin", "", ] assert [t["lemma"] for t in tokens] == ["dommer", "Finn Eilertsen", "avstå", "$."] assert [t["head"] for t in tokens] == [1, 1, 0, -1] assert [t["dep"] for t in tokens] == ["appos", "nsubj", "ROOT", "punct"] ent_offsets = [ (e[0], e[1], e[2]) for e in converted[0]["paragraphs"][0]["entities"] ] biluo_tags = offsets_to_biluo_tags(converted_docs[0], ent_offsets, missing="O") assert biluo_tags == ["O", "U-PER", "O", "O"] def test_cli_converters_iob_to_docs(): lines = [ "I|O like|O London|I-GPE and|O New|B-GPE York|I-GPE City|I-GPE .|O", "I|O like|O London|B-GPE and|O New|B-GPE York|I-GPE City|I-GPE .|O", "I|PRP|O like|VBP|O London|NNP|I-GPE and|CC|O New|NNP|B-GPE York|NNP|I-GPE City|NNP|I-GPE .|.|O", "I|PRP|O like|VBP|O London|NNP|B-GPE and|CC|O New|NNP|B-GPE York|NNP|I-GPE City|NNP|I-GPE .|.|O", ] input_data = "\n".join(lines) converted_docs = list(iob_to_docs(input_data, n_sents=10)) assert len(converted_docs) == 1 converted = docs_to_json(converted_docs) assert converted["id"] == 0 assert len(converted["paragraphs"]) == 1 assert len(converted["paragraphs"][0]["sentences"]) == 4 for i in range(0, 4): sent = converted["paragraphs"][0]["sentences"][i] assert len(sent["tokens"]) == 8 tokens = sent["tokens"] expected = ["I", "like", "London", "and", "New", "York", "City", "."] assert [t["orth"] for t in tokens] == expected assert len(converted_docs[0].ents) == 8 for ent in converted_docs[0].ents: assert ent.text in ["New York City", "London"] def test_cli_converters_conll_ner_to_docs(): lines = [ "-DOCSTART- -X- O O", "", "I\tO", "like\tO", "London\tB-GPE", "and\tO", "New\tB-GPE", "York\tI-GPE", "City\tI-GPE", ".\tO", "", "I O", "like O", "London B-GPE", "and O", "New B-GPE", "York I-GPE", "City I-GPE", ". O", "", "I PRP O", "like VBP O", "London NNP B-GPE", "and CC O", "New NNP B-GPE", "York NNP I-GPE", "City NNP I-GPE", ". . O", "", "I PRP _ O", "like VBP _ O", "London NNP _ B-GPE", "and CC _ O", "New NNP _ B-GPE", "York NNP _ I-GPE", "City NNP _ I-GPE", ". . 
_ O", "", "I\tPRP\t_\tO", "like\tVBP\t_\tO", "London\tNNP\t_\tB-GPE", "and\tCC\t_\tO", "New\tNNP\t_\tB-GPE", "York\tNNP\t_\tI-GPE", "City\tNNP\t_\tI-GPE", ".\t.\t_\tO", ] input_data = "\n".join(lines) converted_docs = list(conll_ner_to_docs(input_data, n_sents=10)) assert len(converted_docs) == 1 converted = docs_to_json(converted_docs) assert converted["id"] == 0 assert len(converted["paragraphs"]) == 1 assert len(converted["paragraphs"][0]["sentences"]) == 5 for i in range(0, 5): sent = converted["paragraphs"][0]["sentences"][i] assert len(sent["tokens"]) == 8 tokens = sent["tokens"] # fmt: off assert [t["orth"] for t in tokens] == ["I", "like", "London", "and", "New", "York", "City", "."] # fmt: on assert len(converted_docs[0].ents) == 10 for ent in converted_docs[0].ents: assert ent.text in ["New York City", "London"] def test_project_config_validation_full(): config = { "vars": {"some_var": 20}, "directories": ["assets", "configs", "corpus", "scripts", "training"], "assets": [ { "dest": "x", "extra": True, "url": "https://example.com", "checksum": "63373dd656daa1fd3043ce166a59474c", }, { "dest": "y", "git": { "repo": "https://github.com/example/repo", "branch": "develop", "path": "y", }, }, { "dest": "z", "extra": False, "url": "https://example.com", "checksum": "63373dd656daa1fd3043ce166a59474c", }, ], "commands": [ { "name": "train", "help": "Train a model", "script": ["python -m spacy train config.cfg -o training"], "deps": ["config.cfg", "corpus/training.spcy"], "outputs": ["training/model-best"], }, {"name": "test", "script": ["pytest", "custom.py"], "no_skip": True}, ], "workflows": {"all": ["train", "test"], "train": ["train"]}, } errors = validate(ProjectConfigSchema, config) assert not errors @pytest.mark.parametrize( "config", [ {"commands": [{"name": "a"}, {"name": "a"}]}, {"commands": [{"name": "a"}], "workflows": {"a": []}}, {"commands": [{"name": "a"}], "workflows": {"b": ["c"]}}, ], ) def test_project_config_validation1(config): with pytest.raises(SystemExit): validate_project_commands(config) @pytest.mark.parametrize( "config,n_errors", [ ({"commands": {"a": []}}, 1), ({"commands": [{"help": "..."}]}, 1), ({"commands": [{"name": "a", "extra": "b"}]}, 1), ({"commands": [{"extra": "b"}]}, 2), ({"commands": [{"name": "a", "deps": [123]}]}, 1), ], ) def test_project_config_validation2(config, n_errors): errors = validate(ProjectConfigSchema, config) assert len(errors) == n_errors @pytest.mark.parametrize( "int_value", [10, pytest.param("10", marks=pytest.mark.xfail)], ) def test_project_config_interpolation(int_value): variables = {"a": int_value, "b": {"c": "foo", "d": True}} commands = [ {"name": "x", "script": ["hello ${vars.a} ${vars.b.c}"]}, {"name": "y", "script": ["${vars.b.c} ${vars.b.d}"]}, ] project = {"commands": commands, "vars": variables} with make_tempdir() as d: srsly.write_yaml(d / "project.yml", project) cfg = load_project_config(d) assert type(cfg) == dict assert type(cfg["commands"]) == list assert cfg["commands"][0]["script"][0] == "hello 10 foo" assert cfg["commands"][1]["script"][0] == "foo true" commands = [{"name": "x", "script": ["hello ${vars.a} ${vars.b.e}"]}] project = {"commands": commands, "vars": variables} with pytest.raises(ConfigValidationError): substitute_project_variables(project) @pytest.mark.parametrize( "greeting", [342, "everyone", "tout le monde", pytest.param("42", marks=pytest.mark.xfail)], ) def test_project_config_interpolation_override(greeting): variables = {"a": "world"} commands = [ {"name": "x", "script": ["hello 
${vars.a}"]}, ] overrides = {"vars.a": greeting} project = {"commands": commands, "vars": variables} with make_tempdir() as d: srsly.write_yaml(d / "project.yml", project) cfg = load_project_config(d, overrides=overrides) assert type(cfg) == dict assert type(cfg["commands"]) == list assert cfg["commands"][0]["script"][0] == f"hello {greeting}" def test_project_config_interpolation_env(): variables = {"a": 10} env_var = "SPACY_TEST_FOO" env_vars = {"foo": env_var} commands = [{"name": "x", "script": ["hello ${vars.a} ${env.foo}"]}] project = {"commands": commands, "vars": variables, "env": env_vars} with make_tempdir() as d: srsly.write_yaml(d / "project.yml", project) cfg = load_project_config(d) assert cfg["commands"][0]["script"][0] == "hello 10 " os.environ[env_var] = "123" with make_tempdir() as d: srsly.write_yaml(d / "project.yml", project) cfg = load_project_config(d) assert cfg["commands"][0]["script"][0] == "hello 10 123" @pytest.mark.parametrize( "args,expected", [ # fmt: off (["--x.foo", "10"], {"x.foo": 10}), (["--x.foo=10"], {"x.foo": 10}), (["--x.foo", "bar"], {"x.foo": "bar"}), (["--x.foo=bar"], {"x.foo": "bar"}), (["--x.foo", "--x.bar", "baz"], {"x.foo": True, "x.bar": "baz"}), (["--x.foo", "--x.bar=baz"], {"x.foo": True, "x.bar": "baz"}), (["--x.foo", "10.1", "--x.bar", "--x.baz", "false"], {"x.foo": 10.1, "x.bar": True, "x.baz": False}), (["--x.foo", "10.1", "--x.bar", "--x.baz=false"], {"x.foo": 10.1, "x.bar": True, "x.baz": False}) # fmt: on ], ) def test_parse_config_overrides(args, expected): assert parse_config_overrides(args) == expected @pytest.mark.parametrize("args", [["--foo"], ["--x.foo", "bar", "--baz"]]) def test_parse_config_overrides_invalid(args): with pytest.raises(NoSuchOption): parse_config_overrides(args) @pytest.mark.parametrize("args", [["--x.foo", "bar", "baz"], ["x.foo"]]) def test_parse_config_overrides_invalid_2(args): with pytest.raises(SystemExit): parse_config_overrides(args) def test_parse_cli_overrides(): overrides = "--x.foo bar --x.bar=12 --x.baz false --y.foo=hello" os.environ[ENV_VARS.CONFIG_OVERRIDES] = overrides result = parse_config_overrides([]) assert len(result) == 4 assert result["x.foo"] == "bar" assert result["x.bar"] == 12 assert result["x.baz"] is False assert result["y.foo"] == "hello" os.environ[ENV_VARS.CONFIG_OVERRIDES] = "--x" assert parse_config_overrides([], env_var=None) == {} with pytest.raises(SystemExit): parse_config_overrides([]) os.environ[ENV_VARS.CONFIG_OVERRIDES] = "hello world" with pytest.raises(SystemExit): parse_config_overrides([]) del os.environ[ENV_VARS.CONFIG_OVERRIDES] @pytest.mark.parametrize("lang", ["en", "nl"]) @pytest.mark.parametrize( "pipeline", [ ["tagger", "parser", "ner"], [], ["ner", "textcat", "sentencizer"], ["morphologizer", "spancat", "entity_linker"], ["spancat_singlelabel", "textcat_multilabel"], ], ) @pytest.mark.parametrize("optimize", ["efficiency", "accuracy"]) @pytest.mark.parametrize("pretraining", [True, False]) def test_init_config(lang, pipeline, optimize, pretraining): # TODO: add more tests and also check for GPU with transformers config = init_config( lang=lang, pipeline=pipeline, optimize=optimize, pretraining=pretraining, gpu=False, ) assert isinstance(config, Config) if pretraining: config["paths"]["raw_text"] = "my_data.jsonl" load_model_from_config(config, auto_fill=True) def test_model_recommendations(): for lang, data in RECOMMENDATIONS.items(): assert RecommendationSchema(**data) @pytest.mark.parametrize( "value", [ # fmt: off "parser,textcat,tagger", " parser, 
textcat ,tagger ", 'parser,textcat,tagger', ' parser, textcat ,tagger ', ' "parser"," textcat " ,"tagger "', " 'parser',' textcat ' ,'tagger '", '[parser,textcat,tagger]', '["parser","textcat","tagger"]', '[" parser" ,"textcat ", " tagger " ]', "[parser,textcat,tagger]", "[ parser, textcat , tagger]", "['parser','textcat','tagger']", "[' parser' , 'textcat', ' tagger ' ]", # fmt: on ], ) def test_string_to_list(value): assert string_to_list(value, intify=False) == ["parser", "textcat", "tagger"] @pytest.mark.parametrize( "value", [ # fmt: off "1,2,3", '[1,2,3]', '["1","2","3"]', '[" 1" ,"2 ", " 3 " ]', "[' 1' , '2', ' 3 ' ]", # fmt: on ], ) def test_string_to_list_intify(value): assert string_to_list(value, intify=False) == ["1", "2", "3"] assert string_to_list(value, intify=True) == [1, 2, 3] def test_download_compatibility(): spec = SpecifierSet("==" + about.__version__) spec.prereleases = False if about.__version__ in spec: model_name = "en_core_web_sm" compatibility = get_compatibility() version = get_version(model_name, compatibility) assert get_minor_version(about.__version__) == get_minor_version(version) def test_validate_compatibility_table(): spec = SpecifierSet("==" + about.__version__) spec.prereleases = False if about.__version__ in spec: model_pkgs, compat = get_model_pkgs() spacy_version = get_minor_version(about.__version__) current_compat = compat.get(spacy_version, {}) assert len(current_compat) > 0 assert "en_core_web_sm" in current_compat @pytest.mark.parametrize("component_name", ["ner", "textcat", "spancat", "tagger"]) def test_init_labels(component_name): nlp = Dutch() component = nlp.add_pipe(component_name) for label in ["T1", "T2", "T3", "T4"]: component.add_label(label) assert len(nlp.get_pipe(component_name).labels) == 4 with make_tempdir() as tmp_dir: _init_labels(nlp, tmp_dir) config = init_config( lang="nl", pipeline=[component_name], optimize="efficiency", gpu=False, ) config["initialize"]["components"][component_name] = { "labels": { "@readers": "spacy.read_labels.v1", "path": f"{tmp_dir}/{component_name}.json", } } nlp2 = load_model_from_config(config, auto_fill=True) assert len(nlp2.get_pipe(component_name).labels) == 0 nlp2.initialize() assert len(nlp2.get_pipe(component_name).labels) == 4 def test_get_third_party_dependencies(): # We can't easily test the detection of third-party packages here, but we # can at least make sure that the function and its importlib magic runs. 
nlp = Dutch() # Test with component factory based on Cython module nlp.add_pipe("tagger") assert get_third_party_dependencies(nlp.config) == [] # Test with legacy function nlp = Dutch() nlp.add_pipe( "textcat", config={ "model": { # Do not update from legacy architecture spacy.TextCatBOW.v1 "@architectures": "spacy.TextCatBOW.v1", "exclusive_classes": True, "ngram_size": 1, "no_output_layer": False, } }, ) assert get_third_party_dependencies(nlp.config) == [] # Test with lang-specific factory @Dutch.factory("third_party_test") def test_factory(nlp, name): return lambda x: x nlp.add_pipe("third_party_test") # Before #9674 this would throw an exception get_third_party_dependencies(nlp.config) @pytest.mark.parametrize( "parent,child,expected", [ ("/tmp", "/tmp", True), ("/tmp", "/", False), ("/tmp", "/tmp/subdir", True), ("/tmp", "/tmpdir", False), ("/tmp", "/tmp/subdir/..", True), ("/tmp", "/tmp/..", False), ], ) def test_is_subpath_of(parent, child, expected): assert is_subpath_of(parent, child) == expected @pytest.mark.slow @pytest.mark.parametrize( "factory_name,pipe_name", [ ("ner", "ner"), ("ner", "my_ner"), ("spancat", "spancat"), ("spancat", "my_spancat"), ], ) def test_get_labels_from_model(factory_name, pipe_name): labels = ("A", "B") nlp = English() pipe = nlp.add_pipe(factory_name, name=pipe_name) for label in labels: pipe.add_label(label) nlp.initialize() assert nlp.get_pipe(pipe_name).labels == labels if factory_name == "spancat": assert _get_labels_from_spancat(nlp)[pipe.key] == set(labels) else: assert _get_labels_from_model(nlp, factory_name) == set(labels) def test_permitted_package_names(): # https://www.python.org/dev/peps/pep-0426/#name assert _is_permitted_package_name("Meine_Bäume") == False assert _is_permitted_package_name("_package") == False assert _is_permitted_package_name("package_") == False assert _is_permitted_package_name(".package") == False assert _is_permitted_package_name("package.") == False assert _is_permitted_package_name("-package") == False assert _is_permitted_package_name("package-") == False def test_debug_data_compile_gold(): nlp = English() pred = Doc(nlp.vocab, words=["Token", ".", "New", "York", "City"]) ref = Doc( nlp.vocab, words=["Token", ".", "New York City"], sent_starts=[True, False, True], ents=["O", "O", "B-ENT"], ) eg = Example(pred, ref) data = _compile_gold([eg], ["ner"], nlp, True) assert data["boundary_cross_ents"] == 0 pred = Doc(nlp.vocab, words=["Token", ".", "New", "York", "City"]) ref = Doc( nlp.vocab, words=["Token", ".", "New York City"], sent_starts=[True, False, True], ents=["O", "B-ENT", "I-ENT"], ) eg = Example(pred, ref) data = _compile_gold([eg], ["ner"], nlp, True) assert data["boundary_cross_ents"] == 1 @pytest.mark.parametrize("component_name", ["spancat", "spancat_singlelabel"]) def test_debug_data_compile_gold_for_spans(component_name): nlp = English() spans_key = "sc" pred = Doc(nlp.vocab, words=["Welcome", "to", "the", "Bank", "of", "China", "."]) pred.spans[spans_key] = [Span(pred, 3, 6, "ORG"), Span(pred, 5, 6, "GPE")] ref = Doc(nlp.vocab, words=["Welcome", "to", "the", "Bank", "of", "China", "."]) ref.spans[spans_key] = [Span(ref, 3, 6, "ORG"), Span(ref, 5, 6, "GPE")] eg = Example(pred, ref) data = _compile_gold([eg], [component_name], nlp, True) assert data["spancat"][spans_key] == Counter({"ORG": 1, "GPE": 1}) assert data["spans_length"][spans_key] == {"ORG": [3], "GPE": [1]} assert data["spans_per_type"][spans_key] == { "ORG": [Span(ref, 3, 6, "ORG")], "GPE": [Span(ref, 5, 6, "GPE")], } assert 
data["sb_per_type"][spans_key] == { "ORG": {"start": [ref[2:3]], "end": [ref[6:7]]}, "GPE": {"start": [ref[4:5]], "end": [ref[6:7]]}, } def test_frequency_distribution_is_correct(): nlp = English() docs = [ Doc(nlp.vocab, words=["Bank", "of", "China"]), Doc(nlp.vocab, words=["China"]), ] expected = Counter({"china": 0.5, "bank": 0.25, "of": 0.25}) freq_distribution = _get_distribution(docs, normalize=True) assert freq_distribution == expected def test_kl_divergence_computation_is_correct(): p = Counter({"a": 0.5, "b": 0.25}) q = Counter({"a": 0.25, "b": 0.50, "c": 0.15, "d": 0.10}) result = _get_kl_divergence(p, q) expected = 0.1733 assert math.isclose(result, expected, rel_tol=1e-3) def test_get_span_characteristics_return_value(): nlp = English() spans_key = "sc" pred = Doc(nlp.vocab, words=["Welcome", "to", "the", "Bank", "of", "China", "."]) pred.spans[spans_key] = [Span(pred, 3, 6, "ORG"), Span(pred, 5, 6, "GPE")] ref = Doc(nlp.vocab, words=["Welcome", "to", "the", "Bank", "of", "China", "."]) ref.spans[spans_key] = [Span(ref, 3, 6, "ORG"), Span(ref, 5, 6, "GPE")] eg = Example(pred, ref) examples = [eg] data = _compile_gold(examples, ["spancat"], nlp, True) span_characteristics = _get_span_characteristics( examples=examples, compiled_gold=data, spans_key=spans_key ) assert {"sd", "bd", "lengths"}.issubset(span_characteristics.keys()) assert span_characteristics["min_length"] == 1 assert span_characteristics["max_length"] == 3 def test_ensure_print_span_characteristics_wont_fail(): """Test if interface between two methods aren't destroyed if refactored""" nlp = English() spans_key = "sc" pred = Doc(nlp.vocab, words=["Welcome", "to", "the", "Bank", "of", "China", "."]) pred.spans[spans_key] = [Span(pred, 3, 6, "ORG"), Span(pred, 5, 6, "GPE")] ref = Doc(nlp.vocab, words=["Welcome", "to", "the", "Bank", "of", "China", "."]) ref.spans[spans_key] = [Span(ref, 3, 6, "ORG"), Span(ref, 5, 6, "GPE")] eg = Example(pred, ref) examples = [eg] data = _compile_gold(examples, ["spancat"], nlp, True) span_characteristics = _get_span_characteristics( examples=examples, compiled_gold=data, spans_key=spans_key ) _print_span_characteristics(span_characteristics) @pytest.mark.parametrize("threshold", [70, 80, 85, 90, 95]) def test_span_length_freq_dist_threshold_must_be_correct(threshold): sample_span_lengths = { "span_type_1": [1, 4, 4, 5], "span_type_2": [5, 3, 3, 2], "span_type_3": [3, 1, 3, 3], } span_freqs = _get_spans_length_freq_dist(sample_span_lengths, threshold) assert sum(span_freqs.values()) >= threshold def test_span_length_freq_dist_output_must_be_correct(): sample_span_lengths = { "span_type_1": [1, 4, 4, 5], "span_type_2": [5, 3, 3, 2], "span_type_3": [3, 1, 3, 3], } threshold = 90 span_freqs = _get_spans_length_freq_dist(sample_span_lengths, threshold) assert sum(span_freqs.values()) >= threshold assert list(span_freqs.keys()) == [3, 1, 4, 5, 2] def test_applycli_empty_dir(): with make_tempdir() as data_path: output = data_path / "test.spacy" apply(data_path, output, "blank:en", "text", 1, 1) def test_applycli_docbin(): with make_tempdir() as data_path: output = data_path / "testout.spacy" nlp = spacy.blank("en") doc = nlp("testing apply cli.") # test empty DocBin case docbin = DocBin() docbin.to_disk(data_path / "testin.spacy") apply(data_path, output, "blank:en", "text", 1, 1) docbin.add(doc) docbin.to_disk(data_path / "testin.spacy") apply(data_path, output, "blank:en", "text", 1, 1) def test_applycli_jsonl(): with make_tempdir() as data_path: output = data_path / "testout.spacy" data = 
[{"field": "Testing apply cli.", "key": 234}] data2 = [{"field": "234"}] srsly.write_jsonl(data_path / "test.jsonl", data) apply(data_path, output, "blank:en", "field", 1, 1) srsly.write_jsonl(data_path / "test2.jsonl", data2) apply(data_path, output, "blank:en", "field", 1, 1) def test_applycli_txt(): with make_tempdir() as data_path: output = data_path / "testout.spacy" with open(data_path / "test.foo", "w") as ftest: ftest.write("Testing apply cli.") apply(data_path, output, "blank:en", "text", 1, 1) def test_applycli_mixed(): with make_tempdir() as data_path: output = data_path / "testout.spacy" text = "Testing apply cli" nlp = spacy.blank("en") doc = nlp(text) jsonl_data = [{"text": text}] srsly.write_jsonl(data_path / "test.jsonl", jsonl_data) docbin = DocBin() docbin.add(doc) docbin.to_disk(data_path / "testin.spacy") with open(data_path / "test.txt", "w") as ftest: ftest.write(text) apply(data_path, output, "blank:en", "text", 1, 1) # Check whether it worked result = list(DocBin().from_disk(output).get_docs(nlp.vocab)) assert len(result) == 3 for doc in result: assert doc.text == text def test_applycli_user_data(): Doc.set_extension("ext", default=0) val = ("ext", 0) with make_tempdir() as data_path: output = data_path / "testout.spacy" nlp = spacy.blank("en") doc = nlp("testing apply cli.") doc._.ext = val docbin = DocBin(store_user_data=True) docbin.add(doc) docbin.to_disk(data_path / "testin.spacy") apply(data_path, output, "blank:en", "", 1, 1) result = list(DocBin().from_disk(output).get_docs(nlp.vocab)) assert result[0]._.ext == val def test_local_remote_storage(): with make_tempdir() as d: filename = "a.txt" content_hashes = ("aaaa", "cccc", "bbbb") for i, content_hash in enumerate(content_hashes): # make sure that each subsequent file has a later timestamp if i > 0: time.sleep(1) content = f"{content_hash} content" loc_file = d / "root" / filename if not loc_file.parent.exists(): loc_file.parent.mkdir(parents=True) with loc_file.open(mode="w") as file_: file_.write(content) # push first version to remote storage remote = RemoteStorage(d / "root", str(d / "remote")) remote.push(filename, "aaaa", content_hash) # retrieve with full hashes loc_file.unlink() remote.pull(filename, command_hash="aaaa", content_hash=content_hash) with loc_file.open(mode="r") as file_: assert file_.read() == content # retrieve with command hash loc_file.unlink() remote.pull(filename, command_hash="aaaa") with loc_file.open(mode="r") as file_: assert file_.read() == content # retrieve with content hash loc_file.unlink() remote.pull(filename, content_hash=content_hash) with loc_file.open(mode="r") as file_: assert file_.read() == content # retrieve with no hashes loc_file.unlink() remote.pull(filename) with loc_file.open(mode="r") as file_: assert file_.read() == content def test_local_remote_storage_pull_missing(): # pulling from a non-existent remote pulls nothing gracefully with make_tempdir() as d: filename = "a.txt" remote = RemoteStorage(d / "root", str(d / "remote")) assert remote.pull(filename, command_hash="aaaa") is None assert remote.pull(filename) is None def test_cli_find_threshold(capsys): def make_examples(nlp: Language) -> List[Example]: docs: List[Example] = [] for t in [ ( "I am angry and confused in the Bank of America.", { "cats": {"ANGRY": 1.0, "CONFUSED": 1.0, "HAPPY": 0.0}, "spans": {"sc": [(31, 46, "ORG")]}, }, ), ( "I am confused but happy in New York.", { "cats": {"ANGRY": 0.0, "CONFUSED": 1.0, "HAPPY": 1.0}, "spans": {"sc": [(27, 35, "GPE")]}, }, ), ]: doc = 
nlp.make_doc(t[0]) docs.append(Example.from_dict(doc, t[1])) return docs def init_nlp( components: Tuple[Tuple[str, Dict[str, Any]], ...] = () ) -> Tuple[Language, List[Example]]: new_nlp = English() new_nlp.add_pipe( # type: ignore factory_name="textcat_multilabel", name="tc_multi", config={"threshold": 0.9}, ) # Append additional components to pipeline. for cfn, comp_config in components: new_nlp.add_pipe(cfn, config=comp_config) new_examples = make_examples(new_nlp) new_nlp.initialize(get_examples=lambda: new_examples) for i in range(5): new_nlp.update(new_examples) return new_nlp, new_examples with make_tempdir() as docs_dir: # Check whether find_threshold() identifies lowest threshold above 0 as (first) ideal threshold, as this matches # the current model behavior with the examples above. This can break once the model behavior changes and serves # mostly as a smoke test. nlp, examples = init_nlp() DocBin(docs=[example.reference for example in examples]).to_disk( docs_dir / "docs.spacy" ) with make_tempdir() as nlp_dir: nlp.to_disk(nlp_dir) best_threshold, best_score, res = find_threshold( model=nlp_dir, data_path=docs_dir / "docs.spacy", pipe_name="tc_multi", threshold_key="threshold", scores_key="cats_macro_f", silent=True, ) assert best_score == max(res.values()) assert res[1.0] == 0.0 # Test with spancat. nlp, _ = init_nlp((("spancat", {}),)) with make_tempdir() as nlp_dir: nlp.to_disk(nlp_dir) best_threshold, best_score, res = find_threshold( model=nlp_dir, data_path=docs_dir / "docs.spacy", pipe_name="spancat", threshold_key="threshold", scores_key="spans_sc_f", silent=True, ) assert best_score == max(res.values()) assert res[1.0] == 0.0 # Having multiple textcat_multilabel components should work, since the name has to be specified. nlp, _ = init_nlp((("textcat_multilabel", {}),)) with make_tempdir() as nlp_dir: nlp.to_disk(nlp_dir) assert find_threshold( model=nlp_dir, data_path=docs_dir / "docs.spacy", pipe_name="tc_multi", threshold_key="threshold", scores_key="cats_macro_f", silent=True, ) # Specifying the name of an non-existing pipe should fail. 
nlp, _ = init_nlp() with make_tempdir() as nlp_dir: nlp.to_disk(nlp_dir) with pytest.raises(AttributeError): find_threshold( model=nlp_dir, data_path=docs_dir / "docs.spacy", pipe_name="_", threshold_key="threshold", scores_key="cats_macro_f", silent=True, ) @pytest.mark.filterwarnings("ignore::DeprecationWarning") @pytest.mark.parametrize( "reqs,output", [ [ """ spacy # comment thinc""", (False, False), ], [ """# comment --some-flag spacy""", (False, False), ], [ """# comment --some-flag spacy; python_version >= '3.6'""", (False, False), ], [ """# comment spacyunknowndoesnotexist12345""", (True, False), ], ], ) def test_project_check_requirements(reqs, output): import pkg_resources # excessive guard against unlikely package name try: pkg_resources.require("spacyunknowndoesnotexist12345") except pkg_resources.DistributionNotFound: assert output == _check_requirements([req.strip() for req in reqs.split("\n")]) def test_upload_download_local_file(): with make_tempdir() as d1, make_tempdir() as d2: filename = "f.txt" content = "content" local_file = d1 / filename remote_file = d2 / filename with local_file.open(mode="w") as file_: file_.write(content) upload_file(local_file, remote_file) local_file.unlink() download_file(remote_file, local_file) with local_file.open(mode="r") as file_: assert file_.read() == content def test_walk_directory(): with make_tempdir() as d: files = [ "data1.iob", "data2.iob", "data3.json", "data4.conll", "data5.conll", "data6.conll", "data7.txt", ] for f in files: Path(d / f).touch() assert (len(walk_directory(d))) == 7 assert (len(walk_directory(d, suffix=None))) == 7 assert (len(walk_directory(d, suffix="json"))) == 1 assert (len(walk_directory(d, suffix="iob"))) == 2 assert (len(walk_directory(d, suffix="conll"))) == 3 assert (len(walk_directory(d, suffix="pdf"))) == 0 def test_debug_data_trainable_lemmatizer_basic(): examples = [ ("She likes green eggs", {"lemmas": ["she", "like", "green", "egg"]}), ("Eat blue ham", {"lemmas": ["eat", "blue", "ham"]}), ] nlp = Language() train_examples = [] for t in examples: train_examples.append(Example.from_dict(nlp.make_doc(t[0]), t[1])) data = _compile_gold(train_examples, ["trainable_lemmatizer"], nlp, True) # ref test_edit_tree_lemmatizer::test_initialize_from_labels # this results in 4 trees assert len(data["lemmatizer_trees"]) == 4 def test_debug_data_trainable_lemmatizer_partial(): partial_examples = [ # partial annotation ("She likes green eggs", {"lemmas": ["", "like", "green", ""]}), # misaligned partial annotation ( "He hates green eggs", { "words": ["He", "hat", "es", "green", "eggs"], "lemmas": ["", "hat", "e", "green", ""], }, ), ] nlp = Language() train_examples = [] for t in partial_examples: train_examples.append(Example.from_dict(nlp.make_doc(t[0]), t[1])) data = _compile_gold(train_examples, ["trainable_lemmatizer"], nlp, True) assert data["partial_lemma_annotations"] == 2 def test_debug_data_trainable_lemmatizer_low_cardinality(): low_cardinality_examples = [ ("She likes green eggs", {"lemmas": ["no", "no", "no", "no"]}), ("Eat blue ham", {"lemmas": ["no", "no", "no"]}), ] nlp = Language() train_examples = [] for t in low_cardinality_examples: train_examples.append(Example.from_dict(nlp.make_doc(t[0]), t[1])) data = _compile_gold(train_examples, ["trainable_lemmatizer"], nlp, True) assert data["n_low_cardinality_lemmas"] == 2 def test_debug_data_trainable_lemmatizer_not_annotated(): unannotated_examples = [ ("She likes green eggs", {}), ("Eat blue ham", {}), ] nlp = Language() train_examples = [] for t in 
unannotated_examples:
        train_examples.append(Example.from_dict(nlp.make_doc(t[0]), t[1]))
    data = _compile_gold(train_examples, ["trainable_lemmatizer"], nlp, True)
    assert data["no_lemma_annotations"] == 2
file_length: 49,789
avg_line_length: 35.799704
max_line_length: 229
extension_type: py

repo: spaCy
file: spaCy-master/spacy/tests/test_cli_app.py
import os from pathlib import Path import pytest import srsly from typer.testing import CliRunner from spacy.cli._util import app, get_git_version from spacy.tokens import Doc, DocBin from .util import make_tempdir, normalize_whitespace def has_git(): try: get_git_version() return True except RuntimeError: return False def test_convert_auto(): with make_tempdir() as d_in, make_tempdir() as d_out: for f in ["data1.iob", "data2.iob", "data3.iob"]: Path(d_in / f).touch() # ensure that "automatic" suffix detection works result = CliRunner().invoke(app, ["convert", str(d_in), str(d_out)]) assert "Generated output file" in result.stdout out_files = os.listdir(d_out) assert len(out_files) == 3 assert "data1.spacy" in out_files assert "data2.spacy" in out_files assert "data3.spacy" in out_files def test_convert_auto_conflict(): with make_tempdir() as d_in, make_tempdir() as d_out: for f in ["data1.iob", "data2.iob", "data3.json"]: Path(d_in / f).touch() # ensure that "automatic" suffix detection warns when there are different file types result = CliRunner().invoke(app, ["convert", str(d_in), str(d_out)]) assert "All input files must be same type" in result.stdout out_files = os.listdir(d_out) assert len(out_files) == 0 def test_benchmark_accuracy_alias(): # Verify that the `evaluate` alias works correctly. result_benchmark = CliRunner().invoke(app, ["benchmark", "accuracy", "--help"]) result_evaluate = CliRunner().invoke(app, ["evaluate", "--help"]) assert normalize_whitespace(result_benchmark.stdout) == normalize_whitespace( result_evaluate.stdout.replace("spacy evaluate", "spacy benchmark accuracy") ) def test_debug_data_trainable_lemmatizer_cli(en_vocab): train_docs = [ Doc(en_vocab, words=["I", "like", "cats"], lemmas=["I", "like", "cat"]), Doc( en_vocab, words=["Dogs", "are", "great", "too"], lemmas=["dog", "be", "great", "too"], ), ] dev_docs = [ Doc(en_vocab, words=["Cats", "are", "cute"], lemmas=["cat", "be", "cute"]), Doc(en_vocab, words=["Pets", "are", "great"], lemmas=["pet", "be", "great"]), ] with make_tempdir() as d_in: train_bin = DocBin(docs=train_docs) train_bin.to_disk(d_in / "train.spacy") dev_bin = DocBin(docs=dev_docs) dev_bin.to_disk(d_in / "dev.spacy") # `debug data` requires an input pipeline config CliRunner().invoke( app, [ "init", "config", f"{d_in}/config.cfg", "--lang", "en", "--pipeline", "trainable_lemmatizer", ], ) result_debug_data = CliRunner().invoke( app, [ "debug", "data", f"{d_in}/config.cfg", "--paths.train", f"{d_in}/train.spacy", "--paths.dev", f"{d_in}/dev.spacy", ], ) # Instead of checking specific wording of the output, which may change, # we'll check that this section of the debug output is present. 
assert "= Trainable Lemmatizer =" in result_debug_data.stdout # project tests CFG_FILE = "myconfig.cfg" SAMPLE_PROJECT = { "title": "Sample project", "description": "This is a project for testing", "assets": [ { "dest": "assets/spacy-readme.md", "url": "https://github.com/explosion/spaCy/raw/dec81508d28b47f09a06203c472b37f00db6c869/README.md", "checksum": "411b2c89ccf34288fae8ed126bf652f7", }, { "dest": "assets/citation.cff", "url": "https://github.com/explosion/spaCy/raw/master/CITATION.cff", "checksum": "c996bfd80202d480eb2e592369714e5e", "extra": True, }, ], "commands": [ { "name": "ok", "help": "print ok", "script": ["python -c \"print('okokok')\""], }, { "name": "create", "help": "make a file", "script": [f"python -m spacy init config {CFG_FILE}"], "outputs": [f"{CFG_FILE}"], }, ], } SAMPLE_PROJECT_TEXT = srsly.yaml_dumps(SAMPLE_PROJECT) @pytest.fixture def project_dir(): with make_tempdir() as pdir: (pdir / "project.yml").write_text(SAMPLE_PROJECT_TEXT) yield pdir def test_project_document(project_dir): readme_path = project_dir / "README.md" assert not readme_path.exists(), "README already exists" result = CliRunner().invoke( app, ["project", "document", str(project_dir), "-o", str(readme_path)] ) assert result.exit_code == 0 assert readme_path.is_file() text = readme_path.read_text("utf-8") assert SAMPLE_PROJECT["description"] in text def test_project_assets(project_dir): asset_dir = project_dir / "assets" assert not asset_dir.exists(), "Assets dir is already present" result = CliRunner().invoke(app, ["project", "assets", str(project_dir)]) assert result.exit_code == 0 assert (asset_dir / "spacy-readme.md").is_file(), "Assets not downloaded" # check that extras work result = CliRunner().invoke(app, ["project", "assets", "--extra", str(project_dir)]) assert result.exit_code == 0 assert (asset_dir / "citation.cff").is_file(), "Extras not downloaded" def test_project_run(project_dir): # make sure dry run works test_file = project_dir / CFG_FILE result = CliRunner().invoke( app, ["project", "run", "--dry", "create", str(project_dir)] ) assert result.exit_code == 0 assert not test_file.is_file() result = CliRunner().invoke(app, ["project", "run", "create", str(project_dir)]) assert result.exit_code == 0 assert test_file.is_file() result = CliRunner().invoke(app, ["project", "run", "ok", str(project_dir)]) assert result.exit_code == 0 assert "okokok" in result.stdout @pytest.mark.skipif(not has_git(), reason="git not installed") @pytest.mark.parametrize( "options", [ "", # "--sparse", "--branch v3", "--repo https://github.com/explosion/projects --branch v3", ], ) def test_project_clone(options): with make_tempdir() as workspace: out = workspace / "project" target = "benchmarks/ner_conll03" if not options: options = [] else: options = options.split() result = CliRunner().invoke( app, ["project", "clone", target, *options, str(out)] ) assert result.exit_code == 0 assert (out / "README.md").is_file() def test_project_push_pull(project_dir): proj = dict(SAMPLE_PROJECT) remote = "xyz" with make_tempdir() as remote_dir: proj["remotes"] = {remote: str(remote_dir)} proj_text = srsly.yaml_dumps(proj) (project_dir / "project.yml").write_text(proj_text) test_file = project_dir / CFG_FILE result = CliRunner().invoke(app, ["project", "run", "create", str(project_dir)]) assert result.exit_code == 0 assert test_file.is_file() result = CliRunner().invoke(app, ["project", "push", remote, str(project_dir)]) assert result.exit_code == 0 test_file.unlink() assert not test_file.exists() result = 
CliRunner().invoke(app, ["project", "pull", remote, str(project_dir)])
        assert result.exit_code == 0
        assert test_file.is_file()
file_length: 7,767
avg_line_length: 31.915254
max_line_length: 111
extension_type: py

repo: spaCy
file: spaCy-master/spacy/tests/test_displacy.py
import numpy import pytest from spacy import displacy from spacy.displacy.render import DependencyRenderer, EntityRenderer from spacy.lang.en import English from spacy.lang.fa import Persian from spacy.tokens import Doc, Span @pytest.mark.issue(2361) def test_issue2361(de_vocab): """Test if < is escaped when rendering""" chars = ("&lt;", "&gt;", "&amp;", "&quot;") words = ["<", ">", "&", '"'] doc = Doc(de_vocab, words=words, deps=["dep"] * len(words)) html = displacy.render(doc) for char in chars: assert char in html @pytest.mark.issue(2728) def test_issue2728(en_vocab): """Test that displaCy ENT visualizer escapes HTML correctly.""" doc = Doc(en_vocab, words=["test", "<RELEASE>", "test"]) doc.ents = [Span(doc, 0, 1, label="TEST")] html = displacy.render(doc, style="ent") assert "&lt;RELEASE&gt;" in html doc.ents = [Span(doc, 1, 2, label="TEST")] html = displacy.render(doc, style="ent") assert "&lt;RELEASE&gt;" in html @pytest.mark.issue(3288) def test_issue3288(en_vocab): """Test that retokenization works correctly via displaCy when punctuation is merged onto the preceeding token and tensor is resized.""" words = ["Hello", "World", "!", "When", "is", "this", "breaking", "?"] heads = [1, 1, 1, 4, 4, 6, 4, 4] deps = ["intj", "ROOT", "punct", "advmod", "ROOT", "det", "nsubj", "punct"] doc = Doc(en_vocab, words=words, heads=heads, deps=deps) doc.tensor = numpy.zeros((len(words), 96), dtype="float32") displacy.render(doc) @pytest.mark.issue(3531) def test_issue3531(): """Test that displaCy renderer doesn't require "settings" key.""" example_dep = { "words": [ {"text": "But", "tag": "CCONJ"}, {"text": "Google", "tag": "PROPN"}, {"text": "is", "tag": "VERB"}, {"text": "starting", "tag": "VERB"}, {"text": "from", "tag": "ADP"}, {"text": "behind.", "tag": "ADV"}, ], "arcs": [ {"start": 0, "end": 3, "label": "cc", "dir": "left"}, {"start": 1, "end": 3, "label": "nsubj", "dir": "left"}, {"start": 2, "end": 3, "label": "aux", "dir": "left"}, {"start": 3, "end": 4, "label": "prep", "dir": "right"}, {"start": 4, "end": 5, "label": "pcomp", "dir": "right"}, ], } example_ent = { "text": "But Google is starting from behind.", "ents": [{"start": 4, "end": 10, "label": "ORG"}], } dep_html = displacy.render(example_dep, style="dep", manual=True) assert dep_html ent_html = displacy.render(example_ent, style="ent", manual=True) assert ent_html @pytest.mark.issue(3882) def test_issue3882(en_vocab): """Test that displaCy doesn't serialize the doc.user_data when making a copy of the Doc. 
""" doc = Doc(en_vocab, words=["Hello", "world"], deps=["dep", "dep"]) doc.user_data["test"] = set() displacy.parse_deps(doc) @pytest.mark.issue(5447) def test_issue5447(): """Test that overlapping arcs get separate levels, unless they're identical.""" renderer = DependencyRenderer() words = [ {"text": "This", "tag": "DT"}, {"text": "is", "tag": "VBZ"}, {"text": "a", "tag": "DT"}, {"text": "sentence.", "tag": "NN"}, ] arcs = [ {"start": 0, "end": 1, "label": "nsubj", "dir": "left"}, {"start": 2, "end": 3, "label": "det", "dir": "left"}, {"start": 2, "end": 3, "label": "overlap", "dir": "left"}, {"end": 3, "label": "overlap", "start": 2, "dir": "left"}, {"start": 1, "end": 3, "label": "attr", "dir": "left"}, ] renderer.render([{"words": words, "arcs": arcs}]) assert renderer.highest_level == 3 @pytest.mark.issue(5838) def test_issue5838(): # Displacy's EntityRenderer break line # not working after last entity sample_text = "First line\nSecond line, with ent\nThird line\nFourth line\n" nlp = English() doc = nlp(sample_text) doc.ents = [Span(doc, 7, 8, label="test")] html = displacy.render(doc, style="ent") found = html.count("</br>") assert found == 4 def test_displacy_parse_spans(en_vocab): """Test that spans on a Doc are converted into displaCy's format.""" doc = Doc(en_vocab, words=["Welcome", "to", "the", "Bank", "of", "China"]) doc.spans["sc"] = [Span(doc, 3, 6, "ORG"), Span(doc, 5, 6, "GPE")] spans = displacy.parse_spans(doc) assert isinstance(spans, dict) assert spans["text"] == "Welcome to the Bank of China " assert spans["spans"] == [ { "start": 15, "end": 28, "start_token": 3, "end_token": 6, "label": "ORG", "kb_id": "", "kb_url": "#", }, { "start": 23, "end": 28, "start_token": 5, "end_token": 6, "label": "GPE", "kb_id": "", "kb_url": "#", }, ] def test_displacy_parse_spans_with_kb_id_options(en_vocab): """Test that spans with kb_id on a Doc are converted into displaCy's format""" doc = Doc(en_vocab, words=["Welcome", "to", "the", "Bank", "of", "China"]) doc.spans["sc"] = [ Span(doc, 3, 6, "ORG", kb_id="Q790068"), Span(doc, 5, 6, "GPE", kb_id="Q148"), ] spans = displacy.parse_spans( doc, {"kb_url_template": "https://wikidata.org/wiki/{}"} ) assert isinstance(spans, dict) assert spans["text"] == "Welcome to the Bank of China " assert spans["spans"] == [ { "start": 15, "end": 28, "start_token": 3, "end_token": 6, "label": "ORG", "kb_id": "Q790068", "kb_url": "https://wikidata.org/wiki/Q790068", }, { "start": 23, "end": 28, "start_token": 5, "end_token": 6, "label": "GPE", "kb_id": "Q148", "kb_url": "https://wikidata.org/wiki/Q148", }, ] def test_displacy_parse_spans_different_spans_key(en_vocab): """Test that spans in a different spans key will be parsed""" doc = Doc(en_vocab, words=["Welcome", "to", "the", "Bank", "of", "China"]) doc.spans["sc"] = [Span(doc, 3, 6, "ORG"), Span(doc, 5, 6, "GPE")] doc.spans["custom"] = [Span(doc, 3, 6, "BANK")] spans = displacy.parse_spans(doc, options={"spans_key": "custom"}) assert isinstance(spans, dict) assert spans["text"] == "Welcome to the Bank of China " assert spans["spans"] == [ { "start": 15, "end": 28, "start_token": 3, "end_token": 6, "label": "BANK", "kb_id": "", "kb_url": "#", } ] def test_displacy_parse_empty_spans_key(en_vocab): """Test that having an unset spans key doesn't raise an error""" doc = Doc(en_vocab, words=["Welcome", "to", "the", "Bank", "of", "China"]) doc.spans["custom"] = [Span(doc, 3, 6, "BANK")] with pytest.warns(UserWarning, match="W117"): spans = displacy.parse_spans(doc) assert isinstance(spans, dict) def 
test_displacy_parse_ents(en_vocab): """Test that named entities on a Doc are converted into displaCy's format.""" doc = Doc(en_vocab, words=["But", "Google", "is", "starting", "from", "behind"]) doc.ents = [Span(doc, 1, 2, label=doc.vocab.strings["ORG"])] ents = displacy.parse_ents(doc) assert isinstance(ents, dict) assert ents["text"] == "But Google is starting from behind " assert ents["ents"] == [ {"start": 4, "end": 10, "label": "ORG", "kb_id": "", "kb_url": "#"} ] doc.ents = [Span(doc, 1, 2, label=doc.vocab.strings["ORG"], kb_id="Q95")] ents = displacy.parse_ents(doc) assert isinstance(ents, dict) assert ents["text"] == "But Google is starting from behind " assert ents["ents"] == [ {"start": 4, "end": 10, "label": "ORG", "kb_id": "Q95", "kb_url": "#"} ] def test_displacy_parse_ents_with_kb_id_options(en_vocab): """Test that named entities with kb_id on a Doc are converted into displaCy's format.""" doc = Doc(en_vocab, words=["But", "Google", "is", "starting", "from", "behind"]) doc.ents = [Span(doc, 1, 2, label=doc.vocab.strings["ORG"], kb_id="Q95")] ents = displacy.parse_ents( doc, {"kb_url_template": "https://www.wikidata.org/wiki/{}"} ) assert isinstance(ents, dict) assert ents["text"] == "But Google is starting from behind " assert ents["ents"] == [ { "start": 4, "end": 10, "label": "ORG", "kb_id": "Q95", "kb_url": "https://www.wikidata.org/wiki/Q95", } ] def test_displacy_parse_deps(en_vocab): """Test that deps and tags on a Doc are converted into displaCy's format.""" words = ["This", "is", "a", "sentence"] heads = [1, 1, 3, 1] pos = ["DET", "VERB", "DET", "NOUN"] tags = ["DT", "VBZ", "DT", "NN"] deps = ["nsubj", "ROOT", "det", "attr"] doc = Doc(en_vocab, words=words, heads=heads, pos=pos, tags=tags, deps=deps) deps = displacy.parse_deps(doc) assert isinstance(deps, dict) assert deps["words"] == [ {"lemma": None, "text": words[0], "tag": pos[0]}, {"lemma": None, "text": words[1], "tag": pos[1]}, {"lemma": None, "text": words[2], "tag": pos[2]}, {"lemma": None, "text": words[3], "tag": pos[3]}, ] assert deps["arcs"] == [ {"start": 0, "end": 1, "label": "nsubj", "dir": "left"}, {"start": 2, "end": 3, "label": "det", "dir": "left"}, {"start": 1, "end": 3, "label": "attr", "dir": "right"}, ] # Test that displacy.parse_deps converts Span to Doc deps = displacy.parse_deps(doc[:]) assert isinstance(deps, dict) assert deps["words"] == [ {"lemma": None, "text": words[0], "tag": pos[0]}, {"lemma": None, "text": words[1], "tag": pos[1]}, {"lemma": None, "text": words[2], "tag": pos[2]}, {"lemma": None, "text": words[3], "tag": pos[3]}, ] assert deps["arcs"] == [ {"start": 0, "end": 1, "label": "nsubj", "dir": "left"}, {"start": 2, "end": 3, "label": "det", "dir": "left"}, {"start": 1, "end": 3, "label": "attr", "dir": "right"}, ] def test_displacy_invalid_arcs(): renderer = DependencyRenderer() words = [{"text": "This", "tag": "DET"}, {"text": "is", "tag": "VERB"}] arcs = [ {"start": 0, "end": 1, "label": "nsubj", "dir": "left"}, {"start": -1, "end": 2, "label": "det", "dir": "left"}, ] with pytest.raises(ValueError): renderer.render([{"words": words, "arcs": arcs}]) def test_displacy_spans(en_vocab): """Test that displaCy can render Spans.""" doc = Doc(en_vocab, words=["But", "Google", "is", "starting", "from", "behind"]) doc.ents = [Span(doc, 1, 2, label=doc.vocab.strings["ORG"])] html = displacy.render(doc[1:4], style="ent") assert html.startswith("<div") def test_displacy_raises_for_wrong_type(en_vocab): with pytest.raises(ValueError): displacy.render("hello world") def 
test_displacy_rtl(): # Source: http://www.sobhe.ir/hazm/ – is this correct? words = ["ما", "بسیار", "کتاب", "می\u200cخوانیم"] # These are (likely) wrong, but it's just for testing pos = ["PRO", "ADV", "N_PL", "V_SUB"] # needs to match lang.fa.tag_map deps = ["foo", "bar", "foo", "baz"] heads = [1, 0, 3, 1] nlp = Persian() doc = Doc(nlp.vocab, words=words, tags=pos, heads=heads, deps=deps) doc.ents = [Span(doc, 1, 3, label="TEST")] html = displacy.render(doc, page=True, style="dep") assert "direction: rtl" in html assert 'direction="rtl"' in html assert f'lang="{nlp.lang}"' in html html = displacy.render(doc, page=True, style="ent") assert "direction: rtl" in html assert f'lang="{nlp.lang}"' in html def test_displacy_render_wrapper(en_vocab): """Test that displaCy accepts custom rendering wrapper.""" def wrapper(html): return "TEST" + html + "TEST" displacy.set_render_wrapper(wrapper) doc = Doc(en_vocab, words=["But", "Google", "is", "starting", "from", "behind"]) doc.ents = [Span(doc, 1, 2, label=doc.vocab.strings["ORG"])] html = displacy.render(doc, style="ent") assert html.startswith("TEST<div") assert html.endswith("/div>TEST") # Restore displacy.set_render_wrapper(lambda html: html) def test_displacy_options_case(): ents = ["foo", "BAR"] colors = {"FOO": "red", "bar": "green"} renderer = EntityRenderer({"ents": ents, "colors": colors}) text = "abcd" labels = ["foo", "bar", "FOO", "BAR"] spans = [{"start": i, "end": i + 1, "label": labels[i]} for i in range(len(text))] result = renderer.render_ents("abcde", spans, None).split("\n\n") assert "red" in result[0] and "foo" in result[0] assert "green" in result[1] and "bar" in result[1] assert "red" in result[2] and "FOO" in result[2] assert "green" in result[3] and "BAR" in result[3] @pytest.mark.issue(10672) def test_displacy_manual_sorted_entities(): doc = { "text": "But Google is starting from behind.", "ents": [ {"start": 14, "end": 22, "label": "SECOND"}, {"start": 4, "end": 10, "label": "FIRST"}, ], "title": None, } html = displacy.render(doc, style="ent", manual=True) assert html.find("FIRST") < html.find("SECOND") @pytest.mark.issue(12816) def test_issue12816(en_vocab) -> None: """Test that displaCy's span visualizer escapes annotated HTML tags correctly.""" # Create a doc containing an annotated word and an unannotated HTML tag doc = Doc(en_vocab, words=["test", "<TEST>"]) doc.spans["sc"] = [Span(doc, 0, 1, label="test")] # Verify that the HTML tag is escaped when unannotated html = displacy.render(doc, style="span") assert "&lt;TEST&gt;" in html # Annotate the HTML tag doc.spans["sc"].append(Span(doc, 1, 2, label="test")) # Verify that the HTML tag is still escaped html = displacy.render(doc, style="span") assert "&lt;TEST&gt;" in html
file_length: 14,268
avg_line_length: 34.761905
max_line_length: 92
extension_type: py

repo: spaCy
file: spaCy-master/spacy/tests/test_errors.py
from inspect import isclass

import pytest

from spacy.errors import ErrorsWithCodes


class Errors(metaclass=ErrorsWithCodes):
    E001 = "error description"


def test_add_codes():
    assert Errors.E001 == "[E001] error description"
    with pytest.raises(AttributeError):
        Errors.E002
    assert isclass(Errors.__class__)
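# --- Editorial sketch (not part of the spaCy source above) -----------------
# The test above only exercises the public behaviour of ErrorsWithCodes:
# accessing Errors.E001 yields the message prefixed with its code, and
# undefined codes raise AttributeError. A minimal metaclass in that spirit
# could look like the sketch below. The names _WithCodes and _DemoErrors are
# made up for illustration; this is not spaCy's actual implementation.
class _WithCodes(type):
    def __getattribute__(cls, name):
        # Normal lookup first; unknown codes raise AttributeError here.
        msg = super().__getattribute__(name)
        # Only rewrite attributes that look like error codes, e.g. "E001".
        if name.startswith("E") and name[1:].isdigit():
            return f"[{name}] {msg}"
        return msg


class _DemoErrors(metaclass=_WithCodes):
    E001 = "error description"


assert _DemoErrors.E001 == "[E001] error description"
# ----------------------------------------------------------------------------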
file_length: 333
avg_line_length: 18.647059
max_line_length: 52
extension_type: py

repo: spaCy
file: spaCy-master/spacy/tests/test_language.py
import itertools import logging from unittest import mock import pytest from thinc.api import CupyOps, NumpyOps, get_current_ops import spacy from spacy.lang.de import German from spacy.lang.en import English from spacy.language import Language from spacy.scorer import Scorer from spacy.tokens import Doc, Span from spacy.training import Example from spacy.util import find_matching_language, ignore_error, raise_error, registry from spacy.vocab import Vocab from .util import add_vecs_to_vocab, assert_docs_equal try: import torch # Ensure that we don't deadlock in multiprocessing tests. torch.set_num_threads(1) torch.set_num_interop_threads(1) except ImportError: pass def evil_component(doc): if "2" in doc.text: raise ValueError("no dice") return doc def perhaps_set_sentences(doc): if not doc.text.startswith("4"): doc[-1].is_sent_start = True return doc def assert_sents_error(doc): if not doc.has_annotation("SENT_START"): raise ValueError("no sents") return doc def warn_error(proc_name, proc, docs, e): logger = logging.getLogger("spacy") logger.warning("Trouble with component %s.", proc_name) @pytest.fixture def nlp(): nlp = Language(Vocab()) textcat = nlp.add_pipe("textcat") for label in ("POSITIVE", "NEGATIVE"): textcat.add_label(label) nlp.initialize() return nlp def test_language_update(nlp): text = "hello world" annots = {"cats": {"POSITIVE": 1.0, "NEGATIVE": 0.0}} wrongkeyannots = {"LABEL": True} doc = Doc(nlp.vocab, words=text.split(" ")) example = Example.from_dict(doc, annots) nlp.update([example]) # Not allowed to call with just one Example with pytest.raises(TypeError): nlp.update(example) # Update with text and dict: not supported anymore since v.3 with pytest.raises(TypeError): nlp.update((text, annots)) # Update with doc object and dict with pytest.raises(TypeError): nlp.update((doc, annots)) # Create examples badly with pytest.raises(ValueError): example = Example.from_dict(doc, None) with pytest.raises(KeyError): example = Example.from_dict(doc, wrongkeyannots) def test_language_evaluate(nlp): text = "hello world" annots = {"doc_annotation": {"cats": {"POSITIVE": 1.0, "NEGATIVE": 0.0}}} doc = Doc(nlp.vocab, words=text.split(" ")) example = Example.from_dict(doc, annots) scores = nlp.evaluate([example]) assert scores["speed"] > 0 # test with generator scores = nlp.evaluate(eg for eg in [example]) assert scores["speed"] > 0 # Not allowed to call with just one Example with pytest.raises(TypeError): nlp.evaluate(example) # Evaluate with text and dict: not supported anymore since v.3 with pytest.raises(TypeError): nlp.evaluate([(text, annots)]) # Evaluate with doc object and dict with pytest.raises(TypeError): nlp.evaluate([(doc, annots)]) with pytest.raises(TypeError): nlp.evaluate([text, annots]) def test_evaluate_no_pipe(nlp): """Test that docs are processed correctly within Language.pipe if the component doesn't expose a .pipe method.""" @Language.component("test_evaluate_no_pipe") def pipe(doc): return doc text = "hello world" annots = {"cats": {"POSITIVE": 1.0, "NEGATIVE": 0.0}} nlp = Language(Vocab()) doc = nlp(text) nlp.add_pipe("test_evaluate_no_pipe") nlp.evaluate([Example.from_dict(doc, annots)]) def test_evaluate_textcat_multilabel(en_vocab): """Test that evaluate works with a multilabel textcat pipe.""" nlp = Language(en_vocab) textcat_multilabel = nlp.add_pipe("textcat_multilabel") for label in ("FEATURE", "REQUEST", "BUG", "QUESTION"): textcat_multilabel.add_label(label) nlp.initialize() annots = {"cats": {"FEATURE": 1.0, "QUESTION": 1.0}} doc = nlp.make_doc("hello 
world") example = Example.from_dict(doc, annots) scores = nlp.evaluate([example]) labels = nlp.get_pipe("textcat_multilabel").labels for label in labels: assert scores["cats_f_per_type"].get(label) is not None for key in example.reference.cats.keys(): if key not in labels: assert scores["cats_f_per_type"].get(key) is None def test_evaluate_multiple_textcat_final(en_vocab): """Test that evaluate evaluates the final textcat component in a pipeline with more than one textcat or textcat_multilabel.""" nlp = Language(en_vocab) textcat = nlp.add_pipe("textcat") for label in ("POSITIVE", "NEGATIVE"): textcat.add_label(label) textcat_multilabel = nlp.add_pipe("textcat_multilabel") for label in ("FEATURE", "REQUEST", "BUG", "QUESTION"): textcat_multilabel.add_label(label) nlp.initialize() annots = { "cats": { "POSITIVE": 1.0, "NEGATIVE": 0.0, "FEATURE": 1.0, "QUESTION": 1.0, "POSITIVE": 1.0, "NEGATIVE": 0.0, } } doc = nlp.make_doc("hello world") example = Example.from_dict(doc, annots) scores = nlp.evaluate([example]) # get the labels from the final pipe labels = nlp.get_pipe(nlp.pipe_names[-1]).labels for label in labels: assert scores["cats_f_per_type"].get(label) is not None for key in example.reference.cats.keys(): if key not in labels: assert scores["cats_f_per_type"].get(key) is None def test_evaluate_multiple_textcat_separate(en_vocab): """Test that evaluate can evaluate multiple textcat components separately with custom scorers.""" def custom_textcat_score(examples, **kwargs): scores = Scorer.score_cats( examples, "cats", multi_label=False, **kwargs, ) return {f"custom_{k}": v for k, v in scores.items()} @spacy.registry.scorers("test_custom_textcat_scorer") def make_custom_textcat_scorer(): return custom_textcat_score nlp = Language(en_vocab) textcat = nlp.add_pipe( "textcat", config={"scorer": {"@scorers": "test_custom_textcat_scorer"}}, ) for label in ("POSITIVE", "NEGATIVE"): textcat.add_label(label) textcat_multilabel = nlp.add_pipe("textcat_multilabel") for label in ("FEATURE", "REQUEST", "BUG", "QUESTION"): textcat_multilabel.add_label(label) nlp.initialize() annots = { "cats": { "POSITIVE": 1.0, "NEGATIVE": 0.0, "FEATURE": 1.0, "QUESTION": 1.0, "POSITIVE": 1.0, "NEGATIVE": 0.0, } } doc = nlp.make_doc("hello world") example = Example.from_dict(doc, annots) scores = nlp.evaluate([example]) # check custom scores for the textcat pipe assert "custom_cats_f_per_type" in scores labels = nlp.get_pipe("textcat").labels assert set(scores["custom_cats_f_per_type"].keys()) == set(labels) # check default scores for the textcat_multilabel pipe assert "cats_f_per_type" in scores labels = nlp.get_pipe("textcat_multilabel").labels assert set(scores["cats_f_per_type"].keys()) == set(labels) def vector_modification_pipe(doc): doc.vector += 1 return doc def userdata_pipe(doc): doc.user_data["foo"] = "bar" return doc def ner_pipe(doc): span = Span(doc, 0, 1, label="FIRST") doc.ents += (span,) return doc @pytest.fixture def sample_vectors(): return [ ("spacy", [-0.1, -0.2, -0.3]), ("world", [-0.2, -0.3, -0.4]), ("pipe", [0.7, 0.8, 0.9]), ] @pytest.fixture def nlp2(nlp, sample_vectors): Language.component( "test_language_vector_modification_pipe", func=vector_modification_pipe ) Language.component("test_language_userdata_pipe", func=userdata_pipe) Language.component("test_language_ner_pipe", func=ner_pipe) add_vecs_to_vocab(nlp.vocab, sample_vectors) nlp.add_pipe("test_language_vector_modification_pipe") nlp.add_pipe("test_language_ner_pipe") nlp.add_pipe("test_language_userdata_pipe") return nlp 
@pytest.fixture def texts(): data = [ "Hello world.", "This is spacy.", "You can use multiprocessing with pipe method.", "Please try!", ] return data @pytest.mark.parametrize("n_process", [1, 2]) def test_language_pipe(nlp2, n_process, texts): ops = get_current_ops() if isinstance(ops, NumpyOps) or n_process < 2: texts = texts * 10 expecteds = [nlp2(text) for text in texts] docs = nlp2.pipe(texts, n_process=n_process, batch_size=2) for doc, expected_doc in zip(docs, expecteds): assert_docs_equal(doc, expected_doc) @pytest.mark.parametrize("n_process", [1, 2]) def test_language_pipe_stream(nlp2, n_process, texts): ops = get_current_ops() if isinstance(ops, NumpyOps) or n_process < 2: # check if nlp.pipe can handle infinite length iterator properly. stream_texts = itertools.cycle(texts) texts0, texts1 = itertools.tee(stream_texts) expecteds = (nlp2(text) for text in texts0) docs = nlp2.pipe(texts1, n_process=n_process, batch_size=2) n_fetch = 20 for doc, expected_doc in itertools.islice(zip(docs, expecteds), n_fetch): assert_docs_equal(doc, expected_doc) @pytest.mark.parametrize("n_process", [1, 2]) def test_language_pipe_error_handler(n_process): """Test that the error handling of nlp.pipe works well""" ops = get_current_ops() if isinstance(ops, NumpyOps) or n_process < 2: nlp = English() nlp.add_pipe("merge_subtokens") nlp.initialize() texts = ["Curious to see what will happen to this text.", "And this one."] # the pipeline fails because there's no parser with pytest.raises(ValueError): nlp(texts[0]) with pytest.raises(ValueError): list(nlp.pipe(texts, n_process=n_process)) nlp.set_error_handler(raise_error) with pytest.raises(ValueError): list(nlp.pipe(texts, n_process=n_process)) # set explicitely to ignoring nlp.set_error_handler(ignore_error) docs = list(nlp.pipe(texts, n_process=n_process)) assert len(docs) == 0 nlp(texts[0]) @pytest.mark.parametrize("n_process", [1, 2]) def test_language_pipe_error_handler_custom(en_vocab, n_process): """Test the error handling of a custom component that has no pipe method""" Language.component("my_evil_component", func=evil_component) ops = get_current_ops() if isinstance(ops, NumpyOps) or n_process < 2: nlp = English() nlp.add_pipe("my_evil_component") texts = ["TEXT 111", "TEXT 222", "TEXT 333", "TEXT 342", "TEXT 666"] with pytest.raises(ValueError): # the evil custom component throws an error list(nlp.pipe(texts)) nlp.set_error_handler(warn_error) logger = logging.getLogger("spacy") with mock.patch.object(logger, "warning") as mock_warning: # the errors by the evil custom component raise a warning for each # bad doc docs = list(nlp.pipe(texts, n_process=n_process)) # HACK/TODO? 
the warnings in child processes don't seem to be # detected by the mock logger if n_process == 1: mock_warning.assert_called() assert mock_warning.call_count == 2 assert len(docs) + mock_warning.call_count == len(texts) assert [doc.text for doc in docs] == ["TEXT 111", "TEXT 333", "TEXT 666"] @pytest.mark.parametrize("n_process", [1, 2]) def test_language_pipe_error_handler_input_as_tuples(en_vocab, n_process): """Test the error handling of nlp.pipe with input as tuples""" Language.component("my_evil_component", func=evil_component) ops = get_current_ops() if isinstance(ops, NumpyOps) or n_process < 2: nlp = English() nlp.add_pipe("my_evil_component") texts = [ ("TEXT 111", 111), ("TEXT 222", 222), ("TEXT 333", 333), ("TEXT 342", 342), ("TEXT 666", 666), ] with pytest.raises(ValueError): list(nlp.pipe(texts, as_tuples=True)) nlp.set_error_handler(warn_error) logger = logging.getLogger("spacy") with mock.patch.object(logger, "warning") as mock_warning: tuples = list(nlp.pipe(texts, as_tuples=True, n_process=n_process)) # HACK/TODO? the warnings in child processes don't seem to be # detected by the mock logger if n_process == 1: mock_warning.assert_called() assert mock_warning.call_count == 2 assert len(tuples) + mock_warning.call_count == len(texts) assert (tuples[0][0].text, tuples[0][1]) == ("TEXT 111", 111) assert (tuples[1][0].text, tuples[1][1]) == ("TEXT 333", 333) assert (tuples[2][0].text, tuples[2][1]) == ("TEXT 666", 666) @pytest.mark.parametrize("n_process", [1, 2]) def test_language_pipe_error_handler_pipe(en_vocab, n_process): """Test the error handling of a component's pipe method""" Language.component("my_perhaps_sentences", func=perhaps_set_sentences) Language.component("assert_sents_error", func=assert_sents_error) ops = get_current_ops() if isinstance(ops, NumpyOps) or n_process < 2: texts = [f"{str(i)} is enough. 
Done" for i in range(100)] nlp = English() nlp.add_pipe("my_perhaps_sentences") nlp.add_pipe("assert_sents_error") nlp.initialize() with pytest.raises(ValueError): # assert_sents_error requires sentence boundaries, will throw an error otherwise docs = list(nlp.pipe(texts, n_process=n_process, batch_size=10)) nlp.set_error_handler(ignore_error) docs = list(nlp.pipe(texts, n_process=n_process, batch_size=10)) # we lose/ignore the failing 4,40-49 docs assert len(docs) == 89 @pytest.mark.parametrize("n_process", [1, 2]) def test_language_pipe_error_handler_make_doc_actual(n_process): """Test the error handling for make_doc""" # TODO: fix so that the following test is the actual behavior ops = get_current_ops() if isinstance(ops, NumpyOps) or n_process < 2: nlp = English() nlp.max_length = 10 texts = ["12345678901234567890", "12345"] * 10 with pytest.raises(ValueError): list(nlp.pipe(texts, n_process=n_process)) nlp.default_error_handler = ignore_error if n_process == 1: with pytest.raises(ValueError): list(nlp.pipe(texts, n_process=n_process)) else: docs = list(nlp.pipe(texts, n_process=n_process)) assert len(docs) == 0 @pytest.mark.xfail @pytest.mark.parametrize("n_process", [1, 2]) def test_language_pipe_error_handler_make_doc_preferred(n_process): """Test the error handling for make_doc""" ops = get_current_ops() if isinstance(ops, NumpyOps) or n_process < 2: nlp = English() nlp.max_length = 10 texts = ["12345678901234567890", "12345"] * 10 with pytest.raises(ValueError): list(nlp.pipe(texts, n_process=n_process)) nlp.default_error_handler = ignore_error docs = list(nlp.pipe(texts, n_process=n_process)) assert len(docs) == 0 def test_language_from_config_before_after_init(): name = "test_language_from_config_before_after_init" ran_before = False ran_after = False ran_after_pipeline = False ran_before_init = False ran_after_init = False @registry.callbacks(f"{name}_before") def make_before_creation(): def before_creation(lang_cls): nonlocal ran_before ran_before = True assert lang_cls is English lang_cls.Defaults.foo = "bar" return lang_cls return before_creation @registry.callbacks(f"{name}_after") def make_after_creation(): def after_creation(nlp): nonlocal ran_after ran_after = True assert isinstance(nlp, English) assert nlp.pipe_names == [] assert nlp.Defaults.foo == "bar" nlp.meta["foo"] = "bar" return nlp return after_creation @registry.callbacks(f"{name}_after_pipeline") def make_after_pipeline_creation(): def after_pipeline_creation(nlp): nonlocal ran_after_pipeline ran_after_pipeline = True assert isinstance(nlp, English) assert nlp.pipe_names == ["sentencizer"] assert nlp.Defaults.foo == "bar" assert nlp.meta["foo"] == "bar" nlp.meta["bar"] = "baz" return nlp return after_pipeline_creation @registry.callbacks(f"{name}_before_init") def make_before_init(): def before_init(nlp): nonlocal ran_before_init ran_before_init = True nlp.meta["before_init"] = "before" return nlp return before_init @registry.callbacks(f"{name}_after_init") def make_after_init(): def after_init(nlp): nonlocal ran_after_init ran_after_init = True nlp.meta["after_init"] = "after" return nlp return after_init config = { "nlp": { "pipeline": ["sentencizer"], "before_creation": {"@callbacks": f"{name}_before"}, "after_creation": {"@callbacks": f"{name}_after"}, "after_pipeline_creation": {"@callbacks": f"{name}_after_pipeline"}, }, "components": {"sentencizer": {"factory": "sentencizer"}}, "initialize": { "before_init": {"@callbacks": f"{name}_before_init"}, "after_init": {"@callbacks": f"{name}_after_init"}, }, } 
nlp = English.from_config(config) assert nlp.Defaults.foo == "bar" assert nlp.meta["foo"] == "bar" assert nlp.meta["bar"] == "baz" assert "before_init" not in nlp.meta assert "after_init" not in nlp.meta assert nlp.pipe_names == ["sentencizer"] assert nlp("text") nlp.initialize() assert nlp.meta["before_init"] == "before" assert nlp.meta["after_init"] == "after" assert all( [ran_before, ran_after, ran_after_pipeline, ran_before_init, ran_after_init] ) def test_language_from_config_before_after_init_invalid(): """Check that an error is raised if function doesn't return nlp.""" name = "test_language_from_config_before_after_init_invalid" registry.callbacks(f"{name}_before1", func=lambda: lambda nlp: None) registry.callbacks(f"{name}_before2", func=lambda: lambda nlp: nlp()) registry.callbacks(f"{name}_after1", func=lambda: lambda nlp: None) registry.callbacks(f"{name}_after1", func=lambda: lambda nlp: English) for callback_name in [f"{name}_before1", f"{name}_before2"]: config = {"nlp": {"before_creation": {"@callbacks": callback_name}}} with pytest.raises(ValueError): English.from_config(config) for callback_name in [f"{name}_after1", f"{name}_after2"]: config = {"nlp": {"after_creation": {"@callbacks": callback_name}}} with pytest.raises(ValueError): English.from_config(config) for callback_name in [f"{name}_after1", f"{name}_after2"]: config = {"nlp": {"after_pipeline_creation": {"@callbacks": callback_name}}} with pytest.raises(ValueError): English.from_config(config) def test_language_whitespace_tokenizer(): """Test the custom whitespace tokenizer from the docs.""" class WhitespaceTokenizer: def __init__(self, vocab): self.vocab = vocab def __call__(self, text): words = text.split(" ") spaces = [True] * len(words) # Avoid zero-length tokens for i, word in enumerate(words): if word == "": words[i] = " " spaces[i] = False # Remove the final trailing space if words[-1] == " ": words = words[0:-1] spaces = spaces[0:-1] else: spaces[-1] = False return Doc(self.vocab, words=words, spaces=spaces) nlp = spacy.blank("en") nlp.tokenizer = WhitespaceTokenizer(nlp.vocab) text = " What's happened to me? he thought. It wasn't a dream. 
" doc = nlp(text) assert doc.text == text def test_language_custom_tokenizer(): """Test that a fully custom tokenizer can be plugged in via the registry.""" name = "test_language_custom_tokenizer" class CustomTokenizer: """Dummy "tokenizer" that splits on spaces and adds prefix to each word.""" def __init__(self, nlp, prefix): self.vocab = nlp.vocab self.prefix = prefix def __call__(self, text): words = [f"{self.prefix}{word}" for word in text.split(" ")] return Doc(self.vocab, words=words) @registry.tokenizers(name) def custom_create_tokenizer(prefix: str = "_"): def create_tokenizer(nlp): return CustomTokenizer(nlp, prefix=prefix) return create_tokenizer config = {"nlp": {"tokenizer": {"@tokenizers": name}}} nlp = English.from_config(config) doc = nlp("hello world") assert [t.text for t in doc] == ["_hello", "_world"] doc = list(nlp.pipe(["hello world"]))[0] assert [t.text for t in doc] == ["_hello", "_world"] def test_language_from_config_invalid_lang(): """Test that calling Language.from_config raises an error and lang defined in config needs to match language-specific subclasses.""" config = {"nlp": {"lang": "en"}} with pytest.raises(ValueError): Language.from_config(config) with pytest.raises(ValueError): German.from_config(config) def test_spacy_blank(): nlp = spacy.blank("en") assert nlp.config["training"]["dropout"] == 0.1 config = {"training": {"dropout": 0.2}} meta = {"name": "my_custom_model"} nlp = spacy.blank("en", config=config, meta=meta) assert nlp.config["training"]["dropout"] == 0.2 assert nlp.meta["name"] == "my_custom_model" @pytest.mark.parametrize( "lang,target", [ ("en", "en"), ("fra", "fr"), ("fre", "fr"), ("iw", "he"), ("mo", "ro"), ("mul", "xx"), ("no", "nb"), ("pt-BR", "pt"), ("xx", "xx"), ("zh-Hans", "zh"), ("zh-Hant", None), ("zxx", None), ], ) def test_language_matching(lang, target): """ Test that we can look up languages by equivalent or nearly-equivalent language codes. """ assert find_matching_language(lang) == target @pytest.mark.parametrize( "lang,target", [ ("en", "en"), ("fra", "fr"), ("fre", "fr"), ("iw", "he"), ("mo", "ro"), ("mul", "xx"), ("no", "nb"), ("pt-BR", "pt"), ("xx", "xx"), ("zh-Hans", "zh"), ], ) def test_blank_languages(lang, target): """ Test that we can get spacy.blank in various languages, including codes that are defined to be equivalent or that match by CLDR language matching. 
""" nlp = spacy.blank(lang) assert nlp.lang == target @pytest.mark.parametrize("value", [False, None, ["x", "y"], Language, Vocab]) def test_language_init_invalid_vocab(value): err_fragment = "invalid value" with pytest.raises(ValueError) as e: Language(value) assert err_fragment in str(e.value) def test_language_source_and_vectors(nlp2): nlp = Language(Vocab()) textcat = nlp.add_pipe("textcat") for label in ("POSITIVE", "NEGATIVE"): textcat.add_label(label) nlp.initialize() long_string = "thisisalongstring" assert long_string not in nlp.vocab.strings assert long_string not in nlp2.vocab.strings nlp.vocab.strings.add(long_string) assert nlp.vocab.vectors.to_bytes() != nlp2.vocab.vectors.to_bytes() vectors_bytes = nlp.vocab.vectors.to_bytes() with pytest.warns(UserWarning): nlp2.add_pipe("textcat", name="textcat2", source=nlp) # strings should be added assert long_string in nlp2.vocab.strings # vectors should remain unmodified assert nlp.vocab.vectors.to_bytes() == vectors_bytes @pytest.mark.parametrize("n_process", [1, 2]) def test_pass_doc_to_pipeline(nlp, n_process): texts = ["cats", "dogs", "guinea pigs"] docs = [nlp.make_doc(text) for text in texts] assert not any(len(doc.cats) for doc in docs) doc = nlp(docs[0]) assert doc.text == texts[0] assert len(doc.cats) > 0 if isinstance(get_current_ops(), NumpyOps) or n_process < 2: docs = nlp.pipe(docs, n_process=n_process) assert [doc.text for doc in docs] == texts assert all(len(doc.cats) for doc in docs) def test_invalid_arg_to_pipeline(nlp): str_list = ["This is a text.", "This is another."] with pytest.raises(ValueError): nlp(str_list) # type: ignore assert len(list(nlp.pipe(str_list))) == 2 int_list = [1, 2, 3] with pytest.raises(ValueError): list(nlp.pipe(int_list)) # type: ignore with pytest.raises(ValueError): nlp(int_list) # type: ignore @pytest.mark.skipif( not isinstance(get_current_ops(), CupyOps), reason="test requires GPU" ) def test_multiprocessing_gpu_warning(nlp2, texts): texts = texts * 10 docs = nlp2.pipe(texts, n_process=2, batch_size=2) with pytest.warns(UserWarning, match="multiprocessing with GPU models"): with pytest.raises(ValueError): # Trigger multi-processing. for _ in docs: pass def test_dot_in_factory_names(nlp): Language.component("my_evil_component", func=evil_component) nlp.add_pipe("my_evil_component") with pytest.raises(ValueError, match="not permitted"): Language.component("my.evil.component.v1", func=evil_component) with pytest.raises(ValueError, match="not permitted"): Language.factory("my.evil.component.v1", func=evil_component) def test_component_return(): """Test that an error is raised if components return a type other than a doc.""" nlp = English() @Language.component("test_component_good_pipe") def good_pipe(doc): return doc nlp.add_pipe("test_component_good_pipe") nlp("text") nlp.remove_pipe("test_component_good_pipe") @Language.component("test_component_bad_pipe") def bad_pipe(doc): return doc.text nlp.add_pipe("test_component_bad_pipe") with pytest.raises(ValueError, match="instead of a Doc"): nlp("text")
file_length: 26,993
avg_line_length: 32.616438
max_line_length: 92
extension_type: py

repo: spaCy
file: spaCy-master/spacy/tests/test_misc.py
import ctypes import os from pathlib import Path import pytest from pydantic import ValidationError from thinc.api import ( Config, ConfigValidationError, CupyOps, MPSOps, NumpyOps, Optimizer, get_current_ops, set_current_ops, ) from thinc.compat import has_cupy_gpu, has_torch_mps_gpu from spacy import prefer_gpu, require_cpu, require_gpu, util from spacy.about import __version__ as spacy_version from spacy.lang.en import English from spacy.lang.nl import Dutch from spacy.language import DEFAULT_CONFIG_PATH from spacy.ml._precomputable_affine import ( PrecomputableAffine, _backprop_precomputable_affine_padding, ) from spacy.schemas import ConfigSchemaTraining, TokenPattern, TokenPatternSchema from spacy.training.batchers import minibatch_by_words from spacy.util import ( SimpleFrozenList, dot_to_object, find_available_port, import_file, to_ternary_int, ) from .util import get_random_doc, make_tempdir @pytest.fixture def is_admin(): """Determine if the tests are run as admin or not.""" try: admin = os.getuid() == 0 except AttributeError: admin = ctypes.windll.shell32.IsUserAnAdmin() != 0 return admin @pytest.mark.issue(6207) def test_issue6207(en_tokenizer): doc = en_tokenizer("zero one two three four five six") # Make spans s1 = doc[:4] s2 = doc[3:6] # overlaps with s1 s3 = doc[5:7] # overlaps with s2, not s1 result = util.filter_spans((s1, s2, s3)) assert s1 in result assert s2 not in result assert s3 in result @pytest.mark.issue(6258) def test_issue6258(): """Test that the non-empty constraint pattern field is respected""" # These one is valid TokenPatternSchema(pattern=[TokenPattern()]) # But an empty pattern list should fail to validate # based on the schema's constraint with pytest.raises(ValidationError): TokenPatternSchema(pattern=[]) @pytest.mark.parametrize("text", ["hello/world", "hello world"]) def test_util_ensure_path_succeeds(text): path = util.ensure_path(text) assert isinstance(path, Path) @pytest.mark.parametrize( "package,result", [("numpy", True), ("sfkodskfosdkfpsdpofkspdof", False)] ) def test_util_is_package(package, result): """Test that an installed package via pip is recognised by util.is_package.""" assert util.is_package(package) is result @pytest.mark.parametrize("package", ["thinc"]) def test_util_get_package_path(package): """Test that a Path object is returned for a package name.""" path = util.get_package_path(package) assert isinstance(path, Path) def test_PrecomputableAffine(nO=4, nI=5, nF=3, nP=2): model = PrecomputableAffine(nO=nO, nI=nI, nF=nF, nP=nP).initialize() assert model.get_param("W").shape == (nF, nO, nP, nI) tensor = model.ops.alloc((10, nI)) Y, get_dX = model.begin_update(tensor) assert Y.shape == (tensor.shape[0] + 1, nF, nO, nP) dY = model.ops.alloc((15, nO, nP)) ids = model.ops.alloc((15, nF)) ids[1, 2] = -1 dY[1] = 1 assert not model.has_grad("pad") d_pad = _backprop_precomputable_affine_padding(model, dY, ids) assert d_pad[0, 2, 0, 0] == 1.0 ids.fill(0.0) dY.fill(0.0) dY[0] = 0 ids[1, 2] = 0 ids[1, 1] = -1 ids[1, 0] = -1 dY[1] = 1 ids[2, 0] = -1 dY[2] = 5 d_pad = _backprop_precomputable_affine_padding(model, dY, ids) assert d_pad[0, 0, 0, 0] == 6 assert d_pad[0, 1, 0, 0] == 1 assert d_pad[0, 2, 0, 0] == 0 def test_prefer_gpu(): current_ops = get_current_ops() if has_cupy_gpu: assert prefer_gpu() assert isinstance(get_current_ops(), CupyOps) elif has_torch_mps_gpu: assert prefer_gpu() assert isinstance(get_current_ops(), MPSOps) else: assert not prefer_gpu() set_current_ops(current_ops) def test_require_gpu(): current_ops = 
get_current_ops() if has_cupy_gpu: require_gpu() assert isinstance(get_current_ops(), CupyOps) elif has_torch_mps_gpu: require_gpu() assert isinstance(get_current_ops(), MPSOps) set_current_ops(current_ops) def test_require_cpu(): current_ops = get_current_ops() require_cpu() assert isinstance(get_current_ops(), NumpyOps) try: import cupy # noqa: F401 require_gpu() assert isinstance(get_current_ops(), CupyOps) except ImportError: pass require_cpu() assert isinstance(get_current_ops(), NumpyOps) set_current_ops(current_ops) def test_ascii_filenames(): """Test that all filenames in the project are ASCII. See: https://twitter.com/_inesmontani/status/1177941471632211968 """ root = Path(__file__).parent.parent for path in root.glob("**/*"): assert all(ord(c) < 128 for c in path.name), path.name def test_load_model_blank_shortcut(): """Test that using a model name like "blank:en" works as a shortcut for spacy.blank("en"). """ nlp = util.load_model("blank:en") assert nlp.lang == "en" assert nlp.pipeline == [] # ImportError for loading an unsupported language with pytest.raises(ImportError): util.load_model("blank:zxx") # ImportError for requesting an invalid language code that isn't registered with pytest.raises(ImportError): util.load_model("blank:fjsfijsdof") @pytest.mark.parametrize( "version,constraint,compatible", [ (spacy_version, spacy_version, True), (spacy_version, f">={spacy_version}", True), ("3.0.0", "2.0.0", False), ("3.2.1", ">=2.0.0", True), ("2.2.10a1", ">=1.0.0,<2.1.1", False), ("3.0.0.dev3", ">=1.2.3,<4.5.6", True), ("n/a", ">=1.2.3,<4.5.6", None), ("1.2.3", "n/a", None), ("n/a", "n/a", None), ], ) def test_is_compatible_version(version, constraint, compatible): assert util.is_compatible_version(version, constraint) is compatible @pytest.mark.parametrize( "constraint,expected", [ ("3.0.0", False), ("==3.0.0", False), (">=2.3.0", True), (">2.0.0", True), ("<=2.0.0", True), (">2.0.0,<3.0.0", False), (">=2.0.0,<3.0.0", False), ("!=1.1,>=1.0,~=1.0", True), ("n/a", None), ], ) def test_is_unconstrained_version(constraint, expected): assert util.is_unconstrained_version(constraint) is expected @pytest.mark.parametrize( "a1,a2,b1,b2,is_match", [ ("3.0.0", "3.0", "3.0.1", "3.0", True), ("3.1.0", "3.1", "3.2.1", "3.2", False), ("xxx", None, "1.2.3.dev0", "1.2", False), ], ) def test_minor_version(a1, a2, b1, b2, is_match): assert util.get_minor_version(a1) == a2 assert util.get_minor_version(b1) == b2 assert util.is_minor_version_match(a1, b1) is is_match assert util.is_minor_version_match(a2, b2) is is_match @pytest.mark.parametrize( "dot_notation,expected", [ ( {"token.pos": True, "token._.xyz": True}, {"token": {"pos": True, "_": {"xyz": True}}}, ), ( {"training.batch_size": 128, "training.optimizer.learn_rate": 0.01}, {"training": {"batch_size": 128, "optimizer": {"learn_rate": 0.01}}}, ), ( {"attribute_ruler.scorer.@scorers": "spacy.tagger_scorer.v1"}, {"attribute_ruler": {"scorer": {"@scorers": "spacy.tagger_scorer.v1"}}}, ), ], ) def test_dot_to_dict(dot_notation, expected): result = util.dot_to_dict(dot_notation) assert result == expected assert util.dict_to_dot(result) == dot_notation @pytest.mark.parametrize( "dot_notation,expected", [ ( {"token.pos": True, "token._.xyz": True}, {"token": {"pos": True, "_": {"xyz": True}}}, ), ( {"training.batch_size": 128, "training.optimizer.learn_rate": 0.01}, {"training": {"batch_size": 128, "optimizer": {"learn_rate": 0.01}}}, ), ( {"attribute_ruler.scorer": {"@scorers": "spacy.tagger_scorer.v1"}}, {"attribute_ruler": {"scorer": 
{"@scorers": "spacy.tagger_scorer.v1"}}}, ), ], ) def test_dot_to_dict_overrides(dot_notation, expected): result = util.dot_to_dict(dot_notation) assert result == expected assert util.dict_to_dot(result, for_overrides=True) == dot_notation def test_set_dot_to_object(): config = {"foo": {"bar": 1, "baz": {"x": "y"}}, "test": {"a": {"b": "c"}}} with pytest.raises(KeyError): util.set_dot_to_object(config, "foo.bar.baz", 100) with pytest.raises(KeyError): util.set_dot_to_object(config, "hello.world", 100) with pytest.raises(KeyError): util.set_dot_to_object(config, "test.a.b.c", 100) util.set_dot_to_object(config, "foo.bar", 100) assert config["foo"]["bar"] == 100 util.set_dot_to_object(config, "foo.baz.x", {"hello": "world"}) assert config["foo"]["baz"]["x"]["hello"] == "world" assert config["test"]["a"]["b"] == "c" util.set_dot_to_object(config, "foo", 123) assert config["foo"] == 123 util.set_dot_to_object(config, "test", "hello") assert dict(config) == {"foo": 123, "test": "hello"} @pytest.mark.parametrize( "doc_sizes, expected_batches", [ ([400, 400, 199], [3]), ([400, 400, 199, 3], [4]), ([400, 400, 199, 3, 200], [3, 2]), ([400, 400, 199, 3, 1], [5]), ([400, 400, 199, 3, 1, 1500], [5]), # 1500 will be discarded ([400, 400, 199, 3, 1, 200], [3, 3]), ([400, 400, 199, 3, 1, 999], [3, 3]), ([400, 400, 199, 3, 1, 999, 999], [3, 2, 1, 1]), ([1, 2, 999], [3]), ([1, 2, 999, 1], [4]), ([1, 200, 999, 1], [2, 2]), ([1, 999, 200, 1], [2, 2]), ], ) def test_util_minibatch(doc_sizes, expected_batches): docs = [get_random_doc(doc_size) for doc_size in doc_sizes] tol = 0.2 batch_size = 1000 batches = list( minibatch_by_words(docs, size=batch_size, tolerance=tol, discard_oversize=True) ) assert [len(batch) for batch in batches] == expected_batches max_size = batch_size + batch_size * tol for batch in batches: assert sum([len(doc) for doc in batch]) < max_size @pytest.mark.parametrize( "doc_sizes, expected_batches", [ ([400, 4000, 199], [1, 2]), ([400, 400, 199, 3000, 200], [1, 4]), ([400, 400, 199, 3, 1, 1500], [1, 5]), ([400, 400, 199, 3000, 2000, 200, 200], [1, 1, 3, 2]), ([1, 2, 9999], [1, 2]), ([2000, 1, 2000, 1, 1, 1, 2000], [1, 1, 1, 4]), ], ) def test_util_minibatch_oversize(doc_sizes, expected_batches): """Test that oversized documents are returned in their own batch""" docs = [get_random_doc(doc_size) for doc_size in doc_sizes] tol = 0.2 batch_size = 1000 batches = list( minibatch_by_words(docs, size=batch_size, tolerance=tol, discard_oversize=False) ) assert [len(batch) for batch in batches] == expected_batches def test_util_dot_section(): cfg_string = """ [nlp] lang = "en" pipeline = ["textcat"] [components] [components.textcat] factory = "textcat" [components.textcat.model] @architectures = "spacy.TextCatBOW.v2" exclusive_classes = true ngram_size = 1 no_output_layer = false """ nlp_config = Config().from_str(cfg_string) en_nlp = util.load_model_from_config(nlp_config, auto_fill=True) default_config = Config().from_disk(DEFAULT_CONFIG_PATH) default_config["nlp"]["lang"] = "nl" nl_nlp = util.load_model_from_config(default_config, auto_fill=True) # Test that creation went OK assert isinstance(en_nlp, English) assert isinstance(nl_nlp, Dutch) assert nl_nlp.pipe_names == [] assert en_nlp.pipe_names == ["textcat"] # not exclusive_classes assert en_nlp.get_pipe("textcat").model.attrs["multi_label"] is False # Test that default values got overwritten assert en_nlp.config["nlp"]["pipeline"] == ["textcat"] assert nl_nlp.config["nlp"]["pipeline"] == [] # default value [] # Test proper functioning of 
'dot_to_object' with pytest.raises(KeyError): dot_to_object(en_nlp.config, "nlp.pipeline.tagger") with pytest.raises(KeyError): dot_to_object(en_nlp.config, "nlp.unknownattribute") T = util.registry.resolve(nl_nlp.config["training"], schema=ConfigSchemaTraining) assert isinstance(dot_to_object({"training": T}, "training.optimizer"), Optimizer) def test_simple_frozen_list(): t = SimpleFrozenList(["foo", "bar"]) assert t == ["foo", "bar"] assert t.index("bar") == 1 # okay method with pytest.raises(NotImplementedError): t.append("baz") with pytest.raises(NotImplementedError): t.sort() with pytest.raises(NotImplementedError): t.extend(["baz"]) with pytest.raises(NotImplementedError): t.pop() t = SimpleFrozenList(["foo", "bar"], error="Error!") with pytest.raises(NotImplementedError): t.append("baz") def test_resolve_dot_names(): config = { "training": {"optimizer": {"@optimizers": "Adam.v1"}}, "foo": {"bar": "training.optimizer", "baz": "training.xyz"}, } result = util.resolve_dot_names(config, ["training.optimizer"]) assert isinstance(result[0], Optimizer) with pytest.raises(ConfigValidationError) as e: util.resolve_dot_names(config, ["training.xyz", "training.optimizer"]) errors = e.value.errors assert len(errors) == 1 assert errors[0]["loc"] == ["training", "xyz"] def test_import_code(): code_str = """ from spacy import Language class DummyComponent: def __init__(self, vocab, name): pass def initialize(self, get_examples, *, nlp, dummy_param: int): pass @Language.factory( "dummy_component", ) def make_dummy_component( nlp: Language, name: str ): return DummyComponent(nlp.vocab, name) """ with make_tempdir() as temp_dir: code_path = os.path.join(temp_dir, "code.py") with open(code_path, "w") as fileh: fileh.write(code_str) import_file("python_code", code_path) config = {"initialize": {"components": {"dummy_component": {"dummy_param": 1}}}} nlp = English.from_config(config) nlp.add_pipe("dummy_component") nlp.initialize() def test_to_ternary_int(): assert to_ternary_int(True) == 1 assert to_ternary_int(None) == 0 assert to_ternary_int(False) == -1 assert to_ternary_int(1) == 1 assert to_ternary_int(1.0) == 1 assert to_ternary_int(0) == 0 assert to_ternary_int(0.0) == 0 assert to_ternary_int(-1) == -1 assert to_ternary_int(5) == -1 assert to_ternary_int(-10) == -1 assert to_ternary_int("string") == -1 assert to_ternary_int([0, "string"]) == -1 def test_find_available_port(): host = "0.0.0.0" port = 5000 assert find_available_port(port, host) == port, "Port 5000 isn't free" from wsgiref.simple_server import demo_app, make_server with make_server(host, port, demo_app) as httpd: with pytest.warns(UserWarning, match="already in use"): found_port = find_available_port(port, host, auto_select=True) assert found_port == port + 1, "Didn't find next port"
15,386
30.27439
88
py
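The overlap filtering that test_issue6207 in the file above relies on can also be shown as a standalone snippet. This is a minimal sketch, assuming a blank English pipeline; util.filter_spans keeps the longest non-overlapping spans (preferring earlier spans on ties), which is why the middle span is dropped.

import spacy
from spacy import util

nlp = spacy.blank("en")
doc = nlp("zero one two three four five six")
s1, s2, s3 = doc[:4], doc[3:6], doc[5:7]
# s2 overlaps the longer s1 and is therefore dropped; s1 and s3 survive.
filtered = util.filter_spans([s1, s2, s3])
assert s1 in filtered and s3 in filtered and s2 not in filtered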
spaCy
spaCy-master/spacy/tests/test_models.py
from typing import List import numpy import pytest from numpy.testing import assert_array_almost_equal, assert_array_equal from thinc.api import ( Adam, Logistic, Ragged, Relu, chain, fix_random_seed, reduce_mean, set_dropout_rate, ) from spacy.lang.en import English from spacy.lang.en.examples import sentences as EN_SENTENCES from spacy.ml.extract_spans import _get_span_indices, extract_spans from spacy.ml.models import ( MaxoutWindowEncoder, MultiHashEmbed, build_bow_text_classifier, build_simple_cnn_text_classifier, build_spancat_model, build_Tok2Vec_model, ) from spacy.ml.staticvectors import StaticVectors def get_textcat_bow_kwargs(): return { "exclusive_classes": True, "ngram_size": 1, "no_output_layer": False, "nO": 34, } def get_textcat_cnn_kwargs(): return {"tok2vec": make_test_tok2vec(), "exclusive_classes": False, "nO": 13} def get_all_params(model): params = [] for node in model.walk(): for name in node.param_names: params.append(node.get_param(name).ravel()) return node.ops.xp.concatenate(params) def get_docs(): nlp = English() return list(nlp.pipe(EN_SENTENCES + [" ".join(EN_SENTENCES)])) def get_gradient(model, Y): if isinstance(Y, model.ops.xp.ndarray): dY = model.ops.alloc(Y.shape, dtype=Y.dtype) dY += model.ops.xp.random.uniform(-1.0, 1.0, Y.shape) return dY elif isinstance(Y, List): return [get_gradient(model, y) for y in Y] else: raise ValueError(f"Could not get gradient for type {type(Y)}") def get_tok2vec_kwargs(): # This actually creates models, so seems best to put it in a function. return { "embed": MultiHashEmbed( width=32, rows=[500, 500, 500], attrs=["NORM", "PREFIX", "SHAPE"], include_static_vectors=False, ), "encode": MaxoutWindowEncoder( width=32, depth=2, maxout_pieces=2, window_size=1 ), } def make_test_tok2vec(): return build_Tok2Vec_model(**get_tok2vec_kwargs()) def test_multi_hash_embed(): embed = MultiHashEmbed( width=32, rows=[500, 500, 500], attrs=["NORM", "PREFIX", "SHAPE"], include_static_vectors=False, ) hash_embeds = [node for node in embed.walk() if node.name == "hashembed"] assert len(hash_embeds) == 3 # Check they look at different columns. 
assert list(sorted(he.attrs["column"] for he in hash_embeds)) == [0, 1, 2] # Check they use different seeds assert len(set(he.attrs["seed"] for he in hash_embeds)) == 3 # Check they all have the same number of rows assert [he.get_dim("nV") for he in hash_embeds] == [500, 500, 500] # Now try with different row factors embed = MultiHashEmbed( width=32, rows=[1000, 50, 250], attrs=["NORM", "PREFIX", "SHAPE"], include_static_vectors=False, ) hash_embeds = [node for node in embed.walk() if node.name == "hashembed"] assert [he.get_dim("nV") for he in hash_embeds] == [1000, 50, 250] @pytest.mark.parametrize( "seed,model_func,kwargs", [ (0, build_Tok2Vec_model, get_tok2vec_kwargs()), (0, build_bow_text_classifier, get_textcat_bow_kwargs()), (0, build_simple_cnn_text_classifier, get_textcat_cnn_kwargs()), ], ) def test_models_initialize_consistently(seed, model_func, kwargs): fix_random_seed(seed) model1 = model_func(**kwargs) model1.initialize() fix_random_seed(seed) model2 = model_func(**kwargs) model2.initialize() params1 = get_all_params(model1) params2 = get_all_params(model2) assert_array_equal(model1.ops.to_numpy(params1), model2.ops.to_numpy(params2)) @pytest.mark.parametrize( "seed,model_func,kwargs,get_X", [ (0, build_Tok2Vec_model, get_tok2vec_kwargs(), get_docs), (0, build_bow_text_classifier, get_textcat_bow_kwargs(), get_docs), (0, build_simple_cnn_text_classifier, get_textcat_cnn_kwargs(), get_docs), ], ) def test_models_predict_consistently(seed, model_func, kwargs, get_X): fix_random_seed(seed) model1 = model_func(**kwargs).initialize() Y1 = model1.predict(get_X()) fix_random_seed(seed) model2 = model_func(**kwargs).initialize() Y2 = model2.predict(get_X()) if model1.has_ref("tok2vec"): tok2vec1 = model1.get_ref("tok2vec").predict(get_X()) tok2vec2 = model2.get_ref("tok2vec").predict(get_X()) for i in range(len(tok2vec1)): for j in range(len(tok2vec1[i])): assert_array_equal( numpy.asarray(model1.ops.to_numpy(tok2vec1[i][j])), numpy.asarray(model2.ops.to_numpy(tok2vec2[i][j])), ) try: Y1 = model1.ops.to_numpy(Y1) Y2 = model2.ops.to_numpy(Y2) except Exception: pass if isinstance(Y1, numpy.ndarray): assert_array_equal(Y1, Y2) elif isinstance(Y1, List): assert len(Y1) == len(Y2) for y1, y2 in zip(Y1, Y2): try: y1 = model1.ops.to_numpy(y1) y2 = model2.ops.to_numpy(y2) except Exception: pass assert_array_equal(y1, y2) else: raise ValueError(f"Could not compare type {type(Y1)}") @pytest.mark.parametrize( "seed,dropout,model_func,kwargs,get_X", [ (0, 0.2, build_Tok2Vec_model, get_tok2vec_kwargs(), get_docs), (0, 0.2, build_bow_text_classifier, get_textcat_bow_kwargs(), get_docs), (0, 0.2, build_simple_cnn_text_classifier, get_textcat_cnn_kwargs(), get_docs), ], ) def test_models_update_consistently(seed, dropout, model_func, kwargs, get_X): def get_updated_model(): fix_random_seed(seed) optimizer = Adam(0.001) model = model_func(**kwargs).initialize() initial_params = get_all_params(model) set_dropout_rate(model, dropout) for _ in range(5): Y, get_dX = model.begin_update(get_X()) dY = get_gradient(model, Y) get_dX(dY) model.finish_update(optimizer) updated_params = get_all_params(model) with pytest.raises(AssertionError): assert_array_equal( model.ops.to_numpy(initial_params), model.ops.to_numpy(updated_params) ) return model model1 = get_updated_model() model2 = get_updated_model() assert_array_almost_equal( model1.ops.to_numpy(get_all_params(model1)), model2.ops.to_numpy(get_all_params(model2)), decimal=5, ) @pytest.mark.parametrize("model_func,kwargs", [(StaticVectors, {"nO": 128, 
"nM": 300})]) def test_empty_docs(model_func, kwargs): nlp = English() model = model_func(**kwargs).initialize() # Test the layer can be called successfully with 0, 1 and 2 empty docs. for n_docs in range(3): docs = [nlp("") for _ in range(n_docs)] # Test predict model.predict(docs) # Test backprop output, backprop = model.begin_update(docs) backprop(output) def test_init_extract_spans(): extract_spans().initialize() def test_extract_spans_span_indices(): model = extract_spans().initialize() spans = Ragged( model.ops.asarray([[0, 3], [2, 3], [5, 7]], dtype="i"), model.ops.asarray([2, 1], dtype="i"), ) x_lengths = model.ops.asarray([5, 10], dtype="i") indices = _get_span_indices(model.ops, spans, x_lengths) assert list(indices) == [0, 1, 2, 2, 10, 11] def test_extract_spans_forward_backward(): model = extract_spans().initialize() X = Ragged(model.ops.alloc2f(15, 4), model.ops.asarray([5, 10], dtype="i")) spans = Ragged( model.ops.asarray([[0, 3], [2, 3], [5, 7]], dtype="i"), model.ops.asarray([2, 1], dtype="i"), ) Y, backprop = model.begin_update((X, spans)) assert list(Y.lengths) == [3, 1, 2] assert Y.dataXd.shape == (6, 4) dX, spans2 = backprop(Y) assert spans2 is spans assert dX.dataXd.shape == X.dataXd.shape assert list(dX.lengths) == list(X.lengths) def test_spancat_model_init(): model = build_spancat_model( build_Tok2Vec_model(**get_tok2vec_kwargs()), reduce_mean(), Logistic() ) model.initialize() def test_spancat_model_forward_backward(nO=5): tok2vec = build_Tok2Vec_model(**get_tok2vec_kwargs()) docs = get_docs() spans_list = [] lengths = [] for doc in docs: spans_list.append(doc[:2]) spans_list.append(doc[1:4]) lengths.append(2) spans = Ragged( tok2vec.ops.asarray([[s.start, s.end] for s in spans_list], dtype="i"), tok2vec.ops.asarray(lengths, dtype="i"), ) model = build_spancat_model( tok2vec, reduce_mean(), chain(Relu(nO=nO), Logistic()) ).initialize(X=(docs, spans)) Y, backprop = model((docs, spans), is_train=True) assert Y.shape == (spans.dataXd.shape[0], nO) backprop(Y)
9,082
30.648084
88
py
spaCy
spaCy-master/spacy/tests/test_pickles.py
import numpy
import pytest
import srsly

from spacy.attrs import NORM
from spacy.lang.en import English
from spacy.strings import StringStore
from spacy.tokens import Doc
from spacy.vocab import Vocab


@pytest.mark.parametrize("text1,text2", [("hello", "bye")])
def test_pickle_string_store(text1, text2):
    stringstore = StringStore()
    store1 = stringstore[text1]
    store2 = stringstore[text2]
    data = srsly.pickle_dumps(stringstore, protocol=-1)
    unpickled = srsly.pickle_loads(data)
    assert unpickled[text1] == store1
    assert unpickled[text2] == store2
    assert len(stringstore) == len(unpickled)


@pytest.mark.parametrize("text1,text2", [("dog", "cat")])
def test_pickle_vocab(text1, text2):
    vocab = Vocab(
        lex_attr_getters={int(NORM): lambda string: string[:-1]},
        get_noun_chunks=English.Defaults.syntax_iterators.get("noun_chunks"),
    )
    vocab.set_vector("dog", numpy.ones((5,), dtype="f"))
    lex1 = vocab[text1]
    lex2 = vocab[text2]
    assert lex1.norm_ == text1[:-1]
    assert lex2.norm_ == text2[:-1]
    data = srsly.pickle_dumps(vocab)
    unpickled = srsly.pickle_loads(data)
    assert unpickled[text1].orth == lex1.orth
    assert unpickled[text2].orth == lex2.orth
    assert unpickled[text1].norm == lex1.norm
    assert unpickled[text2].norm == lex2.norm
    assert unpickled[text1].norm != unpickled[text2].norm
    assert unpickled.vectors is not None
    assert unpickled.get_noun_chunks is not None
    assert list(vocab["dog"].vector) == [1.0, 1.0, 1.0, 1.0, 1.0]


def test_pickle_doc(en_vocab):
    words = ["a", "b", "c"]
    deps = ["dep"] * len(words)
    heads = [0] * len(words)
    doc = Doc(
        en_vocab,
        words=words,
        deps=deps,
        heads=heads,
    )
    data = srsly.pickle_dumps(doc)
    unpickled = srsly.pickle_loads(data)
    assert [t.text for t in unpickled] == words
    assert [t.dep_ for t in unpickled] == deps
    assert [t.head.i for t in unpickled] == heads
    assert list(doc.noun_chunks) == []
2,023
31.126984
77
py
spaCy
spaCy-master/spacy/tests/test_scorer.py
import pytest from numpy.testing import assert_almost_equal, assert_array_almost_equal from pytest import approx from spacy.lang.en import English from spacy.scorer import PRFScore, ROCAUCScore, Scorer, _roc_auc_score, _roc_curve from spacy.tokens import Doc, Span from spacy.training import Example from spacy.training.iob_utils import offsets_to_biluo_tags test_las_apple = [ [ "Apple is looking at buying U.K. startup for $ 1 billion", { "heads": [2, 2, 2, 2, 3, 6, 4, 4, 10, 10, 7], "deps": [ "nsubj", "aux", "ROOT", "prep", "pcomp", "compound", "dobj", "prep", "quantmod", "compound", "pobj", ], }, ] ] test_ner_cardinal = [ ["100 - 200", {"entities": [[0, 3, "CARDINAL"], [6, 9, "CARDINAL"]]}] ] test_ner_apple = [ [ "Apple is looking at buying U.K. startup for $1 billion", {"entities": [(0, 5, "ORG"), (27, 31, "GPE"), (44, 54, "MONEY")]}, ] ] @pytest.fixture def tagged_doc(): text = "Sarah's sister flew to Silicon Valley via London." tags = ["NNP", "POS", "NN", "VBD", "IN", "NNP", "NNP", "IN", "NNP", "."] pos = [ "PROPN", "PART", "NOUN", "VERB", "ADP", "PROPN", "PROPN", "ADP", "PROPN", "PUNCT", ] morphs = [ "NounType=prop|Number=sing", "Poss=yes", "Number=sing", "Tense=past|VerbForm=fin", "", "NounType=prop|Number=sing", "NounType=prop|Number=sing", "", "NounType=prop|Number=sing", "PunctType=peri", ] nlp = English() doc = nlp(text) for i in range(len(tags)): doc[i].tag_ = tags[i] doc[i].pos_ = pos[i] doc[i].set_morph(morphs[i]) if i > 0: doc[i].is_sent_start = False return doc @pytest.fixture def sented_doc(): text = "One sentence. Two sentences. Three sentences." nlp = English() doc = nlp(text) for i in range(len(doc)): if i % 3 == 0: doc[i].is_sent_start = True else: doc[i].is_sent_start = False return doc def test_tokenization(sented_doc): scorer = Scorer() gold = {"sent_starts": [t.sent_start for t in sented_doc]} example = Example.from_dict(sented_doc, gold) scores = scorer.score([example]) assert scores["token_acc"] == 1.0 nlp = English() example.predicted = Doc( nlp.vocab, words=["One", "sentence.", "Two", "sentences.", "Three", "sentences."], spaces=[True, True, True, True, True, False], ) example.predicted[1].is_sent_start = False scores = scorer.score([example]) assert scores["token_acc"] == 0.5 assert scores["token_p"] == 0.5 assert scores["token_r"] == approx(0.33333333) assert scores["token_f"] == 0.4 # per-component scoring scorer = Scorer() scores = scorer.score([example], per_component=True) assert scores["tokenizer"]["token_acc"] == 0.5 assert scores["tokenizer"]["token_p"] == 0.5 assert scores["tokenizer"]["token_r"] == approx(0.33333333) assert scores["tokenizer"]["token_f"] == 0.4 def test_sents(sented_doc): scorer = Scorer() gold = {"sent_starts": [t.sent_start for t in sented_doc]} example = Example.from_dict(sented_doc, gold) scores = scorer.score([example]) assert scores["sents_f"] == 1.0 # One sentence start is moved gold["sent_starts"][3] = 0 gold["sent_starts"][4] = 1 example = Example.from_dict(sented_doc, gold) scores = scorer.score([example]) assert scores["sents_f"] == approx(0.3333333) def test_las_per_type(en_vocab): # Gold and Doc are identical scorer = Scorer() examples = [] for input_, annot in test_las_apple: doc = Doc( en_vocab, words=input_.split(" "), heads=annot["heads"], deps=annot["deps"] ) gold = {"heads": annot["heads"], "deps": annot["deps"]} example = Example.from_dict(doc, gold) examples.append(example) results = scorer.score(examples) assert results["dep_uas"] == 1.0 assert results["dep_las"] == 1.0 assert 
results["dep_las_per_type"]["nsubj"]["p"] == 1.0 assert results["dep_las_per_type"]["nsubj"]["r"] == 1.0 assert results["dep_las_per_type"]["nsubj"]["f"] == 1.0 assert results["dep_las_per_type"]["compound"]["p"] == 1.0 assert results["dep_las_per_type"]["compound"]["r"] == 1.0 assert results["dep_las_per_type"]["compound"]["f"] == 1.0 # One dep is incorrect in Doc scorer = Scorer() examples = [] for input_, annot in test_las_apple: doc = Doc( en_vocab, words=input_.split(" "), heads=annot["heads"], deps=annot["deps"] ) gold = {"heads": annot["heads"], "deps": annot["deps"]} doc[0].dep_ = "compound" example = Example.from_dict(doc, gold) examples.append(example) results = scorer.score(examples) assert results["dep_uas"] == 1.0 assert_almost_equal(results["dep_las"], 0.9090909) assert results["dep_las_per_type"]["nsubj"]["p"] == 0 assert results["dep_las_per_type"]["nsubj"]["r"] == 0 assert results["dep_las_per_type"]["nsubj"]["f"] == 0 assert_almost_equal(results["dep_las_per_type"]["compound"]["p"], 0.666666666) assert results["dep_las_per_type"]["compound"]["r"] == 1.0 assert results["dep_las_per_type"]["compound"]["f"] == 0.8 def test_ner_per_type(en_vocab): # Gold and Doc are identical scorer = Scorer() examples = [] for input_, annot in test_ner_cardinal: doc = Doc( en_vocab, words=input_.split(" "), ents=["B-CARDINAL", "O", "B-CARDINAL"] ) entities = offsets_to_biluo_tags(doc, annot["entities"]) example = Example.from_dict(doc, {"entities": entities}) # a hack for sentence boundaries example.predicted[1].is_sent_start = False example.reference[1].is_sent_start = False examples.append(example) results = scorer.score(examples) assert results["ents_p"] == 1.0 assert results["ents_r"] == 1.0 assert results["ents_f"] == 1.0 assert results["ents_per_type"]["CARDINAL"]["p"] == 1.0 assert results["ents_per_type"]["CARDINAL"]["r"] == 1.0 assert results["ents_per_type"]["CARDINAL"]["f"] == 1.0 # Doc has one missing and one extra entity # Entity type MONEY is not present in Doc scorer = Scorer() examples = [] for input_, annot in test_ner_apple: doc = Doc( en_vocab, words=input_.split(" "), ents=["B-ORG", "O", "O", "O", "O", "B-GPE", "B-ORG", "O", "O", "O"], ) entities = offsets_to_biluo_tags(doc, annot["entities"]) example = Example.from_dict(doc, {"entities": entities}) # a hack for sentence boundaries example.predicted[1].is_sent_start = False example.reference[1].is_sent_start = False examples.append(example) results = scorer.score(examples) assert results["ents_p"] == approx(0.6666666) assert results["ents_r"] == approx(0.6666666) assert results["ents_f"] == approx(0.6666666) assert "GPE" in results["ents_per_type"] assert "MONEY" in results["ents_per_type"] assert "ORG" in results["ents_per_type"] assert results["ents_per_type"]["GPE"]["p"] == 1.0 assert results["ents_per_type"]["GPE"]["r"] == 1.0 assert results["ents_per_type"]["GPE"]["f"] == 1.0 assert results["ents_per_type"]["MONEY"]["p"] == 0 assert results["ents_per_type"]["MONEY"]["r"] == 0 assert results["ents_per_type"]["MONEY"]["f"] == 0 assert results["ents_per_type"]["ORG"]["p"] == 0.5 assert results["ents_per_type"]["ORG"]["r"] == 1.0 assert results["ents_per_type"]["ORG"]["f"] == approx(0.6666666) def test_tag_score(tagged_doc): # Gold and Doc are identical scorer = Scorer() gold = { "tags": [t.tag_ for t in tagged_doc], "pos": [t.pos_ for t in tagged_doc], "morphs": [str(t.morph) for t in tagged_doc], "sent_starts": [1 if t.is_sent_start else -1 for t in tagged_doc], } example = Example.from_dict(tagged_doc, gold) results = 
scorer.score([example]) assert results["tag_acc"] == 1.0 assert results["pos_acc"] == 1.0 assert results["morph_acc"] == 1.0 assert results["morph_micro_f"] == 1.0 assert results["morph_per_feat"]["NounType"]["f"] == 1.0 # Gold annotation is modified scorer = Scorer() tags = [t.tag_ for t in tagged_doc] tags[0] = "NN" pos = [t.pos_ for t in tagged_doc] pos[1] = "X" morphs = [str(t.morph) for t in tagged_doc] morphs[1] = "Number=sing" morphs[2] = "Number=plur" gold = { "tags": tags, "pos": pos, "morphs": morphs, "sent_starts": gold["sent_starts"], } example = Example.from_dict(tagged_doc, gold) results = scorer.score([example]) assert results["tag_acc"] == 0.9 assert results["pos_acc"] == 0.9 assert results["morph_acc"] == approx(0.8) assert results["morph_micro_f"] == approx(0.8461538) assert results["morph_per_feat"]["NounType"]["f"] == 1.0 assert results["morph_per_feat"]["Poss"]["f"] == 0.0 assert results["morph_per_feat"]["Number"]["f"] == approx(0.72727272) # per-component scoring scorer = Scorer() results = scorer.score([example], per_component=True) assert results["tagger"]["tag_acc"] == 0.9 assert results["morphologizer"]["pos_acc"] == 0.9 assert results["morphologizer"]["morph_acc"] == approx(0.8) def test_partial_annotation(en_tokenizer): pred_doc = en_tokenizer("a b c d e") pred_doc[0].tag_ = "A" pred_doc[0].pos_ = "X" pred_doc[0].set_morph("Feat=Val") pred_doc[0].dep_ = "dep" # unannotated reference ref_doc = en_tokenizer("a b c d e") ref_doc.has_unknown_spaces = True example = Example(pred_doc, ref_doc) scorer = Scorer() scores = scorer.score([example]) for key in scores: # cats doesn't have an unset state if key.startswith("cats"): continue assert scores[key] is None # partially annotated reference, not overlapping with predicted annotation ref_doc = en_tokenizer("a b c d e") ref_doc.has_unknown_spaces = True ref_doc[1].tag_ = "A" ref_doc[1].pos_ = "X" ref_doc[1].set_morph("Feat=Val") ref_doc[1].dep_ = "dep" example = Example(pred_doc, ref_doc) scorer = Scorer() scores = scorer.score([example]) assert scores["token_acc"] is None assert scores["tag_acc"] == 0.0 assert scores["pos_acc"] == 0.0 assert scores["morph_acc"] == 0.0 assert scores["dep_uas"] == 1.0 assert scores["dep_las"] == 0.0 assert scores["sents_f"] is None # partially annotated reference, overlapping with predicted annotation ref_doc = en_tokenizer("a b c d e") ref_doc.has_unknown_spaces = True ref_doc[0].tag_ = "A" ref_doc[0].pos_ = "X" ref_doc[1].set_morph("Feat=Val") ref_doc[1].dep_ = "dep" example = Example(pred_doc, ref_doc) scorer = Scorer() scores = scorer.score([example]) assert scores["token_acc"] is None assert scores["tag_acc"] == 1.0 assert scores["pos_acc"] == 1.0 assert scores["morph_acc"] == 0.0 assert scores["dep_uas"] == 1.0 assert scores["dep_las"] == 0.0 assert scores["sents_f"] is None def test_roc_auc_score(): # Binary classification, toy tests from scikit-learn test suite y_true = [0, 1] y_score = [0, 1] tpr, fpr, _ = _roc_curve(y_true, y_score) roc_auc = _roc_auc_score(y_true, y_score) assert_array_almost_equal(tpr, [0, 0, 1]) assert_array_almost_equal(fpr, [0, 1, 1]) assert_almost_equal(roc_auc, 1.0) y_true = [0, 1] y_score = [1, 0] tpr, fpr, _ = _roc_curve(y_true, y_score) roc_auc = _roc_auc_score(y_true, y_score) assert_array_almost_equal(tpr, [0, 1, 1]) assert_array_almost_equal(fpr, [0, 0, 1]) assert_almost_equal(roc_auc, 0.0) y_true = [1, 0] y_score = [1, 1] tpr, fpr, _ = _roc_curve(y_true, y_score) roc_auc = _roc_auc_score(y_true, y_score) assert_array_almost_equal(tpr, [0, 1]) 
assert_array_almost_equal(fpr, [0, 1]) assert_almost_equal(roc_auc, 0.5) y_true = [1, 0] y_score = [1, 0] tpr, fpr, _ = _roc_curve(y_true, y_score) roc_auc = _roc_auc_score(y_true, y_score) assert_array_almost_equal(tpr, [0, 0, 1]) assert_array_almost_equal(fpr, [0, 1, 1]) assert_almost_equal(roc_auc, 1.0) y_true = [1, 0] y_score = [0.5, 0.5] tpr, fpr, _ = _roc_curve(y_true, y_score) roc_auc = _roc_auc_score(y_true, y_score) assert_array_almost_equal(tpr, [0, 1]) assert_array_almost_equal(fpr, [0, 1]) assert_almost_equal(roc_auc, 0.5) # same result as above with ROCAUCScore wrapper score = ROCAUCScore() score.score_set(0.5, 1) score.score_set(0.5, 0) assert_almost_equal(score.score, 0.5) # check that errors are raised in undefined cases and score is -inf y_true = [0, 0] y_score = [0.25, 0.75] with pytest.raises(ValueError): _roc_auc_score(y_true, y_score) score = ROCAUCScore() score.score_set(0.25, 0) score.score_set(0.75, 0) with pytest.raises(ValueError): _ = score.score # noqa: F841 y_true = [1, 1] y_score = [0.25, 0.75] with pytest.raises(ValueError): _roc_auc_score(y_true, y_score) score = ROCAUCScore() score.score_set(0.25, 1) score.score_set(0.75, 1) with pytest.raises(ValueError): _ = score.score # noqa: F841 def test_score_spans(): nlp = English() text = "This is just a random sentence." key = "my_spans" gold = nlp.make_doc(text) pred = nlp.make_doc(text) spans = [] spans.append(gold.char_span(0, 4, label="PERSON")) spans.append(gold.char_span(0, 7, label="ORG")) spans.append(gold.char_span(8, 12, label="ORG")) gold.spans[key] = spans def span_getter(doc, span_key): return doc.spans[span_key] # Predict exactly the same, but overlapping spans will be discarded pred.spans[key] = gold.spans[key].copy(doc=pred) eg = Example(pred, gold) scores = Scorer.score_spans([eg], attr=key, getter=span_getter) assert scores[f"{key}_p"] == 1.0 assert scores[f"{key}_r"] < 1.0 # Allow overlapping, now both precision and recall should be 100% pred.spans[key] = gold.spans[key].copy(doc=pred) eg = Example(pred, gold) scores = Scorer.score_spans([eg], attr=key, getter=span_getter, allow_overlap=True) assert scores[f"{key}_p"] == 1.0 assert scores[f"{key}_r"] == 1.0 # Change the predicted labels new_spans = [Span(pred, span.start, span.end, label="WRONG") for span in spans] pred.spans[key] = new_spans eg = Example(pred, gold) scores = Scorer.score_spans([eg], attr=key, getter=span_getter, allow_overlap=True) assert scores[f"{key}_p"] == 0.0 assert scores[f"{key}_r"] == 0.0 assert f"{key}_per_type" in scores # Discard labels from the evaluation scores = Scorer.score_spans( [eg], attr=key, getter=span_getter, allow_overlap=True, labeled=False ) assert scores[f"{key}_p"] == 1.0 assert scores[f"{key}_r"] == 1.0 assert f"{key}_per_type" not in scores def test_prf_score(): cand = {"hi", "ho"} gold1 = {"yo", "hi"} gold2 = set() a = PRFScore() a.score_set(cand=cand, gold=gold1) assert (a.precision, a.recall, a.fscore) == approx((0.5, 0.5, 0.5)) b = PRFScore() b.score_set(cand=cand, gold=gold2) assert (b.precision, b.recall, b.fscore) == approx((0.0, 0.0, 0.0)) c = a + b assert (c.precision, c.recall, c.fscore) == approx((0.25, 0.5, 0.33333333)) a += b assert (a.precision, a.recall, a.fscore) == approx( (c.precision, c.recall, c.fscore) ) def test_score_cats(en_tokenizer): text = "some text" gold_doc = en_tokenizer(text) gold_doc.cats = {"POSITIVE": 1.0, "NEGATIVE": 0.0} pred_doc = en_tokenizer(text) pred_doc.cats = {"POSITIVE": 0.75, "NEGATIVE": 0.25} example = Example(pred_doc, gold_doc) # threshold is ignored 
for multi_label=False scores1 = Scorer.score_cats( [example], "cats", labels=list(gold_doc.cats.keys()), multi_label=False, positive_label="POSITIVE", threshold=0.1, ) scores2 = Scorer.score_cats( [example], "cats", labels=list(gold_doc.cats.keys()), multi_label=False, positive_label="POSITIVE", threshold=0.9, ) assert scores1["cats_score"] == 1.0 assert scores2["cats_score"] == 1.0 assert scores1 == scores2 # threshold is relevant for multi_label=True scores = Scorer.score_cats( [example], "cats", labels=list(gold_doc.cats.keys()), multi_label=True, threshold=0.9, ) assert scores["cats_macro_f"] == 0.0 # threshold is relevant for multi_label=True scores = Scorer.score_cats( [example], "cats", labels=list(gold_doc.cats.keys()), multi_label=True, threshold=0.1, ) assert scores["cats_macro_f"] == 0.5
17,478
31.488848
87
py
spaCy
spaCy-master/spacy/tests/test_ty.py
import spacy
from spacy import ty


def test_component_types():
    nlp = spacy.blank("en")
    tok2vec = nlp.create_pipe("tok2vec")
    tagger = nlp.create_pipe("tagger")
    entity_ruler = nlp.create_pipe("entity_ruler")
    assert isinstance(tok2vec, ty.TrainableComponent)
    assert isinstance(tagger, ty.TrainableComponent)
    assert not isinstance(entity_ruler, ty.TrainableComponent)
    assert isinstance(tok2vec, ty.InitializableComponent)
    assert isinstance(tagger, ty.InitializableComponent)
    assert isinstance(entity_ruler, ty.InitializableComponent)
    assert isinstance(tok2vec, ty.ListenedToComponent)
    assert not isinstance(tagger, ty.ListenedToComponent)
    assert not isinstance(entity_ruler, ty.ListenedToComponent)
748
38.421053
63
py
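Because the protocols checked in test_component_types are runtime-checkable, the same isinstance checks work for components the test does not cover. A minimal sketch, assuming a blank English pipeline; the choice of "ner" as a further example of a trainable pipe is illustrative, not part of the dataset.

import spacy
from spacy import ty

nlp = spacy.blank("en")
ner = nlp.create_pipe("ner")
# A transition-based pipe exposes the same training/initialization surface,
# so it satisfies the same protocols as the tagger above.
assert isinstance(ner, ty.TrainableComponent)
assert isinstance(ner, ty.InitializableComponent)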
spaCy
spaCy-master/spacy/tests/util.py
import contextlib
import re
import tempfile

import numpy
import srsly
from thinc.api import get_current_ops

from spacy.tokens import Doc
from spacy.training import split_bilu_label
from spacy.util import make_tempdir  # noqa: F401
from spacy.vocab import Vocab


@contextlib.contextmanager
def make_tempfile(mode="r"):
    f = tempfile.TemporaryFile(mode=mode)
    yield f
    f.close()


def get_batch(batch_size):
    vocab = Vocab()
    docs = []
    start = 0
    for size in range(1, batch_size + 1):
        # Make the words numbers, so that they're distinct
        # across the batch, and easy to track.
        numbers = [str(i) for i in range(start, start + size)]
        docs.append(Doc(vocab, words=numbers))
        start += size
    return docs


def get_random_doc(n_words):
    vocab = Vocab()
    # Make the words numbers, so that they're easy to track.
    numbers = [str(i) for i in range(0, n_words)]
    return Doc(vocab, words=numbers)


def apply_transition_sequence(parser, doc, sequence):
    """Perform a series of pre-specified transitions, to put the parser in a
    desired state."""
    for action_name in sequence:
        if "-" in action_name:
            move, label = split_bilu_label(action_name)
            parser.add_label(label)
    with parser.step_through(doc) as stepwise:
        for transition in sequence:
            stepwise.transition(transition)


def add_vecs_to_vocab(vocab, vectors):
    """Add list of vector tuples to given vocab. All vectors need to have the
    same length. Format: [("text", [1, 2, 3])]"""
    length = len(vectors[0][1])
    vocab.reset_vectors(width=length)
    for word, vec in vectors:
        vocab.set_vector(word, vector=vec)
    return vocab


def get_cosine(vec1, vec2):
    """Get cosine for two given vectors"""
    OPS = get_current_ops()
    v1 = OPS.to_numpy(OPS.asarray(vec1))
    v2 = OPS.to_numpy(OPS.asarray(vec2))
    return numpy.dot(v1, v2) / (numpy.linalg.norm(v1) * numpy.linalg.norm(v2))


def assert_docs_equal(doc1, doc2):
    """Compare two Doc objects and assert that they're equal. Tests for
    tokens, tags, dependencies and entities."""
    assert [t.orth for t in doc1] == [t.orth for t in doc2]
    assert [t.pos for t in doc1] == [t.pos for t in doc2]
    assert [t.tag for t in doc1] == [t.tag for t in doc2]
    assert [t.head.i for t in doc1] == [t.head.i for t in doc2]
    assert [t.dep for t in doc1] == [t.dep for t in doc2]
    assert [t.is_sent_start for t in doc1] == [t.is_sent_start for t in doc2]
    assert [t.ent_type for t in doc1] == [t.ent_type for t in doc2]
    assert [t.ent_iob for t in doc1] == [t.ent_iob for t in doc2]
    for ent1, ent2 in zip(doc1.ents, doc2.ents):
        assert ent1.start == ent2.start
        assert ent1.end == ent2.end
        assert ent1.label == ent2.label
        assert ent1.kb_id == ent2.kb_id


def assert_packed_msg_equal(b1, b2):
    """Assert that two packed msgpack messages are equal."""
    msg1 = srsly.msgpack_loads(b1)
    msg2 = srsly.msgpack_loads(b2)
    assert sorted(msg1.keys()) == sorted(msg2.keys())
    for (k1, v1), (k2, v2) in zip(sorted(msg1.items()), sorted(msg2.items())):
        assert k1 == k2
        assert v1 == v2


def normalize_whitespace(s):
    return re.sub(r"\s+", " ", s)
3,281
30.257143
79
py
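The helpers in spacy/tests/util.py above are typically composed together inside tests. A minimal sketch, assuming the definitions shown above (add_vecs_to_vocab, get_cosine) are in scope, e.g. via a source checkout; the three-dimensional toy vectors are made up purely for illustration.

from spacy.vocab import Vocab

vocab = add_vecs_to_vocab(
    Vocab(), [("cat", [1.0, 0.0, 0.0]), ("dog", [1.0, 1.0, 0.0])]
)
# Two distinct, non-orthogonal vectors give a cosine strictly between 0 and 1.
similarity = get_cosine(vocab["cat"].vector, vocab["dog"].vector)
assert 0.0 < similarity < 1.0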
spaCy
spaCy-master/spacy/tests/doc/__init__.py
0
0
0
py
spaCy
spaCy-master/spacy/tests/doc/test_add_entities.py
import pytest

from spacy import registry
from spacy.pipeline import EntityRecognizer
from spacy.pipeline.ner import DEFAULT_NER_MODEL
from spacy.tokens import Doc, Span
from spacy.training import Example


def _ner_example(ner):
    doc = Doc(
        ner.vocab,
        words=["Joe", "loves", "visiting", "London", "during", "the", "weekend"],
    )
    gold = {"entities": [(0, 3, "PERSON"), (19, 25, "LOC")]}
    return Example.from_dict(doc, gold)


def test_doc_add_entities_set_ents_iob(en_vocab):
    text = ["This", "is", "a", "lion"]
    doc = Doc(en_vocab, words=text)
    cfg = {"model": DEFAULT_NER_MODEL}
    model = registry.resolve(cfg, validate=True)["model"]
    ner = EntityRecognizer(en_vocab, model)
    ner.initialize(lambda: [_ner_example(ner)])
    ner(doc)
    doc.ents = [("ANIMAL", 3, 4)]
    assert [w.ent_iob_ for w in doc] == ["O", "O", "O", "B"]
    doc.ents = [("WORD", 0, 2)]
    assert [w.ent_iob_ for w in doc] == ["B", "I", "O", "O"]


def test_ents_reset(en_vocab):
    """Ensure that resetting doc.ents does not change anything"""
    text = ["This", "is", "a", "lion"]
    doc = Doc(en_vocab, words=text)
    cfg = {"model": DEFAULT_NER_MODEL}
    model = registry.resolve(cfg, validate=True)["model"]
    ner = EntityRecognizer(en_vocab, model)
    ner.initialize(lambda: [_ner_example(ner)])
    ner(doc)
    orig_iobs = [t.ent_iob_ for t in doc]
    doc.ents = list(doc.ents)
    assert [t.ent_iob_ for t in doc] == orig_iobs


def test_add_overlapping_entities(en_vocab):
    text = ["Louisiana", "Office", "of", "Conservation"]
    doc = Doc(en_vocab, words=text)
    entity = Span(doc, 0, 4, label=391)
    doc.ents = [entity]
    new_entity = Span(doc, 0, 1, label=392)
    with pytest.raises(ValueError):
        doc.ents = list(doc.ents) + [new_entity]
1,804
30.12069
81
py
spaCy
spaCy-master/spacy/tests/doc/test_array.py
import numpy
import pytest

from spacy.attrs import DEP, MORPH, ORTH, POS, SHAPE
from spacy.tokens import Doc


@pytest.mark.issue(2203)
def test_issue2203(en_vocab):
    """Test that lemmas are set correctly in doc.from_array."""
    words = ["I", "'ll", "survive"]
    tags = ["PRP", "MD", "VB"]
    lemmas = ["-PRON-", "will", "survive"]
    tag_ids = [en_vocab.strings.add(tag) for tag in tags]
    lemma_ids = [en_vocab.strings.add(lemma) for lemma in lemmas]
    doc = Doc(en_vocab, words=words)
    # Work around lemma corruption problem and set lemmas after tags
    doc.from_array("TAG", numpy.array(tag_ids, dtype="uint64"))
    doc.from_array("LEMMA", numpy.array(lemma_ids, dtype="uint64"))
    assert [t.tag_ for t in doc] == tags
    assert [t.lemma_ for t in doc] == lemmas
    # We need to serialize both tag and lemma, since this is what causes the bug
    doc_array = doc.to_array(["TAG", "LEMMA"])
    new_doc = Doc(doc.vocab, words=words).from_array(["TAG", "LEMMA"], doc_array)
    assert [t.tag_ for t in new_doc] == tags
    assert [t.lemma_ for t in new_doc] == lemmas


def test_doc_array_attr_of_token(en_vocab):
    doc = Doc(en_vocab, words=["An", "example", "sentence"])
    example = doc.vocab["example"]
    assert example.orth != example.shape
    feats_array = doc.to_array((ORTH, SHAPE))
    assert feats_array[0][0] != feats_array[0][1]
    assert feats_array[0][0] != feats_array[0][1]


def test_doc_stringy_array_attr_of_token(en_vocab):
    doc = Doc(en_vocab, words=["An", "example", "sentence"])
    example = doc.vocab["example"]
    assert example.orth != example.shape
    feats_array = doc.to_array((ORTH, SHAPE))
    feats_array_stringy = doc.to_array(("ORTH", "SHAPE"))
    assert feats_array_stringy[0][0] == feats_array[0][0]
    assert feats_array_stringy[0][1] == feats_array[0][1]


def test_doc_scalar_attr_of_token(en_vocab):
    doc = Doc(en_vocab, words=["An", "example", "sentence"])
    example = doc.vocab["example"]
    assert example.orth != example.shape
    feats_array = doc.to_array(ORTH)
    assert feats_array.shape == (3,)


def test_doc_array_tag(en_vocab):
    words = ["A", "nice", "sentence", "."]
    pos = ["DET", "ADJ", "NOUN", "PUNCT"]
    doc = Doc(en_vocab, words=words, pos=pos)
    assert doc[0].pos != doc[1].pos != doc[2].pos != doc[3].pos
    feats_array = doc.to_array((ORTH, POS))
    assert feats_array[0][1] == doc[0].pos
    assert feats_array[1][1] == doc[1].pos
    assert feats_array[2][1] == doc[2].pos
    assert feats_array[3][1] == doc[3].pos


def test_doc_array_morph(en_vocab):
    words = ["Eat", "blue", "ham"]
    morph = ["Feat=V", "Feat=J", "Feat=N"]
    doc = Doc(en_vocab, words=words, morphs=morph)
    assert morph[0] == str(doc[0].morph)
    assert morph[1] == str(doc[1].morph)
    assert morph[2] == str(doc[2].morph)
    feats_array = doc.to_array((ORTH, MORPH))
    assert feats_array[0][1] == doc[0].morph.key
    assert feats_array[1][1] == doc[1].morph.key
    assert feats_array[2][1] == doc[2].morph.key


def test_doc_array_dep(en_vocab):
    words = ["A", "nice", "sentence", "."]
    deps = ["det", "amod", "ROOT", "punct"]
    doc = Doc(en_vocab, words=words, deps=deps)
    feats_array = doc.to_array((ORTH, DEP))
    assert feats_array[0][1] == doc[0].dep
    assert feats_array[1][1] == doc[1].dep
    assert feats_array[2][1] == doc[2].dep
    assert feats_array[3][1] == doc[3].dep


@pytest.mark.parametrize("attrs", [["ORTH", "SHAPE"], "IS_ALPHA"])
def test_doc_array_to_from_string_attrs(en_vocab, attrs):
    """Test that both Doc.to_array and Doc.from_array accept string attrs,
    as well as single attrs and sequences of attrs.
    """
    words = ["An", "example", "sentence"]
    doc = Doc(en_vocab, words=words)
    Doc(en_vocab, words=words).from_array(attrs, doc.to_array(attrs))


def test_doc_array_idx(en_vocab):
    """Test that Doc.to_array can retrieve token start indices"""
    words = ["An", "example", "sentence"]
    offsets = Doc(en_vocab, words=words).to_array("IDX")
    assert offsets[0] == 0
    assert offsets[1] == 3
    assert offsets[2] == 11


def test_doc_from_array_heads_in_bounds(en_vocab):
    """Test that Doc.from_array doesn't set heads that are out of bounds."""
    words = ["This", "is", "a", "sentence", "."]
    doc = Doc(en_vocab, words=words)
    for token in doc:
        token.head = doc[0]

    # correct
    arr = doc.to_array(["HEAD"])
    doc_from_array = Doc(en_vocab, words=words)
    doc_from_array.from_array(["HEAD"], arr)

    # head before start
    arr = doc.to_array(["HEAD"])
    arr[0] = numpy.int32(-1).astype(numpy.uint64)
    doc_from_array = Doc(en_vocab, words=words)
    with pytest.raises(ValueError):
        doc_from_array.from_array(["HEAD"], arr)

    # head after end
    arr = doc.to_array(["HEAD"])
    arr[0] = numpy.int32(5).astype(numpy.uint64)
    doc_from_array = Doc(en_vocab, words=words)
    with pytest.raises(ValueError):
        doc_from_array.from_array(["HEAD"], arr)
4,988
35.416058
81
py
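The tests above compare the raw 64-bit hash IDs that Doc.to_array returns, but never show how to map them back to strings. A minimal sketch, assuming a blank English pipeline; decoding goes through the vocab's StringStore.

import spacy

nlp = spacy.blank("en")
doc = nlp("A nice sentence .")
arr = doc.to_array(["ORTH", "SHAPE"])
# Each cell is a hash; the StringStore maps it back to the original string.
words = [doc.vocab.strings[int(orth_id)] for orth_id in arr[:, 0]]
assert words == [t.text for t in doc]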
spaCy
spaCy-master/spacy/tests/doc/test_creation.py
import pytest

from spacy import util
from spacy.tokens import Doc
from spacy.vocab import Vocab


@pytest.fixture
def vocab():
    return Vocab()


def test_empty_doc(vocab):
    doc = Doc(vocab)
    assert len(doc) == 0


def test_single_word(vocab):
    doc = Doc(vocab, words=["a"])
    assert doc.text == "a "
    doc = Doc(vocab, words=["a"], spaces=[False])
    assert doc.text == "a"


def test_create_from_words_and_text(vocab):
    # no whitespace in words
    words = ["'", "dogs", "'", "run"]
    text = " 'dogs'\n\nrun "
    (words, spaces) = util.get_words_and_spaces(words, text)
    doc = Doc(vocab, words=words, spaces=spaces)
    assert [t.text for t in doc] == [" ", "'", "dogs", "'", "\n\n", "run", " "]
    assert [t.whitespace_ for t in doc] == ["", "", "", "", "", " ", ""]
    assert doc.text == text
    assert [t.text for t in doc if not t.text.isspace()] == [
        word for word in words if not word.isspace()
    ]
    # partial whitespace in words
    words = [" ", "'", "dogs", "'", "\n\n", "run", " "]
    text = " 'dogs'\n\nrun "
    (words, spaces) = util.get_words_and_spaces(words, text)
    doc = Doc(vocab, words=words, spaces=spaces)
    assert [t.text for t in doc] == [" ", "'", "dogs", "'", "\n\n", "run", " "]
    assert [t.whitespace_ for t in doc] == ["", "", "", "", "", " ", ""]
    assert doc.text == text
    assert [t.text for t in doc if not t.text.isspace()] == [
        word for word in words if not word.isspace()
    ]
    # non-standard whitespace tokens
    words = [" ", " ", "'", "dogs", "'", "\n\n", "run"]
    text = " 'dogs'\n\nrun "
    (words, spaces) = util.get_words_and_spaces(words, text)
    doc = Doc(vocab, words=words, spaces=spaces)
    assert [t.text for t in doc] == [" ", "'", "dogs", "'", "\n\n", "run", " "]
    assert [t.whitespace_ for t in doc] == ["", "", "", "", "", " ", ""]
    assert doc.text == text
    assert [t.text for t in doc if not t.text.isspace()] == [
        word for word in words if not word.isspace()
    ]
    # mismatch between words and text
    with pytest.raises(ValueError):
        words = [" ", " ", "'", "dogs", "'", "\n\n", "run"]
        text = " 'dogs'\n\nrun "
        (words, spaces) = util.get_words_and_spaces(words + ["away"], text)


def test_create_with_heads_and_no_deps(vocab):
    words = "I like ginger".split()
    heads = list(range(len(words)))
    with pytest.raises(ValueError):
        Doc(vocab, words=words, heads=heads)


def test_create_invalid_pos(vocab):
    words = "I like ginger".split()
    pos = "QQ ZZ XX".split()
    with pytest.raises(ValueError):
        Doc(vocab, words=words, pos=pos)
2,651
31.740741
80
py
spaCy
spaCy-master/spacy/tests/doc/test_doc_api.py
import warnings import weakref import numpy import pytest from numpy.testing import assert_array_equal from thinc.api import NumpyOps, get_current_ops from spacy.attrs import ( DEP, ENT_IOB, ENT_TYPE, HEAD, IS_ALPHA, MORPH, POS, SENT_START, TAG, ) from spacy.lang.en import English from spacy.lang.xx import MultiLanguage from spacy.language import Language from spacy.lexeme import Lexeme from spacy.tokens import Doc, Span, SpanGroup, Token from spacy.vocab import Vocab from .test_underscore import clean_underscore # noqa: F401 def test_doc_api_init(en_vocab): words = ["a", "b", "c", "d"] heads = [0, 0, 2, 2] # set sent_start by sent_starts doc = Doc(en_vocab, words=words, sent_starts=[True, False, True, False]) assert [t.is_sent_start for t in doc] == [True, False, True, False] # set sent_start by heads doc = Doc(en_vocab, words=words, heads=heads, deps=["dep"] * 4) assert [t.is_sent_start for t in doc] == [True, False, True, False] # heads override sent_starts doc = Doc( en_vocab, words=words, sent_starts=[True] * 4, heads=heads, deps=["dep"] * 4 ) assert [t.is_sent_start for t in doc] == [True, False, True, False] @pytest.mark.issue(1547) def test_issue1547(): """Test that entity labels still match after merging tokens.""" words = ["\n", "worda", ".", "\n", "wordb", "-", "Biosphere", "2", "-", " \n"] doc = Doc(Vocab(), words=words) doc.ents = [Span(doc, 6, 8, label=doc.vocab.strings["PRODUCT"])] with doc.retokenize() as retokenizer: retokenizer.merge(doc[5:7]) assert [ent.text for ent in doc.ents] @pytest.mark.issue(1757) def test_issue1757(): """Test comparison against None doesn't cause segfault.""" doc = Doc(Vocab(), words=["a", "b", "c"]) assert not doc[0] < None assert not doc[0] is None assert doc[0] >= None assert not doc[:2] < None assert not doc[:2] is None assert doc[:2] >= None assert not doc.vocab["a"] is None assert not doc.vocab["a"] < None @pytest.mark.issue(2396) def test_issue2396(en_vocab): words = ["She", "created", "a", "test", "for", "spacy"] heads = [1, 1, 3, 1, 3, 4] deps = ["dep"] * len(heads) matrix = numpy.array( [ [0, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1], [1, 1, 2, 3, 3, 3], [1, 1, 3, 3, 3, 3], [1, 1, 3, 3, 4, 4], [1, 1, 3, 3, 4, 5], ], dtype=numpy.int32, ) doc = Doc(en_vocab, words=words, heads=heads, deps=deps) span = doc[:] assert (doc.get_lca_matrix() == matrix).all() assert (span.get_lca_matrix() == matrix).all() @pytest.mark.issue(11499) def test_init_args_unmodified(en_vocab): words = ["A", "sentence"] ents = ["B-TYPE1", ""] sent_starts = [True, False] Doc( vocab=en_vocab, words=words, ents=ents, sent_starts=sent_starts, ) assert ents == ["B-TYPE1", ""] assert sent_starts == [True, False] @pytest.mark.parametrize("text", ["-0.23", "+123,456", "±1"]) @pytest.mark.parametrize("lang_cls", [English, MultiLanguage]) @pytest.mark.issue(2782) def test_issue2782(text, lang_cls): """Check that like_num handles + and - before number.""" nlp = lang_cls() doc = nlp(text) assert len(doc) == 1 assert doc[0].like_num @pytest.mark.parametrize( "sentence", [ "The story was to the effect that a young American student recently called on Professor Christlieb with a letter of introduction.", "The next month Barry Siddall joined Stoke City on a free transfer, after Chris Pearce had established himself as the Vale's #1.", "The next month Barry Siddall joined Stoke City on a free transfer, after Chris Pearce had established himself as the Vale's number one", "Indeed, making the one who remains do all the work has installed him into a position of such insolent tyranny, it will take 
a month at least to reduce him to his proper proportions.", "It was a missed assignment, but it shouldn't have resulted in a turnover ...", ], ) @pytest.mark.issue(3869) def test_issue3869(sentence): """Test that the Doc's count_by function works consistently""" nlp = English() doc = nlp(sentence) count = 0 for token in doc: count += token.is_alpha assert count == doc.count_by(IS_ALPHA).get(1, 0) @pytest.mark.issue(3962) def test_issue3962(en_vocab): """Ensure that as_doc does not result in out-of-bound access of tokens. This is achieved by setting the head to itself if it would lie out of the span otherwise.""" # fmt: off words = ["He", "jests", "at", "scars", ",", "that", "never", "felt", "a", "wound", "."] heads = [1, 7, 1, 2, 7, 7, 7, 7, 9, 7, 7] deps = ["nsubj", "ccomp", "prep", "pobj", "punct", "nsubj", "neg", "ROOT", "det", "dobj", "punct"] # fmt: on doc = Doc(en_vocab, words=words, heads=heads, deps=deps) span2 = doc[1:5] # "jests at scars ," doc2 = span2.as_doc() doc2_json = doc2.to_json() assert doc2_json # head set to itself, being the new artificial root assert doc2[0].head.text == "jests" assert doc2[0].dep_ == "dep" assert doc2[1].head.text == "jests" assert doc2[1].dep_ == "prep" assert doc2[2].head.text == "at" assert doc2[2].dep_ == "pobj" assert doc2[3].head.text == "jests" # head set to the new artificial root assert doc2[3].dep_ == "dep" # We should still have 1 sentence assert len(list(doc2.sents)) == 1 span3 = doc[6:9] # "never felt a" doc3 = span3.as_doc() doc3_json = doc3.to_json() assert doc3_json assert doc3[0].head.text == "felt" assert doc3[0].dep_ == "neg" assert doc3[1].head.text == "felt" assert doc3[1].dep_ == "ROOT" assert doc3[2].head.text == "felt" # head set to ancestor assert doc3[2].dep_ == "dep" # We should still have 1 sentence as "a" can be attached to "felt" instead of "wound" assert len(list(doc3.sents)) == 1 @pytest.mark.issue(3962) def test_issue3962_long(en_vocab): """Ensure that as_doc does not result in out-of-bound access of tokens. This is achieved by setting the head to itself if it would lie out of the span otherwise.""" # fmt: off words = ["He", "jests", "at", "scars", ".", "They", "never", "felt", "a", "wound", "."] heads = [1, 1, 1, 2, 1, 7, 7, 7, 9, 7, 7] deps = ["nsubj", "ROOT", "prep", "pobj", "punct", "nsubj", "neg", "ROOT", "det", "dobj", "punct"] # fmt: on two_sent_doc = Doc(en_vocab, words=words, heads=heads, deps=deps) span2 = two_sent_doc[1:7] # "jests at scars. They never" doc2 = span2.as_doc() doc2_json = doc2.to_json() assert doc2_json # head set to itself, being the new artificial root (in sentence 1) assert doc2[0].head.text == "jests" assert doc2[0].dep_ == "ROOT" assert doc2[1].head.text == "jests" assert doc2[1].dep_ == "prep" assert doc2[2].head.text == "at" assert doc2[2].dep_ == "pobj" assert doc2[3].head.text == "jests" assert doc2[3].dep_ == "punct" # head set to itself, being the new artificial root (in sentence 2) assert doc2[4].head.text == "They" assert doc2[4].dep_ == "dep" # head set to the new artificial head (in sentence 2) assert doc2[4].head.text == "They" assert doc2[4].dep_ == "dep" # We should still have 2 sentences sents = list(doc2.sents) assert len(sents) == 2 assert sents[0].text == "jests at scars ." 
assert sents[1].text == "They never" @Language.factory("my_pipe") class CustomPipe: def __init__(self, nlp, name="my_pipe"): self.name = name Span.set_extension("my_ext", getter=self._get_my_ext) Doc.set_extension("my_ext", default=None) def __call__(self, doc): gathered_ext = [] for sent in doc.sents: sent_ext = self._get_my_ext(sent) sent._.set("my_ext", sent_ext) gathered_ext.append(sent_ext) doc._.set("my_ext", "\n".join(gathered_ext)) return doc @staticmethod def _get_my_ext(span): return str(span.end) @pytest.mark.issue(4903) def test_issue4903(): """Ensure that this runs correctly and doesn't hang or crash on Windows / macOS.""" nlp = English() nlp.add_pipe("sentencizer") nlp.add_pipe("my_pipe", after="sentencizer") text = ["I like bananas.", "Do you like them?", "No, I prefer wasabi."] if isinstance(get_current_ops(), NumpyOps): docs = list(nlp.pipe(text, n_process=2)) assert docs[0].text == "I like bananas." assert docs[1].text == "Do you like them?" assert docs[2].text == "No, I prefer wasabi." @pytest.mark.issue(5048) def test_issue5048(en_vocab): words = ["This", "is", "a", "sentence"] pos_s = ["DET", "VERB", "DET", "NOUN"] spaces = [" ", " ", " ", ""] deps_s = ["dep", "adj", "nn", "atm"] tags_s = ["DT", "VBZ", "DT", "NN"] strings = en_vocab.strings for w in words: strings.add(w) deps = [strings.add(d) for d in deps_s] pos = [strings.add(p) for p in pos_s] tags = [strings.add(t) for t in tags_s] attrs = [POS, DEP, TAG] array = numpy.array(list(zip(pos, deps, tags)), dtype="uint64") doc = Doc(en_vocab, words=words, spaces=spaces) doc.from_array(attrs, array) v1 = [(token.text, token.pos_, token.tag_) for token in doc] doc2 = Doc(en_vocab, words=words, pos=pos_s, deps=deps_s, tags=tags_s) v2 = [(token.text, token.pos_, token.tag_) for token in doc2] assert v1 == v2 @pytest.mark.parametrize("text", [["one", "two", "three"]]) def test_doc_api_compare_by_string_position(en_vocab, text): doc = Doc(en_vocab, words=text) # Get the tokens in this order, so their ID ordering doesn't match the idx token3 = doc[-1] token2 = doc[-2] token1 = doc[-1] token1, token2, token3 = doc assert token1 < token2 < token3 assert not token1 > token2 assert token2 > token1 assert token2 <= token3 assert token3 >= token1 def test_doc_api_getitem(en_tokenizer): text = "Give it back! He pleaded." tokens = en_tokenizer(text) assert tokens[0].text == "Give" assert tokens[-1].text == "." with pytest.raises(IndexError): tokens[len(tokens)] def to_str(span): return "/".join(token.text for token in span) span = tokens[1:1] assert not to_str(span) span = tokens[1:4] assert to_str(span) == "it/back/!" span = tokens[1:4:1] assert to_str(span) == "it/back/!" with pytest.raises(ValueError): tokens[1:4:2] with pytest.raises(ValueError): tokens[1:4:-1] span = tokens[-3:6] assert to_str(span) == "He/pleaded" span = tokens[4:-1] assert to_str(span) == "He/pleaded" span = tokens[-5:-3] assert to_str(span) == "back/!" span = tokens[5:4] assert span.start == span.end == 5 and not to_str(span) span = tokens[4:-3] assert span.start == span.end == 4 and not to_str(span) span = tokens[:] assert to_str(span) == "Give/it/back/!/He/pleaded/." span = tokens[4:] assert to_str(span) == "He/pleaded/." span = tokens[:4] assert to_str(span) == "Give/it/back/!" span = tokens[:-3] assert to_str(span) == "Give/it/back/!" span = tokens[-3:] assert to_str(span) == "He/pleaded/." span = tokens[4:50] assert to_str(span) == "He/pleaded/." span = tokens[-50:4] assert to_str(span) == "Give/it/back/!" 
span = tokens[-50:-40] assert span.start == span.end == 0 and not to_str(span) span = tokens[40:50] assert span.start == span.end == 7 and not to_str(span) span = tokens[1:4] assert span[0].orth_ == "it" subspan = span[:] assert to_str(subspan) == "it/back/!" subspan = span[:2] assert to_str(subspan) == "it/back" subspan = span[1:] assert to_str(subspan) == "back/!" subspan = span[:-1] assert to_str(subspan) == "it/back" subspan = span[-2:] assert to_str(subspan) == "back/!" subspan = span[1:2] assert to_str(subspan) == "back" subspan = span[-2:-1] assert to_str(subspan) == "back" subspan = span[-50:50] assert to_str(subspan) == "it/back/!" subspan = span[50:-50] assert subspan.start == subspan.end == 4 and not to_str(subspan) @pytest.mark.parametrize( "text", ["Give it back! He pleaded.", " Give it back! He pleaded. "] ) def test_doc_api_serialize(en_tokenizer, text): tokens = en_tokenizer(text) tokens[0].lemma_ = "lemma" tokens[0].norm_ = "norm" tokens.ents = [(tokens.vocab.strings["PRODUCT"], 0, 1)] tokens[0].ent_kb_id_ = "ent_kb_id" tokens[0].ent_id_ = "ent_id" new_tokens = Doc(tokens.vocab).from_bytes(tokens.to_bytes()) assert tokens.text == new_tokens.text assert [t.text for t in tokens] == [t.text for t in new_tokens] assert [t.orth for t in tokens] == [t.orth for t in new_tokens] assert new_tokens[0].lemma_ == "lemma" assert new_tokens[0].norm_ == "norm" assert new_tokens[0].ent_kb_id_ == "ent_kb_id" assert new_tokens[0].ent_id_ == "ent_id" new_tokens = Doc(tokens.vocab).from_bytes( tokens.to_bytes(exclude=["tensor"]), exclude=["tensor"] ) assert tokens.text == new_tokens.text assert [t.text for t in tokens] == [t.text for t in new_tokens] assert [t.orth for t in tokens] == [t.orth for t in new_tokens] new_tokens = Doc(tokens.vocab).from_bytes( tokens.to_bytes(exclude=["sentiment"]), exclude=["sentiment"] ) assert tokens.text == new_tokens.text assert [t.text for t in tokens] == [t.text for t in new_tokens] assert [t.orth for t in tokens] == [t.orth for t in new_tokens] def inner_func(d1, d2): return "hello!" 
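    # user hooks can't be serialized, so to_bytes() should warn once a hook is set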
_ = tokens.to_bytes() # noqa: F841 with pytest.warns(UserWarning): tokens.user_hooks["similarity"] = inner_func _ = tokens.to_bytes() # noqa: F841 def test_doc_api_set_ents(en_tokenizer): text = "I use goggle chrone to surf the web" tokens = en_tokenizer(text) assert len(tokens.ents) == 0 tokens.ents = [(tokens.vocab.strings["PRODUCT"], 2, 4)] assert len(list(tokens.ents)) == 1 assert [t.ent_iob for t in tokens] == [2, 2, 3, 1, 2, 2, 2, 2] assert tokens.ents[0].label_ == "PRODUCT" assert tokens.ents[0].start == 2 assert tokens.ents[0].end == 4 def test_doc_api_sents_empty_string(en_tokenizer): doc = en_tokenizer("") sents = list(doc.sents) assert len(sents) == 0 def test_doc_api_runtime_error(en_tokenizer): # Example that caused run-time error while parsing Reddit # fmt: off text = "67% of black households are single parent \n\n72% of all black babies born out of wedlock \n\n50% of all black kids don\u2019t finish high school" deps = ["nummod", "nsubj", "prep", "amod", "pobj", "ROOT", "amod", "attr", "", "nummod", "appos", "prep", "det", "amod", "pobj", "acl", "prep", "prep", "pobj", "", "nummod", "nsubj", "prep", "det", "amod", "pobj", "aux", "neg", "ccomp", "amod", "dobj"] # fmt: on tokens = en_tokenizer(text) doc = Doc(tokens.vocab, words=[t.text for t in tokens], deps=deps) nps = [] for np in doc.noun_chunks: while len(np) > 1 and np[0].dep_ not in ("advmod", "amod", "compound"): np = np[1:] if len(np) > 1: nps.append(np) with doc.retokenize() as retokenizer: for np in nps: attrs = { "tag": np.root.tag_, "lemma": np.text, "ent_type": np.root.ent_type_, } retokenizer.merge(np, attrs=attrs) def test_doc_api_right_edge(en_vocab): """Test for bug occurring from Unshift action, causing incorrect right edge""" # fmt: off words = [ "I", "have", "proposed", "to", "myself", ",", "for", "the", "sake", "of", "such", "as", "live", "under", "the", "government", "of", "the", "Romans", ",", "to", "translate", "those", "books", "into", "the", "Greek", "tongue", "." 
] heads = [2, 2, 2, 2, 3, 2, 21, 8, 6, 8, 11, 8, 11, 12, 15, 13, 15, 18, 16, 12, 21, 2, 23, 21, 21, 27, 27, 24, 2] deps = ["dep"] * len(heads) # fmt: on doc = Doc(en_vocab, words=words, heads=heads, deps=deps) assert doc[6].text == "for" subtree = [w.text for w in doc[6].subtree] # fmt: off assert subtree == ["for", "the", "sake", "of", "such", "as", "live", "under", "the", "government", "of", "the", "Romans", ","] # fmt: on assert doc[6].right_edge.text == "," def test_doc_api_has_vector(): vocab = Vocab() vocab.reset_vectors(width=2) vocab.set_vector("kitten", vector=numpy.asarray([0.0, 2.0], dtype="f")) doc = Doc(vocab, words=["kitten"]) assert doc.has_vector def test_doc_api_similarity_match(): doc = Doc(Vocab(), words=["a"]) assert doc.similarity(doc[0]) == 1.0 assert doc.similarity(doc.vocab["a"]) == 1.0 doc2 = Doc(doc.vocab, words=["a", "b", "c"]) with pytest.warns(UserWarning): assert doc.similarity(doc2[:1]) == 1.0 assert doc.similarity(doc2) == 0.0 @pytest.mark.parametrize( "words,heads,lca_matrix", [ ( ["the", "lazy", "dog", "slept"], [2, 2, 3, 3], numpy.array([[0, 2, 2, 3], [2, 1, 2, 3], [2, 2, 2, 3], [3, 3, 3, 3]]), ), ( ["The", "lazy", "dog", "slept", ".", "The", "quick", "fox", "jumped"], [2, 2, 3, 3, 3, 7, 7, 8, 8], numpy.array( [ [0, 2, 2, 3, 3, -1, -1, -1, -1], [2, 1, 2, 3, 3, -1, -1, -1, -1], [2, 2, 2, 3, 3, -1, -1, -1, -1], [3, 3, 3, 3, 3, -1, -1, -1, -1], [3, 3, 3, 3, 4, -1, -1, -1, -1], [-1, -1, -1, -1, -1, 5, 7, 7, 8], [-1, -1, -1, -1, -1, 7, 6, 7, 8], [-1, -1, -1, -1, -1, 7, 7, 7, 8], [-1, -1, -1, -1, -1, 8, 8, 8, 8], ] ), ), ], ) def test_lowest_common_ancestor(en_vocab, words, heads, lca_matrix): doc = Doc(en_vocab, words, heads=heads, deps=["dep"] * len(heads)) lca = doc.get_lca_matrix() assert (lca == lca_matrix).all() assert lca[1, 1] == 1 assert lca[0, 1] == 2 assert lca[1, 2] == 2 def test_doc_is_nered(en_vocab): words = ["I", "live", "in", "New", "York"] doc = Doc(en_vocab, words=words) assert not doc.has_annotation("ENT_IOB") doc.ents = [Span(doc, 3, 5, label="GPE")] assert doc.has_annotation("ENT_IOB") # Test creating doc from array with unknown values arr = numpy.array([[0, 0], [0, 0], [0, 0], [384, 3], [384, 1]], dtype="uint64") doc = Doc(en_vocab, words=words).from_array([ENT_TYPE, ENT_IOB], arr) assert doc.has_annotation("ENT_IOB") # Test serialization new_doc = Doc(en_vocab).from_bytes(doc.to_bytes()) assert new_doc.has_annotation("ENT_IOB") def test_doc_from_array_sent_starts(en_vocab): # fmt: off words = ["I", "live", "in", "New", "York", ".", "I", "like", "cats", "."] heads = [0, 0, 0, 0, 0, 0, 6, 6, 6, 6] deps = ["ROOT", "dep", "dep", "dep", "dep", "dep", "ROOT", "dep", "dep", "dep"] # fmt: on doc = Doc(en_vocab, words=words, heads=heads, deps=deps) # HEAD overrides SENT_START without warning attrs = [SENT_START, HEAD] arr = doc.to_array(attrs) new_doc = Doc(en_vocab, words=words) new_doc.from_array(attrs, arr) # no warning using default attrs attrs = doc._get_array_attrs() arr = doc.to_array(attrs) with warnings.catch_warnings(): warnings.simplefilter("error") new_doc.from_array(attrs, arr) # only SENT_START uses SENT_START attrs = [SENT_START] arr = doc.to_array(attrs) new_doc = Doc(en_vocab, words=words) new_doc.from_array(attrs, arr) assert [t.is_sent_start for t in doc] == [t.is_sent_start for t in new_doc] assert not new_doc.has_annotation("DEP") # only HEAD uses HEAD attrs = [HEAD, DEP] arr = doc.to_array(attrs) new_doc = Doc(en_vocab, words=words) new_doc.from_array(attrs, arr) assert [t.is_sent_start for t in doc] == [t.is_sent_start for 
t in new_doc] assert new_doc.has_annotation("DEP") def test_doc_from_array_morph(en_vocab): # fmt: off words = ["I", "live", "in", "New", "York", "."] morphs = ["Feat1=A", "Feat1=B", "Feat1=C", "Feat1=A|Feat2=D", "Feat2=E", "Feat3=F"] # fmt: on doc = Doc(en_vocab, words=words, morphs=morphs) attrs = [MORPH] arr = doc.to_array(attrs) new_doc = Doc(en_vocab, words=words) new_doc.from_array(attrs, arr) assert [str(t.morph) for t in new_doc] == morphs assert [str(t.morph) for t in doc] == [str(t.morph) for t in new_doc] @pytest.mark.usefixtures("clean_underscore") def test_doc_api_from_docs(en_tokenizer, de_tokenizer): en_texts = [ "Merging the docs is fun.", "", "They don't think alike. ", "", "Another doc.", ] en_texts_without_empty = [t for t in en_texts if len(t)] de_text = "Wie war die Frage?" en_docs = [en_tokenizer(text) for text in en_texts] en_docs[0].spans["group"] = [en_docs[0][1:4]] en_docs[2].spans["group"] = [en_docs[2][1:4]] en_docs[4].spans["group"] = [en_docs[4][0:1]] span_group_texts = sorted( [en_docs[0][1:4].text, en_docs[2][1:4].text, en_docs[4][0:1].text] ) de_doc = de_tokenizer(de_text) Token.set_extension("is_ambiguous", default=False) en_docs[0][2]._.is_ambiguous = True # docs en_docs[2][3]._.is_ambiguous = True # think assert Doc.from_docs([]) is None assert de_doc is not Doc.from_docs([de_doc]) assert str(de_doc) == str(Doc.from_docs([de_doc])) with pytest.raises(ValueError): Doc.from_docs(en_docs + [de_doc]) m_doc = Doc.from_docs(en_docs) assert len(en_texts_without_empty) == len(list(m_doc.sents)) assert len(m_doc.text) > len(en_texts[0]) + len(en_texts[1]) assert m_doc.text == " ".join([t.strip() for t in en_texts_without_empty]) p_token = m_doc[len(en_docs[0]) - 1] assert p_token.text == "." and bool(p_token.whitespace_) en_docs_tokens = [t for doc in en_docs for t in doc] assert len(m_doc) == len(en_docs_tokens) think_idx = len(en_texts[0]) + 1 + en_texts[2].index("think") assert m_doc[2]._.is_ambiguous is True assert m_doc[9].idx == think_idx assert m_doc[9]._.is_ambiguous is True assert not any([t._.is_ambiguous for t in m_doc[3:8]]) assert "group" in m_doc.spans assert span_group_texts == sorted([s.text for s in m_doc.spans["group"]]) assert bool(m_doc[11].whitespace_) m_doc = Doc.from_docs(en_docs, ensure_whitespace=False) assert len(en_texts_without_empty) == len(list(m_doc.sents)) assert len(m_doc.text) == sum(len(t) for t in en_texts) assert m_doc.text == "".join(en_texts_without_empty) p_token = m_doc[len(en_docs[0]) - 1] assert p_token.text == "." and not bool(p_token.whitespace_) en_docs_tokens = [t for doc in en_docs for t in doc] assert len(m_doc) == len(en_docs_tokens) think_idx = len(en_texts[0]) + 0 + en_texts[2].index("think") assert m_doc[9].idx == think_idx assert "group" in m_doc.spans assert span_group_texts == sorted([s.text for s in m_doc.spans["group"]]) assert bool(m_doc[11].whitespace_) m_doc = Doc.from_docs(en_docs, attrs=["lemma", "length", "pos"]) assert len(m_doc.text) > len(en_texts[0]) + len(en_texts[1]) # space delimiter considered, although spacy attribute was missing assert m_doc.text == " ".join([t.strip() for t in en_texts_without_empty]) p_token = m_doc[len(en_docs[0]) - 1] assert p_token.text == "." 
and bool(p_token.whitespace_) en_docs_tokens = [t for doc in en_docs for t in doc] assert len(m_doc) == len(en_docs_tokens) think_idx = len(en_texts[0]) + 1 + en_texts[2].index("think") assert m_doc[9].idx == think_idx assert "group" in m_doc.spans assert span_group_texts == sorted([s.text for s in m_doc.spans["group"]]) # can exclude spans m_doc = Doc.from_docs(en_docs, exclude=["spans"]) assert "group" not in m_doc.spans # can exclude user_data m_doc = Doc.from_docs(en_docs, exclude=["user_data"]) assert m_doc.user_data == {} # can merge empty docs doc = Doc.from_docs([en_tokenizer("")] * 10) # empty but set spans keys are preserved en_docs = [en_tokenizer(text) for text in en_texts] m_doc = Doc.from_docs(en_docs) assert "group" not in m_doc.spans for doc in en_docs: doc.spans["group"] = [] m_doc = Doc.from_docs(en_docs) assert "group" in m_doc.spans assert len(m_doc.spans["group"]) == 0 # with tensor ops = get_current_ops() for doc in en_docs: doc.tensor = ops.asarray([[len(t.text), 0.0] for t in doc]) m_doc = Doc.from_docs(en_docs) assert_array_equal( ops.to_numpy(m_doc.tensor), ops.to_numpy(ops.xp.vstack([doc.tensor for doc in en_docs if len(doc)])), ) # can exclude tensor m_doc = Doc.from_docs(en_docs, exclude=["tensor"]) assert m_doc.tensor.shape == (0,) def test_doc_api_from_docs_ents(en_tokenizer): texts = ["Merging the docs is fun.", "They don't think alike."] docs = [en_tokenizer(t) for t in texts] docs[0].ents = () docs[1].ents = (Span(docs[1], 0, 1, label="foo"),) doc = Doc.from_docs(docs) assert len(doc.ents) == 1 def test_doc_lang(en_vocab): doc = Doc(en_vocab, words=["Hello", "world"]) assert doc.lang_ == "en" assert doc.lang == en_vocab.strings["en"] assert doc[0].lang_ == "en" assert doc[0].lang == en_vocab.strings["en"] nlp = English() doc = nlp("Hello world") assert doc.lang_ == "en" assert doc.lang == en_vocab.strings["en"] assert doc[0].lang_ == "en" assert doc[0].lang == en_vocab.strings["en"] def test_token_lexeme(en_vocab): """Test that tokens expose their lexeme.""" token = Doc(en_vocab, words=["Hello", "world"])[0] assert isinstance(token.lex, Lexeme) assert token.lex.text == token.text assert en_vocab[token.orth] == token.lex def test_has_annotation(en_vocab): doc = Doc(en_vocab, words=["Hello", "world"]) attrs = ("TAG", "POS", "MORPH", "LEMMA", "DEP", "HEAD", "ENT_IOB", "ENT_TYPE") for attr in attrs: assert not doc.has_annotation(attr) assert not doc.has_annotation(attr, require_complete=True) doc[0].tag_ = "A" doc[0].pos_ = "X" doc[0].set_morph("Feat=Val") doc[0].lemma_ = "a" doc[0].dep_ = "dep" doc[0].head = doc[1] doc.set_ents([Span(doc, 0, 1, label="HELLO")], default="missing") for attr in attrs: assert doc.has_annotation(attr) assert not doc.has_annotation(attr, require_complete=True) doc[1].tag_ = "A" doc[1].pos_ = "X" doc[1].set_morph("") doc[1].lemma_ = "a" doc[1].dep_ = "dep" doc.ents = [Span(doc, 0, 2, label="HELLO")] for attr in attrs: assert doc.has_annotation(attr) assert doc.has_annotation(attr, require_complete=True) def test_has_annotation_sents(en_vocab): doc = Doc(en_vocab, words=["Hello", "beautiful", "world"]) attrs = ("SENT_START", "IS_SENT_START", "IS_SENT_END") for attr in attrs: assert not doc.has_annotation(attr) assert not doc.has_annotation(attr, require_complete=True) # The first token (index 0) is always assumed to be a sentence start, # and ignored by the check in doc.has_annotation doc[1].is_sent_start = False for attr in attrs: assert doc.has_annotation(attr) assert not doc.has_annotation(attr, require_complete=True) 
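    # once every remaining token is annotated, require_complete=True also passes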
doc[2].is_sent_start = False for attr in attrs: assert doc.has_annotation(attr) assert doc.has_annotation(attr, require_complete=True) def test_is_flags_deprecated(en_tokenizer): doc = en_tokenizer("test") with pytest.deprecated_call(): doc.is_tagged with pytest.deprecated_call(): doc.is_parsed with pytest.deprecated_call(): doc.is_nered with pytest.deprecated_call(): doc.is_sentenced def test_doc_set_ents(en_tokenizer): # set ents doc = en_tokenizer("a b c d e") doc.set_ents([Span(doc, 0, 1, 10), Span(doc, 1, 3, 11)]) assert [t.ent_iob for t in doc] == [3, 3, 1, 2, 2] assert [t.ent_type for t in doc] == [10, 11, 11, 0, 0] # add ents, invalid IOB repaired doc = en_tokenizer("a b c d e") doc.set_ents([Span(doc, 0, 1, 10), Span(doc, 1, 3, 11)]) doc.set_ents([Span(doc, 0, 2, 12)], default="unmodified") assert [t.ent_iob for t in doc] == [3, 1, 3, 2, 2] assert [t.ent_type for t in doc] == [12, 12, 11, 0, 0] # missing ents doc = en_tokenizer("a b c d e") doc.set_ents([Span(doc, 0, 1, 10), Span(doc, 1, 3, 11)], missing=[doc[4:5]]) assert [t.ent_iob for t in doc] == [3, 3, 1, 2, 0] assert [t.ent_type for t in doc] == [10, 11, 11, 0, 0] # outside ents doc = en_tokenizer("a b c d e") doc.set_ents( [Span(doc, 0, 1, 10), Span(doc, 1, 3, 11)], outside=[doc[4:5]], default="missing", ) assert [t.ent_iob for t in doc] == [3, 3, 1, 0, 2] assert [t.ent_type for t in doc] == [10, 11, 11, 0, 0] # blocked ents doc = en_tokenizer("a b c d e") doc.set_ents([], blocked=[doc[1:2], doc[3:5]], default="unmodified") assert [t.ent_iob for t in doc] == [0, 3, 0, 3, 3] assert [t.ent_type for t in doc] == [0, 0, 0, 0, 0] assert doc.ents == tuple() # invalid IOB repaired after blocked doc.ents = [Span(doc, 3, 5, "ENT")] assert [t.ent_iob for t in doc] == [2, 2, 2, 3, 1] doc.set_ents([], blocked=[doc[3:4]], default="unmodified") assert [t.ent_iob for t in doc] == [2, 2, 2, 3, 3] # all types doc = en_tokenizer("a b c d e") doc.set_ents( [Span(doc, 0, 1, 10)], blocked=[doc[1:2]], missing=[doc[2:3]], outside=[doc[3:4]], default="unmodified", ) assert [t.ent_iob for t in doc] == [3, 3, 0, 2, 0] assert [t.ent_type for t in doc] == [10, 0, 0, 0, 0] doc = en_tokenizer("a b c d e") # single span instead of a list with pytest.raises(ValueError): doc.set_ents([], missing=doc[1:2]) # invalid default mode with pytest.raises(ValueError): doc.set_ents([], missing=[doc[1:2]], default="none") # conflicting/overlapping specifications with pytest.raises(ValueError): doc.set_ents([], missing=[doc[1:2]], outside=[doc[1:2]]) def test_doc_ents_setter(): """Test that both strings and integers can be used to set entities in tuple format via doc.ents.""" words = ["a", "b", "c", "d", "e"] doc = Doc(Vocab(), words=words) doc.ents = [("HELLO", 0, 2), (doc.vocab.strings.add("WORLD"), 3, 5)] assert [e.label_ for e in doc.ents] == ["HELLO", "WORLD"] vocab = Vocab() ents = [("HELLO", 0, 2), (vocab.strings.add("WORLD"), 3, 5)] ents = ["B-HELLO", "I-HELLO", "O", "B-WORLD", "I-WORLD"] doc = Doc(vocab, words=words, ents=ents) assert [e.label_ for e in doc.ents] == ["HELLO", "WORLD"] def test_doc_morph_setter(en_tokenizer, de_tokenizer): doc1 = en_tokenizer("a b") doc1b = en_tokenizer("c d") doc2 = de_tokenizer("a b") # unset values can be copied doc1[0].morph = doc1[1].morph assert doc1[0].morph.key == 0 assert doc1[1].morph.key == 0 # morph values from the same vocab can be copied doc1[0].set_morph("Feat=Val") doc1[1].morph = doc1[0].morph assert doc1[0].morph == doc1[1].morph # ... 
also across docs doc1b[0].morph = doc1[0].morph assert doc1[0].morph == doc1b[0].morph doc2[0].set_morph("Feat2=Val2") # the morph value must come from the same vocab with pytest.raises(ValueError): doc1[0].morph = doc2[0].morph def test_doc_init_iob(): """Test ents validation/normalization in Doc.__init__""" words = ["a", "b", "c", "d", "e"] ents = ["O"] * len(words) doc = Doc(Vocab(), words=words, ents=ents) assert doc.ents == () ents = ["B-PERSON", "I-PERSON", "O", "I-PERSON", "I-PERSON"] doc = Doc(Vocab(), words=words, ents=ents) assert len(doc.ents) == 2 ents = ["B-PERSON", "I-PERSON", "O", "I-PERSON", "I-GPE"] doc = Doc(Vocab(), words=words, ents=ents) assert len(doc.ents) == 3 # None is missing ents = ["B-PERSON", "I-PERSON", "O", None, "I-GPE"] doc = Doc(Vocab(), words=words, ents=ents) assert len(doc.ents) == 2 # empty tag is missing ents = ["", "B-PERSON", "O", "B-PERSON", "I-PERSON"] doc = Doc(Vocab(), words=words, ents=ents) assert len(doc.ents) == 2 # invalid IOB ents = ["Q-PERSON", "I-PERSON", "O", "I-PERSON", "I-GPE"] with pytest.raises(ValueError): doc = Doc(Vocab(), words=words, ents=ents) # no dash ents = ["OPERSON", "I-PERSON", "O", "I-PERSON", "I-GPE"] with pytest.raises(ValueError): doc = Doc(Vocab(), words=words, ents=ents) # no ent type ents = ["O", "B-", "O", "I-PERSON", "I-GPE"] with pytest.raises(ValueError): doc = Doc(Vocab(), words=words, ents=ents) # not strings or None ents = [0, "B-", "O", "I-PERSON", "I-GPE"] with pytest.raises(ValueError): doc = Doc(Vocab(), words=words, ents=ents) def test_doc_set_ents_invalid_spans(en_tokenizer): doc = en_tokenizer("Some text about Colombia and the Czech Republic") spans = [Span(doc, 3, 4, label="GPE"), Span(doc, 6, 8, label="GPE")] with doc.retokenize() as retokenizer: for span in spans: retokenizer.merge(span) with pytest.raises(IndexError): doc.ents = spans def test_doc_noun_chunks_not_implemented(): """Test that a language without noun_chunk iterator, throws a NotImplementedError""" text = "Může data vytvářet a spravovat, ale především je dokáže analyzovat, najít v nich nové vztahy a vše přehledně vizualizovat." 
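    # the "xx" MultiLanguage pipeline defines no syntax iterators, so noun_chunks should raise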
nlp = MultiLanguage() doc = nlp(text) with pytest.raises(NotImplementedError): _ = list(doc.noun_chunks) # noqa: F841 def test_span_groups(en_tokenizer): doc = en_tokenizer("Some text about Colombia and the Czech Republic") doc.spans["hi"] = [Span(doc, 3, 4, label="bye")] assert "hi" in doc.spans assert "bye" not in doc.spans assert len(doc.spans["hi"]) == 1 assert doc.spans["hi"][0].label_ == "bye" doc.spans["hi"].append(doc[0:3]) assert len(doc.spans["hi"]) == 2 assert doc.spans["hi"][1].text == "Some text about" assert [span.text for span in doc.spans["hi"]] == ["Colombia", "Some text about"] assert not doc.spans["hi"].has_overlap doc.ents = [Span(doc, 3, 4, label="GPE"), Span(doc, 6, 8, label="GPE")] doc.spans["hi"].extend(doc.ents) assert len(doc.spans["hi"]) == 4 assert [span.label_ for span in doc.spans["hi"]] == ["bye", "", "GPE", "GPE"] assert doc.spans["hi"].has_overlap del doc.spans["hi"] assert "hi" not in doc.spans def test_doc_spans_copy(en_tokenizer): doc1 = en_tokenizer("Some text about Colombia and the Czech Republic") assert weakref.ref(doc1) == doc1.spans.doc_ref doc2 = doc1.copy() assert weakref.ref(doc2) == doc2.spans.doc_ref def test_doc_spans_setdefault(en_tokenizer): doc = en_tokenizer("Some text about Colombia and the Czech Republic") doc.spans.setdefault("key1") assert len(doc.spans["key1"]) == 0 doc.spans.setdefault("key2", default=[doc[0:1]]) assert len(doc.spans["key2"]) == 1 doc.spans.setdefault("key3", default=SpanGroup(doc, spans=[doc[0:1], doc[1:2]])) assert len(doc.spans["key3"]) == 2
35,906
34.835329
192
py
spaCy
spaCy-master/spacy/tests/doc/test_graph.py
from spacy.tokens.doc import Doc from spacy.tokens.graph import Graph from spacy.vocab import Vocab def test_graph_init(): doc = Doc(Vocab(), words=["a", "b", "c", "d"]) graph = Graph(doc, name="hello") assert graph.name == "hello" assert graph.doc is doc def test_graph_edges_and_nodes(): doc = Doc(Vocab(), words=["a", "b", "c", "d"]) graph = Graph(doc, name="hello") node1 = graph.add_node((0,)) assert graph.get_node((0,)) == node1 node2 = graph.add_node((1, 3)) assert list(node2) == [1, 3] graph.add_edge(node1, node2, label="one", weight=-10.5) assert graph.has_edge(node1, node2, label="one") assert node1.heads() == [] assert [tuple(h) for h in node2.heads()] == [(0,)] assert [tuple(t) for t in node1.tails()] == [(1, 3)] assert [tuple(t) for t in node2.tails()] == [] def test_graph_walk(): doc = Doc(Vocab(), words=["a", "b", "c", "d"]) graph = Graph( doc, name="hello", nodes=[(0,), (1,), (2,), (3,)], edges=[(0, 1), (0, 2), (0, 3), (3, 0)], labels=None, weights=None, ) node0, node1, node2, node3 = list(graph.nodes) assert [tuple(h) for h in node0.heads()] == [(3,)] assert [tuple(h) for h in node1.heads()] == [(0,)] assert [tuple(h) for h in node0.walk_heads()] == [(3,), (0,)] assert [tuple(h) for h in node1.walk_heads()] == [(0,), (3,), (0,)] assert [tuple(h) for h in node2.walk_heads()] == [(0,), (3,), (0,)] assert [tuple(h) for h in node3.walk_heads()] == [(0,), (3,)] assert [tuple(t) for t in node0.walk_tails()] == [(1,), (2,), (3,), (0,)] assert [tuple(t) for t in node1.walk_tails()] == [] assert [tuple(t) for t in node2.walk_tails()] == [] assert [tuple(t) for t in node3.walk_tails()] == [(0,), (1,), (2,), (3,)]
1,819
36.142857
77
py
spaCy
spaCy-master/spacy/tests/doc/test_json_doc_conversion.py
import pytest import srsly import spacy from spacy import schemas from spacy.tokens import Doc, Span, Token from .test_underscore import clean_underscore # noqa: F401 @pytest.fixture() def doc(en_vocab): words = ["c", "d", "e"] spaces = [True, True, True] pos = ["VERB", "NOUN", "NOUN"] tags = ["VBP", "NN", "NN"] heads = [0, 0, 1] deps = ["ROOT", "dobj", "dobj"] ents = ["O", "B-ORG", "O"] morphs = ["Feat1=A", "Feat1=B", "Feat1=A|Feat2=D"] return Doc( en_vocab, words=words, spaces=spaces, pos=pos, tags=tags, heads=heads, deps=deps, ents=ents, morphs=morphs, ) @pytest.fixture() def doc_without_deps(en_vocab): words = ["c", "d", "e"] pos = ["VERB", "NOUN", "NOUN"] tags = ["VBP", "NN", "NN"] ents = ["O", "B-ORG", "O"] morphs = ["Feat1=A", "Feat1=B", "Feat1=A|Feat2=D"] return Doc( en_vocab, words=words, pos=pos, tags=tags, ents=ents, morphs=morphs, sent_starts=[True, False, True], ) @pytest.fixture() def doc_json(): return { "text": "c d e ", "ents": [{"start": 2, "end": 3, "label": "ORG"}], "sents": [{"start": 0, "end": 5}], "tokens": [ { "id": 0, "start": 0, "end": 1, "tag": "VBP", "pos": "VERB", "morph": "Feat1=A", "dep": "ROOT", "head": 0, }, { "id": 1, "start": 2, "end": 3, "tag": "NN", "pos": "NOUN", "morph": "Feat1=B", "dep": "dobj", "head": 0, }, { "id": 2, "start": 4, "end": 5, "tag": "NN", "pos": "NOUN", "morph": "Feat1=A|Feat2=D", "dep": "dobj", "head": 1, }, ], } def test_doc_to_json(doc): json_doc = doc.to_json() assert json_doc["text"] == "c d e " assert len(json_doc["tokens"]) == 3 assert json_doc["tokens"][0]["pos"] == "VERB" assert json_doc["tokens"][0]["tag"] == "VBP" assert json_doc["tokens"][0]["dep"] == "ROOT" assert len(json_doc["ents"]) == 1 assert json_doc["ents"][0]["start"] == 2 # character offset! assert json_doc["ents"][0]["end"] == 3 # character offset! 
assert json_doc["ents"][0]["label"] == "ORG" assert len(schemas.validate(schemas.DocJSONSchema, json_doc)) == 0 assert srsly.json_loads(srsly.json_dumps(json_doc)) == json_doc def test_doc_to_json_underscore(doc): Doc.set_extension("json_test1", default=False) Doc.set_extension("json_test2", default=False) doc._.json_test1 = "hello world" doc._.json_test2 = [1, 2, 3] json_doc = doc.to_json(underscore=["json_test1", "json_test2"]) assert "_" in json_doc assert json_doc["_"]["json_test1"] == "hello world" assert json_doc["_"]["json_test2"] == [1, 2, 3] assert len(schemas.validate(schemas.DocJSONSchema, json_doc)) == 0 assert srsly.json_loads(srsly.json_dumps(json_doc)) == json_doc def test_doc_to_json_with_token_span_attributes(doc): Doc.set_extension("json_test1", default=False) Doc.set_extension("json_test2", default=False) Token.set_extension("token_test", default=False) Span.set_extension("span_test", default=False) doc._.json_test1 = "hello world" doc._.json_test2 = [1, 2, 3] doc[0:1]._.span_test = "span_attribute" doc[0:2]._.span_test = "span_attribute_2" doc[0]._.token_test = 117 doc[1]._.token_test = 118 doc.spans["span_group"] = [doc[0:1]] json_doc = doc.to_json( underscore=["json_test1", "json_test2", "token_test", "span_test"] ) assert "_" in json_doc assert json_doc["_"]["json_test1"] == "hello world" assert json_doc["_"]["json_test2"] == [1, 2, 3] assert "underscore_token" in json_doc assert "underscore_span" in json_doc assert json_doc["underscore_token"]["token_test"][0]["value"] == 117 assert json_doc["underscore_token"]["token_test"][1]["value"] == 118 assert json_doc["underscore_span"]["span_test"][0]["value"] == "span_attribute" assert json_doc["underscore_span"]["span_test"][1]["value"] == "span_attribute_2" assert len(schemas.validate(schemas.DocJSONSchema, json_doc)) == 0 assert srsly.json_loads(srsly.json_dumps(json_doc)) == json_doc def test_doc_to_json_with_custom_user_data(doc): Doc.set_extension("json_test", default=False) Token.set_extension("token_test", default=False) Span.set_extension("span_test", default=False) doc._.json_test = "hello world" doc[0:1]._.span_test = "span_attribute" doc[0]._.token_test = 117 json_doc = doc.to_json(underscore=["json_test", "token_test", "span_test"]) doc.user_data["user_data_test"] = 10 doc.user_data[("user_data_test2", True)] = 10 assert "_" in json_doc assert json_doc["_"]["json_test"] == "hello world" assert "underscore_token" in json_doc assert "underscore_span" in json_doc assert json_doc["underscore_token"]["token_test"][0]["value"] == 117 assert json_doc["underscore_span"]["span_test"][0]["value"] == "span_attribute" assert len(schemas.validate(schemas.DocJSONSchema, json_doc)) == 0 assert srsly.json_loads(srsly.json_dumps(json_doc)) == json_doc def test_doc_to_json_with_token_span_same_identifier(doc): Doc.set_extension("my_ext", default=False) Token.set_extension("my_ext", default=False) Span.set_extension("my_ext", default=False) doc._.my_ext = "hello world" doc[0:1]._.my_ext = "span_attribute" doc[0]._.my_ext = 117 json_doc = doc.to_json(underscore=["my_ext"]) assert "_" in json_doc assert json_doc["_"]["my_ext"] == "hello world" assert "underscore_token" in json_doc assert "underscore_span" in json_doc assert json_doc["underscore_token"]["my_ext"][0]["value"] == 117 assert json_doc["underscore_span"]["my_ext"][0]["value"] == "span_attribute" assert len(schemas.validate(schemas.DocJSONSchema, json_doc)) == 0 assert srsly.json_loads(srsly.json_dumps(json_doc)) == json_doc def 
test_doc_to_json_with_token_attributes_missing(doc): Token.set_extension("token_test", default=False) Span.set_extension("span_test", default=False) doc[0:1]._.span_test = "span_attribute" doc[0]._.token_test = 117 json_doc = doc.to_json(underscore=["span_test"]) assert "underscore_span" in json_doc assert json_doc["underscore_span"]["span_test"][0]["value"] == "span_attribute" assert "underscore_token" not in json_doc assert len(schemas.validate(schemas.DocJSONSchema, json_doc)) == 0 def test_doc_to_json_underscore_error_attr(doc): """Test that Doc.to_json() raises an error if a custom attribute doesn't exist in the ._ space.""" with pytest.raises(ValueError): doc.to_json(underscore=["json_test3"]) def test_doc_to_json_underscore_error_serialize(doc): """Test that Doc.to_json() raises an error if a custom attribute value isn't JSON-serializable.""" Doc.set_extension("json_test4", method=lambda doc: doc.text) with pytest.raises(ValueError): doc.to_json(underscore=["json_test4"]) def test_doc_to_json_span(doc): """Test that Doc.to_json() includes spans""" doc.spans["test"] = [Span(doc, 0, 2, "test"), Span(doc, 0, 1, "test")] json_doc = doc.to_json() assert "spans" in json_doc assert len(json_doc["spans"]) == 1 assert len(json_doc["spans"]["test"]) == 2 assert json_doc["spans"]["test"][0]["start"] == 0 assert len(schemas.validate(schemas.DocJSONSchema, json_doc)) == 0 def test_json_to_doc(doc): json_doc = doc.to_json() json_doc = srsly.json_loads(srsly.json_dumps(json_doc)) new_doc = Doc(doc.vocab).from_json(json_doc, validate=True) assert new_doc.text == doc.text == "c d e " assert len(new_doc) == len(doc) == 3 assert new_doc[0].pos == doc[0].pos assert new_doc[0].tag == doc[0].tag assert new_doc[0].dep == doc[0].dep assert new_doc[0].head.idx == doc[0].head.idx assert new_doc[0].lemma == doc[0].lemma assert len(new_doc.ents) == 1 assert new_doc.ents[0].start == 1 assert new_doc.ents[0].end == 2 assert new_doc.ents[0].label_ == "ORG" assert doc.to_bytes() == new_doc.to_bytes() def test_json_to_doc_compat(doc, doc_json): new_doc = Doc(doc.vocab).from_json(doc_json, validate=True) new_tokens = [token for token in new_doc] assert new_doc.text == doc.text == "c d e " assert len(new_tokens) == len([token for token in doc]) == 3 assert new_tokens[0].pos == doc[0].pos assert new_tokens[0].tag == doc[0].tag assert new_tokens[0].dep == doc[0].dep assert new_tokens[0].head.idx == doc[0].head.idx assert new_tokens[0].lemma == doc[0].lemma assert len(new_doc.ents) == 1 assert new_doc.ents[0].start == 1 assert new_doc.ents[0].end == 2 assert new_doc.ents[0].label_ == "ORG" def test_json_to_doc_underscore(doc): Doc.set_extension("json_test1", default=False) Doc.set_extension("json_test2", default=False) doc._.json_test1 = "hello world" doc._.json_test2 = [1, 2, 3] json_doc = doc.to_json(underscore=["json_test1", "json_test2"]) new_doc = Doc(doc.vocab).from_json(json_doc, validate=True) assert all([new_doc.has_extension(f"json_test{i}") for i in range(1, 3)]) assert new_doc._.json_test1 == "hello world" assert new_doc._.json_test2 == [1, 2, 3] assert doc.to_bytes() == new_doc.to_bytes() def test_json_to_doc_with_token_span_attributes(doc): Doc.set_extension("json_test1", default=False) Doc.set_extension("json_test2", default=False) Token.set_extension("token_test", default=False) Span.set_extension("span_test", default=False) doc._.json_test1 = "hello world" doc._.json_test2 = [1, 2, 3] doc[0:1]._.span_test = "span_attribute" doc[0:2]._.span_test = "span_attribute_2" doc[0]._.token_test = 117 
doc[1]._.token_test = 118 json_doc = doc.to_json( underscore=["json_test1", "json_test2", "token_test", "span_test"] ) json_doc = srsly.json_loads(srsly.json_dumps(json_doc)) new_doc = Doc(doc.vocab).from_json(json_doc, validate=True) assert all([new_doc.has_extension(f"json_test{i}") for i in range(1, 3)]) assert new_doc._.json_test1 == "hello world" assert new_doc._.json_test2 == [1, 2, 3] assert new_doc[0]._.token_test == 117 assert new_doc[1]._.token_test == 118 assert new_doc[0:1]._.span_test == "span_attribute" assert new_doc[0:2]._.span_test == "span_attribute_2" assert new_doc.user_data == doc.user_data assert new_doc.to_bytes(exclude=["user_data"]) == doc.to_bytes( exclude=["user_data"] ) def test_json_to_doc_spans(doc): """Test that Doc.from_json() includes correct.spans.""" doc.spans["test"] = [ Span(doc, 0, 2, label="test"), Span(doc, 0, 1, label="test", kb_id=7), ] json_doc = doc.to_json() new_doc = Doc(doc.vocab).from_json(json_doc, validate=True) assert len(new_doc.spans) == 1 assert len(new_doc.spans["test"]) == 2 for i in range(2): assert new_doc.spans["test"][i].start == doc.spans["test"][i].start assert new_doc.spans["test"][i].end == doc.spans["test"][i].end assert new_doc.spans["test"][i].label == doc.spans["test"][i].label assert new_doc.spans["test"][i].kb_id == doc.spans["test"][i].kb_id def test_json_to_doc_sents(doc, doc_without_deps): """Test that Doc.from_json() includes correct.sents.""" for test_doc in (doc, doc_without_deps): json_doc = test_doc.to_json() new_doc = Doc(doc.vocab).from_json(json_doc, validate=True) assert [sent.text for sent in test_doc.sents] == [ sent.text for sent in new_doc.sents ] assert [token.is_sent_start for token in test_doc] == [ token.is_sent_start for token in new_doc ] def test_json_to_doc_cats(doc): """Test that Doc.from_json() includes correct .cats.""" cats = {"A": 0.3, "B": 0.7} doc.cats = cats json_doc = doc.to_json() new_doc = Doc(doc.vocab).from_json(json_doc, validate=True) assert new_doc.cats == cats def test_json_to_doc_spaces(): """Test that Doc.from_json() preserves spaces correctly.""" doc = spacy.blank("en")("This is just brilliant.") json_doc = doc.to_json() new_doc = Doc(doc.vocab).from_json(json_doc, validate=True) assert doc.text == new_doc.text def test_json_to_doc_attribute_consistency(doc): """Test that Doc.from_json() raises an exception if tokens don't all have the same set of properties.""" doc_json = doc.to_json() doc_json["tokens"][1].pop("morph") with pytest.raises(ValueError): Doc(doc.vocab).from_json(doc_json) def test_json_to_doc_validation_error(doc): """Test that Doc.from_json() raises an exception when validating invalid input.""" doc_json = doc.to_json() doc_json.pop("tokens") with pytest.raises(ValueError): Doc(doc.vocab).from_json(doc_json, validate=True) def test_to_json_underscore_doc_getters(doc): def get_text_length(doc): return len(doc.text) Doc.set_extension("text_length", getter=get_text_length) doc_json = doc.to_json(underscore=["text_length"]) assert doc_json["_"]["text_length"] == get_text_length(doc)
13,660
34.575521
108
py
spaCy
spaCy-master/spacy/tests/doc/test_morphanalysis.py
import pytest @pytest.fixture def i_has(en_tokenizer): doc = en_tokenizer("I has") doc[0].set_morph({"PronType": "prs"}) doc[1].set_morph( { "VerbForm": "fin", "Tense": "pres", "Number": "sing", "Person": "three", } ) return doc def test_token_morph_eq(i_has): assert i_has[0].morph is not i_has[0].morph assert i_has[0].morph == i_has[0].morph assert i_has[0].morph != i_has[1].morph def test_token_morph_key(i_has): assert i_has[0].morph.key != 0 assert i_has[1].morph.key != 0 assert i_has[0].morph.key == i_has[0].morph.key assert i_has[0].morph.key != i_has[1].morph.key def test_morph_props(i_has): assert i_has[0].morph.get("PronType") == ["prs"] assert i_has[1].morph.get("PronType") == [] assert i_has[1].morph.get("AsdfType", ["asdf"]) == ["asdf"] assert i_has[1].morph.get("AsdfType", default=["asdf", "qwer"]) == ["asdf", "qwer"] def test_morph_iter(i_has): assert set(i_has[0].morph) == set(["PronType=prs"]) assert set(i_has[1].morph) == set( ["Number=sing", "Person=three", "Tense=pres", "VerbForm=fin"] ) def test_morph_get(i_has): assert i_has[0].morph.get("PronType") == ["prs"] def test_morph_set(i_has): assert i_has[0].morph.get("PronType") == ["prs"] # set by string i_has[0].set_morph("PronType=unk") assert i_has[0].morph.get("PronType") == ["unk"] # set by string, fields are alphabetized i_has[0].set_morph("PronType=123|NounType=unk") assert str(i_has[0].morph) == "NounType=unk|PronType=123" # set by dict i_has[0].set_morph({"AType": "123", "BType": "unk"}) assert str(i_has[0].morph) == "AType=123|BType=unk" # set by string with multiple values, fields and values are alphabetized i_has[0].set_morph("BType=c|AType=b,a") assert str(i_has[0].morph) == "AType=a,b|BType=c" # set by dict with multiple values, fields and values are alphabetized i_has[0].set_morph({"AType": "b,a", "BType": "c"}) assert str(i_has[0].morph) == "AType=a,b|BType=c" def test_morph_str(i_has): assert str(i_has[0].morph) == "PronType=prs" assert str(i_has[1].morph) == "Number=sing|Person=three|Tense=pres|VerbForm=fin" def test_morph_property(tokenizer): doc = tokenizer("a dog") # set through token.morph_ doc[0].set_morph("PronType=prs") assert str(doc[0].morph) == "PronType=prs" assert doc.to_array(["MORPH"])[0] != 0 # unset with token.morph doc[0].set_morph(None) assert doc.to_array(["MORPH"])[0] == 0 # empty morph is equivalent to "_" doc[0].set_morph("") assert str(doc[0].morph) == "" assert doc.to_array(["MORPH"])[0] == tokenizer.vocab.strings["_"] # "_" morph is also equivalent to empty morph doc[0].set_morph("_") assert str(doc[0].morph) == "" assert doc.to_array(["MORPH"])[0] == tokenizer.vocab.strings["_"] # set through existing hash with token.morph tokenizer.vocab.strings.add("Feat=Val") doc[0].set_morph(tokenizer.vocab.strings.add("Feat=Val")) assert str(doc[0].morph) == "Feat=Val"
3,145
30.148515
87
py
spaCy
spaCy-master/spacy/tests/doc/test_pickle_doc.py
from spacy.compat import pickle from spacy.language import Language def test_pickle_single_doc(): nlp = Language() doc = nlp("pickle roundtrip") data = pickle.dumps(doc, 1) doc2 = pickle.loads(data) assert doc2.text == "pickle roundtrip" def test_list_of_docs_pickles_efficiently(): nlp = Language() for i in range(10000): _ = nlp.vocab[str(i)] # noqa: F841 one_pickled = pickle.dumps(nlp("0"), -1) docs = list(nlp.pipe(str(i) for i in range(100))) many_pickled = pickle.dumps(docs, -1) assert len(many_pickled) < (len(one_pickled) * 2) many_unpickled = pickle.loads(many_pickled) assert many_unpickled[0].text == "0" assert many_unpickled[-1].text == "99" assert len(many_unpickled) == 100 def test_user_data_from_disk(): nlp = Language() doc = nlp("Hello") doc.user_data[(0, 1)] = False b = doc.to_bytes() doc2 = doc.__class__(doc.vocab).from_bytes(b) assert doc2.user_data[(0, 1)] is False def test_user_data_unpickles(): nlp = Language() doc = nlp("Hello") doc.user_data[(0, 1)] = False b = pickle.dumps(doc) doc2 = pickle.loads(b) assert doc2.user_data[(0, 1)] is False def test_hooks_unpickle(): def inner_func(d1, d2): return "hello!" nlp = Language() doc = nlp("Hello") doc.user_hooks["similarity"] = inner_func b = pickle.dumps(doc) doc2 = pickle.loads(b) assert doc2.similarity(None) == "hello!"
1,470
25.745455
53
py
spaCy
spaCy-master/spacy/tests/doc/test_retokenize_merge.py
import pytest from spacy.attrs import LEMMA from spacy.tokens import Doc, Token from spacy.vocab import Vocab def test_doc_retokenize_merge(en_tokenizer): text = "WKRO played songs by the beach boys all night" attrs = { "tag": "NAMED", "lemma": "LEMMA", "ent_type": "TYPE", "morph": "Number=Plur", } doc = en_tokenizer(text) assert len(doc) == 9 with doc.retokenize() as retokenizer: retokenizer.merge(doc[4:7], attrs=attrs) retokenizer.merge(doc[7:9], attrs=attrs) assert len(doc) == 6 assert doc[4].text == "the beach boys" assert doc[4].text_with_ws == "the beach boys " assert doc[4].tag_ == "NAMED" assert doc[4].lemma_ == "LEMMA" assert str(doc[4].morph) == "Number=Plur" assert doc[5].text == "all night" assert doc[5].text_with_ws == "all night" assert doc[5].tag_ == "NAMED" assert str(doc[5].morph) == "Number=Plur" assert doc[5].lemma_ == "LEMMA" def test_doc_retokenize_merge_children(en_tokenizer): """Test that attachments work correctly after merging.""" text = "WKRO played songs by the beach boys all night" attrs = {"tag": "NAMED", "lemma": "LEMMA", "ent_type": "TYPE"} doc = en_tokenizer(text) assert len(doc) == 9 with doc.retokenize() as retokenizer: retokenizer.merge(doc[4:7], attrs=attrs) for word in doc: if word.i < word.head.i: assert word in list(word.head.lefts) elif word.i > word.head.i: assert word in list(word.head.rights) def test_doc_retokenize_merge_hang(en_tokenizer): text = "through North and South Carolina" doc = en_tokenizer(text) with doc.retokenize() as retokenizer: retokenizer.merge(doc[3:5], attrs={"lemma": "", "ent_type": "ORG"}) retokenizer.merge(doc[1:2], attrs={"lemma": "", "ent_type": "ORG"}) def test_doc_retokenize_retokenizer(en_tokenizer): doc = en_tokenizer("WKRO played songs by the beach boys all night") with doc.retokenize() as retokenizer: retokenizer.merge(doc[4:7]) assert len(doc) == 7 assert doc[4].text == "the beach boys" def test_doc_retokenize_retokenizer_attrs(en_tokenizer): doc = en_tokenizer("WKRO played songs by the beach boys all night") # test both string and integer attributes and values attrs = {LEMMA: "boys", "ENT_TYPE": doc.vocab.strings["ORG"]} with doc.retokenize() as retokenizer: retokenizer.merge(doc[4:7], attrs=attrs) assert len(doc) == 7 assert doc[4].text == "the beach boys" assert doc[4].lemma_ == "boys" assert doc[4].ent_type_ == "ORG" def test_doc_retokenize_lex_attrs(en_tokenizer): """Test that lexical attributes can be changed (see #2390).""" doc = en_tokenizer("WKRO played beach boys songs") assert not any(token.is_stop for token in doc) with doc.retokenize() as retokenizer: retokenizer.merge(doc[2:4], attrs={"LEMMA": "boys", "IS_STOP": True}) assert doc[2].text == "beach boys" assert doc[2].lemma_ == "boys" assert doc[2].is_stop new_doc = Doc(doc.vocab, words=["beach boys"]) assert new_doc[0].is_stop def test_doc_retokenize_spans_merge_tokens(en_tokenizer): text = "Los Angeles start." 
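    # "Los" attaches to "Angeles", which attaches to the root "start"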
heads = [1, 2, 2, 2] deps = ["dep"] * len(heads) tokens = en_tokenizer(text) doc = Doc(tokens.vocab, words=[t.text for t in tokens], heads=heads, deps=deps) assert len(doc) == 4 assert doc[0].head.text == "Angeles" assert doc[1].head.text == "start" with doc.retokenize() as retokenizer: attrs = {"tag": "NNP", "lemma": "Los Angeles", "ent_type": "GPE"} retokenizer.merge(doc[0:2], attrs=attrs) assert len(doc) == 3 assert doc[0].text == "Los Angeles" assert doc[0].head.text == "start" assert doc[0].ent_type_ == "GPE" def test_doc_retokenize_spans_merge_tokens_default_attrs(en_vocab): words = ["The", "players", "start", "."] lemmas = [t.lower() for t in words] heads = [1, 2, 2, 2] deps = ["dep"] * len(heads) tags = ["DT", "NN", "VBZ", "."] pos = ["DET", "NOUN", "VERB", "PUNCT"] doc = Doc( en_vocab, words=words, tags=tags, pos=pos, heads=heads, deps=deps, lemmas=lemmas ) assert len(doc) == 4 assert doc[0].text == "The" assert doc[0].tag_ == "DT" assert doc[0].pos_ == "DET" assert doc[0].lemma_ == "the" with doc.retokenize() as retokenizer: retokenizer.merge(doc[0:2]) assert len(doc) == 3 assert doc[0].text == "The players" assert doc[0].tag_ == "NN" assert doc[0].pos_ == "NOUN" assert doc[0].lemma_ == "the players" doc = Doc( en_vocab, words=words, tags=tags, pos=pos, heads=heads, deps=deps, lemmas=lemmas ) assert len(doc) == 4 assert doc[0].text == "The" assert doc[0].tag_ == "DT" assert doc[0].pos_ == "DET" assert doc[0].lemma_ == "the" with doc.retokenize() as retokenizer: retokenizer.merge(doc[0:2]) retokenizer.merge(doc[2:4]) assert len(doc) == 2 assert doc[0].text == "The players" assert doc[0].tag_ == "NN" assert doc[0].pos_ == "NOUN" assert doc[0].lemma_ == "the players" assert doc[1].text == "start ." assert doc[1].tag_ == "VBZ" assert doc[1].pos_ == "VERB" assert doc[1].lemma_ == "start ." def test_doc_retokenize_spans_merge_heads(en_vocab): words = ["I", "found", "a", "pilates", "class", "near", "work", "."] heads = [1, 1, 4, 6, 1, 4, 5, 1] deps = ["dep"] * len(heads) doc = Doc(en_vocab, words=words, heads=heads, deps=deps) assert len(doc) == 8 with doc.retokenize() as retokenizer: attrs = {"tag": doc[4].tag_, "lemma": "pilates class", "ent_type": "O"} retokenizer.merge(doc[3:5], attrs=attrs) assert len(doc) == 7 assert doc[0].head.i == 1 assert doc[1].head.i == 1 assert doc[2].head.i == 3 assert doc[3].head.i == 1 assert doc[4].head.i in [1, 3] assert doc[5].head.i == 4 def test_doc_retokenize_spans_merge_non_disjoint(en_tokenizer): text = "Los Angeles start." doc = en_tokenizer(text) with pytest.raises(ValueError): with doc.retokenize() as retokenizer: retokenizer.merge( doc[0:2], attrs={"tag": "NNP", "lemma": "Los Angeles", "ent_type": "GPE"}, ) retokenizer.merge( doc[0:1], attrs={"tag": "NNP", "lemma": "Los Angeles", "ent_type": "GPE"}, ) def test_doc_retokenize_span_np_merges(en_tokenizer): text = "displaCy is a parse tool built with Javascript" heads = [1, 1, 4, 4, 1, 4, 5, 6] deps = ["dep"] * len(heads) tokens = en_tokenizer(text) doc = Doc(tokens.vocab, words=[t.text for t in tokens], heads=heads, deps=deps) assert doc[4].head.i == 1 with doc.retokenize() as retokenizer: attrs = {"tag": "NP", "lemma": "tool", "ent_type": "O"} retokenizer.merge(doc[2:5], attrs=attrs) assert doc[2].head.i == 1 text = "displaCy is a lightweight and modern dependency parse tree visualization tool built with CSS3 and JavaScript." 
heads = [1, 1, 10, 7, 3, 3, 7, 10, 9, 10, 1, 10, 11, 12, 13, 13, 1] deps = ["dep"] * len(heads) tokens = en_tokenizer(text) doc = Doc(tokens.vocab, words=[t.text for t in tokens], heads=heads, deps=deps) with doc.retokenize() as retokenizer: for ent in doc.ents: attrs = {"tag": ent.label_, "lemma": ent.lemma_, "ent_type": ent.label_} retokenizer.merge(ent, attrs=attrs) text = "One test with entities like New York City so the ents list is not void" heads = [1, 1, 1, 2, 3, 6, 7, 4, 12, 11, 11, 12, 1, 12, 12] deps = ["dep"] * len(heads) tokens = en_tokenizer(text) doc = Doc(tokens.vocab, words=[t.text for t in tokens], heads=heads, deps=deps) with doc.retokenize() as retokenizer: for ent in doc.ents: retokenizer.merge(ent) def test_doc_retokenize_spans_entity_merge(en_tokenizer): # fmt: off text = "Stewart Lee is a stand up comedian who lives in England and loves Joe Pasquale.\n" heads = [1, 2, 2, 4, 6, 4, 2, 8, 6, 8, 9, 8, 8, 14, 12, 2, 15] deps = ["dep"] * len(heads) tags = ["NNP", "NNP", "VBZ", "DT", "VB", "RP", "NN", "WP", "VBZ", "IN", "NNP", "CC", "VBZ", "NNP", "NNP", ".", "SP"] ents = [("PERSON", 0, 2), ("GPE", 10, 11), ("PERSON", 13, 15)] ents = ["O"] * len(heads) ents[0] = "B-PERSON" ents[1] = "I-PERSON" ents[10] = "B-GPE" ents[13] = "B-PERSON" ents[14] = "I-PERSON" # fmt: on tokens = en_tokenizer(text) doc = Doc( tokens.vocab, words=[t.text for t in tokens], heads=heads, deps=deps, tags=tags, ents=ents, ) assert len(doc) == 17 with doc.retokenize() as retokenizer: for ent in doc.ents: ent_type = max(w.ent_type_ for w in ent) attrs = {"lemma": ent.root.lemma_, "ent_type": ent_type} retokenizer.merge(ent, attrs=attrs) # check looping is ok assert len(doc) == 15 def test_doc_retokenize_spans_entity_merge_iob(en_vocab): # Test entity IOB stays consistent after merging words = ["a", "b", "c", "d", "e"] doc = Doc(Vocab(), words=words) doc.ents = [ (doc.vocab.strings.add("ent-abc"), 0, 3), (doc.vocab.strings.add("ent-d"), 3, 4), ] assert doc[0].ent_iob_ == "B" assert doc[1].ent_iob_ == "I" assert doc[2].ent_iob_ == "I" assert doc[3].ent_iob_ == "B" with doc.retokenize() as retokenizer: retokenizer.merge(doc[0:2]) assert len(doc) == len(words) - 1 assert doc[0].ent_iob_ == "B" assert doc[1].ent_iob_ == "I" # Test that IOB stays consistent with provided IOB words = ["a", "b", "c", "d", "e"] doc = Doc(Vocab(), words=words) with doc.retokenize() as retokenizer: attrs = {"ent_type": "ent-abc", "ent_iob": 1} retokenizer.merge(doc[0:3], attrs=attrs) retokenizer.merge(doc[3:5], attrs=attrs) assert doc[0].ent_iob_ == "B" assert doc[1].ent_iob_ == "I" # if no parse/heads, the first word in the span is the root and provides # default values words = ["a", "b", "c", "d", "e", "f", "g", "h", "i"] doc = Doc(Vocab(), words=words) doc.ents = [ (doc.vocab.strings.add("ent-de"), 3, 5), (doc.vocab.strings.add("ent-fg"), 5, 7), ] assert doc[3].ent_iob_ == "B" assert doc[4].ent_iob_ == "I" assert doc[5].ent_iob_ == "B" assert doc[6].ent_iob_ == "I" with doc.retokenize() as retokenizer: retokenizer.merge(doc[2:4]) retokenizer.merge(doc[4:6]) retokenizer.merge(doc[7:9]) assert len(doc) == 6 assert doc[3].ent_iob_ == "B" assert doc[3].ent_type_ == "ent-de" assert doc[4].ent_iob_ == "B" assert doc[4].ent_type_ == "ent-fg" # if there is a parse, span.root provides default values words = ["a", "b", "c", "d", "e", "f", "g", "h", "i"] heads = [0, 0, 3, 0, 0, 0, 5, 0, 0] ents = ["O"] * len(words) ents[3] = "B-ent-de" ents[4] = "I-ent-de" ents[5] = "B-ent-fg" ents[6] = "I-ent-fg" deps = ["dep"] * len(words) 
en_vocab.strings.add("ent-de") en_vocab.strings.add("ent-fg") en_vocab.strings.add("dep") doc = Doc(en_vocab, words=words, heads=heads, deps=deps, ents=ents) assert doc[2:4].root == doc[3] # root of 'c d' is d assert doc[4:6].root == doc[4] # root is 'e f' is e with doc.retokenize() as retokenizer: retokenizer.merge(doc[2:4]) retokenizer.merge(doc[4:6]) retokenizer.merge(doc[7:9]) assert len(doc) == 6 assert doc[2].ent_iob_ == "B" assert doc[2].ent_type_ == "ent-de" assert doc[3].ent_iob_ == "I" assert doc[3].ent_type_ == "ent-de" assert doc[4].ent_iob_ == "B" assert doc[4].ent_type_ == "ent-fg" # check that B is preserved if span[start] is B words = ["a", "b", "c", "d", "e", "f", "g", "h", "i"] heads = [0, 0, 3, 4, 0, 0, 5, 0, 0] ents = ["O"] * len(words) ents[3] = "B-ent-de" ents[4] = "I-ent-de" ents[5] = "B-ent-de" ents[6] = "I-ent-de" deps = ["dep"] * len(words) doc = Doc(en_vocab, words=words, heads=heads, deps=deps, ents=ents) with doc.retokenize() as retokenizer: retokenizer.merge(doc[3:5]) retokenizer.merge(doc[5:7]) assert len(doc) == 7 assert doc[3].ent_iob_ == "B" assert doc[3].ent_type_ == "ent-de" assert doc[4].ent_iob_ == "B" assert doc[4].ent_type_ == "ent-de" def test_doc_retokenize_spans_sentence_update_after_merge(en_tokenizer): # fmt: off text = "Stewart Lee is a stand up comedian. He lives in England and loves Joe Pasquale." heads = [1, 2, 2, 4, 2, 4, 4, 2, 9, 9, 9, 10, 9, 9, 15, 13, 9] deps = ['compound', 'nsubj', 'ROOT', 'det', 'amod', 'prt', 'attr', 'punct', 'nsubj', 'ROOT', 'prep', 'pobj', 'cc', 'conj', 'compound', 'dobj', 'punct'] # fmt: on tokens = en_tokenizer(text) doc = Doc(tokens.vocab, words=[t.text for t in tokens], heads=heads, deps=deps) sent1, sent2 = list(doc.sents) init_len = len(sent1) init_len2 = len(sent2) with doc.retokenize() as retokenizer: attrs = {"lemma": "none", "ent_type": "none"} retokenizer.merge(doc[0:2], attrs=attrs) retokenizer.merge(doc[-2:], attrs=attrs) sent1, sent2 = list(doc.sents) assert len(sent1) == init_len - 1 assert len(sent2) == init_len2 - 1 def test_doc_retokenize_spans_subtree_size_check(en_tokenizer): # fmt: off text = "Stewart Lee is a stand up comedian who lives in England and loves Joe Pasquale" heads = [1, 2, 2, 4, 6, 4, 2, 8, 6, 8, 9, 8, 8, 14, 12] deps = ["compound", "nsubj", "ROOT", "det", "amod", "prt", "attr", "nsubj", "relcl", "prep", "pobj", "cc", "conj", "compound", "dobj"] # fmt: on tokens = en_tokenizer(text) doc = Doc(tokens.vocab, words=[t.text for t in tokens], heads=heads, deps=deps) sent1 = list(doc.sents)[0] init_len = len(list(sent1.root.subtree)) with doc.retokenize() as retokenizer: attrs = {"lemma": "none", "ent_type": "none"} retokenizer.merge(doc[0:2], attrs=attrs) assert len(list(sent1.root.subtree)) == init_len - 1 def test_doc_retokenize_merge_extension_attrs(en_vocab): Token.set_extension("a", default=False, force=True) Token.set_extension("b", default="nothing", force=True) doc = Doc(en_vocab, words=["hello", "world", "!"]) # Test regular merging with doc.retokenize() as retokenizer: attrs = {"lemma": "hello world", "_": {"a": True, "b": "1"}} retokenizer.merge(doc[0:2], attrs=attrs) assert doc[0].lemma_ == "hello world" assert doc[0]._.a is True assert doc[0]._.b == "1" # Test bulk merging doc = Doc(en_vocab, words=["hello", "world", "!", "!"]) with doc.retokenize() as retokenizer: retokenizer.merge(doc[0:2], attrs={"_": {"a": True, "b": "1"}}) retokenizer.merge(doc[2:4], attrs={"_": {"a": None, "b": "2"}}) assert doc[0]._.a is True assert doc[0]._.b == "1" assert doc[1]._.a is None assert 
doc[1]._.b == "2" @pytest.mark.parametrize("underscore_attrs", [{"a": "x"}, {"b": "x"}, {"c": "x"}, [1]]) def test_doc_retokenize_merge_extension_attrs_invalid(en_vocab, underscore_attrs): Token.set_extension("a", getter=lambda x: x, force=True) Token.set_extension("b", method=lambda x: x, force=True) doc = Doc(en_vocab, words=["hello", "world", "!"]) attrs = {"_": underscore_attrs} with pytest.raises(ValueError): with doc.retokenize() as retokenizer: retokenizer.merge(doc[0:2], attrs=attrs) def test_doc_retokenizer_merge_lex_attrs(en_vocab): """Test that retokenization also sets attributes on the lexeme if they're lexical attributes. For example, if a user sets IS_STOP, it should mean that "all tokens with that lexeme" are marked as a stop word, so the ambiguity here is acceptable. Also see #2390. """ # Test regular merging doc = Doc(en_vocab, words=["hello", "world", "!"]) assert not any(t.is_stop for t in doc) with doc.retokenize() as retokenizer: retokenizer.merge(doc[0:2], attrs={"lemma": "hello world", "is_stop": True}) assert doc[0].lemma_ == "hello world" assert doc[0].is_stop # Test bulk merging doc = Doc(en_vocab, words=["eins", "zwei", "!", "!"]) assert not any(t.like_num for t in doc) assert not any(t.is_stop for t in doc) with doc.retokenize() as retokenizer: retokenizer.merge(doc[0:2], attrs={"like_num": True}) retokenizer.merge(doc[2:4], attrs={"is_stop": True}) assert doc[0].like_num assert doc[1].is_stop assert not doc[0].is_stop assert not doc[1].like_num # Test that norm is only set on tokens doc = Doc(en_vocab, words=["eins", "zwei", "!", "!"]) assert doc[0].norm_ == "eins" with doc.retokenize() as retokenizer: retokenizer.merge(doc[0:1], attrs={"norm": "1"}) assert doc[0].norm_ == "1" assert en_vocab["eins"].norm_ == "eins" def test_retokenize_skip_duplicates(en_vocab): """Test that the retokenizer automatically skips duplicate spans instead of complaining about overlaps. See #3687.""" doc = Doc(en_vocab, words=["hello", "world", "!"]) with doc.retokenize() as retokenizer: retokenizer.merge(doc[0:2]) retokenizer.merge(doc[0:2]) assert len(doc) == 2 assert doc[0].text == "hello world" def test_retokenize_disallow_zero_length(en_vocab): doc = Doc(en_vocab, words=["hello", "world", "!"]) with pytest.raises(ValueError): with doc.retokenize() as retokenizer: retokenizer.merge(doc[1:1]) def test_doc_retokenize_merge_without_parse_keeps_sents(en_tokenizer): text = "displaCy is a parse tool built with Javascript" sent_starts = [1, 0, 0, 0, 1, 0, 0, 0] tokens = en_tokenizer(text) # merging within a sentence keeps all sentence boundaries doc = Doc(tokens.vocab, words=[t.text for t in tokens], sent_starts=sent_starts) assert len(list(doc.sents)) == 2 with doc.retokenize() as retokenizer: retokenizer.merge(doc[1:3]) assert len(list(doc.sents)) == 2 # merging over a sentence boundary unsets it by default doc = Doc(tokens.vocab, words=[t.text for t in tokens], sent_starts=sent_starts) assert len(list(doc.sents)) == 2 with doc.retokenize() as retokenizer: retokenizer.merge(doc[3:6]) assert doc[3].is_sent_start is None # merging over a sentence boundary and setting sent_start doc = Doc(tokens.vocab, words=[t.text for t in tokens], sent_starts=sent_starts) assert len(list(doc.sents)) == 2 with doc.retokenize() as retokenizer: retokenizer.merge(doc[3:6], attrs={"sent_start": True}) assert len(list(doc.sents)) == 2
18,956
37.219758
122
py
spaCy
spaCy-master/spacy/tests/doc/test_retokenize_split.py
import numpy import pytest from spacy.tokens import Doc, Token from spacy.vocab import Vocab @pytest.mark.issue(3540) def test_issue3540(en_vocab): words = ["I", "live", "in", "NewYork", "right", "now"] tensor = numpy.asarray( [[1.0, 1.1], [2.0, 2.1], [3.0, 3.1], [4.0, 4.1], [5.0, 5.1], [6.0, 6.1]], dtype="f", ) doc = Doc(en_vocab, words=words) doc.tensor = tensor gold_text = ["I", "live", "in", "NewYork", "right", "now"] assert [token.text for token in doc] == gold_text gold_lemma = ["I", "live", "in", "NewYork", "right", "now"] for i, lemma in enumerate(gold_lemma): doc[i].lemma_ = lemma assert [token.lemma_ for token in doc] == gold_lemma vectors_1 = [token.vector for token in doc] assert len(vectors_1) == len(doc) with doc.retokenize() as retokenizer: heads = [(doc[3], 1), doc[2]] attrs = { "POS": ["PROPN", "PROPN"], "LEMMA": ["New", "York"], "DEP": ["pobj", "compound"], } retokenizer.split(doc[3], ["New", "York"], heads=heads, attrs=attrs) gold_text = ["I", "live", "in", "New", "York", "right", "now"] assert [token.text for token in doc] == gold_text gold_lemma = ["I", "live", "in", "New", "York", "right", "now"] assert [token.lemma_ for token in doc] == gold_lemma vectors_2 = [token.vector for token in doc] assert len(vectors_2) == len(doc) assert vectors_1[0].tolist() == vectors_2[0].tolist() assert vectors_1[1].tolist() == vectors_2[1].tolist() assert vectors_1[2].tolist() == vectors_2[2].tolist() assert vectors_1[4].tolist() == vectors_2[5].tolist() assert vectors_1[5].tolist() == vectors_2[6].tolist() def test_doc_retokenize_split(en_vocab): words = ["LosAngeles", "start", "."] heads = [1, 2, 2] deps = ["dep"] * len(heads) doc = Doc(en_vocab, words=words, heads=heads, deps=deps) assert len(doc) == 3 assert len(str(doc)) == 19 assert doc[0].head.text == "start" assert doc[1].head.text == "." with doc.retokenize() as retokenizer: retokenizer.split( doc[0], ["Los", "Angeles"], [(doc[0], 1), doc[1]], attrs={ "tag": ["NNP"] * 2, "lemma": ["Los", "Angeles"], "ent_type": ["GPE"] * 2, "morph": ["Number=Sing"] * 2, }, ) assert len(doc) == 4 assert doc[0].text == "Los" assert doc[0].head.text == "Angeles" assert doc[0].idx == 0 assert str(doc[0].morph) == "Number=Sing" assert doc[1].idx == 3 assert doc[1].text == "Angeles" assert doc[1].head.text == "start" assert str(doc[1].morph) == "Number=Sing" assert doc[2].text == "start" assert doc[2].head.text == "." assert doc[3].text == "." assert doc[3].head.text == "." 
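    # the underlying text (and its length) is unchanged by the split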
assert len(str(doc)) == 19 def test_doc_retokenize_split_lemmas(en_vocab): # If lemmas are not set, leave unset words = ["LosAngeles", "start", "."] heads = [1, 2, 2] deps = ["dep"] * len(heads) doc = Doc(en_vocab, words=words, heads=heads, deps=deps) with doc.retokenize() as retokenizer: retokenizer.split( doc[0], ["Los", "Angeles"], [(doc[0], 1), doc[1]], ) assert doc[0].lemma_ == "" assert doc[1].lemma_ == "" # If lemmas are set, use split orth as default lemma words = ["LosAngeles", "start", "."] heads = [1, 2, 2] deps = ["dep"] * len(heads) doc = Doc(en_vocab, words=words, heads=heads, deps=deps) for t in doc: t.lemma_ = "a" with doc.retokenize() as retokenizer: retokenizer.split( doc[0], ["Los", "Angeles"], [(doc[0], 1), doc[1]], ) assert doc[0].lemma_ == "Los" assert doc[1].lemma_ == "Angeles" def test_doc_retokenize_split_dependencies(en_vocab): doc = Doc(en_vocab, words=["LosAngeles", "start", "."]) dep1 = doc.vocab.strings.add("amod") dep2 = doc.vocab.strings.add("subject") with doc.retokenize() as retokenizer: retokenizer.split( doc[0], ["Los", "Angeles"], [(doc[0], 1), doc[1]], attrs={"dep": [dep1, dep2]}, ) assert doc[0].dep == dep1 assert doc[1].dep == dep2 def test_doc_retokenize_split_heads_error(en_vocab): doc = Doc(en_vocab, words=["LosAngeles", "start", "."]) # Not enough heads with pytest.raises(ValueError): with doc.retokenize() as retokenizer: retokenizer.split(doc[0], ["Los", "Angeles"], [doc[1]]) # Too many heads with pytest.raises(ValueError): with doc.retokenize() as retokenizer: retokenizer.split(doc[0], ["Los", "Angeles"], [doc[1], doc[1], doc[1]]) def test_doc_retokenize_spans_entity_split_iob(): # Test entity IOB stays consistent after merging words = ["abc", "d", "e"] doc = Doc(Vocab(), words=words) doc.ents = [(doc.vocab.strings.add("ent-abcd"), 0, 2)] assert doc[0].ent_iob_ == "B" assert doc[1].ent_iob_ == "I" with doc.retokenize() as retokenizer: retokenizer.split(doc[0], ["a", "b", "c"], [(doc[0], 1), (doc[0], 2), doc[1]]) assert doc[0].ent_iob_ == "B" assert doc[1].ent_iob_ == "I" assert doc[2].ent_iob_ == "I" assert doc[3].ent_iob_ == "I" def test_doc_retokenize_spans_sentence_update_after_split(en_vocab): # fmt: off words = ["StewartLee", "is", "a", "stand", "up", "comedian", ".", "He", "lives", "in", "England", "and", "loves", "JoePasquale", "."] heads = [1, 1, 3, 5, 3, 1, 1, 8, 8, 8, 9, 8, 8, 14, 12] deps = ["nsubj", "ROOT", "det", "amod", "prt", "attr", "punct", "nsubj", "ROOT", "prep", "pobj", "cc", "conj", "compound", "punct"] # fmt: on doc = Doc(en_vocab, words=words, heads=heads, deps=deps) sent1, sent2 = list(doc.sents) init_len = len(sent1) init_len2 = len(sent2) with doc.retokenize() as retokenizer: retokenizer.split( doc[0], ["Stewart", "Lee"], [(doc[0], 1), doc[1]], attrs={"dep": ["compound", "nsubj"]}, ) retokenizer.split( doc[13], ["Joe", "Pasquale"], [(doc[13], 1), doc[12]], attrs={"dep": ["compound", "dobj"]}, ) sent1, sent2 = list(doc.sents) assert len(sent1) == init_len + 1 assert len(sent2) == init_len2 + 1 def test_doc_retokenize_split_orths_mismatch(en_vocab): """Test that the regular retokenizer.split raises an error if the orths don't match the original token text. There might still be a method that allows this, but for the default use cases, merging and splitting should always conform with spaCy's non-destructive tokenization policy. Otherwise, it can lead to very confusing and unexpected results. 
""" doc = Doc(en_vocab, words=["LosAngeles", "start", "."]) with pytest.raises(ValueError): with doc.retokenize() as retokenizer: retokenizer.split(doc[0], ["L", "A"], [(doc[0], 0), (doc[0], 0)]) def test_doc_retokenize_split_extension_attrs(en_vocab): Token.set_extension("a", default=False, force=True) Token.set_extension("b", default="nothing", force=True) doc = Doc(en_vocab, words=["LosAngeles", "start"]) with doc.retokenize() as retokenizer: heads = [(doc[0], 1), doc[1]] underscore = [{"a": True, "b": "1"}, {"b": "2"}] attrs = {"lemma": ["los", "angeles"], "_": underscore} retokenizer.split(doc[0], ["Los", "Angeles"], heads, attrs=attrs) assert doc[0].lemma_ == "los" assert doc[0]._.a is True assert doc[0]._.b == "1" assert doc[1].lemma_ == "angeles" assert doc[1]._.a is False assert doc[1]._.b == "2" @pytest.mark.parametrize( "underscore_attrs", [ [{"a": "x"}, {}], # Overwriting getter without setter [{"b": "x"}, {}], # Overwriting method [{"c": "x"}, {}], # Overwriting nonexistent attribute [{"a": "x"}, {"x": "x"}], # Combination [{"a": "x", "x": "x"}, {"x": "x"}], # Combination {"x": "x"}, # Not a list of dicts ], ) def test_doc_retokenize_split_extension_attrs_invalid(en_vocab, underscore_attrs): Token.set_extension("x", default=False, force=True) Token.set_extension("a", getter=lambda x: x, force=True) Token.set_extension("b", method=lambda x: x, force=True) doc = Doc(en_vocab, words=["LosAngeles", "start"]) attrs = {"_": underscore_attrs} with pytest.raises(ValueError): with doc.retokenize() as retokenizer: heads = [(doc[0], 1), doc[1]] retokenizer.split(doc[0], ["Los", "Angeles"], heads, attrs=attrs) def test_doc_retokenizer_split_lex_attrs(en_vocab): """Test that retokenization also sets attributes on the lexeme if they're lexical attributes. For example, if a user sets IS_STOP, it should mean that "all tokens with that lexeme" are marked as a stop word, so the ambiguity here is acceptable. Also see #2390. """ assert not Doc(en_vocab, words=["Los"])[0].is_stop assert not Doc(en_vocab, words=["Angeles"])[0].is_stop doc = Doc(en_vocab, words=["LosAngeles", "start"]) assert not doc[0].is_stop with doc.retokenize() as retokenizer: attrs = {"is_stop": [True, False]} heads = [(doc[0], 1), doc[1]] retokenizer.split(doc[0], ["Los", "Angeles"], heads, attrs=attrs) assert doc[0].is_stop assert not doc[1].is_stop def test_doc_retokenizer_realloc(en_vocab): """#4604: realloc correctly when new tokens outnumber original tokens""" text = "Hyperglycemic adverse events following antipsychotic drug administration in the" doc = Doc(en_vocab, words=text.split()[:-1]) with doc.retokenize() as retokenizer: token = doc[0] heads = [(token, 0)] * len(token) retokenizer.split(doc[token.i], list(token.text), heads=heads) doc = Doc(en_vocab, words=text.split()) with doc.retokenize() as retokenizer: token = doc[0] heads = [(token, 0)] * len(token) retokenizer.split(doc[token.i], list(token.text), heads=heads) def test_doc_retokenizer_split_norm(en_vocab): """#6060: reset norm in split""" text = "The quick brownfoxjumpsoverthe lazy dog w/ white spots" doc = Doc(en_vocab, words=text.split()) # Set custom norm on the w/ token. doc[5].norm_ = "with" # Retokenize to split out the words in the token at doc[2]. token = doc[2] with doc.retokenize() as retokenizer: retokenizer.split( token, ["brown", "fox", "jumps", "over", "the"], heads=[(token, idx) for idx in range(5)], ) assert doc[9].text == "w/" assert doc[9].norm_ == "with" assert doc[5].text == "over" assert doc[5].norm_ == "over"
file_length: 10,937 | avg_line_length: 35.828283 | max_line_length: 92 | extension_type: py
repo: spaCy | file: spaCy-master/spacy/tests/doc/test_span.py
import numpy import pytest from numpy.testing import assert_array_equal from thinc.api import get_current_ops from spacy.attrs import LENGTH, ORTH from spacy.lang.en import English from spacy.tokens import Doc, Span, Token from spacy.util import filter_spans from spacy.vocab import Vocab from ..util import add_vecs_to_vocab from .test_underscore import clean_underscore # noqa: F401 @pytest.fixture def doc(en_tokenizer): # fmt: off text = "This is a sentence. This is another sentence. And a third." heads = [1, 1, 3, 1, 1, 6, 6, 8, 6, 6, 12, 12, 12, 12] deps = ["nsubj", "ROOT", "det", "attr", "punct", "nsubj", "ROOT", "det", "attr", "punct", "ROOT", "det", "npadvmod", "punct"] ents = ["O", "O", "B-ENT", "I-ENT", "I-ENT", "I-ENT", "I-ENT", "O", "O", "O", "O", "O", "O", "O"] # fmt: on tokens = en_tokenizer(text) lemmas = [t.text for t in tokens] # this is not correct, just a placeholder spaces = [bool(t.whitespace_) for t in tokens] return Doc( tokens.vocab, words=[t.text for t in tokens], spaces=spaces, heads=heads, deps=deps, ents=ents, lemmas=lemmas, ) @pytest.fixture def doc_not_parsed(en_tokenizer): text = "This is a sentence. This is another sentence. And a third." tokens = en_tokenizer(text) doc = Doc(tokens.vocab, words=[t.text for t in tokens]) return doc @pytest.mark.issue(1537) def test_issue1537(): """Test that Span.as_doc() doesn't segfault.""" string = "The sky is blue . The man is pink . The dog is purple ." doc = Doc(Vocab(), words=string.split()) doc[0].sent_start = True for word in doc[1:]: if word.nbor(-1).text == ".": word.sent_start = True else: word.sent_start = False sents = list(doc.sents) sent0 = sents[0].as_doc() sent1 = sents[1].as_doc() assert isinstance(sent0, Doc) assert isinstance(sent1, Doc) @pytest.mark.issue(1612) def test_issue1612(en_tokenizer): """Test that span.orth_ is identical to span.text""" doc = en_tokenizer("The black cat purrs.") span = doc[1:3] assert span.orth_ == span.text @pytest.mark.issue(3199) def test_issue3199(): """Test that Span.noun_chunks works correctly if no noun chunks iterator is available. To make this test future-proof, we're constructing a Doc with a new Vocab here and a parse tree to make sure the noun chunks run. """ words = ["This", "is", "a", "sentence"] doc = Doc(Vocab(), words=words, heads=[0] * len(words), deps=["dep"] * len(words)) with pytest.raises(NotImplementedError): list(doc[0:3].noun_chunks) @pytest.mark.issue(5152) def test_issue5152(): # Test that the comparison between a Span and a Token, goes well # There was a bug when the number of tokens in the span equaled the number of characters in the token (!) 
nlp = English() text = nlp("Talk about being boring!") text_var = nlp("Talk of being boring!") y = nlp("Let") span = text[0:3] # Talk about being span_2 = text[0:3] # Talk about being span_3 = text_var[0:3] # Talk of being token = y[0] # Let with pytest.warns(UserWarning): assert span.similarity(token) == 0.0 assert span.similarity(span_2) == 1.0 with pytest.warns(UserWarning): assert span_2.similarity(span_3) < 1.0 @pytest.mark.issue(6755) def test_issue6755(en_tokenizer): doc = en_tokenizer("This is a magnificent sentence.") span = doc[:0] assert span.text_with_ws == "" assert span.text == "" @pytest.mark.parametrize( "sentence, start_idx,end_idx,label", [("Welcome to Mumbai, my friend", 11, 17, "GPE")], ) @pytest.mark.issue(6815) def test_issue6815_1(sentence, start_idx, end_idx, label): nlp = English() doc = nlp(sentence) span = doc[:].char_span(start_idx, end_idx, label=label) assert span.label_ == label @pytest.mark.parametrize( "sentence, start_idx,end_idx,kb_id", [("Welcome to Mumbai, my friend", 11, 17, 5)] ) @pytest.mark.issue(6815) def test_issue6815_2(sentence, start_idx, end_idx, kb_id): nlp = English() doc = nlp(sentence) span = doc[:].char_span(start_idx, end_idx, kb_id=kb_id) assert span.kb_id == kb_id @pytest.mark.parametrize( "sentence, start_idx,end_idx,vector", [("Welcome to Mumbai, my friend", 11, 17, numpy.array([0.1, 0.2, 0.3]))], ) @pytest.mark.issue(6815) def test_issue6815_3(sentence, start_idx, end_idx, vector): nlp = English() doc = nlp(sentence) span = doc[:].char_span(start_idx, end_idx, vector=vector) assert (span.vector == vector).all() @pytest.mark.parametrize( "i_sent,i,j,text", [ (0, 0, len("This is a"), "This is a"), (1, 0, len("This is another"), "This is another"), (2, len("And "), len("And ") + len("a third"), "a third"), (0, 1, 2, None), ], ) def test_char_span(doc, i_sent, i, j, text): sents = list(doc.sents) span = sents[i_sent].char_span(i, j) if not text: assert not span else: assert span.text == text def test_char_span_attributes(doc): label = "LABEL" kb_id = "KB_ID" span_id = "SPAN_ID" span1 = doc.char_span(20, 45, label=label, kb_id=kb_id, span_id=span_id) span2 = doc[1:].char_span(15, 40, label=label, kb_id=kb_id, span_id=span_id) assert span1.text == span2.text assert span1.label_ == span2.label_ == label assert span1.kb_id_ == span2.kb_id_ == kb_id assert span1.id_ == span2.id_ == span_id def test_spans_sent_spans(doc): sents = list(doc.sents) assert sents[0].start == 0 assert sents[0].end == 5 assert len(sents) == 3 assert sum(len(sent) for sent in sents) == len(doc) def test_spans_root(doc): span = doc[2:4] assert len(span) == 2 assert span.text == "a sentence" assert span.root.text == "sentence" assert span.root.head.text == "is" def test_spans_string_fn(doc): span = doc[0:4] assert len(span) == 4 assert span.text == "This is a sentence" def test_spans_root2(en_tokenizer): text = "through North and South Carolina" heads = [0, 4, 1, 1, 0] deps = ["dep"] * len(heads) tokens = en_tokenizer(text) doc = Doc(tokens.vocab, words=[t.text for t in tokens], heads=heads, deps=deps) assert doc[-2:].root.text == "Carolina" def test_spans_span_sent(doc, doc_not_parsed): """Test span.sent property""" assert len(list(doc.sents)) assert doc[:2].sent.root.text == "is" assert doc[:2].sent.text == "This is a sentence." 
assert doc[6:7].sent.root.left_edge.text == "This" assert doc[0 : len(doc)].sent == list(doc.sents)[0] assert list(doc[0 : len(doc)].sents) == list(doc.sents) with pytest.raises(ValueError): doc_not_parsed[:2].sent # test on manual sbd doc_not_parsed[0].is_sent_start = True doc_not_parsed[5].is_sent_start = True assert doc_not_parsed[1:3].sent == doc_not_parsed[0:5] assert doc_not_parsed[10:14].sent == doc_not_parsed[5:] @pytest.mark.parametrize( "start,end,expected_sentence", [ (0, 14, "This is"), # Entire doc (1, 4, "This is"), # Overlapping with 2 sentences (0, 2, "This is"), # Beginning of the Doc. Full sentence (0, 1, "This is"), # Beginning of the Doc. Part of a sentence (10, 14, "And a"), # End of the Doc. Overlapping with 2 senteces (12, 14, "third."), # End of the Doc. Full sentence (1, 1, "This is"), # Empty Span ], ) def test_spans_span_sent_user_hooks(doc, start, end, expected_sentence): # Doc-level sents hook def user_hook(doc): return [doc[ii : ii + 2] for ii in range(0, len(doc), 2)] doc.user_hooks["sents"] = user_hook # Make sure doc-level sents hook works assert doc[start:end].sent.text == expected_sentence # Span-level sent hook doc.user_span_hooks["sent"] = lambda x: x # Now, span=level sent hook overrides the doc-level sents hook assert doc[start:end].sent == doc[start:end] def test_spans_lca_matrix(en_tokenizer): """Test span's lca matrix generation""" tokens = en_tokenizer("the lazy dog slept") doc = Doc( tokens.vocab, words=[t.text for t in tokens], heads=[2, 2, 3, 3], deps=["dep"] * 4, ) lca = doc[:2].get_lca_matrix() assert lca.shape == (2, 2) assert lca[0, 0] == 0 # the & the -> the assert lca[0, 1] == -1 # the & lazy -> dog (out of span) assert lca[1, 0] == -1 # lazy & the -> dog (out of span) assert lca[1, 1] == 1 # lazy & lazy -> lazy lca = doc[1:].get_lca_matrix() assert lca.shape == (3, 3) assert lca[0, 0] == 0 # lazy & lazy -> lazy assert lca[0, 1] == 1 # lazy & dog -> dog assert lca[0, 2] == 2 # lazy & slept -> slept lca = doc[2:].get_lca_matrix() assert lca.shape == (2, 2) assert lca[0, 0] == 0 # dog & dog -> dog assert lca[0, 1] == 1 # dog & slept -> slept assert lca[1, 0] == 1 # slept & dog -> slept assert lca[1, 1] == 1 # slept & slept -> slept # example from Span API docs tokens = en_tokenizer("I like New York in Autumn") doc = Doc( tokens.vocab, words=[t.text for t in tokens], heads=[1, 1, 3, 1, 3, 4], deps=["dep"] * len(tokens), ) lca = doc[1:4].get_lca_matrix() assert_array_equal(lca, numpy.asarray([[0, 0, 0], [0, 1, 2], [0, 2, 2]])) def test_span_similarity_match(): doc = Doc(Vocab(), words=["a", "b", "a", "b"]) span1 = doc[:2] span2 = doc[2:] with pytest.warns(UserWarning): assert span1.similarity(span2) == 1.0 assert span1.similarity(doc) == 0.0 assert span1[:1].similarity(doc.vocab["a"]) == 1.0 def test_spans_default_sentiment(en_tokenizer): """Test span.sentiment property's default averaging behaviour""" text = "good stuff bad stuff" tokens = en_tokenizer(text) tokens.vocab[tokens[0].text].sentiment = 3.0 tokens.vocab[tokens[2].text].sentiment = -2.0 doc = Doc(tokens.vocab, words=[t.text for t in tokens]) assert doc[:2].sentiment == 3.0 / 2 assert doc[-2:].sentiment == -2.0 / 2 assert doc[:-1].sentiment == (3.0 + -2) / 3.0 def test_spans_override_sentiment(en_tokenizer): """Test span.sentiment property's default averaging behaviour""" text = "good stuff bad stuff" tokens = en_tokenizer(text) tokens.vocab[tokens[0].text].sentiment = 3.0 tokens.vocab[tokens[2].text].sentiment = -2.0 doc = Doc(tokens.vocab, words=[t.text for t in tokens]) 
doc.user_span_hooks["sentiment"] = lambda span: 10.0 assert doc[:2].sentiment == 10.0 assert doc[-2:].sentiment == 10.0 assert doc[:-1].sentiment == 10.0 def test_spans_are_hashable(en_tokenizer): """Test spans can be hashed.""" text = "good stuff bad stuff" tokens = en_tokenizer(text) span1 = tokens[:2] span2 = tokens[2:4] assert hash(span1) != hash(span2) span3 = tokens[0:2] assert hash(span3) == hash(span1) def test_spans_by_character(doc): span1 = doc[1:-2] # default and specified alignment mode "strict" span2 = doc.char_span(span1.start_char, span1.end_char, label="GPE") assert span1.start_char == span2.start_char assert span1.end_char == span2.end_char assert span2.label_ == "GPE" span2 = doc.char_span( span1.start_char, span1.end_char, label="GPE", alignment_mode="strict" ) assert span1.start_char == span2.start_char assert span1.end_char == span2.end_char assert span2.label_ == "GPE" # alignment mode "contract" span2 = doc.char_span( span1.start_char - 3, span1.end_char, label="GPE", alignment_mode="contract" ) assert span1.start_char == span2.start_char assert span1.end_char == span2.end_char assert span2.label_ == "GPE" # alignment mode "expand" span2 = doc.char_span( span1.start_char + 1, span1.end_char, label="GPE", alignment_mode="expand" ) assert span1.start_char == span2.start_char assert span1.end_char == span2.end_char assert span2.label_ == "GPE" # unsupported alignment mode with pytest.raises(ValueError): span2 = doc.char_span( span1.start_char + 1, span1.end_char, label="GPE", alignment_mode="unk" ) # Span.char_span + alignment mode "contract" span2 = doc[0:2].char_span( span1.start_char - 3, span1.end_char, label="GPE", alignment_mode="contract" ) assert span1.start_char == span2.start_char assert span1.end_char == span2.end_char assert span2.label_ == "GPE" def test_span_to_array(doc): span = doc[1:-2] arr = span.to_array([ORTH, LENGTH]) assert arr.shape == (len(span), 2) assert arr[0, 0] == span[0].orth assert arr[0, 1] == len(span[0]) def test_span_as_doc(doc): span = doc[4:10] span_doc = span.as_doc() assert span.text == span_doc.text.strip() assert isinstance(span_doc, doc.__class__) assert span_doc is not doc assert span_doc[0].idx == 0 # partial initial entity is removed assert len(span_doc.ents) == 0 # full entity is preserved span_doc = doc[2:10].as_doc() assert len(span_doc.ents) == 1 # partial final entity is removed span_doc = doc[0:5].as_doc() assert len(span_doc.ents) == 0 @pytest.mark.usefixtures("clean_underscore") def test_span_as_doc_user_data(doc): """Test that the user_data can be preserved (but not by default).""" my_key = "my_info" my_value = 342 doc.user_data[my_key] = my_value Token.set_extension("is_x", default=False) doc[7]._.is_x = True span = doc[4:10] span_doc_with = span.as_doc(copy_user_data=True) span_doc_without = span.as_doc() assert doc.user_data.get(my_key, None) is my_value assert span_doc_with.user_data.get(my_key, None) is my_value assert span_doc_without.user_data.get(my_key, None) is None for i in range(len(span_doc_with)): if i != 3: assert span_doc_with[i]._.is_x is False else: assert span_doc_with[i]._.is_x is True assert not any([t._.is_x for t in span_doc_without]) def test_span_string_label_kb_id(doc): span = Span(doc, 0, 1, label="hello", kb_id="Q342") assert span.label_ == "hello" assert span.label == doc.vocab.strings["hello"] assert span.kb_id_ == "Q342" assert span.kb_id == doc.vocab.strings["Q342"] def test_span_string_label_id(doc): span = Span(doc, 0, 1, label="hello", span_id="Q342") assert span.label_ == "hello" 
assert span.label == doc.vocab.strings["hello"] assert span.id_ == "Q342" assert span.id == doc.vocab.strings["Q342"] def test_span_attrs_writable(doc): span = Span(doc, 0, 1) span.label_ = "label" span.kb_id_ = "kb_id" span.id_ = "id" def test_span_ents_property(doc): doc.ents = [ (doc.vocab.strings["PRODUCT"], 0, 1), (doc.vocab.strings["PRODUCT"], 7, 8), (doc.vocab.strings["PRODUCT"], 11, 14), ] assert len(list(doc.ents)) == 3 sentences = list(doc.sents) assert len(sentences) == 3 assert len(sentences[0].ents) == 1 # First sentence, also tests start of sentence assert sentences[0].ents[0].text == "This" assert sentences[0].ents[0].label_ == "PRODUCT" assert sentences[0].ents[0].start == 0 assert sentences[0].ents[0].end == 1 # Second sentence assert len(sentences[1].ents) == 1 assert sentences[1].ents[0].text == "another" assert sentences[1].ents[0].label_ == "PRODUCT" assert sentences[1].ents[0].start == 7 assert sentences[1].ents[0].end == 8 # Third sentence ents, Also tests end of sentence assert sentences[2].ents[0].text == "a third." assert sentences[2].ents[0].label_ == "PRODUCT" assert sentences[2].ents[0].start == 11 assert sentences[2].ents[0].end == 14 def test_filter_spans(doc): # Test filtering duplicates spans = [doc[1:4], doc[6:8], doc[1:4], doc[10:14]] filtered = filter_spans(spans) assert len(filtered) == 3 assert filtered[0].start == 1 and filtered[0].end == 4 assert filtered[1].start == 6 and filtered[1].end == 8 assert filtered[2].start == 10 and filtered[2].end == 14 # Test filtering overlaps with longest preference spans = [doc[1:4], doc[1:3], doc[5:10], doc[7:9], doc[1:4]] filtered = filter_spans(spans) assert len(filtered) == 2 assert len(filtered[0]) == 3 assert len(filtered[1]) == 5 assert filtered[0].start == 1 and filtered[0].end == 4 assert filtered[1].start == 5 and filtered[1].end == 10 # Test filtering overlaps with earlier preference for identical length spans = [doc[1:4], doc[2:5], doc[5:10], doc[7:9], doc[1:4]] filtered = filter_spans(spans) assert len(filtered) == 2 assert len(filtered[0]) == 3 assert len(filtered[1]) == 5 assert filtered[0].start == 1 and filtered[0].end == 4 assert filtered[1].start == 5 and filtered[1].end == 10 def test_span_eq_hash(doc, doc_not_parsed): assert doc[0:2] == doc[0:2] assert doc[0:2] != doc[1:3] assert doc[0:2] != doc_not_parsed[0:2] assert hash(doc[0:2]) == hash(doc[0:2]) assert hash(doc[0:2]) != hash(doc[1:3]) assert hash(doc[0:2]) != hash(doc_not_parsed[0:2]) # check that an out-of-bounds is not equivalent to the span of the full doc assert doc[0 : len(doc)] != doc[len(doc) : len(doc) + 1] def test_span_boundaries(doc): start = 1 end = 5 span = doc[start:end] for i in range(start, end): assert span[i - start] == doc[i] with pytest.raises(IndexError): span[-5] with pytest.raises(IndexError): span[5] empty_span_0 = doc[0:0] assert empty_span_0.text == "" assert empty_span_0.start == 0 assert empty_span_0.end == 0 assert empty_span_0.start_char == 0 assert empty_span_0.end_char == 0 empty_span_1 = doc[1:1] assert empty_span_1.text == "" assert empty_span_1.start == 1 assert empty_span_1.end == 1 assert empty_span_1.start_char == empty_span_1.end_char oob_span_start = doc[-len(doc) - 1 : -len(doc) - 10] assert oob_span_start.text == "" assert oob_span_start.start == 0 assert oob_span_start.end == 0 assert oob_span_start.start_char == 0 assert oob_span_start.end_char == 0 oob_span_end = doc[len(doc) + 1 : len(doc) + 10] assert oob_span_end.text == "" assert oob_span_end.start == len(doc) assert oob_span_end.end == 
len(doc) assert oob_span_end.start_char == len(doc.text) assert oob_span_end.end_char == len(doc.text) def test_span_lemma(doc): # span lemmas should have the same number of spaces as the span sp = doc[1:5] assert len(sp.text.split(" ")) == len(sp.lemma_.split(" ")) def test_sent(en_tokenizer): doc = en_tokenizer("Check span.sent raises error if doc is not sentencized.") span = doc[1:3] assert not span.doc.has_annotation("SENT_START") with pytest.raises(ValueError): span.sent def test_span_with_vectors(doc): ops = get_current_ops() prev_vectors = doc.vocab.vectors vectors = [ ("apple", ops.asarray([1, 2, 3])), ("orange", ops.asarray([-1, -2, -3])), ("And", ops.asarray([-1, -1, -1])), ("juice", ops.asarray([5, 5, 10])), ("pie", ops.asarray([7, 6.3, 8.9])), ] add_vecs_to_vocab(doc.vocab, vectors) # 0-length span assert_array_equal(ops.to_numpy(doc[0:0].vector), numpy.zeros((3,))) # longer span with no vector assert_array_equal(ops.to_numpy(doc[0:4].vector), numpy.zeros((3,))) # single-token span with vector assert_array_equal(ops.to_numpy(doc[10:11].vector), [-1, -1, -1]) doc.vocab.vectors = prev_vectors # fmt: off def test_span_comparison(doc): # Identical start, end, only differ in label and kb_id assert Span(doc, 0, 3) == Span(doc, 0, 3) assert Span(doc, 0, 3, "LABEL") == Span(doc, 0, 3, "LABEL") assert Span(doc, 0, 3, "LABEL", kb_id="KB_ID") == Span(doc, 0, 3, "LABEL", kb_id="KB_ID") assert Span(doc, 0, 3) != Span(doc, 0, 3, "LABEL") assert Span(doc, 0, 3) != Span(doc, 0, 3, "LABEL", kb_id="KB_ID") assert Span(doc, 0, 3, "LABEL") != Span(doc, 0, 3, "LABEL", kb_id="KB_ID") assert Span(doc, 0, 3) <= Span(doc, 0, 3) and Span(doc, 0, 3) >= Span(doc, 0, 3) assert Span(doc, 0, 3, "LABEL") <= Span(doc, 0, 3, "LABEL") and Span(doc, 0, 3, "LABEL") >= Span(doc, 0, 3, "LABEL") assert Span(doc, 0, 3, "LABEL", kb_id="KB_ID") <= Span(doc, 0, 3, "LABEL", kb_id="KB_ID") assert Span(doc, 0, 3, "LABEL", kb_id="KB_ID") >= Span(doc, 0, 3, "LABEL", kb_id="KB_ID") assert (Span(doc, 0, 3) < Span(doc, 0, 3, "", kb_id="KB_ID") < Span(doc, 0, 3, "LABEL") < Span(doc, 0, 3, "LABEL", kb_id="KB_ID")) assert (Span(doc, 0, 3) <= Span(doc, 0, 3, "", kb_id="KB_ID") <= Span(doc, 0, 3, "LABEL") <= Span(doc, 0, 3, "LABEL", kb_id="KB_ID")) assert (Span(doc, 0, 3, "LABEL", kb_id="KB_ID") > Span(doc, 0, 3, "LABEL") > Span(doc, 0, 3, "", kb_id="KB_ID") > Span(doc, 0, 3)) assert (Span(doc, 0, 3, "LABEL", kb_id="KB_ID") >= Span(doc, 0, 3, "LABEL") >= Span(doc, 0, 3, "", kb_id="KB_ID") >= Span(doc, 0, 3)) # Different end assert Span(doc, 0, 3, "LABEL", kb_id="KB_ID") < Span(doc, 0, 4, "LABEL", kb_id="KB_ID") assert Span(doc, 0, 3, "LABEL", kb_id="KB_ID") < Span(doc, 0, 4) assert Span(doc, 0, 3, "LABEL", kb_id="KB_ID") <= Span(doc, 0, 4) assert Span(doc, 0, 4) > Span(doc, 0, 3, "LABEL", kb_id="KB_ID") assert Span(doc, 0, 4) >= Span(doc, 0, 3, "LABEL", kb_id="KB_ID") # Different start assert Span(doc, 0, 3, "LABEL", kb_id="KB_ID") != Span(doc, 1, 3, "LABEL", kb_id="KB_ID") assert Span(doc, 0, 3, "LABEL", kb_id="KB_ID") < Span(doc, 1, 3) assert Span(doc, 0, 3, "LABEL", kb_id="KB_ID") <= Span(doc, 1, 3) assert Span(doc, 1, 3) > Span(doc, 0, 3, "LABEL", kb_id="KB_ID") assert Span(doc, 1, 3) >= Span(doc, 0, 3, "LABEL", kb_id="KB_ID") # Different start & different end assert Span(doc, 0, 4, "LABEL", kb_id="KB_ID") != Span(doc, 1, 3, "LABEL", kb_id="KB_ID") assert Span(doc, 0, 4, "LABEL", kb_id="KB_ID") < Span(doc, 1, 3) assert Span(doc, 0, 4, "LABEL", kb_id="KB_ID") <= Span(doc, 1, 3) assert Span(doc, 1, 3) > Span(doc, 0, 4, "LABEL", 
kb_id="KB_ID") assert Span(doc, 1, 3) >= Span(doc, 0, 4, "LABEL", kb_id="KB_ID") # Different id assert Span(doc, 1, 3, span_id="AAA") < Span(doc, 1, 3, span_id="BBB") # fmt: on @pytest.mark.parametrize( "start,end,expected_sentences,expected_sentences_with_hook", [ (0, 14, 3, 7), # Entire doc (3, 6, 2, 2), # Overlapping with 2 sentences (0, 4, 1, 2), # Beginning of the Doc. Full sentence (0, 3, 1, 2), # Beginning of the Doc. Part of a sentence (9, 14, 2, 3), # End of the Doc. Overlapping with 2 senteces (10, 14, 1, 2), # End of the Doc. Full sentence (11, 14, 1, 2), # End of the Doc. Partial sentence (0, 0, 1, 1), # Empty Span ], ) def test_span_sents(doc, start, end, expected_sentences, expected_sentences_with_hook): assert len(list(doc[start:end].sents)) == expected_sentences def user_hook(doc): return [doc[ii : ii + 2] for ii in range(0, len(doc), 2)] doc.user_hooks["sents"] = user_hook assert len(list(doc[start:end].sents)) == expected_sentences_with_hook doc.user_span_hooks["sents"] = lambda x: [x] assert list(doc[start:end].sents)[0] == doc[start:end] assert len(list(doc[start:end].sents)) == 1 def test_span_sents_not_parsed(doc_not_parsed): with pytest.raises(ValueError): list(Span(doc_not_parsed, 0, 3).sents) def test_span_group_copy(doc): doc.spans["test"] = [doc[0:1], doc[2:4]] assert len(doc.spans["test"]) == 2 doc_copy = doc.copy() # check that the spans were indeed copied assert len(doc_copy.spans["test"]) == 2 # add a new span to the original doc doc.spans["test"].append(doc[3:4]) assert len(doc.spans["test"]) == 3 # check that the copy spans were not modified and this is an isolated doc assert len(doc_copy.spans["test"]) == 2 def test_for_partial_ent_sents(): """Spans may be associated with multiple sentences. These .sents should always be complete, not partial, sentences, which this tests for. """ doc = Doc( English().vocab, words=["Mahler's", "Symphony", "No.", "8", "was", "beautiful."], sent_starts=[1, 0, 0, 1, 0, 0], ) doc.set_ents([Span(doc, 1, 4, "WORK")]) # The specified entity is associated with both sentences in this doc, so we expect all sentences in the doc to be # equal to the sentences referenced in ent.sents. for doc_sent, ent_sent in zip(doc.sents, doc.ents[0].sents): assert doc_sent == ent_sent def test_for_no_ent_sents(): """Span.sents() should set .sents correctly, even if Span in question is trailing and doesn't form a full sentence. """ doc = Doc( English().vocab, words=["This", "is", "a", "test.", "ENTITY"], sent_starts=[1, 0, 0, 0, 1], ) doc.set_ents([Span(doc, 4, 5, "WORK")]) sents = list(doc.ents[0].sents) assert len(sents) == 1 assert str(sents[0]) == str(doc.ents[0].sent) == "ENTITY"
file_length: 25,422 | avg_line_length: 33.63624 | max_line_length: 137 | extension_type: py
repo: spaCy | file: spaCy-master/spacy/tests/doc/test_span_group.py
from random import Random from typing import List import pytest from spacy.matcher import Matcher from spacy.tokens import Doc, Span, SpanGroup from spacy.util import filter_spans @pytest.fixture def doc(en_tokenizer): doc = en_tokenizer("0 1 2 3 4 5 6") matcher = Matcher(en_tokenizer.vocab, validate=True) # fmt: off matcher.add("4", [[{}, {}, {}, {}]]) matcher.add("2", [[{}, {}, ]]) matcher.add("1", [[{}, ]]) # fmt: on matches = matcher(doc) spans = [] for match in matches: spans.append( Span(doc, match[1], match[2], en_tokenizer.vocab.strings[match[0]]) ) Random(42).shuffle(spans) doc.spans["SPANS"] = SpanGroup( doc, name="SPANS", attrs={"key": "value"}, spans=spans ) return doc @pytest.fixture def other_doc(en_tokenizer): doc = en_tokenizer("0 1 2 3 4 5 6") matcher = Matcher(en_tokenizer.vocab, validate=True) # fmt: off matcher.add("4", [[{}, {}, {}, {}]]) matcher.add("2", [[{}, {}, ]]) matcher.add("1", [[{}, ]]) # fmt: on matches = matcher(doc) spans = [] for match in matches: spans.append( Span(doc, match[1], match[2], en_tokenizer.vocab.strings[match[0]]) ) Random(42).shuffle(spans) doc.spans["SPANS"] = SpanGroup( doc, name="SPANS", attrs={"key": "value"}, spans=spans ) return doc @pytest.fixture def span_group(en_tokenizer): doc = en_tokenizer("0 1 2 3 4 5 6") matcher = Matcher(en_tokenizer.vocab, validate=True) # fmt: off matcher.add("4", [[{}, {}, {}, {}]]) matcher.add("2", [[{}, {}, ]]) matcher.add("1", [[{}, ]]) # fmt: on matches = matcher(doc) spans = [] for match in matches: spans.append( Span(doc, match[1], match[2], en_tokenizer.vocab.strings[match[0]]) ) Random(42).shuffle(spans) doc.spans["SPANS"] = SpanGroup( doc, name="SPANS", attrs={"key": "value"}, spans=spans ) def test_span_group_copy(doc): span_group = doc.spans["SPANS"] clone = span_group.copy() assert clone != span_group assert clone.name == span_group.name assert clone.attrs == span_group.attrs assert len(clone) == len(span_group) assert list(span_group) == list(clone) clone.name = "new_name" clone.attrs["key"] = "new_value" clone.append(Span(doc, 0, 6, "LABEL")) assert clone.name != span_group.name assert clone.attrs != span_group.attrs assert span_group.attrs["key"] == "value" assert list(span_group) != list(clone) # can't copy if the character offsets don't align to tokens doc2 = Doc(doc.vocab, words=[t.text + "x" for t in doc]) with pytest.raises(ValueError): span_group.copy(doc=doc2) # can copy with valid character offsets despite different tokenization doc3 = doc.copy() with doc3.retokenize() as retokenizer: retokenizer.merge(doc3[0:2]) retokenizer.merge(doc3[3:6]) span_group = SpanGroup(doc, spans=[doc[0:6], doc[3:6]]) for span1, span2 in zip(span_group, span_group.copy(doc=doc3)): assert span1.start_char == span2.start_char assert span1.end_char == span2.end_char def test_span_group_set_item(doc, other_doc): span_group = doc.spans["SPANS"] index = 5 span = span_group[index] span.label_ = "NEW LABEL" span.kb_id = doc.vocab.strings["KB_ID"] assert span_group[index].label != span.label assert span_group[index].kb_id != span.kb_id span_group[index] = span assert span_group[index].start == span.start assert span_group[index].end == span.end assert span_group[index].label == span.label assert span_group[index].kb_id == span.kb_id assert span_group[index] == span with pytest.raises(IndexError): span_group[-100] = span with pytest.raises(IndexError): span_group[100] = span span = Span(other_doc, 0, 2) with pytest.raises(ValueError): span_group[index] = span def test_span_group_has_overlap(doc): span_group = 
doc.spans["SPANS"] assert span_group.has_overlap def test_span_group_concat(doc, other_doc): span_group_1 = doc.spans["SPANS"] spans = [doc[0:5], doc[0:6]] span_group_2 = SpanGroup( doc, name="MORE_SPANS", attrs={"key": "new_value", "new_key": "new_value"}, spans=spans, ) span_group_3 = span_group_1._concat(span_group_2) assert span_group_3.name == span_group_1.name assert span_group_3.attrs == {"key": "value", "new_key": "new_value"} span_list_expected = list(span_group_1) + list(span_group_2) assert list(span_group_3) == list(span_list_expected) # Inplace span_list_expected = list(span_group_1) + list(span_group_2) span_group_3 = span_group_1._concat(span_group_2, inplace=True) assert span_group_3 == span_group_1 assert span_group_3.name == span_group_1.name assert span_group_3.attrs == {"key": "value", "new_key": "new_value"} assert list(span_group_3) == list(span_list_expected) span_group_2 = other_doc.spans["SPANS"] with pytest.raises(ValueError): span_group_1._concat(span_group_2) def test_span_doc_delitem(doc): span_group = doc.spans["SPANS"] length = len(span_group) index = 5 span = span_group[index] next_span = span_group[index + 1] del span_group[index] assert len(span_group) == length - 1 assert span_group[index] != span assert span_group[index] == next_span with pytest.raises(IndexError): del span_group[-100] with pytest.raises(IndexError): del span_group[100] def test_span_group_add(doc): span_group_1 = doc.spans["SPANS"] spans = [doc[0:5], doc[0:6]] span_group_2 = SpanGroup( doc, name="MORE_SPANS", attrs={"key": "new_value", "new_key": "new_value"}, spans=spans, ) span_group_3_expected = span_group_1._concat(span_group_2) span_group_3 = span_group_1 + span_group_2 assert len(span_group_3) == len(span_group_3_expected) assert span_group_3.attrs == {"key": "value", "new_key": "new_value"} assert list(span_group_3) == list(span_group_3_expected) def test_span_group_iadd(doc): span_group_1 = doc.spans["SPANS"].copy() spans = [doc[0:5], doc[0:6]] span_group_2 = SpanGroup( doc, name="MORE_SPANS", attrs={"key": "new_value", "new_key": "new_value"}, spans=spans, ) span_group_1_expected = span_group_1._concat(span_group_2) span_group_1 += span_group_2 assert len(span_group_1) == len(span_group_1_expected) assert span_group_1.attrs == {"key": "value", "new_key": "new_value"} assert list(span_group_1) == list(span_group_1_expected) span_group_1 = doc.spans["SPANS"].copy() span_group_1 += spans assert len(span_group_1) == len(span_group_1_expected) assert span_group_1.attrs == { "key": "value", } assert list(span_group_1) == list(span_group_1_expected) def test_span_group_extend(doc): span_group_1 = doc.spans["SPANS"].copy() spans = [doc[0:5], doc[0:6]] span_group_2 = SpanGroup( doc, name="MORE_SPANS", attrs={"key": "new_value", "new_key": "new_value"}, spans=spans, ) span_group_1_expected = span_group_1._concat(span_group_2) span_group_1.extend(span_group_2) assert len(span_group_1) == len(span_group_1_expected) assert span_group_1.attrs == {"key": "value", "new_key": "new_value"} assert list(span_group_1) == list(span_group_1_expected) span_group_1 = doc.spans["SPANS"] span_group_1.extend(spans) assert len(span_group_1) == len(span_group_1_expected) assert span_group_1.attrs == {"key": "value"} assert list(span_group_1) == list(span_group_1_expected) def test_span_group_dealloc(span_group): with pytest.raises(AttributeError): print(span_group.doc) @pytest.mark.issue(11975) def test_span_group_typing(doc: Doc): """Tests whether typing of `SpanGroup` as `Iterable[Span]`-like object is 
accepted by mypy.""" span_group: SpanGroup = doc.spans["SPANS"] spans: List[Span] = list(span_group) for i, span in enumerate(span_group): assert span == span_group[i] == spans[i] filter_spans(span_group) def test_span_group_init_doc(en_tokenizer): """Test that all spans must come from the specified doc.""" doc1 = en_tokenizer("a b c") doc2 = en_tokenizer("a b c") span_group = SpanGroup(doc1, spans=[doc1[0:1], doc1[1:2]]) with pytest.raises(ValueError): span_group = SpanGroup(doc1, spans=[doc1[0:1], doc2[1:2]])
file_length: 8,667 | avg_line_length: 29.846975 | max_line_length: 98 | extension_type: py
repo: spaCy | file: spaCy-master/spacy/tests/doc/test_token_api.py
import numpy import pytest from spacy.attrs import IS_ALPHA, IS_DIGIT, IS_LOWER, IS_PUNCT, IS_STOP, IS_TITLE from spacy.symbols import VERB from spacy.tokens import Doc from spacy.training import Example from spacy.vocab import Vocab @pytest.fixture def doc(en_vocab): # fmt: off words = ["This", "is", "a", "sentence", ".", "This", "is", "another", "sentence", ".", "And", "a", "third", "."] heads = [1, 1, 3, 1, 1, 6, 6, 8, 6, 6, 10, 12, 10, 12] deps = ["nsubj", "ROOT", "det", "attr", "punct", "nsubj", "ROOT", "det", "attr", "punct", "ROOT", "det", "npadvmod", "punct"] # fmt: on return Doc(en_vocab, words=words, heads=heads, deps=deps) def test_doc_token_api_strings(en_vocab): words = ["Give", "it", "back", "!", "He", "pleaded", "."] pos = ["VERB", "PRON", "PART", "PUNCT", "PRON", "VERB", "PUNCT"] heads = [0, 0, 0, 0, 5, 5, 5] deps = ["ROOT", "dobj", "prt", "punct", "nsubj", "ROOT", "punct"] doc = Doc(en_vocab, words=words, pos=pos, heads=heads, deps=deps) assert doc[0].orth_ == "Give" assert doc[0].text == "Give" assert doc[0].text_with_ws == "Give " assert doc[0].lower_ == "give" assert doc[0].shape_ == "Xxxx" assert doc[0].prefix_ == "G" assert doc[0].suffix_ == "ive" assert doc[0].pos_ == "VERB" assert doc[0].dep_ == "ROOT" def test_doc_token_api_flags(en_tokenizer): text = "Give it back! He pleaded." tokens = en_tokenizer(text) assert tokens[0].check_flag(IS_ALPHA) assert not tokens[0].check_flag(IS_DIGIT) assert tokens[0].check_flag(IS_TITLE) assert tokens[1].check_flag(IS_LOWER) assert tokens[3].check_flag(IS_PUNCT) assert tokens[2].check_flag(IS_STOP) assert not tokens[5].check_flag(IS_STOP) # TODO: Test more of these, esp. if a bug is found @pytest.mark.parametrize("text", ["Give it back! He pleaded."]) def test_doc_token_api_prob_inherited_from_vocab(en_tokenizer, text): word = text.split()[0] en_tokenizer.vocab[word].prob = -1 tokens = en_tokenizer(text) assert tokens[0].prob != 0 @pytest.mark.parametrize("text", ["one two"]) def test_doc_token_api_str_builtin(en_tokenizer, text): tokens = en_tokenizer(text) assert str(tokens[0]) == text.split(" ")[0] assert str(tokens[1]) == text.split(" ")[1] def test_doc_token_api_is_properties(en_vocab): doc = Doc(en_vocab, words=["Hi", ",", "my", "email", "is", "[email protected]"]) assert doc[0].is_title assert doc[0].is_alpha assert not doc[0].is_digit assert doc[1].is_punct assert doc[3].is_ascii assert not doc[3].like_url assert doc[4].is_lower assert doc[5].like_email def test_doc_token_api_vectors(): vocab = Vocab() vocab.reset_vectors(width=2) vocab.set_vector("apples", vector=numpy.asarray([0.0, 2.0], dtype="f")) vocab.set_vector("oranges", vector=numpy.asarray([0.0, 1.0], dtype="f")) doc = Doc(vocab, words=["apples", "oranges", "oov"]) assert doc.has_vector assert doc[0].has_vector assert doc[1].has_vector assert not doc[2].has_vector apples_norm = (0 * 0 + 2 * 2) ** 0.5 oranges_norm = (0 * 0 + 1 * 1) ** 0.5 cosine = ((0 * 0) + (2 * 1)) / (apples_norm * oranges_norm) assert doc[0].similarity(doc[1]) == cosine def test_doc_token_api_ancestors(en_vocab): # the structure of this sentence depends on the English annotation scheme words = ["Yesterday", "I", "saw", "a", "dog", "that", "barked", "loudly", "."] heads = [2, 2, 2, 4, 2, 6, 4, 6, 2] deps = ["dep"] * len(heads) doc = Doc(en_vocab, words=words, heads=heads, deps=deps) assert [t.text for t in doc[6].ancestors] == ["dog", "saw"] assert [t.text for t in doc[1].ancestors] == ["saw"] assert [t.text for t in doc[2].ancestors] == [] assert doc[2].is_ancestor(doc[7]) assert not 
doc[6].is_ancestor(doc[2]) def test_doc_token_api_head_setter(en_vocab): words = ["Yesterday", "I", "saw", "a", "dog", "that", "barked", "loudly", "."] heads = [2, 2, 2, 4, 2, 6, 4, 6, 2] deps = ["dep"] * len(heads) doc = Doc(en_vocab, words=words, heads=heads, deps=deps) assert doc[6].n_lefts == 1 assert doc[6].n_rights == 1 assert doc[6].left_edge.i == 5 assert doc[6].right_edge.i == 7 assert doc[4].n_lefts == 1 assert doc[4].n_rights == 1 assert doc[4].left_edge.i == 3 assert doc[4].right_edge.i == 7 assert doc[3].n_lefts == 0 assert doc[3].n_rights == 0 assert doc[3].left_edge.i == 3 assert doc[3].right_edge.i == 3 assert doc[2].left_edge.i == 0 assert doc[2].right_edge.i == 8 doc[6].head = doc[3] assert doc[6].n_lefts == 1 assert doc[6].n_rights == 1 assert doc[6].left_edge.i == 5 assert doc[6].right_edge.i == 7 assert doc[3].n_lefts == 0 assert doc[3].n_rights == 1 assert doc[3].left_edge.i == 3 assert doc[3].right_edge.i == 7 assert doc[4].n_lefts == 1 assert doc[4].n_rights == 0 assert doc[4].left_edge.i == 3 assert doc[4].right_edge.i == 7 assert doc[2].left_edge.i == 0 assert doc[2].right_edge.i == 8 doc[0].head = doc[5] assert doc[5].left_edge.i == 0 assert doc[6].left_edge.i == 0 assert doc[3].left_edge.i == 0 assert doc[4].left_edge.i == 0 assert doc[2].left_edge.i == 0 # head token must be from the same document doc2 = Doc(en_vocab, words=words, heads=heads, deps=["dep"] * len(heads)) with pytest.raises(ValueError): doc[0].head = doc2[0] # test sentence starts when two sentences are joined # fmt: off words = ["This", "is", "one", "sentence", ".", "This", "is", "another", "sentence", "."] heads = [0, 0, 0, 0, 0, 5, 5, 5, 5, 5] # fmt: on doc = Doc(en_vocab, words=words, heads=heads, deps=["dep"] * len(heads)) # initially two sentences assert doc[0].is_sent_start assert doc[5].is_sent_start assert doc[0].left_edge == doc[0] assert doc[0].right_edge == doc[4] assert doc[5].left_edge == doc[5] assert doc[5].right_edge == doc[9] # modifying with a sentence doesn't change sent starts doc[2].head = doc[3] assert doc[0].is_sent_start assert doc[5].is_sent_start assert doc[0].left_edge == doc[0] assert doc[0].right_edge == doc[4] assert doc[5].left_edge == doc[5] assert doc[5].right_edge == doc[9] # attach the second sentence to the first, resulting in one sentence doc[5].head = doc[0] assert doc[0].is_sent_start assert not doc[5].is_sent_start assert doc[0].left_edge == doc[0] assert doc[0].right_edge == doc[9] def test_is_sent_start(en_tokenizer): doc = en_tokenizer("This is a sentence. This is another.") assert doc[5].is_sent_start is None doc[5].is_sent_start = True assert doc[5].is_sent_start is True assert len(list(doc.sents)) == 2 def test_is_sent_end(en_tokenizer): doc = en_tokenizer("This is a sentence. This is another.") assert doc[4].is_sent_end is None doc[5].is_sent_start = True assert doc[4].is_sent_end is True assert len(list(doc.sents)) == 2 def test_set_pos(): doc = Doc(Vocab(), words=["hello", "world"]) doc[0].pos_ = "NOUN" assert doc[0].pos_ == "NOUN" doc[1].pos = VERB assert doc[1].pos_ == "VERB" def test_set_invalid_pos(): doc = Doc(Vocab(), words=["hello", "world"]) with pytest.raises(ValueError): doc[0].pos_ = "blah" def test_tokens_sent(doc): """Test token.sent property""" assert len(list(doc.sents)) == 3 assert doc[1].sent.text == "This is a sentence ." assert doc[7].sent.text == "This is another sentence ." 
assert doc[1].sent.root.left_edge.text == "This" assert doc[7].sent.root.left_edge.text == "This" def test_token0_has_sent_start_true(): doc = Doc(Vocab(), words=["hello", "world"]) assert doc[0].is_sent_start is True assert doc[1].is_sent_start is None assert not doc.has_annotation("SENT_START") def test_tokenlast_has_sent_end_true(): doc = Doc(Vocab(), words=["hello", "world"]) assert doc[0].is_sent_end is None assert doc[1].is_sent_end is True assert not doc.has_annotation("SENT_START") def test_token_api_conjuncts_chain(en_vocab): words = ["The", "boy", "and", "the", "girl", "and", "the", "man", "went", "."] heads = [1, 8, 1, 4, 1, 4, 7, 4, 8, 8] deps = ["det", "nsubj", "cc", "det", "conj", "cc", "det", "conj", "ROOT", "punct"] doc = Doc(en_vocab, words=words, heads=heads, deps=deps) assert [w.text for w in doc[1].conjuncts] == ["girl", "man"] assert [w.text for w in doc[4].conjuncts] == ["boy", "man"] assert [w.text for w in doc[7].conjuncts] == ["boy", "girl"] def test_token_api_conjuncts_simple(en_vocab): words = ["They", "came", "and", "went", "."] heads = [1, 1, 1, 1, 3] deps = ["nsubj", "ROOT", "cc", "conj", "dep"] doc = Doc(en_vocab, words=words, heads=heads, deps=deps) assert [w.text for w in doc[1].conjuncts] == ["went"] assert [w.text for w in doc[3].conjuncts] == ["came"] def test_token_api_non_conjuncts(en_vocab): words = ["They", "came", "."] heads = [1, 1, 1] deps = ["nsubj", "ROOT", "punct"] doc = Doc(en_vocab, words=words, heads=heads, deps=deps) assert [w.text for w in doc[0].conjuncts] == [] assert [w.text for w in doc[1].conjuncts] == [] def test_missing_head_dep(en_vocab): """Check that the Doc constructor and Example.from_dict parse missing information the same""" heads = [1, 1, 1, 1, 2, None] # element 5 is missing deps = ["", "ROOT", "dobj", "cc", "conj", None] # element 0 and 5 are missing words = ["I", "like", "London", "and", "Berlin", "."] doc = Doc(en_vocab, words=words, heads=heads, deps=deps) pred_has_heads = [t.has_head() for t in doc] pred_has_deps = [t.has_dep() for t in doc] pred_heads = [t.head.i for t in doc] pred_deps = [t.dep_ for t in doc] pred_sent_starts = [t.is_sent_start for t in doc] assert pred_has_heads == [False, True, True, True, True, False] assert pred_has_deps == [False, True, True, True, True, False] assert pred_heads[1:5] == [1, 1, 1, 2] assert pred_deps[1:5] == ["ROOT", "dobj", "cc", "conj"] assert pred_sent_starts == [True, False, False, False, False, False] example = Example.from_dict(doc, {"heads": heads, "deps": deps}) ref_has_heads = [t.has_head() for t in example.reference] ref_has_deps = [t.has_dep() for t in example.reference] ref_heads = [t.head.i for t in example.reference] ref_deps = [t.dep_ for t in example.reference] ref_sent_starts = [t.is_sent_start for t in example.reference] assert ref_has_heads == pred_has_heads assert ref_has_deps == pred_has_heads assert ref_heads == pred_heads assert ref_deps == pred_deps assert ref_sent_starts == pred_sent_starts # check that the aligned parse preserves the missing information aligned_heads, aligned_deps = example.get_aligned_parse(projectivize=True) assert aligned_deps[0] == ref_deps[0] assert aligned_heads[0] == ref_heads[0] assert aligned_deps[5] == ref_deps[5] assert aligned_heads[5] == ref_heads[5]
file_length: 11,165 | avg_line_length: 36.59596 | max_line_length: 116 | extension_type: py
repo: spaCy | file: spaCy-master/spacy/tests/doc/test_underscore.py
import pytest
from mock import Mock

from spacy.tokens import Doc, Span, Token
from spacy.tokens.underscore import Underscore


@pytest.fixture(scope="function", autouse=True)
def clean_underscore():
    # reset the Underscore object after the test, to avoid having state copied across tests
    yield
    Underscore.doc_extensions = {}
    Underscore.span_extensions = {}
    Underscore.token_extensions = {}


def test_create_doc_underscore():
    doc = Mock()
    doc.doc = doc
    uscore = Underscore(Underscore.doc_extensions, doc)
    assert uscore._doc is doc
    assert uscore._start is None
    assert uscore._end is None


def test_doc_underscore_getattr_setattr():
    doc = Mock()
    doc.doc = doc
    doc.user_data = {}
    Underscore.doc_extensions["hello"] = (False, None, None, None)
    doc._ = Underscore(Underscore.doc_extensions, doc)
    assert doc._.hello is False
    doc._.hello = True
    assert doc._.hello is True


def test_create_span_underscore():
    span = Mock(doc=Mock(), start=0, end=2)
    uscore = Underscore(
        Underscore.span_extensions, span, start=span.start, end=span.end
    )
    assert uscore._doc is span.doc
    assert uscore._start is span.start
    assert uscore._end is span.end


def test_span_underscore_getter_setter():
    span = Mock(doc=Mock(), start=0, end=2)
    Underscore.span_extensions["hello"] = (
        None,
        None,
        lambda s: (s.start, "hi"),
        lambda s, value: setattr(s, "start", value),
    )
    span._ = Underscore(
        Underscore.span_extensions, span, start=span.start, end=span.end
    )
    assert span._.hello == (0, "hi")
    span._.hello = 1
    assert span._.hello == (1, "hi")


def test_token_underscore_method():
    token = Mock(doc=Mock(), idx=7, say_cheese=lambda token: "cheese")
    Underscore.token_extensions["hello"] = (None, token.say_cheese, None, None)
    token._ = Underscore(Underscore.token_extensions, token, start=token.idx)
    assert token._.hello() == "cheese"


@pytest.mark.parametrize("obj", [Doc, Span, Token])
def test_doc_underscore_remove_extension(obj):
    ext_name = "to_be_removed"
    obj.set_extension(ext_name, default=False)
    assert obj.has_extension(ext_name)
    obj.remove_extension(ext_name)
    assert not obj.has_extension(ext_name)


@pytest.mark.parametrize("obj", [Doc, Span, Token])
def test_underscore_raises_for_dup(obj):
    obj.set_extension("test", default=None)
    with pytest.raises(ValueError):
        obj.set_extension("test", default=None)


@pytest.mark.parametrize(
    "invalid_kwargs",
    [
        {"getter": None, "setter": lambda: None},
        {"default": None, "method": lambda: None, "getter": lambda: None},
        {"setter": lambda: None},
        {"default": None, "method": lambda: None},
        {"getter": True},
    ],
)
def test_underscore_raises_for_invalid(invalid_kwargs):
    invalid_kwargs["force"] = True
    with pytest.raises(ValueError):
        Doc.set_extension("test", **invalid_kwargs)


@pytest.mark.parametrize(
    "valid_kwargs",
    [
        {"getter": lambda: None},
        {"getter": lambda: None, "setter": lambda: None},
        {"default": "hello"},
        {"default": None},
        {"method": lambda: None},
    ],
)
def test_underscore_accepts_valid(valid_kwargs):
    valid_kwargs["force"] = True
    Doc.set_extension("test", **valid_kwargs)


def test_underscore_mutable_defaults_list(en_vocab):
    """Test that mutable default arguments are handled correctly (see #2581)."""
    Doc.set_extension("mutable", default=[])
    doc1 = Doc(en_vocab, words=["one"])
    doc2 = Doc(en_vocab, words=["two"])
    doc1._.mutable.append("foo")
    assert len(doc1._.mutable) == 1
    assert doc1._.mutable[0] == "foo"
    assert len(doc2._.mutable) == 0
    doc1._.mutable = ["bar", "baz"]
    doc1._.mutable.append("foo")
    assert len(doc1._.mutable) == 3
    assert len(doc2._.mutable) == 0


def test_underscore_mutable_defaults_dict(en_vocab):
    """Test that mutable default arguments are handled correctly (see #2581)."""
    Token.set_extension("mutable", default={})
    token1 = Doc(en_vocab, words=["one"])[0]
    token2 = Doc(en_vocab, words=["two"])[0]
    token1._.mutable["foo"] = "bar"
    assert len(token1._.mutable) == 1
    assert token1._.mutable["foo"] == "bar"
    assert len(token2._.mutable) == 0
    token1._.mutable["foo"] = "baz"
    assert len(token1._.mutable) == 1
    assert token1._.mutable["foo"] == "baz"
    token1._.mutable["x"] = []
    token1._.mutable["x"].append("y")
    assert len(token1._.mutable) == 2
    assert token1._.mutable["x"] == ["y"]
    assert len(token2._.mutable) == 0


def test_underscore_dir(en_vocab):
    """Test that dir() correctly returns extension attributes. This enables
    things like tab-completion for the attributes in doc._."""
    Doc.set_extension("test_dir", default=None)
    doc = Doc(en_vocab, words=["hello", "world"])
    assert "_" in dir(doc)
    assert "test_dir" in dir(doc._)
    assert "test_dir" not in dir(doc[0]._)
    assert "test_dir" not in dir(doc[0:2]._)


def test_underscore_docstring(en_vocab):
    """Test that docstrings are available for extension methods, even though
    they're partials."""

    def test_method(doc, arg1=1, arg2=2):
        """I am a docstring"""
        return (arg1, arg2)

    Doc.set_extension("test_docstrings", method=test_method)
    doc = Doc(en_vocab, words=["hello", "world"])
    assert test_method.__doc__ == "I am a docstring"
    assert doc._.test_docstrings.__doc__.rsplit(". ")[-1] == "I am a docstring"
file_length: 5,565 | avg_line_length: 30.805714 | max_line_length: 91 | extension_type: py
repo: spaCy | file: spaCy-master/spacy/tests/lang/__init__.py
file_length: 0 | avg_line_length: 0 | max_line_length: 0 | extension_type: py
repo: spaCy | file: spaCy-master/spacy/tests/lang/test_attrs.py
import pytest

from spacy.attrs import ENT_IOB, IS_ALPHA, LEMMA, NORM, ORTH, intify_attrs
from spacy.lang.en.stop_words import STOP_WORDS
from spacy.lang.lex_attrs import (
    is_ascii,
    is_currency,
    is_punct,
    is_stop,
    like_url,
    word_shape,
)


@pytest.mark.parametrize("word", ["the"])
@pytest.mark.issue(1889)
def test_issue1889(word):
    assert is_stop(word, STOP_WORDS) == is_stop(word.upper(), STOP_WORDS)


@pytest.mark.parametrize("text", ["dog"])
def test_attrs_key(text):
    assert intify_attrs({"ORTH": text}) == {ORTH: text}
    assert intify_attrs({"NORM": text}) == {NORM: text}
    assert intify_attrs({"lemma": text}, strings_map={text: 10}) == {LEMMA: 10}


@pytest.mark.parametrize("text", ["dog"])
def test_attrs_idempotence(text):
    int_attrs = intify_attrs({"lemma": text, "is_alpha": True}, strings_map={text: 10})
    assert intify_attrs(int_attrs) == {LEMMA: 10, IS_ALPHA: True}


@pytest.mark.parametrize("text", ["dog"])
def test_attrs_do_deprecated(text):
    int_attrs = intify_attrs(
        {"F": text, "is_alpha": True}, strings_map={text: 10}, _do_deprecated=True
    )
    assert int_attrs == {ORTH: 10, IS_ALPHA: True}


def test_attrs_ent_iob_intify():
    int_attrs = intify_attrs({"ENT_IOB": ""})
    assert int_attrs == {ENT_IOB: 0}
    int_attrs = intify_attrs({"ENT_IOB": "I"})
    assert int_attrs == {ENT_IOB: 1}
    int_attrs = intify_attrs({"ENT_IOB": "O"})
    assert int_attrs == {ENT_IOB: 2}
    int_attrs = intify_attrs({"ENT_IOB": "B"})
    assert int_attrs == {ENT_IOB: 3}
    int_attrs = intify_attrs({ENT_IOB: ""})
    assert int_attrs == {ENT_IOB: 0}
    int_attrs = intify_attrs({ENT_IOB: "I"})
    assert int_attrs == {ENT_IOB: 1}
    int_attrs = intify_attrs({ENT_IOB: "O"})
    assert int_attrs == {ENT_IOB: 2}
    int_attrs = intify_attrs({ENT_IOB: "B"})
    assert int_attrs == {ENT_IOB: 3}
    with pytest.raises(ValueError):
        int_attrs = intify_attrs({"ENT_IOB": "XX"})
    with pytest.raises(ValueError):
        int_attrs = intify_attrs({ENT_IOB: "XX"})


@pytest.mark.parametrize("text,match", [(",", True), (" ", False), ("a", False)])
def test_lex_attrs_is_punct(text, match):
    assert is_punct(text) == match


@pytest.mark.parametrize("text,match", [(",", True), ("£", False), ("♥", False)])
def test_lex_attrs_is_ascii(text, match):
    assert is_ascii(text) == match


@pytest.mark.parametrize(
    "text,match",
    [
        ("$", True),
        ("£", True),
        ("♥", False),
        ("€", True),
        ("¥", True),
        ("¢", True),
        ("a", False),
        ("www.google.com", False),
        ("dog", False),
    ],
)
def test_lex_attrs_is_currency(text, match):
    assert is_currency(text) == match


@pytest.mark.parametrize(
    "text,match",
    [
        ("www.google.com", True),
        ("google.com", True),
        ("sydney.com", True),
        ("1abc2def.org", True),
        ("http://stupid", True),
        ("www.hi", True),
        ("example.com/example", True),
        ("dog", False),
        ("1.2", False),
        ("1.a", False),
        ("hello.There", False),
    ],
)
def test_lex_attrs_like_url(text, match):
    assert like_url(text) == match


@pytest.mark.parametrize(
    "text,shape",
    [
        ("Nasa", "Xxxx"),
        ("capitalized", "xxxx"),
        ("999999999", "dddd"),
        ("C3P0", "XdXd"),
        (",", ","),
        ("\n", "\n"),
        ("``,-", "``,-"),
    ],
)
def test_lex_attrs_word_shape(text, shape):
    assert word_shape(text) == shape
file_length: 3,522 | avg_line_length: 24.904412 | max_line_length: 87 | extension_type: py
repo: spaCy | file: spaCy-master/spacy/tests/lang/test_initialize.py
import pytest

from spacy.util import get_lang_class

# fmt: off
# Only include languages with no external dependencies
# excluded: ja, ko, th, vi, zh
LANGUAGES = ["af", "am", "ar", "az", "bg", "bn", "ca", "cs", "da", "de", "el",
             "en", "es", "et", "eu", "fa", "fi", "fr", "ga", "gu", "he", "hi",
             "hr", "hu", "hy", "id", "is", "it", "kn", "ky", "lb", "lt", "lv",
             "mk", "ml", "mr", "nb", "ne", "nl", "pl", "pt", "ro", "ru", "sa",
             "si", "sk", "sl", "sq", "sr", "sv", "ta", "te", "ti", "tl", "tn",
             "tr", "tt", "uk", "ur", "xx", "yo"]
# fmt: on


@pytest.mark.parametrize("lang", LANGUAGES)
def test_lang_initialize(lang, capfd):
    """Test that languages can be initialized."""
    nlp = get_lang_class(lang)()
    # Check for stray print statements (see #3342)
    doc = nlp("test")  # noqa: F841
    captured = capfd.readouterr()
    assert not captured.out
922
35.92
78
py
spaCy
spaCy-master/spacy/tests/lang/test_lemmatizers.py
import pytest from spacy import registry from spacy.lookups import Lookups from spacy.util import get_lang_class # fmt: off # Only include languages with no external dependencies # excluded: ru, uk # excluded for custom tables: es, pl LANGUAGES = ["bn", "ca", "el", "en", "fa", "fr", "nb", "nl", "sv"] # fmt: on @pytest.mark.parametrize("lang", LANGUAGES) def test_lemmatizer_initialize(lang, capfd): @registry.misc("lemmatizer_init_lookups") def lemmatizer_init_lookups(): lookups = Lookups() lookups.add_table("lemma_lookup", {"cope": "cope", "x": "y"}) lookups.add_table("lemma_index", {"verb": ("cope", "cop")}) lookups.add_table("lemma_exc", {"verb": {"coping": ("cope",)}}) lookups.add_table("lemma_rules", {"verb": [["ing", ""]]}) return lookups lang_cls = get_lang_class(lang) # Test that languages can be initialized nlp = lang_cls() lemmatizer = nlp.add_pipe("lemmatizer", config={"mode": "lookup"}) assert not lemmatizer.lookups.tables nlp.config["initialize"]["components"]["lemmatizer"] = { "lookups": {"@misc": "lemmatizer_init_lookups"} } with pytest.raises(ValueError): nlp("x") nlp.initialize() assert lemmatizer.lookups.tables doc = nlp("x") # Check for stray print statements (see #3342) captured = capfd.readouterr() assert not captured.out assert doc[0].lemma_ == "y" # Test initialization by calling .initialize() directly nlp = lang_cls() lemmatizer = nlp.add_pipe("lemmatizer", config={"mode": "lookup"}) lemmatizer.initialize(lookups=lemmatizer_init_lookups()) assert nlp("x")[0].lemma_ == "y" # Test lookups config format for mode in ("rule", "lookup", "pos_lookup"): required, optional = lemmatizer.get_lookups_config(mode) assert isinstance(required, list) assert isinstance(optional, list)
1,910
33.745455
71
py
spaCy
spaCy-master/spacy/tests/lang/af/__init__.py
0
0
0
py
spaCy
spaCy-master/spacy/tests/lang/af/test_text.py
import pytest def test_long_text(af_tokenizer): # Excerpt: Universal Declaration of Human Rights; “'n” changed to “die” in first sentence text = """ Hierdie Universele Verklaring van Menseregte as die algemene standaard vir die verwesenliking deur alle mense en nasies, om te verseker dat elke individu en elke deel van die gemeenskap hierdie Verklaring in ag sal neem en deur opvoeding, respek vir hierdie regte en vryhede te bevorder, op nasionale en internasionale vlak, daarna sal strewe om die universele en effektiewe erkenning en agting van hierdie regte te verseker, nie net vir die mense van die Lidstate nie, maar ook vir die mense in die gebiede onder hul jurisdiksie. """ tokens = af_tokenizer(text) assert len(tokens) == 100 @pytest.mark.xfail def test_indefinite_article(af_tokenizer): text = "as 'n algemene standaard" tokens = af_tokenizer(text) assert len(tokens) == 4
923
39.173913
122
py
spaCy
spaCy-master/spacy/tests/lang/af/test_tokenizer.py
import pytest AF_BASIC_TOKENIZATION_TESTS = [ ( "Elkeen het die reg tot lewe, vryheid en sekuriteit van persoon.", [ "Elkeen", "het", "die", "reg", "tot", "lewe", ",", "vryheid", "en", "sekuriteit", "van", "persoon", ".", ], ), ] @pytest.mark.parametrize("text,expected_tokens", AF_BASIC_TOKENIZATION_TESTS) def test_af_tokenizer_basic(af_tokenizer, text, expected_tokens): tokens = af_tokenizer(text) token_list = [token.text for token in tokens if not token.is_space] assert expected_tokens == token_list
710
22.7
77
py
spaCy
spaCy-master/spacy/tests/lang/am/__init__.py
0
0
0
py
spaCy
spaCy-master/spacy/tests/lang/am/test_exception.py
0
0
0
py
spaCy
spaCy-master/spacy/tests/lang/am/test_text.py
import pytest def test_am_tokenizer_handles_long_text(am_tokenizer): text = """ሆሴ ሙጂካ በበጋ ወቅት በኦክስፎርድ ንግግር አንድያቀርቡ ሲጋበዙ ጭንቅላታቸው "ፈነዳ"። “እጅግ ጥንታዊ” የእንግሊዝኛ ተናጋሪ ዩኒቨርስቲ፣ በአስር ሺዎች የሚቆጠሩ ዩሮዎችን ለተማሪዎች በማስተማር የሚያስከፍለው እና ከማርጋሬት ታቸር እስከ ስቲቨን ሆኪንግ በአዳራሾቻቸው ውስጥ ንግግር ያደረጉበት የትምህርት ማዕከል፣ በሞንቴቪዴኦ በሚገኘው የመንግስት ትምህርት ቤት የሰለጠኑትን የ81 ዓመቱ አዛውንት አገልግሎት ጠየቁ።""" tokens = am_tokenizer(text) assert len(tokens) == 56 @pytest.mark.parametrize( "text,length", [ ("ሆሴ ሙጂካ ለምን ተመረጠ?", 5), ("“በፍፁም?”", 4), ("""አዎ! ሆዜ አርካዲዮ ቡንዲያ “እንሂድ” ሲል መለሰ።""", 11), ("እነሱ በግምት 10ኪ.ሜ. ሮጡ።", 7), ("እና ከዚያ ለምን...", 4), ], ) def test_am_tokenizer_handles_cnts(am_tokenizer, text, length): tokens = am_tokenizer(text) assert len(tokens) == length @pytest.mark.parametrize( "text,match", [ ("10", True), ("1", True), ("10.000", True), ("1000", True), ("999,0", True), ("አንድ", True), ("ሁለት", True), ("ትሪሊዮን", True), ("ውሻ", False), (",", False), ("1/2", True), ], ) def test_lex_attrs_like_number(am_tokenizer, text, match): tokens = am_tokenizer(text) assert len(tokens) == 1 assert tokens[0].like_num == match
1,259
23.230769
75
py
spaCy
spaCy-master/spacy/tests/lang/ar/__init__.py
0
0
0
py
spaCy
spaCy-master/spacy/tests/lang/ar/test_exceptions.py
import pytest @pytest.mark.parametrize("text", ["ق.م", "إلخ", "ص.ب", "ت."]) def test_ar_tokenizer_handles_abbr(ar_tokenizer, text): tokens = ar_tokenizer(text) assert len(tokens) == 1 def test_ar_tokenizer_handles_exc_in_text(ar_tokenizer): text = "تعود الكتابة الهيروغليفية إلى سنة 3200 ق.م" tokens = ar_tokenizer(text) assert len(tokens) == 7 assert tokens[6].text == "ق.م" def test_ar_tokenizer_handles_exc_in_text_2(ar_tokenizer): text = "يبلغ طول مضيق طارق 14كم " tokens = ar_tokenizer(text) assert len(tokens) == 6
563
25.857143
61
py
spaCy
spaCy-master/spacy/tests/lang/ar/test_text.py
def test_ar_tokenizer_handles_long_text(ar_tokenizer): text = """نجيب محفوظ مؤلف و كاتب روائي عربي، يعد من أهم الأدباء العرب خلال القرن العشرين. ولد نجيب محفوظ في مدينة القاهرة، حيث ترعرع و تلقى تعليمه الجامعي في جامعتها، فتمكن من نيل شهادة في الفلسفة. ألف محفوظ على مدار حياته الكثير من الأعمال الأدبية، و في مقدمتها ثلاثيته الشهيرة. و قد نجح في الحصول على جائزة نوبل للآداب، ليكون بذلك العربي الوحيد الذي فاز بها.""" tokens = ar_tokenizer(text) assert tokens[3].is_stop is True assert len(tokens) == 77
539
53
118
py
spaCy
spaCy-master/spacy/tests/lang/bg/test_text.py
import pytest @pytest.mark.parametrize( "word,match", [ ("10", True), ("1", True), ("10000", True), ("1.000", True), ("бројка", False), ("999,23", True), ("едно", True), ("две", True), ("цифра", False), ("единайсет", True), ("десет", True), ("сто", True), ("брой", False), ("хиляда", True), ("милион", True), (",", False), ("милиарда", True), ("билион", True), ], ) def test_bg_lex_attrs_like_number(bg_tokenizer, word, match): tokens = bg_tokenizer(word) assert len(tokens) == 1 assert tokens[0].like_num == match
685
21.129032
61
py
spaCy
spaCy-master/spacy/tests/lang/bg/test_tokenizer.py
import pytest def test_bg_tokenizer_handles_final_diacritics(bg_tokenizer): text = "Ня̀маше яйца̀. Ня̀маше яйца̀." tokens = bg_tokenizer(text) assert tokens[1].text == "яйца̀" assert tokens[2].text == "."
223
23.888889
61
py
spaCy
spaCy-master/spacy/tests/lang/bn/__init__.py
0
0
0
py
spaCy
spaCy-master/spacy/tests/lang/bn/test_tokenizer.py
import pytest # fmt: off TESTCASES = [ # Punctuation tests ("আমি বাংলায় গান গাই!", ["আমি", "বাংলায়", "গান", "গাই", "!"]), ("আমি বাংলায় কথা কই।", ["আমি", "বাংলায়", "কথা", "কই", "।"]), ("বসুন্ধরা জনসম্মুখে দোষ স্বীকার করলো না?", ["বসুন্ধরা", "জনসম্মুখে", "দোষ", "স্বীকার", "করলো", "না", "?"]), ("টাকা থাকলে কি না হয়!", ["টাকা", "থাকলে", "কি", "না", "হয়", "!"]), ("সরকারি বিশ্ববিদ্যালয়-এর ছাত্র নই বলেই কি এমন আচরণ?", ["সরকারি", "বিশ্ববিদ্যালয়", "-", "এর", "ছাত্র", "নই", "বলেই", "কি", "এমন", "আচরণ", "?"]), ('তারা বলে, "ওরা খামারের মুরগি।"', ["তারা", "বলে", ",", '"', "ওরা", "খামারের", "মুরগি", "।", '"']), ("৩*৩=৬?", ["৩", "*", "৩", "=", "৬", "?"]), ("কাঁঠাল-এর গন্ধই অন্যরকম", ["কাঁঠাল", "-", "এর", "গন্ধই", "অন্যরকম"]), # Abbreviations ("ডঃ খালেদ বললেন ঢাকায় ৩৫ ডিগ্রি সে.।", ["ডঃ", "খালেদ", "বললেন", "ঢাকায়", "৩৫", "ডিগ্রি", "সে.", "।"]), ] # fmt: on @pytest.mark.parametrize("text,expected_tokens", TESTCASES) def test_bn_tokenizer_handles_testcases(bn_tokenizer, text, expected_tokens): tokens = bn_tokenizer(text) token_list = [token.text for token in tokens if not token.is_space] assert expected_tokens == token_list def test_bn_tokenizer_handles_long_text(bn_tokenizer): text = """নর্থ সাউথ বিশ্ববিদ্যালয়ে সারাবছর কোন না কোন বিষয়ে গবেষণা চলতেই থাকে। \ অভিজ্ঞ ফ্যাকাল্টি মেম্বারগণ প্রায়ই শিক্ষার্থীদের নিয়ে বিভিন্ন গবেষণা প্রকল্পে কাজ করেন, \ যার মধ্যে রয়েছে রোবট থেকে মেশিন লার্নিং সিস্টেম ও আর্টিফিশিয়াল ইন্টেলিজেন্স। \ এসকল প্রকল্পে কাজ করার মাধ্যমে সংশ্লিষ্ট ক্ষেত্রে যথেষ্ঠ পরিমাণ স্পেশালাইজড হওয়া সম্ভব। \ আর গবেষণার কাজ তোমার ক্যারিয়ারকে ঠেলে নিয়ে যাবে অনেকখানি! \ কন্টেস্ট প্রোগ্রামার হও, গবেষক কিংবা ডেভেলপার - নর্থ সাউথ ইউনিভার্সিটিতে তোমার প্রতিভা বিকাশের সুযোগ রয়েছেই। \ নর্থ সাউথের অসাধারণ কমিউনিটিতে তোমাকে সাদর আমন্ত্রণ।""" tokens = bn_tokenizer(text) assert len(tokens) == 84
1,890
50.108108
148
py
spaCy
spaCy-master/spacy/tests/lang/ca/__init__.py
0
0
0
py
spaCy
spaCy-master/spacy/tests/lang/ca/test_exception.py
import pytest @pytest.mark.parametrize( "text,lemma", [("aprox.", "aproximadament"), ("pàg.", "pàgina"), ("p.ex.", "per exemple")], ) def test_ca_tokenizer_handles_abbr(ca_tokenizer, text, lemma): tokens = ca_tokenizer(text) assert len(tokens) == 1 def test_ca_tokenizer_handles_exc_in_text(ca_tokenizer): text = "La Dra. Puig viu a la pl. dels Til·lers." doc = ca_tokenizer(text) assert [t.text for t in doc] == [ "La", "Dra.", "Puig", "viu", "a", "la", "pl.", "d", "els", "Til·lers", ".", ]
617
20.310345
81
py
spaCy
spaCy-master/spacy/tests/lang/ca/test_prefix_suffix_infix.py
import pytest @pytest.mark.parametrize( "text,expected_tokens", [ ("d'un", ["d'", "un"]), ("s'ha", ["s'", "ha"]), ("del", ["d", "el"]), ("cantar-te", ["cantar", "-te"]), ("-hola", ["-", "hola"]), ], ) def test_contractions(ca_tokenizer, text, expected_tokens): """Test that the contractions are split into two tokens""" tokens = ca_tokenizer(text) assert len(tokens) == 2 assert [t.text for t in tokens] == expected_tokens
493
25
62
py
spaCy
spaCy-master/spacy/tests/lang/ca/test_text.py
"""Test that longer and mixed texts are tokenized correctly.""" import pytest def test_ca_tokenizer_handles_long_text(ca_tokenizer): text = """Una taula amb grans gerres de begudes i palles de coloraines com a reclam. Una carta cridanera amb ofertes de tapes, paelles i sangria. Un cambrer amb un somriure que convida a seure. La ubicació perfecta: el bell mig de la Rambla. Però és la una del migdia d’un dimecres de tardor i no hi ha ningú assegut a la terrassa del local. El dia és rúfol, però no fa fred i a la majoria de terrasses de la Rambla hi ha poca gent. La immensa majoria dels clients -tret d’alguna excepció com al restaurant Núria- són turistes. I la immensa majoria tenen entre mans una gerra de cervesa. Ens asseiem -fotògraf i periodista- en una terrassa buida.""" tokens = ca_tokenizer(text) assert len(tokens) == 146 @pytest.mark.parametrize( "text,length", [ ("Perquè va anar-hi?", 5), ("El cotxe dels veins.", 6), ("“Ah no?”", 5), ("""Sí! "Anem", va contestar el Joan Carles""", 11), ("Van córrer aprox. 10km", 5), ("Llavors perqué...", 3), ("Vull parlar-te'n demà al matí", 8), ("Vull explicar-t'ho demà al matí", 8), ], ) def test_ca_tokenizer_handles_cnts(ca_tokenizer, text, length): tokens = ca_tokenizer(text) assert len(tokens) == length @pytest.mark.parametrize( "text,match", [ ("10", True), ("1", True), ("10,000", True), ("10,00", True), ("999.0", True), ("un", True), ("dos", True), ("bilió", True), ("gos", False), (",", False), ("1/2", True), ], ) def test_ca_lex_attrs_like_number(ca_tokenizer, text, match): tokens = ca_tokenizer(text) assert len(tokens) == 1 assert tokens[0].like_num == match
1,872
32.446429
98
py
spaCy
spaCy-master/spacy/tests/lang/cs/__init__.py
0
0
0
py
spaCy
spaCy-master/spacy/tests/lang/cs/test_text.py
import pytest @pytest.mark.parametrize( "text,match", [ ("10", True), ("1", True), ("10.000", True), ("1000", True), ("999,0", True), ("devatenáct", True), ("osmdesát", True), ("kvadrilion", True), ("Pes", False), (",", False), ("1/2", True), ], ) def test_lex_attrs_like_number(cs_tokenizer, text, match): tokens = cs_tokenizer(text) assert len(tokens) == 1 assert tokens[0].like_num == match
508
20.208333
58
py
spaCy
spaCy-master/spacy/tests/lang/da/__init__.py
0
0
0
py
spaCy
spaCy-master/spacy/tests/lang/da/test_exceptions.py
import pytest @pytest.mark.parametrize("text", ["ca.", "m.a.o.", "Jan.", "Dec.", "kr.", "jf."]) def test_da_tokenizer_handles_abbr(da_tokenizer, text): tokens = da_tokenizer(text) assert len(tokens) == 1 @pytest.mark.parametrize("text", ["Jul.", "jul.", "Tor.", "Tors."]) def test_da_tokenizer_handles_ambiguous_abbr(da_tokenizer, text): tokens = da_tokenizer(text) assert len(tokens) == 2 @pytest.mark.parametrize("text", ["1.", "10.", "31."]) def test_da_tokenizer_handles_dates(da_tokenizer, text): tokens = da_tokenizer(text) assert len(tokens) == 1 def test_da_tokenizer_handles_exc_in_text(da_tokenizer): text = "Det er bl.a. ikke meningen" tokens = da_tokenizer(text) assert len(tokens) == 5 assert tokens[2].text == "bl.a." def test_da_tokenizer_handles_custom_base_exc(da_tokenizer): text = "Her er noget du kan kigge i." tokens = da_tokenizer(text) assert len(tokens) == 8 assert tokens[6].text == "i" assert tokens[7].text == "." @pytest.mark.parametrize( "text,n_tokens", [ ("Godt og/eller skidt", 3), ("Kør 4 km/t på vejen", 5), ("Det blæser 12 m/s.", 5), ("Det blæser 12 m/sek. på havnen", 6), ("Windows 8/Windows 10", 5), ("Billeten virker til bus/tog/metro", 8), ("26/02/2019", 1), ("Kristiansen c/o Madsen", 3), ("Sprogteknologi a/s", 2), ("De boede i A/B Bellevue", 5), # note: skipping due to weirdness in UD_Danish-DDT # ("Rotorhastigheden er 3400 o/m.", 5), ("Jeg købte billet t/r.", 5), ("Murerarbejdsmand m/k søges", 3), ("Netværket kører over TCP/IP", 4), ], ) def test_da_tokenizer_slash(da_tokenizer, text, n_tokens): tokens = da_tokenizer(text) assert len(tokens) == n_tokens
1,815
29.266667
81
py
spaCy
spaCy-master/spacy/tests/lang/da/test_noun_chunks.py
import pytest

from spacy.tokens import Doc


def test_noun_chunks_is_parsed(da_tokenizer):
    """Test that noun_chunks raises ValueError for 'da' language if Doc is not parsed.

    The Doc produced by the tokenizer has no dependency parse, so noun_chunks
    should refuse to run on it.
    """
    doc = da_tokenizer("Det er en sætning")
    with pytest.raises(ValueError):
        list(doc.noun_chunks)


DA_NP_TEST_EXAMPLES = [
    (
        "Hun elsker at plukker frugt.",
        ["PRON", "VERB", "PART", "VERB", "NOUN", "PUNCT"],
        ["nsubj", "ROOT", "mark", "obj", "obj", "punct"],
        [1, 0, 1, -2, -1, -4],
        ["Hun", "frugt"],
    ),
    (
        "Påfugle er de smukkeste fugle.",
        ["NOUN", "AUX", "DET", "ADJ", "NOUN", "PUNCT"],
        ["nsubj", "cop", "det", "amod", "ROOT", "punct"],
        [4, 3, 2, 1, 0, -1],
        ["Påfugle", "de smukkeste fugle"],
    ),
    (
        "Rikke og Jacob Jensen glæder sig til en hyggelig skovtur",
        [
            "PROPN",
            "CCONJ",
            "PROPN",
            "PROPN",
            "VERB",
            "PRON",
            "ADP",
            "DET",
            "ADJ",
            "NOUN",
        ],
        ["nsubj", "cc", "conj", "flat", "ROOT", "obj", "case", "det", "amod", "obl"],
        [4, 1, -2, -1, 0, -1, 3, 2, 1, -5],
        ["Rikke", "Jacob Jensen", "sig", "en hyggelig skovtur"],
    ),
]


@pytest.mark.parametrize(
    "text,pos,deps,heads,expected_noun_chunks", DA_NP_TEST_EXAMPLES
)
def test_da_noun_chunks(da_tokenizer, text, pos, deps, heads, expected_noun_chunks):
    tokens = da_tokenizer(text)
    assert len(heads) == len(pos)
    doc = Doc(
        tokens.vocab,
        words=[t.text for t in tokens],
        heads=[head + i for i, head in enumerate(heads)],
        deps=deps,
        pos=pos,
    )
    noun_chunks = list(doc.noun_chunks)
    assert len(noun_chunks) == len(expected_noun_chunks)
    for i, np in enumerate(noun_chunks):
        assert np.text == expected_noun_chunks[i]
2,060
27.625
87
py
spaCy
spaCy-master/spacy/tests/lang/da/test_prefix_suffix_infix.py
import pytest @pytest.mark.parametrize("text", ["(under)"]) def test_da_tokenizer_splits_no_special(da_tokenizer, text): tokens = da_tokenizer(text) assert len(tokens) == 3 @pytest.mark.parametrize("text", ["ta'r", "Søren's", "Lars'"]) def test_da_tokenizer_handles_no_punct(da_tokenizer, text): tokens = da_tokenizer(text) assert len(tokens) == 1 @pytest.mark.parametrize("text", ["(ta'r"]) def test_da_tokenizer_splits_prefix_punct(da_tokenizer, text): tokens = da_tokenizer(text) assert len(tokens) == 2 assert tokens[0].text == "(" assert tokens[1].text == "ta'r" @pytest.mark.parametrize("text", ["ta'r)"]) def test_da_tokenizer_splits_suffix_punct(da_tokenizer, text): tokens = da_tokenizer(text) assert len(tokens) == 2 assert tokens[0].text == "ta'r" assert tokens[1].text == ")" @pytest.mark.parametrize( "text,expected", [("(ta'r)", ["(", "ta'r", ")"]), ("'ta'r'", ["'", "ta'r", "'"])] ) def test_da_tokenizer_splits_even_wrap(da_tokenizer, text, expected): tokens = da_tokenizer(text) assert len(tokens) == len(expected) assert [t.text for t in tokens] == expected @pytest.mark.parametrize("text", ["(ta'r?)"]) def test_da_tokenizer_splits_uneven_wrap(da_tokenizer, text): tokens = da_tokenizer(text) assert len(tokens) == 4 assert tokens[0].text == "(" assert tokens[1].text == "ta'r" assert tokens[2].text == "?" assert tokens[3].text == ")" @pytest.mark.parametrize( "text,expected", [("f.eks.", ["f.eks."]), ("fe.", ["fe", "."]), ("(f.eks.", ["(", "f.eks."])], ) def test_da_tokenizer_splits_prefix_interact(da_tokenizer, text, expected): tokens = da_tokenizer(text) assert len(tokens) == len(expected) assert [t.text for t in tokens] == expected @pytest.mark.parametrize("text", ["f.eks.)"]) def test_da_tokenizer_splits_suffix_interact(da_tokenizer, text): tokens = da_tokenizer(text) assert len(tokens) == 2 assert tokens[0].text == "f.eks." assert tokens[1].text == ")" @pytest.mark.parametrize("text", ["(f.eks.)"]) def test_da_tokenizer_splits_even_wrap_interact(da_tokenizer, text): tokens = da_tokenizer(text) assert len(tokens) == 3 assert tokens[0].text == "(" assert tokens[1].text == "f.eks." assert tokens[2].text == ")" @pytest.mark.parametrize("text", ["(f.eks.?)"]) def test_da_tokenizer_splits_uneven_wrap_interact(da_tokenizer, text): tokens = da_tokenizer(text) assert len(tokens) == 4 assert tokens[0].text == "(" assert tokens[1].text == "f.eks." assert tokens[2].text == "?" 
assert tokens[3].text == ")" @pytest.mark.parametrize("text", ["0,1-13,5", "0,0-0,1", "103,27-300", "1/2-3/4"]) def test_da_tokenizer_handles_numeric_range(da_tokenizer, text): tokens = da_tokenizer(text) assert len(tokens) == 1 @pytest.mark.parametrize("text", ["sort.Gul", "Hej.Verden"]) def test_da_tokenizer_splits_period_infix(da_tokenizer, text): tokens = da_tokenizer(text) assert len(tokens) == 3 @pytest.mark.parametrize("text", ["Hej,Verden", "en,to"]) def test_da_tokenizer_splits_comma_infix(da_tokenizer, text): tokens = da_tokenizer(text) assert len(tokens) == 3 assert tokens[0].text == text.split(",")[0] assert tokens[1].text == "," assert tokens[2].text == text.split(",")[1] @pytest.mark.parametrize("text", ["sort...Gul", "sort...gul"]) def test_da_tokenizer_splits_ellipsis_infix(da_tokenizer, text): tokens = da_tokenizer(text) assert len(tokens) == 3 @pytest.mark.parametrize( "text", ["gå-på-mod", "4-hjulstræk", "100-Pfennig-frimærke", "TV-2-spots", "trofæ-vaeggen"], ) def test_da_tokenizer_keeps_hyphens(da_tokenizer, text): tokens = da_tokenizer(text) assert len(tokens) == 1 def test_da_tokenizer_splits_double_hyphen_infix(da_tokenizer): tokens = da_tokenizer( "Mange regler--eksempelvis bindestregs-reglerne--er komplicerede." ) assert len(tokens) == 9 assert tokens[0].text == "Mange" assert tokens[1].text == "regler" assert tokens[2].text == "--" assert tokens[3].text == "eksempelvis" assert tokens[4].text == "bindestregs-reglerne" assert tokens[5].text == "--" assert tokens[6].text == "er" assert tokens[7].text == "komplicerede" def test_da_tokenizer_handles_posessives_and_contractions(da_tokenizer): tokens = da_tokenizer( "'DBA's, Lars' og Liz' bil sku' sgu' ik' ha' en bule, det ka' han ik' li' mere', sagde hun." ) assert len(tokens) == 25 assert tokens[0].text == "'" assert tokens[1].text == "DBA's" assert tokens[2].text == "," assert tokens[3].text == "Lars'" assert tokens[4].text == "og" assert tokens[5].text == "Liz'" assert tokens[6].text == "bil" assert tokens[7].text == "sku'" assert tokens[8].text == "sgu'" assert tokens[9].text == "ik'" assert tokens[10].text == "ha'" assert tokens[11].text == "en" assert tokens[12].text == "bule" assert tokens[13].text == "," assert tokens[14].text == "det" assert tokens[15].text == "ka'" assert tokens[16].text == "han" assert tokens[17].text == "ik'" assert tokens[18].text == "li'" assert tokens[19].text == "mere" assert tokens[20].text == "'" assert tokens[21].text == "," assert tokens[22].text == "sagde" assert tokens[23].text == "hun" assert tokens[24].text == "."
5,417
31.059172
100
py
spaCy
spaCy-master/spacy/tests/lang/da/test_text.py
import pytest from spacy.lang.da.lex_attrs import like_num def test_da_tokenizer_handles_long_text(da_tokenizer): text = """Der var så dejligt ude på landet. Det var sommer, kornet stod gult, havren grøn, høet var rejst i stakke nede i de grønne enge, og der gik storken på sine lange, røde ben og snakkede ægyptisk, for det sprog havde han lært af sin moder. Rundt om ager og eng var der store skove, og midt i skovene dybe søer; jo, der var rigtignok dejligt derude på landet!""" tokens = da_tokenizer(text) assert len(tokens) == 84 @pytest.mark.parametrize( "text,match", [ ("10", True), ("1", True), ("10.000", True), ("10.00", True), ("999,0", True), ("en", True), ("treoghalvfemsindstyvende", True), ("hundrede", True), ("hund", False), (",", False), ("1/2", True), ], ) def test_lex_attrs_like_number(da_tokenizer, text, match): tokens = da_tokenizer(text) assert len(tokens) == 1 assert tokens[0].like_num == match @pytest.mark.parametrize("word", ["elleve", "første"]) def test_da_lex_attrs_capitals(word): assert like_num(word) assert like_num(word.upper())
1,208
27.785714
121
py
spaCy
spaCy-master/spacy/tests/lang/de/__init__.py
0
0
0
py
spaCy
spaCy-master/spacy/tests/lang/de/test_exceptions.py
import pytest @pytest.mark.parametrize("text", ["auf'm", "du's", "über'm", "wir's"]) def test_de_tokenizer_splits_contractions(de_tokenizer, text): tokens = de_tokenizer(text) assert len(tokens) == 2 @pytest.mark.parametrize("text", ["z.B.", "d.h.", "Jan.", "Dez.", "Chr."]) def test_de_tokenizer_handles_abbr(de_tokenizer, text): tokens = de_tokenizer(text) assert len(tokens) == 1 def test_de_tokenizer_handles_exc_in_text(de_tokenizer): text = "Ich bin z.Zt. im Urlaub." tokens = de_tokenizer(text) assert len(tokens) == 6 assert tokens[2].text == "z.Zt."
597
27.47619
74
py
spaCy
spaCy-master/spacy/tests/lang/de/test_noun_chunks.py
import pytest


def test_noun_chunks_is_parsed_de(de_tokenizer):
    """Test that noun_chunks raises ValueError for 'de' language if Doc is not parsed."""
    doc = de_tokenizer("Er lag auf seinem")
    with pytest.raises(ValueError):
        list(doc.noun_chunks)
266
28.666667
90
py
spaCy
spaCy-master/spacy/tests/lang/de/test_parser.py
from spacy.tokens import Doc def test_de_parser_noun_chunks_standard_de(de_vocab): words = ["Eine", "Tasse", "steht", "auf", "dem", "Tisch", "."] heads = [1, 2, 2, 2, 5, 3, 2] pos = ["DET", "NOUN", "VERB", "ADP", "DET", "NOUN", "PUNCT"] deps = ["nk", "sb", "ROOT", "mo", "nk", "nk", "punct"] doc = Doc(de_vocab, words=words, pos=pos, deps=deps, heads=heads) chunks = list(doc.noun_chunks) assert len(chunks) == 2 assert chunks[0].text_with_ws == "Eine Tasse " assert chunks[1].text_with_ws == "dem Tisch " def test_de_extended_chunk(de_vocab): # fmt: off words = ["Die", "Sängerin", "singt", "mit", "einer", "Tasse", "Kaffee", "Arien", "."] heads = [1, 2, 2, 2, 5, 3, 5, 2, 2] pos = ["DET", "NOUN", "VERB", "ADP", "DET", "NOUN", "NOUN", "NOUN", "PUNCT"] deps = ["nk", "sb", "ROOT", "mo", "nk", "nk", "nk", "oa", "punct"] # fmt: on doc = Doc(de_vocab, words=words, pos=pos, deps=deps, heads=heads) chunks = list(doc.noun_chunks) assert len(chunks) == 3 assert chunks[0].text_with_ws == "Die Sängerin " assert chunks[1].text_with_ws == "einer Tasse Kaffee " assert chunks[2].text_with_ws == "Arien "
1,186
39.931034
89
py
spaCy
spaCy-master/spacy/tests/lang/de/test_prefix_suffix_infix.py
import pytest @pytest.mark.parametrize("text", ["(unter)"]) def test_de_tokenizer_splits_no_special(de_tokenizer, text): tokens = de_tokenizer(text) assert len(tokens) == 3 @pytest.mark.parametrize("text", ["unter'm"]) def test_de_tokenizer_splits_no_punct(de_tokenizer, text): tokens = de_tokenizer(text) assert len(tokens) == 2 @pytest.mark.parametrize("text", ["(unter'm"]) def test_de_tokenizer_splits_prefix_punct(de_tokenizer, text): tokens = de_tokenizer(text) assert len(tokens) == 3 @pytest.mark.parametrize("text", ["unter'm)"]) def test_de_tokenizer_splits_suffix_punct(de_tokenizer, text): tokens = de_tokenizer(text) assert len(tokens) == 3 @pytest.mark.parametrize("text", ["(unter'm)"]) def test_de_tokenizer_splits_even_wrap(de_tokenizer, text): tokens = de_tokenizer(text) assert len(tokens) == 4 @pytest.mark.parametrize("text", ["(unter'm?)"]) def test_de_tokenizer_splits_uneven_wrap(de_tokenizer, text): tokens = de_tokenizer(text) assert len(tokens) == 5 @pytest.mark.parametrize("text,length", [("z.B.", 1), ("zb.", 2), ("(z.B.", 2)]) def test_de_tokenizer_splits_prefix_interact(de_tokenizer, text, length): tokens = de_tokenizer(text) assert len(tokens) == length @pytest.mark.parametrize("text", ["z.B.)"]) def test_de_tokenizer_splits_suffix_interact(de_tokenizer, text): tokens = de_tokenizer(text) assert len(tokens) == 2 @pytest.mark.parametrize("text", ["(z.B.)"]) def test_de_tokenizer_splits_even_wrap_interact(de_tokenizer, text): tokens = de_tokenizer(text) assert len(tokens) == 3 @pytest.mark.parametrize("text", ["(z.B.?)"]) def test_de_tokenizer_splits_uneven_wrap_interact(de_tokenizer, text): tokens = de_tokenizer(text) assert len(tokens) == 4 @pytest.mark.parametrize("text", ["0.1-13.5", "0.0-0.1", "103.27-300"]) def test_de_tokenizer_splits_numeric_range(de_tokenizer, text): tokens = de_tokenizer(text) assert len(tokens) == 3 @pytest.mark.parametrize("text", ["blau.Rot", "Hallo.Welt"]) def test_de_tokenizer_splits_period_infix(de_tokenizer, text): tokens = de_tokenizer(text) assert len(tokens) == 3 @pytest.mark.parametrize("text", ["Hallo,Welt", "eins,zwei"]) def test_de_tokenizer_splits_comma_infix(de_tokenizer, text): tokens = de_tokenizer(text) assert len(tokens) == 3 assert tokens[0].text == text.split(",")[0] assert tokens[1].text == "," assert tokens[2].text == text.split(",")[1] @pytest.mark.parametrize("text", ["blau...Rot", "blau...rot"]) def test_de_tokenizer_splits_ellipsis_infix(de_tokenizer, text): tokens = de_tokenizer(text) assert len(tokens) == 3 @pytest.mark.parametrize("text", ["Islam-Konferenz", "Ost-West-Konflikt"]) def test_de_tokenizer_keeps_hyphens(de_tokenizer, text): tokens = de_tokenizer(text) assert len(tokens) == 1 def test_de_tokenizer_splits_double_hyphen_infix(de_tokenizer): tokens = de_tokenizer("Viele Regeln--wie die Bindestrich-Regeln--sind kompliziert.") assert len(tokens) == 10 assert tokens[0].text == "Viele" assert tokens[1].text == "Regeln" assert tokens[2].text == "--" assert tokens[3].text == "wie" assert tokens[4].text == "die" assert tokens[5].text == "Bindestrich-Regeln" assert tokens[6].text == "--" assert tokens[7].text == "sind" assert tokens[8].text == "kompliziert"
3,395
30.155963
88
py
spaCy
spaCy-master/spacy/tests/lang/de/test_text.py
import pytest def test_de_tokenizer_handles_long_text(de_tokenizer): text = """Die Verwandlung Als Gregor Samsa eines Morgens aus unruhigen Träumen erwachte, fand er sich in seinem Bett zu einem ungeheueren Ungeziefer verwandelt. Er lag auf seinem panzerartig harten Rücken und sah, wenn er den Kopf ein wenig hob, seinen gewölbten, braunen, von bogenförmigen Versteifungen geteilten Bauch, auf dessen Höhe sich die Bettdecke, zum gänzlichen Niedergleiten bereit, kaum noch erhalten konnte. Seine vielen, im Vergleich zu seinem sonstigen Umfang kläglich dünnen Beine flimmerten ihm hilflos vor den Augen. »Was ist mit mir geschehen?«, dachte er.""" tokens = de_tokenizer(text) assert len(tokens) == 109 @pytest.mark.parametrize( "text", [ "Donaudampfschifffahrtsgesellschaftskapitänsanwärterposten", "Rindfleischetikettierungsüberwachungsaufgabenübertragungsgesetz", "Kraftfahrzeug-Haftpflichtversicherung", "Vakuum-Mittelfrequenz-Induktionsofen", ], ) def test_de_tokenizer_handles_long_words(de_tokenizer, text): tokens = de_tokenizer(text) assert len(tokens) == 1 @pytest.mark.parametrize( "text,length", [ ("»Was ist mit mir geschehen?«, dachte er.", 12), ("“Dies frühzeitige Aufstehen”, dachte er, “macht einen ganz blödsinnig. ", 15), ], ) def test_de_tokenizer_handles_examples(de_tokenizer, text, length): tokens = de_tokenizer(text) assert len(tokens) == length
1,481
31.217391
88
py
spaCy
spaCy-master/spacy/tests/lang/dsb/__init__.py
0
0
0
py
spaCy
spaCy-master/spacy/tests/lang/dsb/test_text.py
import pytest @pytest.mark.parametrize( "text,match", [ ("10", True), ("1", True), ("10,000", True), ("10,00", True), ("jadno", True), ("dwanassćo", True), ("milion", True), ("sto", True), ("ceła", False), ("kopica", False), ("narěcow", False), (",", False), ("1/2", True), ], ) def test_lex_attrs_like_number(dsb_tokenizer, text, match): tokens = dsb_tokenizer(text) assert len(tokens) == 1 assert tokens[0].like_num == match
557
20.461538
59
py
spaCy
spaCy-master/spacy/tests/lang/dsb/test_tokenizer.py
import pytest DSB_BASIC_TOKENIZATION_TESTS = [ ( "Ale eksistěrujo mimo togo ceła kopica narěcow, ako na pśikład slěpjańska.", [ "Ale", "eksistěrujo", "mimo", "togo", "ceła", "kopica", "narěcow", ",", "ako", "na", "pśikład", "slěpjańska", ".", ], ), ] @pytest.mark.parametrize("text,expected_tokens", DSB_BASIC_TOKENIZATION_TESTS) def test_dsb_tokenizer_basic(dsb_tokenizer, text, expected_tokens): tokens = dsb_tokenizer(text) token_list = [token.text for token in tokens if not token.is_space] assert expected_tokens == token_list
735
23.533333
84
py
spaCy
spaCy-master/spacy/tests/lang/el/__init__.py
0
0
0
py
spaCy
spaCy-master/spacy/tests/lang/el/test_exception.py
import pytest @pytest.mark.parametrize("text", ["αριθ.", "τρισ.", "δισ.", "σελ."]) def test_el_tokenizer_handles_abbr(el_tokenizer, text): tokens = el_tokenizer(text) assert len(tokens) == 1 def test_el_tokenizer_handles_exc_in_text(el_tokenizer): text = "Στα 14 τρισ. δολάρια το κόστος από την άνοδο της στάθμης της θάλασσας." tokens = el_tokenizer(text) assert len(tokens) == 14 assert tokens[2].text == "τρισ."
442
28.533333
83
py
spaCy
spaCy-master/spacy/tests/lang/el/test_noun_chunks.py
import pytest


def test_noun_chunks_is_parsed_el(el_tokenizer):
    """Test that noun_chunks raises ValueError for 'el' language if Doc is not parsed."""
    doc = el_tokenizer("είναι χώρα της νοτιοανατολικής")
    with pytest.raises(ValueError):
        list(doc.noun_chunks)
279
30.111111
90
py
spaCy
spaCy-master/spacy/tests/lang/el/test_text.py
import pytest def test_el_tokenizer_handles_long_text(el_tokenizer): text = """Η Ελλάδα (παλαιότερα Ελλάς), επίσημα γνωστή ως Ελληνική Δημοκρατία,\ είναι χώρα της νοτιοανατολικής Ευρώπης στο νοτιότερο άκρο της Βαλκανικής χερσονήσου.\ Συνορεύει στα βορειοδυτικά με την Αλβανία, στα βόρεια με την πρώην\ Γιουγκοσλαβική Δημοκρατία της Μακεδονίας και τη Βουλγαρία και στα βορειοανατολικά με την Τουρκία.""" tokens = el_tokenizer(text) assert len(tokens) == 54 @pytest.mark.parametrize( "text,length", [ ("Διοικητικά η Ελλάδα διαιρείται σε 13 Περιφέρειες.", 8), ("Η εκπαίδευση στην Ελλάδα χωρίζεται κυρίως σε τρία επίπεδα.", 10), ( "Η Ελλάδα είναι μία από τις χώρες της Ευρωπαϊκής Ένωσης (ΕΕ) που διαθέτει σηµαντικό ορυκτό πλούτο.", 19, ), ( "Η ναυτιλία αποτέλεσε ένα σημαντικό στοιχείο της Ελληνικής οικονομικής δραστηριότητας από τα αρχαία χρόνια.", 15, ), ("Η Ελλάδα είναι μέλος σε αρκετούς διεθνείς οργανισμούς.", 9), ], ) def test_el_tokenizer_handles_cnts(el_tokenizer, text, length): tokens = el_tokenizer(text) assert len(tokens) == length
1,195
36.375
121
py
spaCy
spaCy-master/spacy/tests/lang/en/__init__.py
0
0
0
py
spaCy
spaCy-master/spacy/tests/lang/en/test_customized_tokenizer.py
import re import pytest from spacy.lang.en import English from spacy.tokenizer import Tokenizer from spacy.util import compile_infix_regex, compile_prefix_regex, compile_suffix_regex @pytest.fixture def custom_en_tokenizer(en_vocab): prefix_re = compile_prefix_regex(English.Defaults.prefixes) suffix_re = compile_suffix_regex(English.Defaults.suffixes) custom_infixes = [ r"\.\.\.+", r"(?<=[0-9])-(?=[0-9])", r"[0-9]+(,[0-9]+)+", r"[\[\]!&:,()\*—–\/-]", ] infix_re = compile_infix_regex(custom_infixes) token_match_re = re.compile("a-b") return Tokenizer( en_vocab, English.Defaults.tokenizer_exceptions, prefix_re.search, suffix_re.search, infix_re.finditer, token_match=token_match_re.match, ) def test_en_customized_tokenizer_handles_infixes(custom_en_tokenizer): sentence = "The 8 and 10-county definitions are not used for the greater Southern California Megaregion." context = [word.text for word in custom_en_tokenizer(sentence)] assert context == [ "The", "8", "and", "10", "-", "county", "definitions", "are", "not", "used", "for", "the", "greater", "Southern", "California", "Megaregion", ".", ] # the trailing '-' may cause Assertion Error sentence = "The 8- and 10-county definitions are not used for the greater Southern California Megaregion." context = [word.text for word in custom_en_tokenizer(sentence)] assert context == [ "The", "8", "-", "and", "10", "-", "county", "definitions", "are", "not", "used", "for", "the", "greater", "Southern", "California", "Megaregion", ".", ] def test_en_customized_tokenizer_handles_token_match(custom_en_tokenizer): sentence = "The 8 and 10-county definitions a-b not used for the greater Southern California Megaregion." context = [word.text for word in custom_en_tokenizer(sentence)] assert context == [ "The", "8", "and", "10", "-", "county", "definitions", "a-b", "not", "used", "for", "the", "greater", "Southern", "California", "Megaregion", ".", ] def test_en_customized_tokenizer_handles_rules(custom_en_tokenizer): sentence = "The 8 and 10-county definitions are not used for the greater Southern California Megaregion. :)" context = [word.text for word in custom_en_tokenizer(sentence)] assert context == [ "The", "8", "and", "10", "-", "county", "definitions", "are", "not", "used", "for", "the", "greater", "Southern", "California", "Megaregion", ".", ":)", ] def test_en_customized_tokenizer_handles_rules_property(custom_en_tokenizer): sentence = "The 8 and 10-county definitions are not used for the greater Southern California Megaregion. :)" rules = custom_en_tokenizer.rules del rules[":)"] custom_en_tokenizer.rules = rules context = [word.text for word in custom_en_tokenizer(sentence)] assert context == [ "The", "8", "and", "10", "-", "county", "definitions", "are", "not", "used", "for", "the", "greater", "Southern", "California", "Megaregion", ".", ":", ")", ]
3,774
23.354839
112
py
spaCy
spaCy-master/spacy/tests/lang/en/test_exceptions.py
import pytest def test_en_tokenizer_handles_basic_contraction(en_tokenizer): text = "don't giggle" tokens = en_tokenizer(text) assert len(tokens) == 3 assert tokens[1].text == "n't" text = "i said don't!" tokens = en_tokenizer(text) assert len(tokens) == 5 assert tokens[4].text == "!" @pytest.mark.parametrize("text", ["`ain't", """"isn't""", "can't!"]) def test_en_tokenizer_handles_basic_contraction_punct(en_tokenizer, text): tokens = en_tokenizer(text) assert len(tokens) == 3 @pytest.mark.parametrize( "text_poss,text", [("Robin's", "Robin"), ("Alexis's", "Alexis")] ) def test_en_tokenizer_handles_poss_contraction(en_tokenizer, text_poss, text): tokens = en_tokenizer(text_poss) assert len(tokens) == 2 assert tokens[0].text == text assert tokens[1].text == "'s" @pytest.mark.parametrize("text", ["schools'", "Alexis'"]) def test_en_tokenizer_splits_trailing_apos(en_tokenizer, text): tokens = en_tokenizer(text) assert len(tokens) == 2 assert tokens[0].text == text.split("'")[0] assert tokens[1].text == "'" @pytest.mark.parametrize("text", ["'em", "nothin'", "ol'"]) def test_en_tokenizer_doesnt_split_apos_exc(en_tokenizer, text): tokens = en_tokenizer(text) assert len(tokens) == 1 assert tokens[0].text == text @pytest.mark.parametrize("text", ["we'll", "You'll", "there'll", "this'll", "those'll"]) def test_en_tokenizer_handles_ll_contraction(en_tokenizer, text): tokens = en_tokenizer(text) assert len(tokens) == 2 assert tokens[0].text == text.split("'")[0] assert tokens[1].text == "'ll" @pytest.mark.parametrize( "text_lower,text_title", [("can't", "Can't"), ("ain't", "Ain't")] ) def test_en_tokenizer_handles_capitalization(en_tokenizer, text_lower, text_title): tokens_lower = en_tokenizer(text_lower) tokens_title = en_tokenizer(text_title) assert tokens_title[0].text == tokens_lower[0].text.title() assert tokens_lower[0].text == tokens_title[0].text.lower() assert tokens_lower[1].text == tokens_title[1].text @pytest.mark.parametrize("pron", ["I", "You", "He", "She", "It", "We", "They"]) @pytest.mark.parametrize("contraction", ["'ll", "'d"]) def test_en_tokenizer_keeps_title_case(en_tokenizer, pron, contraction): tokens = en_tokenizer(pron + contraction) assert tokens[0].text == pron assert tokens[1].text == contraction @pytest.mark.parametrize("exc", ["Ill", "ill", "Hell", "hell", "Well", "well"]) def test_en_tokenizer_excludes_ambiguous(en_tokenizer, exc): tokens = en_tokenizer(exc) assert len(tokens) == 1 @pytest.mark.parametrize( "wo_punct,w_punct", [("We've", "`We've"), ("couldn't", "couldn't)")] ) def test_en_tokenizer_splits_defined_punct(en_tokenizer, wo_punct, w_punct): tokens = en_tokenizer(wo_punct) assert len(tokens) == 2 tokens = en_tokenizer(w_punct) assert len(tokens) == 3 @pytest.mark.parametrize("text", ["e.g.", "p.m.", "Jan.", "Dec.", "Inc."]) def test_en_tokenizer_handles_abbr(en_tokenizer, text): tokens = en_tokenizer(text) assert len(tokens) == 1 def test_en_tokenizer_handles_exc_in_text(en_tokenizer): text = "It's mediocre i.e. bad." tokens = en_tokenizer(text) assert len(tokens) == 6 assert tokens[3].text == "i.e." 
@pytest.mark.parametrize("text", ["1am", "12a.m.", "11p.m.", "4pm"]) def test_en_tokenizer_handles_times(en_tokenizer, text): tokens = en_tokenizer(text) assert len(tokens) == 2 @pytest.mark.parametrize( "text,norms", [ ("I'm", ["i", "am"]), ("shan't", ["shall", "not"]), ( "Many factors cause cancer 'cause it is complex", ["many", "factors", "cause", "cancer", "because", "it", "is", "complex"], ), ], ) def test_en_tokenizer_norm_exceptions(en_tokenizer, text, norms): tokens = en_tokenizer(text) assert [token.norm_ for token in tokens] == norms @pytest.mark.parametrize("text,norm", [("Jan.", "January"), ("'cuz", "because")]) def test_en_lex_attrs_norm_exceptions(en_tokenizer, text, norm): tokens = en_tokenizer(text) assert tokens[0].norm_ == norm
4,158
31.492188
88
py
spaCy
spaCy-master/spacy/tests/lang/en/test_indices.py
def test_en_simple_punct(en_tokenizer): text = "to walk, do foo" tokens = en_tokenizer(text) assert tokens[0].idx == 0 assert tokens[1].idx == 3 assert tokens[2].idx == 7 assert tokens[3].idx == 9 assert tokens[4].idx == 12 def test_en_complex_punct(en_tokenizer): text = "Tom (D., Ill.)!" tokens = en_tokenizer(text) assert tokens[0].idx == 0 assert len(tokens[0]) == 3 assert tokens[1].idx == 4 assert len(tokens[1]) == 1 assert tokens[2].idx == 5 assert len(tokens[2]) == 2 assert tokens[3].idx == 7 assert len(tokens[3]) == 1 assert tokens[4].idx == 9 assert len(tokens[4]) == 4 assert tokens[5].idx == 13 assert tokens[6].idx == 14
723
26.846154
40
py
spaCy
spaCy-master/spacy/tests/lang/en/test_noun_chunks.py
import pytest

from spacy.tokens import Doc


@pytest.fixture
def doc(en_vocab):
    words = ["Peter", "has", "chronic", "command", "and", "control", "issues"]
    heads = [1, 1, 6, 6, 3, 3, 1]
    deps = ["nsubj", "ROOT", "amod", "nmod", "cc", "conj", "dobj"]
    pos = ["PROPN", "VERB", "ADJ", "NOUN", "CCONJ", "NOUN", "NOUN"]
    return Doc(en_vocab, words=words, heads=heads, deps=deps, pos=pos)


def test_noun_chunks_is_parsed(en_tokenizer):
    """Test that noun_chunks raises ValueError for 'en' language if Doc is not parsed."""
    doc = en_tokenizer("This is a sentence")
    with pytest.raises(ValueError):
        list(doc.noun_chunks)


def test_en_noun_chunks_not_nested(doc, en_vocab):
    """Test that each token only appears in one noun chunk at most"""
    word_occurred = {}
    chunks = list(doc.noun_chunks)
    assert len(chunks) > 1
    for chunk in chunks:
        for word in chunk:
            word_occurred.setdefault(word.text, 0)
            word_occurred[word.text] += 1
    assert len(word_occurred) > 0
    for word, freq in word_occurred.items():
        assert freq == 1, (word, [chunk.text for chunk in doc.noun_chunks])


def test_noun_chunks_span(doc, en_tokenizer):
    """Test that the span.noun_chunks property works correctly"""
    doc_chunks = list(doc.noun_chunks)
    span = doc[0:3]
    span_chunks = list(span.noun_chunks)
    assert 0 < len(span_chunks) < len(doc_chunks)
    for chunk in span_chunks:
        assert chunk in doc_chunks
        assert chunk.start >= 0
        assert chunk.end <= 3
1,549
32.695652
90
py
spaCy
spaCy-master/spacy/tests/lang/en/test_parser.py
from spacy.tokens import Doc def test_en_parser_noun_chunks_standard(en_vocab): words = ["A", "base", "phrase", "should", "be", "recognized", "."] heads = [2, 2, 5, 5, 5, 5, 5] pos = ["DET", "ADJ", "NOUN", "AUX", "VERB", "VERB", "PUNCT"] deps = ["det", "amod", "nsubjpass", "aux", "auxpass", "ROOT", "punct"] doc = Doc(en_vocab, words=words, pos=pos, deps=deps, heads=heads) chunks = list(doc.noun_chunks) assert len(chunks) == 1 assert chunks[0].text_with_ws == "A base phrase " def test_en_parser_noun_chunks_coordinated(en_vocab): # fmt: off words = ["A", "base", "phrase", "and", "a", "good", "phrase", "are", "often", "the", "same", "."] heads = [2, 2, 7, 2, 6, 6, 2, 7, 7, 10, 7, 7] pos = ["DET", "NOUN", "NOUN", "CCONJ", "DET", "ADJ", "NOUN", "VERB", "ADV", "DET", "ADJ", "PUNCT"] deps = ["det", "compound", "nsubj", "cc", "det", "amod", "conj", "ROOT", "advmod", "det", "attr", "punct"] # fmt: on doc = Doc(en_vocab, words=words, pos=pos, deps=deps, heads=heads) chunks = list(doc.noun_chunks) assert len(chunks) == 2 assert chunks[0].text_with_ws == "A base phrase " assert chunks[1].text_with_ws == "a good phrase " def test_en_parser_noun_chunks_pp_chunks(en_vocab): words = ["A", "phrase", "with", "another", "phrase", "occurs", "."] heads = [1, 5, 1, 4, 2, 5, 5] pos = ["DET", "NOUN", "ADP", "DET", "NOUN", "VERB", "PUNCT"] deps = ["det", "nsubj", "prep", "det", "pobj", "ROOT", "punct"] doc = Doc(en_vocab, words=words, pos=pos, deps=deps, heads=heads) chunks = list(doc.noun_chunks) assert len(chunks) == 2 assert chunks[0].text_with_ws == "A phrase " assert chunks[1].text_with_ws == "another phrase " def test_en_parser_noun_chunks_appositional_modifiers(en_vocab): # fmt: off words = ["Sam", ",", "my", "brother", ",", "arrived", "to", "the", "house", "."] heads = [5, 0, 3, 0, 0, 5, 5, 8, 6, 5] pos = ["PROPN", "PUNCT", "DET", "NOUN", "PUNCT", "VERB", "ADP", "DET", "NOUN", "PUNCT"] deps = ["nsubj", "punct", "poss", "appos", "punct", "ROOT", "prep", "det", "pobj", "punct"] # fmt: on doc = Doc(en_vocab, words=words, pos=pos, deps=deps, heads=heads) chunks = list(doc.noun_chunks) assert len(chunks) == 3 assert chunks[0].text_with_ws == "Sam " assert chunks[1].text_with_ws == "my brother " assert chunks[2].text_with_ws == "the house " def test_en_parser_noun_chunks_dative(en_vocab): words = ["She", "gave", "Bob", "a", "raise", "."] heads = [1, 1, 1, 4, 1, 1] pos = ["PRON", "VERB", "PROPN", "DET", "NOUN", "PUNCT"] deps = ["nsubj", "ROOT", "dative", "det", "dobj", "punct"] doc = Doc(en_vocab, words=words, pos=pos, deps=deps, heads=heads) chunks = list(doc.noun_chunks) assert len(chunks) == 3 assert chunks[0].text_with_ws == "She " assert chunks[1].text_with_ws == "Bob " assert chunks[2].text_with_ws == "a raise "
2,954
43.104478
110
py
spaCy
spaCy-master/spacy/tests/lang/en/test_prefix_suffix_infix.py
import pytest @pytest.mark.parametrize("text", ["(can)"]) def test_en_tokenizer_splits_no_special(en_tokenizer, text): tokens = en_tokenizer(text) assert len(tokens) == 3 @pytest.mark.parametrize("text", ["can't"]) def test_en_tokenizer_splits_no_punct(en_tokenizer, text): tokens = en_tokenizer(text) assert len(tokens) == 2 @pytest.mark.parametrize("text", ["(can't"]) def test_en_tokenizer_splits_prefix_punct(en_tokenizer, text): tokens = en_tokenizer(text) assert len(tokens) == 3 @pytest.mark.parametrize("text", ["can't)"]) def test_en_tokenizer_splits_suffix_punct(en_tokenizer, text): tokens = en_tokenizer(text) assert len(tokens) == 3 @pytest.mark.parametrize("text", ["(can't)"]) def test_en_tokenizer_splits_even_wrap(en_tokenizer, text): tokens = en_tokenizer(text) assert len(tokens) == 4 @pytest.mark.parametrize("text", ["(can't?)"]) def test_en_tokenizer_splits_uneven_wrap(en_tokenizer, text): tokens = en_tokenizer(text) assert len(tokens) == 5 @pytest.mark.parametrize("text,length", [("U.S.", 1), ("us.", 2), ("(U.S.", 2)]) def test_en_tokenizer_splits_prefix_interact(en_tokenizer, text, length): tokens = en_tokenizer(text) assert len(tokens) == length @pytest.mark.parametrize("text", ["U.S.)"]) def test_en_tokenizer_splits_suffix_interact(en_tokenizer, text): tokens = en_tokenizer(text) assert len(tokens) == 2 @pytest.mark.parametrize("text", ["(U.S.)"]) def test_en_tokenizer_splits_even_wrap_interact(en_tokenizer, text): tokens = en_tokenizer(text) assert len(tokens) == 3 @pytest.mark.parametrize("text", ["(U.S.?)"]) def test_en_tokenizer_splits_uneven_wrap_interact(en_tokenizer, text): tokens = en_tokenizer(text) assert len(tokens) == 4 @pytest.mark.parametrize("text", ["best-known"]) def test_en_tokenizer_splits_hyphens(en_tokenizer, text): tokens = en_tokenizer(text) assert len(tokens) == 3 @pytest.mark.parametrize("text", ["0.1-13.5", "0.0-0.1", "103.27-300"]) def test_en_tokenizer_splits_numeric_range(en_tokenizer, text): tokens = en_tokenizer(text) assert len(tokens) == 3 @pytest.mark.parametrize("text", ["best.Known", "Hello.World"]) def test_en_tokenizer_splits_period_infix(en_tokenizer, text): tokens = en_tokenizer(text) assert len(tokens) == 3 @pytest.mark.parametrize("text", ["Hello,world", "one,two"]) def test_en_tokenizer_splits_comma_infix(en_tokenizer, text): tokens = en_tokenizer(text) assert len(tokens) == 3 assert tokens[0].text == text.split(",")[0] assert tokens[1].text == "," assert tokens[2].text == text.split(",")[1] @pytest.mark.parametrize("text", ["best...Known", "best...known"]) def test_en_tokenizer_splits_ellipsis_infix(en_tokenizer, text): tokens = en_tokenizer(text) assert len(tokens) == 3 def test_en_tokenizer_splits_double_hyphen_infix(en_tokenizer): tokens = en_tokenizer("No decent--let alone well-bred--people.") assert tokens[0].text == "No" assert tokens[1].text == "decent" assert tokens[2].text == "--" assert tokens[3].text == "let" assert tokens[4].text == "alone" assert tokens[5].text == "well" assert tokens[6].text == "-" assert tokens[7].text == "bred" assert tokens[8].text == "--" assert tokens[9].text == "people" def test_en_tokenizer_splits_period_abbr(en_tokenizer): text = "Today is Tuesday.Mr." tokens = en_tokenizer(text) assert len(tokens) == 5 assert tokens[0].text == "Today" assert tokens[1].text == "is" assert tokens[2].text == "Tuesday" assert tokens[3].text == "." assert tokens[4].text == "Mr." 
@pytest.mark.issue(225) @pytest.mark.xfail(reason="Issue #225 - not yet implemented") def test_en_tokenizer_splits_em_dash_infix(en_tokenizer): tokens = en_tokenizer( """Will this road take me to Puddleton?\u2014No, """ """you'll have to walk there.\u2014Ariel.""" ) assert tokens[6].text == "Puddleton" assert tokens[7].text == "?" assert tokens[8].text == "\u2014" @pytest.mark.parametrize("text,length", [("_MATH_", 3), ("_MATH_.", 4)]) def test_final_period(en_tokenizer, text, length): tokens = en_tokenizer(text) assert len(tokens) == length
4,254
29.833333
80
py
spaCy
spaCy-master/spacy/tests/lang/en/test_punct.py
import pytest from spacy.lang.punctuation import TOKENIZER_PREFIXES from spacy.util import compile_prefix_regex PUNCT_OPEN = ["(", "[", "{", "*"] PUNCT_CLOSE = [")", "]", "}", "*"] PUNCT_PAIRED = [("(", ")"), ("[", "]"), ("{", "}"), ("*", "*")] @pytest.mark.parametrize("text", ["(", "((", "<"]) def test_en_tokenizer_handles_only_punct(en_tokenizer, text): tokens = en_tokenizer(text) assert len(tokens) == len(text) @pytest.mark.parametrize("punct", PUNCT_OPEN) @pytest.mark.parametrize("text", ["Hello"]) def test_en_tokenizer_splits_open_punct(en_tokenizer, punct, text): tokens = en_tokenizer(punct + text) assert len(tokens) == 2 assert tokens[0].text == punct assert tokens[1].text == text @pytest.mark.parametrize("punct", PUNCT_CLOSE) @pytest.mark.parametrize("text", ["Hello"]) def test_en_tokenizer_splits_close_punct(en_tokenizer, punct, text): tokens = en_tokenizer(text + punct) assert len(tokens) == 2 assert tokens[0].text == text assert tokens[1].text == punct @pytest.mark.parametrize("punct", PUNCT_OPEN) @pytest.mark.parametrize("punct_add", ["`"]) @pytest.mark.parametrize("text", ["Hello"]) def test_en_tokenizer_splits_two_diff_open_punct(en_tokenizer, punct, punct_add, text): tokens = en_tokenizer(punct + punct_add + text) assert len(tokens) == 3 assert tokens[0].text == punct assert tokens[1].text == punct_add assert tokens[2].text == text @pytest.mark.parametrize("punct", PUNCT_CLOSE) @pytest.mark.parametrize("punct_add", ["'"]) @pytest.mark.parametrize("text", ["Hello"]) def test_en_tokenizer_splits_two_diff_close_punct(en_tokenizer, punct, punct_add, text): tokens = en_tokenizer(text + punct + punct_add) assert len(tokens) == 3 assert tokens[0].text == text assert tokens[1].text == punct assert tokens[2].text == punct_add @pytest.mark.parametrize("punct", PUNCT_OPEN) @pytest.mark.parametrize("text", ["Hello"]) def test_en_tokenizer_splits_same_open_punct(en_tokenizer, punct, text): tokens = en_tokenizer(punct + punct + punct + text) assert len(tokens) == 4 assert tokens[0].text == punct assert tokens[3].text == text @pytest.mark.parametrize("punct", PUNCT_CLOSE) @pytest.mark.parametrize("text", ["Hello"]) def test_en_tokenizer_splits_same_close_punct(en_tokenizer, punct, text): tokens = en_tokenizer(text + punct + punct + punct) assert len(tokens) == 4 assert tokens[0].text == text assert tokens[1].text == punct @pytest.mark.parametrize("text", ["'The"]) def test_en_tokenizer_splits_open_appostrophe(en_tokenizer, text): tokens = en_tokenizer(text) assert len(tokens) == 2 assert tokens[0].text == "'" @pytest.mark.parametrize("text", ["Hello''"]) def test_en_tokenizer_splits_double_end_quote(en_tokenizer, text): tokens = en_tokenizer(text) assert len(tokens) == 2 tokens_punct = en_tokenizer("''") assert len(tokens_punct) == 1 @pytest.mark.parametrize("punct_open,punct_close", PUNCT_PAIRED) @pytest.mark.parametrize("text", ["Hello"]) def test_en_tokenizer_splits_open_close_punct( en_tokenizer, punct_open, punct_close, text ): tokens = en_tokenizer(punct_open + text + punct_close) assert len(tokens) == 3 assert tokens[0].text == punct_open assert tokens[1].text == text assert tokens[2].text == punct_close @pytest.mark.parametrize("punct_open,punct_close", PUNCT_PAIRED) @pytest.mark.parametrize("punct_open2,punct_close2", [("`", "'")]) @pytest.mark.parametrize("text", ["Hello"]) def test_en_tokenizer_two_diff_punct( en_tokenizer, punct_open, punct_close, punct_open2, punct_close2, text ): tokens = en_tokenizer(punct_open2 + punct_open + text + punct_close + punct_close2) assert 
len(tokens) == 5 assert tokens[0].text == punct_open2 assert tokens[1].text == punct_open assert tokens[2].text == text assert tokens[3].text == punct_close assert tokens[4].text == punct_close2 @pytest.mark.parametrize("text,punct", [("(can't", "(")]) def test_en_tokenizer_splits_pre_punct_regex(text, punct): en_search_prefixes = compile_prefix_regex(TOKENIZER_PREFIXES).search match = en_search_prefixes(text) assert match.group() == punct def test_en_tokenizer_splits_bracket_period(en_tokenizer): text = "(And a 6a.m. run through Washington Park)." tokens = en_tokenizer(text) assert tokens[len(tokens) - 1].text == "."
4,422
33.554688
88
py
spaCy
spaCy-master/spacy/tests/lang/en/test_sbd.py
import pytest from spacy.tokens import Doc from ...util import apply_transition_sequence @pytest.mark.issue(309) def test_issue309(en_vocab): """Test Issue #309: SBD fails on empty string""" doc = Doc(en_vocab, words=[" "], heads=[0], deps=["ROOT"]) assert len(doc) == 1 sents = list(doc.sents) assert len(sents) == 1 @pytest.mark.parametrize("words", [["A", "test", "sentence"]]) @pytest.mark.parametrize("punct", [".", "!", "?", ""]) def test_en_sbd_single_punct(en_vocab, words, punct): heads = [2, 2, 2, 2] if punct else [2, 2, 2] deps = ["dep"] * len(heads) words = [*words, punct] if punct else words doc = Doc(en_vocab, words=words, heads=heads, deps=deps) assert len(doc) == 4 if punct else 3 assert len(list(doc.sents)) == 1 assert sum(len(sent) for sent in doc.sents) == len(doc) @pytest.mark.skip( reason="The step_through API was removed (but should be brought back)" ) def test_en_sentence_breaks(en_vocab, en_parser): # fmt: off words = ["This", "is", "a", "sentence", ".", "This", "is", "another", "one", "."] heads = [1, 1, 3, 1, 1, 6, 6, 8, 6, 6] deps = ["nsubj", "ROOT", "det", "attr", "punct", "nsubj", "ROOT", "det", "attr", "punct"] transition = ["L-nsubj", "S", "L-det", "R-attr", "D", "R-punct", "B-ROOT", "L-nsubj", "S", "L-attr", "R-attr", "D", "R-punct"] # fmt: on doc = Doc(en_vocab, words=words, heads=heads, deps=deps) apply_transition_sequence(en_parser, doc, transition) assert len(list(doc.sents)) == 2 for token in doc: assert token.dep != 0 or token.is_space assert [token.head.i for token in doc] == [1, 1, 3, 1, 1, 6, 6, 8, 6, 6]
1,708
35.361702
85
py
spaCy
spaCy-master/spacy/tests/lang/en/test_text.py
import pytest from spacy.lang.en.lex_attrs import like_num def test_en_tokenizer_handles_long_text(en_tokenizer): text = """Tributes pour in for late British Labour Party leader Tributes poured in from around the world Thursday to the late Labour Party leader John Smith, who died earlier from a massive heart attack aged 55. In Washington, the US State Department issued a statement regretting "the untimely death" of the rapier-tongued Scottish barrister and parliamentarian. "Mr. Smith, throughout his distinguished""" tokens = en_tokenizer(text) assert len(tokens) == 76 @pytest.mark.parametrize( "text,length", [ ("The U.S. Army likes Shock and Awe.", 8), ("U.N. regulations are not a part of their concern.", 10), ("“Isn't it?”", 6), ("""Yes! "I'd rather have a walk", Ms. Comble sighed. """, 15), ("""'Me too!', Mr. P. Delaware cried. """, 11), ("They ran about 10km.", 6), ("But then the 6,000-year ice age came...", 10), ], ) def test_en_tokenizer_handles_cnts(en_tokenizer, text, length): tokens = en_tokenizer(text) assert len(tokens) == length @pytest.mark.parametrize( "text,match", [ ("10", True), ("1", True), ("10,000", True), ("10,00", True), ("999.0", True), ("one", True), ("two", True), ("billion", True), ("dog", False), (",", False), ("1/2", True), ], ) def test_lex_attrs_like_number(en_tokenizer, text, match): tokens = en_tokenizer(text) assert len(tokens) == 1 assert tokens[0].like_num == match @pytest.mark.parametrize( "word", ["third", "Millionth", "100th", "Hundredth", "23rd", "52nd"] ) def test_en_lex_attrs_like_number_for_ordinal(word): assert like_num(word) @pytest.mark.parametrize("word", ["eleven"]) def test_en_lex_attrs_capitals(word): assert like_num(word) assert like_num(word.upper())
1,959
26.605634
77
py
spaCy
spaCy-master/spacy/tests/lang/en/test_tokenizer.py
import pytest


@pytest.mark.issue(351)
def test_issue351(en_tokenizer):
    doc = en_tokenizer("   This is a cat.")
    assert doc[0].idx == 0
    assert len(doc[0]) == 3
    assert doc[1].idx == 3


@pytest.mark.issue(360)
def test_issue360(en_tokenizer):
    """Test tokenization of big ellipsis"""
    tokens = en_tokenizer("$45...............Asking")
    assert len(tokens) > 2


@pytest.mark.issue(736)
@pytest.mark.parametrize("text,number", [("7am", "7"), ("11p.m.", "11")])
def test_issue736(en_tokenizer, text, number):
    """Test that times like "7am" are tokenized correctly and that numbers are
    converted to string."""
    tokens = en_tokenizer(text)
    assert len(tokens) == 2
    assert tokens[0].text == number


@pytest.mark.issue(740)
@pytest.mark.parametrize("text", ["3/4/2012", "01/12/1900"])
def test_issue740(en_tokenizer, text):
    """Test that dates are not split and kept as one token. This behaviour is
    currently inconsistent, since dates separated by hyphens are still split.
    This will be hard to prevent without causing clashes with numeric ranges."""
    tokens = en_tokenizer(text)
    assert len(tokens) == 1


@pytest.mark.issue(744)
@pytest.mark.parametrize("text", ["We were scared", "We Were Scared"])
def test_issue744(en_tokenizer, text):
    """Test that 'were' and 'Were' are excluded from the contractions
    generated by the English tokenizer exceptions."""
    tokens = en_tokenizer(text)
    assert len(tokens) == 3
    assert tokens[1].text.lower() == "were"


@pytest.mark.issue(759)
@pytest.mark.parametrize(
    "text,is_num", [("one", True), ("ten", True), ("teneleven", False)]
)
def test_issue759(en_tokenizer, text, is_num):
    tokens = en_tokenizer(text)
    assert tokens[0].like_num == is_num


@pytest.mark.issue(775)
@pytest.mark.parametrize("text", ["Shell", "shell", "Shed", "shed"])
def test_issue775(en_tokenizer, text):
    """Test that 'Shell' and 'shell' are excluded from the contractions
    generated by the English tokenizer exceptions."""
    tokens = en_tokenizer(text)
    assert len(tokens) == 1
    assert tokens[0].text == text


@pytest.mark.issue(792)
@pytest.mark.parametrize("text", ["This is a string ", "This is a string\u0020"])
def test_issue792(en_tokenizer, text):
    """Test for Issue #792: Trailing whitespace is removed after tokenization."""
    doc = en_tokenizer(text)
    assert "".join([token.text_with_ws for token in doc]) == text


@pytest.mark.issue(792)
@pytest.mark.parametrize("text", ["This is a string", "This is a string\n"])
def test_control_issue792(en_tokenizer, text):
    """Test base case for Issue #792: Non-trailing whitespace"""
    doc = en_tokenizer(text)
    assert "".join([token.text_with_ws for token in doc]) == text


@pytest.mark.issue(859)
@pytest.mark.parametrize(
    "text", ["[email protected]\nThank you!", "[email protected] \nThank you!"]
)
def test_issue859(en_tokenizer, text):
    """Test that no extra space is added in doc.text method."""
    doc = en_tokenizer(text)
    assert doc.text == text


@pytest.mark.issue(886)
@pytest.mark.parametrize("text", ["Datum:2014-06-02\nDokument:76467"])
def test_issue886(en_tokenizer, text):
    """Test that token.idx matches the original text index for texts with newlines."""
    doc = en_tokenizer(text)
    for token in doc:
        assert len(token.text) == len(token.text_with_ws)
        assert text[token.idx] == token.text[0]


@pytest.mark.issue(891)
@pytest.mark.parametrize("text", ["want/need"])
def test_issue891(en_tokenizer, text):
    """Test that / infixes are split correctly."""
    tokens = en_tokenizer(text)
    assert len(tokens) == 3
    assert tokens[1].text == "/"


@pytest.mark.issue(957)
@pytest.mark.slow
def test_issue957(en_tokenizer):
    """Test that spaCy doesn't hang on many punctuation characters.
    If this test hangs, check (new) regular expressions for conflicting greedy operators
    """
    # Skip test if pytest-timeout is not installed
    pytest.importorskip("pytest_timeout")
    for punct in [".", ",", "'", '"', ":", "?", "!", ";", "-"]:
        string = "0"
        for i in range(1, 100):
            string += punct + str(i)
        doc = en_tokenizer(string)
        assert doc


@pytest.mark.parametrize("text", ["[email protected]", "[email protected]"])
@pytest.mark.issue(1698)
def test_issue1698(en_tokenizer, text):
    """Test that doc doesn't identify email-addresses as URLs"""
    doc = en_tokenizer(text)
    assert len(doc) == 1
    assert not doc[0].like_url


@pytest.mark.issue(1758)
def test_issue1758(en_tokenizer):
    """Test that "would've" is handled by the English tokenizer exceptions."""
    tokens = en_tokenizer("would've")
    assert len(tokens) == 2


@pytest.mark.issue(1773)
def test_issue1773(en_tokenizer):
    """Test that spaces don't receive a POS but no TAG. This is the root cause
    of the serialization issue reported in #1773."""
    doc = en_tokenizer("\n")
    if doc[0].pos_ == "SPACE":
        assert doc[0].tag_ != ""


@pytest.mark.issue(3277)
def test_issue3277(es_tokenizer):
    """Test that hyphens are split correctly as prefixes."""
    doc = es_tokenizer("—Yo me llamo... –murmuró el niño– Emilio Sánchez Pérez.")
    assert len(doc) == 14
    assert doc[0].text == "\u2014"
    assert doc[5].text == "\u2013"
    assert doc[9].text == "\u2013"


@pytest.mark.parametrize("word", ["don't", "don’t", "I'd", "I’d"])
@pytest.mark.issue(3521)
def test_issue3521(en_tokenizer, word):
    tok = en_tokenizer(word)[1]
    # 'not' and 'would' should be stopwords, also in their abbreviated forms
    assert tok.is_stop


@pytest.mark.issue(10699)
@pytest.mark.parametrize("text", ["theses", "thisre"])
def test_issue10699(en_tokenizer, text):
    """Test that 'theses' and 'thisre' are excluded from the contractions
    generated by the English tokenizer exceptions."""
    tokens = en_tokenizer(text)
    assert len(tokens) == 1
file_length: 5,924 | avg_line_length: 32.100559 | max_line_length: 88 | extension_type: py
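For context, the en_tokenizer and es_tokenizer arguments used throughout the test file above are pytest fixtures provided by a shared conftest. The snippet below is a minimal sketch of how such fixtures can be defined with spacy.util.get_lang_class; the exact definitions in spacy/tests/conftest.py may differ.

# Hypothetical conftest.py sketch, not the repository's actual fixture code.
import pytest

from spacy.util import get_lang_class


@pytest.fixture(scope="session")
def en_tokenizer():
    # Instantiate the English language class and return its rule-based tokenizer
    return get_lang_class("en")().tokenizer


@pytest.fixture(scope="session")
def es_tokenizer():
    # Spanish tokenizer, used by the cross-language hyphen test (issue #3277) above
    return get_lang_class("es")().tokenizer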
spaCy
spaCy-master/spacy/tests/lang/es/__init__.py
file_length: 0 | avg_line_length: 0 | max_line_length: 0 | extension_type: py
spaCy
spaCy-master/spacy/tests/lang/es/test_exception.py
import pytest


@pytest.mark.parametrize(
    "text,lemma",
    [
        ("aprox.", "aproximadamente"),
        ("esq.", "esquina"),
        ("pág.", "página"),
        ("p.ej.", "por ejemplo"),
    ],
)
def test_es_tokenizer_handles_abbr(es_tokenizer, text, lemma):
    tokens = es_tokenizer(text)
    assert len(tokens) == 1


def test_es_tokenizer_handles_exc_in_text(es_tokenizer):
    text = "Mariano Rajoy ha corrido aprox. medio kilómetro"
    tokens = es_tokenizer(text)
    assert len(tokens) == 7
    assert tokens[4].text == "aprox."
file_length: 546 | avg_line_length: 22.782609 | max_line_length: 62 | extension_type: py
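For context, the single-token abbreviations tested above come from the Spanish tokenizer exceptions. The snippet below is a hedged sketch of registering such special cases at runtime with Tokenizer.add_special_case; the shipped exceptions live in spacy/lang/es/tokenizer_exceptions.py and may be organized differently.

# Illustrative sketch only; Spanish() already ships with these exceptions built in.
from spacy.lang.es import Spanish
from spacy.symbols import ORTH

nlp = Spanish()
for abbr in ["aprox.", "esq.", "pág.", "p.ej."]:
    # Keep the abbreviation, including its trailing period, as one token
    nlp.tokenizer.add_special_case(abbr, [{ORTH: abbr}])

doc = nlp("Mariano Rajoy ha corrido aprox. medio kilómetro")
assert doc[4].text == "aprox."  # mirrors the assertion in the test above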
spaCy
spaCy-master/spacy/tests/lang/es/test_noun_chunks.py
import pytest

from spacy.tokens import Doc

# fmt: off
@pytest.mark.parametrize(
    "words,heads,deps,pos,chunk_offsets",
    [
        # un gato -> "un gato"
        (
            ["un", "gato"],
            [1, 1],
            ["det", "ROOT"],
            ["DET", "NOUN"],
            [(0, 2)],
        ),
        # la camisa negra -> "la camisa negra"
        (
            ["la", "camisa", "negra"],
            [1, 1, 1],
            ["det", "ROOT", "amod"],
            ["DET", "NOUN", "ADJ"],
            [(0, 3)],
        ),
        # un lindo gatito -> "un lindo gatito"
        (
            ["Un", "lindo", "gatito"],
            [2, 2, 2],
            ["det", "amod", "ROOT"],
            ["DET", "ADJ", "NOUN"],
            [(0,3)]
        ),
        # una chica hermosa e inteligente -> una chica hermosa e inteligente
        (
            ["Una", "chica", "hermosa", "e", "inteligente"],
            [1, 1, 1, 4, 2],
            ["det", "ROOT", "amod", "cc", "conj"],
            ["DET", "NOUN", "ADJ", "CCONJ", "ADJ"],
            [(0,5)]
        ),
        # el fabuloso gato pardo -> "el fabuloso gato pardo"
        (
            ["el", "fabuloso", "gato", "pardo"],
            [2, 2, 2, 2],
            ["det", "amod", "ROOT", "amod"],
            ["DET", "ADJ", "NOUN", "ADJ"],
            [(0,4)]
        ),
        # Tengo un gato y un perro -> un gato, un perro
        (
            ["Tengo", "un", "gato", "y", "un", "perro"],
            [0, 2, 0, 5, 5, 0],
            ["ROOT", "det", "obj", "cc", "det", "conj"],
            ["VERB", "DET", "NOUN", "CCONJ", "DET", "NOUN"],
            [(1,3), (4,6)]
        ),
        # Dom Pedro II -> Dom Pedro II
        (
            ["Dom", "Pedro", "II"],
            [0, 0, 0],
            ["ROOT", "flat", "flat"],
            ["PROPN", "PROPN", "PROPN"],
            [(0,3)]
        ),
        # los Estados Unidos -> los Estados Unidos
        (
            ["los", "Estados", "Unidos"],
            [1, 1, 1],
            ["det", "ROOT", "flat"],
            ["DET", "PROPN", "PROPN"],
            [(0,3)]
        ),
        # Miguel de Cervantes -> Miguel de Cervantes
        (
            ["Miguel", "de", "Cervantes"],
            [0, 2, 0],
            ["ROOT", "case", "flat"],
            ["PROPN", "ADP", "PROPN"],
            [(0,3)]
        ),
        (
            ["Rio", "de", "Janeiro"],
            [0, 2, 0],
            ["ROOT", "case", "flat"],
            ["PROPN", "ADP", "PROPN"],
            [(0,3)]
        ),
        # la destrucción de la ciudad -> la destrucción, la ciudad
        (
            ["la", "destrucción", "de", "la", "ciudad"],
            [1, 1, 4, 4, 1],
            ['det', 'ROOT', 'case', 'det', 'nmod'],
            ['DET', 'NOUN', 'ADP', 'DET', 'NOUN'],
            [(0,2), (3,5)]
        ),
        # la traducción de Susana del informe -> la traducción, Susana, informe
        (
            ['la', 'traducción', 'de', 'Susana', 'del', 'informe'],
            [1, 1, 3, 1, 5, 1],
            ['det', 'ROOT', 'case', 'nmod', 'case', 'nmod'],
            ['DET', 'NOUN', 'ADP', 'PROPN', 'ADP', 'NOUN'],
            [(0,2), (3,4), (5,6)]
        ),
        # El gato regordete de Susana y su amigo -> el gato regordete, Susana, su amigo
        (
            ['El', 'gato', 'regordete', 'de', 'Susana', 'y', 'su', 'amigo'],
            [1, 1, 1, 4, 1, 7, 7, 1],
            ['det', 'ROOT', 'amod', 'case', 'nmod', 'cc', 'det', 'conj'],
            ['DET', 'NOUN', 'ADJ', 'ADP', 'PROPN', 'CCONJ', 'DET', 'NOUN'],
            [(0,3), (4,5), (6,8)]
        ),
        # Afirmó que sigue el criterio europeo y que trata de incentivar el mercado donde no lo hay -> el criterio europeo, el mercado, donde, lo
        (
            ['Afirmó', 'que', 'sigue', 'el', 'criterio', 'europeo', 'y', 'que', 'trata', 'de', 'incentivar', 'el', 'mercado', 'donde', 'no', 'lo', 'hay'],
            [0, 2, 0, 4, 2, 4, 8, 8, 2, 10, 8, 12, 10, 16, 16, 16, 0],
            ['ROOT', 'mark', 'ccomp', 'det', 'obj', 'amod', 'cc', 'mark', 'conj', 'mark', 'xcomp', 'det', 'obj', 'obl', 'advmod', 'obj', 'advcl'],
            ['VERB', 'SCONJ', 'VERB', 'DET', 'NOUN', 'ADJ', 'CCONJ', 'SCONJ', 'VERB', 'ADP', 'VERB', 'DET', 'NOUN', 'PRON', 'ADV', 'PRON', 'AUX'],
            [(3,6), (11,13), (13,14), (15,16)]
        ),
        # En este sentido se refirió a la reciente creación del Ministerio de Ciencia y Tecnología y a las primeras declaraciones de su titular, Anna Birulés, sobre el impulso de la investigación, desarrollo e innovación -> este sentido, se, la reciente creación, Ministerio de Ciencia y Tecnología, a las primeras declaraciones, su titular, , Anna Birulés,, el impulso, la investigación, , desarrollo, innovación
        (
            ['En', 'este', 'sentido', 'se', 'refirió', 'a', 'la', 'reciente', 'creación', 'del', 'Ministerio', 'de', 'Ciencia', 'y', 'Tecnología', 'y', 'a', 'las', 'primeras', 'declaraciones', 'de', 'su', 'titular', ',', 'Anna', 'Birulés', ',', 'sobre', 'el', 'impulso', 'de', 'la', 'investigación', ',', 'desarrollo', 'e', 'innovación'],
            [2, 2, 4, 4, 4, 8, 8, 8, 4, 10, 8, 12, 10, 14, 12, 19, 19, 19, 19, 8, 22, 22, 19, 24, 22, 24, 24, 29, 29, 19, 32, 32, 29, 34, 32, 36, 32],
            ['case', 'det', 'obl', 'obj', 'ROOT', 'case', 'det', 'amod', 'obj', 'case', 'nmod', 'case', 'flat', 'cc', 'conj', 'cc', 'case', 'det', 'amod', 'conj', 'case', 'det', 'nmod', 'punct', 'appos', 'flat', 'punct', 'case', 'det', 'nmod', 'case', 'det', 'nmod', 'punct', 'conj', 'cc', 'conj'],
            ['ADP', 'DET', 'NOUN', 'PRON', 'VERB', 'ADP', 'DET', 'ADJ', 'NOUN', 'ADP', 'PROPN', 'ADP', 'PROPN', 'CCONJ', 'PROPN', 'CCONJ', 'ADP', 'DET', 'ADJ', 'NOUN', 'ADP', 'DET', 'NOUN', 'PUNCT', 'PROPN', 'PROPN', 'PUNCT', 'ADP', 'DET', 'NOUN', 'ADP', 'DET', 'NOUN', 'PUNCT', 'NOUN', 'CCONJ', 'NOUN'],
            [(1, 3), (3, 4), (6, 9), (10, 15), (16, 20), (21, 23), (23, 27), (28, 30), (31, 33), (33, 35), (36, 37)]
        ),
        # Asimismo defiende la financiación pública de la investigación básica y pone de manifiesto que las empresas se centran más en la investigación y desarrollo con objetivos de mercado. -> la financiación pública, la investigación básica, manifiesto, las empresas, se, la investigación, desarrollo, objetivos, mercado
        (
            ['Asimismo', 'defiende', 'la', 'financiación', 'pública', 'de', 'la', 'investigación', 'básica', 'y', 'pone', 'de', 'manifiesto', 'que', 'las', 'empresas', 'se', 'centran', 'más', 'en', 'la', 'investigación', 'y', 'desarrollo', 'con', 'objetivos', 'de', 'mercado'],
            [1, 1, 3, 1, 3, 7, 7, 3, 7, 10, 1, 12, 10, 17, 15, 17, 17, 10, 17, 21, 21, 17, 23, 21, 25, 17, 27, 25],
            ['advmod', 'ROOT', 'det', 'obj', 'amod', 'case', 'det', 'nmod', 'amod', 'cc', 'conj', 'case', 'obl', 'mark', 'det', 'nsubj', 'obj', 'ccomp', 'obj', 'case', 'det', 'obl', 'cc', 'conj', 'case', 'obl', 'case', 'nmod'],
            ['ADV', 'VERB', 'DET', 'NOUN', 'ADJ', 'ADP', 'DET', 'NOUN', 'ADJ', 'CCONJ', 'VERB', 'ADP', 'NOUN', 'SCONJ', 'DET', 'NOUN', 'PRON', 'VERB', 'ADV', 'ADP', 'DET', 'NOUN', 'CCONJ', 'NOUN', 'ADP', 'NOUN', 'ADP', 'NOUN'],
            [(2, 5), (6, 9), (12, 13), (14, 16), (16, 17), (20, 22), (23, 24), (25, 26), (27, 28)]
        ),
        # Tras indicar que la inversión media en investigación en la Unión Europea se sitúa en el 1,8 por ciento del PIB, frente al 2,8 por ciento en Japón y EEUU, Couceiro dijo que España está en "el buen camino" y se está creando un entorno propicio para la innovación empresarial' -> la inversión media, investigación, la Unión Europea, se, PIB, Japón, EEUU, Couceiro, España, se, un entorno propicio para la innovación empresaria
        (
            ['Tras', 'indicar', 'que', 'la', 'inversión', 'media', 'en', 'investigación', 'en', 'la', 'Unión', 'Europea', 'se', 'sitúa', 'en', 'el', '1,8', 'por', 'ciento', 'del', 'PIB', ',', 'frente', 'al', '2,8', 'por', 'ciento', 'en', 'Japón', 'y', 'EEUU', ',', 'Couceiro', 'dijo', 'que', 'España', 'está', 'en', '"', 'el', 'buen', 'camino', '"', 'y', 'se', 'está', 'creando', 'un', 'entorno', 'propicio', 'para', 'la', 'innovación', 'empresarial'],
            [1, 33, 13, 4, 13, 4, 7, 4, 10, 10, 4, 10, 13, 1, 16, 16, 13, 18, 16, 20, 16, 24, 24, 22, 13, 26, 24, 28, 24, 30, 28, 1, 33, 33, 41, 41, 41, 41, 41, 41, 41, 33, 41, 46, 46, 46, 33, 48, 46, 48, 52, 52, 49, 52],
            ['mark', 'advcl', 'mark', 'det', 'nsubj', 'amod', 'case', 'nmod', 'case', 'det', 'nmod', 'flat', 'obj', 'ccomp', 'case', 'det', 'obj', 'case', 'compound', 'case', 'nmod', 'punct', 'case', 'fixed', 'obl', 'case', 'compound', 'case', 'nmod', 'cc', 'conj', 'punct', 'nsubj', 'ROOT', 'mark', 'nsubj', 'cop', 'case', 'punct', 'det', 'amod', 'ccomp', 'punct', 'cc', 'obj', 'aux', 'conj', 'det', 'nsubj', 'amod', 'case', 'det', 'nmod', 'amod'],
            ['ADP', 'VERB', 'SCONJ', 'DET', 'NOUN', 'ADJ', 'ADP', 'NOUN', 'ADP', 'DET', 'PROPN', 'PROPN', 'PRON', 'VERB', 'ADP', 'DET', 'NUM', 'ADP', 'NUM', 'ADP', 'PROPN', 'PUNCT', 'NOUN', 'ADP', 'NUM', 'ADP', 'NUM', 'ADP', 'PROPN', 'CCONJ', 'PROPN', 'PUNCT', 'PROPN', 'VERB', 'SCONJ', 'PROPN', 'AUX', 'ADP', 'PUNCT', 'DET', 'ADJ', 'NOUN', 'PUNCT', 'CCONJ', 'PRON', 'AUX', 'VERB', 'DET', 'NOUN', 'ADJ', 'ADP', 'DET', 'NOUN', 'ADJ'],
            [(3, 6), (7, 8), (9, 12), (12, 13), (20, 21), (28, 29), (30, 31), (32, 33), (35, 36), (44, 45), (47, 54)]
        ),
    ],
)
# fmt: on
def test_es_noun_chunks(es_vocab, words, heads, deps, pos, chunk_offsets):
    doc = Doc(es_vocab, words=words, heads=heads, deps=deps, pos=pos)
    assert [(c.start, c.end) for c in doc.noun_chunks] == chunk_offsets


def test_noun_chunks_is_parsed_es(es_tokenizer):
    """Test that noun_chunks raises Value Error for 'es' language if Doc is not parsed."""
    doc = es_tokenizer("en Oxford este verano")
    with pytest.raises(ValueError):
        list(doc.noun_chunks)
file_length: 9,901 | avg_line_length: 60.8875 | max_line_length: 452 | extension_type: py
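For context, each parametrized case above constructs a pre-annotated Doc and compares the spans produced by the Spanish noun-chunk (syntax) iterator against the expected offsets. Below is a minimal standalone sketch of the same pattern, using the first test case; it assumes the es_vocab fixture is equivalent to Spanish().vocab.

# Standalone sketch of one test case; assumes es_vocab == Spanish().vocab.
from spacy.lang.es import Spanish
from spacy.tokens import Doc

vocab = Spanish().vocab
doc = Doc(
    vocab,
    words=["un", "gato"],
    heads=[1, 1],  # head indices are absolute token positions
    deps=["det", "ROOT"],
    pos=["DET", "NOUN"],
)
# The Spanish syntax iterator yields "un gato" as a single chunk with offsets (0, 2)
assert [(c.start, c.end) for c in doc.noun_chunks] == [(0, 2)]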
spaCy
spaCy-master/spacy/tests/lang/es/test_text.py
import pytest

from spacy.lang.es import Spanish
from spacy.lang.es.lex_attrs import like_num


@pytest.mark.issue(3803)
def test_issue3803():
    """Test that spanish num-like tokens have True for like_num attribute."""
    nlp = Spanish()
    text = "2 dos 1000 mil 12 doce"
    doc = nlp(text)
    assert [t.like_num for t in doc] == [True, True, True, True, True, True]


def test_es_tokenizer_handles_long_text(es_tokenizer):
    text = """Cuando a José Mujica lo invitaron a dar una conferencia en Oxford este verano, su cabeza hizo "crac". La "más antigua" universidad de habla inglesa, esa que cobra decenas de miles de euros de matrícula a sus alumnos y en cuyos salones han disertado desde Margaret Thatcher hasta Stephen Hawking, reclamaba los servicios de este viejo de 81 años, formado en un colegio público en Montevideo y que pregona las bondades de la vida austera."""
    tokens = es_tokenizer(text)
    assert len(tokens) == 90


@pytest.mark.parametrize(
    "text,length",
    [
        ("¿Por qué José Mujica?", 6),
        ("“¿Oh no?”", 6),
        ("""¡Sí! "Vámonos", contestó José Arcadio Buendía""", 11),
        ("Corrieron aprox. 10km.", 5),
        ("Y entonces por qué...", 5),
    ],
)
def test_es_tokenizer_handles_cnts(es_tokenizer, text, length):
    tokens = es_tokenizer(text)
    assert len(tokens) == length


@pytest.mark.parametrize(
    "text,match",
    [
        ("10", True),
        ("1", True),
        ("10.000", True),
        ("1000", True),
        ("999,0", True),
        ("uno", True),
        ("dos", True),
        ("billón", True),
        ("veintiséis", True),
        ("perro", False),
        (",", False),
        ("1/2", True),
    ],
)
def test_lex_attrs_like_number(es_tokenizer, text, match):
    tokens = es_tokenizer(text)
    assert len(tokens) == 1
    assert tokens[0].like_num == match


@pytest.mark.parametrize("word", ["once"])
def test_es_lex_attrs_capitals(word):
    assert like_num(word)
    assert like_num(word.upper())
file_length: 2,004 | avg_line_length: 25.733333 | max_line_length: 83 | extension_type: py
spaCy
spaCy-master/spacy/tests/lang/et/__init__.py
file_length: 0 | avg_line_length: 0 | max_line_length: 0 | extension_type: py
spaCy
spaCy-master/spacy/tests/lang/et/test_text.py
import pytest


def test_long_text(et_tokenizer):
    # Excerpt: European Convention on Human Rights
    text = """
arvestades, et nimetatud deklaratsiooni eesmärk on tagada selles kuulutatud õiguste üldine ja tõhus tunnustamine ning järgimine; arvestades, et Euroopa Nõukogu eesmärk on saavutada tema liikmete suurem ühtsus ning et üheks selle eesmärgi saavutamise vahendiks on inimõiguste ja põhivabaduste järgimine ning elluviimine; taaskinnitades oma sügavat usku neisse põhivabadustesse, mis on õigluse ja rahu aluseks maailmas ning mida kõige paremini tagab ühelt poolt tõhus poliitiline demokraatia ning teiselt poolt inimõiguste, millest nad sõltuvad, üldine mõistmine ja järgimine;
"""
    tokens = et_tokenizer(text)
    assert len(tokens) == 94


@pytest.mark.xfail
def test_ordinal_number(et_tokenizer):
    text = "10. detsembril 1948"
    tokens = et_tokenizer(text)
    assert len(tokens) == 3
file_length: 909 | avg_line_length: 32.703704 | max_line_length: 66 | extension_type: py
spaCy
spaCy-master/spacy/tests/lang/et/test_tokenizer.py
import pytest

ET_BASIC_TOKENIZATION_TESTS = [
    (
        "Kedagi ei või piinata ega ebainimlikult või alandavalt kohelda "
        "ega karistada.",
        [
            "Kedagi",
            "ei",
            "või",
            "piinata",
            "ega",
            "ebainimlikult",
            "või",
            "alandavalt",
            "kohelda",
            "ega",
            "karistada",
            ".",
        ],
    ),
]


@pytest.mark.parametrize("text,expected_tokens", ET_BASIC_TOKENIZATION_TESTS)
def test_et_tokenizer_basic(et_tokenizer, text, expected_tokens):
    tokens = et_tokenizer(text)
    token_list = [token.text for token in tokens if not token.is_space]
    assert expected_tokens == token_list
file_length: 733 | avg_line_length: 23.466667 | max_line_length: 77 | extension_type: py
spaCy
spaCy-master/spacy/tests/lang/eu/__init__.py
file_length: 0 | avg_line_length: 0 | max_line_length: 0 | extension_type: py