sentence1: string (lengths 52–3.87M) · sentence2: string (lengths 1–47.2k) · label: 1 class (entailment)
def _capture_snapshot(a_snapshot: Snapshot, resolved_kwargs: Mapping[str, Any]) -> Any:
    """
    Capture the snapshot from the keyword arguments resolved before the function call
    (including the default values).

    :param a_snapshot: snapshot to be captured
    :param resolved_kwargs: resolved keyword arguments (including the default values)
    :return: captured value
    """
    if a_snapshot.arg is not None:
        if a_snapshot.arg not in resolved_kwargs:
            raise TypeError(("The argument of the snapshot has not been set: {}. "
                             "Does the original function define it? Did you supply it in the call?").format(
                                 a_snapshot.arg))

        value = a_snapshot.capture(**{a_snapshot.arg: resolved_kwargs[a_snapshot.arg]})
    else:
        value = a_snapshot.capture()

    return value
Capture the snapshot from the keyword arguments resolved before the function call (including the default values). :param a_snapshot: snapshot to be captured :param resolved_kwargs: resolved keyword arguments (including the default values) :return: captured value
entailment
def decorate_with_checker(func: CallableT) -> CallableT:
    """Decorate the function with a checker that verifies the preconditions and postconditions."""
    assert not hasattr(func, "__preconditions__"), \
        "Expected func to have no list of preconditions (there should be only a single contract checker per function)."

    assert not hasattr(func, "__postconditions__"), \
        "Expected func to have no list of postconditions (there should be only a single contract checker per function)."

    assert not hasattr(func, "__postcondition_snapshots__"), \
        "Expected func to have no list of postcondition snapshots (there should be only a single contract checker " \
        "per function)."

    sign = inspect.signature(func)
    param_names = list(sign.parameters.keys())

    # Determine the default argument values.
    kwdefaults = dict()  # type: Dict[str, Any]

    # Add to the defaults all the values that are needed by the contracts.
    for param in sign.parameters.values():
        if param.default != inspect.Parameter.empty:
            kwdefaults[param.name] = param.default

    def wrapper(*args, **kwargs):
        """Wrap func by checking the preconditions and postconditions."""
        preconditions = getattr(wrapper, "__preconditions__")  # type: List[List[Contract]]
        snapshots = getattr(wrapper, "__postcondition_snapshots__")  # type: List[Snapshot]
        postconditions = getattr(wrapper, "__postconditions__")  # type: List[Contract]

        resolved_kwargs = _kwargs_from_call(param_names=param_names, kwdefaults=kwdefaults, args=args, kwargs=kwargs)

        if postconditions:
            if 'result' in resolved_kwargs:
                raise TypeError("Unexpected argument 'result' in a function decorated with postconditions.")

            if 'OLD' in resolved_kwargs:
                raise TypeError("Unexpected argument 'OLD' in a function decorated with postconditions.")

        # Assert the preconditions in groups. This is necessary to implement "require else" logic when a class
        # weakens the preconditions of its base class.
        violation_err = None  # type: Optional[ViolationError]
        for group in preconditions:
            violation_err = None
            try:
                for contract in group:
                    _assert_precondition(contract=contract, resolved_kwargs=resolved_kwargs)
                break
            except ViolationError as err:
                violation_err = err

        if violation_err is not None:
            raise violation_err  # pylint: disable=raising-bad-type

        # Capture the snapshots
        if postconditions:
            old_as_mapping = dict()  # type: MutableMapping[str, Any]
            for snap in snapshots:
                # This assert is just a last defense.
                # Conflicting snapshot names should have been caught before, either during the decoration or
                # in the meta-class.
                assert snap.name not in old_as_mapping, "Snapshots with the conflicting name: {}".format(snap.name)
                old_as_mapping[snap.name] = _capture_snapshot(a_snapshot=snap, resolved_kwargs=resolved_kwargs)

            resolved_kwargs['OLD'] = _Old(mapping=old_as_mapping)

        # Execute the wrapped function
        result = func(*args, **kwargs)

        if postconditions:
            resolved_kwargs['result'] = result

            # Assert the postconditions as a conjunction
            for contract in postconditions:
                _assert_postcondition(contract=contract, resolved_kwargs=resolved_kwargs)

        return result  # type: ignore

    # Copy __doc__ and other properties so that doctests can run
    functools.update_wrapper(wrapper=wrapper, wrapped=func)

    assert not hasattr(wrapper, "__preconditions__"), "Expected no preconditions set on a pristine contract checker."
    assert not hasattr(wrapper, "__postcondition_snapshots__"), \
        "Expected no postcondition snapshots set on a pristine contract checker."
    assert not hasattr(wrapper, "__postconditions__"), "Expected no postconditions set on a pristine contract checker."

    # Precondition is a list of condition groups (i.e. disjunctive normal form):
    # each group consists of AND'ed preconditions, while the groups are OR'ed.
    #
    # This is necessary in order to implement "require else" logic when a class weakens the preconditions of
    # its base class.
    setattr(wrapper, "__preconditions__", [])
    setattr(wrapper, "__postcondition_snapshots__", [])
    setattr(wrapper, "__postconditions__", [])

    return wrapper
Decorate the function with a checker that verifies the preconditions and postconditions.
entailment
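To see why preconditions are stored as groups in disjunctive normal form, consider how a subclass weakens an inherited precondition with "require else": it suffices that one group holds. A minimal sketch of just the grouping logic (check_dnf and the lambdas are hypothetical, independent of any contract library):

    def check_dnf(groups, kwargs):
        """Return True if at least one group of AND'ed conditions holds (OR over the groups)."""
        for group in groups:
            if all(condition(**kwargs) for condition in group):
                return True
        return not groups  # no groups at all means there is no precondition

    # Base class requires x > 0; the subclass weakens it with "require else" x == -1.
    groups = [[lambda x: x > 0], [lambda x: x == -1]]
    assert check_dnf(groups, {'x': 5})
    assert check_dnf(groups, {'x': -1})
    assert not check_dnf(groups, {'x': -3})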
def _find_self(param_names: List[str], args: Tuple[Any, ...], kwargs: Dict[str, Any]) -> Any:
    """Find the instance of ``self`` in the arguments."""
    instance_i = param_names.index("self")
    if instance_i < len(args):
        instance = args[instance_i]
    else:
        instance = kwargs["self"]

    return instance
Find the instance of ``self`` in the arguments.
entailment
def _decorate_with_invariants(func: CallableT, is_init: bool) -> CallableT:
    """
    Decorate the function ``func`` of the class ``cls`` with invariant checks.

    If the function has been already decorated with invariant checks, the function returns immediately.

    :param func: function to be wrapped
    :param is_init: True if the ``func`` is __init__
    :return: function wrapped with invariant checks
    """
    if _already_decorated_with_invariants(func=func):
        return func

    sign = inspect.signature(func)
    param_names = list(sign.parameters.keys())

    if is_init:
        def wrapper(*args, **kwargs):
            """Wrap __init__ method of a class by checking the invariants *after* the invocation."""
            result = func(*args, **kwargs)
            instance = _find_self(param_names=param_names, args=args, kwargs=kwargs)

            for contract in instance.__class__.__invariants__:
                _assert_invariant(contract=contract, instance=instance)

            return result
    else:
        def wrapper(*args, **kwargs):
            """Wrap a function of a class by checking the invariants *before* and *after* the invocation."""
            instance = _find_self(param_names=param_names, args=args, kwargs=kwargs)

            for contract in instance.__class__.__invariants__:
                _assert_invariant(contract=contract, instance=instance)

            result = func(*args, **kwargs)

            for contract in instance.__class__.__invariants__:
                _assert_invariant(contract=contract, instance=instance)

            return result

    functools.update_wrapper(wrapper=wrapper, wrapped=func)

    setattr(wrapper, "__is_invariant_check__", True)

    return wrapper
Decorate the function ``func`` of the class ``cls`` with invariant checks. If the function has been already decorated with invariant checks, the function returns immediately. :param func: function to be wrapped :param is_init: True if the ``func`` is __init__ :return: function wrapped with invariant checks
entailment
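The asymmetry between the two wrappers — __init__ is checked only after it runs (no valid instance exists beforehand), ordinary methods both before and after — can be mimicked with plain asserts; the Account class below is a hypothetical illustration, not the decorator above:

    class Account:
        """Toy class whose invariant is balance >= 0."""

        def __init__(self, balance):
            self.balance = balance
            assert self.balance >= 0  # checked only *after* __init__

        def withdraw(self, amount):
            assert self.balance >= 0  # checked *before* the method
            self.balance -= amount
            assert self.balance >= 0  # and *after* it

    Account(10).withdraw(5)    # fine
    # Account(10).withdraw(20) # would trip the post-invariant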
def _already_decorated_with_invariants(func: CallableT) -> bool:
    """Check if the function has been already decorated with an invariant check by going through its decorator stack."""
    already_decorated = False
    for a_decorator in _walk_decorator_stack(func=func):
        if getattr(a_decorator, "__is_invariant_check__", False):
            already_decorated = True
            break

    return already_decorated
Check if the function has been already decorated with an invariant check by going through its decorator stack.
entailment
def add_invariant_checks(cls: type) -> None:
    """Decorate each of the class functions with invariant checks if not already decorated."""
    # Candidates for the decoration as list of (name, dir() value)
    init_name_func = None  # type: Optional[Tuple[str, Callable[..., None]]]
    names_funcs = []  # type: List[Tuple[str, Callable[..., None]]]
    names_properties = []  # type: List[Tuple[str, property]]

    # Filter out entries in the directory which are certainly not candidates for decoration.
    for name, value in [(name, getattr(cls, name)) for name in dir(cls)]:
        # We need to ignore __repr__ to prevent endless loops when generating error messages.
        # __getattribute__, __setattr__ and __delattr__ are too invasive and alter the state of the instance.
        # Hence we don't consider them "public".
        if name in ["__repr__", "__getattribute__", "__setattr__", "__delattr__"]:
            continue

        if name == "__init__":
            assert inspect.isfunction(value) or isinstance(value, _SLOT_WRAPPER_TYPE), \
                "Expected __init__ to be either a function or a slot wrapper, but got: {}".format(type(value))

            init_name_func = (name, value)
            continue

        if not inspect.isfunction(value) and not isinstance(value, _SLOT_WRAPPER_TYPE) and \
                not isinstance(value, property):
            continue

        # Ignore class methods
        if getattr(value, "__self__", None) is cls:
            continue

        # Ignore "protected"/"private" methods
        if name.startswith("_") and not (name.startswith("__") and name.endswith("__")):
            continue

        if inspect.isfunction(value) or isinstance(value, _SLOT_WRAPPER_TYPE):
            names_funcs.append((name, value))
        elif isinstance(value, property):
            names_properties.append((name, value))
        else:
            raise NotImplementedError("Unhandled directory entry of class {} for {}: {}".format(cls, name, value))

    if init_name_func:
        name, func = init_name_func
        wrapper = _decorate_with_invariants(func=func, is_init=True)
        setattr(cls, name, wrapper)

    for name, func in names_funcs:
        wrapper = _decorate_with_invariants(func=func, is_init=False)
        setattr(cls, name, wrapper)

    for name, prop in names_properties:
        new_prop = property(  # type: ignore
            fget=_decorate_with_invariants(func=prop.fget, is_init=False) if prop.fget else None,
            fset=_decorate_with_invariants(func=prop.fset, is_init=False) if prop.fset else None,
            fdel=_decorate_with_invariants(func=prop.fdel, is_init=False) if prop.fdel else None,
            doc=prop.__doc__)
        setattr(cls, name, new_prop)
Decorate each of the class functions with invariant checks if not already decorated.
entailment
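The filtering above can be exercised with a toy class: public methods and properties are collected, private names are skipped. A simplified sketch (it shortcuts the dunder handling by skipping every underscored name):

    import inspect

    class Sample:
        def public(self): pass
        def _private(self): pass
        @property
        def value(self): return 0

    names_funcs, names_properties = [], []
    for name in dir(Sample):
        if name.startswith("_"):  # demo shortcut: skip private names and dunders alike
            continue
        attr = getattr(Sample, name)
        if inspect.isfunction(attr):
            names_funcs.append(name)
        elif isinstance(attr, property):
            names_properties.append(name)

    print(names_funcs, names_properties)  # ['public'] ['value']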
def main() -> None:
    """Execute the main routine."""
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        "--module",
        help="name of the module to import",
        choices=[
            "functions_100_with_no_contract",
            "functions_100_with_1_contract",
            "functions_100_with_5_contracts",
            "functions_100_with_10_contracts",
            "functions_100_with_1_disabled_contract",
            "functions_100_with_5_disabled_contracts",
            "functions_100_with_10_disabled_contracts",
            "classes_100_with_no_invariant",
            "classes_100_with_1_invariant",
            "classes_100_with_5_invariants",
            "classes_100_with_10_invariants",
            "classes_100_with_1_disabled_invariant",
            "classes_100_with_5_disabled_invariants",
            "classes_100_with_10_disabled_invariants",
        ],
        required=True)

    args = parser.parse_args()

    a_module = str(args.module)

    if a_module == "functions_100_with_no_contract":
        start = time.time()
        import functions_100_with_no_contract
        print(time.time() - start)
    elif a_module == "functions_100_with_1_contract":
        start = time.time()
        import functions_100_with_1_contract
        print(time.time() - start)
    elif a_module == "functions_100_with_5_contracts":
        start = time.time()
        import functions_100_with_5_contracts
        print(time.time() - start)
    elif a_module == "functions_100_with_10_contracts":
        start = time.time()
        import functions_100_with_10_contracts
        print(time.time() - start)
    elif a_module == "functions_100_with_1_disabled_contract":
        start = time.time()
        import functions_100_with_1_disabled_contract
        print(time.time() - start)
    elif a_module == "functions_100_with_5_disabled_contracts":
        start = time.time()
        import functions_100_with_5_disabled_contracts
        print(time.time() - start)
    elif a_module == "functions_100_with_10_disabled_contracts":
        start = time.time()
        import functions_100_with_10_disabled_contracts
        print(time.time() - start)
    elif a_module == "classes_100_with_no_invariant":
        start = time.time()
        import classes_100_with_no_invariant
        print(time.time() - start)
    elif a_module == "classes_100_with_1_invariant":
        start = time.time()
        import classes_100_with_1_invariant
        print(time.time() - start)
    elif a_module == "classes_100_with_5_invariants":
        start = time.time()
        import classes_100_with_5_invariants
        print(time.time() - start)
    elif a_module == "classes_100_with_10_invariants":
        start = time.time()
        import classes_100_with_10_invariants
        print(time.time() - start)
    elif a_module == "classes_100_with_1_disabled_invariant":
        start = time.time()
        import classes_100_with_1_disabled_invariant
        print(time.time() - start)
    elif a_module == "classes_100_with_5_disabled_invariants":
        start = time.time()
        import classes_100_with_5_disabled_invariants
        print(time.time() - start)
    elif a_module == "classes_100_with_10_disabled_invariants":
        start = time.time()
        import classes_100_with_10_disabled_invariants
        print(time.time() - start)
    else:
        raise NotImplementedError("Unhandled module: {}".format(a_module))
Execute the main routine.
entailment
def main() -> None:
    """Execute the main routine."""
    modules = [
        "functions_100_with_no_contract",
        "functions_100_with_1_contract",
        "functions_100_with_5_contracts",
        "functions_100_with_10_contracts",
        "functions_100_with_1_disabled_contract",
        "functions_100_with_5_disabled_contracts",
        "functions_100_with_10_disabled_contracts",
        "classes_100_with_no_invariant",
        "classes_100_with_1_invariant",
        "classes_100_with_5_invariants",
        "classes_100_with_10_invariants",
        "classes_100_with_1_disabled_invariant",
        "classes_100_with_5_disabled_invariants",
        "classes_100_with_10_disabled_invariants",
    ]

    for a_module in modules:
        durations = []  # type: List[float]
        for i in range(0, 10):
            duration = float(
                subprocess.check_output(["./measure.py", "--module", a_module],
                                        cwd=os.path.dirname(__file__)).strip())
            durations.append(duration)

        # The measured durations are in seconds; multiply by 1e3 to report milliseconds.
        print("Duration to import the module {} (in milliseconds): {:.2f} ± {:.2f}".format(
            a_module, statistics.mean(durations) * 1e3, statistics.stdev(durations) * 1e3))
Execute the main routine.
entailment
def main() -> None:
    """Execute the main routine."""
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("--outdir", help="output directory", default=os.path.dirname(__file__))
    args = parser.parse_args()

    outdir = pathlib.Path(args.outdir)

    if not outdir.exists():
        raise FileNotFoundError("Output directory is missing: {}".format(outdir))

    for contracts in [0, 1, 5, 10]:
        if contracts == 0:
            pth = outdir / "functions_100_with_no_contract.py"
        elif contracts == 1:
            pth = outdir / "functions_100_with_1_contract.py"
        else:
            pth = outdir / "functions_100_with_{}_contracts.py".format(contracts)

        text = generate_functions(functions=100, contracts=contracts, disabled=False)
        pth.write_text(text)

    for contracts in [1, 5, 10]:
        if contracts == 1:
            pth = outdir / "functions_100_with_1_disabled_contract.py"
        else:
            pth = outdir / "functions_100_with_{}_disabled_contracts.py".format(contracts)

        text = generate_functions(functions=100, contracts=contracts, disabled=True)
        pth.write_text(text)

    for invariants in [0, 1, 5, 10]:
        if invariants == 0:
            pth = outdir / "classes_100_with_no_invariant.py"
        elif invariants == 1:
            pth = outdir / "classes_100_with_1_invariant.py"
        else:
            pth = outdir / "classes_100_with_{}_invariants.py".format(invariants)

        text = generate_classes(classes=100, invariants=invariants, disabled=False)
        pth.write_text(text)

    for invariants in [1, 5, 10]:
        if invariants == 1:
            pth = outdir / "classes_100_with_1_disabled_invariant.py"
        else:
            pth = outdir / "classes_100_with_{}_disabled_invariants.py".format(invariants)

        text = generate_classes(classes=100, invariants=invariants, disabled=True)
        pth.write_text(text)
Execute the main routine.
entailment
def visit_Num(self, node: ast.Num) -> Union[int, float]:
    """Recompute the value as the number at the node."""
    result = node.n

    self.recomputed_values[node] = result
    return result
Recompute the value as the number at the node.
entailment
def visit_Str(self, node: ast.Str) -> str:
    """Recompute the value as the string at the node."""
    result = node.s

    self.recomputed_values[node] = result
    return result
Recompute the value as the string at the node.
entailment
def visit_Bytes(self, node: ast.Bytes) -> bytes:
    """Recompute the value as the bytes at the node."""
    result = node.s

    self.recomputed_values[node] = result
    return result
Recompute the value as the bytes at the node.
entailment
def visit_List(self, node: ast.List) -> List[Any]:
    """Visit the elements and assemble the results into a list."""
    if isinstance(node.ctx, ast.Store):
        raise NotImplementedError("Can not compute the value of a Store on a list")

    result = [self.visit(node=elt) for elt in node.elts]

    self.recomputed_values[node] = result
    return result
Visit the elements and assemble the results into a list.
entailment
def visit_Tuple(self, node: ast.Tuple) -> Tuple[Any, ...]:
    """Visit the elements and assemble the results into a tuple."""
    if isinstance(node.ctx, ast.Store):
        raise NotImplementedError("Can not compute the value of a Store on a tuple")

    result = tuple(self.visit(node=elt) for elt in node.elts)

    self.recomputed_values[node] = result
    return result
Visit the elements and assemble the results into a tuple.
entailment
def visit_Set(self, node: ast.Set) -> Set[Any]:
    """Visit the elements and assemble the results into a set."""
    result = set(self.visit(node=elt) for elt in node.elts)

    self.recomputed_values[node] = result
    return result
Visit the elements and assemble the results into a set.
entailment
def visit_Dict(self, node: ast.Dict) -> Dict[Any, Any]:
    """Visit keys and values and assemble a dictionary with the results."""
    recomputed_dict = dict()  # type: Dict[Any, Any]
    for key, val in zip(node.keys, node.values):
        recomputed_dict[self.visit(node=key)] = self.visit(node=val)

    self.recomputed_values[node] = recomputed_dict
    return recomputed_dict
Visit keys and values and assemble a dictionary with the results.
entailment
def visit_NameConstant(self, node: ast.NameConstant) -> Any:
    """Forward the node value as a result."""
    self.recomputed_values[node] = node.value
    return node.value
Forward the node value as a result.
entailment
def visit_Name(self, node: ast.Name) -> Any:
    """Load the variable by looking it up in the variable look-up and in the built-ins."""
    if not isinstance(node.ctx, ast.Load):
        raise NotImplementedError("Can only compute a value of Load on a name {}, but got context: {}".format(
            node.id, node.ctx))

    result = None  # type: Optional[Any]

    if node.id in self._name_to_value:
        result = self._name_to_value[node.id]

    if result is None and hasattr(builtins, node.id):
        result = getattr(builtins, node.id)

    if result is None and node.id != "None":
        # The variable refers to a name local of the lambda (e.g., a target in the generator expression).
        # Since we evaluate generator expressions with runtime compilation, a placeholder is returned here.
        return PLACEHOLDER

    self.recomputed_values[node] = result
    return result
Load the variable by looking it up in the variable look-up and in the built-ins.
entailment
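The lookup order — the visitor's own name-to-value mapping first, then the built-ins — can be reproduced in isolation; the resolve helper below is a hypothetical stand-in for the visitor logic:

    import builtins

    def resolve(name, name_to_value):
        """Resolve ``name`` the way visit_Name does: local mapping first, then built-ins."""
        if name in name_to_value:
            return name_to_value[name]
        if hasattr(builtins, name):
            return getattr(builtins, name)
        raise NameError(name)

    print(resolve("x", {"x": 42}))      # 42, from the mapping
    print(resolve("len", {}))           # <built-in function len>
    print(resolve("len", {"len": 3}))   # 3 -- the mapping shadows the built-in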
def visit_Expr(self, node: ast.Expr) -> Any:
    """Visit the node's ``value``."""
    result = self.visit(node=node.value)

    self.recomputed_values[node] = result
    return result
Visit the node's ``value``.
entailment
def visit_UnaryOp(self, node: ast.UnaryOp) -> Any:
    """Visit the node operand and apply the operation on the result."""
    if isinstance(node.op, ast.UAdd):
        result = +self.visit(node=node.operand)
    elif isinstance(node.op, ast.USub):
        result = -self.visit(node=node.operand)
    elif isinstance(node.op, ast.Not):
        result = not self.visit(node=node.operand)
    elif isinstance(node.op, ast.Invert):
        result = ~self.visit(node=node.operand)
    else:
        raise NotImplementedError("Unhandled op of {}: {}".format(node, node.op))

    self.recomputed_values[node] = result
    return result
Visit the node operand and apply the operation on the result.
entailment
def visit_BinOp(self, node: ast.BinOp) -> Any:
    """Recursively visit the left and right operand, respectively, and apply the operation on the results."""
    # pylint: disable=too-many-branches
    left = self.visit(node=node.left)
    right = self.visit(node=node.right)

    if isinstance(node.op, ast.Add):
        result = left + right
    elif isinstance(node.op, ast.Sub):
        result = left - right
    elif isinstance(node.op, ast.Mult):
        result = left * right
    elif isinstance(node.op, ast.Div):
        result = left / right
    elif isinstance(node.op, ast.FloorDiv):
        result = left // right
    elif isinstance(node.op, ast.Mod):
        result = left % right
    elif isinstance(node.op, ast.Pow):
        result = left**right
    elif isinstance(node.op, ast.LShift):
        result = left << right
    elif isinstance(node.op, ast.RShift):
        result = left >> right
    elif isinstance(node.op, ast.BitOr):
        result = left | right
    elif isinstance(node.op, ast.BitXor):
        result = left ^ right
    elif isinstance(node.op, ast.BitAnd):
        result = left & right
    elif isinstance(node.op, ast.MatMult):
        result = left @ right
    else:
        raise NotImplementedError("Unhandled op of {}: {}".format(node, node.op))

    self.recomputed_values[node] = result
    return result
Recursively visit the left and right operand, respectively, and apply the operation on the results.
entailment
def visit_BoolOp(self, node: ast.BoolOp) -> Any:
    """Recursively visit the operands and apply the operation on them."""
    values = [self.visit(value_node) for value_node in node.values]

    if isinstance(node.op, ast.And):
        result = functools.reduce(lambda left, right: left and right, values, True)
    elif isinstance(node.op, ast.Or):
        # The identity element of OR is False; starting from True would make the result always truthy.
        result = functools.reduce(lambda left, right: left or right, values, False)
    else:
        raise NotImplementedError("Unhandled op of {}: {}".format(node, node.op))

    self.recomputed_values[node] = result
    return result
Recursively visit the operands and apply the operation on them.
entailment
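The initial value passed to functools.reduce matters here: True is the identity element for AND, False for OR. A quick check:

    import functools

    values = [False, False, True]
    assert functools.reduce(lambda l, r: l and r, values, True) is False   # AND over the list
    assert functools.reduce(lambda l, r: l or r, values, False) is True    # OR over the list
    assert functools.reduce(lambda l, r: l or r, [False], True) is True    # wrong identity masks the operands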
def visit_Compare(self, node: ast.Compare) -> Any:
    """Recursively visit the comparators and apply the operations on them."""
    # pylint: disable=too-many-branches
    left = self.visit(node=node.left)

    comparators = [self.visit(node=comparator) for comparator in node.comparators]

    result = None  # type: Optional[Any]
    for comparator, op in zip(comparators, node.ops):
        if isinstance(op, ast.Eq):
            comparison = left == comparator
        elif isinstance(op, ast.NotEq):
            comparison = left != comparator
        elif isinstance(op, ast.Lt):
            comparison = left < comparator
        elif isinstance(op, ast.LtE):
            comparison = left <= comparator
        elif isinstance(op, ast.Gt):
            comparison = left > comparator
        elif isinstance(op, ast.GtE):
            comparison = left >= comparator
        elif isinstance(op, ast.Is):
            comparison = left is comparator
        elif isinstance(op, ast.IsNot):
            comparison = left is not comparator
        elif isinstance(op, ast.In):
            comparison = left in comparator
        elif isinstance(op, ast.NotIn):
            comparison = left not in comparator
        else:
            raise NotImplementedError("Unhandled op of {}: {}".format(node, op))

        if result is None:
            result = comparison
        else:
            result = result and comparison

        left = comparator

    self.recomputed_values[node] = result
    return result
Recursively visit the comparators and apply the operations on them.
entailment
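The running ``result = result and comparison`` together with ``left = comparator`` is what reproduces Python's comparison chaining. A standalone trace for ``1 < x < 3`` with x = 5:

    x = 5
    left, result = 1, None
    for comparator in [x, 3]:
        comparison = left < comparator
        result = comparison if result is None else (result and comparison)
        left = comparator            # the right operand becomes the next left operand
    assert result == (1 < x < 3)     # both are False: 1 < 5 holds, but 5 < 3 does not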
def visit_Call(self, node: ast.Call) -> Any:
    """Visit the function and the arguments and finally make the function call with them."""
    func = self.visit(node=node.func)

    args = []  # type: List[Any]
    for arg_node in node.args:
        if isinstance(arg_node, ast.Starred):
            args.extend(self.visit(node=arg_node))
        else:
            args.append(self.visit(node=arg_node))

    kwargs = dict()  # type: Dict[str, Any]
    for keyword in node.keywords:
        if keyword.arg is None:
            kw = self.visit(node=keyword.value)
            for key, val in kw.items():
                kwargs[key] = val
        else:
            kwargs[keyword.arg] = self.visit(node=keyword.value)

    result = func(*args, **kwargs)

    self.recomputed_values[node] = result
    return result
Visit the function and the arguments and finally make the function call with them.
entailment
def visit_IfExp(self, node: ast.IfExp) -> Any:
    """Visit the ``test``, and depending on its outcome, the ``body`` or ``orelse``."""
    test = self.visit(node=node.test)

    if test:
        result = self.visit(node=node.body)
    else:
        result = self.visit(node=node.orelse)

    self.recomputed_values[node] = result
    return result
Visit the ``test``, and depending on its outcome, the ``body`` or ``orelse``.
entailment
def visit_Attribute(self, node: ast.Attribute) -> Any:
    """Visit the node's ``value`` and get the attribute from the result."""
    value = self.visit(node=node.value)
    if not isinstance(node.ctx, ast.Load):
        raise NotImplementedError(
            "Can only compute a value of Load on the attribute {}, but got context: {}".format(node.attr, node.ctx))

    result = getattr(value, node.attr)

    self.recomputed_values[node] = result
    return result
Visit the node's ``value`` and get the attribute from the result.
entailment
def visit_Index(self, node: ast.Index) -> Any:
    """Visit the node's ``value``."""
    result = self.visit(node=node.value)

    self.recomputed_values[node] = result
    return result
Visit the node's ``value``.
entailment
def visit_Slice(self, node: ast.Slice) -> slice:
    """Visit ``lower``, ``upper`` and ``step`` and recompute the node as a ``slice``."""
    lower = None  # type: Optional[int]
    if node.lower is not None:
        lower = self.visit(node=node.lower)

    upper = None  # type: Optional[int]
    if node.upper is not None:
        upper = self.visit(node=node.upper)

    step = None  # type: Optional[int]
    if node.step is not None:
        step = self.visit(node=node.step)

    result = slice(lower, upper, step)

    self.recomputed_values[node] = result
    return result
Visit ``lower``, ``upper`` and ``step`` and recompute the node as a ``slice``.
entailment
def visit_ExtSlice(self, node: ast.ExtSlice) -> Tuple[Any, ...]:
    """Visit each dimension of the advanced slicing and assemble the dimensions in a tuple."""
    result = tuple(self.visit(node=dim) for dim in node.dims)

    self.recomputed_values[node] = result
    return result
Visit each dimension of the advanced slicing and assemble the dimensions in a tuple.
entailment
def visit_Subscript(self, node: ast.Subscript) -> Any:
    """Visit the ``slice`` and a ``value`` and get the element."""
    value = self.visit(node=node.value)
    a_slice = self.visit(node=node.slice)

    result = value[a_slice]

    self.recomputed_values[node] = result
    return result
Visit the ``slice`` and a ``value`` and get the element.
entailment
def _execute_comprehension(self, node: Union[ast.ListComp, ast.SetComp, ast.GeneratorExp, ast.DictComp]) -> Any:
    """Compile the generator or comprehension from the node and execute the compiled code."""
    args = [ast.arg(arg=name) for name in sorted(self._name_to_value.keys())]

    func_def_node = ast.FunctionDef(
        name="generator_expr",
        args=ast.arguments(args=args, kwonlyargs=[], kw_defaults=[], defaults=[]),
        decorator_list=[],
        body=[ast.Return(node)])

    module_node = ast.Module(body=[func_def_node])

    ast.fix_missing_locations(module_node)

    code = compile(source=module_node, filename='<ast>', mode='exec')

    module_locals = {}  # type: Dict[str, Any]
    module_globals = {}  # type: Dict[str, Any]
    exec(code, module_globals, module_locals)  # pylint: disable=exec-used

    generator_expr_func = module_locals["generator_expr"]

    return generator_expr_func(**self._name_to_value)
Compile the generator or comprehension from the node and execute the compiled code.
entailment
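The same compile-and-call trick can be reproduced with ast.parse, which sidesteps the version-dependent field requirements of hand-built ast.arguments nodes; this sketch is an independent illustration rather than the code above:

    import ast

    name_to_value = {"xs": [1, 2, 3, 4]}

    # Wrap the comprehension in a function whose parameters are the free variables ...
    source = "def generator_expr(xs):\n    return all(x > 0 for x in xs)"
    module_node = ast.parse(source)
    code = compile(module_node, filename="<ast>", mode="exec")

    # ... execute the module to obtain the function, then call it with the captured values.
    module_locals = {}
    exec(code, {}, module_locals)
    print(module_locals["generator_expr"](**name_to_value))  # True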
def visit_GeneratorExp(self, node: ast.GeneratorExp) -> Any:
    """Compile the generator expression as a function and call it."""
    result = self._execute_comprehension(node=node)

    for generator in node.generators:
        self.visit(generator.iter)

    # Do not set the computed value of the node since its representation would be non-informative.
    return result
Compile the generator expression as a function and call it.
entailment
def visit_ListComp(self, node: ast.ListComp) -> Any:
    """Compile the list comprehension as a function and call it."""
    result = self._execute_comprehension(node=node)

    for generator in node.generators:
        self.visit(generator.iter)

    self.recomputed_values[node] = result
    return result
Compile the list comprehension as a function and call it.
entailment
def visit_SetComp(self, node: ast.SetComp) -> Any:
    """Compile the set comprehension as a function and call it."""
    result = self._execute_comprehension(node=node)

    for generator in node.generators:
        self.visit(generator.iter)

    self.recomputed_values[node] = result
    return result
Compile the set comprehension as a function and call it.
entailment
def visit_DictComp(self, node: ast.DictComp) -> Any:
    """Compile the dictionary comprehension as a function and call it."""
    result = self._execute_comprehension(node=node)

    for generator in node.generators:
        self.visit(generator.iter)

    self.recomputed_values[node] = result
    return result
Compile the dictionary comprehension as a function and call it.
entailment
def visit_Return(self, node: ast.Return) -> Any:  # pylint: disable=no-self-use
    """Raise an exception that this node is unexpected."""
    raise AssertionError("Unexpected return node during the re-computation: {}".format(ast.dump(node)))
Raise an exception that this node is unexpected.
entailment
def generic_visit(self, node: ast.AST) -> None:
    """Raise an exception that this node has not been handled."""
    raise NotImplementedError("Unhandled recomputation of the node: {} {}".format(type(node), node))
Raise an exception that this node has not been handled.
entailment
def tokenize_words(string):
    """
    Tokenize input text to words.

    :param string: Text to tokenize
    :type string: str or unicode
    :return: words
    :rtype: list of strings
    """
    string = six.text_type(string)
    return re.findall(WORD_TOKENIZATION_RULES, string)
Tokenize input text to words. :param string: Text to tokenize :type string: str or unicode :return: words :rtype: list of strings
entailment
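WORD_TOKENIZATION_RULES is a module-level regex that is not shown in this snippet; with a stand-in pattern the function reduces to a plain re.findall. A minimal sketch (the pattern below is an assumption, not the library's actual rules):

    import re

    WORD_TOKENIZATION_RULES = re.compile(r"\w+|[^\w\s]")  # stand-in pattern for illustration

    def tokenize_words(string):
        return re.findall(WORD_TOKENIZATION_RULES, string)

    print(tokenize_words("Привіт, світе!"))  # ['Привіт', ',', 'світе', '!']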
def tokenize_sents(string):
    """
    Tokenize input text to sentences.

    :param string: Text to tokenize
    :type string: str or unicode
    :return: sentences
    :rtype: list of strings
    """
    string = six.text_type(string)

    spans = []
    for match in re.finditer(r'[^\s]+', string):
        spans.append(match)
    spans_count = len(spans)

    rez = []
    off = 0

    for i in range(spans_count):
        tok = string[spans[i].start():spans[i].end()]
        if i == spans_count - 1:
            rez.append(string[off:spans[i].end()])
        elif tok[-1] in ['.', '!', '?', '…', '»']:
            tok1 = tok[re.search(r'[.!?…»]', tok).start() - 1]
            next_tok = string[spans[i + 1].start():spans[i + 1].end()]
            if (next_tok[0].isupper() and not tok1.isupper()
                    and not (tok[-1] != '.' or tok1[0] == '(' or tok in ABBRS)):
                rez.append(string[off:spans[i].end()])
                off = spans[i + 1].start()

    return rez
Tokenize input text to sentences. :param string: Text to tokenize :type string: str or unicode :return: sentences :rtype: list of strings
entailment
def tokenize_text(string):
    """
    Tokenize input text to paragraphs, sentences and words.

    Tokenization to paragraphs is done using a simple newline algorithm.
    For sentences and words, the tokenizers above are used.

    :param string: Text to tokenize
    :type string: str or unicode
    :return: text, tokenized into paragraphs, sentences and words
    :rtype: list of list of list of words
    """
    string = six.text_type(string)

    rez = []
    for part in string.split('\n'):
        par = []
        for sent in tokenize_sents(part):
            par.append(tokenize_words(sent))
        if par:
            rez.append(par)

    return rez
Tokenize input text to paragraphs, sentences and words. Tokenization to paragraphs is done using a simple newline algorithm. For sentences and words, the tokenizers above are used. :param string: Text to tokenize :type string: str or unicode :return: text, tokenized into paragraphs, sentences and words :rtype: list of list of list of words
entailment
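For orientation, a self-contained approximation of the resulting three-level nesting; it uses a naive regex sentence split in place of tokenize_sents, so it is a sketch of the shape of the output rather than the library's exact behaviour:

    import re

    def tokenize_text_sketch(string):
        rez = []
        for part in string.split('\n'):                            # paragraphs by newline
            par = [re.findall(r'\w+|[^\w\s]', sent)                # words per sentence
                   for sent in re.split(r'(?<=[.!?…])\s+', part) if sent]
            if par:
                rez.append(par)
        return rez

    print(tokenize_text_sketch("Перше речення. Друге!\nНовий абзац."))
    # [[['Перше', 'речення', '.'], ['Друге', '!']], [['Новий', 'абзац', '.']]]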
def collate(self, graph, collation):
    '''
    :type graph: VariantGraph
    :type collation: Collation
    '''
    # Build the variant graph for the first witness
    # this is easy: generate a vertex for every token
    first_witness = collation.witnesses[0]
    tokens = first_witness.tokens()
    token_to_vertex = self.merge(graph, first_witness.sigil, tokens)

    # let the scorer prepare the first witness
    self.scorer.prepare_witness(first_witness)

    # construct superbase
    superbase = tokens

    # align witness 2 - n
    for x in range(1, len(collation.witnesses)):
        next_witness = collation.witnesses[x]

        # let the scorer prepare the next witness
        self.scorer.prepare_witness(next_witness)

        # # FOR CHECKING!
        # alignment = self._align_table(superbase, next_witness, token_to_vertex)
        # self.table2 = self.table

        # alignment = token -> vertex
        alignment = self.align_function(superbase, next_witness, token_to_vertex)

        # merge
        token_to_vertex.update(self.merge(graph, next_witness.sigil, next_witness.tokens(), alignment))

        # print("actual")
        # self._debug_edit_graph_table(self.table)
        # print("expected")
        # self._debug_edit_graph_table(self.table2)

        # change superbase
        superbase = self.new_superbase

    if self.debug_scores:
        self._debug_edit_graph_table(self.table)
:type graph: VariantGraph :type collation: Collation
entailment
def align(self):
    '''
    Every step we have 3 choices:
    1) Move pointer witness a --> omission
    2) Move pointer witness b --> addition
    3) Move pointer of both witness a/b --> match
    Note: a replacement is omission followed by an addition or the other way around

    Choice 1 and 2 are only possible if token a and b are not a match OR when tokens are repeated.
    For now I ignore token repetition..
    '''
    # extract tokens from witness (note that this can be done in a streaming manner if desired)
    tokens_a = self.witness_a.tokens()
    tokens_b = self.witness_b.tokens()

    # create virtual decision tree (nodes are created on demand)
    # see above

    # create start node
    start = DecisionTreeNode(self)

    # search the decision tree
    result = self.tree.search(start)
    print(result)
Every step we have 3 choices: 1) Move pointer witness a --> omission 2) Move pointer witness b --> addition 3) Move pointer of both witness a/b --> match Note: a replacement is omission followed by an addition or the other way around Choice 1 and 2 are only possible if token a and b are not a match OR when tokens are repeated. For now I ignore token repetition..
entailment
def connect(self, source, target, witnesses):
    """
    :type source: integer
    :type target: integer
    """
    # print("Adding Edge: "+source+":"+target)
    if self.graph.has_edge(source, target):
        self.graph[source][target]["label"] += ", " + str(witnesses)
    else:
        self.graph.add_edge(source, target, label=witnesses)
:type source: integer :type target: integer
entailment
def connect_near(self, source, target, weight):
    """
    :type source: integer
    :type target: integer
    """
    # Near edges are added to self.near_graph, not self.graph, to avoid cycles
    self.near_graph.add_edge(source, target, weight=weight, type='near')
:type source: integer :type target: integer
entailment
def merge(self, graph, witness_sigil, witness_tokens, alignments={}):
    """
    :type graph: VariantGraph
    """
    # NOTE: token_to_vertex only contains newly generated vertices
    token_to_vertex = {}
    last = graph.start
    for token in witness_tokens:
        vertex = alignments.get(token, None)
        if not vertex:
            vertex = graph.add_vertex(token, witness_sigil)
            token_to_vertex[token] = vertex
        else:
            vertex.add_token(witness_sigil, token)
            # graph.add_token_to_vertex(vertex, token, witness_sigil)
        graph.connect(last, vertex, witness_sigil)
        last = vertex
    graph.connect(last, graph.end, witness_sigil)
    return token_to_vertex
:type graph: VariantGraph
entailment
def collate(self, graph):
    """
    :type graph: VariantGraph
    """
    # prepare the token index
    self.token_index.prepare()
    self.vertex_array = [None] * len(self.token_index.token_array)

    # Build the variant graph for the first witness
    # this is easy: generate a vertex for every token
    first_witness = self.collation.witnesses[0]
    tokens = first_witness.tokens()
    token_to_vertex = self.merge(graph, first_witness.sigil, tokens)
    # print("> token_to_vertex=", token_to_vertex)
    self.update_token_position_to_vertex(token_to_vertex)
    self.update_token_to_vertex_array(tokens, first_witness, self.token_position_to_vertex)

    # align witness 2 - n
    for x in range(1, len(self.collation.witnesses)):
        witness = self.collation.witnesses[x]
        tokens = witness.tokens()
        # print("\nwitness", witness.sigil)

        variant_graph_ranking = VariantGraphRanking.of(graph)
        # print("> x =", x, ", variant_graph_ranking =", variant_graph_ranking.byRank)
        variant_graph_ranks = list(set(map(lambda v: variant_graph_ranking.byVertex.get(v), graph.vertices())))
        # we leave in the rank of the start vertex, but remove the rank of the end vertex
        variant_graph_ranks.pop()

        # now the vertical stuff
        tokens_as_index_list = self.as_index_list(tokens)

        match_cube = MatchCube(self.token_index, witness, self.vertex_array, variant_graph_ranking,
                               self.properties_filter)
        # print("> match_cube.matches=", match_cube.matches)

        self.fill_needleman_wunsch_table(variant_graph_ranks, tokens_as_index_list, match_cube)

        aligned = self.align_matching_tokens(match_cube)
        # print("> aligned=", aligned)
        # print("self.token_index.token_array=", self.token_index.token_array)

        # alignment = self.align_function(superbase, next_witness, token_to_vertex, match_cube)

        # merge
        witness_token_to_generated_vertex = self.merge(graph, witness.sigil, witness.tokens(), aligned)
        # print("> witness_token_to_generated_vertex =", witness_token_to_generated_vertex)
        token_to_vertex.update(witness_token_to_generated_vertex)
        # print("> token_to_vertex =", token_to_vertex)
        self.update_token_position_to_vertex(token_to_vertex, aligned)
        witness_token_position_to_vertex = {}
        for p in self.token_index.get_range_for_witness(witness.sigil):
            # print("> p= ", p)
            witness_token_position_to_vertex[p] = self.token_position_to_vertex[p]
        self.update_token_to_vertex_array(tokens, witness, witness_token_position_to_vertex)
        # print("> vertex_array =", self.vertex_array)

        # print("actual")
        # self._debug_edit_graph_table(self.table)
        # print("expected")
        # self._debug_edit_graph_table(self.table2)

        # change superbase
        # superbase = self.new_superbase

    if self.detect_transpositions:
        detector = TranspositionDetection(self)
        detector.detect()
:type graph: VariantGraph
entailment
def _parse(self, pattern):
    """Parse string of comma-separated x-y/step -like ranges"""
    # Comma separated ranges
    if pattern.find(',') < 0:
        subranges = [pattern]
    else:
        subranges = pattern.split(',')

    for subrange in subranges:
        if subrange.find('/') < 0:
            step = 1
            baserange = subrange
        else:
            baserange, step = subrange.split('/', 1)

        try:
            step = int(step)
        except ValueError:
            raise RangeSetParseError(subrange, "cannot convert string to integer")

        if baserange.find('-') < 0:
            if step != 1:
                raise RangeSetParseError(subrange, "invalid step usage")
            begin = end = baserange
        else:
            begin, end = baserange.split('-', 1)

        # compute padding and return node range info tuple
        try:
            pad = 0
            if int(begin) != 0:
                begins = begin.lstrip("0")
                if len(begin) - len(begins) > 0:
                    pad = len(begin)
                start = int(begins)
            else:
                if len(begin) > 1:
                    pad = len(begin)
                start = 0
            if int(end) != 0:
                ends = end.lstrip("0")
            else:
                ends = end
            stop = int(ends)
        except ValueError:
            raise RangeSetParseError(subrange, "cannot convert string to integer")

        # check preconditions
        if stop > 1e100 or start > stop or step < 1:
            raise RangeSetParseError(subrange, "invalid values in range")

        self.add_range(start, stop + 1, step, pad)
Parse string of comma-separated x-y/step -like ranges
entailment
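The pattern syntax accepted above combines commas, dashes and '/step' suffixes. A standalone rendition of the happy path for a single subrange (no padding or error handling, so a sketch rather than the real parser):

    def parse_subrange(subrange):
        """Expand one x-y/step subrange into the list of indices it denotes."""
        baserange, _, step = subrange.partition('/')
        step = int(step) if step else 1
        begin, _, end = baserange.partition('-')
        start = int(begin)
        stop = int(end) if end else start
        return list(range(start, stop + 1, step))

    print(parse_subrange("1-9/2"))  # [1, 3, 5, 7, 9]
    print(parse_subrange("12"))     # [12]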
def fromlist(cls, rnglist, autostep=None):
    """Class method that returns a new RangeSet with ranges from provided list."""
    inst = RangeSet(autostep=autostep)
    inst.updaten(rnglist)
    return inst
Class method that returns a new RangeSet with ranges from provided list.
entailment
def fromone(cls, index, pad=0, autostep=None):
    """Class method that returns a new RangeSet of one single item or a single range (from integer or slice object)."""
    inst = RangeSet(autostep=autostep)
    # support slice object with duck-typing
    try:
        inst.add(index, pad)
    except TypeError:
        if not index.stop:
            raise ValueError("Invalid range upper limit (%s)" % index.stop)
        inst.add_range(index.start or 0, index.stop, index.step or 1, pad)
    return inst
Class method that returns a new RangeSet of one single item or a single range (from integer or slice object).
entailment
def set_autostep(self, val):
    """Set autostep value (property)"""
    if val is None:
        # disabled by default for pdsh compat (+inf is 1E400, but a bug in
        # python 2.4 makes it impossible to be pickled, so we use less)
        # NOTE: Later, we could consider sys.maxint here
        self._autostep = 1E100
    else:
        # - 1 because the user means node count, but we mean real steps
        self._autostep = int(val) - 1
Set autostep value (property)
entailment
def striter(self):
    """Iterate over each (optionally padded) string element in RangeSet."""
    pad = self.padding or 0
    for i in self._sorted():
        yield "%0*d" % (pad, i)
Iterate over each (optionally padded) string element in RangeSet.
entailment
def contiguous(self):
    """Object-based iterator over contiguous range sets."""
    pad = self.padding or 0
    for sli in self._contiguous_slices():
        yield RangeSet.fromone(slice(sli.start, sli.stop, sli.step), pad)
Object-based iterator over contiguous range sets.
entailment
def _strslices(self):
    """Stringify slices list (x-y/step format)"""
    pad = self.padding or 0
    for sli in self.slices():
        if sli.start + 1 == sli.stop:
            yield "%0*d" % (pad, sli.start)
        else:
            assert sli.step >= 0, "Internal error: sli.step < 0"
            if sli.step == 1:
                yield "%0*d-%0*d" % (pad, sli.start, pad, sli.stop - 1)
            else:
                yield "%0*d-%0*d/%d" % (pad, sli.start, pad, sli.stop - 1, sli.step)
Stringify slices list (x-y/step format)
entailment
def _contiguous_slices(self):
    """Internal iterator over contiguous slices in RangeSet."""
    k = j = None
    for i in self._sorted():
        if k is None:
            k = j = i
        if i - j > 1:
            yield slice(k, j + 1, 1)
            k = i
        j = i
    if k is not None:
        yield slice(k, j + 1, 1)
Internal iterator over contiguous slices in RangeSet.
entailment
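The generator logic can be exercised standalone; on the sorted values {1, 2, 3, 7, 8, 10} it folds the runs into three contiguous slices:

    def contiguous_slices(sorted_values):
        k = j = None
        for i in sorted_values:
            if k is None:
                k = j = i
            if i - j > 1:       # gap detected: emit the finished run
                yield slice(k, j + 1, 1)
                k = i
            j = i
        if k is not None:       # emit the trailing run
            yield slice(k, j + 1, 1)

    print(list(contiguous_slices([1, 2, 3, 7, 8, 10])))
    # [slice(1, 4, 1), slice(7, 9, 1), slice(10, 11, 1)]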
def _folded_slices(self):
    """Internal generator that is able to retrieve ranges organized by step.

    Complexity: O(n) with n = number of ranges in tree."""
    if len(self) == 0:
        return

    prng = None         # pending range
    istart = None       # processing starting indice
    m = 0               # processing step
    for sli in self._contiguous_slices():
        start = sli.start
        stop = sli.stop
        unitary = (start + 1 == stop)   # one indice?
        if istart is None:  # first loop
            if unitary:
                istart = start
            else:
                prng = [start, stop, 1]
                istart = stop - 1
            i = k = istart
        elif m == 0:        # istart is set but step is unknown
            if not unitary:
                if prng is not None:
                    # yield and replace pending range
                    yield slice(*prng)
                else:
                    yield slice(istart, istart + 1, 1)
                prng = [start, stop, 1]
                istart = k = stop - 1
                continue
            i = start
        else:               # step m > 0
            assert m > 0
            i = start
            # does current range lead to broken step?
            if m != i - k or not unitary:
                #j = i if m == i - k else k
                if m == i - k:
                    j = i
                else:
                    j = k
                # stepped is True when autostep setting does apply
                stepped = (j - istart >= self._autostep * m)
                if prng:    # yield pending range?
                    if stepped:
                        prng[1] -= 1
                    else:
                        istart += m
                    yield slice(*prng)
                    prng = None
                if m != i - k:
                    # case: step value has changed
                    if stepped:
                        yield slice(istart, k + 1, m)
                    else:
                        for j in range(istart, k - m + 1, m):
                            yield slice(j, j + 1, 1)
                        if not unitary:
                            yield slice(k, k + 1, 1)
                    if unitary:
                        if stepped:
                            istart = i = k = start
                        else:
                            istart = k
                    else:
                        prng = [start, stop, 1]
                        istart = i = k = stop - 1
                elif not unitary:
                    # case: broken step by contiguous range
                    if stepped:
                        # yield 'range/m' by taking first indice of new range
                        yield slice(istart, i + 1, m)
                        i += 1
                    else:
                        # autostep setting does not apply in that case
                        for j in range(istart, i - m + 1, m):
                            yield slice(j, j + 1, 1)
                    if stop > i + 1:
                        # current->pending only if not unitary
                        prng = [i, stop, 1]
                        istart = i = k = stop - 1
        m = i - k   # compute step
        k = i

    # exited loop, process pending range or indice...
    if m == 0:
        if prng:
            yield slice(*prng)
        else:
            yield slice(istart, istart + 1, 1)
    else:
        assert m > 0
        stepped = (k - istart >= self._autostep * m)
        if prng:
            if stepped:
                prng[1] -= 1
            else:
                istart += m
            yield slice(*prng)
            prng = None
        if stepped:
            yield slice(istart, i + 1, m)
        else:
            for j in range(istart, i + 1, m):
                yield slice(j, j + 1, 1)
Internal generator that is able to retrieve ranges organized by step. Complexity: O(n) with n = number of ranges in tree.
entailment
def split(self, nbr):
    """
    Split the rangeset into nbr sub-rangesets (at most).

    Each sub-rangeset will have the same number of elements more or
    less 1. Current rangeset remains unmodified. Returns an iterator.

    >>> RangeSet("1-5").split(3)
    RangeSet("1-2")
    RangeSet("3-4")
    RangeSet("5")
    """
    assert nbr > 0

    # We put the same number of elements in each sub-nodeset.
    slice_size = len(self) // nbr
    left = len(self) % nbr

    begin = 0
    for i in range(0, min(nbr, len(self))):
        length = slice_size + int(i < left)
        yield self[begin:begin + length]
        begin += length
Split the rangeset into nbr sub-rangesets (at most). Each sub-rangeset will have the same number of elements more or less 1. Current rangeset remains unmodified. Returns an iterator. >>> RangeSet("1-5").split(3) RangeSet("1-2") RangeSet("3-4") RangeSet("5")
entailment
def add_range(self, start, stop, step=1, pad=0):
    """
    Add a range (start, stop, step and padding length) to RangeSet.
    Like the Python built-in function range(), the last element is the
    largest start + i * step less than stop.
    """
    assert start < stop, "please provide ordered node index ranges"
    assert step > 0
    assert pad >= 0
    assert stop - start < 1e9, "range too large"

    if pad > 0 and self.padding is None:
        self.padding = pad
    set.update(self, range(start, stop, step))
Add a range (start, stop, step and padding length) to RangeSet. Like the Python built-in function range(), the last element is the largest start + i * step less than stop.
entailment
def copy(self):
    """Return a shallow copy of a RangeSet."""
    cpy = self.__class__()
    cpy._autostep = self._autostep
    cpy.padding = self.padding
    cpy.update(self)
    return cpy
Return a shallow copy of a RangeSet.
entailment
def _wrap_set_op(self, fun, arg):
    """Wrap built-in set operations for RangeSet to workaround built-in set
    base class issues (RangeSet.__new/init__ not called)"""
    result = fun(self, arg)
    result._autostep = self._autostep
    result.padding = self.padding
    return result
Wrap built-in set operations for RangeSet to workaround built-in set base class issues (RangeSet.__new/init__ not called)
entailment
def intersection(self, other):
    """Return the intersection of two RangeSets as a new RangeSet.

    (I.e. all elements that are in both sets.)
    """
    # NOTE: This is a workaround.
    # Python 3 returns a plain set instance as the result of set.intersection.
    # Python 2, however, returns a ClusterShell.RangeSet.RangeSet instance.
    # ORIGINAL CODE: return self._wrap_set_op(set.intersection, other)
    copy = self.copy()
    copy.intersection_update(other)
    return copy
Return the intersection of two RangeSets as a new RangeSet. (I.e. all elements that are in both sets.)
entailment
def difference(self, other):
    """Return the difference of two RangeSets as a new RangeSet.

    (I.e. all elements that are in this set and not in the other.)
    """
    # NOTE: This is a workaround.
    # Python 3 returns a plain set instance as the result of set.difference.
    # Python 2, however, returns a ClusterShell.RangeSet.RangeSet instance.
    # ORIGINAL CODE: return self._wrap_set_op(set.difference, other)
    copy = self.copy()
    copy.difference_update(other)
    return copy
Return the difference of two RangeSets as a new RangeSet. (I.e. all elements that are in this set and not in the other.)
entailment
def issubset(self, other):
    """Report whether another set contains this RangeSet."""
    self._binary_sanity_check(other)
    return set.issubset(self, other)
Report whether another set contains this RangeSet.
entailment
def issuperset(self, other):
    """Report whether this RangeSet contains another set."""
    self._binary_sanity_check(other)
    return set.issuperset(self, other)
Report whether this RangeSet contains another set.
entailment
def difference_update(self, other, strict=False):
    """Remove all elements of another set from this RangeSet.

    If strict is True, raise KeyError if an element cannot be removed.
    (strict is a RangeSet addition)"""
    if strict and other not in self:
        raise KeyError(other.difference(self)[0])
    set.difference_update(self, other)
Remove all elements of another set from this RangeSet. If strict is True, raise KeyError if an element cannot be removed. (strict is a RangeSet addition)
entailment
def update(self, iterable):
    """Add all integers from an iterable (such as a list)."""
    if isinstance(iterable, RangeSet):
        # adopt the other set's padding if ours has not been defined yet
        if self.padding is None and iterable.padding is not None:
            self.padding = iterable.padding

    assert type(iterable) is not str
    set.update(self, iterable)
Add all integers from an iterable (such as a list).
entailment
def updaten(self, rangesets):
    """
    Update a rangeset with the union of itself and several others.
    """
    for rng in rangesets:
        if isinstance(rng, set):
            self.update(rng)
        else:
            self.update(RangeSet(rng))
Update a rangeset with the union of itself and several others.
entailment
def add(self, element, pad=0):
    """Add an element to a RangeSet.
    This has no effect if the element is already present.
    """
    set.add(self, int(element))
    if pad > 0 and self.padding is None:
        self.padding = pad
Add an element to a RangeSet. This has no effect if the element is already present.
entailment
def discard(self, element):
    """Remove element from the RangeSet if it is a member.

    If the element is not a member, do nothing.
    """
    try:
        i = int(element)
        set.discard(self, i)
    except ValueError:
        pass
Remove element from the RangeSet if it is a member. If the element is not a member, do nothing.
entailment
def _open(filename, mode="r"):
    """
    Universal open file facility.

    With normal files, this function behaves as the open builtin.
    With gzip-ed files, it decompresses or compresses according to the
    specified mode.
    In addition, when filename is '-', it opens the standard input or
    output according to the specified mode.

    Modes are expected to be either 'r' or 'w'.
    """
    if filename.endswith(".gz"):
        return GzipFile(filename, mode, COMPRESSION_LEVEL)
    elif filename == "-":
        if mode == "r":
            return _stdin
        elif mode == "w":
            return _stdout
    else:
        # TODO: set encoding to UTF-8?
        return open(filename, mode=mode)
Universal open file facility. With normal files, this function behaves as the open builtin. With gzip-ed files, it decompresses or compresses according to the specified mode. In addition, when filename is '-', it opens the standard input or output according to the specified mode. Modes are expected to be either 'r' or 'w'.
entailment
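A self-contained sketch of the same dispatch (the function name and the file path in the usage line are hypothetical):

    import sys
    from gzip import GzipFile

    def open_universal(filename, mode="r"):
        """Gzip for *.gz, standard streams for '-', else the open builtin."""
        if filename.endswith(".gz"):
            return GzipFile(filename, mode)
        if filename == "-":
            return sys.stdin if mode == "r" else sys.stdout
        return open(filename, mode=mode)

    with open_universal("/etc/hostname") as f:  # hypothetical path
        print(f.read().strip())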
def _radixPass(a, b, r, n, K):
    """
    Stable sort of the sequence a according to the keys given in r.

    >>> a=range(5)
    >>> b=[0]*5
    >>> r=[2,1,3,0,4]
    >>> _radixPass(a, b, r, 5, 5)
    >>> b
    [3, 1, 0, 2, 4]

    When n is less than the length of a, the end of b must be left
    unaltered.
    >>> b=[5]*5
    >>> _radixPass(a, b, r, 2, 2)
    >>> b
    [1, 0, 5, 5, 5]

    >>> _a=a=[1, 0]
    >>> b= [0]*2
    >>> r=[0, 1]
    >>> _radixPass(a, b, r, 2, 2)
    >>> a=_a
    >>> b
    [0, 1]

    >>> a=[1, 1]
    >>> _radixPass(a, b, r, 2, 2)
    >>> b
    [1, 1]

    >>> a=[0, 1, 1, 0]
    >>> b= [0]*4
    >>> r=[0, 1]
    >>> _radixPass(a, b, r, 4, 2)
    >>> a=_a
    >>> b
    [0, 0, 1, 1]
    """
    c = _array("i", [0] * (K + 1))  # counter array
    for i in range(n):              # count occurrences
        c[r[a[i]]] += 1

    sum = 0
    for i in range(K + 1):          # exclusive prefix sums
        t = c[i]
        c[i] = sum
        sum += t

    for a_i in a[:n]:               # sort
        b[c[r[a_i]]] = a_i
        c[r[a_i]] += 1
Stable sort of the sequence a according to the keys given in r. >>> a=range(5) >>> b=[0]*5 >>> r=[2,1,3,0,4] >>> _radixPass(a, b, r, 5, 5) >>> b [3, 1, 0, 2, 4] When n is less than the length of a, the end of b must be left unaltered. >>> b=[5]*5 >>> _radixPass(a, b, r, 2, 2) >>> b [1, 0, 5, 5, 5] >>> _a=a=[1, 0] >>> b= [0]*2 >>> r=[0, 1] >>> _radixPass(a, b, r, 2, 2) >>> a=_a >>> b [0, 1] >>> a=[1, 1] >>> _radixPass(a, b, r, 2, 2) >>> b [1, 1] >>> a=[0, 1, 1, 0] >>> b= [0]*4 >>> r=[0, 1] >>> _radixPass(a, b, r, 4, 2) >>> a=_a >>> b [0, 0, 1, 1]
entailment
def _nbOperations(n):
    """
    Exact number of atomic operations in _radixPass.
    """
    if n < 2:
        return 0
    else:
        n0 = (n + 2) // 3
        n02 = n0 + n // 3
        return 3 * (n02) + n0 + _nbOperations(n02)
Exact number of atomic operations in _radixPass.
entailment
def _suffixArrayWithTrace(s, SA, n, K, operations, totalOperations):
    """
    This function is a rewrite in Python of the C implementation proposed in Kärkkäinen and Sanders paper.

    Find the suffix array SA of s[0..n-1] in {1..K}^n
    Require s[n]=s[n+1]=s[n+2]=0, n>=2
    """
    if _trace:
        _traceSuffixArray(operations, totalOperations)

    n0 = (n + 2) // 3
    n1 = (n + 1) // 3
    n2 = n // 3
    n02 = n0 + n2

    SA12 = _array("i", [0] * (n02 + 3))
    SA0 = _array("i", [0] * n0)

    s0 = _array("i", [0] * n0)

    # s12 : positions of mod 1 and mod 2 suffixes
    s12 = _array("i", [i for i in range(n + (n0 - n1)) if i % 3])  # <- writing i%3 is more efficient than i%3!=0
    s12.extend([0] * 3)

    # lsb radix sort the mod 1 and mod 2 triples
    _radixPass(s12, SA12, s[2:], n02, K)
    if _trace:
        operations += n02
        _traceSuffixArray(operations, totalOperations)

    _radixPass(SA12, s12, s[1:], n02, K)
    if _trace:
        operations += n02
        _traceSuffixArray(operations, totalOperations)

    _radixPass(s12, SA12, s, n02, K)
    if _trace:
        operations += n02
        _traceSuffixArray(operations, totalOperations)

    # find lexicographic names of triples
    name = 0
    c = _array("i", [-1] * 3)
    for i in range(n02):
        cSA12 = s[SA12[i]:SA12[i] + 3]
        if cSA12 != c:
            name += 1
            c = cSA12

        if SA12[i] % 3 == 1:
            s12[SA12[i] // 3] = name         # left half
        else:
            s12[(SA12[i] // 3) + n0] = name  # right half

    if name < n02:  # recurse if names are not yet unique
        operations = _suffixArrayWithTrace(s12, SA12, n02, name + 1, operations, totalOperations)
        if _trace:
            _traceSuffixArray(operations, totalOperations)
        # store unique names in s12 using the suffix array
        for i, SA12_i in enumerate(SA12[:n02]):
            s12[SA12_i] = i + 1
    else:  # generate the suffix array of s12 directly
        if _trace:
            operations += _nbOperations(n02)
            _traceSuffixArray(operations, totalOperations)
        for i, s12_i in enumerate(s12[:n02]):
            SA12[s12_i - 1] = i

    # stably sort the mod 0 suffixes from SA12 by their first character
    j = 0
    for SA12_i in SA12[:n02]:
        if (SA12_i < n0):
            s0[j] = 3 * SA12_i
            j += 1
    _radixPass(s0, SA0, s, n0, K)
    if _trace:
        operations += n0
        _traceSuffixArray(operations, totalOperations)

    # merge sorted SA0 suffixes and sorted SA12 suffixes
    p = j = k = 0
    t = n0 - n1
    while k < n:
        if SA12[t] < n0:  # pos of current offset 12 suffix
            i = SA12[t] * 3 + 1
        else:
            i = (SA12[t] - n0) * 3 + 2

        j = SA0[p]  # pos of current offset 0 suffix

        if SA12[t] < n0:
            bool = (s[i], s12[SA12[t] + n0]) <= (s[j], s12[int(j / 3)])
        else:
            bool = (s[i], s[i + 1], s12[SA12[t] - n0 + 1]) <= (s[j], s[j + 1], s12[int(j / 3) + n0])

        if (bool):
            SA[k] = i
            t += 1
            if t == n02:  # done --- only SA0 suffixes left
                k += 1
                while p < n0:
                    SA[k] = SA0[p]
                    p += 1
                    k += 1
        else:
            SA[k] = j
            p += 1
            if p == n0:  # done --- only SA12 suffixes left
                k += 1
                while t < n02:
                    if SA12[t] < n0:  # pos of current offset 12 suffix
                        SA[k] = (SA12[t] * 3) + 1
                    else:
                        SA[k] = ((SA12[t] - n0) * 3) + 2
                    t += 1
                    k += 1
        k += 1

    return operations
This function is a rewrite in Python of the C implementation proposed in Kärkkäinen and Sanders paper. Find the suffix array SA of s[0..n-1] in {1..K}^n Require s[n]=s[n+1]=s[n+2]=0, n>=2
entailment
def _longestCommonPrefix(seq1, seq2, start1=0, start2=0):
    """
    Returns the length of the longest common prefix of seq1
    starting at offset start1 and seq2 starting at offset start2.

    >>> _longestCommonPrefix("abcdef", "abcghj")
    3

    >>> _longestCommonPrefix("abcghj", "abcdef")
    3

    >>> _longestCommonPrefix("miss", "")
    0

    >>> _longestCommonPrefix("", "mr")
    0

    >>> _longestCommonPrefix(range(128), range(128))
    128

    >>> _longestCommonPrefix("abcabcabc", "abcdefabcdef", 0, 6)
    3

    >>> _longestCommonPrefix("abcdefabcdef", "abcabcabc", 6, 0)
    3

    >>> _longestCommonPrefix("abc", "abcabc", 1, 4)
    2

    >>> _longestCommonPrefix("abcabc", "abc", 4, 1)
    2
    """
    len1 = len(seq1) - start1
    len2 = len(seq2) - start2

    # We set seq2 as the shortest sequence
    if len1 < len2:
        seq1, seq2 = seq2, seq1
        start1, start2 = start2, start1
        len1, len2 = len2, len1

    # if seq2 is empty returns 0
    if len2 == 0:
        return 0

    i = 0
    pos2 = start2
    for i in range(min(len1, len2)):
        # print seq1, seq2, start1, start2
        if seq1[start1 + i] != seq2[start2 + i]:
            return i

    # we have reached the end of seq2 (need to increment i)
    return i + 1
Returns the length of the longest common prefix of seq1 starting at offset start1 and seq2 starting at offset start2. >>> _longestCommonPrefix("abcdef", "abcghj") 3 >>> _longestCommonPrefix("abcghj", "abcdef") 3 >>> _longestCommonPrefix("miss", "") 0 >>> _longestCommonPrefix("", "mr") 0 >>> _longestCommonPrefix(range(128), range(128)) 128 >>> _longestCommonPrefix("abcabcabc", "abcdefabcdef", 0, 6) 3 >>> _longestCommonPrefix("abcdefabcdef", "abcabcabc", 6, 0) 3 >>> _longestCommonPrefix("abc", "abcabc", 1, 4) 2 >>> _longestCommonPrefix("abcabc", "abc", 4, 1) 2
entailment
def LCP(SA):
    """
    Compute the longest common prefix for every adjacent suffixes.

    The result is a list of same size as SA.
    Given two suffixes at positions i and i+1,
    their LCP is stored at position i+1.
    A zero is stored at position 0 of the output.

    >>> SA=SuffixArray("abba", unit=UNIT_BYTE)
    >>> SA._LCP_values
    array('i', [0, 1, 0, 1])

    >>> SA=SuffixArray("", unit=UNIT_BYTE)
    >>> SA._LCP_values
    array('i')

    >>> SA=SuffixArray("", unit=UNIT_CHARACTER)
    >>> SA._LCP_values
    array('i')

    >>> SA=SuffixArray("", unit=UNIT_WORD)
    >>> SA._LCP_values
    array('i')

    >>> SA=SuffixArray("abab", unit=UNIT_BYTE)
    >>> SA._LCP_values
    array('i', [0, 2, 0, 1])
    """
    string = SA.string
    length = SA.length
    lcps = _array("i", [0] * length)
    SA = SA.SA

    if _trace:
        delta = max(length // 100, 1)
        for i, pos in enumerate(SA):
            if i % delta == 0:
                percent = float((i + 1) * 100) / length
                print >> _stderr, "Compute_LCP %.2f%% (%i/%i)\r" % (percent, i + 1, length),
            lcps[i] = _longestCommonPrefix(string, string, SA[i - 1], pos)
    else:
        for i, pos in enumerate(SA):
            lcps[i] = _longestCommonPrefix(string, string, SA[i - 1], pos)

    if _trace:
        print >> _stderr, "Compute_LCP %.2f%% (%i/%i)\r" % (100.0, length, length)

    if lcps:
        # Correct the case where string[0] == string[-1]
        lcps[0] = 0
    return lcps
Compute the longest common prefix for every pair of adjacent suffixes. The result is a list of the same size as SA. Given two suffixes at positions i and i+1, their LCP is stored at position i+1. A zero is stored at position 0 of the output.
>>> SA=SuffixArray("abba", unit=UNIT_BYTE)
>>> SA._LCP_values
array('i', [0, 1, 0, 1])
>>> SA=SuffixArray("", unit=UNIT_BYTE)
>>> SA._LCP_values
array('i')
>>> SA=SuffixArray("", unit=UNIT_CHARACTER)
>>> SA._LCP_values
array('i')
>>> SA=SuffixArray("", unit=UNIT_WORD)
>>> SA._LCP_values
array('i')
>>> SA=SuffixArray("abab", unit=UNIT_BYTE)
>>> SA._LCP_values
array('i', [0, 2, 0, 1])
entailment
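The LCP construction above calls _longestCommonPrefix once per adjacent pair, which is O(n*h) for an average LCP of h. Kasai's algorithm brings this down to O(n) by reusing the overlap between consecutive text positions. A minimal standalone sketch follows; it takes a plain sequence and its suffix array rather than a SuffixArray instance, so it is not a drop-in replacement for the function above.

def kasai_lcp(s, sa):
    """Kasai et al. O(n) LCP construction.
    lcp[i] holds the LCP of the suffixes at sa[i-1] and sa[i]; lcp[0] is 0,
    matching the convention used by LCP above."""
    n = len(s)
    rank = [0] * n
    for i, pos in enumerate(sa):
        rank[pos] = i
    lcp = [0] * n
    h = 0  # current number of matched leading tokens
    for pos in range(n):
        if rank[pos] > 0:
            prev = sa[rank[pos] - 1]  # suffix just before pos in sorted order
            while pos + h < n and prev + h < n and s[pos + h] == s[prev + h]:
                h += 1
            lcp[rank[pos]] = h
            if h > 0:
                h -= 1  # the next position can lose at most one matched token
        else:
            h = 0
    return lcp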
def parseArgv():
    """
    Command line option parser.
    """
    parser = OptionParser()
    parser.usage = r""" cat <TEXT> | %prog [--unit <UNIT>] [--output <SA_FILE>]

    Create the suffix array of TEXT with the processing UNIT and optionally store it in SA_FILE for subsequent use.
    UNIT may be set to 'byte', 'character' (given an encoding with the --encoding option) or 'word', which is the default.
    """
    parser.add_option("-i", "--input",
                      action="store", type="string", dest="input",
                      default=False,
                      help="Path of the file containing the input text. When '-' is given, read the standard input (default). If the path ends with '.gz', reads the decompressed file.")
    parser.add_option("-o", "--output",
                      action="store", type="string", dest="output",
                      default=False,
                      help="Store the suffix array of the input to the file OUTPUT. When '-' is given, writes to the standard output. If the filename ends with '.gz', the suffix array will be stored compressed.")
    parser.add_option("", "--load",
                      action="store", type="string", dest="SAFile",
                      default=False,
                      help="Load a suffix array from SAFILE; this option and --input are mutually exclusive.")
    parser.add_option("-u", "--unit",
                      action="store", type="string", dest="unit",
                      default=DEFAULT_UNIT_STR,
                      help="Processing unit used for the creation of the suffix array. " +
                           'Possible values are "byte", "character" and "word". Default is "%s". ' % DEFAULT_UNIT_STR +
                           "This option is ignored when the suffix array is loaded from SAFILE. " +
                           'For characters, the input is decoded according to the encoding set via the option --encoding.')
    parser.add_option("-e", "--encoding",
                      action="store", type="string", dest="encoding",
                      default=DEFAULT_ENCODING,
                      help="Encoding of the input. This information is required only when processing characters. Default is '%s'." % DEFAULT_ENCODING)
    parser.add_option("-p", "--print",
                      action="store_true", dest="printSA",
                      default=False,
                      help="Prints the suffix array in a human readable format to the standard error output.")
    parser.add_option("", "--verbose",
                      action="store_true", dest="verbose",
                      default=False,
                      help="Prints more information.")
    parser.add_option("", "--no-lcps",
                      action="store_true", dest="noLCPs",
                      default=False,
                      help="Switch off the computation of LCPs. By doing so, the find functions are unusable.")
    (options, args) = parser.parse_args(_argv)
    strings = args[1:]
    return (options, strings)
Command line option parser.
entailment
def main():
    """
    Entry point for the standalone script.
    """
    (options, strings) = parseArgv()
    global _trace

    #############
    # Verbosity #
    #############
    _trace = options.verbose

    ###################
    # Processing unit #
    ###################
    if options.unit == "byte":
        options.unit = UNIT_BYTE
    elif options.unit == "character":
        options.unit = UNIT_CHARACTER
    elif options.unit == "word":
        options.unit = UNIT_WORD
    else:
        print >> _stderr, "Please specify a valid unit type."
        exit(EXIT_BAD_OPTION)

    ######################
    # Build suffix array #
    ######################
    if not options.SAFile:  # Build the suffix array from INPUT
        if not options.input:  # default is standard input
            options.input = "-"
        try:
            string = _open(options.input, "r").read()
        except IOError:
            print >> _stderr, "File %s does not exist." % options.input
            exit(EXIT_ERROR_FILE)
        SA = SuffixArray(string, options.unit, options.encoding, options.noLCPs)
    ########################
    # Or load suffix array #
    ########################
    elif not options.input and options.SAFile:  # Load suffix array from SA_FILE
        try:
            SA = SuffixArray.fromFile(options.SAFile)
        except IOError:
            print >> _stderr, "SA_FILE %s does not exist." % options.SAFile
            exit(EXIT_ERROR_FILE)
    else:
        print >> _stderr, "Please set only one option amongst --input and --load.\n" + \
              "Type %s --help for more details." % _argv[0]
        exit(EXIT_BAD_OPTION)

    ######################
    # Print suffix array #
    ######################
    if options.printSA:
        # Buffered output
        deltaLength = 1000
        start = 0
        while start < SA.length:
            print >> _stderr, SA.__str__(start, start + deltaLength)
            start += deltaLength

    ####################################
    # Look for every string in strings #
    ####################################
    for string in strings:
        print >> _stderr, ""
        print >> _stderr, "Positions of %s:" % string
        print >> _stderr, "  %s" % list(SA.find(string))

    #########################
    # Save SAFILE if needed #
    #########################
    if options.output:
        SA.toFile(options.output)

    if _trace:
        print >> _stderr, "Done\r\n"
Entry point for the standalone script.
entailment
def addFeatureSA(self, callback, default=None, name=None):
    """
    Add a feature to the suffix array.
    The callback must return a sequence such that
    the feature at position i is attached to the suffix referenced by
    self.SA[i].
    It is called with one argument: the instance of SuffixArray self.

    The callback may traverse self.SA in any fashion.

    The default behavior is to name the new feature after the callback
    name. To give another name, set the argument name accordingly.

    When the feature of an unknown substring of the text is requested,
    the value of the default argument is used.

    If the feature attached to a suffix is independent of the other
    suffix features, then the method addFeature is a better alternative.

    You may use addFeatureSA as a decorator as in the following example.

    Example: a feature named bigram which attaches the frequency of the
    leading bigram to each suffix.

    >>> SA=SuffixArray("mississippi", unit=UNIT_BYTE)
    >>> def bigram(SA):
    ...    res=[0]*SA.length
    ...    end=0
    ...    while end <= SA.length:
    ...
    ...        begin=end-1
    ...        while end < SA.length and SA._LCP_values[end]>=2:
    ...            if SA.SA[end]+2<=SA.length: #end of string
    ...                end+=1
    ...
    ...        nbBigram=end-begin
    ...        for i in xrange(begin, end):
    ...            if SA.SA[i]+2<=SA.length:
    ...                res[i]=nbBigram
    ...
    ...        end+=1
    ...    return res
    >>> SA.addFeatureSA(bigram, 0)
    >>> SA._bigram_values
    [0, 1, 2, 2, 1, 1, 1, 2, 2, 2, 2]
    >>> print str(SA).expandtabs(14) #doctest: +SKIP
    ...   10 'i'          LCP=0 , bigram=0
    ...    7 'ippi'       LCP=1 , bigram=1
    ...    4 'issippi'    LCP=1 , bigram=2
    ...    1 'ississippi' LCP=4 , bigram=2
    ...    0 'mississipp' LCP=0 , bigram=1
    ...    9 'pi'         LCP=0 , bigram=1
    ...    8 'ppi'        LCP=1 , bigram=1
    ...    6 'sippi'      LCP=0 , bigram=2
    ...    3 'sissippi'   LCP=2 , bigram=2
    ...    5 'ssippi'     LCP=1 , bigram=2
    ...    2 'ssissippi'  LCP=3 , bigram=2
    >>> SA.bigram('ip')
    1
    >>> SA.bigram('si')
    2
    >>> SA.bigram('zw')
    0
    """
    if name is None:
        featureName = callback.__name__
    else:
        featureName = name

    featureValues = callback(self)
    setattr(self, "_%s_values" % featureName, featureValues)
    setattr(self, "%s_default" % featureName, default)
    self.features.append(featureName)

    def findFeature(substring):
        res = self._findOne(substring)
        # _findOne returns False on a miss; position 0 is a valid hit
        if res is not False:
            return featureValues[res]
        else:
            return default
    setattr(self, featureName, findFeature)
Add a feature to the suffix array. The callback must return a sequence such that the feature at position i is attached to the suffix referenced by self.SA[i]. It is called with one argument: the instance of SuffixArray self. The callback may traverse self.SA in any fashion. The default behavior is to name the new feature after the callback name. To give another name, set the argument name accordingly. When the feature of an unknown substring of the text is requested, the value of the default argument is used. If the feature attached to a suffix is independent of the other suffix features, then the method addFeature is a better alternative. You may use addFeatureSA as a decorator as in the following example. Example: a feature named bigram which attaches the frequency of the leading bigram to each suffix.
>>> SA=SuffixArray("mississippi", unit=UNIT_BYTE)
>>> def bigram(SA):
...    res=[0]*SA.length
...    end=0
...    while end <= SA.length:
...
...        begin=end-1
...        while end < SA.length and SA._LCP_values[end]>=2:
...            if SA.SA[end]+2<=SA.length: #end of string
...                end+=1
...
...        nbBigram=end-begin
...        for i in xrange(begin, end):
...            if SA.SA[i]+2<=SA.length:
...                res[i]=nbBigram
...
...        end+=1
...    return res
>>> SA.addFeatureSA(bigram, 0)
>>> SA._bigram_values
[0, 1, 2, 2, 1, 1, 1, 2, 2, 2, 2]
>>> print str(SA).expandtabs(14) #doctest: +SKIP
...   10 'i'          LCP=0 , bigram=0
...    7 'ippi'       LCP=1 , bigram=1
...    4 'issippi'    LCP=1 , bigram=2
...    1 'ississippi' LCP=4 , bigram=2
...    0 'mississipp' LCP=0 , bigram=1
...    9 'pi'         LCP=0 , bigram=1
...    8 'ppi'        LCP=1 , bigram=1
...    6 'sippi'      LCP=0 , bigram=2
...    3 'sissippi'   LCP=2 , bigram=2
...    5 'ssippi'     LCP=1 , bigram=2
...    2 'ssissippi'  LCP=3 , bigram=2
>>> SA.bigram('ip')
1
>>> SA.bigram('si')
2
>>> SA.bigram('zw')
0
entailment
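A smaller whole-array callback than the bigram example makes the addFeatureSA contract easier to see; the sketch below attaches each suffix's length. The feature name suffix_length is made up for illustration.

SA = SuffixArray("banana", unit=UNIT_BYTE)

def suffix_length(sa):
    # one value per entry of sa.SA, aligned with suffix array order
    return [sa.length - pos for pos in sa.SA]

SA.addFeatureSA(suffix_length, default=0, name="suffix_length")
# SA.suffix_length("ana") returns the length of one suffix starting
# with "ana", or the default 0 when "ana" does not occur.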
def addFeature(self, callback, default=None, name=None, arguments=None):
    """
    Add a feature to the suffix array.
    The callback must return the feature value for the suffix at
    position self.SA[i].

    The callback must be callable (a function or lambda).

    The argument names of the callback are used to determine the data
    needed. If an argument is the name of a feature already defined,
    then this argument will be the value of that feature for the
    current suffix.
    In addition, the argument pos is the position of the current suffix
    and iSA is the index of pos in SA.
    Other attributes of the SuffixArray instance may be used as
    argument names.

    If the feature attached to a suffix depends on other suffix
    features, then the method addFeatureSA is the only choice.
    """
    if name is None:
        featureName = callback.__name__
    else:
        featureName = name

    if arguments is None:
        signature = getargspec(callback)[0]
    else:
        signature = arguments

    featureValues = [default] * self.length
    args = [getattr(self, "_%s_values" % featName) for featName in signature]

    for i, pos in enumerate(self.SA):
        arg = [j[i] for j in args]
        featureValues[i] = callback(*arg)

    setattr(self, "_%s_values" % featureName, featureValues)
    setattr(self, "%s_default" % featureName, default)
    self.features.append(featureName)

    def findFeature(substring):
        res = self._findOne(substring)
        # _findOne returns False on a miss; position 0 is a valid hit,
        # so test against False explicitly instead of truthiness
        if res is not False:
            return featureValues[res]
        else:
            return default
    setattr(self, featureName, findFeature)
Add a feature to the suffix array. The callback must return the feature value for the suffix at position self.SA[i]. The callback must be callable (a function or lambda). The argument names of the callback are used to determine the data needed. If an argument is the name of a feature already defined, then this argument will be the value of that feature for the current suffix. In addition, the argument pos is the position of the current suffix and iSA is the index of pos in SA. Other attributes of the SuffixArray instance may be used as argument names. If the feature attached to a suffix depends on other suffix features, then the method addFeatureSA is the only choice.
entailment
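For contrast, a per-suffix callback through addFeature, consuming an already-registered feature by naming it in arguments; here the LCP values computed at construction time (assuming noLCPs was left off). The name is_repeat is illustrative.

SA = SuffixArray("mississippi", unit=UNIT_BYTE)

def is_repeat(LCP):
    # True when this suffix shares at least one leading token
    # with its predecessor in suffix array order
    return LCP >= 1

SA.addFeature(is_repeat, default=False, name="is_repeat", arguments=["LCP"])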
def tokenize(self, string):
    """
    Tokenizer utility.
    When processing bytes, the string is output unaltered.

    The character unit type is used for unicode data; the string is
    decoded according to the encoding provided.

    In the case of the word unit, EOL characters are detached from the
    preceding word, and the list of words is output, i.e. the list of
    non-space strings separated by space strings.

    >>> SA=SuffixArray('abecedaire', UNIT_BYTE)
    >>> SA.tokenize('abecedaire')=='abecedaire'
    True
    >>> len(SA.tokenize('abecedaire'))
    10

    >>> SA=SuffixArray('abecedaire', UNIT_BYTE, "utf-8")
    >>> SA.tokenize('abecedaire')==u'abecedaire'
    True
    >>> len(SA.tokenize('abecedaire'))
    10

    >>> SA=SuffixArray('mississippi', UNIT_WORD)
    >>> SA.tokenize('miss issi ppi')
    ['miss', 'issi', 'ppi']
    >>> SA.tokenize('miss issi\\nppi')
    ['miss', 'issi', '\\n', 'ppi']
    """
    if self.unit == UNIT_WORD:
        # the EOL character is treated as a word, hence a substitution
        # before split
        return [token for token in string.replace("\n", " \n ").split(self.tokSep) if token != ""]
    elif self.unit == UNIT_CHARACTER:
        return string.decode(self.encoding)
    else:
        return string
Tokenizer utility. When processing bytes, the string is output unaltered. The character unit type is used for unicode data; the string is decoded according to the encoding provided. In the case of the word unit, EOL characters are detached from the preceding word, and the list of words is output, i.e. the list of non-space strings separated by space strings.
>>> SA=SuffixArray('abecedaire', UNIT_BYTE)
>>> SA.tokenize('abecedaire')=='abecedaire'
True
>>> len(SA.tokenize('abecedaire'))
10
>>> SA=SuffixArray('abecedaire', UNIT_BYTE, "utf-8")
>>> SA.tokenize('abecedaire')==u'abecedaire'
True
>>> len(SA.tokenize('abecedaire'))
10
>>> SA=SuffixArray('mississippi', UNIT_WORD)
>>> SA.tokenize('miss issi ppi')
['miss', 'issi', 'ppi']
>>> SA.tokenize('miss issi\\nppi')
['miss', 'issi', '\\n', 'ppi']
entailment
def reprString(self, string, length):
    """
    Output a string of `length` tokens in the original form.
    If string is an integer, it is interpreted as an offset in the text.
    Otherwise string is interpreted as a sequence of ids (see voc and tokId).

    >>> SA=SuffixArray('mississippi', UNIT_BYTE)
    >>> SA.reprString(0, 3)
    'mis'

    >>> SA=SuffixArray('mississippi', UNIT_BYTE)
    >>> SA.reprString([1, 4, 1, 3, 3, 2], 5)
    'isipp'

    >>> SA=SuffixArray('missi ssi ppi', UNIT_WORD)
    >>> SA.reprString(0, 3)
    'missi ssi ppi'

    >>> SA=SuffixArray('missi ssi ppi', UNIT_WORD)
    >>> SA.reprString([1, 3, 2], 3)
    'missi ssi ppi'
    """
    if isinstance(string, int):
        length = min(length, self.length - string)
        string = self.string[string:string + length]
    voc = self.voc
    res = self.tokSep.join((voc[id] for id in string[:length]))
    if self.unit == UNIT_WORD:
        res = res.replace(" \n", "\n")
        res = res.replace("\n ", "\n")
    if self.unit == UNIT_CHARACTER:
        res = res.encode(self.encoding)
    return res
Output a string of `length` tokens in the original form. If string is an integer, it is interpreted as an offset in the text. Otherwise string is interpreted as a sequence of ids (see voc and tokId).
>>> SA=SuffixArray('mississippi', UNIT_BYTE)
>>> SA.reprString(0, 3)
'mis'
>>> SA=SuffixArray('mississippi', UNIT_BYTE)
>>> SA.reprString([1, 4, 1, 3, 3, 2], 5)
'isipp'
>>> SA=SuffixArray('missi ssi ppi', UNIT_WORD)
>>> SA.reprString(0, 3)
'missi ssi ppi'
>>> SA=SuffixArray('missi ssi ppi', UNIT_WORD)
>>> SA.reprString([1, 3, 2], 3)
'missi ssi ppi'
entailment
def toFile(self, filename):
    """
    Save the suffix array instance, including all attached features,
    to filename.
    Accept any filename following the _open conventions,
    for example if it ends with .gz the file created will be
    a compressed GZip file.
    """
    start = _time()
    fd = _open(filename, "w")

    savedData = [self.string, self.unit, self.voc, self.vocSize, self.SA, self.features]

    for featureName in self.features:
        featureValues = getattr(self, "_%s_values" % featureName)
        featureDefault = getattr(self, "%s_default" % featureName)
        savedData.append((featureValues, featureDefault))

    fd.write(_dumps(savedData, _HIGHEST_PROTOCOL))
    fd.flush()
    try:
        self.sizeOfSavedFile = getsize(fd.name)
    except OSError:  # if stdout is used
        self.sizeOfSavedFile = -1
    self.toFileTime = _time() - start
    if _trace:
        print >> _stderr, "toFileTime %.2fs" % self.toFileTime
    if _trace:
        print >> _stderr, "sizeOfSavedFile %sb" % self.sizeOfSavedFile
    fd.close()
Save the suffix array instance, including all attached features, to filename. Accept any filename following the _open conventions, for example if it ends with .gz the file created will be a compressed GZip file.
entailment
def fromFile(cls, filename):
    """
    Load a suffix array instance from filename, a file created
    by toFile.
    Accept any filename following the _open conventions.
    """
    self = cls.__new__(cls)  # new instance which does not call __init__

    start = _time()
    savedData = _loads(_open(filename, "r").read())

    # load common attributes
    self.string, self.unit, self.voc, self.vocSize, self.SA, features = savedData[:6]
    self.length = len(self.SA)

    # determine token delimiter
    if self.unit == UNIT_WORD:
        self.tokSep = " "
    elif self.unit in (UNIT_CHARACTER, UNIT_BYTE):
        self.tokSep = ""
    else:
        raise Exception("Unknown unit type identifier:", self.unit)

    # recompute tokId based on voc
    self.tokId = dict((char, iChar) for iChar, char in enumerate(self.voc))
    # count sentences only when the EOL token is part of the vocabulary
    self.nbSentences = self.string.count(self.tokId["\n"]) if "\n" in self.tokId else 0

    # Load features
    self.features = []
    for featureName, (featureValues, featureDefault) in zip(features, savedData[6:]):
        # addFeatureSA calls the lambda immediately, so the loop
        # variable featureValues is bound correctly on each iteration
        self.addFeatureSA((lambda _: featureValues), name=featureName, default=featureDefault)

    self.fromFileTime = _time() - start
    if _trace:
        print >> _stderr, "fromFileTime %.2fs" % self.fromFileTime
    return self
Load a suffix array instance from filename, a file created by toFile. Accept any filename following the _open conventions.
entailment
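A round-trip sketch tying toFile and fromFile together; per the _open conventions, the .gz suffix makes the dump compressed. Filenames are illustrative, and the check assumes LCPs were computed so find is usable.

SA = SuffixArray("mississippi", unit=UNIT_BYTE)
SA.toFile("mississippi.sa.gz")    # gzip-compressed because of the suffix
SA2 = SuffixArray.fromFile("mississippi.sa.gz")
assert list(SA2.find("ssi")) == list(SA.find("ssi"))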
def _findOne(self, subString):
    """
    >>> SA=SuffixArray("mississippi", unit=UNIT_BYTE)
    >>> SA._findOne("ippi")
    1
    >>> SA._findOne("missi")
    4
    """
    SA = self.SA
    string = self.string
    try:
        subString = _array("i", [self.tokId[c] for c in self.tokenize(subString)])
    except KeyError:
        # if a token of the subString is not in the vocabulary
        # the substring can't be in the string
        return False
    lenSubString = len(subString)

    ##############################
    # Binary search of subString #
    ##############################
    lower = 0
    upper = self.length
    success = False
    while upper - lower > 0:
        middle = (lower + upper) // 2
        middleSubString = string[SA[middle]:min(SA[middle] + lenSubString, self.length)]
        # NOTE: the cmp function is removed in Python 3
        # Strictly speaking we are doing one comparison more now
        if subString < middleSubString:
            upper = middle
        elif subString > middleSubString:
            lower = middle + 1
        else:
            success = True
            break

    if not success:
        return False
    else:
        return middle
>>> SA=SuffixArray("mississippi", unit=UNIT_BYTE) >>> SA._findOne("ippi") 1 >>> SA._findOne("missi") 4
entailment
def find(self, subString, features=()):
    """
    Binary search for subString in the suffix array.

    As soon as a suffix which starts with subString is found,
    it uses the LCPs in order to find the other matching suffixes.

    The output is a list of tuples (pos, feature0, feature1, ...)
    where feature0, feature1, ... are the features attached to the
    suffix at position pos.
    Features are listed in the same order as requested in the input
    list of features [featureName0, featureName1, ...]

    >>> SA=SuffixArray('mississippi', UNIT_BYTE)
    >>> SA.find("ssi")
    array('i', [5, 2])
    >>> SA.find("mi")
    array('i', [0])

    >>> SA=SuffixArray('miss A and miss B', UNIT_WORD)
    >>> SA.find("miss")
    array('i', [0, 3])

    >>> SA=SuffixArray('mississippi', UNIT_BYTE)
    >>> SA.find("iss", ['LCP'])
    [(4, 1), (1, 4)]

    >>> SA=SuffixArray('mississippi', UNIT_BYTE)
    >>> SA.find("A")
    array('i')

    >>> SA=SuffixArray('mississippi', UNIT_BYTE)
    >>> SA.find("pp")
    array('i', [8])

    >>> SA=SuffixArray('mississippi', UNIT_BYTE)
    >>> SA.find("ppp")
    array('i')

    >>> SA=SuffixArray('mississippi', UNIT_BYTE)
    >>> SA.find("im")
    array('i')
    """
    SA = self.SA
    LCPs = self._LCP_values

    middle = self._findOne(subString)
    if middle is False:
        return _array('i')
    subString = _array("i", [self.tokId[c] for c in self.tokenize(subString)])
    lenSubString = len(subString)

    ###########################################
    # Use LCPs to retrieve the other suffixes #
    ###########################################
    lower = middle
    upper = middle + 1
    while lower > 0 and LCPs[lower] >= lenSubString:
        lower -= 1
    while upper < self.length and LCPs[upper] >= lenSubString:
        upper += 1

    ###############################################
    # When features is empty, outputs a flat list #
    ###############################################
    res = SA[lower:upper]
    if len(features) == 0:
        return res

    ##############################################
    # When features is non empty, outputs a list #
    # of tuples (pos, feature_1, feature_2, ...) #
    ##############################################
    else:
        features = [getattr(self, "_%s_values" % featureName) for featureName in features]
        features = [featureValues[lower:upper] for featureValues in features]
        return zip(res, *features)
Binary search for subString in the suffix array. As soon as a suffix which starts with subString is found, it uses the LCPs in order to find the other matching suffixes. The output is a list of tuples (pos, feature0, feature1, ...) where feature0, feature1, ... are the features attached to the suffix at position pos. Features are listed in the same order as requested in the input list of features [featureName0, featureName1, ...]
>>> SA=SuffixArray('mississippi', UNIT_BYTE)
>>> SA.find("ssi")
array('i', [5, 2])
>>> SA.find("mi")
array('i', [0])
>>> SA=SuffixArray('miss A and miss B', UNIT_WORD)
>>> SA.find("miss")
array('i', [0, 3])
>>> SA=SuffixArray('mississippi', UNIT_BYTE)
>>> SA.find("iss", ['LCP'])
[(4, 1), (1, 4)]
>>> SA=SuffixArray('mississippi', UNIT_BYTE)
>>> SA.find("A")
array('i')
>>> SA=SuffixArray('mississippi', UNIT_BYTE)
>>> SA.find("pp")
array('i', [8])
>>> SA=SuffixArray('mississippi', UNIT_BYTE)
>>> SA.find("ppp")
array('i')
>>> SA=SuffixArray('mississippi', UNIT_BYTE)
>>> SA.find("im")
array('i')
entailment
def escape(s, quote=False):
    """Replace the special characters "&", "<" and ">" with HTML-safe sequences.

    If the optional flag `quote` is `True`, the quotation mark
    character (") is also translated.

    There is special handling for `None`, which escapes to an empty string.

    :param s: the string to escape.
    :param quote: set to true to also escape double quotes.
    """
    if s is None:
        return ''
    elif hasattr(s, '__html__'):
        return s.__html__()
    elif not isinstance(s, basestring):
        s = unicode(s)
    s = s.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')
    if quote:
        s = s.replace('"', "&quot;")
    return s
Replace the special characters "&", "<" and ">" with HTML-safe sequences. If the optional flag `quote` is `True`, the quotation mark character (") is also translated. There is special handling for `None`, which escapes to an empty string. :param s: the string to escape. :param quote: set to true to also escape double quotes.
entailment
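The branches above translate into the following behavior (results shown in comments):

escape('<a href="x">&</a>')          # '&lt;a href="x"&gt;&amp;&lt;/a&gt;'
escape('<a href="x">', quote=True)   # '&lt;a href=&quot;x&quot;&gt;'
escape(None)                         # ''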
def add(self, *args, **kwargs): """Add a new entry to the feed. This function can either be called with a :class:`FeedEntry` or some keyword and positional arguments that are forwarded to the :class:`FeedEntry` constructor. """ if len(args) == 1 and not kwargs and isinstance(args[0], FeedEntry): self.entries.append(args[0]) else: kwargs['feed_url'] = self.feed_url kwargs['timezone'] = self.timezone self.entries.append(FeedEntry(*args, **kwargs))
Add a new entry to the feed. This function can either be called with a :class:`FeedEntry` or some keyword and positional arguments that are forwarded to the :class:`FeedEntry` constructor.
entailment
def generate(self):
    """Return a generator that yields pieces of XML."""
    # atom demands either an author element in every entry or a global one
    if not self.author:
        if not all(bool(e.author) for e in self.entries):
            self.author = ({'name': u'unbekannter Autor'},)

    if not self.updated:
        dates = sorted([entry.updated for entry in self.entries])
        self.updated = dates[-1] if dates else datetime.utcnow()

    yield u'<?xml version="1.0" encoding="utf-8"?>\n'
    yield u'<feed xmlns="http://www.w3.org/2005/Atom">\n'
    yield u' ' + _make_text_block('title', self.title, self.title_type)
    yield u' <id>%s</id>\n' % escape(self.id)
    yield u' <updated>%s</updated>\n' % format_iso8601(self.updated, self.timezone)
    if self.url:
        yield u' <link href="%s" />\n' % escape(self.url, True)
    if self.feed_url:
        yield u' <link href="%s" rel="self" />\n' % \
            escape(self.feed_url, True)
    for link in self.links:
        yield u' <link %s/>\n' % ''.join('%s="%s" ' %
            (k, escape(link[k], True)) for k in link)
    for author in self.author:
        yield u' <author>\n'
        yield u' <name>%s</name>\n' % escape(author['name'])
        if 'uri' in author:
            yield u' <uri>%s</uri>\n' % escape(author['uri'])
        if 'email' in author:
            yield u' <email>%s</email>\n' % escape(author['email'])
        yield u' </author>\n'
    if self.subtitle:
        yield u' ' + _make_text_block('subtitle', self.subtitle, self.subtitle_type)
    if self.icon:
        yield u' <icon>%s</icon>\n' % escape(self.icon)
    if self.logo:
        yield u' <logo>%s</logo>\n' % escape(self.logo)
    if self.rights:
        yield u' ' + _make_text_block('rights', self.rights, self.rights_type)
    generator_name, generator_url, generator_version = self.generator
    if generator_name or generator_url or generator_version:
        tmp = [u' <generator']
        if generator_url:
            tmp.append(u' uri="%s"' % escape(generator_url, True))
        if generator_version:
            tmp.append(u' version="%s"' % escape(generator_version, True))
        tmp.append(u'>%s</generator>\n' % escape(generator_name))
        yield u''.join(tmp)
    for entry in self.entries:
        for line in entry.generate():
            yield u' ' + line
    yield u'</feed>\n'
Return a generator that yields pieces of XML.
entailment
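A usage sketch for the feed generator. The constructor and its keyword names are assumptions (an AtomFeed class in the style of Werkzeug's atom helper, which this code closely resembles); only generate(), add() and the attributes they read are taken from the code above.

from datetime import datetime

feed = AtomFeed(title="Example feed",                 # hypothetical constructor
                id="urn:example:feed",
                url="http://example.com/",
                feed_url="http://example.com/feed.atom")
feed.add(title="First post",
         id="urn:example:entry:1",
         updated=datetime.utcnow(),
         content="Hello, world")
xml = u"".join(feed.generate())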
def generate(self):
    """Yields pieces of ATOM XML."""
    base = ''
    if self.xml_base:
        base = ' xml:base="%s"' % escape(self.xml_base, True)
    yield u'<entry%s>\n' % base
    yield u' ' + _make_text_block('title', self.title, self.title_type)
    yield u' <id>%s</id>\n' % escape(self.id)
    yield u' <updated>%s</updated>\n' % format_iso8601(self.updated, self.timezone)
    if self.published:
        yield u' <published>%s</published>\n' % \
            format_iso8601(self.published, self.timezone)
    if self.url:
        # escape with quote=True: the URL lands inside an attribute
        yield u' <link href="%s" />\n' % escape(self.url, True)
    for author in self.author:
        yield u' <author>\n'
        yield u' <name>%s</name>\n' % escape(author['name'])
        if 'uri' in author:
            yield u' <uri>%s</uri>\n' % escape(author['uri'])
        if 'email' in author:
            yield u' <email>%s</email>\n' % escape(author['email'])
        yield u' </author>\n'
    for link in self.links:
        yield u' <link %s/>\n' % ''.join('%s="%s" ' %
            (k, escape(link[k], True)) for k in link)
    if self.summary:
        yield u' ' + _make_text_block('summary', self.summary, self.summary_type)
    if self.content:
        if isinstance(self.content, dict):
            if "content" in self.content:
                yield u' <content %s>%s</content>\n' % (' '.join('%s="%s"' %
                    (k, escape(self.content[k], True)) for k in self.content if k != "content"),
                    escape(self.content["content"]))
            else:
                yield u' <content %s/>\n' % ' '.join('%s="%s" ' %
                    (k, escape(self.content[k], True)) for k in self.content)
        else:
            yield u' ' + _make_text_block('content', self.content, self.content_type)
    yield u'</entry>\n'
Yields pieces of ATOM XML.
entailment
def override_djconfig(**new_cache_values):
    """
    Temporarily override config values.

    This is similar to :py:func:`django.test.override_settings`,\
    use it in testing.

    :param new_cache_values: Keyword arguments,\
    the key should match one in the config,\
    a new one is created otherwise,\
    the value is overridden within\
    the decorated function
    """
    def decorator(func):
        @wraps(func)
        def func_wrapper(*args, **kw):
            old_cache_values = {
                key: getattr(conf.config, key)
                for key in new_cache_values}

            conf.config._set_many(new_cache_values)

            try:
                # todo: make a note about this in the docs:
                # don't populate the config within migrations.
                # This works because the config table is empty, so even
                # if the middleware gets called it won't update the config
                # (_updated_at will be None). This assumes the table is not
                # populated by the user (i.e. within a migration), in which
                # case it will load all the default values.
                return func(*args, **kw)
            finally:
                conf.config._set_many(old_cache_values)

        return func_wrapper
    return decorator
Temporarily override config values. This is similar to :py:func:`django.test.override_settings`,\ use it in testing. :param new_cache_values: Keyword arguments,\ the key should match one in the config,\ a new one is created otherwise,\ the value is overridden within\ the decorated function
entailment
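Typical test usage of the decorator; the finally block guarantees the previous values come back even when the test raises. The config key and import paths below are assumptions for illustration.

from djconfig.utils import override_djconfig   # assumed import path
from djconfig import config                    # assumed public alias for conf.config

@override_djconfig(comments_per_page=10)       # hypothetical config key
def test_pagination():
    assert config.comments_per_page == 10
    # the original value is restored when the function exits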
def serialize(value, field):
    """
    Form values serialization

    :param object value: A value to be serialized\
    for saving it into the database and later\
    loading it into the form as initial value
    :param object field: The form field the value belongs to,\
    used to pick the serialization strategy
    """
    assert isinstance(field, forms.Field)

    if isinstance(field, forms.ModelMultipleChoiceField):
        return json.dumps([v.pk for v in value])

    # todo: remove
    if isinstance(value, models.Model):
        return value.pk

    return value
Form values serialization :param object value: A value to be serialized\ for saving it into the database and later\ loading it into the form as initial value :param object field: The form field the value belongs to,\ used to pick the serialization strategy
entailment
def get_version(package):
    """Get version without importing the lib"""
    with io.open(os.path.join(BASE_DIR, package, '__init__.py'), encoding='utf-8') as fh:
        return [
            line.split('=', 1)[1].strip().strip("'").strip('"')
            for line in fh.readlines()
            if '__version__' in line][0]
Get version without importing the lib
entailment
def _check_backend(): """ Check :py:class:`djconfig.middleware.DjConfigMiddleware`\ is registered into ``settings.MIDDLEWARE_CLASSES`` """ # Django 1.10 does not allow # both settings to be set middleware = set( getattr(settings, 'MIDDLEWARE', None) or getattr(settings, 'MIDDLEWARE_CLASSES', None) or []) # Deprecated alias if "djconfig.middleware.DjConfigLocMemMiddleware" in middleware: return if "djconfig.middleware.DjConfigMiddleware" in middleware: return raise ValueError( "djconfig.middleware.DjConfigMiddleware " "is required but it was not found in " "MIDDLEWARE_CLASSES nor in MIDDLEWARE")
Check :py:class:`djconfig.middleware.DjConfigMiddleware`\ is registered into ``settings.MIDDLEWARE_CLASSES``
entailment
def _register(self, form_class, check_middleware=True):
    """
    Register a config form into the registry

    :param object form_class: The form class to register.\
    Must be a subclass of :py:class:`djconfig.forms.ConfigForm`
    :param bool check_middleware: Check\
    :py:class:`djconfig.middleware.DjConfigMiddleware`\
    is registered into ``settings.MIDDLEWARE_CLASSES``. Default True
    """
    if not issubclass(form_class, _ConfigFormBase):
        raise ValueError(
            "The form does not inherit from `forms.ConfigForm`")

    self._registry.add(form_class)

    if check_middleware:
        _check_backend()
Register a config form into the registry :param object form_class: The form class to register.\ Must be a subclass of :py:class:`djconfig.forms.ConfigForm` :param bool check_middleware: Check\ :py:class:`djconfig.middleware.DjConfigMiddleware`\ is registered into ``settings.MIDDLEWARE_CLASSES``. Default True
entailment
def _reload(self): """ Gets every registered form's field value.\ If a field name is found in the db, it will load it from there.\ Otherwise, the initial value from the field form is used """ ConfigModel = apps.get_model('djconfig.Config') cache = {} data = dict( ConfigModel.objects .all() .values_list('key', 'value')) # populate cache with initial form values, # then with cleaned database values, # then with raw database file/image paths for form_class in self._registry: empty_form = form_class() cache.update({ name: field.initial for name, field in empty_form.fields.items()}) form = form_class(data={ name: _deserialize(data[name], field) for name, field in empty_form.fields.items() if name in data and not isinstance(field, forms.FileField)}) form.is_valid() cache.update({ name: _unlazify(value) for name, value in form.cleaned_data.items() if name in data}) # files are special because they don't have an initial value # and the POSTED data must contain the file. So, we keep # the stored path as is # TODO: see if serialize/deserialize/unlazify can be used for this instead cache.update({ name: data[name] for name, field in empty_form.fields.items() if name in data and isinstance(field, forms.FileField)}) cache['_updated_at'] = data.get('_updated_at') self._cache = cache
Gets every registered form's field value.\ If a field name is found in the db, it will load it from there.\ Otherwise, the initial value from the field form is used
entailment
def _reload_maybe(self): """ Reload the config if the config\ model has been updated. This is called\ once on every request by the middleware.\ Should not be called directly. """ ConfigModel = apps.get_model('djconfig.Config') data = dict( ConfigModel.objects .filter(key='_updated_at') .values_list('key', 'value')) if (not hasattr(self, '_updated_at') or self._updated_at != data.get('_updated_at')): self._reload()
Reload the config if the config\ model has been updated. This is called\ once on every request by the middleware.\ Should not be called directly.
entailment
def save(self): """ Save the config with the cleaned data,\ update the last modified date so\ the config is reloaded on other process/nodes.\ Reload the config so it can be called right away. """ assert self.__class__ in conf.config._registry,\ '%(class_name)s is not registered' % { 'class_name': self.__class__.__name__ } ConfigModel = apps.get_model('djconfig.Config') for field_name, value in self.cleaned_data.items(): value = utils.serialize( value=value, field=self.fields.get(field_name, None)) # TODO: use update_or_create count = (ConfigModel.objects .filter(key=field_name) .update(value=value)) if not count: ConfigModel.objects.create( key=field_name, value=value) count = (ConfigModel.objects .filter(key='_updated_at') .update(value=str(timezone.now()))) if not count: ConfigModel.objects.create( key='_updated_at', value=str(timezone.now())) conf.config._reload()
Save the config with the cleaned data,\ update the last modified date so\ the config is reloaded on other process/nodes.\ Reload the config so it can be called right away.
entailment
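End to end, the pieces above compose like this; the field and form names are made up, and the top-level register/config aliases are assumed to be exported by the package.

from django import forms
from djconfig import config, register          # assumed public API
from djconfig.forms import ConfigForm

class AppConfigForm(ConfigForm):
    site_name = forms.CharField(initial="my site")   # example field

register(AppConfigForm)

form = AppConfigForm(data={"site_name": "renamed site"})
if form.is_valid():
    form.save()               # persists the values, bumps _updated_at, reloads
print(config.site_name)       # "renamed site"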
def register(conf, conf_admin, **options): """ Register a new admin section. :param conf: A subclass of ``djconfig.admin.Config`` :param conf_admin: A subclass of ``djconfig.admin.ConfigAdmin`` :param options: Extra options passed to ``django.contrib.admin.site.register`` """ assert issubclass(conf_admin, ConfigAdmin), ( 'conf_admin is not a ConfigAdmin subclass') assert issubclass( getattr(conf_admin, 'change_list_form', None), ConfigForm), 'No change_list_form set' assert issubclass(conf, Config), ( 'conf is not a Config subclass') assert conf.app_label, 'No app_label set' assert conf.verbose_name_plural, 'No verbose_name_plural set' assert not conf.name or re.match(r"^[a-zA-Z_]+$", conf.name), ( 'Not a valid name. Valid chars are [a-zA-Z_]') config_class = type("Config", (), {}) config_class._meta = type("Meta", (_ConfigMeta,), { 'app_label': conf.app_label, 'verbose_name_plural': conf.verbose_name_plural, 'object_name': 'Config', 'model_name': conf.name, 'module_name': conf.name}) admin.site.register([config_class], conf_admin, **options)
Register a new admin section. :param conf: A subclass of ``djconfig.admin.Config`` :param conf_admin: A subclass of ``djconfig.admin.ConfigAdmin`` :param options: Extra options passed to ``django.contrib.admin.site.register``
entailment
def Mixed(*types):
    """Mixed type, used to indicate a field in a schema can be one of many types.
    Use as a last resort only.

    The Mixed type can be used directly as a class to indicate any type is
    permitted for a given field:
        `"my_field": {"type": Mixed}`

    It can also be instantiated with a list of the specific types the field
    is allowed to be, for more control:
        `"my_field": {"type": Mixed(ObjectId, int)}`
    """
    if len(types) < 2:
        raise ValueError("Mixed type requires at least 2 specific types")

    types = set(types)  # dedupe

    class MixedType(type):
        def __instancecheck__(cls, instance):
            """Returns true if the given value is an instance of
            one of the types enclosed by this mixed type."""
            for mtype in types:
                if isinstance(instance, mtype):
                    return True
            return False

    class Mixed(object):
        # Python 2-style metaclass declaration; hooks isinstance()
        __metaclass__ = MixedType

    return Mixed
Mixed type, used to indicate a field in a schema can be one of many types. Use as a last resort only. The Mixed type can be used directly as a class to indicate any type is permitted for a given field: `"my_field": {"type": Mixed}` It can also be instantiated with a list of the specific types the field is allowed to be, for more control: `"my_field": {"type": Mixed(ObjectId, int)}`
entailment
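Both spellings of Mixed in action; isinstance defers to the metaclass's __instancecheck__ hook (Python 2 semantics, matching the __metaclass__ declaration above):

IntOrStr = Mixed(int, str)      # restricted mixed type
isinstance(5, IntOrStr)         # True
isinstance("five", IntOrStr)    # True
isinstance(5.0, IntOrStr)       # False

# As a bare class in a schema, any type is permitted:
# "my_field": {"type": Mixed}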
def one_of(*args):
    """
    Validates that a field value matches one of the values
    given to this validator.
    """
    if len(args) == 1 and isinstance(args[0], list):
        items = args[0]
    else:
        items = list(args)

    def validate(value):
        if value not in items:
            return e("{} is not in the list {}", value, items)
    return validate
Validates that a field value matches one of the values given to this validator.
entailment
def gte(min_value): """ Validates that a field value is greater than or equal to the value given to this validator. """ def validate(value): if value < min_value: return e("{} is not greater than or equal to {}", value, min_value) return validate
Validates that a field value is greater than or equal to the value given to this validator.
entailment
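Each validator returns a closure that yields an error message (built with the e helper) on failure and None on success, so they compose naturally in a schema:

age_check = gte(18)
color_check = one_of("red", "green", "blue")

age_check(21)           # None: passes
age_check(16)           # "16 is not greater than or equal to 18"
color_check("mauve")    # "mauve is not in the list ['red', 'green', 'blue']"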