code
stringlengths 26
870k
| docstring
stringlengths 1
65.6k
| func_name
stringlengths 1
194
| language
stringclasses 1
value | repo
stringlengths 8
68
| path
stringlengths 5
194
| url
stringlengths 46
254
| license
stringclasses 4
values |
---|---|---|---|---|---|---|---|
def fetch_subproof_data(
self, proof_id: str, force_reread: bool = False, uptodate_check_method: str = 'timestamp'
) -> Proof:
"""Get a subproof, re-reading from disk if it's not up-to-date."""
if self.proof_dir is not None and (force_reread or not self._subproofs[proof_id].up_to_date):
updated_subproof = Proof.read_proof_data(self.proof_dir, proof_id)
self._subproofs[proof_id] = updated_subproof
return updated_subproof
else:
return self._subproofs[proof_id] | Get a subproof, re-reading from disk if it's not up-to-date. | fetch_subproof_data | python | runtimeverification/k | pyk/src/pyk/proof/proof.py | https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/proof/proof.py | BSD-3-Clause |
def subproofs(self) -> Iterable[Proof]:
"""Return the subproofs, re-reading from disk the ones that changed."""
return self._subproofs.values() | Return the subproofs, re-reading from disk the ones that changed. | subproofs | python | runtimeverification/k | pyk/src/pyk/proof/proof.py | https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/proof/proof.py | BSD-3-Clause |
def get_steps(self) -> Iterable[PS]:
"""Return all currently available steps associated with this Proof. Should not modify `self`."""
... | Return all currently available steps associated with this Proof. Should not modify `self`. | get_steps | python | runtimeverification/k | pyk/src/pyk/proof/proof.py | https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/proof/proof.py | BSD-3-Clause |
def parallel_advance_proof(
proof: P,
create_prover: Callable[[], Prover[P, PS, SR]],
max_iterations: int | None = None,
fail_fast: bool = False,
max_workers: int = 1,
callback: Callable[[P], None] = (lambda x: None),
maintenance_rate: int = 1,
) -> None:
"""Advance proof with multithreaded strategy.
`Prover.step_proof()` to a worker thread pool for each step as available,
and `Proof.commit()` results as they become available,
and get new steps with `Proof.get_steps()` and submit to thread pool.
Generic type variables:
- P: Type of proof to be advanced in parallel.
- PS: Proof step: data required to perform a step of the proof.
- SR: Step result: data produced by executing a PS with `Prover.step_proof` used to update the `Proof`.
Args:
proof: The proof to advance.
create_prover: Function which creates a new `Prover`. These provers must not reference any shared
data to be written during `parallel_advance_proof`, to avoid race conditions.
max_iterations: Maximum number of steps to take.
fail_fast: If the proof is failing after finishing a step,
halt execution even if there are still available steps.
max_workers: Maximum number of worker threads the pool can spawn.
callback: Callable to run during proof maintenance, useful for getting real-time information about the proof.
maintenance_rate: Number of iterations between proof maintenance (writing to disk and executing callback).
"""
pending: set[Future[Any]] = set()
explored: set[PS] = set()
iterations = 0
with create_prover() as main_prover:
main_prover.init_proof(proof)
with _ProverPool[P, PS, SR](create_prover=create_prover, max_workers=max_workers) as pool:
def submit_steps(_steps: Iterable[PS]) -> None:
for step in _steps:
if step in explored:
continue
explored.add(step)
future: Future[Any] = pool.submit(step) # <-- schedule steps for execution
pending.add(future)
submit_steps(proof.get_steps())
while True:
if not pending:
break
done, _ = wait(pending, return_when='FIRST_COMPLETED')
future = done.pop()
proof_results = future.result()
for result in proof_results:
proof.commit(result)
iterations += 1
if iterations % maintenance_rate == 0:
proof.write_proof_data()
callback(proof)
if max_iterations is not None and max_iterations <= iterations:
break
if fail_fast and proof.failed:
_LOGGER.warning(f'Terminating proof early because fail_fast is set: {proof.id}')
break
submit_steps(proof.get_steps())
pending.remove(future)
if proof.failed:
proof.failure_info = main_prover.failure_info(proof)
proof.write_proof_data() | Advance proof with multithreaded strategy.
`Prover.step_proof()` to a worker thread pool for each step as available,
and `Proof.commit()` results as they become available,
and get new steps with `Proof.get_steps()` and submit to thread pool.
Generic type variables:
- P: Type of proof to be advanced in parallel.
- PS: Proof step: data required to perform a step of the proof.
- SR: Step result: data produced by executing a PS with `Prover.step_proof` used to update the `Proof`.
Args:
proof: The proof to advance.
create_prover: Function which creates a new `Prover`. These provers must not reference any shared
data to be written during `parallel_advance_proof`, to avoid race conditions.
max_iterations: Maximum number of steps to take.
fail_fast: If the proof is failing after finishing a step,
halt execution even if there are still available steps.
max_workers: Maximum number of worker threads the pool can spawn.
callback: Callable to run during proof maintenance, useful for getting real-time information about the proof.
maintenance_rate: Number of iterations between proof maintenance (writing to disk and executing callback). | parallel_advance_proof | python | runtimeverification/k | pyk/src/pyk/proof/proof.py | https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/proof/proof.py | BSD-3-Clause |
def __init__(
self,
create_prover: Callable[[], Prover[P, PS, SR]],
*,
max_workers: int | None = None,
) -> None:
"""Initialize an instance.
Args:
create_prover: Function which creates a new `Prover`. These provers must not reference any shared
data to be written during `parallel_advance_proof`, to avoid race conditions.
max_workers (optional): Maximum number of worker threads the pool can spawn.
"""
self._create_prover = create_prover
self._provers = {}
self._executor = ThreadPoolExecutor(max_workers)
self._closed = False | Initialize an instance.
Args:
create_prover: Function which creates a new `Prover`. These provers must not reference any shared
data to be written during `parallel_advance_proof`, to avoid race conditions.
max_workers (optional): Maximum number of worker threads the pool can spawn. | __init__ | python | runtimeverification/k | pyk/src/pyk/proof/proof.py | https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/proof/proof.py | BSD-3-Clause |
def step_proof(self, step: PS) -> Iterable[SR]:
"""Do the work associated with a `PS`, a proof step.
Should not modify a `Proof` or `self`, but may read from `self` as long as
those fields are not being modified during `step_proof()`, `get_steps()`, and `commit()`.
"""
... | Do the work associated with a `PS`, a proof step.
Should not modify a `Proof` or `self`, but may read from `self` as long as
those fields are not being modified during `step_proof()`, `get_steps()`, and `commit()`. | step_proof | python | runtimeverification/k | pyk/src/pyk/proof/proof.py | https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/proof/proof.py | BSD-3-Clause |
def init_proof(self, proof: P) -> None:
"""Perform any initialization steps needed at the beginning of proof execution.
For example, for `APRProver`, upload circularity and depends module of the proof
to the `KoreServer` via `add_module`.
"""
... | Perform any initialization steps needed at the beginning of proof execution.
For example, for `APRProver`, upload circularity and depends module of the proof
to the `KoreServer` via `add_module`. | init_proof | python | runtimeverification/k | pyk/src/pyk/proof/proof.py | https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/proof/proof.py | BSD-3-Clause |
def advance_proof(
self,
proof: P,
max_iterations: int | None = None,
fail_fast: bool = False,
callback: Callable[[P], None] = (lambda x: None),
maintenance_rate: int = 1,
) -> None:
"""Advance a proof.
Performs loop `Proof.get_steps()` -> `Prover.step_proof()` -> `Proof.commit()`.
Args:
proof: proof to advance.
max_iterations (optional): Maximum number of steps to take.
fail_fast: If the proof is failing after finishing a step,
halt execution even if there are still available steps.
callback: Callable to run in between each completed step, useful for getting real-time information about the proof.
maintenance_rate: Number of iterations between proof maintenance (writing to disk and executing callback).
"""
iterations = 0
_LOGGER.info(f'Initializing proof: {proof.id}')
self.init_proof(proof)
while True:
steps = list(proof.get_steps())
_LOGGER.info(f'Found {len(steps)} next steps for proof: {proof.id}')
if len(steps) == 0:
break
for step in steps:
if fail_fast and proof.failed:
_LOGGER.warning(f'Terminating proof early because fail_fast is set: {proof.id}')
proof.failure_info = self.failure_info(proof)
return
if max_iterations is not None and max_iterations <= iterations:
return
iterations += 1
results = self.step_proof(step)
for result in results:
proof.commit(result)
if iterations % maintenance_rate == 0:
proof.write_proof_data()
callback(proof)
if proof.failed:
proof.failure_info = self.failure_info(proof) | Advance a proof.
Performs loop `Proof.get_steps()` -> `Prover.step_proof()` -> `Proof.commit()`.
Args:
proof: proof to advance.
max_iterations (optional): Maximum number of steps to take.
fail_fast: If the proof is failing after finishing a step,
halt execution even if there are still available steps.
callback: Callable to run in between each completed step, useful for getting real-time information about the proof.
maintenance_rate: Number of iterations between proof maintenance (writing to disk and executing callback). | advance_proof | python | runtimeverification/k | pyk/src/pyk/proof/proof.py | https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/proof/proof.py | BSD-3-Clause |
def lift_edge(self, b_id: NodeIdLike) -> None:
"""Lift an edge up another edge directly preceding it.
`A --M steps--> B --N steps--> C` becomes `A --(M + N) steps--> C`. Node `B` is removed.
Args:
b_id: the identifier of the central node `B` of a sequence of edges `A --> B --> C`.
Raises:
AssertionError: If the edges in question are not in place.
"""
# Obtain edges `A -> B`, `B -> C`
a_to_b = single(self.kcfg.edges(target_id=b_id))
b_to_c = single(self.kcfg.edges(source_id=b_id))
# Remove the node `B`, effectively removing the entire initial structure
self.kcfg.remove_node(b_id)
# Create edge `A -> C`
self.kcfg.create_edge(
a_to_b.source.id, b_to_c.target.id, a_to_b.depth + b_to_c.depth, a_to_b.rules + b_to_c.rules
) | Lift an edge up another edge directly preceding it.
`A --M steps--> B --N steps--> C` becomes `A --(M + N) steps--> C`. Node `B` is removed.
Args:
b_id: the identifier of the central node `B` of a sequence of edges `A --> B --> C`.
Raises:
AssertionError: If the edges in question are not in place. | lift_edge | python | runtimeverification/k | pyk/src/pyk/kcfg/minimize.py | https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/kcfg/minimize.py | BSD-3-Clause |
def lift_edges(self) -> bool:
"""Perform all possible edge lifts across the KCFG.
The KCFG is transformed to an equivalent in which no further edge lifts are possible.
Given the KCFG design, it is not possible for one edge lift to either disallow another or
allow another that was not previously possible. Therefore, this function is guaranteed to
lift all possible edges without having to loop.
Returns:
An indicator of whether or not at least one edge lift was performed.
"""
edges_to_lift = sorted(
[
node.id
for node in self.kcfg.nodes
if len(self.kcfg.edges(source_id=node.id)) > 0 and len(self.kcfg.edges(target_id=node.id)) > 0
]
)
for node_id in edges_to_lift:
self.lift_edge(node_id)
return len(edges_to_lift) > 0 | Perform all possible edge lifts across the KCFG.
The KCFG is transformed to an equivalent in which no further edge lifts are possible.
Given the KCFG design, it is not possible for one edge lift to either disallow another or
allow another that was not previously possible. Therefore, this function is guaranteed to
lift all possible edges without having to loop.
Returns:
An indicator of whether or not at least one edge lift was performed. | lift_edges | python | runtimeverification/k | pyk/src/pyk/kcfg/minimize.py | https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/kcfg/minimize.py | BSD-3-Clause |
def lift_split_edge(self, b_id: NodeIdLike) -> None:
"""Lift a split up an edge directly preceding it.
`A --M steps--> B --[cond_1, ..., cond_N]--> [C_1, ..., C_N]` becomes
`A --[cond_1, ..., cond_N]--> [A #And cond_1 --M steps--> C_1, ..., A #And cond_N --M steps--> C_N]`.
Node `B` is removed.
Args:
b_id: The identifier of the central node `B` of the structure `A --> B --> [C_1, ..., C_N]`.
Raises:
AssertionError: If the structure in question is not in place.
AssertionError: If any of the `cond_i` contain variables not present in `A`.
"""
# Obtain edge `A -> B`, split `[cond_I, C_I | I = 1..N ]`
a_to_b = single(self.kcfg.edges(target_id=b_id))
a = a_to_b.source
split_from_b = single(self.kcfg.splits(source_id=b_id))
ci, csubsts = list(split_from_b.splits.keys()), list(split_from_b.splits.values())
# Ensure split can be lifted soundly (i.e., that it does not introduce fresh variables)
assert (
len(split_from_b.source_vars.difference(a.free_vars)) == 0
and len(split_from_b.target_vars.difference(split_from_b.source_vars)) == 0 # <-- Can we delete this check?
)
# Create CTerms and CSubsts corresponding to the new targets of the split
new_cterms = [csubst(a.cterm) for csubst in csubsts]
# Remove the node `B`, effectively removing the entire initial structure
self.kcfg.remove_node(b_id)
# Create the nodes `[ A #And cond_I | I = 1..N ]`.
ai: list[NodeIdLike] = [self.kcfg.create_node(cterm).id for cterm in new_cterms]
# Create the edges `[A #And cond_1 --M steps--> C_I | I = 1..N ]`
for i in range(len(ai)):
self.kcfg.create_edge(ai[i], ci[i], a_to_b.depth, a_to_b.rules)
# Create the split `A --[cond_1, ..., cond_N]--> [A #And cond_1, ..., A #And cond_N]
self.kcfg.create_split_by_nodes(a.id, ai) | Lift a split up an edge directly preceding it.
`A --M steps--> B --[cond_1, ..., cond_N]--> [C_1, ..., C_N]` becomes
`A --[cond_1, ..., cond_N]--> [A #And cond_1 --M steps--> C_1, ..., A #And cond_N --M steps--> C_N]`.
Node `B` is removed.
Args:
b_id: The identifier of the central node `B` of the structure `A --> B --> [C_1, ..., C_N]`.
Raises:
AssertionError: If the structure in question is not in place.
AssertionError: If any of the `cond_i` contain variables not present in `A`. | lift_split_edge | python | runtimeverification/k | pyk/src/pyk/kcfg/minimize.py | https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/kcfg/minimize.py | BSD-3-Clause |
def lift_split_split(self, b_id: NodeIdLike) -> None:
"""Lift a split up a split directly preceding it, joining them into a single split.
`A --[..., cond_B, ...]--> [..., B, ...]` with `B --[cond_1, ..., cond_N]--> [C_1, ..., C_N]` becomes
`A --[..., cond_B #And cond_1, ..., cond_B #And cond_N, ...]--> [..., C_1, ..., C_N, ...]`.
Node `B` is removed.
Args:
b_id: the identifier of the node `B` of the structure
`A --[..., cond_B, ...]--> [..., B, ...]` with `B --[cond_1, ..., cond_N]--> [C_1, ..., C_N]`.
Raises:
AssertionError: If the structure in question is not in place.
"""
# Obtain splits `A --[..., cond_B, ...]--> [..., B, ...]` and
# `B --[cond_1, ..., cond_N]--> [C_1, ..., C_N]-> [C_1, ..., C_N]`
split_from_a, split_from_b = single(self.kcfg.splits(target_id=b_id)), single(self.kcfg.splits(source_id=b_id))
splits_from_a, splits_from_b = split_from_a.splits, split_from_b.splits
a = split_from_a.source
list(splits_from_b.keys())
# Ensure split can be lifted soundly (i.e., that it does not introduce fresh variables)
assert ( # <-- Does it will be a problem when using merging nodes, because it would introduce new variables?
len(split_from_b.source_vars.difference(a.free_vars)) == 0
and len(split_from_b.target_vars.difference(split_from_b.source_vars)) == 0
)
# Remove the node `B`, thereby removing the two splits as well
splits_from_a.pop(self.kcfg.node(b_id).id)
self.kcfg.remove_node(b_id)
# Create the new split `A --[..., cond_B #And cond_1, ..., cond_B #And cond_N, ...]--> [..., C_1, ..., C_N, ...]`
self.kcfg.create_split_by_nodes(a.id, list(splits_from_a.keys()) + list(splits_from_b.keys())) | Lift a split up a split directly preceding it, joining them into a single split.
`A --[..., cond_B, ...]--> [..., B, ...]` with `B --[cond_1, ..., cond_N]--> [C_1, ..., C_N]` becomes
`A --[..., cond_B #And cond_1, ..., cond_B #And cond_N, ...]--> [..., C_1, ..., C_N, ...]`.
Node `B` is removed.
Args:
b_id: the identifier of the node `B` of the structure
`A --[..., cond_B, ...]--> [..., B, ...]` with `B --[cond_1, ..., cond_N]--> [C_1, ..., C_N]`.
Raises:
AssertionError: If the structure in question is not in place. | lift_split_split | python | runtimeverification/k | pyk/src/pyk/kcfg/minimize.py | https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/kcfg/minimize.py | BSD-3-Clause |
def lift_splits(self) -> bool:
"""Perform all possible split liftings.
The KCFG is transformed to an equivalent in which no further split lifts are possible.
Returns:
An indicator of whether or not at least one split lift was performed.
"""
def _lift_split(finder: Callable, lifter: Callable) -> bool:
result = False
while True:
splits_to_lift = sorted(
[
node.id
for node in self.kcfg.nodes
if (splits := self.kcfg.splits(source_id=node.id)) != []
and (sources := finder(target_id=node.id)) != []
and (source := single(sources).source)
and (split := single(splits))
and len(split.source_vars.difference(source.free_vars)) == 0
and len(split.target_vars.difference(split.source_vars)) == 0
]
)
for id in splits_to_lift:
lifter(id)
result = True
if len(splits_to_lift) == 0:
break
return result
def _fold_lift(result: bool, finder_lifter: tuple[Callable, Callable]) -> bool:
return _lift_split(finder_lifter[0], finder_lifter[1]) or result
return reduce(
_fold_lift, [(self.kcfg.edges, self.lift_split_edge), (self.kcfg.splits, self.lift_split_split)], False
) | Perform all possible split liftings.
The KCFG is transformed to an equivalent in which no further split lifts are possible.
Returns:
An indicator of whether or not at least one split lift was performed. | lift_splits | python | runtimeverification/k | pyk/src/pyk/kcfg/minimize.py | https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/kcfg/minimize.py | BSD-3-Clause |
def merge_nodes(self) -> bool:
"""Merge targets of Split for cutting down the number of branches, using heuristics KCFGSemantics.is_mergeable.
Side Effect: The KCFG is rewritten by the following rewrite pattern,
- Match: A -|Split|-> A_i -|Edge|-> B_i
- Rewrite:
- if `B_x, B_y, ..., B_z are not mergeable` then unchanged
- if `B_x, B_y, ..., B_z are mergeable`, then
- A -|Split|-> A_x or A_y or ... or A_z
- A_x or A_y or ... or A_z -|Edge|-> B_x or B_y or ... or B_z
- B_x or B_y or ... or B_z -|Split|-> B_x, B_y, ..., B_z
Specifically, when `B_merge = B_x or B_y or ... or B_z`
- `or`: fresh variables in places where the configurations differ
- `Edge` in A_merged -|Edge|-> B_merge: list of merged edges is from A_i -|Edge|-> B_i
- `Split` in B_merge -|Split|-> B_x, B_y, ..., B_z: subst for it is from A -|Split|-> A_1, A_2, ..., A_n
:param semantics: provides the is_mergeable heuristic
:return: whether any merge was performed
"""
def _is_mergeable(x: KCFG.Edge | KCFG.MergedEdge, y: KCFG.Edge | KCFG.MergedEdge) -> bool:
return self.semantics.is_mergeable(x.target.cterm, y.target.cterm)
# ---- Match ----
# A -|Split|> Ai -|Edge/MergedEdge|> Mergeable Bi
sub_graphs: list[tuple[KCFG.Split, list[list[KCFG.Edge | KCFG.MergedEdge]]]] = []
for split in self.kcfg.splits():
_edges = [
single(self.kcfg.general_edges(source_id=ai))
for ai in split.target_ids
if self.kcfg.general_edges(source_id=ai)
]
_partitions = partition(_edges, _is_mergeable)
if len(_partitions) < len(_edges):
sub_graphs.append((split, _partitions))
if not sub_graphs:
return False
# ---- Rewrite ----
for split, edge_partitions in sub_graphs:
# Remove the original sub-graphs
for p in edge_partitions:
if len(p) == 1:
continue
for e in p:
# TODO: remove the split and edges, then safely remove the nodes.
self.kcfg.remove_edges_around(e.source.id)
# Create A -|MergedEdge|-> Merged_Bi -|Split|-> Bi, if one edge partition covers all the splits
if len(edge_partitions) == 1:
merged_bi_cterm, merged_bi_subst = cterms_anti_unify(
[edge.target.cterm for edge in edge_partitions[0]], keep_values=True, kdef=self.kdef
)
merged_bi = self.kcfg.create_node(merged_bi_cterm)
self.kcfg.create_merged_edge(split.source.id, merged_bi.id, edge_partitions[0])
self.kcfg.create_split(
merged_bi.id, zip([e.target.id for e in edge_partitions[0]], merged_bi_subst, strict=True)
)
continue
# Create A -|Split|-> Others & Merged_Ai -|MergedEdge|-> Merged_Bi -|Split|-> Bi
_split_nodes: list[NodeIdLike] = []
for edge_partition in edge_partitions:
if len(edge_partition) == 1:
_split_nodes.append(edge_partition[0].source.id)
continue
merged_ai_cterm, _ = cterms_anti_unify(
[ai2bi.source.cterm for ai2bi in edge_partition], keep_values=True, kdef=self.kdef
)
merged_bi_cterm, merged_bi_subst = cterms_anti_unify(
[ai2bi.target.cterm for ai2bi in edge_partition], keep_values=True, kdef=self.kdef
)
merged_ai = self.kcfg.create_node(merged_ai_cterm)
_split_nodes.append(merged_ai.id)
merged_bi = self.kcfg.create_node(merged_bi_cterm)
self.kcfg.create_merged_edge(merged_ai.id, merged_bi.id, edge_partition)
self.kcfg.create_split(
merged_bi.id, zip([ai2bi.target.id for ai2bi in edge_partition], merged_bi_subst, strict=True)
)
self.kcfg.create_split_by_nodes(split.source.id, _split_nodes)
return True | Merge targets of Split for cutting down the number of branches, using heuristics KCFGSemantics.is_mergeable.
Side Effect: The KCFG is rewritten by the following rewrite pattern,
- Match: A -|Split|-> A_i -|Edge|-> B_i
- Rewrite:
- if `B_x, B_y, ..., B_z are not mergeable` then unchanged
- if `B_x, B_y, ..., B_z are mergeable`, then
- A -|Split|-> A_x or A_y or ... or A_z
- A_x or A_y or ... or A_z -|Edge|-> B_x or B_y or ... or B_z
- B_x or B_y or ... or B_z -|Split|-> B_x, B_y, ..., B_z
Specifically, when `B_merge = B_x or B_y or ... or B_z`
- `or`: fresh variables in places where the configurations differ
- `Edge` in A_merged -|Edge|-> B_merge: list of merged edges is from A_i -|Edge|-> B_i
- `Split` in B_merge -|Split|-> B_x, B_y, ..., B_z: subst for it is from A -|Split|-> A_1, A_2, ..., A_n
:param semantics: provides the is_mergeable heuristic
:return: whether any merge was performed | merge_nodes | python | runtimeverification/k | pyk/src/pyk/kcfg/minimize.py | https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/kcfg/minimize.py | BSD-3-Clause |
def minimize(self, merge: bool = False) -> None:
"""Minimize KCFG by repeatedly performing the lifting transformations.
The KCFG is transformed to an equivalent in which no further lifting transformations are possible.
The loop is designed so that each transformation is performed once in each iteration.
"""
repeat = True
while repeat:
repeat = self.lift_edges()
repeat = self.lift_splits() or repeat
repeat = True
while repeat and merge:
repeat = self.merge_nodes() | Minimize KCFG by repeatedly performing the lifting transformations.
The KCFG is transformed to an equivalent in which no further lifting transformations are possible.
The loop is designed so that each transformation is performed once in each iteration. | minimize | python | runtimeverification/k | pyk/src/pyk/kcfg/minimize.py | https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/kcfg/minimize.py | BSD-3-Clause |
def create_split_by_nodes(self, source_id: NodeIdLike, target_ids: Iterable[NodeIdLike]) -> KCFG.Split | None:
"""Create a split without crafting a CSubst."""
source = self.node(source_id)
targets = [self.node(nid) for nid in target_ids]
try:
csubsts = [not_none(source.cterm.match_with_constraint(target.cterm)) for target in targets]
except ValueError:
return None
return self.create_split(source.id, zip(target_ids, csubsts, strict=True)) | Create a split without crafting a CSubst. | create_split_by_nodes | python | runtimeverification/k | pyk/src/pyk/kcfg/kcfg.py | https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/kcfg/kcfg.py | BSD-3-Clause |
def pretty_segments(self, kcfg: KCFG, minimize: bool = True) -> Iterable[tuple[str, Iterable[str]]]:
"""Return a pretty version of the KCFG in segments.
Each segment is a tuple of an identifier and a list of lines to be printed for that segment (Tuple[str, Iterable[str]).
The identifier tells you whether that segment is for a given node, edge, or just pretty spacing ('unknown').
This is useful for applications which want to pretty print in chunks, so that they can know which printed region corresponds to each node/edge.
"""
processed_nodes: list[KCFG.Node] = []
ret_lines: list[tuple[str, list[str]]] = []
def _multi_line_print(
label: str, lines: list[str], default: str = 'None', indent: int = 4, max_width: int | None = None
) -> list[str]:
ret_lines = []
if len(lines) == 0:
ret_lines.append(f'{label}: {default}')
else:
ret_lines.append(f'{label}:')
ret_lines.extend([f'{indent * " "}{line}' for line in lines])
if max_width is not None:
ret_lines = [
ret_line if len(ret_line) <= max_width else ret_line[0:max_width] + '...' for ret_line in ret_lines
]
return ret_lines
def _print_csubst(
csubst: CSubst, subst_first: bool = False, indent: int = 4, minimize: bool = False
) -> list[str]:
max_width = 78 if minimize else None
_constraint_strs = [
self.kprint.pretty_print(ml_pred_to_bool(constraint, unsafe=True)) for constraint in csubst.constraints
]
constraint_strs = _multi_line_print('constraint', _constraint_strs, 'true')
if len(csubst.subst.minimize()) > 0 and minimize:
subst_strs = ['subst: ...']
else:
_subst_strs = [
line
for k, v in csubst.subst.minimize().items()
for line in f'{k} <- {self.kprint.pretty_print(v)}'.split('\n')
]
subst_strs = _multi_line_print('subst', _subst_strs, '.Subst', max_width=max_width)
if subst_first:
return subst_strs + constraint_strs
return constraint_strs + subst_strs
def _print_node(node: KCFG.Node) -> list[str]:
return self.node_short_info(kcfg, node)
def _print_edge(edge: KCFG.Edge) -> list[str]:
if edge.depth == 1:
return ['(' + str(edge.depth) + ' step)']
else:
return ['(' + str(edge.depth) + ' steps)']
def _print_merged_edge(merged_edge: KCFG.MergedEdge) -> list[str]:
res = '('
for edge in merged_edge.edges:
res += f'{edge.depth}|'
res = res[:-1] + ' steps)'
return [res] if len(res) < 78 else ['(merged edge)']
def _print_cover(cover: KCFG.Cover) -> Iterable[str]:
return _print_csubst(cover.csubst, subst_first=False, indent=4, minimize=minimize)
def _print_split_edge(split: KCFG.Split, target_id: int) -> list[str]:
return _print_csubst(split.splits[target_id], subst_first=True, indent=4, minimize=minimize)
def _print_subgraph(indent: str, curr_node: KCFG.Node, prior_on_trace: list[KCFG.Node]) -> None:
processed = curr_node in processed_nodes
processed_nodes.append(curr_node)
successors = list(kcfg.successors(curr_node.id))
curr_node_strs = _print_node(curr_node)
ret_node_lines = []
suffix = []
elbow = 'ββ'
node_indent = 'β '
if kcfg.is_root(curr_node.id):
elbow = 'ββ'
elif processed or not successors:
elbow = 'ββ'
node_indent = ' '
if curr_node in prior_on_trace:
suffix = ['(looped back)', '']
elif processed and not kcfg.is_leaf(curr_node.id):
suffix = ['(continues as previously)', '']
else:
suffix = ['']
ret_node_lines.append(indent + elbow + ' ' + curr_node_strs[0])
ret_node_lines.extend(add_indent(indent + node_indent, curr_node_strs[1:]))
ret_node_lines.extend(add_indent(indent + ' ', suffix))
ret_lines.append((f'node_{curr_node.id}', ret_node_lines))
if processed or not successors:
return
successor = successors[0]
if isinstance(successor, KCFG.MultiEdge):
ret_lines.append(('unknown', [f'{indent}β']))
multiedge_label = '1 step' if type(successor) is KCFG.NDBranch else 'branch'
multiedge_id = 'ndbranch' if type(successor) is KCFG.NDBranch else 'split'
ret_lines.append(('unknown', [f'{indent}β ({multiedge_label})']))
for target in successor.targets[:-1]:
if type(successor) is KCFG.Split:
ret_edge_lines = _print_split_edge(successor, target.id)
ret_edge_lines = [indent + 'β£βββ ' + ret_edge_lines[0]] + add_indent(
indent + 'β β ', ret_edge_lines[1:]
)
elif type(successor) is KCFG.NDBranch:
ret_edge_lines = [indent + 'β£βββ ']
else:
raise AssertionError()
ret_edge_lines.append(indent + 'β β')
ret_lines.append((f'{multiedge_id}_{curr_node.id}_{target.id}', ret_edge_lines))
_print_subgraph(indent + 'β ', target, prior_on_trace + [curr_node])
target = successor.targets[-1]
if type(successor) is KCFG.Split:
ret_edge_lines = _print_split_edge(successor, target.id)
ret_edge_lines = [indent + 'ββββ ' + ret_edge_lines[0]] + add_indent(
indent + ' β ', ret_edge_lines[1:]
)
elif type(successor) is KCFG.NDBranch:
ret_edge_lines = [indent + 'ββββ ']
else:
raise AssertionError()
ret_edge_lines.append(indent + ' β')
ret_lines.append((f'{multiedge_id}_{curr_node.id}_{target.id}', ret_edge_lines))
_print_subgraph(indent + ' ', target, prior_on_trace + [curr_node])
elif isinstance(successor, KCFG.EdgeLike):
ret_lines.append(('unknown', [f'{indent}β']))
if type(successor) is KCFG.Edge:
ret_edge_lines = []
ret_edge_lines.extend(add_indent(indent + 'β ', _print_edge(successor)))
ret_lines.append((f'edge_{successor.source.id}_{successor.target.id}', ret_edge_lines))
elif type(successor) is KCFG.MergedEdge:
ret_edge_lines = []
ret_edge_lines.extend(add_indent(indent + 'β ', _print_merged_edge(successor)))
ret_lines.append((f'merged_edge_{successor.source.id}_{successor.target.id}', ret_edge_lines))
elif type(successor) is KCFG.Cover:
ret_edge_lines = []
ret_edge_lines.extend(add_indent(indent + 'β ', _print_cover(successor)))
ret_lines.append((f'cover_{successor.source.id}_{successor.target.id}', ret_edge_lines))
_print_subgraph(indent, successor.target, prior_on_trace + [curr_node])
def _sorted_init_nodes() -> tuple[list[KCFG.Node], list[KCFG.Node]]:
sorted_init_nodes = sorted(node for node in kcfg.nodes if node not in processed_nodes)
init_nodes = []
init_leaf_nodes = []
remaining_nodes = []
for node in sorted_init_nodes:
if kcfg.is_root(node.id):
if kcfg.is_leaf(node.id):
init_leaf_nodes.append(node)
else:
init_nodes.append(node)
else:
remaining_nodes.append(node)
return (init_nodes + init_leaf_nodes, remaining_nodes)
init, _ = _sorted_init_nodes()
while init:
ret_lines.append(('unknown', ['']))
_print_subgraph('', init[0], [])
init, _ = _sorted_init_nodes()
_, remaining = _sorted_init_nodes()
if remaining:
ret_lines.append(('unknown', ['', 'Remaining Nodes:']))
for node in remaining:
ret_node_lines = [''] + _print_node(node)
ret_lines.append((f'node_{node.id}', ret_node_lines))
return KCFGShow.make_unique_segments(ret_lines) | Return a pretty version of the KCFG in segments.
Each segment is a tuple of an identifier and a list of lines to be printed for that segment (Tuple[str, Iterable[str]).
The identifier tells you whether that segment is for a given node, edge, or just pretty spacing ('unknown').
This is useful for applications which want to pretty print in chunks, so that they can know which printed region corresponds to each node/edge. | pretty_segments | python | runtimeverification/k | pyk/src/pyk/kcfg/show.py | https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/kcfg/show.py | BSD-3-Clause |
def _resolve_depends(module_list: KFlatModuleList) -> KFlatModuleList:
"""Resolve each depends value relative to the module the claim belongs to.
Example:
```
module THIS-MODULE
claim ... [depends(foo,OTHER-MODULE.bar)]
endmodule
```
becomes
```
module THIS-MODULE
claim ... [depends(THIS-MODULE.foo,OTHER-MODULE.bar)]
endmodule
```
"""
labels = {claim.label for module in module_list.modules for claim in module.claims}
def resolve_claim_depends(module_name: str, claim: KClaim) -> KClaim:
depends = claim.dependencies
if not depends:
return claim
resolve = partial(ClaimIndex._resolve_claim_label, labels, module_name)
resolved = [resolve(label) for label in depends]
return claim.let(att=claim.att.update([Atts.DEPENDS(','.join(resolved))]))
modules: list[KFlatModule] = []
for module in module_list.modules:
resolve_depends = partial(resolve_claim_depends, module.name)
module = module.map_sentences(resolve_depends, of_type=KClaim)
modules.append(module)
return module_list.let(modules=modules) | Resolve each depends value relative to the module the claim belongs to.
Example:
```
module THIS-MODULE
claim ... [depends(foo,OTHER-MODULE.bar)]
endmodule
```
becomes
```
module THIS-MODULE
claim ... [depends(THIS-MODULE.foo,OTHER-MODULE.bar)]
endmodule
``` | _resolve_depends | python | runtimeverification/k | pyk/src/pyk/ktool/claim_index.py | https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/ktool/claim_index.py | BSD-3-Clause |
def _resolve_claim_label(labels: Container[str], module_name: str | None, label: str) -> str:
"""Resolve `label` to a valid label in `labels`, or raise.
If a `label` is not found and `module_name` is set, the label is tried after qualifying.
"""
if label in labels:
return label
if module_name is not None:
qualified = f'{module_name}.{label}'
if qualified in labels:
return qualified
raise ValueError(f'Claim label not found: {label}') | Resolve `label` to a valid label in `labels`, or raise.
If a `label` is not found and `module_name` is set, the label is tried after qualifying. | _resolve_claim_label | python | runtimeverification/k | pyk/src/pyk/ktool/claim_index.py | https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/ktool/claim_index.py | BSD-3-Clause |
def labels(
self,
*,
include: Iterable[str] | None = None,
exclude: Iterable[str] | None = None,
with_depends: bool = True,
ordered: bool = False,
) -> list[str]:
"""Return a list of labels from the index.
Args:
include: Labels to include in the result. If `None`, all labels are included.
exclude: Labels to exclude from the result. If `None`, no labels are excluded.
Takes precedence over `include`.
with_depends: If `True`, the result is transitively closed w.r.t. the dependency relation.
Labels in `exclude` are pruned, and their dependencies are not considered on the given path.
ordered: If `True`, the result is topologically sorted w.r.t. the dependency relation.
Returns:
A list of labels from the index.
Raises:
ValueError: If an item in `include` or `exclude` cannot be resolved to a valid label.
"""
include = self.resolve_all(include) if include is not None else self.claims
exclude = self.resolve_all(exclude) if exclude is not None else []
labels: list[str]
if with_depends:
labels = self._close_dependencies(labels=include, prune=exclude)
else:
labels = [label for label in include if label not in set(exclude)]
if ordered:
return self._sort_topologically(labels)
return labels | Return a list of labels from the index.
Args:
include: Labels to include in the result. If `None`, all labels are included.
exclude: Labels to exclude from the result. If `None`, no labels are excluded.
Takes precedence over `include`.
with_depends: If `True`, the result is transitively closed w.r.t. the dependency relation.
Labels in `exclude` are pruned, and their dependencies are not considered on the given path.
ordered: If `True`, the result is topologically sorted w.r.t. the dependency relation.
Returns:
A list of labels from the index.
Raises:
ValueError: If an item in `include` or `exclude` cannot be resolved to a valid label. | labels | python | runtimeverification/k | pyk/src/pyk/ktool/claim_index.py | https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/ktool/claim_index.py | BSD-3-Clause |
def builtin_dir(self) -> Path:
"""The path to the `builtin` directory."""
return self.path / 'include/kframework/builtin' | The path to the `builtin` directory. | builtin_dir | python | runtimeverification/k | pyk/src/pyk/ktool/utils.py | https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/ktool/utils.py | BSD-3-Clause |
def create() -> KDistribution | None:
"""Instantiate the class based on the path to the `kompile` binary."""
kompile_bin = KDistribution._which_kompile()
if kompile_bin is None:
return None
return KDistribution(kompile_bin.parents[1]) | Instantiate the class based on the path to the `kompile` binary. | create | python | runtimeverification/k | pyk/src/pyk/ktool/utils.py | https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/ktool/utils.py | BSD-3-Clause |
def load_claims(
self,
spec_file: Path,
*,
spec_module_name: str | None = None,
include_dirs: Iterable[Path] = (),
md_selector: str | None = None,
claim_labels: Iterable[str] | None = None,
exclude_claim_labels: Iterable[str] | None = None,
include_dependencies: bool = True,
type_inference_mode: TypeInferenceMode | None = None,
) -> list[KClaim]:
"""Attempt to load a spec from JSON, write file on cache miss.
Args:
spec_file: Spec file to load.
spec_module_name (optional): Spec module to load.
include_dirs (optional): Includes.
md_selector (optional): Selector expression for Markdown tags.
claim_labels (optional): Claim labels to include in the result.
exclude_claim_labels (optional): Claim labels to exclude from the result.
include_dependencies (optional): If ``True``, claim dependencies are transitively included.
type_inference_mode (optional): Type inference mode.
"""
_LOGGER.info(f'Loading spec file: {spec_file}')
digest = self._digest(spec_file, include_dirs=include_dirs, md_selector=md_selector)
_LOGGER.info(f'Calculated digest: {digest}')
claim_file = spec_file.with_suffix('.json')
cache_hit = False
if claim_file.exists():
_LOGGER.info(f'Loading claim file: {claim_file}')
module_list, loaded_digest = _ClaimModuleList.from_dict(json.loads(claim_file.read_text()))
cache_hit = digest == loaded_digest
if not cache_hit:
_LOGGER.info('Generating claim modules')
module_list = self._kprove.parse_modules(
file_path=spec_file,
module_name=spec_module_name,
include_dirs=include_dirs,
md_selector=md_selector,
type_inference_mode=type_inference_mode,
)
claim_module_list = _ClaimModuleList(module_list=module_list, digest=digest)
_LOGGER.info(f'Writing claim file: {claim_file}')
claim_file.write_text(json.dumps(claim_module_list.to_dict()))
claim_index = ClaimIndex.from_module_list(module_list)
labels = claim_index.labels(
include=claim_labels,
exclude=exclude_claim_labels,
with_depends=include_dependencies,
)
return [claim_index[label] for label in labels] | Attempt to load a spec from JSON, write file on cache miss.
Args:
spec_file: Spec file to load.
spec_module_name (optional): Spec module to load.
include_dirs (optional): Includes.
md_selector (optional): Selector expression for Markdown tags.
claim_labels (optional): Claim labels to include in the result.
exclude_claim_labels (optional): Claim labels to exclude from the result.
include_dependencies (optional): If ``True``, claim dependencies are transitively included.
type_inference_mode (optional): Type inference mode. | load_claims | python | runtimeverification/k | pyk/src/pyk/ktool/claim_loader.py | https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/ktool/claim_loader.py | BSD-3-Clause |
def handle_test(self, args: Mapping[EVar, Pattern]) -> None:
"""Handle each test case with the variable substitutions that are being used."""
... | Handle each test case with the variable substitutions that are being used. | handle_test | python | runtimeverification/k | pyk/src/pyk/ktool/kfuzz.py | https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/ktool/kfuzz.py | BSD-3-Clause |
def handle_failure(self, args: Mapping[EVar, Pattern]) -> None:
"""Handle a test case failure, before the `AssertionError` is raised."""
... | Handle a test case failure, before the `AssertionError` is raised. | handle_failure | python | runtimeverification/k | pyk/src/pyk/ktool/kfuzz.py | https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/ktool/kfuzz.py | BSD-3-Clause |
def fuzz_with_check(
self,
template: Pattern,
subst_strategy: dict[EVar, SearchStrategy[Pattern]],
check_func: Callable[[Pattern], Any],
**hypothesis_args: Any,
) -> None:
"""Fuzz over a property test using check_func to check for a passing test.
See :any:`fuzz` for info on the parameters.
"""
fuzz(
self.definition_dir,
template,
subst_strategy,
check_func=check_func,
handler=self.handler,
**hypothesis_args,
) | Fuzz over a property test using check_func to check for a passing test.
See :any:`fuzz` for info on the parameters. | fuzz_with_check | python | runtimeverification/k | pyk/src/pyk/ktool/kfuzz.py | https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/ktool/kfuzz.py | BSD-3-Clause |
def fuzz_with_exit_code(
self,
template: Pattern,
subst_strategy: dict[EVar, SearchStrategy[Pattern]],
**hypothesis_args: Any,
) -> None:
"""Fuzz over a property test using the exit code from the interpreter to check for a passing test.
See :any:`fuzz` for info on the parameters.
"""
fuzz(
self.definition_dir,
template,
subst_strategy,
check_exit_code=True,
handler=self.handler,
**hypothesis_args,
) | Fuzz over a property test using the exit code from the interpreter to check for a passing test.
See :any:`fuzz` for info on the parameters. | fuzz_with_exit_code | python | runtimeverification/k | pyk/src/pyk/ktool/kfuzz.py | https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/ktool/kfuzz.py | BSD-3-Clause |
def kintegers(
*,
min_value: int | None = None,
max_value: int | None = None,
with_inj: str | None = None,
) -> SearchStrategy[Pattern]:
"""Return a search strategy for K integers.
Args:
min_value: Minimum value for the generated integers
max_value: Maximum value for the generated integers
with_inj: Return the integer as an injection into this sort
Returns:
A strategy which generates integer domain values.
"""
def int_dv(value: int) -> Pattern:
res: Pattern = DV(SortApp('SortInt'), value=String(str(value)))
if with_inj is not None:
res = inj(SortApp('SortInt'), SortApp(f'Sort{with_inj}'), res)
return res
return integers(min_value=min_value, max_value=max_value).map(int_dv) | Return a search strategy for K integers.
Args:
min_value: Minimum value for the generated integers
max_value: Maximum value for the generated integers
with_inj: Return the integer as an injection into this sort
Returns:
A strategy which generates integer domain values. | kintegers | python | runtimeverification/k | pyk/src/pyk/ktool/kfuzz.py | https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/ktool/kfuzz.py | BSD-3-Clause |
def fuzz(
definition_dir: str | Path,
template: Pattern,
subst_strategy: dict[EVar, SearchStrategy[Pattern]],
*,
check_func: Callable[[Pattern], Any] | None = None,
check_exit_code: bool = False,
handler: KFuzzHandler = _DEFAULT_HANDLER,
**hypothesis_args: Any,
) -> None:
"""Fuzz a property test with concrete execution over a K term.
Args:
definition_dir: The location of the K definition to run the interpreter for.
template: The term which will be sent to the interpreter after randomizing inputs. It should contain at least one variable which will be substituted for a value.
subst_strategy: Should have each variable in the template term mapped to a strategy for generating values for it.
check_func: Will be called on the kore output from the interpreter.
Should throw an AssertionError if it determines that the output indicates a test failure.
A RuntimeError will be thrown if this is passed as an argument and check_exit_code is True.
check_exit_code: Check the exit code of the interpreter for a test failure instead of using check_func.
An exit code of 0 indicates a passing test.
A RuntimeError will be thrown if this is True and check_func is also passed as an argument.
handler: An instance of a `KFuzzHandler` implementing custom behavior while fuzzing.
hypothesis_args: Keyword arguments that will be passed as settings for the hypothesis test. Defaults:
deadline: 5000
phases: (Phase.explicit, Phase.reuse, Phase.generate)
Raises:
RuntimeError: If check_func exists and check_exit_code is set, or check_func doesn't exist and check_exit_code is cleared.
"""
if bool(check_func) == check_exit_code:
raise RuntimeError('Must pass one of check_func or check_exit_code, and not both!')
def test(subst_case: Mapping[EVar, Pattern]) -> None:
def sub(p: Pattern) -> Pattern:
if isinstance(p, EVar) and p in subst_case:
return subst_case[p]
else:
return p
handler.handle_test(subst_case)
test_pattern = template.top_down(sub)
res = llvm_interpret_raw(definition_dir, test_pattern.text, check=False)
try:
if check_exit_code:
assert res.returncode == 0
else:
assert check_func
res_pattern = KoreParser(res.stdout).pattern()
check_func(res_pattern)
except AssertionError:
handler.handle_failure(subst_case)
raise
strat: SearchStrategy = fixed_dictionaries(subst_strategy)
# Default settings for hypothesis
hypothesis_args.setdefault('deadline', 5000)
hypothesis_args.setdefault('phases', (Phase.explicit, Phase.reuse, Phase.generate))
given(strat)(settings(**hypothesis_args)(test))() | Fuzz a property test with concrete execution over a K term.
Args:
definition_dir: The location of the K definition to run the interpreter for.
template: The term which will be sent to the interpreter after randomizing inputs. It should contain at least one variable which will be substituted for a value.
subst_strategy: Should have each variable in the template term mapped to a strategy for generating values for it.
check_func: Will be called on the kore output from the interpreter.
Should throw an AssertionError if it determines that the output indicates a test failure.
A RuntimeError will be thrown if this is passed as an argument and check_exit_code is True.
check_exit_code: Check the exit code of the interpreter for a test failure instead of using check_func.
An exit code of 0 indicates a passing test.
A RuntimeError will be thrown if this is True and check_func is also passed as an argument.
handler: An instance of a `KFuzzHandler` implementing custom behavior while fuzzing.
hypothesis_args: Keyword arguments that will be passed as settings for the hypothesis test. Defaults:
deadline: 5000
phases: (Phase.explicit, Phase.reuse, Phase.generate)
Raises:
RuntimeError: If check_func exists and check_exit_code is set, or check_func doesn't exist and check_exit_code is cleared. | fuzz | python | runtimeverification/k | pyk/src/pyk/ktool/kfuzz.py | https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/ktool/kfuzz.py | BSD-3-Clause |
def llvm_interpret(
definition_dir: str | Path, pattern: Pattern, *, depth: int | None = None, check: bool = True
) -> Pattern:
"""Execute the `interpreter` binary generated by the LLVM Backend.
Args:
definition_dir: Path to the kompiled definition directory.
pattern: KORE pattern to start rewriting from.
depth: Maximal number of rewrite steps to take.
Returns:
The pattern resulting from the rewrites.
Raises:
RuntimeError: If ``check`` and the interpreter fails.
"""
try:
res = llvm_interpret_raw(definition_dir, pattern.text, depth=depth, check=check)
except CalledProcessError as err:
raise RuntimeError(f'Interpreter failed with status {err.returncode}: {err.stderr}') from err
return KoreParser(res.stdout).pattern() | Execute the `interpreter` binary generated by the LLVM Backend.
Args:
definition_dir: Path to the kompiled definition directory.
pattern: KORE pattern to start rewriting from.
depth: Maximal number of rewrite steps to take.
Returns:
The pattern resulting from the rewrites.
Raises:
RuntimeError: If ``check`` and the interpreter fails. | llvm_interpret | python | runtimeverification/k | pyk/src/pyk/ktool/krun.py | https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/ktool/krun.py | BSD-3-Clause |
def llvm_interpret_raw(
definition_dir: str | Path, kore: str, *, depth: int | None = None, check: bool = True
) -> CompletedProcess:
"""Execute the `interpreter` binary generated by the LLVM Backend, with no processing of input/output.
Args:
definition_dir: Path to the kompiled definition directory.
pattern: KORE string to start rewriting from.
depth: Maximal number of rewrite steps to take.
check: check the return code of the CompletedProcess
Returns:
The CompletedProcess of the interpreter.
Raises:
CalledProcessError: If ``check`` and the interpreter fails.
"""
definition_dir = Path(definition_dir)
interpreter_file = definition_dir / 'interpreter'
check_file_path(interpreter_file)
depth = depth if depth is not None else -1
args = [str(interpreter_file), '/dev/stdin', str(depth), '/dev/stdout']
return run_process_2(args, input=kore, logger=_LOGGER, loglevel=logging.DEBUG, check=check) | Execute the `interpreter` binary generated by the LLVM Backend, with no processing of input/output.
Args:
definition_dir: Path to the kompiled definition directory.
pattern: KORE string to start rewriting from.
depth: Maximal number of rewrite steps to take.
check: check the return code of the CompletedProcess
Returns:
The CompletedProcess of the interpreter.
Raises:
CalledProcessError: If ``check`` and the interpreter fails. | llvm_interpret_raw | python | runtimeverification/k | pyk/src/pyk/ktool/krun.py | https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/ktool/krun.py | BSD-3-Clause |
def module_to_kore(definition: KDefinition) -> Module:
"""Convert the main module of a kompiled KAST definition to KORE format."""
module = simplified_module(definition)
defn = KDefinition(module.name, (module,)) # for getting the sort lattice
name = name_to_kore(module.name)
attrs = atts_to_kore({key: value for key, value in module.att.items() if key != Atts.DIGEST}) # filter digest
imports = [Import('K')]
sort_decls = [
sort_decl_to_kore(syntax_sort)
for syntax_sort in module.syntax_sorts
if syntax_sort.sort.name not in [K.name, K_ITEM.name]
]
symbol_decls = [
symbol_prod_to_kore(prod)
for prod in module.productions
if prod.klabel and prod.klabel.name not in BUILTIN_LABELS
]
sentences: list[Sentence] = []
sentences += imports
sentences += sort_decls
sentences += symbol_decls
sentences += _subsort_axioms(module)
sentences += _assoc_axioms(defn)
sentences += _idem_axioms(module)
sentences += _unit_axioms(module)
sentences += _functional_axioms(module)
sentences += _no_confusion_axioms(module)
sentences += _no_junk_axioms(defn)
sentences += _overload_axioms(defn)
res = Module(name=name, sentences=sentences, attrs=attrs)
# Filter the assoc, and _internal_constructor attribute
res = res.let(
sentences=(
(
sent.let_attrs(attr for attr in sent.attrs if attr.symbol not in ['assoc', 'internal-constructor'])
if isinstance(sent, SymbolDecl)
else sent
)
for sent in res.sentences
)
)
# Filter the overload attribute
res = res.let(
sentences=(sent.let_attrs(attr for attr in sent.attrs if attr.symbol != 'overload') for sent in res.sentences)
)
return res | Convert the main module of a kompiled KAST definition to KORE format. | module_to_kore | python | runtimeverification/k | pyk/src/pyk/konvert/_module_to_kore.py | https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/konvert/_module_to_kore.py | BSD-3-Clause |
def simplified_module(definition: KDefinition, module_name: str | None = None) -> KFlatModule:
"""Perform a series of simplification steps on a module.
In ModuleToKORE.java, there are some implicit KAST-to-KAST kompilation
steps hidden in the conversion. In particular, the kompiled KAST definition
(compiled.json) is modular, whereas the kompiled definition
(definition.kore) is flat.
This function aims to factor out these hidden KAST-to-KAST kompilation
steps so that our implementation of module_to_kore can be as simple as
possible. Moreover, this has the potential to shed some light on how
modules can be kompiled incrementally.
This function is an approximation, i.e. there might be cases where it
produces a different result to what would be expected based on kompile's
output. These discrepancies should be analyzed and fixed.
"""
module_name = module_name or definition.main_module_name
pipeline = (
FlattenDefinition(module_name),
# sorts
DiscardSyntaxSortAtts(
[
Atts.CELL_COLLECTION,
],
),
AddSyntaxSorts(),
AddCollectionAtts(),
AddDomainValueAtts(),
# symbols
PullUpRewrites(),
DiscardSymbolAtts(
[
Atts.AVOID,
Atts.CELL_COLLECTION,
Atts.CELL_FRAGMENT,
Atts.CELL_NAME,
Atts.CELL_OPT_ABSENT,
Atts.COLOR,
Atts.COLORS,
Atts.COMM,
Atts.EXIT,
Atts.FORMAT,
Atts.GROUP,
Atts.INDEX,
Atts.INITIALIZER,
Atts.LEFT,
Atts.MAINCELL,
Atts.MULTIPLICITY,
Atts.PREDICATE,
Atts.PREFER,
Atts.PRIVATE,
Atts.PRODUCTION,
Atts.PROJECTION,
Atts.RETURNS_UNIT,
Atts.RIGHT,
Atts.SEQSTRICT,
Atts.STRICT,
Atts.TYPE,
Atts.TERMINATOR_SYMBOL,
Atts.USER_LIST,
Atts.WRAP_ELEMENT,
],
),
DiscardHookAtts(),
AddImpureAtts(),
AddSymbolAtts(Atts.MACRO(None), _is_macro),
AddSymbolAtts(Atts.FUNCTIONAL(None), _is_functional),
AddSymbolAtts(Atts.INJECTIVE(None), _is_injective),
AddAnywhereAttsFromRules(),
# Mark symbols that require constructor axioms with an internal attribute.
# Has to precede `AddAnywhereAttsFromOverloads`: symbols that would be considewred constructors without
# the extra `anywhere` require a constructor axiom.
AddSymbolAtts(_INTERNAL_CONSTRUCTOR(None), _is_constructor),
AddAnywhereAttsFromOverloads(),
AddSymbolAtts(Atts.CONSTRUCTOR(None), _is_constructor),
)
definition = reduce(lambda defn, step: step.execute(defn), pipeline, definition)
module = definition.modules[0]
return module | Perform a series of simplification steps on a module.
In ModuleToKORE.java, there are some implicit KAST-to-KAST kompilation
steps hidden in the conversion. In particular, the kompiled KAST definition
(compiled.json) is modular, whereas the kompiled definition
(definition.kore) is flat.
This function aims to factor out these hidden KAST-to-KAST kompilation
steps so that our implementation of module_to_kore can be as simple as
possible. Moreover, this has the potential to shed some light on how
modules can be kompiled incrementally.
This function is an approximation, i.e. there might be cases where it
produces a different result to what would be expected based on kompile's
output. These discrepancies should be analyzed and fixed. | simplified_module | python | runtimeverification/k | pyk/src/pyk/konvert/_module_to_kore.py | https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/konvert/_module_to_kore.py | BSD-3-Clause |
def _imported_sentences(definition: KDefinition, module_name: str) -> list[KSentence]:
"""Return all sentences from imported modules, including the module itself."""
pending: list[str] = [module_name]
imported: set[str] = set()
res: list[KSentence] = []
while pending:
module_name = pending.pop()
if module_name in imported:
continue
module = definition.module(module_name)
res += module.sentences
pending += (importt.name for importt in module.imports)
imported.add(module_name)
return res | Return all sentences from imported modules, including the module itself. | _imported_sentences | python | runtimeverification/k | pyk/src/pyk/konvert/_module_to_kore.py | https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/konvert/_module_to_kore.py | BSD-3-Clause |
def _syntax_sorts(module: KFlatModule) -> list[KSyntaxSort]:
"""Return a declaration for each sort in the module."""
def is_higher_order(production: KProduction) -> bool:
# Example: syntax {Sort} Sort ::= Sort "#as" Sort
return production.sort in production.params
def merge_atts(atts: list[KAtt]) -> KAtt:
grouped: dict[AttKey, set[Any]] = {}
for att, value in chain.from_iterable(att.items() for att in atts):
grouped.setdefault(att, set()).add(value)
entries = [att(next(iter(values))) for att, values in grouped.items() if len(values) == 1]
return KAtt(entries)
declarations: dict[KSort, list[KAtt]] = {}
# Merge attributes from KSyntaxSort instances
for syntax_sort in module.syntax_sorts:
declarations.setdefault(syntax_sort.sort, []).append(syntax_sort.att)
# Also consider production sorts
for production in module.productions:
if is_higher_order(production):
continue
declarations.setdefault(production.sort, [])
return [KSyntaxSort(sort, att=merge_atts(atts)) for sort, atts in declarations.items()] | Return a declaration for each sort in the module. | _syntax_sorts | python | runtimeverification/k | pyk/src/pyk/konvert/_module_to_kore.py | https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/konvert/_module_to_kore.py | BSD-3-Clause |
def _rules_by_klabel(module: KFlatModule) -> dict[KLabel, list[KRule]]:
"""Return a dict that maps a label l to the list of all rules l => X.
If a label does not have a matching rule, it will be not contained in the dict.
The function expects that all rules have a rewrite on top.
"""
res: dict[KLabel, list[KRule]] = {}
for rule in module.rules:
assert isinstance(rule.body, KRewrite)
if not isinstance(rule.body.lhs, KApply):
continue
label = rule.body.lhs.label
res.setdefault(label, []).append(rule)
return res | Return a dict that maps a label l to the list of all rules l => X.
If a label does not have a matching rule, it will be not contained in the dict.
The function expects that all rules have a rewrite on top. | _rules_by_klabel | python | runtimeverification/k | pyk/src/pyk/konvert/_module_to_kore.py | https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/konvert/_module_to_kore.py | BSD-3-Clause |
def _add_sort_injections(definition: KDefinition, term: KInner, sort: KSort) -> KInner:
"""Add sort injections to a KAST term bottom-up.
Maintains its own stack for an iterative implementation. Each stack frame consists of:
- `term`: the current term
- `sort`: the sort the current term must be injected to
- `argument_terms`: direct subterms of the current term
- `argument_sorts`: the sorts the respective direct subterms must be injected to
- `done_terms`: results for direct subterms
"""
stack: list = [term, sort, _argument_terms(definition, term), _argument_sorts(definition, term), []]
while True:
done_terms = stack[-1]
argument_sorts = stack[-2]
argument_terms = stack[-3]
sort = stack[-4]
term = stack[-5]
idx = len(done_terms) - len(argument_terms)
if not idx:
stack.pop()
stack.pop()
stack.pop()
stack.pop()
stack.pop()
term = _let_arguments(term, done_terms)
term = _inject(definition, term, sort)
if not stack:
return term
stack[-1].append(term)
else:
term = argument_terms[idx]
stack.append(term)
stack.append(argument_sorts[idx])
stack.append(_argument_terms(definition, term))
stack.append(_argument_sorts(definition, term))
stack.append([]) | Add sort injections to a KAST term bottom-up.
Maintains its own stack for an iterative implementation. Each stack frame consists of:
- `term`: the current term
- `sort`: the sort the current term must be injected to
- `argument_terms`: direct subterms of the current term
- `argument_sorts`: the sorts the respective direct subterms must be injected to
- `done_terms`: results for direct subterms | _add_sort_injections | python | runtimeverification/k | pyk/src/pyk/konvert/_kast_to_kore.py | https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/konvert/_kast_to_kore.py | BSD-3-Clause |
def __init__(self, config: KInner, constraints: Iterable[KInner] = ()) -> None:
"""Instantiate a given `CTerm`, performing basic sanity checks on the `config` and `constraints`."""
if is_top(config, weak=True):
config = mlTop()
constraints = ()
elif is_bottom(config, weak=True):
config = mlBottom()
constraints = ()
else:
self._check_config(config)
constraints = self._normalize_constraints(constraints)
object.__setattr__(self, 'config', config)
object.__setattr__(self, 'constraints', constraints) | Instantiate a given `CTerm`, performing basic sanity checks on the `config` and `constraints`. | __init__ | python | runtimeverification/k | pyk/src/pyk/cterm/cterm.py | https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/cterm/cterm.py | BSD-3-Clause |
def from_kast(kast: KInner) -> CTerm:
"""Interpret a given `KInner` as a `CTerm` by splitting the `config` and `constraints` (see `CTerm.kast`)."""
if is_top(kast, weak=True):
return CTerm.top()
elif is_bottom(kast, weak=True):
return CTerm.bottom()
else:
config, constraint = split_config_and_constraints(kast)
constraints = flatten_label('#And', constraint)
return CTerm(config, constraints) | Interpret a given `KInner` as a `CTerm` by splitting the `config` and `constraints` (see `CTerm.kast`). | from_kast | python | runtimeverification/k | pyk/src/pyk/cterm/cterm.py | https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/cterm/cterm.py | BSD-3-Clause |
def from_dict(dct: dict[str, Any]) -> CTerm:
"""Deserialize a `CTerm` from its dictionary representation."""
config = KInner.from_dict(dct['config'])
constraints = [KInner.from_dict(c) for c in dct['constraints']]
return CTerm(config, constraints) | Deserialize a `CTerm` from its dictionary representation. | from_dict | python | runtimeverification/k | pyk/src/pyk/cterm/cterm.py | https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/cterm/cterm.py | BSD-3-Clause |
def top() -> CTerm:
"""Construct a `CTerm` representing all possible states."""
return CTerm(mlTop(), ()) | Construct a `CTerm` representing all possible states. | top | python | runtimeverification/k | pyk/src/pyk/cterm/cterm.py | https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/cterm/cterm.py | BSD-3-Clause |
def bottom() -> CTerm:
"""Construct a `CTerm` representing no possible states."""
return CTerm(mlBottom(), ()) | Construct a `CTerm` representing no possible states. | bottom | python | runtimeverification/k | pyk/src/pyk/cterm/cterm.py | https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/cterm/cterm.py | BSD-3-Clause |
def is_bottom(self) -> bool:
"""Check if a given `CTerm` is trivially empty."""
return is_bottom(self.config, weak=True) or any(is_bottom(cterm, weak=True) for cterm in self.constraints) | Check if a given `CTerm` is trivially empty. | is_bottom | python | runtimeverification/k | pyk/src/pyk/cterm/cterm.py | https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/cterm/cterm.py | BSD-3-Clause |
def __iter__(self) -> Iterator[KInner]:
"""Return an iterator with the head being the `config` and the tail being the `constraints`."""
return chain([self.config], self.constraints) | Return an iterator with the head being the `config` and the tail being the `constraints`. | __iter__ | python | runtimeverification/k | pyk/src/pyk/cterm/cterm.py | https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/cterm/cterm.py | BSD-3-Clause |
def to_dict(self) -> dict[str, Any]:
"""Serialize a `CTerm` to dictionary representation."""
return {
'config': self.config.to_dict(),
'constraints': [c.to_dict() for c in self.constraints],
} | Serialize a `CTerm` to dictionary representation. | to_dict | python | runtimeverification/k | pyk/src/pyk/cterm/cterm.py | https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/cterm/cterm.py | BSD-3-Clause |
def kast(self) -> KInner:
"""Return the unstructured bare `KInner` representation of a `CTerm` (see `CTerm.from_kast`)."""
return mlAnd(self, GENERATED_TOP_CELL) | Return the unstructured bare `KInner` representation of a `CTerm` (see `CTerm.from_kast`). | kast | python | runtimeverification/k | pyk/src/pyk/cterm/cterm.py | https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/cterm/cterm.py | BSD-3-Clause |
def free_vars(self) -> frozenset[str]:
"""Return the set of free variable names contained in this `CTerm`."""
return frozenset(free_vars(self.kast)) | Return the set of free variable names contained in this `CTerm`. | free_vars | python | runtimeverification/k | pyk/src/pyk/cterm/cterm.py | https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/cterm/cterm.py | BSD-3-Clause |
def hash(self) -> str:
"""Unique hash representing the contents of this `CTerm`."""
return self.kast.hash | Unique hash representing the contents of this `CTerm`. | hash | python | runtimeverification/k | pyk/src/pyk/cterm/cterm.py | https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/cterm/cterm.py | BSD-3-Clause |
def cells(self) -> Subst:
"""Return key-value store of the contents of each cell in the `config`."""
_, subst = split_config_from(self.config)
return Subst(subst) | Return key-value store of the contents of each cell in the `config`. | cells | python | runtimeverification/k | pyk/src/pyk/cterm/cterm.py | https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/cterm/cterm.py | BSD-3-Clause |
def cell(self, cell: str) -> KInner:
"""Access the contents of a named cell in the `config`, die on failure."""
return self.cells[cell] | Access the contents of a named cell in the `config`, die on failure. | cell | python | runtimeverification/k | pyk/src/pyk/cterm/cterm.py | https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/cterm/cterm.py | BSD-3-Clause |
def try_cell(self, cell: str) -> KInner | None:
"""Access the contents of a named cell in the `config`, return `None` on failure."""
return self.cells.get(cell) | Access the contents of a named cell in the `config`, return `None` on failure. | try_cell | python | runtimeverification/k | pyk/src/pyk/cterm/cterm.py | https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/cterm/cterm.py | BSD-3-Clause |
def match(self, cterm: CTerm) -> Subst | None:
"""Find `Subst` instantiating this `CTerm` to the other, return `None` if no such `Subst` exists."""
csubst = self.match_with_constraint(cterm)
if not csubst:
return None
if csubst.constraint != mlTop(GENERATED_TOP_CELL):
return None
return csubst.subst | Find `Subst` instantiating this `CTerm` to the other, return `None` if no such `Subst` exists. | match | python | runtimeverification/k | pyk/src/pyk/cterm/cterm.py | https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/cterm/cterm.py | BSD-3-Clause |
def match_with_constraint(self, cterm: CTerm) -> CSubst | None:
"""Find `CSubst` instantiating this `CTerm` to the other, return `None` if no such `CSubst` exists."""
subst = self.config.match(cterm.config)
if subst is None:
return None
source_constraints = [subst(c) for c in self.constraints]
constraints = [c for c in cterm.constraints if c not in source_constraints]
return CSubst(subst, constraints) | Find `CSubst` instantiating this `CTerm` to the other, return `None` if no such `CSubst` exists. | match_with_constraint | python | runtimeverification/k | pyk/src/pyk/cterm/cterm.py | https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/cterm/cterm.py | BSD-3-Clause |
def add_constraint(self, new_constraint: KInner) -> CTerm:
"""Return a new `CTerm` with the additional constraints."""
return CTerm(self.config, [new_constraint] + list(self.constraints)) | Return a new `CTerm` with the additional constraints. | add_constraint | python | runtimeverification/k | pyk/src/pyk/cterm/cterm.py | https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/cterm/cterm.py | BSD-3-Clause |
def anti_unify(
self, other: CTerm, keep_values: bool = False, kdef: KDefinition | None = None
) -> tuple[CTerm, CSubst, CSubst]:
"""Given two `CTerm` instances, find a more general `CTerm` which can instantiate to both.
Args:
other: other `CTerm` to consider for finding a more general `CTerm` with this one.
keep_values: do not discard information about abstracted variables in returned result.
kdef (optional): `KDefinition` to make analysis more precise.
Returns:
A tuple ``(cterm, csubst1, csubst2)`` where
- ``cterm``: More general `CTerm` than either `self` or `other`.
- ``csubst1``: Constrained substitution to apply to `cterm` to obtain `self`.
- ``csubst2``: Constrained substitution to apply to `cterm` to obtain `other`.
"""
new_config, self_subst, other_subst = anti_unify(self.config, other.config, kdef=kdef)
# todo: It's not able to distinguish between constraints in different cterms,
# because variable names may be used inconsistently in different cterms.
common_constraints = [constraint for constraint in self.constraints if constraint in other.constraints]
self_unique_constraints = [
ml_pred_to_bool(constraint) for constraint in self.constraints if constraint not in other.constraints
]
other_unique_constraints = [
ml_pred_to_bool(constraint) for constraint in other.constraints if constraint not in self.constraints
]
new_cterm = CTerm(config=new_config, constraints=())
if keep_values:
disjunct_lhs = andBool([self_subst.pred] + self_unique_constraints)
disjunct_rhs = andBool([other_subst.pred] + other_unique_constraints)
if KToken('true', 'Bool') not in [disjunct_lhs, disjunct_rhs]:
new_cterm = new_cterm.add_constraint(mlEqualsTrue(orBool([disjunct_lhs, disjunct_rhs])))
new_constraints = remove_useless_constraints(common_constraints, new_cterm.free_vars)
for constraint in new_constraints:
new_cterm = new_cterm.add_constraint(constraint)
self_csubst = new_cterm.match_with_constraint(self)
other_csubst = new_cterm.match_with_constraint(other)
if self_csubst is None or other_csubst is None:
raise ValueError(
f'Anti-unification failed to produce a more general state: {(new_cterm, (self, self_csubst), (other, other_csubst))}'
)
return (new_cterm, self_csubst, other_csubst) | Given two `CTerm` instances, find a more general `CTerm` which can instantiate to both.
Args:
other: other `CTerm` to consider for finding a more general `CTerm` with this one.
keep_values: do not discard information about abstracted variables in returned result.
kdef (optional): `KDefinition` to make analysis more precise.
Returns:
A tuple ``(cterm, csubst1, csubst2)`` where
- ``cterm``: More general `CTerm` than either `self` or `other`.
- ``csubst1``: Constrained substitution to apply to `cterm` to obtain `self`.
- ``csubst2``: Constrained substitution to apply to `cterm` to obtain `other`. | anti_unify | python | runtimeverification/k | pyk/src/pyk/cterm/cterm.py | https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/cterm/cterm.py | BSD-3-Clause |
def remove_useless_constraints(self, keep_vars: Iterable[str] = ()) -> CTerm:
"""Return a new `CTerm` with constraints over unbound variables removed.
Args:
keep_vars: List of variables to keep constraints for even if unbound in the `CTerm`.
Returns:
A `CTerm` with the constraints over unbound variables removed.
"""
initial_vars = free_vars(self.config) | set(keep_vars)
new_constraints = remove_useless_constraints(self.constraints, initial_vars)
return CTerm(self.config, new_constraints) | Return a new `CTerm` with constraints over unbound variables removed.
Args:
keep_vars: List of variables to keep constraints for even if unbound in the `CTerm`.
Returns:
A `CTerm` with the constraints over unbound variables removed. | remove_useless_constraints | python | runtimeverification/k | pyk/src/pyk/cterm/cterm.py | https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/cterm/cterm.py | BSD-3-Clause |
def anti_unify(state1: KInner, state2: KInner, kdef: KDefinition | None = None) -> tuple[KInner, Subst, Subst]:
"""Return a generalized state over the two input states.
Args:
state1: State to generalize over, represented as bare `KInner`.
state2: State to generalize over, represented as bare `KInner`.
kdef (optional): `KDefinition` to make the analysis more precise.
Note:
Both `state1` and `state2` are expected to be bare configurations with no constraints attached.
Returns:
A tuple ``(state, subst1, subst2)`` such that
- ``state``: A symbolic state represented as `KInner` which is more general than `state1` or `state2`.
- ``subst1``: A `Subst` which, when applied to `state`, recovers `state1`.
- ``subst2``: A `Subst` which, when applied to `state`, recovers `state2`.
"""
def _rewrites_to_abstractions(_kast: KInner) -> KInner:
if type(_kast) is KRewrite:
sort = kdef.sort(_kast) if kdef else None
return abstract_term_safely(_kast, sort=sort)
return _kast
minimized_rewrite = push_down_rewrites(KRewrite(state1, state2))
abstracted_state = bottom_up(_rewrites_to_abstractions, minimized_rewrite)
subst1 = abstracted_state.match(state1)
subst2 = abstracted_state.match(state2)
if subst1 is None or subst2 is None:
raise ValueError('Anti-unification failed to produce a more general state!')
return (abstracted_state, subst1, subst2) | Return a generalized state over the two input states.
Args:
state1: State to generalize over, represented as bare `KInner`.
state2: State to generalize over, represented as bare `KInner`.
kdef (optional): `KDefinition` to make the analysis more precise.
Note:
Both `state1` and `state2` are expected to be bare configurations with no constraints attached.
Returns:
A tuple ``(state, subst1, subst2)`` such that
- ``state``: A symbolic state represented as `KInner` which is more general than `state1` or `state2`.
- ``subst1``: A `Subst` which, when applied to `state`, recovers `state1`.
- ``subst2``: A `Subst` which, when applied to `state`, recovers `state2`. | anti_unify | python | runtimeverification/k | pyk/src/pyk/cterm/cterm.py | https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/cterm/cterm.py | BSD-3-Clause |
def __init__(self, subst: Subst | None = None, constraints: Iterable[KInner] = ()) -> None:
"""Construct a new `CSubst` given a `Subst` and set of constraints as `KInner`, performing basic sanity checks."""
object.__setattr__(self, 'subst', subst if subst is not None else Subst({}))
object.__setattr__(self, 'constraints', normalize_constraints(constraints)) | Construct a new `CSubst` given a `Subst` and set of constraints as `KInner`, performing basic sanity checks. | __init__ | python | runtimeverification/k | pyk/src/pyk/cterm/cterm.py | https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/cterm/cterm.py | BSD-3-Clause |
def __iter__(self) -> Iterator[Subst | KInner]:
"""Return an iterator with the head being the `subst` and the tail being the `constraints`."""
return chain([self.subst], self.constraints) | Return an iterator with the head being the `subst` and the tail being the `constraints`. | __iter__ | python | runtimeverification/k | pyk/src/pyk/cterm/cterm.py | https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/cterm/cterm.py | BSD-3-Clause |
def to_dict(self) -> dict[str, Any]:
"""Serialize `CSubst` to dictionary representation."""
return {
'subst': self.subst.to_dict(),
'constraints': [c.to_dict() for c in self.constraints],
} | Serialize `CSubst` to dictionary representation. | to_dict | python | runtimeverification/k | pyk/src/pyk/cterm/cterm.py | https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/cterm/cterm.py | BSD-3-Clause |
def from_dict(dct: dict[str, Any]) -> CSubst:
"""Deserialize `CSubst` from a dictionary representation."""
subst = Subst.from_dict(dct['subst'])
constraints = (KInner.from_dict(c) for c in dct['constraints'])
return CSubst(subst=subst, constraints=constraints) | Deserialize `CSubst` from a dictionary representation. | from_dict | python | runtimeverification/k | pyk/src/pyk/cterm/cterm.py | https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/cterm/cterm.py | BSD-3-Clause |
def from_pred(pred: KInner) -> CSubst:
"""Extract from a boolean predicate a CSubst."""
subst, pred = extract_subst(pred)
return CSubst(subst=subst, constraints=flatten_label('#And', pred)) | Extract from a boolean predicate a CSubst. | from_pred | python | runtimeverification/k | pyk/src/pyk/cterm/cterm.py | https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/cterm/cterm.py | BSD-3-Clause |
def pred(self, sort_with: KDefinition | None = None, subst: bool = True, constraints: bool = True) -> KInner:
"""Return an ML predicate representing this substitution."""
_preds: list[KInner] = []
if subst:
for k, v in self.subst.minimize().items():
sort = K
if sort_with is not None:
_sort = sort_with.sort(v)
sort = _sort if _sort is not None else sort
_preds.append(mlEquals(KVariable(k, sort=sort), v, arg_sort=sort))
if constraints:
_preds.extend(self.constraints)
return mlAnd(_preds) | Return an ML predicate representing this substitution. | pred | python | runtimeverification/k | pyk/src/pyk/cterm/cterm.py | https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/cterm/cterm.py | BSD-3-Clause |
def constraint(self) -> KInner:
"""Return the set of constraints as a single flattened constraint using `mlAnd`."""
return mlAnd(self.constraints) | Return the set of constraints as a single flattened constraint using `mlAnd`. | constraint | python | runtimeverification/k | pyk/src/pyk/cterm/cterm.py | https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/cterm/cterm.py | BSD-3-Clause |
def add_constraint(self, constraint: KInner) -> CSubst:
"""Return this `CSubst` with an additional constraint added."""
return CSubst(self.subst, list(self.constraints) + [constraint]) | Return this `CSubst` with an additional constraint added. | add_constraint | python | runtimeverification/k | pyk/src/pyk/cterm/cterm.py | https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/cterm/cterm.py | BSD-3-Clause |
def apply(self, cterm: CTerm) -> CTerm:
"""Apply this `CSubst` to the given `CTerm` (instantiating the free variables, and adding the constraints)."""
config = self.subst(cterm.config)
constraints = [self.subst(constraint) for constraint in cterm.constraints] + list(self.constraints)
return CTerm(config, constraints) | Apply this `CSubst` to the given `CTerm` (instantiating the free variables, and adding the constraints). | apply | python | runtimeverification/k | pyk/src/pyk/cterm/cterm.py | https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/cterm/cterm.py | BSD-3-Clause |
def __call__(self, cterm: CTerm) -> CTerm:
"""Overload for `CSubst.apply`."""
return self.apply(cterm) | Overload for `CSubst.apply`. | __call__ | python | runtimeverification/k | pyk/src/pyk/cterm/cterm.py | https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/cterm/cterm.py | BSD-3-Clause |
def cterm_build_claim(
claim_id: str, init_cterm: CTerm, final_cterm: CTerm, keep_vars: Iterable[str] = ()
) -> tuple[KClaim, Subst]:
"""Return a `KClaim` between the supplied initial and final states.
Args:
claim_id: Label to give the claim.
init_cterm: State to put on LHS of the rule (constraints interpreted as `requires` clause).
final_cterm: State to put on RHS of the rule (constraints interpreted as `ensures` clause).
keep_vars: Variables to leave in the side-conditions even if not bound in the configuration.
Returns:
A tuple ``(claim, var_map)`` where
- ``claim``: A `KClaim` with variable naming conventions applied
so that it should be parseable by the K Frontend.
- ``var_map``: The variable renamings applied to make the claim parseable by the K Frontend
(which can be undone to recover original variables).
"""
init_config, *init_constraints = init_cterm
final_config, *final_constraints = final_cterm
return build_claim(claim_id, init_config, final_config, init_constraints, final_constraints, keep_vars=keep_vars) | Return a `KClaim` between the supplied initial and final states.
Args:
claim_id: Label to give the claim.
init_cterm: State to put on LHS of the rule (constraints interpreted as `requires` clause).
final_cterm: State to put on RHS of the rule (constraints interpreted as `ensures` clause).
keep_vars: Variables to leave in the side-conditions even if not bound in the configuration.
Returns:
A tuple ``(claim, var_map)`` where
- ``claim``: A `KClaim` with variable naming conventions applied
so that it should be parseable by the K Frontend.
- ``var_map``: The variable renamings applied to make the claim parseable by the K Frontend
(which can be undone to recover original variables). | cterm_build_claim | python | runtimeverification/k | pyk/src/pyk/cterm/cterm.py | https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/cterm/cterm.py | BSD-3-Clause |
def cterm_build_rule(
rule_id: str,
init_cterm: CTerm,
final_cterm: CTerm,
priority: int | None = None,
keep_vars: Iterable[str] = (),
defunc_with: KDefinition | None = None,
) -> tuple[KRule, Subst]:
"""Return a `KRule` between the supplied initial and final states.
Args:
rule_id: Label to give the rule.
init_cterm: State to put on LHS of the rule (constraints interpreted as `requires` clause).
final_cterm: State to put on RHS of the rule (constraints interpreted as `ensures` clause).
keep_vars: Variables to leave in the side-conditions even if not bound in the configuration.
priority: Priority index to use for generated rules.
defunc_with (optional): KDefinition to be able to defunctionalize LHS appropriately.
Returns:
A tuple ``(rule, var_map)`` where
- ``rule``: A `KRule` with variable naming conventions applied
so that it should be parseable by the K Frontend.
- ``var_map``: The variable renamings applied to make the rule parseable by the K Frontend
(which can be undone to recover original variables).
"""
init_config, *init_constraints = init_cterm
final_config, *final_constraints = final_cterm
return build_rule(
rule_id,
init_config,
final_config,
init_constraints,
final_constraints,
priority,
keep_vars,
defunc_with=defunc_with,
) | Return a `KRule` between the supplied initial and final states.
Args:
rule_id: Label to give the rule.
init_cterm: State to put on LHS of the rule (constraints interpreted as `requires` clause).
final_cterm: State to put on RHS of the rule (constraints interpreted as `ensures` clause).
keep_vars: Variables to leave in the side-conditions even if not bound in the configuration.
priority: Priority index to use for generated rules.
defunc_with (optional): KDefinition to be able to defunctionalize LHS appropriately.
Returns:
A tuple ``(rule, var_map)`` where
- ``rule``: A `KRule` with variable naming conventions applied
so that it should be parseable by the K Frontend.
- ``var_map``: The variable renamings applied to make the rule parseable by the K Frontend
(which can be undone to recover original variables). | cterm_build_rule | python | runtimeverification/k | pyk/src/pyk/cterm/cterm.py | https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/cterm/cterm.py | BSD-3-Clause |
def cterms_anti_unify(
cterms: Iterable[CTerm], keep_values: bool = False, kdef: KDefinition | None = None
) -> tuple[CTerm, list[CSubst]]:
"""Given many `CTerm` instances, find a more general `CTerm` which can instantiate to all.
Args:
cterms: `CTerm`s to consider for finding a more general `CTerm` with this one.
keep_values: do not discard information about abstracted variables in returned result.
kdef (optional): `KDefinition` to make analysis more precise.
Returns:
A tuple ``(cterm, csubsts)`` where
- ``cterm``: More general `CTerm` than any of the input `CTerm`s.
- ``csubsts``: List of `CSubst` which, when applied to `cterm`, yield the input `CTerm`s.
"""
# TODO: optimize this function, reduce useless auto-generated variables.
cterms = list(cterms)
if not cterms:
raise ValueError('Anti-unification failed, no CTerms provided')
merged_cterm = cterms[0]
for cterm in cterms[1:]:
merged_cterm = merged_cterm.anti_unify(cterm, keep_values, kdef)[0]
csubsts = [not_none(merged_cterm.match_with_constraint(cterm)) for cterm in cterms]
return merged_cterm, csubsts | Given many `CTerm` instances, find a more general `CTerm` which can instantiate to all.
Args:
cterms: `CTerm`s to consider for finding a more general `CTerm` with this one.
keep_values: do not discard information about abstracted variables in returned result.
kdef (optional): `KDefinition` to make analysis more precise.
Returns:
A tuple ``(cterm, csubsts)`` where
- ``cterm``: More general `CTerm` than any of the input `CTerm`s.
- ``csubsts``: List of `CSubst` which, when applied to `cterm`, yield the input `CTerm`s. | cterms_anti_unify | python | runtimeverification/k | pyk/src/pyk/cterm/cterm.py | https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/cterm/cterm.py | BSD-3-Clause |
def parameter_repr(self):
"""
When building documentation, we want Parameter objects to show their
description in a nice way
"""
significance = 'Insignificant ' if not self.significant else ''
class_name = self.__class__.__name__
has_default = self._default != luigi.parameter._no_value
default = ' (defaults to {})'.format(self._default) if has_default else ''
description = (': ' + self.description if self.description else '')
return significance + class_name + default + description | When building documentation, we want Parameter objects to show their
description in a nice way | parameter_repr | python | spotify/luigi | doc/conf.py | https://github.com/spotify/luigi/blob/master/doc/conf.py | Apache-2.0 |
def assertIn(needle, haystack):
"""
We test repr of Parameter objects, since it'll be used for readthedocs
"""
assert needle in haystack | We test repr of Parameter objects, since it'll be used for readthedocs | assertIn | python | spotify/luigi | doc/conf.py | https://github.com/spotify/luigi/blob/master/doc/conf.py | Apache-2.0 |
def _warn_node(self, msg, node, *args, **kwargs):
"""
Mute warnings that are like ``WARNING: nonlocal image URI found: https://img. ...``
Solution was found by googling, copied it from SO:
http://stackoverflow.com/questions/12772927/specifying-an-online-image-in-sphinx-restructuredtext-format
"""
if not msg.startswith('nonlocal image URI found:'):
self._warnfunc(msg, '%s:%s' % get_source_line(node), *args, **kwargs) | Mute warnings that are like ``WARNING: nonlocal image URI found: https://img. ...``
Solution was found by googling, copied it from SO:
http://stackoverflow.com/questions/12772927/specifying-an-online-image-in-sphinx-restructuredtext-format | _warn_node | python | spotify/luigi | doc/conf.py | https://github.com/spotify/luigi/blob/master/doc/conf.py | Apache-2.0 |
def exists(self):
"""
Returns ``True`` if the :py:class:`Target` exists and ``False`` otherwise.
"""
pass | Returns ``True`` if the :py:class:`Target` exists and ``False`` otherwise. | exists | python | spotify/luigi | luigi/target.py | https://github.com/spotify/luigi/blob/master/luigi/target.py | Apache-2.0 |
def exists(self, path):
"""
Return ``True`` if file or directory at ``path`` exist, ``False`` otherwise
:param str path: a path within the FileSystem to check for existence.
"""
pass | Return ``True`` if file or directory at ``path`` exist, ``False`` otherwise
:param str path: a path within the FileSystem to check for existence. | exists | python | spotify/luigi | luigi/target.py | https://github.com/spotify/luigi/blob/master/luigi/target.py | Apache-2.0 |
def remove(self, path, recursive=True, skip_trash=True):
""" Remove file or directory at location ``path``
:param str path: a path within the FileSystem to remove.
:param bool recursive: if the path is a directory, recursively remove the directory and all
of its descendants. Defaults to ``True``.
"""
pass | Remove file or directory at location ``path``
:param str path: a path within the FileSystem to remove.
:param bool recursive: if the path is a directory, recursively remove the directory and all
of its descendants. Defaults to ``True``. | remove | python | spotify/luigi | luigi/target.py | https://github.com/spotify/luigi/blob/master/luigi/target.py | Apache-2.0 |
def mkdir(self, path, parents=True, raise_if_exists=False):
"""
Create directory at location ``path``
Creates the directory at ``path`` and implicitly create parent
directories if they do not already exist.
:param str path: a path within the FileSystem to create as a directory.
:param bool parents: Create parent directories when necessary. When
parents=False and the parent directory doesn't
exist, raise luigi.target.MissingParentDirectory
:param bool raise_if_exists: raise luigi.target.FileAlreadyExists if
the folder already exists.
"""
raise NotImplementedError("mkdir() not implemented on {0}".format(self.__class__.__name__)) | Create directory at location ``path``
Creates the directory at ``path`` and implicitly create parent
directories if they do not already exist.
:param str path: a path within the FileSystem to create as a directory.
:param bool parents: Create parent directories when necessary. When
parents=False and the parent directory doesn't
exist, raise luigi.target.MissingParentDirectory
:param bool raise_if_exists: raise luigi.target.FileAlreadyExists if
the folder already exists. | mkdir | python | spotify/luigi | luigi/target.py | https://github.com/spotify/luigi/blob/master/luigi/target.py | Apache-2.0 |
def isdir(self, path):
"""
Return ``True`` if the location at ``path`` is a directory. If not, return ``False``.
:param str path: a path within the FileSystem to check as a directory.
*Note*: This method is optional, not all FileSystem subclasses implements it.
"""
raise NotImplementedError("isdir() not implemented on {0}".format(self.__class__.__name__)) | Return ``True`` if the location at ``path`` is a directory. If not, return ``False``.
:param str path: a path within the FileSystem to check as a directory.
*Note*: This method is optional, not all FileSystem subclasses implements it. | isdir | python | spotify/luigi | luigi/target.py | https://github.com/spotify/luigi/blob/master/luigi/target.py | Apache-2.0 |
def listdir(self, path):
"""Return a list of files rooted in path.
This returns an iterable of the files rooted at ``path``. This is intended to be a
recursive listing.
:param str path: a path within the FileSystem to list.
*Note*: This method is optional, not all FileSystem subclasses implements it.
"""
raise NotImplementedError("listdir() not implemented on {0}".format(self.__class__.__name__)) | Return a list of files rooted in path.
This returns an iterable of the files rooted at ``path``. This is intended to be a
recursive listing.
:param str path: a path within the FileSystem to list.
*Note*: This method is optional, not all FileSystem subclasses implements it. | listdir | python | spotify/luigi | luigi/target.py | https://github.com/spotify/luigi/blob/master/luigi/target.py | Apache-2.0 |
def move(self, path, dest):
"""
Move a file, as one would expect.
"""
raise NotImplementedError("move() not implemented on {0}".format(self.__class__.__name__)) | Move a file, as one would expect. | move | python | spotify/luigi | luigi/target.py | https://github.com/spotify/luigi/blob/master/luigi/target.py | Apache-2.0 |
def rename_dont_move(self, path, dest):
"""
Potentially rename ``path`` to ``dest``, but don't move it into the
``dest`` folder (if it is a folder). This relates to :ref:`AtomicWrites`.
This method has a reasonable but not bullet proof default
implementation. It will just do ``move()`` if the file doesn't
``exists()`` already.
"""
warnings.warn("File system {} client doesn't support atomic mv.".format(self.__class__.__name__))
if self.exists(dest):
raise FileAlreadyExists()
self.move(path, dest) | Potentially rename ``path`` to ``dest``, but don't move it into the
``dest`` folder (if it is a folder). This relates to :ref:`AtomicWrites`.
This method has a reasonable but not bullet proof default
implementation. It will just do ``move()`` if the file doesn't
``exists()`` already. | rename_dont_move | python | spotify/luigi | luigi/target.py | https://github.com/spotify/luigi/blob/master/luigi/target.py | Apache-2.0 |
def rename(self, *args, **kwargs):
"""
Alias for ``move()``
"""
self.move(*args, **kwargs) | Alias for ``move()`` | rename | python | spotify/luigi | luigi/target.py | https://github.com/spotify/luigi/blob/master/luigi/target.py | Apache-2.0 |
def copy(self, path, dest):
"""
Copy a file or a directory with contents.
Currently, LocalFileSystem and MockFileSystem support only single file
copying but S3Client copies either a file or a directory as required.
"""
raise NotImplementedError("copy() not implemented on {0}".
format(self.__class__.__name__)) | Copy a file or a directory with contents.
Currently, LocalFileSystem and MockFileSystem support only single file
copying but S3Client copies either a file or a directory as required. | copy | python | spotify/luigi | luigi/target.py | https://github.com/spotify/luigi/blob/master/luigi/target.py | Apache-2.0 |
def __init__(self, path):
"""
Initializes a FileSystemTarget instance.
:param path: the path associated with this FileSystemTarget.
"""
# cast to str to allow path to be objects like pathlib.PosixPath and py._path.local.LocalPath
self.path = str(path) | Initializes a FileSystemTarget instance.
:param path: the path associated with this FileSystemTarget. | __init__ | python | spotify/luigi | luigi/target.py | https://github.com/spotify/luigi/blob/master/luigi/target.py | Apache-2.0 |
def fs(self):
"""
The :py:class:`FileSystem` associated with this FileSystemTarget.
"""
raise NotImplementedError() | The :py:class:`FileSystem` associated with this FileSystemTarget. | fs | python | spotify/luigi | luigi/target.py | https://github.com/spotify/luigi/blob/master/luigi/target.py | Apache-2.0 |
def open(self, mode):
"""
Open the FileSystem target.
This method returns a file-like object which can either be read from or written to depending
on the specified mode.
:param str mode: the mode `r` opens the FileSystemTarget in read-only mode, whereas `w` will
open the FileSystemTarget in write mode. Subclasses can implement
additional options. Using `b` is not supported; initialize with
`format=Nop` instead.
"""
pass | Open the FileSystem target.
This method returns a file-like object which can either be read from or written to depending
on the specified mode.
:param str mode: the mode `r` opens the FileSystemTarget in read-only mode, whereas `w` will
open the FileSystemTarget in write mode. Subclasses can implement
additional options. Using `b` is not supported; initialize with
`format=Nop` instead. | open | python | spotify/luigi | luigi/target.py | https://github.com/spotify/luigi/blob/master/luigi/target.py | Apache-2.0 |
def exists(self):
"""
Returns ``True`` if the path for this FileSystemTarget exists; ``False`` otherwise.
This method is implemented by using :py:attr:`fs`.
"""
path = self.path
if '*' in path or '?' in path or '[' in path or '{' in path:
logger.warning("Using wildcards in path %s might lead to processing of an incomplete dataset; "
"override exists() to suppress the warning.", path)
return self.fs.exists(path) | Returns ``True`` if the path for this FileSystemTarget exists; ``False`` otherwise.
This method is implemented by using :py:attr:`fs`. | exists | python | spotify/luigi | luigi/target.py | https://github.com/spotify/luigi/blob/master/luigi/target.py | Apache-2.0 |
def remove(self):
"""
Remove the resource at the path specified by this FileSystemTarget.
This method is implemented by using :py:attr:`fs`.
"""
self.fs.remove(self.path) | Remove the resource at the path specified by this FileSystemTarget.
This method is implemented by using :py:attr:`fs`. | remove | python | spotify/luigi | luigi/target.py | https://github.com/spotify/luigi/blob/master/luigi/target.py | Apache-2.0 |
def temporary_path(self):
"""
A context manager that enables a reasonably short, general and
magic-less way to solve the :ref:`AtomicWrites`.
* On *entering*, it will create the parent directories so the
temporary_path is writeable right away.
This step uses :py:meth:`FileSystem.mkdir`.
* On *exiting*, it will move the temporary file if there was no exception thrown.
This step uses :py:meth:`FileSystem.rename_dont_move`
The file system operations will be carried out by calling them on :py:attr:`fs`.
The typical use case looks like this:
.. code:: python
class MyTask(luigi.Task):
def output(self):
return MyFileSystemTarget(...)
def run(self):
with self.output().temporary_path() as self.temp_output_path:
run_some_external_command(output_path=self.temp_output_path)
"""
num = random.randrange(0, 10_000_000_000)
slashless_path = self.path.rstrip('/').rstrip("\\")
_temp_path = '{}-luigi-tmp-{:010}{}'.format(
slashless_path,
num,
self._trailing_slash())
# TODO: os.path doesn't make sense here as it's os-dependent
tmp_dir = os.path.dirname(slashless_path)
if tmp_dir:
self.fs.mkdir(tmp_dir, parents=True, raise_if_exists=False)
yield _temp_path
# We won't reach here if there was an user exception.
self.fs.rename_dont_move(_temp_path, self.path) | A context manager that enables a reasonably short, general and
magic-less way to solve the :ref:`AtomicWrites`.
* On *entering*, it will create the parent directories so the
temporary_path is writeable right away.
This step uses :py:meth:`FileSystem.mkdir`.
* On *exiting*, it will move the temporary file if there was no exception thrown.
This step uses :py:meth:`FileSystem.rename_dont_move`
The file system operations will be carried out by calling them on :py:attr:`fs`.
The typical use case looks like this:
.. code:: python
class MyTask(luigi.Task):
def output(self):
return MyFileSystemTarget(...)
def run(self):
with self.output().temporary_path() as self.temp_output_path:
run_some_external_command(output_path=self.temp_output_path) | temporary_path | python | spotify/luigi | luigi/target.py | https://github.com/spotify/luigi/blob/master/luigi/target.py | Apache-2.0 |
def _urljoin(base, url):
"""
Join relative URLs to base URLs like urllib.parse.urljoin but support
arbitrary URIs (esp. 'http+unix://').
base part is fixed or mounted point, every url contains full base part.
"""
parsed = urlparse(base)
scheme = parsed.scheme
return urlparse(
urljoin(parsed._replace(scheme='http').geturl(), parsed.path + (url if url[0] == '/' else '/' + url))
)._replace(scheme=scheme).geturl() | Join relative URLs to base URLs like urllib.parse.urljoin but support
arbitrary URIs (esp. 'http+unix://').
base part is fixed or mounted point, every url contains full base part. | _urljoin | python | spotify/luigi | luigi/rpc.py | https://github.com/spotify/luigi/blob/master/luigi/rpc.py | Apache-2.0 |
def find_all_by_parameters(self, task_name, session=None, **task_params):
"""
Find tasks with the given task_name and the same parameters as the kwargs.
"""
with self._session(session) as session:
query = session.query(TaskRecord).join(TaskEvent).filter(TaskRecord.name == task_name)
for k, v in task_params.items():
alias = sqlalchemy.orm.aliased(TaskParameter)
query = query.join(alias).filter(alias.name == k, alias.value == v)
tasks = query.order_by(TaskEvent.ts)
for task in tasks:
# Sanity check
assert all(k in task.parameters and v == str(task.parameters[k].value) for k, v in task_params.items())
yield task | Find tasks with the given task_name and the same parameters as the kwargs. | find_all_by_parameters | python | spotify/luigi | luigi/db_task_history.py | https://github.com/spotify/luigi/blob/master/luigi/db_task_history.py | Apache-2.0 |
def find_all_by_name(self, task_name, session=None):
"""
Find all tasks with the given task_name.
"""
return self.find_all_by_parameters(task_name, session) | Find all tasks with the given task_name. | find_all_by_name | python | spotify/luigi | luigi/db_task_history.py | https://github.com/spotify/luigi/blob/master/luigi/db_task_history.py | Apache-2.0 |
def find_latest_runs(self, session=None):
"""
Return tasks that have been updated in the past 24 hours.
"""
with self._session(session) as session:
yesterday = datetime.datetime.now() - datetime.timedelta(days=1)
return session.query(TaskRecord).\
join(TaskEvent).\
filter(TaskEvent.ts >= yesterday).\
group_by(TaskRecord.id, TaskEvent.event_name, TaskEvent.ts).\
order_by(TaskEvent.ts.desc()).\
all() | Return tasks that have been updated in the past 24 hours. | find_latest_runs | python | spotify/luigi | luigi/db_task_history.py | https://github.com/spotify/luigi/blob/master/luigi/db_task_history.py | Apache-2.0 |
def find_all_runs(self, session=None):
"""
Return all tasks that have been updated.
"""
with self._session(session) as session:
return session.query(TaskRecord).all() | Return all tasks that have been updated. | find_all_runs | python | spotify/luigi | luigi/db_task_history.py | https://github.com/spotify/luigi/blob/master/luigi/db_task_history.py | Apache-2.0 |
def find_all_events(self, session=None):
"""
Return all running/failed/done events.
"""
with self._session(session) as session:
return session.query(TaskEvent).all() | Return all running/failed/done events. | find_all_events | python | spotify/luigi | luigi/db_task_history.py | https://github.com/spotify/luigi/blob/master/luigi/db_task_history.py | Apache-2.0 |
def find_task_by_id(self, id, session=None):
"""
Find task with the given record ID.
"""
with self._session(session) as session:
return session.query(TaskRecord).get(id) | Find task with the given record ID. | find_task_by_id | python | spotify/luigi | luigi/db_task_history.py | https://github.com/spotify/luigi/blob/master/luigi/db_task_history.py | Apache-2.0 |
def find_task_by_task_id(self, task_id, session=None):
"""
Find task with the given task ID.
"""
with self._session(session) as session:
return session.query(TaskRecord).filter(TaskRecord.task_id == task_id).all()[-1] | Find task with the given task ID. | find_task_by_task_id | python | spotify/luigi | luigi/db_task_history.py | https://github.com/spotify/luigi/blob/master/luigi/db_task_history.py | Apache-2.0 |
def _upgrade_schema(engine):
"""
Ensure the database schema is up to date with the codebase.
:param engine: SQLAlchemy engine of the underlying database.
"""
inspector = reflection.Inspector.from_engine(engine)
with engine.connect() as conn:
# Upgrade 1. Add task_id column and index to tasks
if 'task_id' not in [x['name'] for x in inspector.get_columns('tasks')]:
logger.warning('Upgrading DbTaskHistory schema: Adding tasks.task_id')
conn.execute('ALTER TABLE tasks ADD COLUMN task_id VARCHAR(200)')
conn.execute('CREATE INDEX ix_task_id ON tasks (task_id)')
# Upgrade 2. Alter value column to be TEXT, note that this is idempotent so no if-guard
if 'mysql' in engine.dialect.name:
conn.execute('ALTER TABLE task_parameters MODIFY COLUMN value TEXT')
elif 'oracle' in engine.dialect.name:
conn.execute('ALTER TABLE task_parameters MODIFY value TEXT')
elif 'mssql' in engine.dialect.name:
conn.execute('ALTER TABLE task_parameters ALTER COLUMN value TEXT')
elif 'postgresql' in engine.dialect.name:
if str([x for x in inspector.get_columns('task_parameters')
if x['name'] == 'value'][0]['type']) != 'TEXT':
conn.execute('ALTER TABLE task_parameters ALTER COLUMN value TYPE TEXT')
elif 'sqlite' in engine.dialect.name:
# SQLite does not support changing column types. A database file will need
# to be used to pickup this migration change.
for i in conn.execute('PRAGMA table_info(task_parameters);').fetchall():
if i['name'] == 'value' and i['type'] != 'TEXT':
logger.warning(
'SQLite can not change column types. Please use a new database '
'to pickup column type changes.'
)
else:
logger.warning(
'SQLAlcheny dialect {} could not be migrated to the TEXT type'.format(
engine.dialect
)
) | Ensure the database schema is up to date with the codebase.
:param engine: SQLAlchemy engine of the underlying database. | _upgrade_schema | python | spotify/luigi | luigi/db_task_history.py | https://github.com/spotify/luigi/blob/master/luigi/db_task_history.py | Apache-2.0 |
def terminate(self):
"""Terminate this process and its subprocesses."""
# default terminate() doesn't cleanup child processes, it orphans them.
try:
return self._recursive_terminate()
except ImportError:
return super(TaskProcess, self).terminate() | Terminate this process and its subprocesses. | terminate | python | spotify/luigi | luigi/worker.py | https://github.com/spotify/luigi/blob/master/luigi/worker.py | Apache-2.0 |
def check_complete(task, out_queue, completion_cache=None):
"""
Checks if task is complete, puts the result to out_queue, optionally using the completion cache.
"""
logger.debug("Checking if %s is complete", task)
try:
is_complete = check_complete_cached(task, completion_cache)
except Exception:
is_complete = TracebackWrapper(traceback.format_exc())
out_queue.put((task, is_complete)) | Checks if task is complete, puts the result to out_queue, optionally using the completion cache. | check_complete | python | spotify/luigi | luigi/worker.py | https://github.com/spotify/luigi/blob/master/luigi/worker.py | Apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.