<SYSTEM_TASK:>
Get a dict of phi variables and their corresponding variables.
<END_TASK>
<USER_TASK:>
Description:
def get_phi_variables(self, block_addr):
"""
Get a dict of phi variables and their corresponding variables.
:param int block_addr: Address of the block.
:return: A dict of phi variables, or an empty dict if there are no phi variables at the block.
:rtype: dict
""" |
if block_addr not in self._phi_variables_by_block:
return dict()
variables = { }
for phi in self._phi_variables_by_block[block_addr]:
variables[phi] = self._phi_variables[phi]
return variables |
<SYSTEM_TASK:>
Get all variables that have never been written to.
<END_TASK>
<USER_TASK:>
Description:
def input_variables(self, exclude_specials=True):
"""
Get all variables that have never been written to.
:param bool exclude_specials: Whether to exclude special variables, i.e., those with a category set.
:return: A list of variables that are never written to.
""" |
def has_write_access(accesses):
return any(acc.access_type == 'write' for acc in accesses)
def has_read_access(accesses):
return any(acc.access_type == 'read' for acc in accesses)
input_variables = [ ]
for variable, accesses in self._variable_accesses.items():
if not has_write_access(accesses) and has_read_access(accesses):
if not exclude_specials or not variable.category:
input_variables.append(variable)
return input_variables |
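As a minimal, self-contained sketch of the "read but never written" filter above, using stand-in access records instead of angr's internal ones (the Access namedtuple and the register names are hypothetical):

from collections import namedtuple

Access = namedtuple('Access', 'access_type')

variable_accesses = {
    'rdi': [Access('read')],                   # input: read, never written
    'rax': [Access('write'), Access('read')],  # not an input: written
    'rbx': [],                                 # never accessed at all
}

inputs = [var for var, accesses in variable_accesses.items()
          if any(a.access_type == 'read' for a in accesses)
          and not any(a.access_type == 'write' for a in accesses)]
print(inputs)  # ['rdi']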
<SYSTEM_TASK:>
Assign default names to all variables.
<END_TASK>
<USER_TASK:>
Description:
def assign_variable_names(self):
"""
Assign default names to all variables.
:return: None
""" |
for var in self._variables:
if isinstance(var, SimStackVariable):
if var.name is not None:
continue
if var.ident.startswith('iarg'):
var.name = 'arg_%x' % var.offset
else:
var.name = 's_%x' % (-var.offset)
# var.name = var.ident
elif isinstance(var, SimRegisterVariable):
if var.name is not None:
continue
var.name = var.ident |
<SYSTEM_TASK:>
Get a list of all references to the given variable.
<END_TASK>
<USER_TASK:>
Description:
def get_variable_accesses(self, variable, same_name=False):
"""
Get a list of all references to the given variable.
:param SimVariable variable: The variable.
:param bool same_name: Whether to include all variables with the same variable name, or just
based on the variable identifier.
:return: All references to the variable.
:rtype: list
""" |
if variable.region == 'global':
return self.global_manager.get_variable_accesses(variable, same_name=same_name)
elif variable.region in self.function_managers:
return self.function_managers[variable.region].get_variable_accesses(variable, same_name=same_name)
l.warning('get_variable_accesses(): Region %s is not found.', variable.region)
return [ ] |
<SYSTEM_TASK:>
Call this Callable with a string of C-style arguments.
<END_TASK>
<USER_TASK:>
Description:
def call_c(self, c_args):
"""
Call this Callable with a string of C-style arguments.
:param str c_args: C-style arguments.
:return: The return value from the call.
:rtype: claripy.Ast
""" |
c_args = c_args.strip()
if not c_args.startswith("("):
c_args = "(" + c_args
if not c_args.endswith(")"):
c_args += ")"
# Parse arguments
content = "int main() { func%s; }" % c_args
ast = pycparser.CParser().parse(content)
if not ast.ext or not isinstance(ast.ext[0], pycparser.c_ast.FuncDef):
raise AngrCallableError("Error in parsing the given C-style argument string.")
if not ast.ext[0].body.block_items or not isinstance(ast.ext[0].body.block_items[0], pycparser.c_ast.FuncCall):
raise AngrCallableError("Error in parsing the given C-style argument string: "
"Cannot find the expected function call.")
arg_exprs = ast.ext[0].body.block_items[0].args.exprs
args = [ ]
for expr in arg_exprs:
if isinstance(expr, pycparser.c_ast.Constant):
# string
if expr.type == "string":
args.append(expr.value[1:-1])
elif expr.type == "int":
args.append(int(expr.value))
else:
raise AngrCallableError("Unsupported expression type %s." % expr.type)
else:
raise AngrCallableError("Unsupported expression type %s." % type(expr))
return self.__call__(*args) |
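The wrapping trick above can be exercised standalone with pycparser; this sketch assumes only that the pycparser package is installed, and the argument string is illustrative:

import pycparser

c_args = '(42, "hello")'
# wrap the argument string in a dummy main() so pycparser can build an AST
ast = pycparser.CParser().parse("int main() { func%s; }" % c_args)
call = ast.ext[0].body.block_items[0]   # the FuncCall node
for expr in call.args.exprs:
    if expr.type == "int":
        print(int(expr.value))          # 42
    elif expr.type == "string":
        print(expr.value[1:-1])         # hello (surrounding quotes stripped)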
<SYSTEM_TASK:>
Discard the ancestry of this state.
<END_TASK>
<USER_TASK:>
Description:
def trim(self):
"""
Discard the ancestry of this state.
""" |
new_hist = self.copy({})
new_hist.parent = None
self.state.register_plugin('history', new_hist) |
<SYSTEM_TASK:>
Filter self.actions based on some common parameters.
<END_TASK>
<USER_TASK:>
Description:
def filter_actions(self, block_addr=None, block_stmt=None, insn_addr=None, read_from=None, write_to=None):
"""
Filter self.actions based on some common parameters.
:param block_addr: Only return actions generated in blocks starting at this address.
:param block_stmt: Only return actions generated in the nth statement of each block.
:param insn_addr: Only return actions generated in the assembly instruction at this address.
:param read_from: Only return actions that perform a read from the specified location.
:param write_to: Only return actions that perform a write to the specified location.
Notes:
If IR optimization is turned on, reads and writes may not occur in the instruction
they originally came from. Most commonly, if a register is read twice in the same
block, the second read will not happen; the temp the value is already stored in is
reused instead.
Valid values for read_from and write_to are the string literals 'reg' or 'mem' (matching
any read or write to registers or memory, respectively), any string (representing a read
or write to the named register), and any integer (representing a read or write to the
memory at this address).
""" |
if read_from is not None:
if write_to is not None:
raise ValueError("Can't handle read_from and write_to at the same time!")
if read_from in ('reg', 'mem'):
read_type = read_from
read_offset = None
elif isinstance(read_from, str):
read_type = 'reg'
read_offset = self.state.project.arch.registers[read_from][0]
else:
read_type = 'mem'
read_offset = read_from
if write_to is not None:
if write_to in ('reg', 'mem'):
write_type = write_to
write_offset = None
elif isinstance(write_to, str):
write_type = 'reg'
write_offset = self.state.project.arch.registers[write_to][0]
else:
write_type = 'mem'
write_offset = write_to
#def addr_of_stmt(bbl_addr, stmt_idx):
# if stmt_idx is None:
# return None
# stmts = self.state.project.factory.block(bbl_addr).vex.statements
# if stmt_idx >= len(stmts):
# return None
# for i in reversed(range(stmt_idx + 1)):
# if stmts[i].tag == 'Ist_IMark':
# return stmts[i].addr + stmts[i].delta
# return None
def action_reads(action):
if action.type != read_type:
return False
if action.action != 'read':
return False
if read_offset is None:
return True
addr = action.addr
if isinstance(addr, SimActionObject):
addr = addr.ast
if isinstance(addr, claripy.ast.Base):
if addr.symbolic:
return False
addr = self.state.solver.eval(addr)
if addr != read_offset:
return False
return True
def action_writes(action):
if action.type != write_type:
return False
if action.action != 'write':
return False
if write_offset is None:
return True
addr = action.addr
if isinstance(addr, SimActionObject):
addr = addr.ast
if isinstance(addr, claripy.ast.Base):
if addr.symbolic:
return False
addr = self.state.solver.eval(addr)
if addr != write_offset:
return False
return True
return [x for x in reversed(self.actions) if
(block_addr is None or x.bbl_addr == block_addr) and
(block_stmt is None or x.stmt_idx == block_stmt) and
(read_from is None or action_reads(x)) and
(write_to is None or action_writes(x)) and
(insn_addr is None or (x.sim_procedure is None and x.ins_addr == insn_addr))
#(insn_addr is None or (x.sim_procedure is None and addr_of_stmt(x.bbl_addr, x.stmt_idx) == insn_addr))
] |
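A hedged usage sketch: it assumes a binary at the hypothetical path './a.out' and that action tracking is enabled via angr.options.refs; nothing here beyond filter_actions itself comes from the snippet above.

import angr

proj = angr.Project('./a.out', auto_load_libs=False)             # hypothetical binary
state = proj.factory.entry_state(add_options=angr.options.refs)  # record reads/writes as actions
simgr = proj.factory.simulation_manager(state)
for _ in range(5):
    simgr.step()
for s in simgr.active:
    # every recorded write to rax, most recent first
    print(len(s.history.filter_actions(write_to='rax')))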
<SYSTEM_TASK:>
Find the common ancestor between this history node and 'other'.
<END_TASK>
<USER_TASK:>
Description:
def closest_common_ancestor(self, other):
"""
Find the common ancestor between this history node and 'other'.
:param other: the PathHistory to find a common ancestor with.
:return: the common ancestor SimStateHistory, or None if there isn't one
""" |
our_history_iter = reversed(HistoryIter(self))
their_history_iter = reversed(HistoryIter(other))
sofar = set()
while True:
our_done = False
their_done = False
try:
our_next = next(our_history_iter)
if our_next in sofar:
# we found it!
return our_next
sofar.add(our_next)
except StopIteration:
# we ran out of items during iteration
our_done = True
try:
their_next = next(their_history_iter)
if their_next in sofar:
# we found it!
return their_next
sofar.add(their_next)
except StopIteration:
# we ran out of items during iteration
their_done = True
# if we ran out of both lists, there's no common ancestor
if our_done and their_done:
return None |
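The alternating walk generalizes to any parent-linked structure; this self-contained sketch (Node and ancestors are stand-ins for SimStateHistory and HistoryIter) expresses the same search with itertools.zip_longest:

from itertools import zip_longest

class Node:
    def __init__(self, parent=None):
        self.parent = parent

def ancestors(node):
    while node is not None:
        yield node
        node = node.parent

def closest_common_ancestor(a, b):
    seen = set()
    # walk both chains in lock-step; the first revisited node is the answer
    for x, y in zip_longest(ancestors(a), ancestors(b)):
        if x is not None:
            if x in seen:
                return x
            seen.add(x)
        if y is not None:
            if y in seen:
                return y
            seen.add(y)
    return None

root = Node(); left = Node(root); right = Node(root); leaf = Node(left)
print(closest_common_ancestor(leaf, right) is root)  # True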
<SYSTEM_TASK:>
Returns the constraints that have been accumulated since `other`.
<END_TASK>
<USER_TASK:>
Description:
def constraints_since(self, other):
"""
Returns the constraints that have been accumulated since `other`.
:param other: a prior PathHistory object
:returns: a list of constraints
""" |
constraints = [ ]
cur = self
while cur is not other and cur is not None:
constraints.extend(cur.recent_constraints)
cur = cur.parent
return constraints |
<SYSTEM_TASK:>
Generate a slice of the graph from the head node to the given frontier.
<END_TASK>
<USER_TASK:>
Description:
def slice_graph(graph, node, frontier, include_frontier=False):
"""
Generate a slice of the graph from the head node to the given frontier.
:param networkx.DiGraph graph: The graph to work on.
:param node: The starting node in the graph.
:param frontier: A list of frontier nodes.
:param bool include_frontier: Whether the frontier nodes are included in the slice or not.
:return: A subgraph.
:rtype: networkx.DiGraph
""" |
subgraph = networkx.DiGraph()
for frontier_node in frontier:
for simple_path in networkx.all_simple_paths(graph, node, frontier_node):
for src, dst in zip(simple_path, simple_path[1:]):
if include_frontier or (src not in frontier and dst not in frontier):
subgraph.add_edge(src, dst)
if not list(subgraph.nodes):
# HACK: FIXME: for infinite loop nodes, this would return an empty set, so we include the loop body itself
# Make sure this makes sense (EDG thinks it does)
if (node, node) in graph.edges:
subgraph.add_edge(node, node)
return subgraph |
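A self-contained check of the slicing behavior on a toy graph (requires networkx and the slice_graph function above; the node labels are illustrative):

import networkx

g = networkx.DiGraph()
g.add_edges_from([(1, 2), (2, 3), (2, 4), (3, 5), (4, 5), (5, 6)])

sub = slice_graph(g, 1, [5])        # slice from node 1 up to frontier {5}
print(sorted(sub.edges()))          # [(1, 2), (2, 3), (2, 4)] - frontier edges excluded
sub = slice_graph(g, 1, [5], include_frontier=True)
print(sorted(sub.edges()))          # [(1, 2), (2, 3), (2, 4), (3, 5), (4, 5)]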
<SYSTEM_TASK:>
Translates an integer, set, list or function into a lambda that checks if state's current basic block matches
<END_TASK>
<USER_TASK:>
Description:
def condition_to_lambda(condition, default=False):
"""
Translates an integer, set, list or function into a lambda that checks if state's current basic block matches
some condition.
:param condition: An integer, set, list or lambda to convert to a lambda.
:param default: The default return value of the lambda (in case condition is None). Default: False.
:returns: A tuple of two items: a lambda that takes a state and returns the set of addresses that it
matched from the condition, and a set that contains the normalized set of addresses to stop
at, or None if no addresses were provided statically.
""" |
if condition is None:
condition_function = lambda state: default
static_addrs = set()
elif isinstance(condition, int):
return condition_to_lambda((condition,))
elif isinstance(condition, (tuple, set, list)):
static_addrs = set(condition)
def condition_function(state):
if state.addr in static_addrs:
# returning {state.addr} instead of True to properly handle find/avoid conflicts
return {state.addr}
if not isinstance(state.project.engines.default_engine, engines.SimEngineVEX):
return False
try:
# If the address is not in the set (which could mean it is
# not at the top of a block), check directly in the blocks
# (Blocks are repeatedly created for every check, but with
# the IRSB cache in angr lifter it should be OK.)
return static_addrs.intersection(set(state.block().instruction_addrs))
except (AngrError, SimError):
return False
elif hasattr(condition, '__call__'):
condition_function = condition
static_addrs = None
else:
raise AngrExplorationTechniqueError("ExplorationTechnique is unable to convert given type (%s) to a callable condition function." % condition.__class__)
return condition_function, static_addrs |
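A small sketch of the normalization performed above; no state is needed just to inspect the static address set (assumes condition_to_lambda from this snippet is in scope):

f, addrs = condition_to_lambda(0x400000)
print(addrs)   # {4194304} - a bare int is normalized into a set
f, addrs = condition_to_lambda([0x400000, 0x400004])
print(addrs)   # {4194304, 4194308}
f, addrs = condition_to_lambda(lambda state: state.addr == 0x400000)
print(addrs)   # None - nothing is known statically about a callable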
<SYSTEM_TASK:>
Determines a persistent ID for an object.
<END_TASK>
<USER_TASK:>
Description:
def _get_persistent_id(self, o):
"""
Determines a persistent ID for an object.
Does NOT do stores.
""" |
if type(o) in self.hash_dedup:
oid = o.__class__.__name__ + "-" + str(hash(o))
self._object_cache[oid] = o
return oid
if any(isinstance(o,c) for c in self.unsafe_key_baseclasses):
return None
try:
return self._uuid_cache[o]
except KeyError:
pass
except TypeError:
return None
#if type(o) in self.uuid_dedup:
# return self._get_id(o)
if o.__class__.__module__.split('.')[0] in self.module_dedup or o.__class__ in self.uuid_dedup:
oid = o.__class__.__name__.split(".")[-1] + '-' + str(uuid.uuid4())
self._object_cache[oid] = o
self._uuid_cache[o] = oid
return oid
return None |
<SYSTEM_TASK:>
Checks if the provided id is already in the vault.
<END_TASK>
<USER_TASK:>
Description:
def is_stored(self, i):
"""
Checks if the provided id is already in the vault.
""" |
if i in self.stored:
return True
try:
with self._read_context(i):
return True
except (AngrVaultError, EOFError):
return False |
<SYSTEM_TASK:>
Retrieves one object from the pickler with the provided id.
<END_TASK>
<USER_TASK:>
Description:
def load(self, id): #pylint:disable=redefined-builtin
"""
Retrieves one object from the pickler with the provided id.
:param id: an ID to use
""" |
l.debug("LOAD: %s", id)
try:
l.debug("... trying cached")
return self._object_cache[id]
except KeyError:
l.debug("... cached failed")
with self._read_context(id) as u:
return VaultUnpickler(self, u).load() |
<SYSTEM_TASK:>
Stores an object and returns its ID.
<END_TASK>
<USER_TASK:>
Description:
def store(self, o, id=None): #pylint:disable=redefined-builtin
"""
Stores an object and returns its ID.
:param o: the object
:param id: an ID to use
""" |
actual_id = id or self._get_persistent_id(o) or "TMP-"+str(uuid.uuid4())
l.debug("STORE: %s %s", o, actual_id)
# this handles recursive objects
if actual_id in self.storing:
return actual_id
if self.is_stored(actual_id):
l.debug("... already stored")
return actual_id
with self._write_context(actual_id) as output:
self.storing.add(actual_id)
VaultPickler(self, output, assigned_objects=(o,)).dump(o)
self.stored.add(actual_id)
return actual_id |
<SYSTEM_TASK:>
Returns a serialized string representing the object, post-deduplication.
<END_TASK>
<USER_TASK:>
Description:
def dumps(self, o):
"""
Returns a serialized string representing the object, post-deduplication.
:param o: the object
""" |
f = io.BytesIO()
VaultPickler(self, f).dump(o)
f.seek(0)
return f.read() |
<SYSTEM_TASK:>
Deserializes a string representation of the object.
<END_TASK>
<USER_TASK:>
Description:
def loads(self, s):
"""
Deserializes a string representation of the object.
:param s: the string
""" |
f = io.BytesIO(s)
return VaultUnpickler(self, f).load() |
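A hedged round-trip sketch: it assumes angr's VaultDir subclass and a throwaway directory, and the stored dict is illustrative.

import tempfile
import angr.vaults

vault = angr.vaults.VaultDir(tempfile.mkdtemp())  # hypothetical storage location
blob = vault.dumps({'answer': 42})                # serialize through the deduplicating pickler
print(vault.loads(blob))                          # {'answer': 42}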
<SYSTEM_TASK:>
Checks that the specified state options result in the same states over the next `depth` states.
<END_TASK>
<USER_TASK:>
Description:
def set_state_options(self, left_add_options=None, left_remove_options=None, right_add_options=None, right_remove_options=None):
"""
Checks that the specified state options result in the same states over the next `depth` states.
""" |
s_right = self.project.factory.full_init_state(
add_options=right_add_options, remove_options=right_remove_options,
args=[],
)
s_left = self.project.factory.full_init_state(
add_options=left_add_options, remove_options=left_remove_options,
args=[],
)
return self.set_states(s_left, s_right) |
<SYSTEM_TASK:>
Checks that the specified paths stay the same over the next `depth` states.
<END_TASK>
<USER_TASK:>
Description:
def set_states(self, left_state, right_state):
"""
Checks that the specified paths stay the same over the next `depth` states.
""" |
simgr = self.project.factory.simulation_manager(right_state)
simgr.stash(to_stash='right')
simgr.active.append(left_state)
simgr.stash(to_stash='left')
simgr.stash(to_stash='stashed_left')
simgr.stash(to_stash='stashed_right')
return self.set_simgr(simgr) |
<SYSTEM_TASK:>
Checks that a detected incongruency is not caused by translation backends having a different
<END_TASK>
<USER_TASK:>
Description:
def _validate_incongruency(self):
"""
Checks that a detected incongruency is not caused by translation backends having a different
idea of what constitutes a basic block.
""" |
ot = self._throw
try:
self._throw = False
l.debug("Validating incongruency.")
if ("UNICORN" in self.simgr.right[0].options) ^ ("UNICORN" in self.simgr.left[0].options):
if "UNICORN" in self.simgr.right[0].options:
unicorn_stash = 'right'
normal_stash = 'left'
else:
unicorn_stash = 'left'
normal_stash = 'right'
unicorn_path = self.simgr.stashes[unicorn_stash][0]
normal_path = self.simgr.stashes[normal_stash][0]
if unicorn_path.arch.name in ("X86", "AMD64"):
# unicorn "falls behind" on loop and rep instructions, since
# it sees them as ending a basic block. Here, we will
# step the unicorn until it's caught up
npg = self.project.factory.simulation_manager(unicorn_path)
npg.explore(find=lambda p: p.addr == normal_path.addr, n=200)
if len(npg.found) == 0:
l.debug("Validator failed to sync paths.")
return True
new_unicorn = npg.found[0]
delta = new_unicorn.history.block_count - normal_path.history.block_count
normal_path.history.recent_block_count += delta
new_normal = normal_path
elif unicorn_path.arch.name == "MIPS32":
# unicorn gets ahead here, because VEX falls behind for unknown reasons
# for example, this block:
#
# 0x1016f20: lui $gp, 0x17
# 0x1016f24: addiu $gp, $gp, -0x35c0
# 0x1016f28: addu $gp, $gp, $t9
# 0x1016f2c: addiu $sp, $sp, -0x28
# 0x1016f30: sw $ra, 0x24($sp)
# 0x1016f34: sw $s0, 0x20($sp)
# 0x1016f38: sw $gp, 0x10($sp)
# 0x1016f3c: lw $v0, -0x6cf0($gp)
# 0x1016f40: move $at, $at
npg = self.project.factory.simulation_manager(normal_path)
npg.explore(find=lambda p: p.addr == unicorn_path.addr, n=200)
if len(npg.found) == 0:
l.debug("Validator failed to sync paths.")
return True
new_normal = npg.found[0]
delta = new_normal.history.block_count - unicorn_path.history.block_count
unicorn_path.history.recent_block_count += delta
new_unicorn = unicorn_path
else:
l.debug("Dunno!")
return True
if self.compare_paths(new_unicorn, new_normal):
l.debug("Divergence accounted for by unicorn.")
self.simgr.stashes[unicorn_stash][0] = new_unicorn
self.simgr.stashes[normal_stash][0] = new_normal
return False
else:
l.warning("Divergence unaccounted for by unicorn.")
return True
else:
# no idea
l.warning("Divergence unaccounted for.")
return True
finally:
self._throw = ot |
<SYSTEM_TASK:>
Checks state `state` to see if the breakpoint should fire.
<END_TASK>
<USER_TASK:>
Description:
def check(self, state, when):
"""
Checks state `state` to see if the breakpoint should fire.
:param state: The state.
:param when: Whether the check is happening before or after the event.
:return: A boolean representing whether the breakpoint should fire.
""" |
ok = self.enabled and (when == self.when or self.when == BP_BOTH)
if not ok:
return ok
l.debug("... after enabled and when: %s", ok)
for a in [ _ for _ in self.kwargs if not _.endswith("_unique") ]:
current_expr = getattr(state.inspect, a)
needed = self.kwargs.get(a, None)
l.debug("... checking condition %s", a)
if current_expr is None and needed is None:
l.debug("...... both None, True")
c_ok = True
elif current_expr is not None and needed is not None:
if state.solver.solution(current_expr, needed):
l.debug("...... is_solution!")
c_ok = True
else:
l.debug("...... not solution...")
c_ok = False
if c_ok and self.kwargs.get(a+'_unique', True):
l.debug("...... checking uniqueness")
if not state.solver.unique(current_expr):
l.debug("...... not unique")
c_ok = False
else:
l.debug("...... one None, False")
c_ok = False
ok = ok and c_ok
if not ok:
return ok
l.debug("... after condition %s: %s", a, ok)
ok = ok and (self.condition is None or self.condition(state))
l.debug("... after condition func: %s", ok)
return ok |
<SYSTEM_TASK:>
Trigger the breakpoint.
<END_TASK>
<USER_TASK:>
Description:
def fire(self, state):
"""
Trigger the breakpoint.
:param state: The state.
""" |
if self.action is None or self.action == BP_IPDB:
import ipdb; ipdb.set_trace() #pylint:disable=F0401
elif self.action == BP_IPYTHON:
import IPython
shell = IPython.terminal.embed.InteractiveShellEmbed()
shell.mainloop(display_banner="This is an ipython shell for you to happily debug your state!\n" + \
"The state can be accessed through the variable 'state'. You can\n" +\
"make modifications, then exit this shell to resume your analysis.")
else:
self.action(state) |
<SYSTEM_TASK:>
Called from within SimuVEX when an event happens. This function checks all breakpoints registered for that event
<END_TASK>
<USER_TASK:>
Description:
def action(self, event_type, when, **kwargs):
"""
Called from within SimuVEX when an event happens. This function checks all breakpoints registered for that event
and fires the ones whose conditions match.
""" |
l.debug("Event %s (%s) firing...", event_type, when)
for k,v in kwargs.items():
if k not in inspect_attributes:
raise ValueError("Invalid inspect attribute %s passed in. Should be one of: %s" % (k, inspect_attributes))
#l.debug("... %s = %r", k, v)
l.debug("... setting %s", k)
setattr(self, k, v)
for bp in self._breakpoints[event_type]:
l.debug("... checking bp %r", bp)
if bp.check(self.state, when):
l.debug("... FIRE")
bp.fire(self.state) |
<SYSTEM_TASK:>
Adds a breakpoint which would trigger on `event_type`.
<END_TASK>
<USER_TASK:>
Description:
def add_breakpoint(self, event_type, bp):
"""
Adds a breakpoint which would trigger on `event_type`.
:param event_type: The event type to trigger on
:param bp: The breakpoint
:return: None
""" |
if event_type not in event_types:
raise ValueError("Invalid event type %s passed in. Should be one of: %s" % (event_type,
", ".join(event_types))
)
self._breakpoints[event_type].append(bp) |
<SYSTEM_TASK:>
Removes a breakpoint.
<END_TASK>
<USER_TASK:>
Description:
def remove_breakpoint(self, event_type, bp=None, filter_func=None):
"""
Removes a breakpoint.
:param event_type: The event type the breakpoint is registered under.
:param bp: The breakpoint to remove.
:param filter_func: A filter function to specify whether each breakpoint should be removed or not.
""" |
if bp is None and filter_func is None:
raise ValueError('remove_breakpoint(): You must specify either "bp" or "filter".')
try:
if bp is not None:
self._breakpoints[event_type].remove(bp)
else:
self._breakpoints[event_type] = [ b for b in self._breakpoints[event_type] if not filter_func(b) ]
except ValueError:
# the breakpoint is not found
l.error('remove_breakpoint(): Breakpoint %s (type %s) is not found.', bp, event_type) |
<SYSTEM_TASK:>
Given a state, return the procedure corresponding to the current syscall.
<END_TASK>
<USER_TASK:>
Description:
def syscall(self, state, allow_unsupported=True):
"""
Given a state, return the procedure corresponding to the current syscall.
This procedure will have .syscall_number, .display_name, and .addr set.
:param state: The state to get the syscall number from
:param allow_unsupported: Whether to return a "dummy" syscall instead of raising an unsupported exception
""" |
abi = self.syscall_abi(state)
if state.os_name in SYSCALL_CC[state.arch.name]:
cc = SYSCALL_CC[state.arch.name][state.os_name](state.arch)
else:
# Use the default syscall calling convention - it may bring problems
_l.warning("No syscall calling convention available for %s/%s", state.arch.name, state.os_name)
cc = SYSCALL_CC[state.arch.name]['default'](state.arch)
sym_num = cc.syscall_num(state)
try:
num = state.solver.eval_one(sym_num)
except SimSolverError:
if allow_unsupported:
num = self.unknown_syscall_number
else:
if not state.solver.satisfiable():
raise AngrUnsupportedSyscallError("The program state is not satisfiable")
else:
raise AngrUnsupportedSyscallError("Got a symbolic syscall number")
proc = self.syscall_from_number(num, allow_unsupported=allow_unsupported, abi=abi)
proc.cc = cc
return proc |
<SYSTEM_TASK:>
Return whether or not the given address corresponds to a syscall implementation.
<END_TASK>
<USER_TASK:>
Description:
def is_syscall_addr(self, addr):
"""
Return whether or not the given address corresponds to a syscall implementation.
""" |
if self.kernel_base is None or addr < self.kernel_base:
return False
addr -= self.kernel_base
if addr % self.syscall_addr_alignment != 0:
return False
addr //= self.syscall_addr_alignment
return addr <= self.unknown_syscall_number |
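A self-contained sketch of the address scheme the check relies on: syscall numbers are mapped into a fake kernel region at kernel_base + number * alignment (all constants below are illustrative, not angr's actual defaults):

kernel_base = 0x8000000000000000      # illustrative
alignment = 4                         # illustrative
unknown_syscall_number = 0x200        # illustrative

def is_syscall_addr(addr):
    if addr < kernel_base:
        return False
    off = addr - kernel_base
    return off % alignment == 0 and off // alignment <= unknown_syscall_number

print(is_syscall_addr(kernel_base + 13 * alignment))      # True  (syscall 13)
print(is_syscall_addr(kernel_base + 13 * alignment + 1))  # False (misaligned)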
<SYSTEM_TASK:>
Get a syscall SimProcedure from an address.
<END_TASK>
<USER_TASK:>
Description:
def syscall_from_addr(self, addr, allow_unsupported=True):
"""
Get a syscall SimProcedure from an address.
:param addr: The address to convert to a syscall SimProcedure
:param allow_unsupported: Whether to return a dummy procedure for an unsupported syscall instead of raising an
exception.
:return: The SimProcedure for the syscall, or None if the address is not a syscall address.
""" |
if not self.is_syscall_addr(addr):
return None
number = (addr - self.kernel_base) // self.syscall_addr_alignment
for abi in self.syscall_abis:
baseno, minno, maxno = self.syscall_abis[abi]
if baseno <= number <= baseno + maxno - minno:
number += minno
number -= baseno
break
else:
abi = None
return self.syscall_from_number(number, allow_unsupported=allow_unsupported, abi=abi) |
<SYSTEM_TASK:>
Get a syscall SimProcedure from its number.
<END_TASK>
<USER_TASK:>
Description:
def syscall_from_number(self, number, allow_unsupported=True, abi=None):
"""
Get a syscall SimProcedure from its number.
:param number: The syscall number
:param allow_unsupported: Whether to return a "stub" syscall for unsupported numbers instead of throwing an error
:param abi: The name of the abi to use. If None, will assume that the abis have disjoint
numbering schemes and pick the right one.
:return: The SimProcedure for the syscall
""" |
abilist = self.syscall_abis if abi is None else [abi]
if self.syscall_library is None:
if not allow_unsupported:
raise AngrUnsupportedSyscallError("%s does not have a library of syscalls implemented" % self.name)
proc = P['stubs']['syscall']()
elif not allow_unsupported and not self.syscall_library.has_implementation(number, self.arch, abilist):
raise AngrUnsupportedSyscallError("No implementation for syscall %d" % number)
else:
proc = self.syscall_library.get(number, self.arch, abilist)
if proc.abi is not None:
baseno, minno, _ = self.syscall_abis[proc.abi]
mapno = number - minno + baseno
else:
mapno = self.unknown_syscall_number
proc.addr = mapno * self.syscall_addr_alignment + self.kernel_base
return proc |
<SYSTEM_TASK:>
Convert from a Windows memory protection constant to an angr bitmask.
<END_TASK>
<USER_TASK:>
Description:
def convert_prot(prot):
"""
Convert from a Windows memory protection constant to an angr bitmask.
""" |
# https://msdn.microsoft.com/en-us/library/windows/desktop/aa366786(v=vs.85).aspx
if prot & 0x10:
return 4
if prot & 0x20:
return 5
if prot & 0x40:
return 7
if prot & 0x80:
return 7
if prot & 0x01:
return 0
if prot & 0x02:
return 1
if prot & 0x04:
return 3
if prot & 0x08:
return 3
raise angr.errors.SimValueError("Unknown windows memory protection constant: %#x" % prot) |
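To make the mapping concrete, a self-contained sketch using a few of the PAGE_* constants from the MSDN page cited above (the return values imply the bitmask convention r=1, w=2, x=4; assumes convert_prot above is in scope):

PAGE_NOACCESS          = 0x01   # -> 0 (---)
PAGE_READONLY          = 0x02   # -> 1 (r--)
PAGE_READWRITE         = 0x04   # -> 3 (rw-)
PAGE_EXECUTE_READ      = 0x20   # -> 5 (r-x)
PAGE_EXECUTE_READWRITE = 0x40   # -> 7 (rwx)

for prot in (PAGE_NOACCESS, PAGE_READONLY, PAGE_READWRITE,
             PAGE_EXECUTE_READ, PAGE_EXECUTE_READWRITE):
    print(hex(prot), convert_prot(prot))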
<SYSTEM_TASK:>
Add a new definition of variable.
<END_TASK>
<USER_TASK:>
Description:
def add_def(self, variable, location, size_threshold=32):
"""
Add a new definition of variable.
:param SimVariable variable: The variable being defined.
:param CodeLocation location: Location of the variable being defined.
:param int size_threshold: The maximum bytes to consider for the variable.
:return: True if the definition was new, False otherwise
:rtype: bool
""" |
new_defs_added = False
if isinstance(variable, SimRegisterVariable):
if variable.reg is None:
l.warning('add_def: Got a None for a SimRegisterVariable. Consider fixing.')
return new_defs_added
size = min(variable.size, size_threshold)
offset = variable.reg
while offset < variable.reg + size:
if location not in self._register_map[offset]:
new_defs_added = True
self._register_map[offset].add(location)
offset += 1
self._defs[variable].add(location)
elif isinstance(variable, SimMemoryVariable):
size = min(variable.size, size_threshold)
offset = variable.addr
while offset < variable.addr + size:
if location not in self._memory_map[offset]:
new_defs_added = True
self._memory_map[offset].add(location)
offset += 1
self._defs[variable].add(location)
else:
l.error('Unsupported variable type "%s".', type(variable))
return new_defs_added |
<SYSTEM_TASK:>
Add a collection of new definitions of a variable.
<END_TASK>
<USER_TASK:>
Description:
def add_defs(self, variable, locations, size_threshold=32):
"""
Add a collection of new definitions of a variable.
:param SimVariable variable: The variable being defined.
:param iterable locations: A collection of locations where the variable was defined.
:param int size_threshold: The maximum bytes to consider for the variable.
:return: True if any of the definition was new, False otherwise
:rtype: bool
""" |
new_defs_added = False
for loc in locations:
new_defs_added |= self.add_def(variable, loc, size_threshold=size_threshold)
return new_defs_added |
<SYSTEM_TASK:>
Add a new definition for variable and kill all previous definitions.
<END_TASK>
<USER_TASK:>
Description:
def kill_def(self, variable, location, size_threshold=32):
"""
Add a new definition for variable and kill all previous definitions.
:param SimVariable variable: The variable to kill.
:param CodeLocation location: The location where this variable is defined.
:param int size_threshold: The maximum bytes to consider for the variable.
:return: None
""" |
if isinstance(variable, SimRegisterVariable):
if variable.reg is None:
l.warning('kill_def: Got a None for a SimRegisterVariable. Consider fixing.')
return None
size = min(variable.size, size_threshold)
offset = variable.reg
while offset < variable.reg + size:
self._register_map[offset] = { location }
offset += 1
self._defs[variable] = { location }
elif isinstance(variable, SimMemoryVariable):
size = min(variable.size, size_threshold)
offset = variable.addr
while offset < variable.addr + size:
self._memory_map[offset] = { location }
offset += 1
self._defs[variable] = { location }
else:
l.error('Unsupported variable type "%s".', type(variable)) |
<SYSTEM_TASK:>
Find all definitions of the variable.
<END_TASK>
<USER_TASK:>
Description:
def lookup_defs(self, variable, size_threshold=32):
"""
Find all definitions of the variable.
:param SimVariable variable: The variable to look up.
:param int size_threshold: The maximum bytes to consider for the variable. For example, if the variable is 100
byte long, only the first `size_threshold` bytes are considered.
:return: A set of code locations where the variable is defined.
:rtype: set
""" |
live_def_locs = set()
if isinstance(variable, SimRegisterVariable):
if variable.reg is None:
l.warning('lookup_defs: Got a None for a SimRegisterVariable. Consider fixing.')
return live_def_locs
size = min(variable.size, size_threshold)
offset = variable.reg
while offset < variable.reg + size:
if offset in self._register_map:
live_def_locs |= self._register_map[offset]
offset += 1
elif isinstance(variable, SimMemoryVariable):
size = min(variable.size, size_threshold)
offset = variable.addr
while offset < variable.addr + size:
if offset in self._memory_map:
live_def_locs |= self._memory_map[offset]
offset += 1
else:
# umm unsupported variable type
l.error('Unsupported variable type "%s".', type(variable))
return live_def_locs |
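The three methods above share one idea: a byte-granular map from covered offsets to sets of defining locations. A self-contained sketch of that core (names and offsets are illustrative):

from collections import defaultdict

register_map = defaultdict(set)

def add_def(offset, size, location):
    # mark every byte the definition covers
    for byte in range(offset, offset + size):
        register_map[byte].add(location)

def lookup_defs(offset, size):
    # union the defining locations of every covered byte
    live = set()
    for byte in range(offset, offset + size):
        live |= register_map.get(byte, set())
    return live

add_def(16, 8, 'loc_A')     # define an 8-byte register at offset 16
add_def(20, 4, 'loc_B')     # overlapping 4-byte write
print(lookup_defs(16, 8))   # {'loc_A', 'loc_B'}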
<SYSTEM_TASK:>
Convert a ProgramVariable instance to a DDGViewItem object.
<END_TASK>
<USER_TASK:>
Description:
def _to_viewitem(self, prog_var):
"""
Convert a ProgramVariable instance to a DDGViewItem object.
:param ProgramVariable prog_var: The ProgramVariable object to convert.
:return: The converted DDGViewItem object.
:rtype: DDGViewItem
""" |
return DDGViewItem(self._ddg, prog_var, simplified=self._simplified) |
<SYSTEM_TASK:>
Get all definitions located at the current instruction address.
<END_TASK>
<USER_TASK:>
Description:
def definitions(self):
"""
Get all definitions located at the current instruction address.
:return: A list of ProgramVariable instances.
:rtype: list
""" |
defs = set()
if self._simplified:
graph = self._ddg.simplified_data_graph
else:
graph = self._ddg.data_graph
for n in graph.nodes(): # type: ProgramVariable
if n.location.ins_addr == self._insn_addr:
defs.add(DDGViewItem(self._ddg, n, simplified=self._simplified))
return list(defs) |
<SYSTEM_TASK:>
Get a dependency graph for the function `func`.
<END_TASK>
<USER_TASK:>
Description:
def function_dependency_graph(self, func):
"""
Get a dependency graph for the function `func`.
:param func: The Function object in CFG.function_manager.
:returns: A networkx.DiGraph instance.
""" |
if self._function_data_dependencies is None:
self._build_function_dependency_graphs()
if func in self._function_data_dependencies:
return self._function_data_dependencies[func]
# Not found
return None |
<SYSTEM_TASK:>
Get a subgraph from the data graph or the simplified data graph that starts from node pv.
<END_TASK>
<USER_TASK:>
Description:
def data_sub_graph(self, pv, simplified=True, killing_edges=False, excluding_types=None):
"""
Get a subgraph from the data graph or the simplified data graph that starts from node pv.
:param ProgramVariable pv: The starting point of the subgraph.
:param bool simplified: When True, the simplified data graph is used, otherwise the data graph is used.
:param bool killing_edges: Whether killing edges are included.
:param iterable excluding_types: Excluding edges whose types are among those excluded types.
:return: A subgraph.
:rtype: networkx.MultiDiGraph
""" |
result = networkx.MultiDiGraph()
result.add_node(pv)
base_graph = self.simplified_data_graph if simplified else self.data_graph
if pv not in base_graph:
return result
# traverse all edges and add them to the result graph if needed
queue = [ pv ]
traversed = set()
while queue:
elem = queue[0]
queue = queue[1:]
if elem in traversed:
continue
traversed.add(elem)
out_edges = base_graph.out_edges(elem, data=True)
if not killing_edges:
# remove killing edges
out_edges = [ (a, b, data) for a, b, data in out_edges if 'type' not in data or data['type'] != 'kill']
if excluding_types:
out_edges = [ (a, b, data) for a, b, data in out_edges if
'type' not in data or data['type'] not in excluding_types
]
for src, dst, data in out_edges:
result.add_edge(src, dst, **data)
if dst not in traversed:
queue.append(dst)
return result |
<SYSTEM_TASK:>
This is a backward lookup in the previous defs. Note that, as we are using VSA, it is possible that `variable`
<END_TASK>
<USER_TASK:>
Description:
def _def_lookup(self, variable): # pylint:disable=no-self-use
"""
This is a backward lookup in the previous defs. Note that, as we are using VSA, it is possible that `variable`
is affected by several definitions.
:param SimVariable variable: The variable to look up definitions for.
:returns: A dict {code_location: label} that maps each code location defining `variable` to a label dict
recording the definition type ('mem' or 'reg') and the variable itself.
""" |
prevdefs = {}
for code_loc in self._live_defs.lookup_defs(variable):
# Label edges with cardinality or actual sets of addresses
if isinstance(variable, SimMemoryVariable):
type_ = 'mem'
elif isinstance(variable, SimRegisterVariable):
type_ = 'reg'
else:
raise AngrDDGError('Unknown variable type %s' % type(variable))
prevdefs[code_loc] = {
'type': type_,
'data': variable
}
return prevdefs |
<SYSTEM_TASK:>
Get the size of a register.
<END_TASK>
<USER_TASK:>
Description:
def _get_register_size(self, reg_offset):
"""
Get the size of a register.
:param int reg_offset: Offset of the register.
:return: Size in bytes.
:rtype: int
""" |
# TODO: support registers that are not aligned
if reg_offset in self.project.arch.register_names:
reg_name = self.project.arch.register_names[reg_offset]
reg_size = self.project.arch.registers[reg_name][1]
return reg_size
l.warning("_get_register_size(): unsupported register offset %d. Assum size 1. "
"More register name mappings should be implemented in archinfo.", reg_offset)
return 1 |
<SYSTEM_TASK:>
For memory actions, get a list of addresses it operates on.
<END_TASK>
<USER_TASK:>
Description:
def _get_actual_addrs(action, state):
"""
For memory actions, get a list of addresses it operates on.
:param SimAction action: The action object to work with.
:return: A list of addresses that are accessed with that action.
:rtype: list
""" |
if action.actual_addrs is None:
# For now, mem reads don't necessarily have actual_addrs set properly
try:
addr_list = {state.solver.eval(action.addr.ast)}
except (SimSolverModeError, SimUnsatError, ZeroDivisionError):
# FIXME: ZeroDivisionError should have been caught by claripy and simuvex.
# FIXME: see claripy issue #75. this is just a temporary workaround.
# it's symbolic... just continue
addr_list = {0x60000000} # TODO: this is a random address that I pick. Fix it.
else:
addr_list = set(action.actual_addrs)
return addr_list |
<SYSTEM_TASK:>
Create a SimStackVariable or SimMemoryVariable based on an action object and its address.
<END_TASK>
<USER_TASK:>
Description:
def _create_memory_variable(self, action, addr, addrs):
"""
Create a SimStackVariable or SimMemoryVariable based on an action object and its address.
:param SimAction action: The action to work with.
:param int addr: The address of the memory variable in creation.
:param list addrs: A list of all addresses that the action was effective on.
:return: The variable that was created.
""" |
variable = None
if len(addrs) == 1 and len(action.addr.tmp_deps) == 1:
addr_tmp = list(action.addr.tmp_deps)[0]
if addr_tmp in self._temp_register_symbols:
# it must be a stack variable
sort, offset = self._temp_register_symbols[addr_tmp]
variable = SimStackVariable(offset, action.size.ast // 8, base=sort, base_addr=addr - offset)
if variable is None:
variable = SimMemoryVariable(addr, action.size.ast // 8)
return variable |
<SYSTEM_TASK:>
Add an edge in the data dependence graph.
<END_TASK>
<USER_TASK:>
Description:
def _data_graph_add_edge(self, src, dst, **edge_labels):
"""
Add an edge in the data dependence graph.
:param ProgramVariable src: Source node.
:param ProgramVariable dst: Destination node.
:param edge_labels: All labels associated with the edge.
:return: None
""" |
if src in self._data_graph and dst in self._data_graph[src]:
return
self._data_graph.add_edge(src, dst, **edge_labels)
self._simplified_data_graph = None |
<SYSTEM_TASK:>
Add an edge in the statement dependence graph from a program location `src` to another program location `dst`.
<END_TASK>
<USER_TASK:>
Description:
def _stmt_graph_add_edge(self, src, dst, **edge_labels):
"""
Add an edge in the statement dependence graph from a program location `src` to another program location `dst`.
:param CodeLocation src: Source node.
:param CodeLocation dst: Destination node.
:param edge_labels: All labels associated with the edge.
:returns: None
""" |
# Is that edge already in the graph ?
# If at least one is new, then we are not redoing the same path again
if src in self._stmt_graph and dst in self._stmt_graph[src]:
return
self._stmt_graph.add_edge(src, dst, **edge_labels) |
<SYSTEM_TASK:>
Add new annotations to edges in the statement dependence graph.
<END_TASK>
<USER_TASK:>
Description:
def _stmt_graph_annotate_edges(self, edges_to_annotate, **new_labels):
"""
Add new annotations to edges in the statement dependence graph.
:param list edges_to_annotate: A list of edges to annotate.
:param new_labels: New labels to be added to those edges.
:returns: None
""" |
graph = self.graph
for src, dst in edges_to_annotate:
if src not in graph:
continue
if dst not in graph[src]:
continue
data = graph[src][dst]
for k, v in new_labels.items():
if k in data:
if v not in data[k]:
data[k] = data[k] + (v,)
else:
# Construct a tuple
data[k] = (v,) |
<SYSTEM_TASK:>
Simplify a data graph by removing all temp variable nodes on the graph.
<END_TASK>
<USER_TASK:>
Description:
def _simplify_data_graph(self, data_graph): # pylint:disable=no-self-use
"""
Simplify a data graph by removing all temp variable nodes on the graph.
:param networkx.DiGraph data_graph: The data dependence graph to simplify.
:return: The simplified graph.
:rtype: networkx.MultiDiGraph
""" |
graph = networkx.MultiDiGraph(data_graph)
all_nodes = [ n for n in graph.nodes() if isinstance(n.variable, SimTemporaryVariable) ]
for tmp_node in all_nodes:
# remove each tmp node by linking their successors and predecessors directly
# materialize the edge views first: they would otherwise shrink under the removals below
in_edges = list(graph.in_edges(tmp_node, data=True))
out_edges = list(graph.out_edges(tmp_node, data=True))
for pred, _, _ in in_edges:
graph.remove_edge(pred, tmp_node)
for _, suc, _ in out_edges:
graph.remove_edge(tmp_node, suc)
for pred, _, data_in in in_edges:
for _, suc, data_out in out_edges:
if pred is not tmp_node and suc is not tmp_node:
if suc not in graph[pred]:
data = data_in.copy()
data.update(data_out)
graph.add_edge(pred, suc, **data)
graph.remove_node(tmp_node)
return graph |
<SYSTEM_TASK:>
Append a CFGNode and its successors into the work-list, and respect the call-depth limit
<END_TASK>
<USER_TASK:>
Description:
def _worklist_append(self, node_wrapper, worklist, worklist_set):
"""
Append a CFGNode and its successors into the work-list, and respect the call-depth limit
:param node_wrapper: The NodeWrapper instance to insert.
:param worklist: The work-list, which is a list.
:param worklist_set: A set of all CFGNodes that are inside the work-list, just for the sake of fast look-up.
It will be updated as well.
:returns: A set of newly-inserted CFGNodes (not NodeWrapper instances).
""" |
if node_wrapper.cfg_node in worklist_set:
# It's already in the work-list
return set()
worklist.append(node_wrapper)
worklist_set.add(node_wrapper.cfg_node)
stack = [ node_wrapper ]
traversed_nodes = { node_wrapper.cfg_node }
inserted = { node_wrapper.cfg_node }
while stack:
nw = stack.pop()
n, call_depth = nw.cfg_node, nw.call_depth
# Get successors
edges = self._cfg.graph.out_edges(n, data=True)
for _, dst, data in edges:
if (dst not in traversed_nodes # which means we haven't touch this node in this appending procedure
and dst not in worklist_set): # which means this node is not in the work-list
# We see a new node!
traversed_nodes.add(dst)
if data['jumpkind'] == 'Ijk_Call':
if self._call_depth is None or call_depth < self._call_depth:
inserted.add(dst)
new_nw = DDGJob(dst, call_depth + 1)
worklist.append(new_nw)
worklist_set.add(dst)
stack.append(new_nw)
elif data['jumpkind'] == 'Ijk_Ret':
if call_depth > 0:
inserted.add(dst)
new_nw = DDGJob(dst, call_depth - 1)
worklist.append(new_nw)
worklist_set.add(dst)
stack.append(new_nw)
else:
new_nw = DDGJob(dst, call_depth)
inserted.add(dst)
worklist_set.add(dst)
worklist.append(new_nw)
stack.append(new_nw)
return inserted |
<SYSTEM_TASK:>
Build dependency graphs for each function, and save them in self._function_data_dependencies.
<END_TASK>
<USER_TASK:>
Description:
def _build_function_dependency_graphs(self):
"""
Build dependency graphs for each function, and save them in self._function_data_dependencies.
""" |
# This is a map between functions and its corresponding dependencies
self._function_data_dependencies = defaultdict(networkx.DiGraph)
# Group all dependencies first
block_addr_to_func = { }
for _, func in self.kb.functions.items():
for block in func.blocks:
block_addr_to_func[block.addr] = func
for src, dst, data in self.graph.edges(data=True):
src_target_func = None
if src.block_addr in block_addr_to_func:
src_target_func = block_addr_to_func[src.block_addr]
self._function_data_dependencies[src_target_func].add_edge(src, dst, **data)
if dst.block_addr in block_addr_to_func:
dst_target_func = block_addr_to_func[dst.block_addr]
if dst_target_func is not src_target_func:
self._function_data_dependencies[dst_target_func].add_edge(src, dst, **data) |
<SYSTEM_TASK:>
If we are not tracing into the functions that are called in a real execution, we should properly filter the defs
<END_TASK>
<USER_TASK:>
Description:
def _filter_defs_at_call_sites(self, defs):
"""
If we are not tracing into the functions that are called in a real execution, we should properly filter the defs
to account for the behavior of the skipped function at this call site.
This function is a WIP. See TODOs inside.
:param defs:
:return:
""" |
# TODO: make definition killing architecture independent and calling convention independent
# TODO: use information from a calling convention analysis
filtered_defs = LiveDefinitions()
for variable, locs in defs.items():
if isinstance(variable, SimRegisterVariable):
if self.project.arch.name == 'X86':
if variable.reg in (self.project.arch.registers['eax'][0],
self.project.arch.registers['ecx'][0],
self.project.arch.registers['edx'][0]):
continue
filtered_defs.add_defs(variable, locs)
return filtered_defs |
<SYSTEM_TASK:>
Find all definitions of the given variable.
<END_TASK>
<USER_TASK:>
Description:
def find_definitions(self, variable, location=None, simplified_graph=True):
"""
Find all definitions of the given variable.
:param SimVariable variable: The variable to find definitions of.
:param location: If given, only definitions within the same block as this location are returned.
:param bool simplified_graph: True if you just want to search in the simplified graph instead of the normal
graph. Usually the simplified graph suffices for finding definitions of register
or memory variables.
:return: A collection of all variable definitions to the specific variable.
:rtype: list
""" |
if simplified_graph:
graph = self.simplified_data_graph
else:
graph = self.data_graph
defs = []
for n in graph.nodes(): # type: ProgramVariable
if n.variable == variable:
if location is None:
defs.append(n)
else:
# TODO: finish this part
if n.location.block_addr == location.block_addr:
defs.append(n)
return defs |
<SYSTEM_TASK:>
Find all consumers of the specified variable definition.
<END_TASK>
<USER_TASK:>
Description:
def find_consumers(self, var_def, simplified_graph=True):
"""
Find all consumers of the specified variable definition.
:param ProgramVariable var_def: The variable definition.
:param bool simplified_graph: True if we want to search in the simplified graph, False otherwise.
:return: A collection of all consumers of the specified variable definition.
:rtype: list
""" |
if simplified_graph:
graph = self.simplified_data_graph
else:
graph = self.data_graph
if var_def not in graph:
return []
consumers = []
srcs = [var_def]
traversed = set()
while srcs:
src = srcs.pop()
out_edges = graph.out_edges(src, data=True)
for _, dst, data in out_edges:
if 'type' in data and data['type'] == 'kill':
# skip killing edges
continue
if isinstance(dst.variable, SimTemporaryVariable):
if dst not in traversed:
srcs.append(dst)
traversed.add(dst)
else:
if dst not in consumers:
consumers.append(dst)
return consumers |
<SYSTEM_TASK:>
Find all killers of the specified variable definition.
<END_TASK>
<USER_TASK:>
Description:
def find_killers(self, var_def, simplified_graph=True):
"""
Find all killers of the specified variable definition.
:param ProgramVariable var_def: The variable definition.
:param bool simplified_graph: True if we want to search in the simplified graph, False otherwise.
:return: A collection of all killers of the specified variable definition.
:rtype: list
""" |
if simplified_graph:
graph = self.simplified_data_graph
else:
graph = self.data_graph
if var_def not in graph:
return []
killers = []
out_edges = graph.out_edges(var_def, data=True)
for _, dst, data in out_edges:
if 'type' in data and data['type'] == 'kill':
killers.append(dst)
return killers |
<SYSTEM_TASK:>
Find all sources of the specified variable definition.
<END_TASK>
<USER_TASK:>
Description:
def find_sources(self, var_def, simplified_graph=True):
"""
Find all sources of the specified variable definition.
:param ProgramVariable var_def: The variable definition.
:param bool simplified_graph: True if we want to search in the simplified graph, False otherwise.
:return: A collection of all sources of the specified variable definition.
:rtype: list
""" |
if simplified_graph:
graph = self.simplified_data_graph
else:
graph = self.data_graph
if var_def not in graph:
return []
sources = []
defs = [ var_def ]
traversed = set()
while defs:
definition = defs.pop()
in_edges = graph.in_edges(definition, data=True)
for src, _, data in in_edges:
if 'type' in data and data['type'] == 'kill':
continue
if isinstance(src.variable, SimTemporaryVariable):
if src not in traversed:
defs.append(src)
traversed.add(src)
else:
if src not in sources:
sources.append(src)
return sources |
<SYSTEM_TASK:>
Allocates a new multi array in memory and returns the reference to the base.
<END_TASK>
<USER_TASK:>
Description:
def new_array(state, element_type, size, default_value_generator=None):
"""
Allocates a new multi array in memory and returns the reference to the base.
""" |
size_bounded = SimSootExpr_NewMultiArray._bound_multi_array_size(state, size)
# return the reference of the array base
# => elements getting lazy initialized in the javavm memory
return SimSootValue_ArrayBaseRef(heap_alloc_id=state.javavm_memory.get_new_uuid(),
element_type=element_type,
size=size_bounded,
default_value_generator=default_value_generator) |
<SYSTEM_TASK:>
Gets the minimum solution of an address.
<END_TASK>
<USER_TASK:>
Description:
def _min(self, memory, addr, **kwargs):
"""
Gets the minimum solution of an address.
""" |
return memory.state.solver.min(addr, exact=kwargs.pop('exact', self._exact), **kwargs) |
<SYSTEM_TASK:>
Gets the maximum solution of an address.
<END_TASK>
<USER_TASK:>
Description:
def _max(self, memory, addr, **kwargs):
"""
Gets the maximum solution of an address.
""" |
return memory.state.solver.max(addr, exact=kwargs.pop('exact', self._exact), **kwargs) |
<SYSTEM_TASK:>
Gets any solution of an address.
<END_TASK>
<USER_TASK:>
Description:
def _any(self, memory, addr, **kwargs):
"""
Gets any solution of an address.
""" |
return memory.state.solver.eval(addr, exact=kwargs.pop('exact', self._exact), **kwargs) |
<SYSTEM_TASK:>
Gets n solutions for an address.
<END_TASK>
<USER_TASK:>
Description:
def _eval(self, memory, addr, n, **kwargs):
"""
Gets n solutions for an address.
""" |
return memory.state.solver.eval_upto(addr, n, exact=kwargs.pop('exact', self._exact), **kwargs) |
<SYSTEM_TASK:>
Concretizes the address into a list of values.
<END_TASK>
<USER_TASK:>
Description:
def concretize(self, memory, addr):
"""
Concretizes the address into a list of values.
If this strategy cannot handle this address, returns None.
""" |
if self._filter is None or self._filter(memory, addr):
return self._concretize(memory, addr) |
<SYSTEM_TASK:>
Given a local transition graph of a function, find all merge points inside, and then perform a
<END_TASK>
<USER_TASK:>
Description:
def find_merge_points(function_addr, function_endpoints, graph): # pylint:disable=unused-argument
"""
Given a local transition graph of a function, find all merge points inside, and then perform a
quasi-topological sort of those merge points.
A merge point might be one of the following cases:
- two or more paths come together and end at the same address.
- end of the current function
:param int function_addr: Address of the function.
:param list function_endpoints: Endpoints of the function. They typically come from Function.endpoints.
:param networkx.DiGraph graph: A local transition graph of a function. Normally it comes from Function.graph.
:return: A list of ordered addresses of merge points.
:rtype: list
""" |
merge_points = set()
for node in graph.nodes():
if graph.in_degree(node) > 1:
merge_points.add(node)
ordered_merge_points = CFGUtils.quasi_topological_sort_nodes(graph, merge_points)
addrs = [n.addr for n in ordered_merge_points]
return addrs |
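A self-contained illustration of the criterion above on a toy diamond: any node with in-degree greater than one is a merge point (node names are illustrative):

import networkx

g = networkx.DiGraph()
g.add_edges_from([('entry', 'then'), ('entry', 'else'),
                  ('then', 'join'), ('else', 'join'), ('join', 'ret')])
merge_points = [n for n in g.nodes() if g.in_degree(n) > 1]
print(merge_points)  # ['join']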
<SYSTEM_TASK:>
Given a local transition graph of a function, find all widening points inside.
<END_TASK>
<USER_TASK:>
Description:
def find_widening_points(function_addr, function_endpoints, graph): # pylint: disable=unused-argument
"""
Given a local transition graph of a function, find all widening points inside.
Correctly choosing widening points is very important in order to not lose too much information during static
analysis. We mainly consider merge points that have at least one loop back edge coming in as widening points.
:param int function_addr: Address of the function.
:param list function_endpoints: Endpoints of the function, typically coming from Function.endpoints.
:param networkx.DiGraph graph: A local transition graph of a function, normally Function.graph.
:return: A list of addresses of widening points.
:rtype: list
""" |
sccs = networkx.strongly_connected_components(graph)
widening_addrs = set()
for scc in sccs:
if len(scc) == 1:
node = next(iter(scc))
if graph.has_edge(node, node):
# self loop
widening_addrs.add(node.addr)
else:
for n in scc:
predecessors = graph.predecessors(n)
if any(p not in scc for p in predecessors):
widening_addrs.add(n.addr)
break
return list(widening_addrs) |
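A self-contained look at the SCC-based criterion: within a loop (a non-trivial strongly connected component), a node with a predecessor outside the component is chosen as the widening point (toy graph, illustrative):

import networkx

g = networkx.DiGraph()
g.add_edges_from([(1, 2), (2, 3), (3, 2), (3, 4)])  # 2 <-> 3 form a loop

for scc in networkx.strongly_connected_components(g):
    if len(scc) > 1 or any(g.has_edge(n, n) for n in scc):
        heads = [n for n in scc if any(p not in scc for p in g.predecessors(n))]
        print(sorted(scc), '-> widening point(s):', heads)  # [2, 3] -> [2]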
<SYSTEM_TASK:>
Sort a given set of nodes in reverse post ordering.
<END_TASK>
<USER_TASK:>
Description:
def reverse_post_order_sort_nodes(graph, nodes=None):
"""
Sort a given set of nodes in reverse post ordering.
:param networkx.DiGraph graph: A local transition graph of a function.
:param iterable nodes: A collection of nodes to sort.
:return: A list of sorted nodes.
:rtype: list
""" |
post_order = networkx.dfs_postorder_nodes(graph)
if nodes is None:
return reversed(list(post_order))
addrs_to_index = {}
for i, n in enumerate(post_order):
addrs_to_index[n.addr] = i
return sorted(nodes, key=lambda n: addrs_to_index[n.addr], reverse=True) |
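A self-contained sketch of reverse post-order on a toy DAG (with this edge insertion order, the DFS visits 2 before 3):

import networkx

g = networkx.DiGraph([(1, 2), (1, 3), (2, 4), (3, 4)])
post = list(networkx.dfs_postorder_nodes(g, source=1))
print(post)                  # [4, 2, 3, 1]
print(list(reversed(post)))  # [1, 3, 2, 4] - a topological order: each node precedes its successors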
<SYSTEM_TASK:>
Append all nodes from a strongly connected component to a list of ordered nodes and ensure the topological
<END_TASK>
<USER_TASK:>
Description:
def _append_scc(graph, ordered_nodes, scc):
"""
Append all nodes from a strongly connected component to a list of ordered nodes and ensure the topological
order.
:param networkx.DiGraph graph: The graph where all nodes belong to.
:param list ordered_nodes: Ordered nodes.
:param iterable scc: A set of nodes that forms a strongly connected component in the graph.
:return: None
""" |
# find the first node in the strongly connected component that is the successor to any node in ordered_nodes
loop_head = None
for parent_node in reversed(ordered_nodes):
for n in scc:
if n in graph[parent_node]:
loop_head = n
break
if loop_head is not None:
break
if loop_head is None:
# randomly pick one
loop_head = next(iter(scc))
subgraph = graph.subgraph(scc).copy() # type: networkx.DiGraph
for src, _ in list(subgraph.in_edges(loop_head)):
subgraph.remove_edge(src, loop_head)
ordered_nodes.extend(CFGUtils.quasi_topological_sort_nodes(subgraph)) |
<SYSTEM_TASK:>
Mulpyplex across several stashes.
<END_TASK>
<USER_TASK:>
Description:
def mulpyplex(self, *stashes):
"""
Mulpyplex across several stashes.
:param stashes: the stashes to mulpyplex
:return: a mulpyplexed list of states from the stashes in question, in the specified order
""" |
return mulpyplexer.MP(list(itertools.chain.from_iterable(self._stashes[s] for s in stashes))) |
<SYSTEM_TASK:>
Make a copy of this simulation manager. Pass ``deep=True`` to copy all the states in it as well.
<END_TASK>
<USER_TASK:>
Description:
def copy(self, deep=False): # pylint: disable=arguments-differ
"""
Make a copy of this simulation manager. Pass ``deep=True`` to copy all the states in it as well.
""" |
simgr = SimulationManager(self._project,
stashes=self._copy_stashes(deep=deep),
hierarchy=self._hierarchy,
resilience=self._resilience,
auto_drop=self._auto_drop,
completion_mode=self.completion_mode,
errored=self._errored)
return simgr |
<SYSTEM_TASK:>
Use an exploration technique with this SimulationManager.
<END_TASK>
<USER_TASK:>
Description:
def use_technique(self, tech):
"""
Use an exploration technique with this SimulationManager.
Techniques can be found in :mod:`angr.exploration_techniques`.
:param tech: An ExplorationTechnique object that contains code to modify
this SimulationManager's behavior.
:type tech: ExplorationTechnique
:return: The technique that was added, for convenience
""" |
if not isinstance(tech, ExplorationTechnique):
raise SimulationManagerError
# XXX: as promised
tech.project = self._project
tech.setup(self)
HookSet.install_hooks(self, **tech._get_hooks())
self._techniques.append(tech)
return tech |
<SYSTEM_TASK:>
Remove an exploration technique from a list of active techniques.
<END_TASK>
<USER_TASK:>
Description:
def remove_technique(self, tech):
"""
Remove an exploration technique from a list of active techniques.
:param tech: An ExplorationTechnique object.
:type tech: ExplorationTechnique
""" |
if not isinstance(tech, ExplorationTechnique):
raise SimulationManagerError
def _is_overriden(name):
return getattr(tech, name).__code__ is not getattr(ExplorationTechnique, name).__code__
overriden = filter(_is_overriden, ('step', 'filter', 'selector', 'step_state', 'successors'))
hooks = {name: getattr(tech, name) for name in overriden}
HookSet.remove_hooks(self, **hooks)
self._techniques.remove(tech)
return tech |
<SYSTEM_TASK:>
Run until the SimulationManager has reached a completed state, according to
<END_TASK>
<USER_TASK:>
Description:
def run(self, stash='active', n=None, until=None, **kwargs):
"""
Run until the SimulationManager has reached a completed state, according to
the current exploration techniques. If no exploration techniques that define a completion
state are being used, run until there is nothing left to run.
:param stash: Operate on this stash
:param n: Step at most this many times
:param until: If provided, should be a function that takes a SimulationManager and
returns True or False. Stepping will terminate when it is True.
:return: The simulation manager, for chaining.
:rtype: SimulationManager
""" |
for _ in (itertools.count() if n is None else range(0, n)):
if not self.complete() and self._stashes[stash]:
self.step(stash=stash, **kwargs)
if not (until and until(self)):
continue
break
return self |
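For example (a sketch assuming `simgr` already has active states), `run` subsumes the deprecated `n`/`until` arguments of `step`:

# step at most 100 times, stopping early once any state deadends
simgr.run(n=100, until=lambda sm: len(sm.deadended) > 0)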
<SYSTEM_TASK:>
Returns whether or not this manager has reached a "completed" state.
<END_TASK>
<USER_TASK:>
Description:
def complete(self):
"""
Returns whether or not this manager has reached a "completed" state.
""" |
if not self._techniques:
return False
if not any(tech._is_overriden('complete') for tech in self._techniques):
return False
return self.completion_mode(tech.complete(self) for tech in self._techniques if tech._is_overriden('complete')) |
<SYSTEM_TASK:>
Step a stash of states forward and categorize the successors appropriately.
<END_TASK>
<USER_TASK:>
Description:
def step(self, stash='active', n=None, selector_func=None, step_func=None,
successor_func=None, until=None, filter_func=None, **run_args):
"""
Step a stash of states forward and categorize the successors appropriately.
The parameters to this function allow you to control everything about the stepping and
categorization process.
:param stash: The name of the stash to step (default: 'active')
:param selector_func: If provided, should be a function that takes a state and returns a
boolean. If True, the state will be stepped. Otherwise, it will be
kept as-is.
:param step_func: If provided, should be a function that takes a SimulationManager and
returns a SimulationManager. Will be called with the SimulationManager
at every step. Note that this function should not actually perform any
stepping - it is meant to be a maintenance function called after each step.
:param successor_func: If provided, should be a function that takes a state and return its successors.
Otherwise, project.factory.successors will be used.
:param filter_func: If provided, should be a function that takes a state and return the name
of the stash, to which the state should be moved.
:param until: (DEPRECATED) If provided, should be a function that takes a SimulationManager and
returns True or False. Stepping will terminate when it is True.
:param n: (DEPRECATED) The number of times to step (default: 1 if "until" is not provided)
Additionally, you can pass in any of the following keyword args for project.factory.successors:
:param jumpkind: The jumpkind of the previous exit
:param addr: An address to execute at instead of the state's ip.
:param stmt_whitelist: A list of stmt indexes to which to confine execution.
:param last_stmt: A statement index at which to stop execution.
:param thumb: Whether the block should be lifted in ARM's THUMB mode.
:param backup_state: A state to read bytes from instead of using project memory.
:param opt_level: The VEX optimization level to use.
:param insn_bytes: A string of bytes to use for the block instead of the project.
:param size: The maximum size of the block, in bytes.
:param num_inst: The maximum number of instructions.
:param traceflags: traceflags to be passed to VEX. Default: 0
:returns: The simulation manager, for chaining.
:rtype: SimulationManager
""" |
l.info("Stepping %s of %s", stash, self)
# 8<----------------- Compatibility layer -----------------
if n is not None or until is not None:
if once('simgr_step_n_until'):
print("\x1b[31;1mDeprecation warning: the use of `n` and `until` arguments is deprecated. "
"Consider using simgr.run() with the same arguments if you want to specify "
"a number of steps or an additional condition on when to stop the execution.\x1b[0m")
return self.run(stash, n, until, selector_func=selector_func, step_func=step_func,
successor_func=successor_func, filter_func=filter_func, **run_args)
# ------------------ Compatibility layer ---------------->8
bucket = defaultdict(list)
for state in self._fetch_states(stash=stash):
goto = self.filter(state, filter_func=filter_func)
if isinstance(goto, tuple):
goto, state = goto
if goto not in (None, stash):
bucket[goto].append(state)
continue
if not self.selector(state, selector_func=selector_func):
bucket[stash].append(state)
continue
pre_errored = len(self._errored)
successors = self.step_state(state, successor_func=successor_func, **run_args)
# handle degenerate stepping cases here. desired behavior:
# if a step produced only unsat states, always add them to the unsat stash since this usually indicates a bug
# if a step produced sat states and save_unsat is False, drop the unsats
# if a step produced no successors, period, add the original state to deadended
# first check if anything happened besides unsat. that gates all this behavior
if not any(v for k, v in successors.items() if k != 'unsat') and len(self._errored) == pre_errored:
# then check if there were some unsats
if successors.get('unsat', []):
# only unsats. current setup is acceptable.
pass
else:
# no unsats. we've deadended.
bucket['deadended'].append(state)
continue
else:
# there were sat states. it's okay to drop the unsat ones if the user said so.
if not self._save_unsat:
successors.pop('unsat', None)
for to_stash, successor_states in successors.items():
bucket[to_stash or stash].extend(successor_states)
self._clear_states(stash=stash)
for to_stash, states in bucket.items():
self._store_states(to_stash or stash, states)
if step_func is not None:
return step_func(self)
return self |
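A sketch of the three hook points (the addresses are illustrative): selector_func gates which states are stepped, filter_func re-routes states before stepping, and step_func performs maintenance afterwards:

simgr.step(
    selector_func=lambda s: s.addr != 0x400000,                     # park this state as-is
    filter_func=lambda s: 'found' if s.addr == 0x401234 else None,  # None keeps the default stash
    step_func=lambda sm: sm.drop(stash='unconstrained'),            # cleanup after each step
)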
<SYSTEM_TASK:>
Prune unsatisfiable states from a stash.
<END_TASK>
<USER_TASK:>
Description:
def prune(self, filter_func=None, from_stash='active', to_stash='pruned'):
"""
Prune unsatisfiable states from a stash.
This function will move all unsatisfiable states in the given stash into a different stash.
:param filter_func: Only prune states that match this filter.
:param from_stash: Prune states from this stash. (default: 'active')
:param to_stash: Put pruned states in this stash. (default: 'pruned')
:returns: The simulation manager, for chaining.
:rtype: SimulationManager
""" |
def _prune_filter(state):
to_prune = not filter_func or filter_func(state)
if to_prune and not state.satisfiable():
if self._hierarchy:
self._hierarchy.unreachable_state(state)
self._hierarchy.simplify()
return True
return False
self.move(from_stash, to_stash, _prune_filter)
return self |
<SYSTEM_TASK:>
Move states from one stash to another.
<END_TASK>
<USER_TASK:>
Description:
def move(self, from_stash, to_stash, filter_func=None):
"""
Move states from one stash to another.
:param from_stash: Take matching states from this stash.
:param to_stash: Put matching states into this stash.
:param filter_func: Stash states that match this filter. Should be a function that takes
a state and returns True or False. (default: stash all states)
:returns: The simulation manager, for chaining.
:rtype: SimulationManager
""" |
filter_func = filter_func or (lambda s: True)
stash_splitter = lambda states: reversed(self._filter_states(filter_func, states))
return self.split(stash_splitter, from_stash=from_stash, to_stash=to_stash) |
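For instance (a sketch with an illustrative predicate), move every state that has printed a success marker into its own stash:

simgr.move('active', 'authenticated',
           filter_func=lambda s: b'Welcome' in s.posix.dumps(1))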
<SYSTEM_TASK:>
Applies a given function to a given stash.
<END_TASK>
<USER_TASK:>
Description:
def apply(self, state_func=None, stash_func=None, stash='active', to_stash=None):
"""
Applies a given function to a given stash.
:param state_func: A function to apply to every state. Should take a state and return a state.
The returned state will take the place of the old state. If the function
*doesn't* return a state, the old state will be used. If the function returns
a list of states, they will replace the original states.
:param stash_func: A function to apply to the whole stash. Should take a list of states and
return a list of states. The resulting list will replace the stash.
If both state_func and stash_func are provided state_func is applied first,
then stash_func is applied on the results.
:param stash: A stash to work with.
:param to_stash: If specified, this stash will be used to store the resulting states instead.
:returns: The simulation manager, for chaining.
:rtype: SimulationManager
""" |
to_stash = to_stash or stash
def _stash_splitter(states):
keep, split = [], []
if state_func is not None:
for s in states:
ns = state_func(s)
if isinstance(ns, SimState):
split.append(ns)
elif isinstance(ns, (list, tuple, set)):
split.extend(ns)
else:
split.append(s)
if stash_func is not None:
# per the docstring, when both callbacks are given stash_func operates on the results of state_func
split = stash_func(split if state_func is not None else states)
if to_stash is not stash:
keep = states
return keep, split
return self.split(_stash_splitter, from_stash=stash, to_stash=to_stash) |
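A brief sketch (illustrative callback): because to_stash differs from stash, the originals stay in 'active' and the transformed states land in 'forked':

simgr.apply(state_func=lambda s: [s, s.copy()], stash='active', to_stash='forked')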
<SYSTEM_TASK:>
Split a stash of states into two stashes depending on the specified options.
<END_TASK>
<USER_TASK:>
Description:
def split(self, stash_splitter=None, stash_ranker=None, state_ranker=None,
limit=8, from_stash='active', to_stash='stashed'):
"""
Split a stash of states into two stashes depending on the specified options.
The stash from_stash will be split into two stashes depending on the other options
passed in. If to_stash is provided, the second stash will be written there.
stash_splitter overrides stash_ranker, which in turn overrides state_ranker.
If no functions are provided, the states are simply split according to the limit.
The sort done with state_ranker is ascending.
:param stash_splitter: A function that should take a list of states and return a tuple
of two lists (the two resulting stashes).
:param stash_ranker: A function that should take a list of states and return a sorted
list of states. This list will then be split according to "limit".
:param state_ranker: An alternative to stash_splitter. States will be sorted with outputs
of this function, which are to be used as a key. The first "limit"
of them will be kept, the rest split off.
:param limit: For use with state_ranker. The number of states to keep. Default: 8
:param from_stash: The stash to split (default: 'active')
:param to_stash: The stash to write to (default: 'stashed')
:returns: The simulation manager, for chaining.
:rtype: SimulationManager
""" |
states = self._fetch_states(stash=from_stash)
if stash_splitter is not None:
keep, split = stash_splitter(states)
elif stash_ranker is not None:
ranked_paths = stash_ranker(states)
keep, split = ranked_paths[:limit], ranked_paths[limit:]
elif state_ranker is not None:
ranked_paths = sorted(states, key=state_ranker)
keep, split = ranked_paths[:limit], ranked_paths[limit:]
else:
keep, split = states[:limit], states[limit:]
keep, split = map(list, (keep, split))
self._clear_states(from_stash)
self._store_states(from_stash, keep)
self._store_states(to_stash, split)
return self |
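For example (a sketch using the solver's constraint count as a rank), keep the eight simplest states and stash the rest:

simgr.split(state_ranker=lambda s: len(s.solver.constraints), limit=8)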
<SYSTEM_TASK:>
Merge the states in a given stash.
<END_TASK>
<USER_TASK:>
Description:
def merge(self, merge_func=None, merge_key=None, stash='active'):
"""
Merge the states in a given stash.
:param stash: The stash (default: 'active')
:param merge_func: If provided, instead of using state.merge, call this function with
the states as the argument. Should return the merged state.
:param merge_key: If provided, should be a function that takes a state and returns a key that will compare
equal for all states that are allowed to be merged together, as a first approximation.
By default: uses PC, callstack, and open file descriptors.
:returns: The simulation manager, for chaining.
:rtype: SimulationManager
""" |
self.prune(from_stash=stash)
to_merge = self._fetch_states(stash=stash)
not_to_merge = []
if merge_key is None: merge_key = self._merge_key
merge_groups = [ ]
while to_merge:
base_key = merge_key(to_merge[0])
g, to_merge = self._filter_states(lambda s: base_key == merge_key(s), to_merge)
if len(g) <= 1:
not_to_merge.extend(g)
else:
merge_groups.append(g)
for g in merge_groups:
try:
m = self._merge_states(g) if merge_func is None else merge_func(*g)
not_to_merge.append(m)
except SimMergeError:
l.warning("SimMergeError while merging %d states", len(g), exc_info=True)
not_to_merge.extend(g)
self._clear_states(stash)
self._store_states(stash, not_to_merge)
return self |
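A hedged sketch of merge_key (a deliberately coarse key): group states by instruction pointer alone, ignoring callstack and file descriptors:

simgr.merge(merge_key=lambda s: s.addr, stash='active')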
<SYSTEM_TASK:>
Merges a list of states.
<END_TASK>
<USER_TASK:>
Description:
def _merge_states(self, states):
"""
Merges a list of states.
:param states: the states to merge
:returns SimState: the resulting state
""" |
if self._hierarchy:
optimal, common_history, others = self._hierarchy.most_mergeable(states)
else:
optimal, common_history, others = states, None, []
if len(optimal) >= 2:
# We found optimal states (states that share a common ancestor) to merge.
# Compute constraints for each state starting from the common ancestor,
# and use them as merge conditions.
constraints = [s.history.constraints_since(common_history) for s in optimal]
o = optimal[0]
m, _, _ = o.merge(*optimal[1:],
merge_conditions=constraints,
common_ancestor=common_history.strongref_state
)
else:
l.warning(
"Cannot find states with common history line to merge. Fall back to the naive merging strategy "
"and merge all states."
)
s = states[0]
m, _, _ = s.merge(*states[1:])
others = []
if self._hierarchy:
self._hierarchy.add_state(m)
if len(others):
others.append(m)
return self._merge_states(others)
else:
return m |
<SYSTEM_TASK:>
Launch a postmortem debug shell at the site of the error.
<END_TASK>
<USER_TASK:>
Description:
def debug(self):
"""
Launch a postmortem debug shell at the site of the error.
""" |
try:
__import__('ipdb').post_mortem(self.traceback)
except ImportError:
__import__('pdb').post_mortem(self.traceback) |
<SYSTEM_TASK:>
Calculate the complement of `self` and `other`.
<END_TASK>
<USER_TASK:>
Description:
def complement(self, other):
"""
Calculate the complement of `self` and `other`.
:param other: Another SimVariableSet instance.
:return: The complement result.
""" |
s = SimVariableSet()
s.register_variables = self.register_variables - other.register_variables
s.register_variable_offsets = self.register_variable_offsets - other.register_variable_offsets
s.memory_variables = self.memory_variables - other.memory_variables
s.memory_variable_addresses = self.memory_variable_addresses - other.memory_variable_addresses
return s |
<SYSTEM_TASK:>
Get the CFGNode object on the control flow graph given an angr state.
<END_TASK>
<USER_TASK:>
Description:
def _get_cfg_node(cfg, state):
"""
Get the CFGNode object on the control flow graph given an angr state.
:param angr.analyses.CFGEmulated cfg: An instance of CFGEmulated.
:param angr.SimState state: The current state.
:return: A CFGNode instance if the node exists, or None if the node cannot be found.
:rtype: CFGNode or None
""" |
call_stack_suffix = state.callstack.stack_suffix(cfg.context_sensitivity_level)
is_syscall = state.history.jumpkind is not None and state.history.jumpkind.startswith('Ijk_Sys')
block_id = cfg._generate_block_id(call_stack_suffix, state.addr, is_syscall)
return cfg.get_node(block_id) |
<SYSTEM_TASK:>
Perform a depth-first search on the given DiGraph, with a limit on maximum steps.
<END_TASK>
<USER_TASK:>
Description:
def _dfs_edges(graph, source, max_steps=None):
"""
Perform a depth-first search on the given DiGraph, with a limit on maximum steps.
:param networkx.DiGraph graph: The graph to traverse.
:param Any source: The source to begin traversal.
:param int max_steps: Maximum steps of the traversal, or None if not limiting steps.
:return: An iterator of edges.
""" |
if max_steps is None:
# delegate to networkx's unbounded DFS; a bare `yield` here would hand back the generator object instead of its edges
yield from networkx.dfs_edges(graph, source)
else:
steps_map = defaultdict(int)
traversed = { source }
stack = [ source ]
while stack:
src = stack.pop()
for dst in graph.successors(src):
if dst in traversed:
continue
traversed.add(dst)
dst_steps = max(steps_map[src] + 1, steps_map[dst])
if dst_steps > max_steps:
continue
yield src, dst
steps_map[dst] = dst_steps
stack.append(dst) |
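A minimal sketch of the bounded traversal on a toy graph (node labels are hypothetical, and the helper is called directly here since it takes no self):

import networkx

g = networkx.DiGraph([(0, 1), (1, 2), (2, 3)])
# with max_steps=2, the edge (2, 3) lies three hops out and is pruned
print(list(_dfs_edges(g, 0, max_steps=2)))  # [(0, 1), (1, 2)]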
<SYSTEM_TASK:>
Check if the specified address will be executed
<END_TASK>
<USER_TASK:>
Description:
def check(self, cfg, state, peek_blocks):
"""
Check if the specified address will be executed
:param cfg:
:param state:
:param int peek_blocks:
:return:
:rtype: bool
""" |
# Get the current CFGNode from the CFG
node = self._get_cfg_node(cfg, state)
if node is None:
# Umm it doesn't exist on the control flow graph - why?
l.error('Failed to find CFGNode for state %s on the control flow graph.', state)
return False
# crawl the graph to see if we can reach the target address next
for src, dst in self._dfs_edges(cfg.graph, node, max_steps=peek_blocks):
if src.addr == self.addr or dst.addr == self.addr:
l.debug("State %s will reach %#x.", state, self.addr)
return True
l.debug('SimState %s will not reach %#x.', state, self.addr)
return False |
<SYSTEM_TASK:>
Check if the specified function will be reached with certain arguments.
<END_TASK>
<USER_TASK:>
Description:
def check(self, cfg, state, peek_blocks):
"""
Check if the specified function will be reached with certain arguments.
:param cfg:
:param state:
:param peek_blocks:
:return:
""" |
# Get the current CFGNode
node = self._get_cfg_node(cfg, state)
if node is None:
l.error("Failed to find CFGNode for state %s on the control flow graph.", state)
return False
# crawl the graph to see if we can reach the target function within the limited steps
for src, dst in self._dfs_edges(cfg.graph, node, max_steps=peek_blocks):
the_node = None
if src.addr == self.function.addr:
the_node = src
elif dst.addr == self.function.addr:
the_node = dst
if the_node is not None:
if self.arguments is None:
# we do not care about arguments
return True
else:
# check arguments; use a distinct name so the `state` in the log message below isn't clobbered
arch = state.arch
input_state = the_node.input_state
same_arguments = self._check_arguments(arch, input_state)
if same_arguments:
# all arguments are the same!
return True
l.debug("SimState %s will not reach function %s.", state, self.function)
return False |
<SYSTEM_TASK:>
Check if the specific function is reached with certain arguments
<END_TASK>
<USER_TASK:>
Description:
def check_state(self, state):
"""
Check if the specific function is reached with certain arguments
:param angr.SimState state: The state to check
:return: True if the function is reached with certain arguments, False otherwise.
:rtype: bool
""" |
if state.addr == self.function.addr:
arch = state.arch
if self._check_arguments(arch, state):
return True
return False |
<SYSTEM_TASK:>
Make sure the current basic block of each state shows up in the CFG. For blocks that are not in the CFG, start
<END_TASK>
<USER_TASK:>
Description:
def _peek_forward(self, simgr):
"""
Make sure the current basic block of each state shows up in the CFG. For blocks that are not in the CFG, start
CFG recovery from them with a maximum basic block depth of 100.
:param simgr:
:return:
""" |
if self._cfg is None:
starts = list(simgr.active)
self._cfg_kb = KnowledgeBase(self.project)
self._cfg = self.project.analyses.CFGEmulated(kb=self._cfg_kb, starts=starts, max_steps=self._peek_blocks,
keep_state=self._cfg_keep_states
)
else:
starts = list(simgr.active)
self._cfg.resume(starts=starts, max_steps=self._peek_blocks) |
<SYSTEM_TASK:>
Load the last N deprioritized states from the "deprioritized" stash and put them into the "active" stash.
<END_TASK>
<USER_TASK:>
Description:
def _load_fallback_states(self, pg):
"""
Load fallback states: the last N deprioritized states will be extracted from the "deprioritized" stash and put into the "active" stash.
N is controlled by 'num_fallback_states'.
:param SimulationManager pg: The simulation manager.
:return: None
""" |
# take back some of the deprioritized states
l.debug("No more active states. Load some deprioritized states to 'active' stash.")
if 'deprioritized' in pg.stashes and pg.deprioritized:
pg.active.extend(pg.deprioritized[-self._num_fallback_states : ])
pg.stashes['deprioritized'] = pg.deprioritized[ : -self._num_fallback_states] |
<SYSTEM_TASK:>
Detects if there is any xor operation in the function.
<END_TASK>
<USER_TASK:>
Description:
def has_xor(self):
"""
Detects if there is any xor operation in the function.
:return: A set of tags if any xor operation is found, None otherwise.
""" |
def _has_xor(expr):
return isinstance(expr, pyvex.IRExpr.Binop) and expr.op.startswith("Iop_Xor")
found_xor = False
for block in self._function.blocks:
if block.size == 0:
continue
for stmt in block.vex.statements:
if isinstance(stmt, pyvex.IRStmt.Put):
found_xor = found_xor or _has_xor(stmt.data)
elif isinstance(stmt, pyvex.IRStmt.WrTmp):
found_xor = found_xor or _has_xor(stmt.data)
if found_xor:
break
if found_xor:
return { CodeTags.HAS_XOR }
return None |
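A standalone sketch of the same detection outside the function object (assuming pyvex and archinfo are importable; opt_level=0 keeps the xor from being folded away):

import archinfo
import pyvex

# lift `xor eax, ebx; ret` and scan WrTmp statements for Iop_Xor* binops
irsb = pyvex.lift(b'\x31\xd8\xc3', 0x400000, archinfo.ArchAMD64(), opt_level=0)
binops = [stmt.data for stmt in irsb.statements
          if isinstance(stmt, pyvex.IRStmt.WrTmp) and isinstance(stmt.data, pyvex.IRExpr.Binop)]
print(any(expr.op.startswith('Iop_Xor') for expr in binops))  # True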
<SYSTEM_TASK:>
Detects if there is any bit-shift operation in the function.
<END_TASK>
<USER_TASK:>
Description:
def has_bitshifts(self):
"""
Detects if there is any bit-shift operation in the function.
:return: A set of tags if any bit-shift operation is found, None otherwise.
""" |
def _has_bitshifts(expr):
if isinstance(expr, pyvex.IRExpr.Binop):
return expr.op.startswith("Iop_Shl") or expr.op.startswith("Iop_Shr") \
or expr.op.startswith("Iop_Sar")
return False
found_bitops = False
for block in self._function.blocks:
if block.size == 0:
continue
for stmt in block.vex.statements:
if isinstance(stmt, pyvex.IRStmt.Put):
found_bitops = found_bitops or _has_bitshifts(stmt.data)
elif isinstance(stmt, pyvex.IRStmt.WrTmp):
found_bitops = found_bitops or _has_bitshifts(stmt.data)
if found_bitops:
break
if found_bitops:
return { CodeTags.HAS_BITSHIFTS }
return None |
<SYSTEM_TASK:>
Merge another KeyedRegion into this KeyedRegion.
<END_TASK>
<USER_TASK:>
Description:
def merge(self, other, replacements=None):
"""
Merge another KeyedRegion into this KeyedRegion.
:param KeyedRegion other: The other instance to merge with.
:return: None
""" |
# TODO: evaluate whether the current solution is optimal enough
for _, item in other._storage.items(): # type: RegionObject
for so in item.stored_objects: # type: StoredObject
if replacements and so.obj in replacements:
so = StoredObject(so.start, replacements[so.obj], so.size)
self._object_mapping[so.obj_id] = so
self.__store(so, overwrite=False)
return self |
<SYSTEM_TASK:>
Replace variables with other variables.
<END_TASK>
<USER_TASK:>
Description:
def replace(self, replacements):
"""
Replace variables with other variables.
:param dict replacements: A dict of variable replacements.
:return: self
""" |
for old_var, new_var in replacements.items():
old_var_id = id(old_var)
if old_var_id in self._object_mapping:
# FIXME: we need to check if old_var still exists in the storage
old_so = self._object_mapping[old_var_id] # type: StoredObject
self._store(old_so.start, new_var, old_so.size, overwrite=True)
return self |
<SYSTEM_TASK:>
Add a variable to this region at the given offset.
<END_TASK>
<USER_TASK:>
Description:
def add_variable(self, start, variable):
"""
Add a variable to this region at the given offset.
:param int start:
:param SimVariable variable:
:return: None
""" |
size = variable.size if variable.size is not None else 1
self.add_object(start, variable, size) |
<SYSTEM_TASK:>
Add a variable to this region at the given offset, and remove all other variables that are fully covered by
<END_TASK>
<USER_TASK:>
Description:
def set_variable(self, start, variable):
"""
Add a variable to this region at the given offset, and remove all other variables that are fully covered by
this variable.
:param int start:
:param SimVariable variable:
:return: None
""" |
size = variable.size if variable.size is not None else 1
self.set_object(start, variable, size) |
<SYSTEM_TASK:>
Add an object to this region at the given offset, and remove all other objects that are fully covered by this
<END_TASK>
<USER_TASK:>
Description:
def set_object(self, start, obj, object_size):
"""
Add an object to this region at the given offset, and remove all other objects that are fully covered by this
object.
:param start:
:param obj:
:param object_size:
:return:
""" |
self._store(start, obj, object_size, overwrite=True) |
<SYSTEM_TASK:>
Find variables covering the given region offset.
<END_TASK>
<USER_TASK:>
Description:
def get_variables_by_offset(self, start):
"""
Find variables covering the given region offset.
:param int start:
:return: A set of stack variables covering the given offset.
:rtype: set
""" |
_, container = self._get_container(start)
if container is None:
return set()  # empty set, consistent with the declared rtype
else:
return container.internal_objects |
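A hedged usage sketch (the module paths and the SimStackVariable constructor signature here are assumptions, not confirmed by the source):

from angr.keyed_region import KeyedRegion
from angr.sim_variable import SimStackVariable

region = KeyedRegion()
var = SimStackVariable(-8, 8, base='bp', name='s_8')  # assumed signature: (offset, size, ...)
region.set_variable(-8, var)                          # occupies offsets [-8, 0)
print(region.get_variables_by_offset(-4))             # the set containing the variable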
<SYSTEM_TASK:>
Find objects covering the given region offset.
<END_TASK>
<USER_TASK:>
Description:
def get_objects_by_offset(self, start):
"""
Find objects covering the given region offset.
:param start:
:return:
""" |
_, container = self._get_container(start)
if container is None:
return set()
else:
return container.internal_objects |
<SYSTEM_TASK:>
Update the progress with a percentage, including updating the progressbar as well as calling the progress
<END_TASK>
<USER_TASK:>
Description:
def _update_progress(self, percentage, **kwargs):
"""
Update the progress with a percentage, including updating the progressbar as well as calling the progress
callback.
:param float percentage: Percentage of the progressbar. from 0.0 to 100.0.
:param kwargs: Other parameters that will be passed to the progress_callback handler.
:return: None
""" |
if self._show_progressbar:
if self._progressbar is None:
self._initialize_progressbar()
self._progressbar.update(percentage * 10000)
if self._progress_callback is not None:
self._progress_callback(percentage, **kwargs) |
<SYSTEM_TASK:>
Convert an address to a stack offset.
<END_TASK>
<USER_TASK:>
Description:
def _addr_to_stack_offset(self, addr):
"""
Convert an address to a stack offset.
:param claripy.ast.Base addr: The address to convert from.
:return: A stack offset if the addr comes from the stack pointer, or None if the address
does not come from the stack pointer.
""" |
def _parse(addr):
if addr.op == '__add__':
# __add__ might have multiple arguments
parsed = [ _parse(arg) for arg in addr.args ]
annotated = [ True for annotated, _ in parsed if annotated is True ]
if len(annotated) != 1:
# either nothing is annotated, or more than one element is annotated
raise ValueError()
return True, sum([ offset for _, offset in parsed ])
elif addr.op == '__sub__':
# __sub__ might have multiple arguments
parsed = [ _parse(arg) for arg in addr.args ]
first_annotated, first_offset = parsed[0]
if first_annotated is False:
# the first argument is not annotated. we don't support it.
raise ValueError()
if any([ annotated for annotated, _ in parsed[1:] ]):
# more than one argument is annotated. we don't support it.
raise ValueError()
return True, first_offset - sum([ offset for _, offset in parsed[1:] ])
else:
anno = next(iter(anno for anno in addr.annotations if isinstance(anno, StackLocationAnnotation)), None)
if anno is None:
if addr.op == 'BVV':
return False, addr._model_concrete.value
raise ValueError()
return True, anno.offset
# find the annotated AST
try: annotated, offset = _parse(addr)
except ValueError: return None
if not annotated:
return None
return self._to_signed(offset) |
<SYSTEM_TASK:>
Take an input abstract state, execute the node, and derive an output state.
<END_TASK>
<USER_TASK:>
Description:
def _run_on_node(self, node, state):
"""
Take an input abstract state, execute the node, and derive an output state.
:param angr.Block node: The node to work on.
:param VariableRecoveryState state: The input state.
:return: A tuple of (changed, new output state).
:rtype: tuple
""" |
l.debug('Analyzing block %#x, iteration %d.', node.addr, self._node_iterations[node])
concrete_state = state.get_concrete_state(node.addr)
if concrete_state is None:
# didn't find any state going to here
l.error("_run_on_node(): cannot find any state for address %#x.", node.addr)
return False, state
state = state.copy()
self._instates[node.addr] = state
if self._node_iterations[node] >= self._max_iterations:
l.debug('Skip node %s as we have iterated %d times on it.', node, self._node_iterations[node])
return False, state
state.register_callbacks([ concrete_state ])
successors = self.project.factory.successors(concrete_state,
addr=node.addr,
size=node.size,
opt_level=0 # disable the optimization in order to have
# instruction-level analysis results
)
output_states = successors.all_successors
state.concrete_states = [ succ for succ in output_states if not succ.ip.symbolic ]
self._outstates[node.addr] = state
self._node_iterations[node] += 1
return True, state |
<SYSTEM_TASK:>
Copy self attributes to the new object.
<END_TASK>
<USER_TASK:>
Description:
def make_copy(self, copy_to):
"""
Copy self attributes to the new object.
:param CFGBase copy_to: The target to copy to.
:return: None
""" |
for attr, value in self.__dict__.items():
if attr.startswith('__') and attr.endswith('__'):
continue
setattr(copy_to, attr, value) |
<SYSTEM_TASK:>
Merge two adjacent CFGNodes into one.
<END_TASK>
<USER_TASK:>
Description:
def _merge_cfgnodes(self, cfgnode_0, cfgnode_1):
"""
Merge two adjacent CFGNodes into one.
:param CFGNode cfgnode_0: The first CFGNode.
:param CFGNode cfgnode_1: The second CFGNode.
:return: None
""" |
assert cfgnode_0.addr + cfgnode_0.size == cfgnode_1.addr
addr0, addr1 = cfgnode_0.addr, cfgnode_1.addr
new_node = cfgnode_0.merge(cfgnode_1)
# Update the graph and the nodes dict accordingly
if addr1 in self._nodes_by_addr:
self._nodes_by_addr[addr1].remove(cfgnode_1)
if not self._nodes_by_addr[addr1]:
del self._nodes_by_addr[addr1]
del self._nodes[cfgnode_1.block_id]
self._nodes_by_addr[addr0].remove(cfgnode_0)
if not self._nodes_by_addr[addr0]:
del self._nodes_by_addr[addr0]
del self._nodes[cfgnode_0.block_id]
in_edges = list(self.graph.in_edges(cfgnode_0, data=True))
out_edges = list(self.graph.out_edges(cfgnode_1, data=True))
self.graph.remove_node(cfgnode_0)
self.graph.remove_node(cfgnode_1)
self.graph.add_node(new_node)
for src, _, data in in_edges:
self.graph.add_edge(src, new_node, **data)
for _, dst, data in out_edges:
self.graph.add_edge(new_node, dst, **data)
# Put the new node into node dicts
self._nodes[new_node.block_id] = new_node
self._nodes_by_addr[addr0].append(new_node) |
<SYSTEM_TASK:>
Convert a CFGNode instance to a CodeNode object.
<END_TASK>
<USER_TASK:>
Description:
def _to_snippet(self, cfg_node=None, addr=None, size=None, thumb=False, jumpkind=None, base_state=None):
"""
Convert a CFGNode instance to a CodeNode object.
:param angr.analyses.CFGNode cfg_node: The CFGNode instance.
:param int addr: Address of the node. Only used when `cfg_node` is None.
:param bool thumb: Whether this is in THUMB mode or not. Only used for ARM code and when `cfg_node` is None.
:param str or None jumpkind: Jumpkind of this node.
:param SimState or None base_state: The state where BlockNode should be created from.
:return: A converted CodeNode instance.
:rtype: CodeNode
""" |
if cfg_node is not None:
addr = cfg_node.addr
size = cfg_node.size
thumb = cfg_node.thumb
# otherwise, use the addr/size/thumb arguments as passed in
if addr is None:
raise ValueError('_to_snippet(): Either cfg_node or addr must be provided.')
if self.project.is_hooked(addr) and jumpkind != 'Ijk_NoHook':
hooker = self.project._sim_procedures[addr]
size = hooker.kwargs.get('length', 0)
return HookNode(addr, size, type(hooker))
if cfg_node is not None:
return BlockNode(addr, size, thumb=thumb, bytestr=cfg_node.byte_string) # pylint: disable=no-member
else:
return self.project.factory.snippet(addr, size=size, jumpkind=jumpkind, thumb=thumb,
backup_state=base_state) |