<SYSTEM_TASK:>
Construct a dependency graph based on given parameters.
<END_TASK>
<USER_TASK:>
Description:
def _construct(self, targets, control_flow_slice=False):
"""
Construct a dependency graph based on given parameters.
:param targets: A list of tuples like (CFGNode, statement ID)
:param control_flow_slice: Whether the backward slice relies only on the CFG, ignoring data dependencies.
""" |
if control_flow_slice:
simruns = [ r for r, _ in targets ]
self._construct_control_flow_slice(simruns)
else:
self._construct_default(targets) |
<SYSTEM_TASK:>
Build a slice of the program without considering the effect of data dependencies.
<END_TASK>
<USER_TASK:>
Description:
def _construct_control_flow_slice(self, simruns):
"""
Build a slice of the program without considering the effect of data dependencies.
This is an incorrect hack, but it should work fine with small programs.
:param simruns: A list of SimRun targets. You probably want to get them from the CFG; they must exist in the CFG.
""" |
# TODO: Support context-sensitivity!
if self._cfg is None:
l.error('Please build CFG first.')
cfg = self._cfg.graph
for simrun in simruns:
if simrun not in cfg:
l.error('SimRun instance %s is not in the CFG.', simrun)
stack = [ ]
for simrun in simruns:
stack.append(simrun)
self.runs_in_slice = networkx.DiGraph()
self.cfg_nodes_in_slice = networkx.DiGraph()
self.chosen_statements = { }
while stack:
# Pop one out
block = stack.pop()
if block.addr not in self.chosen_statements:
self.chosen_statements[block.addr] = True
# Get all predecessors of that block
predecessors = cfg.predecessors(block)
for pred in predecessors:
stack.append(pred)
self.cfg_nodes_in_slice.add_edge(pred, block)
self.runs_in_slice.add_edge(pred.addr, block.addr) |
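The loop above is a plain backward worklist walk over the CFG. A minimal standalone sketch of the same idea over a bare networkx.DiGraph keyed by block addresses (illustrative names, not angr API):

import networkx

def backward_reachable(cfg_graph, target_nodes):
    """Collect every node that can reach one of target_nodes."""
    chosen = set()
    stack = list(target_nodes)
    while stack:
        node = stack.pop()
        if node in chosen:
            continue
        chosen.add(node)
        # predecessors of a chosen node are pulled into the slice as well
        stack.extend(cfg_graph.predecessors(node))
    return chosen

g = networkx.DiGraph([(0x400000, 0x400010), (0x400010, 0x400030), (0x400020, 0x400030)])
assert backward_reachable(g, [0x400030]) == {0x400000, 0x400010, 0x400020, 0x400030}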
<SYSTEM_TASK:>
Source block has more than one exit, and through some of those exits, the control flow can eventually go to
<END_TASK>
<USER_TASK:>
Description:
def _find_exits(self, src_block, target_block):
"""
Source block has more than one exit, and through some of those exits, the control flow can eventually go to
the target block. This method returns exits that lead to the target block.
:param src_block: The block that has multiple exits.
:param target_block: The target block to reach.
:returns: a dict of statement ID -> a list of target IPs (or None if the exit should not be taken), each
corresponds to an exit to take in order to reach the target.
For example, it returns the following dict:
{
'default': None, # It has a default exit, but shouldn't be taken
15: [ 0x400080 ], # Statement 15 is an exit statement, and should be taken when the target is
# 0x400080
28: None # Statement 28 is an exit statement, but shouldn't be taken
}
""" |
# Enumerate all statements and find exit statements
# Since we don't have a state, we have to rely on the pyvex block instead of SimIRSB
# Just create the block from pyvex again - not a big deal
if self.project.is_hooked(src_block.addr):
# Just return all exits for now
return { -1: [ target_block.addr ] }
block = self.project.factory.block(src_block.addr)
vex_block = block.vex
exit_stmt_ids = { }
for stmt_idx, stmt in enumerate(vex_block.statements):
if isinstance(stmt, pyvex.IRStmt.Exit):
exit_stmt_ids[stmt_idx] = None
# And of course, it has a default exit
# Don't forget about it.
exit_stmt_ids[DEFAULT_STATEMENT] = None
# Find all paths from src_block to target_block
# FIXME: This is some crappy code written in a hurry. Replace the all_simple_paths() later.
all_simple_paths = list(networkx.all_simple_paths(self._cfg.graph, src_block, target_block, cutoff=3))
for simple_path in all_simple_paths:
if len(simple_path) <= 1:
# Oops, it looks like src_block and target_block are the same node
continue
if self._same_function:
# Examine this path and make sure it does not have call or return edge
for i in range(len(simple_path) - 1):
jumpkind = self._cfg.graph[simple_path[i]][simple_path[i + 1]]['jumpkind']
if jumpkind in ('Ijk_Call', 'Ijk_Ret'):
return { }
# Get the first two nodes
a, b = simple_path[0], simple_path[1]
# Get the exit statement ID from CFG
exit_stmt_id = self._cfg.get_exit_stmt_idx(a, b)
if exit_stmt_id is None:
continue
# Mark it!
if exit_stmt_ids[exit_stmt_id] is None:
exit_stmt_ids[exit_stmt_id] = [ b.addr ]
else:
exit_stmt_ids[exit_stmt_id].append(b.addr)
return exit_stmt_ids |
<SYSTEM_TASK:>
Map our current slice to the CFG.
<END_TASK>
<USER_TASK:>
Description:
def _map_to_cfg(self):
"""
Map our current slice to the CFG.
Based on self._statements_per_run and self._exit_statements_per_run, this method will traverse the CFG and
check if there is any missing block on the path. If there is, the default exit of that missing block will be
included in the slice. This is because Slicecutor cannot skip individual basic blocks along a path.
""" |
exit_statements_per_run = self.chosen_exits
new_exit_statements_per_run = defaultdict(list)
while len(exit_statements_per_run):
for block_address, exits in exit_statements_per_run.items():
for stmt_idx, exit_target in exits:
if exit_target not in self.chosen_exits:
# Oh we found one!
# The default exit should be taken no matter where it leads to
# Add it to the new set
tpl = (DEFAULT_STATEMENT, None)
if tpl not in new_exit_statements_per_run[exit_target]:
new_exit_statements_per_run[exit_target].append(tpl)
# Add the new ones to our global dict
for block_address, exits in new_exit_statements_per_run.items():
for ex in exits:
if ex not in self.chosen_exits[block_address]:
self.chosen_exits[block_address].append(ex)
# Switch them so we can process the new set
exit_statements_per_run = new_exit_statements_per_run
new_exit_statements_per_run = defaultdict(list) |
<SYSTEM_TASK:>
Include a statement in the final slice.
<END_TASK>
<USER_TASK:>
Description:
def _pick_statement(self, block_address, stmt_idx):
"""
Include a statement in the final slice.
:param int block_address: Address of the basic block.
:param int stmt_idx: Statement ID.
""" |
# TODO: Support context-sensitivity
# Sanity check
if not isinstance(block_address, int):
raise AngrBackwardSlicingError("Invalid block address %s." % block_address)
if not isinstance(stmt_idx, int):
raise AngrBackwardSlicingError("Invalid statement ID %s." % stmt_idx)
self.chosen_statements[block_address].add(stmt_idx) |
<SYSTEM_TASK:>
Include an exit in the final slice.
<END_TASK>
<USER_TASK:>
Description:
def _pick_exit(self, block_address, stmt_idx, target_ips):
"""
Include an exit in the final slice.
:param block_address: Address of the basic block.
:param stmt_idx: ID of the exit statement.
:param target_ips: The target address of this exit statement.
""" |
# TODO: Support context-sensitivity
tpl = (stmt_idx, target_ips)
if tpl not in self.chosen_exits[block_address]:
self.chosen_exits[block_address].append(tpl) |
<SYSTEM_TASK:>
Return a list of conditional statement exits with respect to a basic block.
<END_TASK>
<USER_TASK:>
Description:
def _conditional_exits(self, block_addr):
"""
Return a list of conditional statement exits with respect to a basic block.
:param block_addr: The address of the basic block.
:return: A list of statement IDs.
""" |
vex_block = self.project.factory.block(block_addr).vex
lst = [ ]
for i, stmt in enumerate(vex_block.statements):
if isinstance(stmt, pyvex.IRStmt.Exit):
lst.append(i)
return lst |
<SYSTEM_TASK:>
Returns the address of the method's basic block that contains the given
<END_TASK>
<USER_TASK:>
Description:
def _get_bb_addr_from_instr(self, instr):
"""
Returns the address of the method's basic block that contains the given
instruction.
:param instr: The index of the instruction (within the current method).
:rtype: SootAddressDescriptor
""" |
current_method = self.state.addr.method
try:
bb = current_method.block_by_label[instr]
except KeyError:
l.error("Possible jump to a non-existing bb %s --> %d",
self.state.addr, instr)
raise IncorrectLocationException()
return SootAddressDescriptor(current_method, bb.idx, 0) |
<SYSTEM_TASK:>
The default uninitialized read handler. Returns symbolic bytes.
<END_TASK>
<USER_TASK:>
Description:
def _handle_uninitialized_read(self, addr, inspect=True, events=True):
"""
The default uninitialized read handler. Returns symbolic bytes.
""" |
if self._uninitialized_read_handler is None:
v = self.state.solver.Unconstrained("%s_%s" % (self.id, addr), self.width*self.state.arch.byte_width, key=self.variable_key_prefix + (addr,), inspect=inspect, events=events)
return v.reversed if self.endness == "Iend_LE" else v
else:
return self._uninitialized_read_handler(self, addr, inspect=inspect, events=events) |
<SYSTEM_TASK:>
Resolves this address.
<END_TASK>
<USER_TASK:>
Description:
def _translate_addr(self, a): #pylint:disable=no-self-use
"""
Resolves this address.
""" |
if isinstance(a, claripy.ast.Base) and not a.singlevalued:
raise SimFastMemoryError("address not supported")
return self.state.solver.eval(a) |
<SYSTEM_TASK:>
Checks whether this size can be supported by FastMemory.
<END_TASK>
<USER_TASK:>
Description:
def _translate_size(self, s): #pylint:disable=no-self-use
"""
Checks whether this size can be supported by FastMemory.
""" |
if isinstance(s, claripy.ast.Base) and not s.singlevalued:
raise SimFastMemoryError("size not supported")
if s is None:
return s
return self.state.solver.eval(s) |
<SYSTEM_TASK:>
Checks whether this condition can be supported by FastMemory.
<END_TASK>
<USER_TASK:>
Description:
def _translate_cond(self, c): #pylint:disable=no-self-use
"""
Checks whether this condition can be supported by FastMemory.
""" |
if isinstance(c, claripy.ast.Base) and not c.singlevalued:
raise SimFastMemoryError("size not supported")
if c is None:
return True
else:
return self.state.solver.eval_upto(c, 1)[0] |
<SYSTEM_TASK:>
Resolves a memory access of a certain size. Returns a sequence of the bases, offsets, and sizes of the accesses required
<END_TASK>
<USER_TASK:>
Description:
def _resolve_access(self, addr, size):
"""
Resolves a memory access of a certain size. Returns a sequence of the bases, offsets, and sizes of the accesses required
to fulfil this.
""" |
# if we fit in one word
first_offset = addr % self.width
first_base = addr - first_offset
if first_offset + size <= self.width:
return [ (first_base, first_offset, size) ]
last_size = (addr + size) % self.width
last_base = addr + size - last_size
accesses = [ ]
accesses.append((first_base, first_offset, self.width - first_offset))
accesses.extend((a, 0, self.width) for a in range(first_base+self.width, last_base, self.width))
if last_size != 0:
accesses.append((last_base, 0, last_size))
return accesses |
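A worked example of the splitting rule above: with 8-byte words, a 20-byte access at 0x1006 becomes a 2-byte tail of the first word, two full words, and a 2-byte head of the last word. A standalone re-implementation of the arithmetic (a sketch, not the class method itself):

def resolve_access(addr, size, width):
    first_offset = addr % width
    first_base = addr - first_offset
    if first_offset + size <= width:
        return [(first_base, first_offset, size)]
    last_size = (addr + size) % width
    last_base = addr + size - last_size
    accesses = [(first_base, first_offset, width - first_offset)]
    accesses.extend((a, 0, width) for a in range(first_base + width, last_base, width))
    if last_size != 0:
        accesses.append((last_base, 0, last_size))
    return accesses

# 2 + 8 + 8 + 2 = 20 bytes
assert resolve_access(0x1006, 20, 8) == [
    (0x1000, 6, 2), (0x1008, 0, 8), (0x1010, 0, 8), (0x1018, 0, 2)]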
<SYSTEM_TASK:>
Performs a single load.
<END_TASK>
<USER_TASK:>
Description:
def _single_load(self, addr, offset, size, inspect=True, events=True):
"""
Performs a single load.
""" |
try:
d = self._contents[addr]
except KeyError:
d = self._handle_uninitialized_read(addr, inspect=inspect, events=events)
self._contents[addr] = d
if offset == 0 and size == self.width:
return d
else:
return d.get_bytes(offset, size) |
<SYSTEM_TASK:>
Performs a single store.
<END_TASK>
<USER_TASK:>
Description:
def _single_store(self, addr, offset, size, data):
"""
Performs a single store.
""" |
if offset == 0 and size == self.width:
self._contents[addr] = data
elif offset == 0:
cur = self._single_load(addr, size, self.width - size)
self._contents[addr] = data.concat(cur)
elif offset + size == self.width:
cur = self._single_load(addr, 0, offset)
self._contents[addr] = cur.concat(data)
else:
cur = self._single_load(addr, 0, self.width)
start = cur.get_bytes(0, offset)
end = cur.get_bytes(offset+size, self.width-offset-size)
self._contents[addr] = start.concat(data, end) |
<SYSTEM_TASK:>
Remove assignments to registers that have no consumers and are immediately killed.
<END_TASK>
<USER_TASK:>
Description:
def _dead_assignment_elimination(self, function, data_graph): #pylint:disable=unused-argument
"""
Remove assignments to registers that have no consumers and are immediately killed.
BROKEN - DO NOT USE IT
:param angr.knowledge.Function function:
:param networkx.MultiDiGraph data_graph:
:return: None
""" |
register_pvs = set()
for node in data_graph.nodes():
if isinstance(node.variable, SimRegisterVariable) and \
node.variable.reg is not None and \
node.variable.reg < 40:
register_pvs.add(node)
for reg in register_pvs:
# does it have a consumer?
out_edges = data_graph.out_edges(reg, data=True)
consumers = [ ]
killers = [ ]
for _, _, data in out_edges:
if 'type' in data and data['type'] == 'kill':
killers.append(data)
else:
consumers.append(data)
if not consumers and killers:
# we can remove the assignment!
da = DeadAssignment(reg)
self.dead_assignments.append(da) |
<SYSTEM_TASK:>
Set the gs register in the angr state to the value of the gs register in the concrete process
<END_TASK>
<USER_TASK:>
Description:
def initialize_segment_register_x64(self, state, concrete_target):
"""
Set the gs register in the angr state to the value of the gs register in the concrete process
:param state: state which will be modified
:param concrete_target: concrete target that will be used to read the gs register
:return: None
""" |
_l.debug("Synchronizing gs segment register")
state.regs.gs = self._read_gs_register_x64(concrete_target) |
<SYSTEM_TASK:>
Create a GDT in the state memory and populate the segment registers.
<END_TASK>
<USER_TASK:>
Description:
def initialize_gdt_x86(self, state, concrete_target):
"""
Create a GDT in the state memory and populate the segment registers.
:param state: state which will be modified
:param concrete_target: concrete target that will be used to read the fs register
:return: the created GlobalDescriptorTable object
""" |
_l.debug("Creating Global Descriptor Table and synchronizing fs segment register")
fs = self._read_fs_register_x86(concrete_target)
gdt = self.generate_gdt(fs,0x0)
self.setup_gdt(state,gdt)
return gdt |
<SYSTEM_TASK:>
Looks up the object that was used for creating the reference.
<END_TASK>
<USER_TASK:>
Description:
def lookup(self, opaque_ref):
"""
Looks up the object that was used for creating the reference.
""" |
opaque_ref_value = self._get_reference_value(opaque_ref)
# check local refs
if opaque_ref_value in self.local_refs:
return self.local_refs[opaque_ref_value]
# check global refs
if opaque_ref_value in self.global_refs:
return self.global_refs[opaque_ref_value]
raise KeyError("Unknown JNI reference %d. Local references: %s Global references: %s"
% (opaque_ref_value, self.local_refs, self.global_refs)) |
<SYSTEM_TASK:>
Create a new reference that maps to the given object.
<END_TASK>
<USER_TASK:>
Description:
def create_new_reference(self, obj, global_ref=False):
"""
Create a new reference that maps to the given object.
:param obj: Object which gets referenced.
:param bool global_ref: Whether a local or global reference is created.
""" |
# get a unique address
opaque_ref = self.state.project.loader.extern_object.allocate()
# map the object to that address
l.debug("Map %s to opaque reference 0x%x", obj, opaque_ref)
if global_ref:
self.global_refs[opaque_ref] = obj
else:
self.local_refs[opaque_ref] = obj
return opaque_ref |
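A hedged sketch of the reference lifecycle; `jni_refs` stands for an instance of this plugin and `my_obj` for any Python object, both illustrative:

# hypothetical names; assumes lookup() accepts the value returned here
ref = jni_refs.create_new_reference(my_obj)   # allocate an opaque extern address
assert jni_refs.lookup(ref) is my_obj         # resolve back through local_refs
jni_refs.delete_reference(ref)                # drop the mapping again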
<SYSTEM_TASK:>
Delete the stored mapping of a reference.
<END_TASK>
<USER_TASK:>
Description:
def delete_reference(self, opaque_ref, global_ref=False):
"""
Delete the stored mapping of a reference.
:param opaque_ref: Reference which should be removed.
:param bool global_ref: Whether opaque_ref is a local or global
reference.
""" |
opaque_ref_value = self._get_reference_value(opaque_ref)
if global_ref:
del self.global_refs[opaque_ref_value]
else:
del self.local_refs[opaque_ref_value] |
<SYSTEM_TASK:>
Pop a job from the pending jobs list.
<END_TASK>
<USER_TASK:>
Description:
def pop_job(self, returning=True):
"""
Pop a job from the pending jobs list.
When returning == True, we prioritize the jobs whose functions are known to be returning (function.returning is
True). As an optimization, we sort the pending jobs list by job.function.returning.
:param bool returning: Only pop a pending job if the corresponding function returns.
:return: A pending job if we can find one, or None if we cannot find any that satisfies the requirement.
:rtype: angr.analyses.cfg.cfg_fast.CFGJob
""" |
if not self:
return None
if not returning:
return self._pop_job(next(reversed(self._jobs.keys())))
# Prioritize returning functions
for func_addr in reversed(self._jobs.keys()):
if func_addr not in self._returning_functions:
continue
return self._pop_job(func_addr)
return None |
<SYSTEM_TASK:>
Mark a function as returning.
<END_TASK>
<USER_TASK:>
Description:
def add_returning_function(self, func_addr):
"""
Mark a function as returning.
:param int func_addr: Address of the function that returns.
:return: None
""" |
self._returning_functions.add(func_addr)
self._updated_functions.add(func_addr) |
<SYSTEM_TASK:>
Calculate the entropy of a piece of data
<END_TASK>
<USER_TASK:>
Description:
def _calc_entropy(data, size=None):
"""
Calculate the entropy of a piece of data
:param data: The target data to calculate entropy on
:param size: Size of the data. Optional.
:return: A float
""" |
if not data:
return 0
entropy = 0
if size is None:
size = len(data)
data = bytes(pyvex.ffi.buffer(data, size))
for x in range(0, 256):
p_x = float(data.count(x)) / size
if p_x > 0:
entropy += - p_x * math.log(p_x, 2)
return entropy |
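The same byte-level Shannon entropy, sketched standalone without the pyvex buffer handling; constant data scores 0 bits per byte and uniformly distributed bytes score the full 8:

import math

def shannon_entropy(data: bytes) -> float:
    if not data:
        return 0.0
    entropy = 0.0
    for x in range(256):
        p_x = data.count(x) / len(data)
        if p_x > 0:
            entropy -= p_x * math.log2(p_x)
    return entropy

assert shannon_entropy(b"\x00" * 100) == 0.0
assert shannon_entropy(bytes(range(256))) == 8.0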
<SYSTEM_TASK:>
Check if the address is inside any existing region.
<END_TASK>
<USER_TASK:>
Description:
def _inside_regions(self, address):
"""
Check if the address is inside any existing region.
:param int address: Address to check.
:return: True if the address is within one of the memory regions, False otherwise.
:rtype: bool
""" |
try:
start_addr = next(self._regions.irange(maximum=address, reverse=True))
except StopIteration:
return False
else:
return address < self._regions[start_addr] |
<SYSTEM_TASK:>
Get the minimum address out of all regions. We assume self._regions is sorted.
<END_TASK>
<USER_TASK:>
Description:
def _get_min_addr(self):
"""
Get the minimum address out of all regions. We assume self._regions is sorted.
:return: The minimum address.
:rtype: int
""" |
if not self._regions:
if self.project.arch.name != "Soot":
l.error("self._regions is empty or not properly set.")
return None
return next(self._regions.irange()) |
<SYSTEM_TASK:>
Return the next immediate address that is inside any of the regions.
<END_TASK>
<USER_TASK:>
Description:
def _next_address_in_regions(self, address):
"""
Return the next immediate address that is inside any of the regions.
:param int address: The address to start scanning.
:return: The next address that is inside one of the memory regions.
:rtype: int
""" |
if self._inside_regions(address):
return address
try:
return next(self._regions.irange(minimum=address, reverse=False))
except StopIteration:
return None |
<SYSTEM_TASK:>
Find the next address that we haven't processed
<END_TASK>
<USER_TASK:>
Description:
def _next_unscanned_addr(self, alignment=None):
"""
Find the next address that we haven't processed
:param alignment: Ensures the returned address is aligned to this number
:return: An address to process next, or None if all addresses have been processed
""" |
# TODO: Take care of those functions that are already generated
if self._next_addr is None:
self._next_addr = self._get_min_addr()
curr_addr = self._next_addr
else:
curr_addr = self._next_addr + 1
if not self._inside_regions(curr_addr):
curr_addr = self._next_address_in_regions(curr_addr)
if curr_addr is None:
l.debug("All addresses within memory regions have been scanned.")
return None
if self._seg_list.has_blocks:
curr_addr = self._seg_list.next_free_pos(curr_addr)
if alignment is not None:
if curr_addr % alignment > 0:
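# round curr_addr up to the next multiple of alignment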
curr_addr = curr_addr - (curr_addr % alignment) + alignment
# Make sure curr_addr exists in binary
accepted = False
for start, end in self._regions.items():
if start <= curr_addr < end:
# accept
accepted = True
break
if curr_addr < start:
# accept, but we are skipping the gap
accepted = True
curr_addr = start
break
if not accepted:
# No memory available!
return None
self._next_addr = curr_addr
if self._inside_regions(curr_addr):
l.debug("Returning a new recon address: %#x", curr_addr)
return curr_addr
l.debug("%#x is beyond the ending point. Returning None.", curr_addr)
return None |
<SYSTEM_TASK:>
Some pre-job-processing tasks, like updating the progress bar.
<END_TASK>
<USER_TASK:>
Description:
def _pre_job_handling(self, job): # pylint:disable=arguments-differ
"""
Some pre-job-processing tasks, like updating the progress bar.
:param CFGJob job: The CFGJob instance.
:return: None
""" |
if self._low_priority:
self._release_gil(len(self._nodes), 20, 0.0001)
# a new entry is picked. Deregister it
self._deregister_analysis_job(job.func_addr, job)
if not self._inside_regions(job.addr):
obj = self.project.loader.find_object_containing(job.addr)
if obj is not None and isinstance(obj, self._cle_pseudo_objects):
pass
else:
# it's outside permitted regions. skip.
raise AngrSkipJobNotice()
# Do not calculate progress if the user doesn't care about the progress at all
if self._show_progressbar or self._progress_callback:
max_percentage_stage_1 = 50.0
percentage = self._seg_list.occupied_size * max_percentage_stage_1 / self._regions_size
if percentage > max_percentage_stage_1:
percentage = max_percentage_stage_1
self._update_progress(percentage, cfg=self) |
<SYSTEM_TASK:>
Get all possible function addresses that are specified by the symbols in the binary
<END_TASK>
<USER_TASK:>
Description:
def _func_addrs_from_symbols(self):
"""
Get all possible function addresses that are specified by the symbols in the binary
:return: A set of addresses that are probably functions
:rtype: set
""" |
return {sym.rebased_addr for sym in self._binary.symbols if sym.is_function} |
<SYSTEM_TASK:>
Scan the entire program image for function prologues, and start code scanning at those positions
<END_TASK>
<USER_TASK:>
Description:
def _func_addrs_from_prologues(self):
"""
Scan the entire program image for function prologues, and start code scanning at those positions
:return: A list of possible function addresses
""" |
# Pre-compile all regexes
regexes = list()
for ins_regex in self.project.arch.function_prologs:
r = re.compile(ins_regex)
regexes.append(r)
# EDG says: I challenge anyone bothering to read this to come up with a better
# way to handle CPU modes that affect instruction decoding.
# Since the only one we care about is ARM/Thumb right now
# we have this gross hack. Sorry about that.
thumb_regexes = list()
if hasattr(self.project.arch, 'thumb_prologs'):
for ins_regex in self.project.arch.thumb_prologs:
# Thumb prologues are found at even addrs, but their actual addr is odd!
# Isn't that great?
r = re.compile(ins_regex)
thumb_regexes.append(r)
# Construct the binary blob first
unassured_functions = [ ]
for start_, bytes_ in self._binary.memory.backers():
for regex in regexes:
# Match them!
for mo in regex.finditer(bytes_):
position = mo.start() + start_
if position % self.project.arch.instruction_alignment == 0:
mapped_position = AT.from_rva(position, self._binary).to_mva()
if self._addr_in_exec_memory_regions(mapped_position):
unassured_functions.append(mapped_position)
# HACK part 2: Yes, i really have to do this
for regex in thumb_regexes:
# Match them!
for mo in regex.finditer(bytes_):
position = mo.start() + start_
if position % self.project.arch.instruction_alignment == 0:
mapped_position = AT.from_rva(position, self._binary).to_mva()
if self._addr_in_exec_memory_regions(mapped_position):
unassured_functions.append(mapped_position+1)
l.info("Found %d functions with prologue scanning.", len(unassured_functions))
return unassured_functions |
<SYSTEM_TASK:>
Checks that addresses are in the correct segments and creates or updates
<END_TASK>
<USER_TASK:>
Description:
def _add_data_reference(self, irsb_addr, stmt_idx, insn_addr, data_addr, # pylint: disable=unused-argument
data_size=None, data_type=None):
"""
Checks that addresses are in the correct segments and creates or updates
MemoryData objects in _memory_data as appropriate, labelling them as segment
boundaries or by data type
:param int irsb_addr: irsb address
:param int stmt_idx: Statement ID
:param int insn_addr: instruction address
:param data_addr: address of data manipulated by statement
:param data_size: Size of the data being manipulated
:param str data_type: Type of the data being manipulated
:return: None
""" |
# Make sure data_addr is within a valid memory range
if not self.project.loader.find_segment_containing(data_addr):
# data might be at the end of some section or segment...
# let's take a look
for segment in self.project.loader.main_object.segments:
if segment.vaddr + segment.memsize == data_addr:
# yeah!
new_data = False
if data_addr not in self._memory_data:
data = MemoryData(data_addr, 0, MemoryDataSort.SegmentBoundary)
self._memory_data[data_addr] = data
new_data = True
if new_data or self._extra_cross_references:
cr = CodeReference(insn_addr, irsb_addr, stmt_idx, memory_data=self.model.memory_data[data_addr])
self.model.references.add_ref(cr)
break
return
new_data = False
if data_addr not in self._memory_data:
if data_type is not None and data_size is not None:
data = MemoryData(data_addr, data_size, data_type, max_size=data_size)
else:
data = MemoryData(data_addr, 0, MemoryDataSort.Unknown)
self._memory_data[data_addr] = data
new_data = True
if new_data or self._extra_cross_references:
cr = CodeReference(insn_addr, irsb_addr, stmt_idx, memory_data=self.model.memory_data[data_addr])
self.model.references.add_ref(cr)
self.insn_addr_to_memory_data[insn_addr] = self._memory_data[data_addr] |
<SYSTEM_TASK:>
Determine if the IRSB at the given address is a PLT stub. If it is, concretely execute the basic block to
<END_TASK>
<USER_TASK:>
Description:
def _resolve_plt(self, addr, irsb, indir_jump):
"""
Determine if the IRSB at the given address is a PLT stub. If it is, concretely execute the basic block to
resolve the jump target.
:param int addr: Address of the block.
:param irsb: The basic block.
:param IndirectJump indir_jump: The IndirectJump instance.
:return: True if the IRSB represents a PLT stub and we successfully resolved the target.
False otherwise.
:rtype: bool
""" |
# is the address identified by CLE as a PLT stub?
if self.project.loader.all_elf_objects:
# restrict this heuristics to ELF files only
if not any([ addr in obj.reverse_plt for obj in self.project.loader.all_elf_objects ]):
return False
# Make sure the IRSB has statements
if not irsb.has_statements:
irsb = self.project.factory.block(irsb.addr, size=irsb.size).vex
# try to resolve the jump target
simsucc = self.project.engines.default_engine.process(self._initial_state, irsb, force_addr=addr)
if len(simsucc.successors) == 1:
ip = simsucc.successors[0].ip
if ip._model_concrete is not ip:
target_addr = ip._model_concrete.value
if (self.project.loader.find_object_containing(target_addr, membership_check=False) is not
self.project.loader.main_object) \
or self.project.is_hooked(target_addr):
# resolved!
# Fill the IndirectJump object
indir_jump.resolved_targets.add(target_addr)
l.debug("Address %#x is resolved as a PLT entry, jumping to %#x", addr, target_addr)
return True
return False |
<SYSTEM_TASK:>
Shrink the size of a node in CFG.
<END_TASK>
<USER_TASK:>
Description:
def _shrink_node(self, node, new_size, remove_function=True):
"""
Shrink the size of a node in CFG.
:param CFGNode node: The CFGNode to shrink
:param int new_size: The new size of the basic block
:param bool remove_function: If there is a function starting at `node`, should we remove that function or not.
:return: None
""" |
# Generate the new node
new_node = CFGNode(node.addr, new_size, self.model,
function_address=None if remove_function else node.function_address,
instruction_addrs=[i for i in node.instruction_addrs
if node.addr <= i < node.addr + new_size
],
thumb=node.thumb,
byte_string=None if node.byte_string is None else node.byte_string[:new_size],
block_id=node.addr,
)
old_in_edges = self.graph.in_edges(node, data=True)
for src, _, data in old_in_edges:
self.graph.add_edge(src, new_node, **data)
successor_node_addr = node.addr + new_size
if successor_node_addr in self._nodes:
successor = self._nodes[successor_node_addr]
else:
successor_size = node.size - new_size
successor = CFGNode(successor_node_addr, successor_size, self.model,
function_address=successor_node_addr if remove_function else node.function_address,
instruction_addrs=[i for i in node.instruction_addrs if i >= node.addr + new_size],
thumb=node.thumb,
byte_string=None if node.byte_string is None else node.byte_string[new_size:]
)
self.graph.add_edge(new_node, successor, jumpkind='Ijk_Boring')
# if the node B already has resolved targets, we will skip all unresolvable successors when adding old out edges
# from node A to node B.
# this matters in cases where node B is resolved as a special indirect jump entry (like a PLT stub), but (node
# A + node B) wasn't properly resolved.
unresolvable_target_addrs = (self._unresolvable_jump_target_addr, self._unresolvable_call_target_addr)
has_resolved_targets = any([ node_.addr not in unresolvable_target_addrs
for node_ in self.graph.successors(successor) ]
)
old_out_edges = self.graph.out_edges(node, data=True)
for _, dst, data in old_out_edges:
if (has_resolved_targets and dst.addr not in unresolvable_target_addrs) or \
not has_resolved_targets:
self.graph.add_edge(successor, dst, **data)
# remove the old node from indices
if node.addr in self._nodes and self._nodes[node.addr] is node:
del self._nodes[node.addr]
if node.addr in self._nodes_by_addr and node in self._nodes_by_addr[node.addr]:
self._nodes_by_addr[node.addr].remove(node)
# remove the old node form the graph
self.graph.remove_node(node)
# add the new node to indices
self._nodes[new_node.addr] = new_node
self._nodes_by_addr[new_node.addr].append(new_node)
# the function starting at this point is probably totally incorrect
# hopefully a future call to `make_functions()` will correct everything
if node.addr in self.kb.functions:
del self.kb.functions[node.addr]
if not remove_function:
# add functions back
self._function_add_node(node, node.addr)
successor_node = self.model.get_any_node(successor_node_addr)
if successor_node and successor_node.function_address == node.addr:
# if there is absolutely no predecessors to successor_node, we'd like to add it as a new function
# so that it will not be left behind
if not list(self.graph.predecessors(successor_node)):
self._function_add_node(successor_node, successor_node_addr) |
<SYSTEM_TASK:>
Add an edge between nodes, or add the node by itself if it is an entry point
<END_TASK>
<USER_TASK:>
Description:
def _graph_add_edge(self, cfg_node, src_node, src_jumpkind, src_ins_addr, src_stmt_idx):
"""
Add an edge between nodes, or add the node by itself if it is an entry point
:param CFGNode cfg_node: node which is jumped to
:param CFGNode src_node: node which is jumped from; None if it is an entry point
:param str src_jumpkind: what type of jump the edge takes
:param int or str src_stmt_idx: source statement ID
:return: None
""" |
if src_node is None:
self.graph.add_node(cfg_node)
else:
self.graph.add_edge(src_node, cfg_node, jumpkind=src_jumpkind, ins_addr=src_ins_addr,
stmt_idx=src_stmt_idx) |
<SYSTEM_TASK:>
Adds a node to the function manager, converting the address to a CodeNode if
<END_TASK>
<USER_TASK:>
Description:
def _function_add_node(self, cfg_node, function_addr):
"""
Adds a node to the function manager, converting the address to a CodeNode if
possible
:param CFGNode cfg_node: A CFGNode instance.
:param int function_addr: Address of the current function.
:return: None
""" |
snippet = self._to_snippet(cfg_node=cfg_node)
self.kb.functions._add_node(function_addr, snippet) |
<SYSTEM_TASK:>
Add a transition edge to the function transition map.
<END_TASK>
<USER_TASK:>
Description:
def _function_add_transition_edge(self, dst_addr, src_node, src_func_addr, to_outside=False, dst_func_addr=None,
stmt_idx=None, ins_addr=None):
"""
Add a transition edge to the function transition map.
:param int dst_addr: Address that the control flow transits to.
:param CFGNode src_node: The source node that the control flow transits from.
:param int src_func_addr: Function address.
:return: True if the edge is correctly added. False if any exception occurred (for example, the target address
does not exist)
:rtype: bool
""" |
try:
target_node = self._nodes.get(dst_addr, None)
if target_node is None:
target_snippet = self._to_snippet(addr=dst_addr, base_state=self._base_state)
else:
target_snippet = self._to_snippet(cfg_node=target_node)
if src_node is None:
# Add this basic block into the function manager
self.kb.functions._add_node(src_func_addr, target_snippet)
else:
src_snippet = self._to_snippet(cfg_node=src_node)
if not to_outside:
self.kb.functions._add_transition_to(src_func_addr, src_snippet, target_snippet, stmt_idx=stmt_idx,
ins_addr=ins_addr
)
else:
self.kb.functions._add_outside_transition_to(src_func_addr, src_snippet, target_snippet,
to_function_addr=dst_func_addr,
stmt_idx=stmt_idx, ins_addr=ins_addr
)
return True
except (SimMemoryError, SimEngineError):
return False |
<SYSTEM_TASK:>
Add a call edge to the function transition map.
<END_TASK>
<USER_TASK:>
Description:
def _function_add_call_edge(self, addr, src_node, function_addr, syscall=False, stmt_idx=None, ins_addr=None):
"""
Add a call edge to the function transition map.
:param int addr: Address that is being called (callee).
:param CFGNode src_node: The source CFG node (caller).
:param int function_addr: Function address.
:param bool syscall: If this is a call to a syscall or not.
:param int or str stmt_idx: Statement ID of this call.
:param int or None ins_addr: Instruction address of this call.
:return: True if the edge is added. False if any exception occurred.
:rtype: bool
""" |
try:
if src_node is None:
self.kb.functions._add_node(function_addr, addr, syscall=syscall)
else:
src_snippet = self._to_snippet(cfg_node=src_node)
return_to_outside = False
ret_snippet = None
self.kb.functions._add_call_to(function_addr, src_snippet, addr, ret_snippet, syscall=syscall,
stmt_idx=stmt_idx, ins_addr=ins_addr,
return_to_outside=return_to_outside,
)
return True
except (SimMemoryError, SimEngineError):
return False |
<SYSTEM_TASK:>
Generate CodeNodes for target and source; if there is no source node, add a node
<END_TASK>
<USER_TASK:>
Description:
def _function_add_fakeret_edge(self, addr, src_node, src_func_addr, confirmed=None):
"""
Generate CodeNodes for target and source; if there is no source node, add a node
for the function, otherwise create a fake-return edge in the function manager
:param int addr: target address
:param angr.analyses.CFGNode src_node: source node
:param int src_func_addr: address of function
:param confirmed: used as attribute on eventual digraph
:return: None
""" |
target_node = self._nodes.get(addr, None)
if target_node is None:
target_snippet = self._to_snippet(addr=addr, base_state=self._base_state)
else:
target_snippet = self._to_snippet(cfg_node=target_node)
if src_node is None:
self.kb.functions._add_node(src_func_addr, target_snippet)
else:
src_snippet = self._to_snippet(cfg_node=src_node)
self.kb.functions._add_fakeret_to(src_func_addr, src_snippet, target_snippet, confirmed=confirmed) |
<SYSTEM_TASK:>
Generate a CodeNode for the target address and register it with the function
<END_TASK>
<USER_TASK:>
Description:
def _function_add_return_site(self, addr, function_addr):
"""
Generate a CodeNode for the target address and register it with the function
manager as a return site
:param int addr: target address
:param int function_addr: address of function
:return: None
""" |
try:
target = self._to_snippet(self._nodes[addr])
except KeyError:
target = addr
self.kb.functions._add_return_from(function_addr, target) |
<SYSTEM_TASK:>
Generate a CodeNode for return_to_addr and add a return edge for the function
<END_TASK>
<USER_TASK:>
Description:
def _function_add_return_edge(self, return_from_addr, return_to_addr, function_addr):
"""
Generate a CodeNode for return_to_addr and add a return edge for the function
to the function manager
:param int return_from_addr: address the return comes from
:param int return_to_addr: address the return is made to
:param int function_addr: address of function
:return: None
""" |
return_to_node = self._nodes.get(return_to_addr, None)
if return_to_node is None:
return_to_snippet = self._to_snippet(addr=return_to_addr, base_state=self._base_state)
to_outside = False
else:
return_to_snippet = self._to_snippet(cfg_node=return_to_node)
to_outside = return_to_node.function_address != function_addr
self.kb.functions._add_return_from_call(function_addr, return_from_addr, return_to_snippet,
to_outside=to_outside) |
<SYSTEM_TASK:>
At the beginning of the basic block, we check if the first instruction stores the LR register onto the stack.
<END_TASK>
<USER_TASK:>
Description:
def _arm_track_lr_on_stack(self, addr, irsb, function):
"""
At the beginning of the basic block, we check if the first instruction stores the LR register onto the stack.
If it does, we calculate the offset of that store, and record the offset in function.info.
For instance, here is the disassembly of a THUMB mode function:
000007E4 STR.W LR, [SP,#var_4]!
000007E8 MOV R2, R1
000007EA SUB SP, SP, #0xC
000007EC MOVS R1, #0
...
00000800 ADD SP, SP, #0xC
00000802 LDR.W PC, [SP+4+var_4],#4
The very last basic block has a jumpkind of Ijk_Boring, which is because VEX cannot do such complicated analysis
to determine the real jumpkind.
As we can see, instruction 7e4h stores LR at [sp-4], and at the end of this function, instruction 802 loads LR
from [sp], then increments sp by 4. We execute the first instruction, and track the following things:
- if the value from register LR is stored onto the stack.
- the difference between the offset of the LR store on stack, and the SP after the store.
If at the end of the function, the LR is read out from the stack at the exact same stack offset, we will change
the jumpkind of the final IRSB to Ijk_Ret.
This method can be enabled by setting "ret_jumpkind_heuristics", which is an architecture-specific option on
ARM, to True.
:param int addr: Address of the basic block.
:param pyvex.IRSB irsb: The basic block object.
:param Function function: The function instance.
:return: None
""" |
if irsb.statements is None:
return
if 'lr_saved_on_stack' in function.info:
return
# if it does, we log it down to the Function object.
lr_offset = self.project.arch.registers['lr'][0]
sp_offset = self.project.arch.sp_offset
initial_sp = 0x7fff0000
initial_lr = 0xabcdef
tmps = {}
# pylint:disable=too-many-nested-blocks
for stmt in irsb.statements:
if isinstance(stmt, pyvex.IRStmt.IMark):
if stmt.addr + stmt.delta != addr:
break
elif isinstance(stmt, pyvex.IRStmt.WrTmp):
data = stmt.data
if isinstance(data, pyvex.IRExpr.Get):
if data.offset == sp_offset:
tmps[stmt.tmp] = initial_sp
elif data.offset == lr_offset:
tmps[stmt.tmp] = initial_lr
elif isinstance(data, pyvex.IRExpr.Binop):
if data.op == 'Iop_Sub32':
arg0, arg1 = data.args
if isinstance(arg0, pyvex.IRExpr.RdTmp) and isinstance(arg1, pyvex.IRExpr.Const):
if arg0.tmp in tmps:
tmps[stmt.tmp] = tmps[arg0.tmp] - arg1.con.value
elif isinstance(stmt, (pyvex.IRStmt.Store, pyvex.IRStmt.StoreG)):
data = stmt.data
storing_lr = False
if isinstance(data, pyvex.IRExpr.RdTmp):
if data.tmp in tmps:
val = tmps[data.tmp]
if val == initial_lr:
# we are storing LR to somewhere
storing_lr = True
if storing_lr:
if isinstance(stmt.addr, pyvex.IRExpr.RdTmp):
if stmt.addr.tmp in tmps:
storing_addr = tmps[stmt.addr.tmp]
function.info['lr_saved_on_stack'] = True
function.info['lr_on_stack_offset'] = storing_addr - initial_sp
break
if 'lr_saved_on_stack' not in function.info:
function.info['lr_saved_on_stack'] = False |
<SYSTEM_TASK:>
Deprecated alias for `solver`
<END_TASK>
<USER_TASK:>
Description:
def se(self):
"""
Deprecated alias for `solver`
""" |
global _complained_se
if not _complained_se:
_complained_se = True
l.critical("The name state.se is deprecated; please use state.solver.")
return self.get_plugin('solver') |
<SYSTEM_TASK:>
Get the concrete address of the instruction pointer, without triggering SimInspect breakpoints or generating
<END_TASK>
<USER_TASK:>
Description:
def addr(self):
"""
Get the concrete address of the instruction pointer, without triggering SimInspect breakpoints or generating
SimActions. An integer is returned, or an exception is raised if the instruction pointer is symbolic.
:return: an int
""" |
ip = self.regs._ip
if isinstance(ip, SootAddressDescriptor):
return ip
return self.solver.eval_one(self.regs._ip) |
<SYSTEM_TASK:>
Add some constraints to the state.
<END_TASK>
<USER_TASK:>
Description:
def add_constraints(self, *args, **kwargs):
"""
Add some constraints to the state.
You may pass in any number of symbolic booleans as variadic positional arguments.
""" |
if len(args) > 0 and isinstance(args[0], (list, tuple)):
raise Exception("Tuple or list passed to add_constraints!")
if o.TRACK_CONSTRAINTS in self.options and len(args) > 0:
if o.SIMPLIFY_CONSTRAINTS in self.options:
constraints = [ self.simplify(a) for a in args ]
else:
constraints = args
self._inspect('constraints', BP_BEFORE, added_constraints=constraints)
constraints = self._inspect_getattr("added_constraints", constraints)
added = self.solver.add(*constraints)
self._inspect('constraints', BP_AFTER)
# add actions for the added constraints
if o.TRACK_CONSTRAINT_ACTIONS in self.options:
for c in added:
sac = SimActionConstraint(self, c)
self.history.add_action(sac)
else:
# preserve the old action logic for when we don't track constraints (why?)
if (
'action' in kwargs and kwargs['action'] and
o.TRACK_CONSTRAINT_ACTIONS in self.options and len(args) > 0
):
for arg in args:
if self.solver.symbolic(arg):
sac = SimActionConstraint(self, arg)
self.history.add_action(sac)
if o.ABSTRACT_SOLVER in self.options and len(args) > 0:
for arg in args:
if self.solver.is_false(arg):
self._satisfiable = False
return
if self.solver.is_true(arg):
continue
# `is_true` and `is_false` does not use VSABackend currently (see commits 97a75366 and 2dfba73e in
# claripy). There is a chance that VSA backend can in fact handle it.
# Therefore we try to resolve it with VSABackend again
if claripy.backends.vsa.is_false(arg):
self._satisfiable = False
return
if claripy.backends.vsa.is_true(arg):
continue
# It's neither True or False. Let's try to apply the condition
# We take the argument, extract a list of constrained SIs out of it (if we could, of course), and
# then replace each original SI the intersection of original SI and the constrained one.
_, converted = self.solver.constraint_to_si(arg)
for original_expr, constrained_si in converted:
if not original_expr.variables:
l.error('Incorrect original_expression to replace in add_constraints(). '
'This is due to defects in VSA logics inside claripy. Please report '
'to Fish and he will fix it if he\'s free.')
continue
new_expr = constrained_si
self.registers.replace_all(original_expr, new_expr)
for _, region in self.memory.regions.items():
region.memory.replace_all(original_expr, new_expr)
l.debug("SimState.add_constraints: Applied to final state.")
elif o.SYMBOLIC not in self.options and len(args) > 0:
for arg in args:
if self.solver.is_false(arg):
self._satisfiable = False
return |
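A hedged usage sketch under the default symbolic options, assuming `state` is a SimState; constraints are passed variadically, never wrapped in one list:

x = state.solver.BVS("x", 8)
state.add_constraints(x > 0x40, x < 0x5b)   # two booleans, not [x > 0x40, x < 0x5b]
assert state.satisfiable()
assert state.solver.eval(x) in range(0x41, 0x5b)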
<SYSTEM_TASK:>
Whether the state's constraints are satisfiable
<END_TASK>
<USER_TASK:>
Description:
def satisfiable(self, **kwargs):
"""
Whether the state's constraints are satisfiable
""" |
if o.ABSTRACT_SOLVER in self.options or o.SYMBOLIC not in self.options:
extra_constraints = kwargs.pop('extra_constraints', ())
for e in extra_constraints:
if self.solver.is_false(e):
return False
return self._satisfiable
else:
return self.solver.satisfiable(**kwargs) |
<SYSTEM_TASK:>
Represent the basic block at this state's instruction pointer.
<END_TASK>
<USER_TASK:>
Description:
def block(self, *args, **kwargs):
"""
Represent the basic block at this state's instruction pointer.
Any arguments to `AngrObjectFactory.block` can be passed to this.
:return: A Block object describing the basic block of code at this point.
""" |
if not args and 'addr' not in kwargs:
kwargs['addr'] = self.addr
return self.project.factory.block(*args, backup_state=self, **kwargs) |
<SYSTEM_TASK:>
Merges this state with the other states. Returns the merging result, merged state, and the merge flag.
<END_TASK>
<USER_TASK:>
Description:
def merge(self, *others, **kwargs):
"""
Merges this state with the other states. Returns the merging result, merged state, and the merge flag.
:param states: the states to merge
:param merge_conditions: a tuple of the conditions under which each state holds
:param common_ancestor: a state that represents the common history between the states being merged. Usually it
is only available when EFFICIENT_STATE_MERGING is enabled, otherwise weak-refed states
might be dropped from state history instances.
:param plugin_whitelist: a list of plugin names that will be merged. If this option is given and is not None,
any plugin that is not inside this list will not be merged, and will be created as a
fresh instance in the new state.
:param common_ancestor_history:
a SimStateHistory instance that represents the common history between the states being
merged. This is to allow optimal state merging when EFFICIENT_STATE_MERGING is
disabled.
:return: (merged state, merge flag, a bool indicating if any merging occurred)
""" |
merge_conditions = kwargs.pop('merge_conditions', None)
common_ancestor = kwargs.pop('common_ancestor', None)
plugin_whitelist = kwargs.pop('plugin_whitelist', None)
common_ancestor_history = kwargs.pop('common_ancestor_history', None)
if len(kwargs) != 0:
raise ValueError("invalid arguments: %s" % kwargs.keys())
if merge_conditions is None:
# TODO: maybe make the length of this smaller? Maybe: math.ceil(math.log(len(others)+1, 2))
merge_flag = self.solver.BVS("state_merge_%d" % next(merge_counter), 16)
merge_values = range(len(others)+1)
merge_conditions = [ merge_flag == b for b in merge_values ]
else:
merge_conditions = [
(self.solver.true if len(mc) == 0 else self.solver.And(*mc)) for mc in merge_conditions
]
if len(set(o.arch.name for o in others)) != 1:
raise SimMergeError("Unable to merge due to different architectures.")
all_plugins = set(self.plugins.keys()) | set.union(*(set(o.plugins.keys()) for o in others))
if plugin_whitelist is not None:
all_plugins = all_plugins.intersection(set(plugin_whitelist))
merged = self.copy()
merging_occurred = False
# fix parent
merged.history.parent = self.history
# plugins
for p in all_plugins:
our_plugin = merged.plugins[p] if p in merged.plugins else None
their_plugins = [ (pl.plugins[p] if p in pl.plugins else None) for pl in others ]
plugin_classes = (
set([our_plugin.__class__]) | set(pl.__class__ for pl in their_plugins)
) - set([None.__class__])
if len(plugin_classes) != 1:
raise SimMergeError(
"There are differing plugin classes (%s) for plugin %s" % (plugin_classes, p)
)
plugin_class = plugin_classes.pop()
our_filled_plugin = our_plugin if our_plugin is not None else merged.register_plugin(
p, plugin_class()
)
their_filled_plugins = [
(tp if tp is not None else t.register_plugin(p, plugin_class()))
for t,tp in zip(others, their_plugins)
]
plugin_common_ancestor = (
common_ancestor.plugins[p] if
(common_ancestor is not None and p in common_ancestor.plugins) else
None
)
if plugin_common_ancestor is None and \
plugin_class is SimStateHistory and \
common_ancestor_history is not None:
plugin_common_ancestor = common_ancestor_history
plugin_state_merged = our_filled_plugin.merge(
their_filled_plugins, merge_conditions, common_ancestor=plugin_common_ancestor,
)
if plugin_state_merged:
l.debug('Merging occurred in %s', p)
merging_occurred = True
merged.add_constraints(merged.solver.Or(*merge_conditions))
return merged, merge_conditions, merging_occurred |
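A short usage sketch with illustrative state names: when no merge_conditions are given, the method invents a fresh 16-bit merge flag and one equality condition per input state:

# hypothetical: s0, s1, s2 are SimStates paused at the same program point
merged, conditions, occurred = s0.merge(s1, s2)
# conditions[i] pins the merge flag to i; asserting conditions[1] on `merged`
# re-selects the values that came from s1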
<SYSTEM_TASK:>
Returns the contents of a register but, if that register is symbolic,
<END_TASK>
<USER_TASK:>
Description:
def reg_concrete(self, *args, **kwargs):
"""
Returns the contents of a register but, if that register is symbolic,
raises a SimValueError.
""" |
e = self.registers.load(*args, **kwargs)
if self.solver.symbolic(e):
raise SimValueError("target of reg_concrete is symbolic!")
return self.solver.eval(e) |
<SYSTEM_TASK:>
Returns the contents of a memory but, if the contents are symbolic,
<END_TASK>
<USER_TASK:>
Description:
def mem_concrete(self, *args, **kwargs):
"""
Returns the contents of a memory but, if the contents are symbolic,
raises a SimValueError.
""" |
e = self.memory.load(*args, **kwargs)
if self.solver.symbolic(e):
raise SimValueError("target of mem_concrete is symbolic!")
return self.solver.eval(e) |
<SYSTEM_TASK:>
Push 'thing' to the stack, writing the thing to memory and adjusting the stack pointer.
<END_TASK>
<USER_TASK:>
Description:
def stack_push(self, thing):
"""
Push 'thing' to the stack, writing the thing to memory and adjusting the stack pointer.
""" |
# adjust sp; arch.stack_change is negative on descending-stack architectures, so this grows the stack
sp = self.regs.sp + self.arch.stack_change
self.regs.sp = sp
return self.memory.store(sp, thing, endness=self.arch.memory_endness) |
<SYSTEM_TASK:>
Pops from the stack and returns the popped thing. The length will be the architecture word size.
<END_TASK>
<USER_TASK:>
Description:
def stack_pop(self):
"""
Pops from the stack and returns the popped thing. The length will be the architecture word size.
""" |
sp = self.regs.sp
self.regs.sp = sp - self.arch.stack_change
return self.memory.load(sp, self.arch.bytes, endness=self.arch.memory_endness) |
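Note that arch.stack_change is negative on descending-stack architectures (e.g. -8 on AMD64), so push moves SP down before storing and pop reads before moving SP back up. A hedged round-trip, assuming an AMD64 SimState `state`:

state.stack_push(state.solver.BVV(0x1337, 64))
assert state.solver.eval(state.stack_pop()) == 0x1337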
<SYSTEM_TASK:>
Reads length bytes, at an offset into the stack.
<END_TASK>
<USER_TASK:>
Description:
def stack_read(self, offset, length, bp=False):
"""
Reads length bytes, at an offset into the stack.
:param offset: The offset from the stack pointer.
:param length: The number of bytes to read.
:param bp: If True, offset from the BP instead of the SP. Default: False.
""" |
sp = self.regs.bp if bp else self.regs.sp
return self.memory.load(sp+offset, length, endness=self.arch.memory_endness) |
<SYSTEM_TASK:>
Convert each stack value to a string
<END_TASK>
<USER_TASK:>
Description:
def _stack_values_to_string(self, stack_values):
"""
Convert each stack value to a string
:param stack_values: A list of values
:return: The converted string
""" |
strings = [ ]
for stack_value in stack_values:
if self.solver.symbolic(stack_value):
concretized_value = "SYMBOLIC - %s" % repr(stack_value)
else:
concretized_value = repr(stack_value)
strings.append(concretized_value)
return " .. ".join(strings) |
<SYSTEM_TASK:>
Concretizes a size argument, if necessary, to something that makes sense when allocating space. Here we just
<END_TASK>
<USER_TASK:>
Description:
def _conc_alloc_size(self, sim_size):
"""
Concretizes a size argument, if necessary, to something that makes sense when allocating space. Here we just
maximize its potential size up to the maximum variable size specified in the libc plugin.
TODO:
Further consideration of the tradeoffs of this approach is probably warranted. SimHeapPTMalloc especially makes
a lot of different concretization strategy assumptions, but this function handles one of the more important
problems that any heap implementation will face: how to decide the amount of space to allocate upon request for
a symbolic size. Either we do as we do here and silently constrain the amount returned to a default max value,
or we could add a path constraint to the state to prevent exploration of any paths that would have legitimately
occurred given a larger allocation size.
The first approach (the silent maximum) has its benefit in that the explored state space will not be
constrained. Sometimes this could work out, as when an allocation is returned that is smaller than requested but
which the program doesn't end up making full use of anyways. Alternatively, this lack of fidelity could cause
the program to overwrite other allocations made, since it should be able to assume the allocation is as large as
it requested it be.
The second approach (the path constraint) has its benefit in that no paths will be explored that *could* fail
when an allocation is made too small. On the other hand, as stated above, some of these paths might not have
failed anyways, and doing this causes us to lose the opportunity to explore those paths.
Perhaps these behaviors could be parameterized in the future?
""" |
if self.state.solver.symbolic(sim_size):
size = self.state.solver.max_int(sim_size)
if size > self.state.libc.max_variable_size:
l.warning("Allocation request of %d bytes exceeded maximum of %d bytes; allocating %d bytes",
size, self.state.libc.max_variable_size, size)
size = self.state.libc.max_variable_size
else:
size = self.state.solver.eval(sim_size)
return size |
<SYSTEM_TASK:>
The implementation here is simple: perform pattern matching against every architecture we support,
<END_TASK>
<USER_TASK:>
Description:
def _reconnoiter(self):
"""
The implementation here is simple: perform pattern matching against every architecture we support,
then take a vote.
""" |
# Retrieve the binary string of main binary
votes = defaultdict(int)
for arch in all_arches:
regexes = set()
if not arch.function_prologs:
continue
# TODO: BoyScout does not support Thumb-only / Cortex-M binaries yet.
for ins_regex in set(arch.function_prologs).union(arch.function_epilogs):
r = re.compile(ins_regex)
regexes.add(r)
for start_, data in self.project.loader.main_object.memory.backers():
for regex in regexes:
# Match them!
for mo in regex.finditer(data):
position = mo.start() + start_
if position % arch.instruction_alignment == 0:
votes[(arch.name, arch.memory_endness)] += 1
l.debug("%s %s hits %d times", arch.name, arch.memory_endness,
votes[(arch.name, arch.memory_endness)])
arch_name, endianness, hits = sorted([(k[0], k[1], v) for k, v in votes.items()], key=lambda x: x[2], reverse=True)[0]
if hits < self.cookiesize * 2:
# this cannot possibly be code
arch_name = "DATA"
endianness = ""
self.arch = arch_name
self.endianness = endianness
# Save it as well for debugging
self.votes = votes
l.debug("The architecture should be %s with %s", self.arch, self.endianness) |
<SYSTEM_TASK:>
Convert multiple supported forms of stack pointer representations into stack offsets.
<END_TASK>
<USER_TASK:>
Description:
def parse_stack_pointer(sp):
"""
Convert multiple supported forms of stack pointer representations into stack offsets.
:param sp: A stack pointer representation.
:return: A stack pointer offset.
:rtype: int
""" |
if isinstance(sp, int):
return sp
if isinstance(sp, StackBaseOffset):
return sp.offset
if isinstance(sp, BinaryOp):
op0, op1 = sp.operands
off0 = parse_stack_pointer(op0)
off1 = parse_stack_pointer(op1)
if sp.op == "Sub":
return off0 - off1
elif sp.op == "Add":
return off0 + off1
raise NotImplementedError("Unsupported stack pointer representation type %s." % type(sp)) |
<SYSTEM_TASK:>
Get variables that are defined at the specified block.
<END_TASK>
<USER_TASK:>
Description:
def get_variable_definitions(self, block_addr):
"""
Get variables that are defined at the specified block.
:param int block_addr: Address of the block.
:return: A set of variables.
""" |
if block_addr in self._outstates:
return self._outstates[block_addr].variables
return set() |
<SYSTEM_TASK:>
Checks if `phi_variable` is a phi variable, and if it contains `variable` as a sub-variable.
<END_TASK>
<USER_TASK:>
Description:
def _phi_node_contains(self, phi_variable, variable):
"""
Checks if `phi_variable` is a phi variable, and if it contains `variable` as a sub-variable.
:param phi_variable:
:param variable:
:return:
""" |
if self.variable_manager[self.function.addr].is_phi_variable(phi_variable):
return variable in self.variable_manager[self.function.addr].get_phi_subvariables(phi_variable)
return False |
<SYSTEM_TASK:>
Returns the lineage of histories leading up to `h`.
<END_TASK>
<USER_TASK:>
Description:
def lineage(self, h):
"""
Returns the lineage of histories leading up to `h`.
""" |
lineage = [ ]
predecessors = list(self._graph.predecessors(h))
while len(predecessors):
lineage.append(predecessors[0])
predecessors = list(self._graph.predecessors(predecessors[0]))
lineage.reverse()
return lineage |
<SYSTEM_TASK:>
Find the "most mergeable" set of states from those provided.
<END_TASK>
<USER_TASK:>
Description:
def most_mergeable(self, states):
        """
        Find the "most mergeable" set of states from those provided.

        :param states: a list of states
        :returns:      a tuple of: (a list of states to merge, those states' common history, a list of states to not merge yet)
        """ |
        histories = set(self.get_ref(s.history) for s in states)
        for n in networkx.algorithms.dfs_postorder_nodes(self._graph):
            intersection = histories.intersection(self.all_successors(n))
            if len(intersection) > 1:
                return (
                    [ s for s in states if self.get_ref(s.history) in intersection ],
                    n(),  # n is a weak reference to the common history; dereference it
                    [ s for s in states if self.get_ref(s.history) not in intersection ]
                )
        # didn't find any?
        return set(), None, states |
<SYSTEM_TASK:>
Check if the resolved target is valid.
<END_TASK>
<USER_TASK:>
Description:
def _is_target_valid(self, cfg, target):  # pylint:disable=no-self-use
        """
        Check if the resolved target is valid.

        :param cfg:        The CFG analysis object.
        :param int target: The target to check.
        :return:           True if the target is valid. False otherwise.
        :rtype:            bool
        """ |
        if self.base_state is not None:
            try:
                # check the executable bit (4) of the page permissions at the target
                if self.base_state.solver.is_true((self.base_state.memory.permissions(target) & 4) == 4):
                    return True
            except SimMemoryError:
                pass
            return False
        if cfg._addr_in_exec_memory_regions(target):
            # the jump target is executable
            return True
        if self.project.is_hooked(target):
            # the jump target is hooked
            return True
        return False |
<SYSTEM_TASK:>
Sets the size of the chunk, preserving any flags.
<END_TASK>
<USER_TASK:>
Description:
def set_size(self, size):
        """
        Sets the size of the chunk, preserving any flags.
        """ |
        raise NotImplementedError("%s not implemented for %s" % (self.set_size.__func__.__name__,
                                                                 self.__class__.__name__)) |
<SYSTEM_TASK:>
Returns the address of the payload of the chunk.
<END_TASK>
<USER_TASK:>
Description:
def data_ptr(self):
        """
        Returns the address of the payload of the chunk.
        """ |
        raise NotImplementedError("%s not implemented for %s" % (self.data_ptr.__func__.__name__,
                                                                 self.__class__.__name__)) |
<SYSTEM_TASK:>
Returns a concrete determination as to whether the chunk is free.
<END_TASK>
<USER_TASK:>
Description:
def is_free(self):
        """
        Returns a concrete determination as to whether the chunk is free.
        """ |
        raise NotImplementedError("%s not implemented for %s" % (self.is_free.__func__.__name__,
                                                                 self.__class__.__name__)) |
<SYSTEM_TASK:>
Returns the chunk following this chunk in the list of free chunks.
<END_TASK>
<USER_TASK:>
Description:
def fwd_chunk(self):
        """
        Returns the chunk following this chunk in the list of free chunks.
        """ |
        raise NotImplementedError("%s not implemented for %s" % (self.fwd_chunk.__func__.__name__,
                                                                 self.__class__.__name__)) |
<SYSTEM_TASK:>
Sets the chunk following this chunk in the list of free chunks.
<END_TASK>
<USER_TASK:>
Description:
def set_fwd_chunk(self, fwd):
        """
        Sets the chunk following this chunk in the list of free chunks.

        :param fwd: the chunk to follow this chunk in the list of free chunks
        """ |
        raise NotImplementedError("%s not implemented for %s" % (self.set_fwd_chunk.__func__.__name__,
                                                                 self.__class__.__name__)) |
<SYSTEM_TASK:>
Returns the chunk backward from this chunk in the list of free chunks.
<END_TASK>
<USER_TASK:>
Description:
def bck_chunk(self):
        """
        Returns the chunk backward from this chunk in the list of free chunks.
        """ |
        raise NotImplementedError("%s not implemented for %s" % (self.bck_chunk.__func__.__name__,
                                                                 self.__class__.__name__)) |
<SYSTEM_TASK:>
Sets the chunk backward from this chunk in the list of free chunks.
<END_TASK>
<USER_TASK:>
Description:
def set_bck_chunk(self, bck):
        """
        Sets the chunk backward from this chunk in the list of free chunks.

        :param bck: the chunk to precede this chunk in the list of free chunks
        """ |
        raise NotImplementedError("%s not implemented for %s" % (self.set_bck_chunk.__func__.__name__,
                                                                 self.__class__.__name__)) |
<SYSTEM_TASK:>
Returns an iterator over all the chunks in the heap.
<END_TASK>
<USER_TASK:>
Description:
def chunks(self):
        """
        Returns an iterator over all the chunks in the heap.
        """ |
        raise NotImplementedError("%s not implemented for %s" % (self.chunks.__func__.__name__,
                                                                 self.__class__.__name__)) |
<SYSTEM_TASK:>
Returns an iterator over all the allocated chunks in the heap.
<END_TASK>
<USER_TASK:>
Description:
def allocated_chunks(self):
        """
        Returns an iterator over all the allocated chunks in the heap.
        """ |
        raise NotImplementedError("%s not implemented for %s" % (self.allocated_chunks.__func__.__name__,
                                                                 self.__class__.__name__)) |
<SYSTEM_TASK:>
Returns an iterator over all the free chunks in the heap.
<END_TASK>
<USER_TASK:>
Description:
def free_chunks(self):
        """
        Returns an iterator over all the free chunks in the heap.
        """ |
        raise NotImplementedError("%s not implemented for %s" % (self.free_chunks.__func__.__name__,
                                                                 self.__class__.__name__)) |
<SYSTEM_TASK:>
Given a pointer to a user payload, return the chunk associated with that payload.
<END_TASK>
<USER_TASK:>
Description:
def chunk_from_mem(self, ptr):
        """
        Given a pointer to a user payload, return the chunk associated with that payload.

        :param ptr: a pointer to the base of a user payload in the heap
        :returns:   the associated heap chunk
        """ |
        raise NotImplementedError("%s not implemented for %s" % (self.chunk_from_mem.__func__.__name__,
                                                                 self.__class__.__name__)) |
<SYSTEM_TASK:>
Perform execution using any applicable engine. Enumerate the current engines and use the
<END_TASK>
<USER_TASK:>
Description:
def successors(self, state, addr=None, jumpkind=None, default_engine=False, procedure_engine=False,
               engines=None, **kwargs):
        """
        Perform execution using any applicable engine. Enumerate the current engines and use the
        first one that works. Engines are enumerated in order, specified by the ``order`` attribute.

        :param state:            The state to analyze
        :param addr:             optional, an address to execute at instead of the state's ip
        :param jumpkind:         optional, the jumpkind of the previous exit
        :param default_engine:   Whether we should only attempt to use the default engine (usually VEX)
        :param procedure_engine: Whether we should only attempt to use the procedure engine
        :param engines:          A list of engines to try to use, instead of the default.
                                 This list is expected to contain engine names or engine instances.

        Additional keyword arguments will be passed directly into each engine's process method.

        :return SimSuccessors:   A SimSuccessors object classifying the results of the run.
        """ |
        if addr is not None or jumpkind is not None:
            state = state.copy()
            if addr is not None:
                state.ip = addr
            if jumpkind is not None:
                state.history.jumpkind = jumpkind
        if default_engine and self.has_default_engine():
            engines = [self.default_engine]
        elif procedure_engine and self.has_procedure_engine():
            engines = [self.procedure_engine]
        elif engines is None:
            engines = (self.get_plugin(name) for name in self.order)
        else:
            engines = (self.get_plugin(e) if isinstance(e, str) else e for e in engines)
        for engine in engines:
            if engine.check(state, **kwargs):
                r = engine.process(state, **kwargs)
                if r.processed:
                    return r
        raise AngrExitError("All engines failed to execute!") |
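A hedged usage sketch of the dispatch, stepping one block through whichever engine claims the state first (the binary path is illustrative):

import angr

proj = angr.Project("/bin/true", auto_load_libs=False)
state = proj.factory.entry_state()
succ = proj.factory.successors(state)   # tries the engines in `order` until one processes
print(succ.flat_successors)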
<SYSTEM_TASK:>
Besides calling _get_next_addr, we will check if data locates at that address seems to be code or not. If not,
<END_TASK>
<USER_TASK:>
Description:
def _get_next_code_addr(self, initial_state):
        """
        Besides calling _get_next_addr, we will check whether the data located at that address seems to be code or
        not. If not, we'll move on and request the next valid address.
        """ |
        next_addr = self._get_next_addr_to_search()
        if next_addr is None:
            return None
        start_addr = next_addr
        sz = ""
        is_sz = True
        while is_sz:
            # Get data until we meet a 0
            while next_addr in initial_state.memory:
                try:
                    l.debug("Searching address %x", next_addr)
                    val = initial_state.mem_concrete(next_addr, 1)
                    if val == 0:
                        if len(sz) < 4:
                            is_sz = False
                        else:
                            reach_end = True
                        break
                    if chr(val) not in string.printable:
                        is_sz = False
                        break
                    sz += chr(val)
                    next_addr += 1
                except SimValueError:
                    # Not concretizable
                    l.debug("Address 0x%08x is not concretizable!", next_addr)
                    break
            if len(sz) > 0 and is_sz:
                l.debug("Got a string of %d chars: [%s]", len(sz), sz)
                # l.debug("Occpuy %x - %x", start_addr, start_addr + len(sz) + 1)
                self._seg_list.occupy(start_addr, len(sz) + 1)
                sz = ""
                next_addr = self._get_next_addr_to_search()
                if next_addr is None:
                    return None
                # l.debug("next addr = %x", next_addr)
                start_addr = next_addr
            if is_sz:
                next_addr += 1
        instr_alignment = initial_state.arch.instruction_alignment
        if start_addr % instr_alignment > 0:
            start_addr = start_addr - start_addr % instr_alignment + \
                         instr_alignment
        l.debug('_get_next_code_addr() returns 0x%x', start_addr)
        return start_addr |
<SYSTEM_TASK:>
Voting for the most possible base address.
<END_TASK>
<USER_TASK:>
Description:
def _solve_forbase_address(self, function_starts, functions):
        """
        Voting for the most possible base address.

        :param function_starts: A collection of function start addresses found under the loader's placeholder base.
        :param functions:       A collection of function addresses referenced by the binary.
        :returns:               The candidate base address with the most votes, or None if no candidate got enough.
        """ |
        pseudo_base_addr = self.project.loader.main_object.min_addr
        base_addr_ctr = { }
        for s in function_starts:
            for f in functions:
                base_addr = s - f + pseudo_base_addr
                ctr = 1
                for k in function_starts:
                    if k - base_addr + pseudo_base_addr in functions:
                        ctr += 1
                if ctr > 5:
                    base_addr_ctr[base_addr] = ctr
        if len(base_addr_ctr):
            base_addr, hits = sorted([(k, v) for k, v in base_addr_ctr.items()], key=lambda x: x[1], reverse=True)[0]
            return base_addr
        else:
            return None |
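To make the voting concrete, a condensed standalone variant of the same idea with made-up numbers (threshold dropped for brevity):

from collections import Counter

pseudo_base = 0x400000                                # loader's placeholder base
function_starts = {0x400100, 0x400180, 0x400240}      # prologues found under that base
functions = {0x8000100, 0x8000180, 0x8000240}         # absolute addresses referenced in code

votes = Counter()
for s in function_starts:
    for f in functions:
        base = s - f + pseudo_base                    # candidate rebasing delta
        votes[base] += sum(1 for k in function_starts
                           if k - base + pseudo_base in functions)
best, hits = votes.most_common(1)[0]
# the winning delta maps every found prologue onto a referenced function
assert all(k - best + pseudo_base in functions for k in function_starts)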
<SYSTEM_TASK:>
Perform a full code scan on the target binary.
<END_TASK>
<USER_TASK:>
Description:
def _full_code_scan(self):
        """
        Perform a full code scan on the target binary.
        """ |
        # We gotta time this function
        start_time = datetime.now()
        traced_address = set()
        self.functions = set()
        self.call_map = networkx.DiGraph()
        self.cfg = networkx.DiGraph()
        initial_state = self.project.factory.blank_state(mode="fastpath")
        initial_options = initial_state.options - {o.TRACK_CONSTRAINTS} - o.refs
        initial_options |= {o.SUPER_FASTPATH}
        # initial_options.remove(o.COW_STATES)
        initial_state.options = initial_options
        # Sadly, not all calls to functions are explicitly made by call
        # instruction - they could be a jmp or b, or something else. So we
        # should record all exits from a single function, and then add
        # necessary calling edges in our call map during the post-processing
        # phase.
        function_exits = defaultdict(set)
        widgets = [progressbar.Percentage(),
                   ' ',
                   progressbar.Bar(marker=progressbar.RotatingMarker()),
                   ' ',
                   progressbar.Timer(),
                   ' ',
                   progressbar.ETA()
                   ]
        pb = progressbar.ProgressBar(widgets=widgets, maxval=10000 * 100).start()
        while True:
            next_addr = self._get_next_code_addr(initial_state)
            percentage = self._seg_list.occupied_size * 100.0 / self._valid_memory_region_size
            if percentage > 100.0:
                percentage = 100.0
            pb.update(percentage * 10000)
            if next_addr is not None:
                l.info("Analyzing %xh, progress %0.04f%%", next_addr, percentage)
            else:
                l.info('No more addr to analyze. Progress %0.04f%%', percentage)
                break
            self.call_map.add_node(next_addr)
            self._scan_code(traced_address, function_exits, initial_state, next_addr)
        pb.finish()
        end_time = datetime.now()
        l.info("A full code scan takes %d seconds.", (end_time - start_time).seconds) |
<SYSTEM_TASK:>
Generate a sif file from the call map
<END_TASK>
<USER_TASK:>
Description:
def genenare_callmap_sif(self, filepath):
        """
        Generate a sif file from the call map.

        :param filepath: Path of the sif file to write.
        """ |
        graph = self.call_map
        if graph is None:
            raise AngrGirlScoutError('Please generate the call graph first.')
        # open in text mode: we are writing formatted strings, not bytes
        with open(filepath, "w") as f:
            for src, dst in graph.edges():
                f.write("0x%x\tDirectEdge\t0x%x\n" % (src, dst)) |
<SYSTEM_TASK:>
Reloads the solver. Useful when changing solver options.
<END_TASK>
<USER_TASK:>
Description:
def reload_solver(self, constraints=None):
        """
        Reloads the solver. Useful when changing solver options.

        :param list constraints: A new list of constraints to use in the reloaded solver instead of the current one
        """ |
        if constraints is None:
            constraints = self._solver.constraints
        self._stored_solver = None
        self._solver.add(constraints) |
<SYSTEM_TASK:>
Iterate over all variables for which their tracking key is a prefix of the values provided.
<END_TASK>
<USER_TASK:>
Description:
def get_variables(self, *keys):
        """
        Iterate over all variables for which their tracking key is a prefix of the values provided.

        Elements are a tuple, the first element is the full tracking key, the second is the symbol.

        >>> list(s.solver.get_variables('mem'))
        [(('mem', 0x1000), <BV64 mem_1000_4_64>), (('mem', 0x1008), <BV64 mem_1008_5_64>)]

        >>> list(s.solver.get_variables('file'))
        [(('file', 1, 0), <BV8 file_1_0_6_8>), (('file', 1, 1), <BV8 file_1_1_7_8>), (('file', 2, 0), <BV8 file_2_0_8_8>)]

        >>> list(s.solver.get_variables('file', 2))
        [(('file', 2, 0), <BV8 file_2_0_8_8>)]

        >>> list(s.solver.get_variables())
        [(('mem', 0x1000), <BV64 mem_1000_4_64>), (('mem', 0x1008), <BV64 mem_1008_5_64>), (('file', 1, 0), <BV8 file_1_0_6_8>), (('file', 1, 1), <BV8 file_1_1_7_8>), (('file', 2, 0), <BV8 file_2_0_8_8>)]
        """ |
        for k, v in self.eternal_tracked_variables.items():
            if len(k) >= len(keys) and all(x == y for x, y in zip(keys, k)):
                yield k, v
        for k, v in self.temporal_tracked_variables.items():
            if k[-1] is None:
                continue
            if len(k) >= len(keys) and all(x == y for x, y in zip(keys, k)):
                yield k, v |
<SYSTEM_TASK:>
Register a value with the variable tracking system
<END_TASK>
<USER_TASK:>
Description:
def register_variable(self, v, key, eternal=True):
        """
        Register a value with the variable tracking system.

        :param v:       The BVS to register
        :param key:     A tuple to register the variable under
        :param eternal: Whether this is an eternal variable, default True. If False, an incrementing counter will be
                        appended to the key.
        """ |
        if type(key) is not tuple:
            raise TypeError("Variable tracking key must be a tuple")
        if eternal:
            self.eternal_tracked_variables[key] = v
        else:
            self.temporal_tracked_variables = dict(self.temporal_tracked_variables)
            ctrkey = key + (None,)
            ctrval = self.temporal_tracked_variables.get(ctrkey, 0) + 1
            self.temporal_tracked_variables[ctrkey] = ctrval
            tempkey = key + (ctrval,)
            self.temporal_tracked_variables[tempkey] = v |
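A hedged usage sketch in the style of the get_variables doctest above (state is any SimState):

x = claripy.BVS('x', 64)
state.solver.register_variable(x, ('mem', 0x1000))             # eternal: key used as-is
y = claripy.BVS('y', 8)
state.solver.register_variable(y, ('file', 1), eternal=False)  # temporal: counter appended
list(state.solver.get_variables('mem'))   # -> [(('mem', 0x1000), <BV64 x_...>)]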
<SYSTEM_TASK:>
Given an AST, iterate over all the keys of all the BVS leaves in the tree which are registered.
<END_TASK>
<USER_TASK:>
Description:
def describe_variables(self, v):
        """
        Given an AST, iterate over all the keys of all the BVS leaves in the tree which are registered.
        """ |
        reverse_mapping = {next(iter(var.variables)): k for k, var in self.eternal_tracked_variables.items()}
        reverse_mapping.update({next(iter(var.variables)): k
                                for k, var in self.temporal_tracked_variables.items() if k[-1] is not None})
        for var in v.variables:
            if var in reverse_mapping:
                yield reverse_mapping[var] |
<SYSTEM_TASK:>
Creates or gets a Claripy solver, based on the state options.
<END_TASK>
<USER_TASK:>
Description:
def _solver(self):
        """
        Creates or gets a Claripy solver, based on the state options.
        """ |
        if self._stored_solver is not None:
            return self._stored_solver
        track = o.CONSTRAINT_TRACKING_IN_SOLVER in self.state.options
        approximate_first = o.APPROXIMATE_FIRST in self.state.options
        if o.STRINGS_ANALYSIS in self.state.options:
            if 'smtlib_cvc4' in backend_manager.backends._backends_by_name:
                our_backend = backend_manager.backends.smtlib_cvc4
            elif 'smtlib_z3' in backend_manager.backends._backends_by_name:
                our_backend = backend_manager.backends.smtlib_z3
            elif 'smtlib_abc' in backend_manager.backends._backends_by_name:
                our_backend = backend_manager.backends.smtlib_abc
            else:
                raise ValueError("Could not find suitable string solver!")
            if o.COMPOSITE_SOLVER in self.state.options:
                self._stored_solver = claripy.SolverComposite(
                    template_solver_string=claripy.SolverCompositeChild(backend=our_backend, track=track)
                )
        elif o.ABSTRACT_SOLVER in self.state.options:
            self._stored_solver = claripy.SolverVSA()
        elif o.SYMBOLIC in self.state.options and o.REPLACEMENT_SOLVER in self.state.options:
            self._stored_solver = claripy.SolverReplacement(auto_replace=False)
        elif o.SYMBOLIC in self.state.options and o.CACHELESS_SOLVER in self.state.options:
            self._stored_solver = claripy.SolverCacheless(track=track)
        elif o.SYMBOLIC in self.state.options and o.COMPOSITE_SOLVER in self.state.options:
            self._stored_solver = claripy.SolverComposite(track=track)
        elif o.SYMBOLIC in self.state.options and any(opt in self.state.options for opt in o.approximation):
            self._stored_solver = claripy.SolverHybrid(track=track, approximate_first=approximate_first)
        elif o.HYBRID_SOLVER in self.state.options:
            self._stored_solver = claripy.SolverHybrid(track=track, approximate_first=approximate_first)
        elif o.SYMBOLIC in self.state.options:
            self._stored_solver = claripy.Solver(track=track)
        else:
            self._stored_solver = claripy.SolverConcrete()
        return self._stored_solver |
<SYSTEM_TASK:>
Evaluate an expression, using the solver if necessary. Returns AST objects.
<END_TASK>
<USER_TASK:>
Description:
def eval_to_ast(self, e, n, extra_constraints=(), exact=None):
        """
        Evaluate an expression, using the solver if necessary. Returns AST objects.

        :param e:                 the expression
        :param n:                 the number of desired solutions
        :param extra_constraints: extra constraints to apply to the solver
        :param exact:             if False, returns approximate solutions
        :return:                  a tuple of the solutions, in the form of claripy AST nodes
        :rtype:                   tuple
        """ |
        return self._solver.eval_to_ast(e, n, extra_constraints=self._adjust_constraint_list(extra_constraints), exact=exact) |
<SYSTEM_TASK:>
Evaluate an expression, using the solver if necessary. Returns primitives.
<END_TASK>
<USER_TASK:>
Description:
def _eval(self, e, n, extra_constraints=(), exact=None):
        """
        Evaluate an expression, using the solver if necessary. Returns primitives.

        :param e:                 the expression
        :param n:                 the number of desired solutions
        :param extra_constraints: extra constraints to apply to the solver
        :param exact:             if False, returns approximate solutions
        :return:                  a tuple of the solutions, in the form of Python primitives
        :rtype:                   tuple
        """ |
        return self._solver.eval(e, n, extra_constraints=self._adjust_constraint_list(extra_constraints), exact=exact) |
<SYSTEM_TASK:>
Return the maximum value of expression `e`.
<END_TASK>
<USER_TASK:>
Description:
def max(self, e, extra_constraints=(), exact=None):
        """
        Return the maximum value of expression `e`.

        :param e:                 expression (an AST) to evaluate
        :param extra_constraints: extra constraints (as ASTs) to add to the solver for this solve
        :param exact:             if False, return approximate solutions.
        :return:                  the maximum possible value of e (backend object)
        """ |
        if exact is False and o.VALIDATE_APPROXIMATIONS in self.state.options:
            ar = self._solver.max(e, extra_constraints=self._adjust_constraint_list(extra_constraints), exact=False)
            er = self._solver.max(e, extra_constraints=self._adjust_constraint_list(extra_constraints))
            assert er <= ar  # an approximate maximum must never undershoot the exact one
            return ar
        return self._solver.max(e, extra_constraints=self._adjust_constraint_list(extra_constraints), exact=exact) |
<SYSTEM_TASK:>
Add some constraints to the solver.
<END_TASK>
<USER_TASK:>
Description:
def add(self, *constraints):
        """
        Add some constraints to the solver.

        :param constraints: Pass any constraints that you want to add (ASTs) as varargs.
        """ |
        cc = self._adjust_constraint_list(constraints)
        return self._solver.add(cc) |
<SYSTEM_TASK:>
Casts a solution for the given expression to type `cast_to`.
<END_TASK>
<USER_TASK:>
Description:
def _cast_to(e, solution, cast_to):
        """
        Casts a solution for the given expression to type `cast_to`.

        :param e:          The expression `solution` is a solution for
        :param solution:   The solution to be cast
        :param cast_to:    The type `solution` should be cast to. Must be one of the currently supported types (bytes|int)
        :raise ValueError: If cast_to is a currently unsupported cast target.
        :return:           The value of `solution` cast to type `cast_to`
        """ |
        if cast_to is None:
            return solution
        if type(solution) is bool:
            if cast_to is bytes:
                return bytes([int(solution)])
            elif cast_to is int:
                return int(solution)
        elif type(solution) is float:
            solution = _concrete_value(claripy.FPV(solution, claripy.fp.FSort.from_size(len(e))).raw_to_bv())
        if cast_to is bytes:
            if len(e) == 0:
                return b""
            return binascii.unhexlify('{:x}'.format(solution).zfill(len(e)//4))
        if cast_to is not int:
            raise ValueError("cast_to parameter {!r} is not a valid cast target, currently supported are only int and bytes!".format(cast_to))
        return solution |
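The casting is normally reached through the public eval interface; a hedged example:

bv = claripy.BVV(0x41424344, 32)
state.solver.eval(bv, cast_to=bytes)   # -> b'ABCD' (the value's bytes, most significant first)
state.solver.eval(bv, cast_to=int)     # -> 0x41424344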
<SYSTEM_TASK:>
Evaluate an expression, using the solver if necessary. Returns primitives as specified by the `cast_to`
<END_TASK>
<USER_TASK:>
Description:
def eval_upto(self, e, n, cast_to=None, **kwargs):
        """
        Evaluate an expression, using the solver if necessary. Returns primitives as specified by the `cast_to`
        parameter. Only certain primitives are supported, check the implementation of `_cast_to` to see which ones.

        :param e:                 the expression
        :param n:                 the number of desired solutions
        :param extra_constraints: extra constraints to apply to the solver
        :param exact:             if False, returns approximate solutions
        :param cast_to:           A type to cast the resulting values to
        :return:                  a tuple of the solutions, in the form of Python primitives
        :rtype:                   tuple
        """ |
        concrete_val = _concrete_value(e)
        if concrete_val is not None:
            return [self._cast_to(e, concrete_val, cast_to)]
        cast_vals = [self._cast_to(e, v, cast_to) for v in self._eval(e, n, **kwargs)]
        if len(cast_vals) == 0:
            raise SimUnsatError('Not satisfiable: %s, expected up to %d solutions' % (e.shallow_repr(), n))
        return cast_vals |
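For example, assuming a state with a lightly constrained symbol:

x = claripy.BVS('x', 8)
state.solver.add(x >= 0x61, x <= 0x63)
state.solver.eval_upto(x, 10)                  # -> [0x61, 0x62, 0x63] (order not guaranteed)
state.solver.eval_upto(x, 10, cast_to=bytes)   # -> [b'a', b'b', b'c']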
<SYSTEM_TASK:>
Evaluate an expression to get the only possible solution. Errors if either no or more than one solution is
<END_TASK>
<USER_TASK:>
Description:
def eval_one(self, e, **kwargs):
        """
        Evaluate an expression to get the only possible solution. Errors if there is no solution or more than one
        solution. A kwarg parameter `default` can be specified to be returned instead of failure!

        :param e:       the expression to get a solution for
        :param default: A value can be passed as a kwarg here. It will be returned in case of failure.
        :param kwargs:  Any additional kwargs will be passed down to `eval_upto`
        :raise SimUnsatError:  if no solution could be found satisfying the given constraints
        :raise SimValueError:  if more than one solution was found to satisfy the given constraints
        :return:        The value for `e`
        """ |
        try:
            return self.eval_exact(e, 1, **{k: v for (k, v) in kwargs.items() if k != 'default'})[0]
        except (SimUnsatError, SimValueError, SimSolverModeError):
            if 'default' in kwargs:
                return kwargs.pop('default')
            raise |
<SYSTEM_TASK:>
Evaluate an expression to get at most `n` possible solutions. Errors if either none or more than `n` solutions
<END_TASK>
<USER_TASK:>
Description:
def eval_atmost(self, e, n, **kwargs):
        """
        Evaluate an expression to get at most `n` possible solutions. Errors if either none or more than `n` solutions
        are returned.

        :param e:      the expression to get a solution for
        :param n:      the inclusive upper limit on the number of solutions
        :param kwargs: Any additional kwargs will be passed down to `eval_upto`
        :raise SimUnsatError:  if no solution could be found satisfying the given constraints
        :raise SimValueError:  if more than `n` solutions were found to satisfy the given constraints
        :return:       The solutions for `e`
        """ |
        r = self.eval_upto(e, n+1, **kwargs)
        if len(r) > n:
            raise SimValueError("Concretized %d values (must be at most %d) in eval_atmost" % (len(r), n))
        return r |
<SYSTEM_TASK:>
Returns True if the expression `e` has only one solution by querying
<END_TASK>
<USER_TASK:>
Description:
def unique(self, e, **kwargs):
        """
        Returns True if the expression `e` has only one solution by querying
        the constraint solver. It does also add that unique solution to the
        solver's constraints.
        """ |
        if not isinstance(e, claripy.ast.Base):
            return True
        # if we don't want to do symbolic checks, assume symbolic variables are multivalued
        if o.SYMBOLIC not in self.state.options and self.symbolic(e):
            return False
        r = self.eval_upto(e, 2, **kwargs)
        if len(r) == 1:
            self.add(e == r[0])
            return True
        elif len(r) == 0:
            raise SimValueError("unsatness during uniqueness check(ness)")
        else:
            return False |
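For instance, a hedged sketch:

x = claripy.BVS('x', 8)
state.solver.add(x + 1 == 0x42)
state.solver.unique(x)   # -> True; also pins x == 0x41 in the constraints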
<SYSTEM_TASK:>
Returns True if the expression `e` is symbolic.
<END_TASK>
<USER_TASK:>
Description:
def symbolic(self, e):  # pylint:disable=R0201
        """
        Returns True if the expression `e` is symbolic.
        """ |
        if type(e) in (int, bytes, float, bool):
            return False
        return e.symbolic |
<SYSTEM_TASK:>
Simplifies `e`. If `e` is None, simplifies the constraints of this
<END_TASK>
<USER_TASK:>
Description:
def simplify(self, e=None):
        """
        Simplifies `e`. If `e` is None, simplifies the constraints of this
        state.
        """ |
        if e is None:
            return self._solver.simplify()
        elif isinstance(e, (int, float, bool)):
            return e
        elif isinstance(e, claripy.ast.Base) and e.op in claripy.operations.leaf_operations_concrete:
            return e
        elif isinstance(e, SimActionObject) and e.op in claripy.operations.leaf_operations_concrete:
            return e.ast
        elif not isinstance(e, (SimActionObject, claripy.ast.Base)):
            return e
        else:
            return self._claripy_simplify(e) |
<SYSTEM_TASK:>
Add a preconstraint that ``variable == value`` to the state.
<END_TASK>
<USER_TASK:>
Description:
def preconstrain(self, value, variable):
        """
        Add a preconstraint that ``variable == value`` to the state.

        :param value:    The concrete value. Can be a bitvector or a bytestring or an integer.
        :param variable: The BVS to preconstrain.
        """ |
        if not isinstance(value, claripy.ast.Base):
            value = self.state.solver.BVV(value, len(variable))
        elif value.op != 'BVV':
            raise ValueError("Passed a value to preconstrain that was not a BVV or a string")
        if variable.op not in claripy.operations.leaf_operations:
            l.warning("The variable %s to preconstrain is not a leaf AST. This may cause replacement failures in "
                      "the claripy replacement backend.", variable)
            l.warning("Please use a leaf AST as the preconstraining variable instead.")
        constraint = variable == value
        l.debug("Preconstraint: %s", constraint)
        # add the constraint for reconstraining later
        self.variable_map[next(iter(variable.variables))] = constraint
        self.preconstraints.append(constraint)
        if o.REPLACEMENT_SOLVER in self.state.options:
            self.state.solver._solver.add_replacement(variable, value, invalidate_cache=False)
        else:
            self.state.add_constraints(*self.preconstraints)
        if not self.state.satisfiable():
            l.warning("State went unsat while adding preconstraints") |
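A hedged usage sketch (the preconstrainer plugin is reachable as state.preconstrainer):

flag = claripy.BVS('flag', 64)
state.preconstrainer.preconstrain(b'CTF{...}', flag)   # pin the 8-byte symbol to concrete bytes
# the state now explores concretely along this input; the preconstraint can be removed later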
<SYSTEM_TASK:>
Preconstrain the contents of a file.
<END_TASK>
<USER_TASK:>
Description:
def preconstrain_file(self, content, simfile, set_length=False):
        """
        Preconstrain the contents of a file.

        :param content:    The content to preconstrain the file to. Can be a bytestring or a list thereof.
        :param simfile:    The actual simfile to preconstrain
        :param set_length: If True, the file's size is fixed to the length of `content` once preconstraining is done.
        """ |
        repair_entry_state_opts = False
        if o.TRACK_ACTION_HISTORY in self.state.options:
            repair_entry_state_opts = True
            self.state.options -= {o.TRACK_ACTION_HISTORY}
        if set_length:  # disable read bounds
            simfile.has_end = False
        pos = 0
        for write in content:
            if type(write) is int:
                write = bytes([write])
            data, length, pos = simfile.read(pos, len(write), disable_actions=True, inspect=False, short_reads=False)
            if not claripy.is_true(length == len(write)):
                raise AngrError("Bug in either SimFile or in usage of preconstrainer: couldn't get requested data from file")
            self.preconstrain(write, data)
        # if the file is a stream, reset its position
        if simfile.pos is not None:
            simfile.pos = 0
        if set_length:  # enable read bounds; size is now maximum size
            simfile.has_end = True
        if repair_entry_state_opts:
            self.state.options |= {o.TRACK_ACTION_HISTORY} |
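A hedged sketch of typical usage, fixing stdin to a known input before tracing:

state = proj.factory.full_init_state(stdin=angr.SimFileStream)
state.preconstrainer.preconstrain_file(b'AAAA\n', state.posix.stdin, set_length=True)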
<SYSTEM_TASK:>
Preconstrain the data in the flag page.
<END_TASK>
<USER_TASK:>
Description:
def preconstrain_flag_page(self, magic_content):
        """
        Preconstrain the data in the flag page.

        :param magic_content: The content of the magic page as a bytestring.
        """ |
        for m, v in zip(magic_content, self.state.cgc.flag_bytes):
            self.preconstrain(m, v) |
<SYSTEM_TASK:>
Remove the preconstraints from the state.
<END_TASK>
<USER_TASK:>
Description:
def remove_preconstraints(self, to_composite_solver=True, simplify=True):
        """
        Remove the preconstraints from the state.

        If you are using the zen plugin, this will also use that to filter the constraints.

        :param to_composite_solver: Whether to convert the replacement solver to a composite solver. You probably
                                    want this if you're switching from tracing to symbolic analysis.
        :param simplify:            Whether to simplify the resulting set of constraints.
        """ |
        if not self.preconstraints:
            return
        # cache key set creation
        precon_cache_keys = set()
        for con in self.preconstraints:
            precon_cache_keys.add(con.cache_key)
        # if we used the replacement solver we didn't add constraints we need to remove so keep all constraints
        if o.REPLACEMENT_SOLVER in self.state.options:
            new_constraints = self.state.solver.constraints
        else:
            new_constraints = list(filter(lambda x: x.cache_key not in precon_cache_keys,
                                          self.state.solver.constraints))
        if self.state.has_plugin("zen_plugin"):
            new_constraints = self.state.get_plugin("zen_plugin").filter_constraints(new_constraints)
        if to_composite_solver:
            self.state.options.discard(o.REPLACEMENT_SOLVER)
            self.state.options.add(o.COMPOSITE_SOLVER)
        # clear the solver's internal memory and replace it with the new solver options and constraints
        self.state.solver.reload_solver(new_constraints)
        if simplify:
            l.debug("simplifying solver...")
            self.state.solver.simplify()
            l.debug("...simplification done") |
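A hedged sketch of the usual handoff from tracing to symbolic exploration:

# ... after driving the state along a concrete trace ...
state.preconstrainer.remove_preconstraints()   # drop the pinned input
# the input symbols are fully symbolic again; only path constraints remain,
# so the solver can now be queried for *new* inputs reaching this state
print(state.solver.constraints)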
<SYSTEM_TASK:>
Add a reference to a memory data object.
<END_TASK>
<USER_TASK:>
Description:
def add_ref(self, ref):
        """
        Add a reference to a memory data object.

        :param CodeReference ref: The reference.
        :return: None
        """ |
        self.refs[ref.insn_addr].append(ref)
        self.data_addr_to_ref[ref.memory_data.addr].append(ref) |
<SYSTEM_TASK:>
Get all CFGNodes whose address is the specified one.
<END_TASK>
<USER_TASK:>
Description:
def get_all_nodes(self, addr, is_syscall=None, anyaddr=False):
        """
        Get all CFGNodes whose address is the specified one.

        :param addr:       Address of the node
        :param is_syscall: True returns the syscall node, False returns the normal CFGNode, None returns both
        :param anyaddr:    If True, also match nodes whose address range covers `addr`, not just exact starts
        :return:           all CFGNodes
        """ |
        results = [ ]
        for cfg_node in self.graph.nodes():
            if cfg_node.addr == addr or (anyaddr and
                                         cfg_node.size is not None and
                                         cfg_node.addr <= addr < (cfg_node.addr + cfg_node.size)
                                         ):
                # only keep nodes whose syscall-ness matches the filter; None matches both
                if is_syscall is None or cfg_node.is_syscall == is_syscall:
                    results.append(cfg_node)
        return results |
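A hedged usage sketch against a recovered CFG (recent angr releases host these accessors on the model attribute; older releases expose them on the analysis object itself):

cfg = proj.analyses.CFGFast()
covering = cfg.model.get_all_nodes(0x401000, anyaddr=True)       # nodes covering the address
non_syscall = cfg.model.get_all_nodes(0x401000, is_syscall=False)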