text_prompt
stringlengths
100
17.7k
code_prompt
stringlengths
7
9.86k
<SYSTEM_TASK:> Get predecessors of a node in the control flow graph. <END_TASK> <USER_TASK:> Description: def get_predecessors(self, cfgnode, excluding_fakeret=True, jumpkind=None): """ Get predecessors of a node in the control flow graph. :param CFGNode cfgnode: The node. :param bool excluding_fakeret: True if you want to exclude all predecessors that is connected to the node with a fakeret edge. :param str or None jumpkind: Only return predecessors with the specified jumpkind. This argument will be ignored if set to None. :return: A list of predecessors :rtype: list """
if excluding_fakeret and jumpkind == 'Ijk_FakeRet': return [ ] if not excluding_fakeret and jumpkind is None: # fast path if cfgnode in self.graph: return list(self.graph.predecessors(cfgnode)) return [ ] predecessors = [] for pred, _, data in self.graph.in_edges([cfgnode], data=True): jk = data['jumpkind'] if jumpkind is not None: if jk == jumpkind: predecessors.append(pred) elif excluding_fakeret: if jk != 'Ijk_FakeRet': predecessors.append(pred) else: predecessors.append(pred) return predecessors
<SYSTEM_TASK:> Get successors of a node in the control flow graph. <END_TASK> <USER_TASK:> Description: def get_successors(self, node, excluding_fakeret=True, jumpkind=None): """ Get successors of a node in the control flow graph. :param CFGNode node: The node. :param bool excluding_fakeret: True if you want to exclude all successors that is connected to the node with a fakeret edge. :param str or None jumpkind: Only return successors with the specified jumpkind. This argument will be ignored if set to None. :return: A list of successors :rtype: list """
if jumpkind is not None: if excluding_fakeret and jumpkind == 'Ijk_FakeRet': return [ ] if not excluding_fakeret and jumpkind is None: # fast path if node in self.graph: return list(self.graph.successors(node)) return [ ] successors = [] for _, suc, data in self.graph.out_edges([node], data=True): jk = data['jumpkind'] if jumpkind is not None: if jumpkind == jk: successors.append(suc) elif excluding_fakeret: if jk != 'Ijk_FakeRet': successors.append(suc) else: successors.append(suc) return successors
<SYSTEM_TASK:> Get a list of tuples where the first element is the successor of the CFG node and the second element is the <END_TASK> <USER_TASK:> Description: def get_successors_and_jumpkind(self, node, excluding_fakeret=True): """ Get a list of tuples where the first element is the successor of the CFG node and the second element is the jumpkind of the successor. :param CFGNode node: The node. :param bool excluding_fakeret: True if you want to exclude all successors that are fall-through successors. :return: A list of successors and their corresponding jumpkinds. :rtype: list """
successors = [] for _, suc, data in self.graph.out_edges([node], data=True): if not excluding_fakeret or data['jumpkind'] != 'Ijk_FakeRet': successors.append((suc, data['jumpkind'])) return successors
<SYSTEM_TASK:> Get all predecessors of a specific node on the control flow graph. <END_TASK> <USER_TASK:> Description: def get_all_predecessors(self, cfgnode): """ Get all predecessors of a specific node on the control flow graph. :param CFGNode cfgnode: The CFGNode object :return: A list of predecessors in the CFG :rtype: list """
s = set() for child, parent in networkx.dfs_predecessors(self.graph, cfgnode).items(): s.add(child) s.add(parent) return list(s)
<SYSTEM_TASK:> Returns all nodes that has an out degree >= 2 <END_TASK> <USER_TASK:> Description: def get_branching_nodes(self): """ Returns all nodes that has an out degree >= 2 """
nodes = set() for n in self.graph.nodes(): if self.graph.out_degree(n) >= 2: nodes.add(n) return nodes
<SYSTEM_TASK:> Get the corresponding exit statement ID for control flow to reach destination block from source block. The exit <END_TASK> <USER_TASK:> Description: def get_exit_stmt_idx(self, src_block, dst_block): """ Get the corresponding exit statement ID for control flow to reach destination block from source block. The exit statement ID was put on the edge when creating the CFG. Note that there must be a direct edge between the two blocks, otherwise an exception will be raised. :return: The exit statement ID """
if not self.graph.has_edge(src_block, dst_block): raise AngrCFGError('Edge (%s, %s) does not exist in CFG' % (src_block, dst_block)) return self.graph[src_block][dst_block]['stmt_idx']
<SYSTEM_TASK:> Hook target for native function call returns. <END_TASK> <USER_TASK:> Description: def prepare_native_return_state(native_state): """ Hook target for native function call returns. Recovers and stores the return value from native memory and toggles the state, s.t. execution continues in the Soot engine. """
javavm_simos = native_state.project.simos ret_state = native_state.copy() # set successor flags ret_state.regs._ip = ret_state.callstack.ret_addr ret_state.scratch.guard = ret_state.solver.true ret_state.history.jumpkind = 'Ijk_Ret' # if available, lookup the return value in native memory ret_var = ret_state.callstack.invoke_return_variable if ret_var is not None: # get return symbol from native state native_cc = javavm_simos.get_native_cc() ret_symbol = native_cc.get_return_val(native_state).to_claripy() # convert value to java type if ret_var.type in ArchSoot.primitive_types: # return value has a primitive type # => we need to manually cast the return value to the correct size, as this # would be usually done by the java callee ret_value = javavm_simos.cast_primitive(ret_state, ret_symbol, to_type=ret_var.type) else: # return value has a reference type # => ret_symbol is a opaque ref # => lookup corresponding java reference ret_value = ret_state.jni_references.lookup(ret_symbol) else: ret_value = None # teardown return state SimEngineSoot.prepare_return_state(ret_state, ret_value) # finally, delete all local references ret_state.jni_references.clear_local_references() return [ret_state]
<SYSTEM_TASK:> Return a concretization of the contents of the file, as a flat bytestring. <END_TASK> <USER_TASK:> Description: def concretize(self, **kwargs): """ Return a concretization of the contents of the file, as a flat bytestring. """
size = self.state.solver.min(self._size, **kwargs) data = self.load(0, size) kwargs['cast_to'] = kwargs.get('cast_to', bytes) kwargs['extra_constraints'] = tuple(kwargs.get('extra_constraints', ())) + (self._size == size,) return self.state.solver.eval(data, **kwargs)
<SYSTEM_TASK:> Returns a list of the packets read or written as bytestrings. <END_TASK> <USER_TASK:> Description: def concretize(self, **kwargs): """ Returns a list of the packets read or written as bytestrings. """
lengths = [self.state.solver.eval(x[1], **kwargs) for x in self.content] kwargs['cast_to'] = bytes return [b'' if i == 0 else self.state.solver.eval(x[0][i*self.state.arch.byte_width-1:], **kwargs) for i, x in zip(lengths, self.content)]
<SYSTEM_TASK:> Write a packet to the stream. <END_TASK> <USER_TASK:> Description: def write(self, pos, data, size=None, events=True, **kwargs): """ Write a packet to the stream. :param int pos: The packet number to write in the sequence of the stream. May be None to append to the stream. :param data: The data to write, as a string or bitvector. :param size: The optional size to write. May be symbolic; must be constrained to at most the size of data. :return: The next packet to use after this """
if events: self.state.history.add_event('fs_write', filename=self.name, data=data, size=size, pos=pos) # sanity check on read/write modes if self.write_mode is None: self.write_mode = True elif self.write_mode is False: raise SimFileError("Cannot read and write to the same SimPackets") data = _deps_unpack(data)[0] if type(data) is bytes: data = claripy.BVV(data) if size is None: size = len(data) // self.state.arch.byte_width if isinstance(data, claripy.Bits) else len(data) if type(size) is int: size = self.state.solver.BVV(size, self.state.arch.bits) # sanity check on packet number and determine if data is already present if pos < 0: raise SimFileError("SimPacket.write(%d): Negative packet number?" % pos) elif pos > len(self.content): raise SimFileError("SimPacket.write(%d): Packet number is past frontier of %d?" % (pos, len(self.content))) elif pos != len(self.content): realdata, realsize = self.content[pos] maxlen = max(len(realdata), len(data)) self.state.solver.add(realdata[maxlen-1:0] == data[maxlen-1:0]) self.state.solver.add(size == realsize) if not self.state.solver.satisfiable(): raise SimFileError("Packet write equality constraints made state unsatisfiable???") return pos+1 # write it out! self.content.append((_deps_unpack(data)[0], size)) return pos+1
<SYSTEM_TASK:> Reads some data from the file, storing it into memory. <END_TASK> <USER_TASK:> Description: def read(self, pos, size, **kwargs): """ Reads some data from the file, storing it into memory. :param pos: The address to write the read data into memory :param size: The requested length of the read :return: The real length of the read """
data, realsize = self.read_data(size, **kwargs) if not self.state.solver.is_true(realsize == 0): self.state.memory.store(pos, data, size=realsize) return realsize
<SYSTEM_TASK:> Writes some data, loaded from the state, into the file. <END_TASK> <USER_TASK:> Description: def write(self, pos, size, **kwargs): """ Writes some data, loaded from the state, into the file. :param pos: The address to read the data to write from in memory :param size: The requested size of the write :return: The real length of the write """
if type(pos) is str: raise TypeError("SimFileDescriptor.write takes an address and size. Did you mean write_data?") # Find a reasonable concrete size for the load since we don't want to concretize anything # This is copied from SimFile.read # TODO: refactor into a generic concretization strategy? if self.state.solver.symbolic(size): try: passed_max_size = self.state.solver.max(size, extra_constraints=(size < self.state.libc.max_packet_size,)) except SimSolverError: passed_max_size = self.state.solver.min(size) l.warning("Symbolic write size is too large for threshold - concretizing to min (%d)", passed_max_size) self.state.solver.add(size == passed_max_size) else: passed_max_size = self.state.solver.eval(size) if passed_max_size > 2**13: l.warning("Program performing extremely large write") data = self.state.memory.load(pos, passed_max_size) return self.write_data(data, size, **kwargs)
<SYSTEM_TASK:> Find a sinkhole which is large enough to support `length` bytes. <END_TASK> <USER_TASK:> Description: def get_max_sinkhole(self, length): """ Find a sinkhole which is large enough to support `length` bytes. This uses first-fit. The first sinkhole (ordered in descending order by their address) which can hold `length` bytes is chosen. If there are more than `length` bytes in the sinkhole, a new sinkhole is created representing the remaining bytes while the old sinkhole is removed. """
ordered_sinks = sorted(list(self.sinkholes), key=operator.itemgetter(0), reverse=True) max_pair = None for addr, sz in ordered_sinks: if sz >= length: max_pair = (addr, sz) break if max_pair is None: return None remaining = max_pair[1] - length max_addr = max_pair[0] + remaining max_length = remaining self.sinkholes.remove(max_pair) if remaining: self.sinkholes.add((max_pair[0], max_length)) return max_addr
<SYSTEM_TASK:> A decorator function you should apply to ``copy`` <END_TASK> <USER_TASK:> Description: def memo(f): """ A decorator function you should apply to ``copy`` """
def inner(self, memo=None, **kwargs): if memo is None: memo = {} if id(self) in memo: return memo[id(self)] else: c = f(self, memo, **kwargs) memo[id(self)] = c return c return inner
<SYSTEM_TASK:> Get any VFG node corresponding to the basic block at @addr. <END_TASK> <USER_TASK:> Description: def get_any_node(self, addr): """ Get any VFG node corresponding to the basic block at @addr. Note that depending on the context sensitivity level, there might be multiple nodes corresponding to different contexts. This function will return the first one it encounters, which might not be what you want. """
for n in self.graph.nodes(): if n.addr == addr: return n
<SYSTEM_TASK:> Executed before analysis starts. Necessary initializations are performed here. <END_TASK> <USER_TASK:> Description: def _pre_analysis(self): """ Executed before analysis starts. Necessary initializations are performed here. :return: None """
l.debug("Starting from %#x", self._start) # initialize the task stack self._task_stack = [ ] # initialize the execution counter dict self._execution_counter = defaultdict(int) # Generate a CFG if no CFG is provided if not self._cfg: l.debug("Generating a CFG, since none was given...") # TODO: can we use a fast CFG instead? note that fast CFG does not care of context sensitivity at all, but # TODO: for state merging, we also don't really care about context sensitivity. self._cfg = self.project.analyses.CFGEmulated(context_sensitivity_level=self._context_sensitivity_level, starts=(self._start,) ) if not self._cfg.normalized: l.warning("The given CFG is not normalized, which might impact the performance/accuracy of the VFG " "analysis.") # Prepare the state initial_state = self._prepare_initial_state(self._start, self._initial_state) initial_state.ip = self._start if self.project.arch.name.startswith('MIPS'): initial_state.regs.t9 = self._start # clear function merge points cache self._function_merge_points = {} # Create the initial state state = initial_state.copy() if self._start_at_function: # set the return address to an address so we can catch it and terminate the VSA analysis # TODO: Properly pick an address that will not conflict with any existing code and data in the program self._final_address = 0x4fff0000 self._set_return_address(state, self._final_address) call_stack = None if not self._start_at_function: # we should build a custom call stack call_stack = CallStack() call_stack = call_stack.call(None, self._function_start, retn_target=self._final_address) job = VFGJob(state.addr, state, self._context_sensitivity_level, jumpkind='Ijk_Boring', final_return_address=self._final_address, call_stack=call_stack ) block_id = BlockID.new(state.addr, job.get_call_stack_suffix(), job.jumpkind) job._block_id = block_id self._insert_job(job) # create the task function_analysis_task = FunctionAnalysis(self._function_start, self._final_address) 
function_analysis_task.jobs.append(job) self._task_stack.append(function_analysis_task)
<SYSTEM_TASK:> Get the sorting key of a VFGJob instance. <END_TASK> <USER_TASK:> Description: def _job_sorting_key(self, job): """ Get the sorting key of a VFGJob instance. :param VFGJob job: the VFGJob object. :return: An integer that determines the order of this job in the queue. :rtype: int """
MAX_BLOCKS_PER_FUNCTION = 1000000 task_functions = list(reversed( list(task.function_address for task in self._task_stack if isinstance(task, FunctionAnalysis)) )) try: function_pos = task_functions.index(job.func_addr) except ValueError: # not in the list # it might be because we followed the wrong path, or there is a bug in the traversal algorithm # anyways, do it first l.warning('Function address %#x is not found in task stack.', job.func_addr) return 0 try: block_in_function_pos = self._ordered_node_addrs(job.func_addr).index(job.addr) except ValueError: # block not found. what? block_in_function_pos = min(job.addr - job.func_addr, MAX_BLOCKS_PER_FUNCTION - 1) return block_in_function_pos + MAX_BLOCKS_PER_FUNCTION * function_pos
<SYSTEM_TASK:> Generate new jobs for all possible successor targets when there are more than one possible concrete value for <END_TASK> <USER_TASK:> Description: def _handle_successor_multitargets(self, job, successor, all_successors): """ Generate new jobs for all possible successor targets when there are more than one possible concrete value for successor.ip :param VFGJob job: The VFGJob instance. :param SimState successor: The succeeding state. :param list all_successors: All succeeding states from the same VFGJob. :return: A list of new succeeding jobs :rtype: list """
new_jobs = [ ] # Currently we assume a legit jumping target cannot have more than 256 concrete values # TODO: make it a setting on VFG MAX_NUMBER_OF_CONCRETE_VALUES = 256 all_possible_ips = successor.solver.eval_upto(successor.ip, MAX_NUMBER_OF_CONCRETE_VALUES + 1) if len(all_possible_ips) > MAX_NUMBER_OF_CONCRETE_VALUES: l.warning("IP can be concretized to more than %d values, which means it might be corrupted.", MAX_NUMBER_OF_CONCRETE_VALUES) return [ ] # Call this function to generate a successor for each possible IP for ip in all_possible_ips: concrete_successor = successor.copy() concrete_successor.ip = ip concrete_jobs = self._handle_successor(job, concrete_successor, all_successors) if job.is_call_jump: # TODO: take care of syscalls for new_job in concrete_jobs: # TODO: correctly fill the return address. The return address can be found from the # TODO: fakeret successor in the `successors` list function_analysis_task = FunctionAnalysis(new_job.addr, None) # log the new job function_analysis_task.jobs.append(new_job) # put it onto the stack self._task_stack.append(function_analysis_task) # log it in the call_task job.call_task.register_function_analysis(function_analysis_task) new_jobs.extend(concrete_jobs) return new_jobs
<SYSTEM_TASK:> Merge two given states, and return a new one. <END_TASK> <USER_TASK:> Description: def _merge_states(self, old_state, new_state): """ Merge two given states, and return a new one. :param old_state: :param new_state: :returns: The merged state, and whether a merging has occurred """
# print old_state.dbg_print_stack() # print new_state.dbg_print_stack() merged_state, _, merging_occurred = old_state.merge(new_state, plugin_whitelist=self._mergeable_plugins) # print "Merged: " # print merged_state.dbg_print_stack() return merged_state, merging_occurred
<SYSTEM_TASK:> Perform widen operation on the given states, and return a new one. <END_TASK> <USER_TASK:> Description: def _widen_states(old_state, new_state): """ Perform widen operation on the given states, and return a new one. :param old_state: :param new_state: :returns: The widened state, and whether widening has occurred """
# print old_state.dbg_print_stack() # print new_state.dbg_print_stack() l.debug('Widening state at IP %s', old_state.ip) widened_state, widening_occurred = old_state.widen(new_state) # print "Widened: " # print widened_state.dbg_print_stack() return widened_state, widening_occurred
<SYSTEM_TASK:> Try to narrow the state! <END_TASK> <USER_TASK:> Description: def _narrow_states(node, old_state, new_state, previously_widened_state): # pylint:disable=unused-argument,no-self-use """ Try to narrow the state! :param old_state: :param new_state: :param previously_widened_state: :returns: The narrowed state, and whether a narrowing has occurred """
l.debug('Narrowing state at IP %s', previously_widened_state.ip) s = previously_widened_state.copy() narrowing_occurred = False # TODO: Finish the narrowing logic return s, narrowing_occurred
<SYSTEM_TASK:> Get the state to start the analysis for function. <END_TASK> <USER_TASK:> Description: def _prepare_initial_state(self, function_start, state): """ Get the state to start the analysis for function. :param int function_start: Address of the function :param SimState state: The program state to base on. """
if state is None: state = self.project.factory.blank_state(mode="static", remove_options=self._state_options_to_remove ) # make room for arguments passed to the function sp = state.regs.sp sp_val = state.solver.eval_one(sp) state.memory.set_stack_address_mapping(sp_val, state.memory.stack_id(function_start) + '_pre', 0 ) state.registers.store('sp', sp - 0x100) # Set the stack address mapping for the initial stack state.memory.set_stack_size(state.arch.stack_size) initial_sp = state.solver.eval(state.regs.sp) # FIXME: This is bad, as it may lose tracking of multiple sp values initial_sp -= state.arch.bytes state.memory.set_stack_address_mapping(initial_sp, state.memory.stack_id(function_start), function_start ) return state
<SYSTEM_TASK:> Set the return address of the current state to a specific address. We assume we are at the beginning of a <END_TASK> <USER_TASK:> Description: def _set_return_address(self, state, ret_addr): """ Set the return address of the current state to a specific address. We assume we are at the beginning of a function, or in other words, we are about to execute the very first instruction of the function. :param SimState state: The program state :param int ret_addr: The return address :return: None """
# TODO: the following code is totally untested other than X86 and AMD64. Don't freak out if you find bugs :) # TODO: Test it ret_bvv = state.solver.BVV(ret_addr, self.project.arch.bits) if self.project.arch.name in ('X86', 'AMD64'): state.stack_push(ret_bvv) elif is_arm_arch(self.project.arch): state.regs.lr = ret_bvv elif self.project.arch.name in ('MIPS32', 'MIPS64'): state.regs.ra = ret_bvv elif self.project.arch.name in ('PPC32', 'PPC64'): state.regs.lr = ret_bvv else: l.warning('Return address cannot be set for architecture %s. Please add corresponding logic to ' 'VFG._set_return_address().', self.project.arch.name )
<SYSTEM_TASK:> Get an existing VFGNode instance from the graph. <END_TASK> <USER_TASK:> Description: def _graph_get_node(self, block_id, terminator_for_nonexistent_node=False): """ Get an existing VFGNode instance from the graph. :param BlockID block_id: The block ID for the node to get. :param bool terminator_for_nonexistent_node: True if a Terminator (which is a SimProcedure stub) should be created when there is no existing node available for the given block ID. :return: A node in the graph, or None. :rtype: VFGNode """
if block_id not in self._nodes: l.error("Trying to look up a node that we don't have yet. Is this okay????") if not terminator_for_nonexistent_node: return None # Generate a PathTerminator node addr = block_id.addr func_addr = block_id.func_addr if func_addr is None: # We'll have to use the current block address instead # TODO: Is it really OK? func_addr = addr input_state = self.project.factory.entry_state() input_state.ip = addr pt = VFGNode(addr, block_id, input_state) self._nodes[block_id] = pt if isinstance(self.project.arch, archinfo.ArchARM) and addr % 2 == 1: self._thumb_addrs.add(addr) self._thumb_addrs.add(addr - 1) l.debug("Block ID %s does not exist. Create a PathTerminator instead.", repr(block_id)) return self._nodes[block_id]
<SYSTEM_TASK:> Add an edge onto the graph. <END_TASK> <USER_TASK:> Description: def _graph_add_edge(self, src_block_id, dst_block_id, **kwargs): """ Add an edge onto the graph. :param BlockID src_block_id: The block ID for source node. :param BlockID dst_block_id: The block Id for destination node. :param str jumpkind: The jumpkind of the edge. :param exit_stmt_idx: ID of the statement in the source IRSB where this edge is created from. 'default' refers to the default exit. :return: None """
dst_node = self._graph_get_node(dst_block_id, terminator_for_nonexistent_node=True) if src_block_id is None: self.graph.add_node(dst_node) else: src_node = self._graph_get_node(src_block_id, terminator_for_nonexistent_node=True) self.graph.add_edge(src_node, dst_node, **kwargs)
<SYSTEM_TASK:> Remove all pending returns that are related to the current job. <END_TASK> <USER_TASK:> Description: def _remove_pending_return(self, job, pending_returns): """ Remove all pending returns that are related to the current job. """
# Build the tuples that we want to remove from the dict fake_func_retn_exits tpls_to_remove = [ ] call_stack_copy = job.call_stack_copy() while call_stack_copy.current_return_target is not None: ret_target = call_stack_copy.current_return_target # Remove the current call stack frame call_stack_copy = call_stack_copy.ret(ret_target) call_stack_suffix = call_stack_copy.stack_suffix(self._context_sensitivity_level) tpl = call_stack_suffix + (ret_target,) tpls_to_remove.append(tpl) # Remove those tuples from the dict for tpl in tpls_to_remove: if tpl in pending_returns: del pending_returns[tpl] l.debug("Removed (%s) from FakeExits dict.", ",".join([hex(i) if i is not None else 'None' for i in tpl]))
<SYSTEM_TASK:> Print out debugging information after handling a VFGJob and generating the succeeding jobs. <END_TASK> <USER_TASK:> Description: def _post_job_handling_debug(self, job, successors): """ Print out debugging information after handling a VFGJob and generating the succeeding jobs. :param VFGJob job: The VFGJob instance. :param list successors: A list of succeeding states. :return: None """
func = self.project.loader.find_symbol(job.addr) function_name = func.name if func is not None else None module_name = self.project.loader.find_object_containing(job.addr).provides l.debug("VFGJob @ %#08x with callstack [ %s ]", job.addr, job.callstack_repr(self.kb), ) l.debug("(Function %s of %s)", function_name, module_name) l.debug("- is call jump: %s", job.is_call_jump) for suc in successors: if suc not in job.dbg_exit_status: l.warning("- %s is not found. FIND OUT WHY.", suc) continue try: l.debug("- successor: %#08x of %s [%s]", suc.solver.eval_one(suc.ip), suc.history.jumpkind, job.dbg_exit_status[suc]) except SimValueError: l.debug("- target cannot be concretized. %s [%s]", job.dbg_exit_status[suc], suc.history.jumpkind) l.debug("Remaining/pending jobs: %d/%d", len(self._job_info_queue), len(self._pending_returns)) l.debug("Remaining jobs: %s", [ "%s %d" % (ent.job, id(ent.job)) for ent in self._job_info_queue]) l.debug("Task stack: %s", self._task_stack)
<SYSTEM_TASK:> Save the initial state of a function, and merge it with existing ones if there are any. <END_TASK> <USER_TASK:> Description: def _save_function_initial_state(self, function_key, function_address, state): """ Save the initial state of a function, and merge it with existing ones if there are any. :param FunctionKey function_key: The key to this function. :param int function_address: Address of the function. :param SimState state: Initial state of the function. :return: None """
l.debug('Saving the initial state for function %#08x with function key %s', function_address, function_key ) if function_key in self._function_initial_states[function_address]: existing_state = self._function_initial_states[function_address][function_key] merged_state, _, _ = existing_state.merge(state) self._function_initial_states[function_address][function_key] = merged_state else: self._function_initial_states[function_address][function_key] = state
<SYSTEM_TASK:> Save the final state of a function, and merge it with existing ones if there are any. <END_TASK> <USER_TASK:> Description: def _save_function_final_state(self, function_key, function_address, state): """ Save the final state of a function, and merge it with existing ones if there are any. :param FunctionKey function_key: The key to this function. :param int function_address: Address of the function. :param SimState state: Initial state of the function. :return: None """
l.debug('Saving the final state for function %#08x with function key %s', function_address, function_key ) if function_key in self._function_final_states[function_address]: existing_state = self._function_final_states[function_address][function_key] merged_state = existing_state.merge(state, plugin_whitelist=self._mergeable_plugins)[0] self._function_final_states[function_address][function_key] = merged_state else: self._function_final_states[function_address][function_key] = state
<SYSTEM_TASK:> Return the ordered merge points for a specific function. <END_TASK> <USER_TASK:> Description: def _merge_points(self, function_address): """ Return the ordered merge points for a specific function. :param int function_address: Address of the querying function. :return: A list of sorted merge points (addresses). :rtype: list """
# we are entering a new function. now it's time to figure out how to optimally traverse the control flow # graph by generating the sorted merge points try: new_function = self.kb.functions[function_address] except KeyError: # the function does not exist return [ ] if function_address not in self._function_merge_points: ordered_merge_points = CFGUtils.find_merge_points(function_address, new_function.endpoints, new_function.graph) self._function_merge_points[function_address] = ordered_merge_points return self._function_merge_points[function_address]
<SYSTEM_TASK:> Return the ordered widening points for a specific function. <END_TASK> <USER_TASK:> Description: def _widening_points(self, function_address): """ Return the ordered widening points for a specific function. :param int function_address: Address of the querying function. :return: A list of sorted merge points (addresses). :rtype: list """
# we are entering a new function. now it's time to figure out how to optimally traverse the control flow # graph by generating the sorted merge points try: new_function = self.kb.functions[function_address] except KeyError: # the function does not exist return [ ] if function_address not in self._function_widening_points: if not new_function.normalized: new_function.normalize() widening_points = CFGUtils.find_widening_points(function_address, new_function.endpoints, new_function.graph) self._function_widening_points[function_address] = widening_points return self._function_widening_points[function_address]
<SYSTEM_TASK:> For a given function, return all nodes in an optimal traversal order. If the function does not exist, return an <END_TASK> <USER_TASK:> Description: def _ordered_node_addrs(self, function_address): """ For a given function, return all nodes in an optimal traversal order. If the function does not exist, return an empty list. :param int function_address: Address of the function. :return: A ordered list of the nodes. :rtype: list """
try: function = self.kb.functions[function_address] except KeyError: # the function does not exist return [ ] if function_address not in self._function_node_addrs: sorted_nodes = CFGUtils.quasi_topological_sort_nodes(function.graph) self._function_node_addrs[function_address] = [ n.addr for n in sorted_nodes ] return self._function_node_addrs[function_address]
<SYSTEM_TASK:> Assign a new region for under-constrained symbolic execution. <END_TASK> <USER_TASK:> Description: def assign(self, dst_addr_ast): """ Assign a new region for under-constrained symbolic execution. :param dst_addr_ast: the symbolic AST which address of the new allocated region will be assigned to. :return: as ast of memory address that points to a new region """
if dst_addr_ast.uc_alloc_depth > self._max_alloc_depth: raise SimUCManagerAllocationError('Current allocation depth %d is greater than the cap (%d)' % \ (dst_addr_ast.uc_alloc_depth, self._max_alloc_depth)) abs_addr = self._region_base + self._pos ptr = self.state.solver.BVV(abs_addr, self.state.arch.bits) self._pos += self._region_size self._alloc_depth_map[(abs_addr - self._region_base) // self._region_size] = dst_addr_ast.uc_alloc_depth l.debug("Assigned new memory region %s", ptr) return ptr
<SYSTEM_TASK:> Test whether an AST is bounded by any existing constraint in the related solver. <END_TASK> <USER_TASK:> Description: def is_bounded(self, ast): """ Test whether an AST is bounded by any existing constraint in the related solver. :param ast: an claripy.AST object :return: True if there is at least one related constraint, False otherwise """
return len(ast.variables.intersection(self.state.solver._solver.variables)) != 0
<SYSTEM_TASK:> Return a string representation of all state options. <END_TASK> <USER_TASK:> Description: def tally(self, exclude_false=True, description=False): """ Return a string representation of all state options. :param bool exclude_false: Whether to exclude Boolean switches that are disabled. :param bool description: Whether to display the description of each option. :return: A string representation. :rtype: str """
total = [ ] for o in sorted(self.OPTIONS.values(), key=lambda x: x.name): try: value = self[o.name] except SimStateOptionsError: value = "<Unset>" if exclude_false and o.one_type() is bool and value is False: # Skip Boolean switches that are False continue s = "{option}: {value}".format(option=o.name, value=value) if description: s += " | {description}".format(description=o.description) total.append(s) return "\n".join(total)
<SYSTEM_TASK:> Register a state option. <END_TASK> <USER_TASK:> Description: def register_option(cls, name, types, default=None, description=None): """ Register a state option. :param str name: Name of the state option. :param types: A collection of allowed types of this state option. :param default: The default value of this state option. :param str description: The description of this state option. :return: None """
if name in cls.OPTIONS: raise SimStateOptionsError("A state option with the same name has been registered.") if isinstance(types, type): types = { types } o = StateOption(name, types, default=default, description=description) cls.OPTIONS[name] = o
<SYSTEM_TASK:> For now a lot of naive concretization is done when handling heap metadata to keep things manageable. This idiom <END_TASK> <USER_TASK:> Description: def concretize(x, solver, sym_handler): """ For now a lot of naive concretization is done when handling heap metadata to keep things manageable. This idiom showed up a lot as a result, so to reduce code repetition this function uses a callback to handle the one or two operations that varied across invocations. :param x: the item to be concretized :param solver: the solver to evaluate the item with :param sym_handler: the handler to be used when the item may take on more than one value :returns: a concrete value for the item """
if solver.symbolic(x): try: return solver.eval_one(x) except SimSolverError: return sym_handler(x) else: return solver.eval(x)
<SYSTEM_TASK:> return a 5-tuple of strings sufficient for formatting with ``%s%s%s%s%s`` to verbosely describe the procedure <END_TASK> <USER_TASK:> Description: def _describe_me(self): """ return a 5-tuple of strings sufficient for formatting with ``%s%s%s%s%s`` to verbosely describe the procedure """
return ( self.display_name, ' (cont: %s)' % self.run_func if self.is_continuation else '', ' (syscall)' if self.is_syscall else '', ' (inline)' if not self.use_state_arguments else '', ' (stub)' if self.is_stub else '', )
<SYSTEM_TASK:> Returns the ith argument. Raise a SimProcedureArgumentError if we don't have such an argument available. <END_TASK> <USER_TASK:> Description: def arg(self, i): """ Returns the ith argument. Raise a SimProcedureArgumentError if we don't have such an argument available. :param int i: The index of the argument to get :return: The argument :rtype: object """
if self.use_state_arguments: r = self.cc.arg(self.state, i) else: if i >= len(self.arguments): raise SimProcedureArgumentError("Argument %d does not exist." % i) r = self.arguments[i] # pylint: disable=unsubscriptable-object l.debug("returning argument") return r
<SYSTEM_TASK:> Call another SimProcedure in-line to retrieve its return value. <END_TASK> <USER_TASK:> Description: def inline_call(self, procedure, *arguments, **kwargs): """ Call another SimProcedure in-line to retrieve its return value. Returns an instance of the procedure with the ret_expr property set. :param procedure: The class of the procedure to execute :param arguments: Any additional positional args will be used as arguments to the procedure call :param sim_kwargs: Any additional keyword args will be passed as sim_kwargs to the procedure constructor """
e_args = [ self.state.solver.BVV(a, self.state.arch.bits) if isinstance(a, int) else a for a in arguments ] p = procedure(project=self.project, **kwargs) return p.execute(self.state, None, arguments=e_args)
<SYSTEM_TASK:> Add an exit representing a return from this function. <END_TASK> <USER_TASK:> Description: def ret(self, expr=None): """ Add an exit representing a return from this function. If this is not an inline call, grab a return address from the state and jump to it. If this is not an inline call, set a return expression with the calling convention. """
self.inhibit_autoret = True if expr is not None: if o.SIMPLIFY_RETS in self.state.options: l.debug("... simplifying") l.debug("... before: %s", expr) expr = self.state.solver.simplify(expr) l.debug("... after: %s", expr) if self.symbolic_return: size = len(expr) new_expr = self.state.solver.Unconstrained( "symbolic_return_" + self.display_name, size, key=('symbolic_return', self.display_name)) #pylint:disable=maybe-no-member self.state.add_constraints(new_expr == expr) expr = new_expr self.ret_expr = expr ret_addr = None # TODO: I had to put this check here because I don't understand why self.use_state_arguments gets reset to true # when calling the function ret. at the calling point the attribute is set to False if isinstance(self.addr, SootAddressDescriptor): ret_addr = self._compute_ret_addr(expr) elif self.use_state_arguments: ret_addr = self.cc.teardown_callsite( self.state, expr, arg_types=[False]*self.num_args if self.cc.args is None else None) if not self.should_add_successors: l.debug("Returning without setting exits due to 'internal' call.") return if self.ret_to is not None: ret_addr = self.ret_to if ret_addr is None: raise SimProcedureError("No source for return address in ret() call!") self._prepare_ret_state() self._exit_action(self.state, ret_addr) self.successors.add_successor(self.state, ret_addr, self.state.solver.true, 'Ijk_Ret')
<SYSTEM_TASK:> Add an exit representing calling another function via pointer. <END_TASK> <USER_TASK:> Description: def call(self, addr, args, continue_at, cc=None): """ Add an exit representing calling another function via pointer. :param addr: The address of the function to call :param args: The list of arguments to call the function with :param continue_at: Later, when the called function returns, execution of the current procedure will continue in the named method. :param cc: Optional: use this calling convention for calling the new function. Default is to use the current convention. """
self.inhibit_autoret = True if cc is None: cc = self.cc call_state = self.state.copy() ret_addr = self.make_continuation(continue_at) saved_local_vars = list(zip(self.local_vars, map(lambda name: getattr(self, name), self.local_vars))) simcallstack_entry = (self.state.regs.sp if hasattr(self.state.regs, "sp") else None, self.arguments, saved_local_vars, self.state.regs.lr if self.state.arch.lr_offset is not None else None) cc.setup_callsite(call_state, ret_addr, args) call_state.callstack.top.procedure_data = simcallstack_entry # TODO: Move this to setup_callsite? if isinstance(call_state.addr, SootAddressDescriptor): pass elif call_state.libc.ppc64_abiv == 'ppc64_1': call_state.regs.r2 = self.state.mem[addr + 8:].long.resolved addr = call_state.mem[addr:].long.resolved elif call_state.arch.name in ('MIPS32', 'MIPS64'): call_state.regs.t9 = addr self._exit_action(call_state, addr) self.successors.add_successor(call_state, addr, call_state.solver.true, 'Ijk_Call') if o.DO_RET_EMULATION in self.state.options: # we need to set up the call because the continuation will try to tear it down ret_state = self.state.copy() cc.setup_callsite(ret_state, ret_addr, args) ret_state.callstack.top.procedure_data = simcallstack_entry guard = ret_state.solver.true if o.TRUE_RET_EMULATION_GUARD in ret_state.options else ret_state.solver.false self.successors.add_successor(ret_state, ret_addr, guard, 'Ijk_FakeRet')
<SYSTEM_TASK:> Add an exit representing jumping to an address. <END_TASK> <USER_TASK:> Description: def jump(self, addr): """ Add an exit representing jumping to an address. """
self.inhibit_autoret = True self._exit_action(self.state, addr) self.successors.add_successor(self.state, addr, self.state.solver.true, 'Ijk_Boring')
<SYSTEM_TASK:> Add an exit representing terminating the program. <END_TASK> <USER_TASK:> Description: def exit(self, exit_code): """ Add an exit representing terminating the program. """
self.inhibit_autoret = True self.state.options.discard(o.AST_DEPS) self.state.options.discard(o.AUTO_REFS) if isinstance(exit_code, int): exit_code = self.state.solver.BVV(exit_code, self.state.arch.bits) self.state.history.add_event('terminate', exit_code=exit_code) self.successors.add_successor(self.state, self.state.regs.ip, self.state.solver.true, 'Ijk_Exit')
<SYSTEM_TASK:> This is a backward lookup in the previous defs. <END_TASK> <USER_TASK:> Description: def _def_lookup(self, live_defs, variable): """ This is a backward lookup in the previous defs. :param addr_list: a list of normalized addresses. Note that, as we are using VSA, it is possible that @a is affected by several definitions. :returns: a dict {stmt:labels} where label is the number of individual addresses of addr_list (or the actual set of addresses depending on the keep_addrs flag) that are defined by stmt. """
prevdefs = { } if variable in live_defs: code_loc_set = live_defs[variable] for code_loc in code_loc_set: # Label edges with cardinality or actual sets of addresses if isinstance(variable, SimMemoryVariable): type_ = 'mem' elif isinstance(variable, SimRegisterVariable): type_ = 'reg' else: raise AngrDDGError('Unknown variable type %s' % type(variable)) if self.keep_data is True: data = variable prevdefs[code_loc] = { 'type': type_, 'data': data } else: if code_loc in prevdefs: count = prevdefs[code_loc]['count'] + 1 else: count = 0 prevdefs[code_loc] = { 'type': type_, 'count': count } return prevdefs
<SYSTEM_TASK:> Get all DDG nodes matching the given basic block address and statement index. <END_TASK> <USER_TASK:> Description: def get_all_nodes(self, simrun_addr, stmt_idx): """ Get all DDG nodes matching the given basic block address and statement index. """
nodes=[] for n in self.graph.nodes(): if n.simrun_addr == simrun_addr and n.stmt_idx == stmt_idx: nodes.add(n) return nodes
<SYSTEM_TASK:> Yields each of the individual lane pairs from the arguments, in <END_TASK> <USER_TASK:> Description: def vector_args(self, args): """ Yields each of the individual lane pairs from the arguments, in order from most significant to least significant """
for i in reversed(range(self._vector_count)): pieces = [] for vec in args: pieces.append(vec[(i+1) * self._vector_size - 1 : i * self._vector_size]) yield pieces
<SYSTEM_TASK:> Halving add, for some ARM NEON instructions. <END_TASK> <USER_TASK:> Description: def _op_generic_HAdd(self, args): """ Halving add, for some ARM NEON instructions. """
components = [] for a, b in self.vector_args(args): if self.is_signed: a = a.sign_extend(self._vector_size) b = b.sign_extend(self._vector_size) else: a = a.zero_extend(self._vector_size) b = b.zero_extend(self._vector_size) components.append((a + b)[self._vector_size:1]) return claripy.Concat(*components)
<SYSTEM_TASK:> Return unsigned saturated BV from signed BV. <END_TASK> <USER_TASK:> Description: def _op_generic_StoU_saturation(self, value, min_value, max_value): #pylint:disable=no-self-use """ Return unsigned saturated BV from signed BV. Min and max value should be unsigned. """
return claripy.If( claripy.SGT(value, max_value), max_value, claripy.If(claripy.SLT(value, min_value), min_value, value))
<SYSTEM_TASK:> Sets an instance field. <END_TASK> <USER_TASK:> Description: def set_field(self, state, field_name, field_type, value): """ Sets an instance field. """
field_ref = SimSootValue_InstanceFieldRef.get_ref(state=state, obj_alloc_id=self.heap_alloc_id, field_class_name=self.type, field_name=field_name, field_type=field_type) # store value in java memory state.memory.store(field_ref, value)
<SYSTEM_TASK:> Gets the value of an instance field. <END_TASK> <USER_TASK:> Description: def get_field(self, state, field_name, field_type): """ Gets the value of an instance field. """
# get field reference field_ref = SimSootValue_InstanceFieldRef.get_ref(state=state, obj_alloc_id=self.heap_alloc_id, field_class_name=self.type, field_name=field_name, field_type=field_type) # load value from java memory return state.memory.load(field_ref, none_if_missing=True)
<SYSTEM_TASK:> Store a field of a given object, without resolving hierarchy <END_TASK> <USER_TASK:> Description: def store_field(self, state, field_name, field_type, value): """ Store a field of a given object, without resolving hierarchy :param state: angr state where we want to allocate the object attribute :type SimState :param field_name: name of the attribute :type str :param value: attribute's value :type SimSootValue """
field_ref = SimSootValue_InstanceFieldRef(self.heap_alloc_id, self.type, field_name, field_type) state.memory.store(field_ref, value)
<SYSTEM_TASK:> Load a field of a given object, without resolving hierarchy <END_TASK> <USER_TASK:> Description: def load_field(self, state, field_name, field_type): """ Load a field of a given object, without resolving hierarchy :param state: angr state where we want to load the object attribute :type SimState :param field_name: name of the attribute :type str :param field_type: type of the attribute :type str """
field_ref = SimSootValue_InstanceFieldRef(self.heap_alloc_id, self.type, field_name, field_type) return state.memory.load(field_ref, none_if_missing=False)
<SYSTEM_TASK:> This function prepares a state that is executing a call instruction. <END_TASK> <USER_TASK:> Description: def prepare_call_state(self, calling_state, initial_state=None, preserve_registers=(), preserve_memory=()): """ This function prepares a state that is executing a call instruction. If given an initial_state, it copies over all of the critical registers to it from the calling_state. Otherwise, it prepares the calling_state for action. This is mostly used to create minimalistic for CFG generation. Some ABIs, such as MIPS PIE and x86 PIE, require certain information to be maintained in certain registers. For example, for PIE MIPS, this function transfer t9, gp, and ra to the new state. """
if isinstance(self.arch, ArchMIPS32): if initial_state is not None: initial_state = self.state_blank() mips_caller_saves = ('s0', 's1', 's2', 's3', 's4', 's5', 's6', 's7', 'gp', 'sp', 'bp', 'ra') preserve_registers = preserve_registers + mips_caller_saves + ('t9',) if initial_state is None: new_state = calling_state.copy() else: new_state = initial_state.copy() for reg in set(preserve_registers): new_state.registers.store(reg, calling_state.registers.load(reg)) for addr, val in set(preserve_memory): new_state.memory.store(addr, calling_state.memory.load(addr, val)) return new_state
<SYSTEM_TASK:> Prepare the address space with the data necessary to perform relocations pointing to the given symbol <END_TASK> <USER_TASK:> Description: def prepare_function_symbol(self, symbol_name, basic_addr=None): """ Prepare the address space with the data necessary to perform relocations pointing to the given symbol Returns a 2-tuple. The first item is the address of the function code, the second is the address of the relocation target. """
if basic_addr is None: basic_addr = self.project.loader.extern_object.get_pseudo_addr(symbol_name) return basic_addr, basic_addr
<SYSTEM_TASK:> Write the GlobalDescriptorTable object in the current state memory <END_TASK> <USER_TASK:> Description: def setup_gdt(self, state, gdt): """ Write the GlobalDescriptorTable object in the current state memory :param state: state in which to write the GDT :param gdt: GlobalDescriptorTable object :return: """
state.memory.store(gdt.addr+8, gdt.table) state.regs.gdt = gdt.gdt state.regs.cs = gdt.cs state.regs.ds = gdt.ds state.regs.es = gdt.es state.regs.ss = gdt.ss state.regs.fs = gdt.fs state.regs.gs = gdt.gs
<SYSTEM_TASK:> Generate a GlobalDescriptorTable object and populate it using the value of the gs and fs register <END_TASK> <USER_TASK:> Description: def generate_gdt(self, fs, gs, fs_size=0xFFFFFFFF, gs_size=0xFFFFFFFF): """ Generate a GlobalDescriptorTable object and populate it using the value of the gs and fs register :param fs: value of the fs segment register :param gs: value of the gs segment register :param fs_size: size of the fs segment register :param gs_size: size of the gs segment register :return: gdt a GlobalDescriptorTable object """
A_PRESENT = 0x80 A_DATA = 0x10 A_DATA_WRITABLE = 0x2 A_PRIV_0 = 0x0 A_DIR_CON_BIT = 0x4 F_PROT_32 = 0x4 S_GDT = 0x0 S_PRIV_0 = 0x0 GDT_ADDR = 0x4000 GDT_LIMIT = 0x1000 normal_entry = self._create_gdt_entry(0, 0xFFFFFFFF, A_PRESENT | A_DATA | A_DATA_WRITABLE | A_PRIV_0 | A_DIR_CON_BIT, F_PROT_32) stack_entry = self._create_gdt_entry(0, 0xFFFFFFFF, A_PRESENT | A_DATA | A_DATA_WRITABLE | A_PRIV_0, F_PROT_32) fs_entry = self._create_gdt_entry(fs, fs_size, A_PRESENT | A_DATA | A_DATA_WRITABLE | A_PRIV_0 | A_DIR_CON_BIT, F_PROT_32) gs_entry = self._create_gdt_entry(gs, gs_size, A_PRESENT | A_DATA | A_DATA_WRITABLE | A_PRIV_0 | A_DIR_CON_BIT, F_PROT_32) table = normal_entry + stack_entry + fs_entry + gs_entry gdt = (GDT_ADDR << 16 | GDT_LIMIT) selector = self._create_selector(1, S_GDT | S_PRIV_0) cs = selector ds = selector es = selector selector = self._create_selector(2, S_GDT | S_PRIV_0) ss = selector selector = self._create_selector(3, S_GDT | S_PRIV_0) fs = selector selector = self._create_selector(4, S_GDT | S_PRIV_0) gs = selector global_descriptor_table = GlobalDescriptorTable(GDT_ADDR, GDT_LIMIT, table, gdt, cs, ds, es, ss, fs, gs) return global_descriptor_table
<SYSTEM_TASK:> Register a struct definition globally <END_TASK> <USER_TASK:> Description: def define_struct(defn): """ Register a struct definition globally >>> define_struct('struct abcd {int x; int y;}') """
struct = parse_type(defn) ALL_TYPES[struct.name] = struct return struct
<SYSTEM_TASK:> Run a string through the C preprocessor that ships with pycparser but is weirdly inaccessible? <END_TASK> <USER_TASK:> Description: def do_preprocess(defn): """ Run a string through the C preprocessor that ships with pycparser but is weirdly inaccessible? """
from pycparser.ply import lex, cpp lexer = lex.lex(cpp) p = cpp.Preprocessor(lexer) # p.add_path(dir) will add dir to the include search path p.parse(defn) return ''.join(tok.value for tok in p.parser if tok.type not in p.ignore)
<SYSTEM_TASK:> Parse a series of C definitions, returns a tuple of two type mappings, one for variable <END_TASK> <USER_TASK:> Description: def parse_file(defn, preprocess=True): """ Parse a series of C definitions, returns a tuple of two type mappings, one for variable definitions and one for type definitions. """
if pycparser is None: raise ImportError("Please install pycparser in order to parse C definitions") defn = '\n'.join(x for x in defn.split('\n') if _include_re.match(x) is None) if preprocess: defn = do_preprocess(defn) preamble, ignoreme = make_preamble() node = pycparser.c_parser.CParser().parse(preamble + defn) if not isinstance(node, pycparser.c_ast.FileAST): raise ValueError("Something went horribly wrong using pycparser") out = {} extra_types = {} for piece in node.ext: if isinstance(piece, pycparser.c_ast.FuncDef): out[piece.decl.name] = _decl_to_type(piece.decl.type, extra_types) elif isinstance(piece, pycparser.c_ast.Decl): ty = _decl_to_type(piece.type, extra_types) if piece.name is not None: out[piece.name] = ty elif isinstance(piece, pycparser.c_ast.Typedef): extra_types[piece.name] = _decl_to_type(piece.type, extra_types) for ty in ignoreme: del extra_types[ty] return out, extra_types
<SYSTEM_TASK:> The alignment of the type in bytes. <END_TASK> <USER_TASK:> Description: def alignment(self): """ The alignment of the type in bytes. """
if self._arch is None: return NotImplemented return self.size // self._arch.byte_width
<SYSTEM_TASK:> This is a hack to deal with small values being stored at offsets into large registers unpredictably <END_TASK> <USER_TASK:> Description: def _fix_offset(self, state, size, arch=None): """ This is a hack to deal with small values being stored at offsets into large registers unpredictably """
if state is not None: arch = state.arch if arch is None: raise ValueError('Either "state" or "arch" must be specified.') offset = arch.registers[self.reg_name][0] if size in self.alt_offsets: return offset + self.alt_offsets[size] elif size < self.size and arch.register_endness == 'Iend_BE': return offset + (self.size - size) return offset
<SYSTEM_TASK:> Iterate through all the possible arg positions that can only be used to store integer or pointer values <END_TASK> <USER_TASK:> Description: def int_args(self): """ Iterate through all the possible arg positions that can only be used to store integer or pointer values Does not take into account customizations. Returns an iterator of SimFunctionArguments """
if self.ARG_REGS is None: raise NotImplementedError() for reg in self.ARG_REGS: # pylint: disable=not-an-iterable yield SimRegArg(reg, self.arch.bytes)
<SYSTEM_TASK:> Iterate through all the possible arg positions that can be used to store any kind of argument <END_TASK> <USER_TASK:> Description: def both_args(self): """ Iterate through all the possible arg positions that can be used to store any kind of argument Does not take into account customizations. Returns an iterator of SimFunctionArguments """
turtle = self.STACKARG_SP_BUFF + self.STACKARG_SP_DIFF while True: yield SimStackArg(turtle, self.arch.bytes) turtle += self.arch.bytes
<SYSTEM_TASK:> Iterate through all the possible arg positions that can only be used to store floating point values <END_TASK> <USER_TASK:> Description: def fp_args(self): """ Iterate through all the possible arg positions that can only be used to store floating point values Does not take into account customizations. Returns an iterator of SimFunctionArguments """
if self.FP_ARG_REGS is None: raise NotImplementedError() for reg in self.FP_ARG_REGS: # pylint: disable=not-an-iterable yield SimRegArg(reg, self.arch.registers[reg][1])
<SYSTEM_TASK:> This should take a SimFunctionArgument instance and return whether or not that argument is a floating-point <END_TASK> <USER_TASK:> Description: def is_fp_arg(self, arg): """ This should take a SimFunctionArgument instance and return whether or not that argument is a floating-point argument. Returns True for MUST be a floating point arg, False for MUST NOT be a floating point arg, None for when it can be either. """
if arg in self.int_args: return False if arg in self.fp_args or arg == self.FP_RETURN_VAL: return True return None
<SYSTEM_TASK:> Returns a bitvector expression representing the nth argument of a function. <END_TASK> <USER_TASK:> Description: def arg(self, state, index, stack_base=None): """ Returns a bitvector expression representing the nth argument of a function. `stack_base` is an optional pointer to the top of the stack at the function start. If it is not specified, use the current stack pointer. WARNING: this assumes that none of the arguments are floating-point and they're all single-word-sized, unless you've customized this CC. """
session = self.arg_session if self.args is None: arg_loc = [session.next_arg(False) for _ in range(index + 1)][-1] else: arg_loc = self.args[index] return arg_loc.get_value(state, stack_base=stack_base)
<SYSTEM_TASK:> `is_fp` should be a list of booleans specifying whether each corresponding argument is floating-point - <END_TASK> <USER_TASK:> Description: def get_args(self, state, is_fp=None, sizes=None, stack_base=None): """ `is_fp` should be a list of booleans specifying whether each corresponding argument is floating-point - True for fp and False for int. For a shorthand to assume that all the parameters are int, pass the number of parameters as an int. If you've customized this CC, you may omit this parameter entirely. If it is provided, it is used for sanity-checking. `sizes` is an optional list of argument sizes, in bytes. Be careful about using this if you've made explicit the arg locations, since it might decide to combine two locations into one if an arg is too big. `stack_base` is an optional pointer to the top of the stack at the function start. If it is not specified, use the current stack pointer. Returns a list of bitvector expressions representing the arguments of a function. """
if sizes is None and self.func_ty is not None: sizes = [arg.size for arg in self.func_ty.args] if is_fp is None: if self.args is None: if self.func_ty is None: raise ValueError("You must either customize this CC or pass a value to is_fp!") else: arg_locs = self.arg_locs([False]*len(self.func_ty.args)) else: arg_locs = self.args elif type(is_fp) is int: if self.args is not None and len(self.args) != is_fp: raise ValueError("Bad number of args requested: got %d, expected %d" % (is_fp, len(self.args))) arg_locs = self.arg_locs([False]*is_fp, sizes) else: arg_locs = self.arg_locs(is_fp, sizes) return [loc.get_value(state, stack_base=stack_base) for loc in arg_locs]
<SYSTEM_TASK:> This function performs the actions of the callee as it's getting ready to return. <END_TASK> <USER_TASK:> Description: def teardown_callsite(self, state, return_val=None, arg_types=None, force_callee_cleanup=False): """ This function performs the actions of the callee as it's getting ready to return. It returns the address to return to. :param state: The state to mutate :param return_val: The value to return :param arg_types: The fp-ness of each of the args. Used to calculate sizes to clean up :param force_callee_cleanup: If we should clean up the stack allocation for the arguments even if it's not the callee's job to do so TODO: support the stack_base parameter from setup_callsite...? Does that make sense in this context? Maybe it could make sense by saying that you pass it in as something like the "saved base pointer" value? """
if return_val is not None: self.set_return_val(state, return_val) ret_addr = self.return_addr.get_value(state) if state.arch.sp_offset is not None: if force_callee_cleanup or self.CALLEE_CLEANUP: if arg_types is not None: session = self.arg_session state.regs.sp += self.stack_space([session.next_arg(x) for x in arg_types]) elif self.args is not None: state.regs.sp += self.stack_space(self.args) else: l.warning("Can't perform callee cleanup when I have no idea how many arguments there are! Assuming 0") state.regs.sp += self.STACKARG_SP_DIFF else: state.regs.sp += self.STACKARG_SP_DIFF return ret_addr
<SYSTEM_TASK:> Get the return value out of the given state <END_TASK> <USER_TASK:> Description: def get_return_val(self, state, is_fp=None, size=None, stack_base=None): """ Get the return value out of the given state """
ty = self.func_ty.returnty if self.func_ty is not None else None if self.ret_val is not None: loc = self.ret_val elif is_fp is not None: loc = self.FP_RETURN_VAL if is_fp else self.RETURN_VAL elif ty is not None: loc = self.FP_RETURN_VAL if isinstance(ty, SimTypeFloat) else self.RETURN_VAL else: loc = self.RETURN_VAL if loc is None: raise NotImplementedError("This SimCC doesn't know how to get this value - should be implemented") val = loc.get_value(state, stack_base=stack_base, size=None if ty is None else ty.size//state.arch.byte_width) if self.is_fp_arg(loc) or self.is_fp_value(val) or isinstance(ty, SimTypeFloat): val = val.raw_to_fp() return val
<SYSTEM_TASK:> Set the return value into the given state <END_TASK> <USER_TASK:> Description: def set_return_val(self, state, val, is_fp=None, size=None, stack_base=None): """ Set the return value into the given state """
ty = self.func_ty.returnty if self.func_ty is not None else None try: betterval = self._standardize_value(val, ty, state, None) except AttributeError: raise ValueError("Can't fit value %s into a return value" % repr(val)) if self.ret_val is not None: loc = self.ret_val elif is_fp is not None: loc = self.FP_RETURN_VAL if is_fp else self.RETURN_VAL elif ty is not None: loc = self.FP_RETURN_VAL if isinstance(ty, SimTypeFloat) else self.RETURN_VAL else: loc = self.FP_RETURN_VAL if self.is_fp_value(val) else self.RETURN_VAL if loc is None: raise NotImplementedError("This SimCC doesn't know how to store this value - should be implemented") loc.set_value(state, betterval, endness='Iend_BE', stack_base=stack_base)
<SYSTEM_TASK:> Pinpoint the best-fit calling convention and return the corresponding SimCC instance, or None if no fit is <END_TASK> <USER_TASK:> Description: def find_cc(arch, args, sp_delta): """ Pinpoint the best-fit calling convention and return the corresponding SimCC instance, or None if no fit is found. :param Arch arch: An ArchX instance. Can be obtained from archinfo. :param list args: A list of arguments. :param int sp_delta: The change of stack pointer before and after the call is made. :return: A calling convention instance, or None if none of the SimCC subclasses seems to fit the arguments provided. :rtype: SimCC or None """
if arch.name not in CC: return None possible_cc_classes = CC[arch.name] for cc_cls in possible_cc_classes: if cc_cls._match(arch, args, sp_delta): return cc_cls(arch, args=args, sp_delta=sp_delta) return None
<SYSTEM_TASK:> Add a successor state of the SimRun. <END_TASK> <USER_TASK:> Description: def add_successor(self, state, target, guard, jumpkind, add_guard=True, exit_stmt_idx=None, exit_ins_addr=None, source=None): """ Add a successor state of the SimRun. This procedure stores method parameters into state.scratch, does some housekeeping, and calls out to helper functions to prepare the state and categorize it into the appropriate successor lists. :param SimState state: The successor state. :param target: The target (of the jump/call/ret). :param guard: The guard expression. :param str jumpkind: The jumpkind (call, ret, jump, or whatnot). :param bool add_guard: Whether to add the guard constraint (default: True). :param int exit_stmt_idx: The ID of the exit statement, an integer by default. 'default' stands for the default exit, and None means it's not from a statement (for example, from a SimProcedure). :param int exit_ins_addr: The instruction pointer of this exit, which is an integer by default. :param int source: The source of the jump (i.e., the address of the basic block). """
# First, trigger the SimInspect breakpoint state._inspect('exit', BP_BEFORE, exit_target=target, exit_guard=guard, exit_jumpkind=jumpkind) state.scratch.target = state._inspect_getattr("exit_target", target) state.scratch.guard = state._inspect_getattr("exit_guard", guard) state.history.jumpkind = state._inspect_getattr("exit_jumpkind", jumpkind) state.history.jump_target = state.scratch.target state.history.jump_guard = state.scratch.guard # track some vex-specific stuff here for now state.scratch.source = source if source is not None else self.addr state.scratch.exit_stmt_idx = exit_stmt_idx state.scratch.exit_ins_addr = exit_ins_addr self._preprocess_successor(state, add_guard=add_guard) if state.history.jumpkind == 'Ijk_SigFPE_IntDiv' and o.PRODUCE_ZERODIV_SUCCESSORS not in state.options: return self._categorize_successor(state) state._inspect('exit', BP_AFTER, exit_target=target, exit_guard=guard, exit_jumpkind=jumpkind) if state.supports_inspect: state.inspect.downsize()
<SYSTEM_TASK:> Preprocesses the successor state. <END_TASK> <USER_TASK:> Description: def _preprocess_successor(self, state, add_guard=True): #pylint:disable=unused-argument """ Preprocesses the successor state. :param state: the successor state """
# Next, simplify what needs to be simplified if o.SIMPLIFY_EXIT_STATE in state.options: state.solver.simplify() if o.SIMPLIFY_EXIT_GUARD in state.options: state.scratch.guard = state.solver.simplify(state.scratch.guard) if o.SIMPLIFY_EXIT_TARGET in state.options: state.scratch.target = state.solver.simplify(state.scratch.target) # unwrap stuff from SimActionObjects state.scratch.target = _raw_ast(state.scratch.target) state.scratch.guard = _raw_ast(state.scratch.guard) # apply the guard constraint and new program counter to the state if add_guard: state.add_constraints(state.scratch.guard) # trigger inspect breakpoints here since this statement technically shows up in the IRSB as the "next" state.regs.ip = state.scratch.target # For architectures with no stack pointer, we can't manage a callstack. This has the side effect of breaking # SimProcedures that call out to binary code self.call. if self.initial_state.arch.sp_offset is not None and not isinstance(state.arch, ArchSoot): self._manage_callstack(state) if len(self.successors) != 0: # This is a fork! state._inspect('fork', BP_AFTER) # clean up the state state.options.discard(o.AST_DEPS) state.options.discard(o.AUTO_REFS)
<SYSTEM_TASK:> Resolve syscall information from the state, get the IP address of the syscall SimProcedure, and set the IP of <END_TASK> <USER_TASK:> Description: def _fix_syscall_ip(state): """ Resolve syscall information from the state, get the IP address of the syscall SimProcedure, and set the IP of the state accordingly. Don't do anything if the resolution fails. :param SimState state: the program state. :return: None """
def _fix_syscall_ip(state):
    """
    Resolve syscall information from the state, get the IP address of the syscall
    SimProcedure, and set the IP of the state accordingly. Don't do anything if
    the resolution fails.

    :param SimState state: the program state.
    :return: None
    """
    try:
        allow_unsupported = o.BYPASS_UNSUPPORTED_SYSCALL in state.options
        stub = state.project.simos.syscall(state, allow_unsupported=allow_unsupported)
        # stub can be None if simos is not a subclass of SimUserspace
        if stub:
            # point the instruction pointer at the syscall SimProcedure
            state.ip = stub.addr
    except AngrUnsupportedSyscallError:
        # resolution failed -- leave the state untouched
        pass
<SYSTEM_TASK:> Finalizes the request. <END_TASK> <USER_TASK:> Description: def _finalize(self): """ Finalizes the request. """
def _finalize(self):
    """
    Finalizes the request.
    """
    if not self.all_successors:
        return

    # do some cleanup
    if o.DOWNSIZE_Z3 in self.all_successors[0].options:
        for succ in self.all_successors:
            succ.downsize()

    # record if the exit is unavoidable: exactly one satisfiable successor
    # and no unconstrained ones means execution must take it
    if len(self.flat_successors) == 1 and not self.unconstrained_successors:
        self.flat_successors[0].scratch.avoidable = False
<SYSTEM_TASK:> The traditional way of evaluating symbolic jump targets. <END_TASK> <USER_TASK:> Description: def _eval_target_brutal(state, ip, limit): """ The traditional way of evaluating symbolic jump targets. :param state: A SimState instance. :param ip: The AST of the instruction pointer to evaluate. :param limit: The maximum number of concrete IPs. :return: A list of conditions and the corresponding concrete IPs. :rtype: list """
addrs = state.solver.eval_upto(ip, limit) return [ (ip == addr, addr) for addr in addrs ]
<SYSTEM_TASK:> Get a long number for a byte being repeated for many times. This is part of the effort of optimizing <END_TASK> <USER_TASK:> Description: def _repeat_bytes(byt, rep): """ Get a long number for a byte being repeated for many times. This is part of the effort of optimizing performance of angr's memory operations. :param int byt: the byte to repeat :param int rep: times to repeat the byte :return: a long integer representing the repeating bytes :rtype: int """
if rep == 1: return byt remainder = rep % 2 quotient = rep // 2 r_ = memset._repeat_bytes(byt, quotient) if remainder == 1: r = r_ << ((quotient + 1) * 8) r |= (r_ << 8) + byt else: r = r_ << (quotient * 8) r |= r_ return r
<SYSTEM_TASK:> Stores a memory object. <END_TASK> <USER_TASK:> Description: def store_mo(self, state, new_mo, overwrite=True): #pylint:disable=unused-argument """ Stores a memory object. :param new_mo: the memory object :param overwrite: whether to overwrite objects already in memory (if false, just fill in the holes) """
def store_mo(self, state, new_mo, overwrite=True): #pylint:disable=unused-argument
    """
    Stores a memory object.

    :param new_mo:    the memory object
    :param overwrite: whether to overwrite objects already in memory (if false, just fill in the holes)
    """
    mo_start, mo_end = self._resolve_range(new_mo)
    if overwrite:
        self.store_overwrite(state, new_mo, mo_start, mo_end)
    else:
        # only fill currently-empty bytes
        self.store_underwrite(state, new_mo, mo_start, mo_end)
<SYSTEM_TASK:> Tests if the address is contained in any page of paged memory, without considering memory backers. <END_TASK> <USER_TASK:> Description: def contains_no_backer(self, addr): """ Tests if the address is contained in any page of paged memory, without considering memory backers. :param int addr: The address to test. :return: True if the address is included in one of the pages, False otherwise. :rtype: bool """
def contains_no_backer(self, addr):
    """
    Tests if the address is contained in any page of paged memory, without considering memory backers.

    :param int addr: The address to test.
    :return: True if the address is included in one of the pages, False otherwise.
    :rtype: bool
    """
    # Pages are keyed by page index, so look the page up directly instead of
    # scanning every page (the old loop was O(number of pages) per query).
    page = self._pages.get(addr // self._page_size)
    if page is None:
        return False
    return (addr % self._page_size) in page
<SYSTEM_TASK:> Writes a memory object to a `page` <END_TASK> <USER_TASK:> Description: def _apply_object_to_page(self, page_base, mo, page=None, overwrite=True): """ Writes a memory object to a `page` :param page_base: The base address of the page. :param mo: The memory object. :param page: (optional) the page to use. :param overwrite: (optional) If False, only write to currently-empty memory. """
# Which page does this object land in?
page_num = page_base // self._page_size
try:
    # Fetch the page for writing. Only create missing pages when segfaults are
    # not modeled (allow_segv off); with allow_segv a miss must fault below.
    page = self._get_page(page_num, write=True, create=not self.allow_segv) if page is None else page
except KeyError:
    if self.allow_segv:
        # write to an unmapped page
        raise SimSegfaultError(mo.base, 'write-miss')
    else:
        raise
if self.allow_segv and not page.concrete_permissions & Page.PROT_WRITE:
    # page exists but is not writable
    raise SimSegfaultError(mo.base, 'non-writable')
page.store_mo(self.state, mo, overwrite=overwrite)
return True
<SYSTEM_TASK:> Replaces the memory object `old` with a new memory object containing `new_content`. <END_TASK> <USER_TASK:> Description: def replace_memory_object(self, old, new_content): """ Replaces the memory object `old` with a new memory object containing `new_content`. :param old: A SimMemoryObject (i.e., one from :func:`memory_objects_for_hash()` or :func:` memory_objects_for_name()`). :param new_content: The content (claripy expression) for the new memory object. :returns: the new memory object """
if old.object.size() != new_content.size():
    raise SimMemoryError("memory objects can only be replaced by the same length content")

# Build the replacement object at the same base address (and byte width).
new = SimMemoryObject(new_content, old.base, byte_width=self.byte_width)
# Swap the object in-place on every page that contains (part of) it.
for p in self._containing_pages_mo(old):
    self._get_page(p//self._page_size, write=True).replace_mo(self.state, old, new)

if isinstance(new.object, claripy.ast.BV):
    # keep the name/hash reverse mappings in sync for every byte the object covers
    for b in range(old.base, old.base+old.length):
        self._update_mappings(b, new.object)
return new
<SYSTEM_TASK:> Replaces all instances of expression `old` with expression `new`. <END_TASK> <USER_TASK:> Description: def replace_all(self, old, new): """ Replaces all instances of expression `old` with expression `new`. :param old: A claripy expression. Must contain at least one named variable (to make it possible to use the name index for speedup). :param new: The new variable to replace it with. """
if options.REVERSE_MEMORY_NAME_MAP not in self.state.options:
    raise SimMemoryError("replace_all is not doable without a reverse name mapping. Please add "
                         "sim_options.REVERSE_MEMORY_NAME_MAP to the state options")

if not isinstance(old, claripy.ast.BV) or not isinstance(new, claripy.ast.BV):
    raise SimMemoryError("old and new arguments to replace_all() must be claripy.BV objects")

if len(old.variables) == 0:
    raise SimMemoryError("old argument to replace_all() must have at least one named variable")

# Compute an intersection between sets of memory objects for each unique variable name. The eventual memory
# object set contains all memory objects that we should update.
memory_objects = None
for v in old.variables:
    if memory_objects is None:
        memory_objects = self.memory_objects_for_name(v)
    elif len(memory_objects) == 0:
        # It's a set and it's already empty
        # there is no way for it to go back...
        break
    else:
        memory_objects &= self.memory_objects_for_name(v)

# Cache replacement ASTs so each distinct object is rewritten only once;
# identity (is) comparisons detect replace() returning the unchanged AST.
replaced_objects_cache = { }
for mo in memory_objects:
    replaced_object = None
    if mo.object in replaced_objects_cache:
        if mo.object is not replaced_objects_cache[mo.object]:
            replaced_object = replaced_objects_cache[mo.object]
    else:
        replaced_object = mo.object.replace(old, new)
        replaced_objects_cache[mo.object] = replaced_object
        if mo.object is replaced_object:
            # The replace does not really occur
            replaced_object = None
    if replaced_object is not None:
        self.replace_memory_object(mo, replaced_object)
<SYSTEM_TASK:> Returns addresses that contain expressions that contain a variable named `n`. <END_TASK> <USER_TASK:> Description: def addrs_for_name(self, n): """ Returns addresses that contain expressions that contain a variable named `n`. """
def addrs_for_name(self, n):
    """
    Returns addresses that contain expressions that contain a variable named `n`.
    """
    if n not in self._name_mapping:
        return

    self._mark_updated_mapping(self._name_mapping, n)

    # lazily prune mapping entries that no longer mention the variable
    stale = set()
    for addr in self._name_mapping[n]:
        try:
            obj = self[addr].object
        except KeyError:
            stale.add(addr)
            continue
        if n in obj.variables:
            yield addr
        else:
            stale.add(addr)
    self._name_mapping[n] -= stale
<SYSTEM_TASK:> Returns addresses that contain expressions that contain a variable with the hash of `h`. <END_TASK> <USER_TASK:> Description: def addrs_for_hash(self, h): """ Returns addresses that contain expressions that contain a variable with the hash of `h`. """
def addrs_for_hash(self, h):
    """
    Returns addresses that contain expressions that contain a variable with the hash of `h`.
    """
    if h not in self._hash_mapping:
        return

    self._mark_updated_mapping(self._hash_mapping, h)

    # lazily prune mapping entries whose stored object no longer matches the hash
    dead = set()
    for addr in self._hash_mapping[h]:
        try:
            obj = self[addr].object
        except KeyError:
            dead.add(addr)
            continue
        if h == hash(obj):
            yield addr
        else:
            dead.add(addr)
    self._hash_mapping[h] -= dead
<SYSTEM_TASK:> Updates the signal mask. <END_TASK> <USER_TASK:> Description: def sigprocmask(self, how, new_mask, sigsetsize, valid_ptr=True): """ Updates the signal mask. :param how: the "how" argument of sigprocmask (see manpage) :param new_mask: the mask modification to apply :param sigsetsize: the size (in *bytes* of the sigmask set) :param valid_ptr: is set if the new_mask was not NULL """
# Current mask value (may be symbolic).
oldmask = self.sigmask(sigsetsize)
# Build a nested symbolic ITE implementing the sigprocmask(2) "how" semantics;
# when the pointer was NULL (valid_ptr is false) the mask stays unchanged.
self._sigmask = self.state.solver.If(valid_ptr,
    self.state.solver.If(how == self.SIG_BLOCK,
        oldmask | new_mask,                  # SIG_BLOCK: add the new bits
        self.state.solver.If(how == self.SIG_UNBLOCK,
            oldmask & (~new_mask),           # SIG_UNBLOCK: clear the new bits
            self.state.solver.If(how == self.SIG_SETMASK,
                new_mask,                    # SIG_SETMASK: replace wholesale
                oldmask                      # unrecognized "how": no change
            )
        )
    ),
    oldmask
)
<SYSTEM_TASK:> Returns the concrete content for a file by path. <END_TASK> <USER_TASK:> Description: def dump_file_by_path(self, path, **kwargs): """ Returns the concrete content for a file by path. :param path: file path as string :param kwargs: passed to state.solver.eval :return: file contents as string """
def dump_file_by_path(self, path, **kwargs):
    """
    Returns the concrete content for a file by path.

    :param path:   file path as string
    :param kwargs: passed to state.solver.eval
    :return:       file contents as string, or None if the path does not exist
    """
    simfile = self.state.fs.get(path)
    return None if simfile is None else simfile.concretize(**kwargs)
<SYSTEM_TASK:> Returns the concrete content for a file descriptor. <END_TASK> <USER_TASK:> Description: def dumps(self, fd, **kwargs): """ Returns the concrete content for a file descriptor. BACKWARD COMPATIBILITY: if you ask for file descriptors 0 1 or 2, it will return the data from stdin, stdout, or stderr as a flat string. :param fd: A file descriptor. :return: The concrete content. :rtype: str """
def dumps(self, fd, **kwargs):
    """
    Returns the concrete content for a file descriptor.

    BACKWARD COMPATIBILITY: if you ask for file descriptors 0 1 or 2, it will return
    the data from stdin, stdout, or stderr as a flat string.

    :param fd: A file descriptor.
    :return: The concrete content.
    :rtype: str
    """
    if not 0 <= fd <= 2:
        return self.get_fd(fd).concretize(**kwargs)

    # stdio fast path: flatten a list of chunks into one bytestring
    stream = (self.stdin, self.stdout, self.stderr)[fd]
    data = stream.concretize(**kwargs)
    if type(data) is list:
        data = b''.join(data)
    return data
<SYSTEM_TASK:> This function receives an initial state and imark and processes a list of pyvex.IRStmts <END_TASK> <USER_TASK:> Description: def _handle_statement(self, state, successors, stmt): """ This function receives an initial state and imark and processes a list of pyvex.IRStmts It annotates the request with a final state, last imark, and a list of SimIRStmts """
if type(stmt) == pyvex.IRStmt.IMark:
    # TODO how much of this could be moved into the imark handler
    ins_addr = stmt.addr + stmt.delta
    state.scratch.ins_addr = ins_addr

    # Raise an exception if we're suddenly in self-modifying code
    for subaddr in range(stmt.len):
        if subaddr + stmt.addr in state.scratch.dirty_addrs:
            raise SimReliftException(state)
    # BP_AFTER here closes out the *previous* instruction; BP_BEFORE below
    # opens the one this IMark starts.
    state._inspect('instruction', BP_AFTER)

    l.debug("IMark: %#x", stmt.addr)
    state.scratch.num_insns += 1
    state._inspect('instruction', BP_BEFORE, instruction=ins_addr)

# process it!
try:
    stmt_handler = self.stmt_handlers[stmt.tag_int]
except IndexError:
    l.error("Unsupported statement type %s", (type(stmt)))
    if o.BYPASS_UNSUPPORTED_IRSTMT not in state.options:
        raise UnsupportedIRStmtError("Unsupported statement type %s" % (type(stmt)))
    state.history.add_event('resilience', resilience_type='irstmt', stmt=type(stmt).__name__, message='unsupported IRStmt')
    return None
else:
    exit_data = stmt_handler(self, state, stmt)

    # for the exits, put *not* taking the exit on the list of constraints so
    # that we can continue on. Otherwise, add the constraints
    if exit_data is not None:
        l.debug("%s adding conditional exit", self)
        target, guard, jumpkind = exit_data

        # Produce our successor state!
        # Let SimSuccessors.add_successor handle the nitty gritty details
        cont_state = None
        exit_state = None

        if o.COPY_STATES not in state.options:
            # very special logic to try to minimize copies
            # first, check if this branch is impossible
            if guard.is_false():
                cont_state = state
            elif o.LAZY_SOLVES not in state.options and not state.solver.satisfiable(extra_constraints=(guard,)):
                cont_state = state

            # then, check if it's impossible to continue from this branch
            elif guard.is_true():
                exit_state = state
            elif o.LAZY_SOLVES not in state.options and not state.solver.satisfiable(extra_constraints=(claripy.Not(guard),)):
                exit_state = state
            else:
                # both sides are feasible: one copy is unavoidable
                exit_state = state.copy()
                cont_state = state
        else:
            exit_state = state.copy()
            cont_state = state

        if exit_state is not None:
            successors.add_successor(exit_state, target, guard, jumpkind, exit_stmt_idx=state.scratch.stmt_idx, exit_ins_addr=state.scratch.ins_addr)

        # False tells the caller execution cannot continue past this statement
        if cont_state is None:
            return False

        # Do our bookkeeping on the continuing state
        cont_condition = claripy.Not(guard)
        cont_state.add_constraints(cont_condition)
        cont_state.scratch.guard = claripy.And(cont_state.scratch.guard, cont_condition)

return True
<SYSTEM_TASK:> Generate a sif file from the call map. <END_TASK> <USER_TASK:> Description: def _genenare_callmap_sif(self, filepath): """ Generate a sif file from the call map. :param filepath: Path of the sif file :return: None """
with open(filepath, "wb") as f: for src, dst in self.callgraph.edges(): f.write("%#x\tDirectEdge\t%#x\n" % (src, dst))
<SYSTEM_TASK:> Return the function who has the least address that is greater than or equal to `addr`. <END_TASK> <USER_TASK:> Description: def ceiling_func(self, addr): """ Return the function who has the least address that is greater than or equal to `addr`. :param int addr: The address to query. :return: A Function instance, or None if there is no other function after `addr`. :rtype: Function or None """
def ceiling_func(self, addr):
    """
    Return the function who has the least address that is greater than or equal to `addr`.

    :param int addr: The address to query.
    :return: A Function instance, or None if there is no other function after `addr`.
    :rtype: Function or None
    """
    try:
        ceiling = self._function_map.ceiling_addr(addr)
        return self._function_map.get(ceiling)
    except KeyError:
        # no function lives at or above addr
        return None
<SYSTEM_TASK:> Return the function who has the greatest address that is less than or equal to `addr`. <END_TASK> <USER_TASK:> Description: def floor_func(self, addr): """ Return the function who has the greatest address that is less than or equal to `addr`. :param int addr: The address to query. :return: A Function instance, or None if there is no other function before `addr`. :rtype: Function or None """
def floor_func(self, addr):
    """
    Return the function who has the greatest address that is less than or equal to `addr`.

    :param int addr: The address to query.
    :return: A Function instance, or None if there is no other function before `addr`.
    :rtype: Function or None
    """
    try:
        floor = self._function_map.floor_addr(addr)
        return self._function_map[floor]
    except KeyError:
        # no function lives at or below addr
        return None
<SYSTEM_TASK:> Get a function object from the function manager. <END_TASK> <USER_TASK:> Description: def function(self, addr=None, name=None, create=False, syscall=False, plt=None): """ Get a function object from the function manager. Pass either `addr` or `name` with the appropriate values. :param int addr: Address of the function. :param str name: Name of the function. :param bool create: Whether to create the function or not if the function does not exist. :param bool syscall: True to create the function as a syscall, False otherwise. :param bool or None plt: True to find the PLT stub, False to find a non-PLT stub, None to disable this restriction. :return: The Function instance, or None if the function is not found and create is False. :rtype: Function or None """
if addr is not None:
    try:
        # NOTE(review): this map's .get appears to raise KeyError on a miss
        # (a plain dict.get would return None and break the attribute access
        # below) -- confirm against the function-map implementation.
        f = self._function_map.get(addr)
        if plt is None or f.is_plt == plt:
            return f
    except KeyError:
        if create:
            # the function is not found
            # indexing the map creates the Function on demand
            f = self._function_map[addr]
            if name is not None:
                f.name = name
            if syscall:
                f.is_syscall=True
            return f
elif name is not None:
    # no address given: linear scan by name (names are not indexed)
    for func in self._function_map.values():
        if func.name == name:
            if plt is None or func.is_plt == plt:
                return func

return None
<SYSTEM_TASK:> Return a list of nodes that are control dependent on the given node in the control dependence graph <END_TASK> <USER_TASK:> Description: def get_dependants(self, run): """ Return a list of nodes that are control dependent on the given node in the control dependence graph """
def get_dependants(self, run):
    """
    Return a list of nodes that are control dependent on the given node in the control dependence graph

    :param run: The node to query.
    :return: A list of dependant nodes, or an empty list if the node is not in the graph.
    :rtype: list
    """
    # `node in graph` is the idiomatic (and constant-time) membership test;
    # materializing .nodes() first adds an indirection for no benefit.
    if run in self._graph:
        return list(self._graph.successors(run))
    return []
<SYSTEM_TASK:> Return a list of nodes on whom the specific node is control dependent in the control dependence graph <END_TASK> <USER_TASK:> Description: def get_guardians(self, run): """ Return a list of nodes on whom the specific node is control dependent in the control dependence graph """
def get_guardians(self, run):
    """
    Return a list of nodes on whom the specific node is control dependent in the control dependence graph

    :param run: The node to query.
    :return: A list of guardian nodes, or an empty list if the node is not in the graph.
    :rtype: list
    """
    # `node in graph` is the idiomatic (and constant-time) membership test;
    # materializing .nodes() first adds an indirection for no benefit.
    if run in self._graph:
        return list(self._graph.predecessors(run))
    return []
<SYSTEM_TASK:> Construct a control dependence graph. <END_TASK> <USER_TASK:> Description: def _construct(self): """ Construct a control dependence graph. This implementation is based on figure 6 of paper An Efficient Method of Computing Static Single Assignment Form by Ron Cytron, etc. """
self._acyclic_cfg = self._cfg.copy() # TODO: Cycle-removing is not needed - confirm it later # The CFG we use should be acyclic! #self._acyclic_cfg.remove_cycles() # Pre-process the acyclic CFG self._pre_process_cfg() # Construct post-dominator tree self._pd_construct() self._graph = networkx.DiGraph() # Construct the reversed dominance frontier mapping rdf = compute_dominance_frontier(self._normalized_cfg, self._post_dom) for y in self._cfg.graph.nodes(): if y not in rdf: continue for x in rdf[y]: self._graph.add_edge(x, y)
<SYSTEM_TASK:> There are cases where a loop has two overlapping loop headers thanks <END_TASK> <USER_TASK:> Description: def _post_process(self): """ There are cases where a loop has two overlapping loop headers thanks to the way VEX is dealing with continuous instructions. As we were breaking the connection between the second loop header and its successor, we shall restore them in our CDG. """
# TODO: Verify its correctness loop_back_edges = self._cfg.get_loop_back_edges() for b1, b2 in loop_back_edges: self._graph.add_edge(b1, b2)
<SYSTEM_TASK:> Create a phi variable for variables at block `block_addr`. <END_TASK> <USER_TASK:> Description: def make_phi_node(self, block_addr, *variables): """ Create a phi variable for variables at block `block_addr`. :param int block_addr: The address of the current block. :param variables: Variables that the phi variable represents. :return: The created phi variable. """
# Partition the incoming variables into already-phi and plain variables.
existing_phis = set()
non_phis = set()
for var in variables:
    if self.is_phi_variable(var):
        existing_phis.add(var)
    else:
        non_phis.add(var)
if len(existing_phis) == 1:
    # Exactly one phi among the inputs: reuse it, absorbing any plain
    # variables it does not cover yet instead of allocating a new phi.
    existing_phi = next(iter(existing_phis))
    if non_phis.issubset(self.get_phi_subvariables(existing_phi)):
        return existing_phi
    else:
        # Update phi variables
        self._phi_variables[existing_phi] |= non_phis
        return existing_phi

# Create a brand-new phi variable modeled on an arbitrary representative input.
repre = next(iter(variables))
repre_type = type(repre)
if repre_type is SimRegisterVariable:
    ident_sort = 'register'
    a = SimRegisterVariable(repre.reg, repre.size, ident=self.next_variable_ident(ident_sort))
elif repre_type is SimMemoryVariable:
    ident_sort = 'memory'
    a = SimMemoryVariable(repre.addr, repre.size, ident=self.next_variable_ident(ident_sort))
elif repre_type is SimStackVariable:
    ident_sort = 'stack'
    a = SimStackVariable(repre.offset, repre.size, ident=self.next_variable_ident(ident_sort))
else:
    raise TypeError('make_phi_node(): Unsupported variable type "%s".' % type(repre))

# Keep a record of all phi variables
self._phi_variables[a] = set(variables)
self._phi_variables_by_block[block_addr].add(a)

return a
<SYSTEM_TASK:> Get a list of variables. <END_TASK> <USER_TASK:> Description: def get_variables(self, sort=None, collapse_same_ident=False): """ Get a list of variables. :param str or None sort: Sort of the variable to get. :param collapse_same_ident: Whether variables of the same identifier should be collapsed or not. :return: A list of variables. :rtype: list """
def get_variables(self, sort=None, collapse_same_ident=False):
    """
    Get a list of variables.

    :param str or None sort:     Sort of the variable to get ('stack', 'reg', or None for all).
    :param collapse_same_ident:  Whether variables of the same identifier should be collapsed or not.
    :return:                     A list of variables.
    :rtype:                      list
    """
    if collapse_same_ident:
        raise NotImplementedError()

    def _matches(v):
        # filter by sort; unknown/None sorts match everything
        if sort == 'stack':
            return isinstance(v, SimStackVariable)
        if sort == 'reg':
            return isinstance(v, SimRegisterVariable)
        return True

    return [v for v in self._variables if _matches(v)]
<SYSTEM_TASK:> Get sub-variables that phi variable `var` represents. <END_TASK> <USER_TASK:> Description: def get_phi_subvariables(self, var): """ Get sub-variables that phi variable `var` represents. :param SimVariable var: The variable instance. :return: A set of sub-variables, or an empty set if `var` is not a phi variable. :rtype: set """
def get_phi_subvariables(self, var):
    """
    Get sub-variables that phi variable `var` represents.

    :param SimVariable var: The variable instance.
    :return: A set of sub-variables, or an empty set if `var` is not a phi variable.
    :rtype: set
    """
    if self.is_phi_variable(var):
        return self._phi_variables[var]
    return set()