text_prompt
stringlengths
100
17.7k
code_prompt
stringlengths
7
9.86k
def _arm_thumb_filter_jump_successors(self, addr, size, successors, get_ins_addr, get_exit_stmt_idx):
    """
    Filter successors for THUMB mode basic blocks, and remove those successors that won't be taken normally.

    :param int addr:        Address of the basic block / SimIRSB.
    :param int size:        Size of the basic block.
    :param list successors: A list of successors.
    :param func get_ins_addr:      A callable that returns the source instruction address for a successor.
    :param func get_exit_stmt_idx: A callable that returns the source statement ID for a successor.
    :return:                A new list of successors after filtering.
    :rtype:                 list
    """
    if not successors:
        return [ ]

    it_counter = 0            # number of remaining instructions conditionalized by the current IT block
    conc_temps = {}           # VEX tmp -> concrete constant value, used to recover the ITSTATE value
    can_produce_exits = set() # instruction addresses that are allowed to generate an exit
    bb = self._lift(addr, size=size, thumb=True, opt_level=0)

    for stmt in bb.vex.statements:
        if stmt.tag == 'Ist_IMark':
            if it_counter > 0:
                # this instruction lies inside an IT block, so it may conditionally exit
                it_counter -= 1
                can_produce_exits.add(stmt.addr + stmt.delta)
        elif stmt.tag == 'Ist_WrTmp':
            val = stmt.data
            if val.tag == 'Iex_Const':
                # track constant temps so we can read the concrete ITSTATE written below
                conc_temps[stmt.tmp] = val.con.value
        elif stmt.tag == 'Ist_Put':
            if stmt.offset == self.project.arch.registers['itstate'][0]:
                val = stmt.data
                if val.tag == 'Iex_RdTmp':
                    if val.tmp in conc_temps:
                        # We found an IT instruction!!
                        # Determine how many instructions are conditional
                        it_counter = 0
                        itstate = conc_temps[val.tmp]
                        # each non-zero byte of ITSTATE corresponds to one conditionalized instruction
                        while itstate != 0:
                            it_counter += 1
                            itstate >>= 8

    if it_counter != 0:
        l.debug('Basic block ends before calculated IT block (%#x)', addr)

    # conditional branch / compare-and-branch mnemonics that legitimately produce exits
    THUMB_BRANCH_INSTRUCTIONS = ('beq', 'bne', 'bcs', 'bhs', 'bcc', 'blo', 'bmi', 'bpl', 'bvs',
                                 'bvc', 'bhi', 'bls', 'bge', 'blt', 'bgt', 'ble', 'cbz', 'cbnz')
    for cs_insn in bb.capstone.insns:
        # strip condition/width suffixes (e.g. "beq.w") before matching
        if cs_insn.mnemonic.split('.')[0] in THUMB_BRANCH_INSTRUCTIONS:
            can_produce_exits.add(cs_insn.address)

    # keep only exits originating from instructions that may branch, plus the default exit
    successors_filtered = [suc for suc in successors
                           if get_ins_addr(suc) in can_produce_exits
                           or get_exit_stmt_idx(suc) == DEFAULT_STATEMENT]

    return successors_filtered
def _is_region_extremely_sparse(self, start, end, base_state=None):
    """
    Check whether the given memory region is extremely sparse, i.e., all bytes are the same value.

    :param int start:   The beginning of the region.
    :param int end:     The end of the region (inclusive).
    :param base_state:  The base state (optional). When given, bytes are read from it first.
    :return:            True if the region is extremely sparse, False otherwise.
    :rtype:             bool
    """
    size = end - start + 1
    data = None

    # prefer reading concrete bytes out of the base state, if one is available
    if base_state is not None:
        loaded = base_state.memory.load(start, size)
        try:
            data = base_state.solver.eval(loaded, cast_to=bytes)
        except SimError:
            data = None

    if data is None:
        # fall back to loading directly from the binary
        data = self._fast_memory_load_bytes(start, size)

    if data is None:
        # nothing mapped there at all - treat as sparse
        return True

    if len(data) < size:
        l.warning("_is_region_extremely_sparse: The given region %#x-%#x is not a continuous memory region in the "
                  "memory space. Only the first %d bytes (%#x-%#x) are processed.", start, end, len(data),
                  start, start + len(data) - 1)

    # the region is "extremely sparse" iff it contains at most one distinct byte value
    return len(set(data)) <= 1
def _should_skip_region(self, region_start):
    """
    Some regions usually do not contain any executable code, but are still marked as executable. We should skip
    those regions by default.

    :param int region_start: Address of the beginning of the region.
    :return:                 True/False
    :rtype:                  bool
    """
    containing_obj = self.project.loader.find_object_containing(region_start, membership_check=False)
    if containing_obj is None:
        return False
    # only PE binaries are known to carry such bogus-executable regions (e.g. .textbss)
    if not isinstance(containing_obj, PE):
        return False
    section = containing_obj.find_section_containing(region_start)
    return section is not None and section.name in {'.textbss'}
def _executable_memory_regions(self, objects=None, force_segment=False):
    """
    Get all executable memory regions from the binaries

    :param objects: A collection of binary objects to collect regions from. If None, regions from all project
                    binary objects are used.
    :param bool force_segment: Rely on binary segments instead of sections.
    :return: A sorted list of tuples (beginning_address, end_address)
    """
    if objects is None:
        binaries = self.project.loader.all_objects
    else:
        binaries = objects

    memory_regions = [ ]

    for b in binaries:
        if isinstance(b, ELF):
            # If we have sections, we get result from sections
            if not force_segment and b.sections:
                # Get all executable sections
                for section in b.sections:
                    if section.is_executable:
                        # NOTE(review): max_addr from CLE is inclusive, while the backer-based
                        # fallback below uses an exclusive end - confirm callers tolerate both.
                        tpl = (section.min_addr, section.max_addr)
                        memory_regions.append(tpl)
            else:
                # Get all executable segments
                for segment in b.segments:
                    if segment.is_executable:
                        tpl = (segment.min_addr, segment.max_addr)
                        memory_regions.append(tpl)
        elif isinstance(b, PE):
            for section in b.sections:
                if section.is_executable:
                    tpl = (section.min_addr, section.max_addr)
                    memory_regions.append(tpl)
        elif isinstance(b, MachO):
            if b.segments:
                # Get all executable segments
                for seg in b.segments:
                    if seg.is_executable:
                        # Take all sections from this segment (MachO style)
                        for section in seg.sections:
                            tpl = (section.min_addr, section.max_addr)
                            memory_regions.append(tpl)
        elif isinstance(b, Blob):
            # a blob is entirely executable
            tpl = (b.min_addr, b.max_addr)
            memory_regions.append(tpl)
        elif isinstance(b, self._cle_pseudo_objects):
            # pseudo objects (externs, TLS, kernel objects, ...) contain no real code
            pass
        else:
            l.warning('Unsupported object format "%s". Treat it as an executable.', b.__class__.__name__)
            tpl = (b.min_addr, b.max_addr)
            memory_regions.append(tpl)

    if not memory_regions:
        # no object yielded any region - fall back to raw memory backers
        memory_regions = [(start, start + len(backer)) for start, backer in self.project.loader.memory.backers()]

    # sort by start address
    memory_regions = sorted(memory_regions, key=lambda x: x[0])

    return memory_regions
def _addr_in_exec_memory_regions(self, addr):
    """
    Test if the address belongs to an executable memory region.

    :param int addr: The address to test
    :return:         True if the address belongs to an executable memory region, False otherwise
    :rtype:          bool
    """
    # each region is a (start, end) pair; the end bound is exclusive here
    return any(region_start <= addr < region_end
               for region_start, region_end in self._exec_mem_regions)
def _addrs_belong_to_same_section(self, addr_a, addr_b):
    """
    Test if two addresses belong to the same section.

    :param int addr_a: The first address to test.
    :param int addr_b: The second address to test.
    :return:           True if the two addresses belong to the same section or both of them do not belong to any
                       section, False otherwise.
    :rtype:            bool
    """
    obj_a = self.project.loader.find_object_containing(addr_a, membership_check=False)

    if obj_a is None:
        # addr_a is outside every object; they "match" only if addr_b is as well
        return self.project.loader.find_object_containing(addr_b, membership_check=False) is None

    section_a = obj_a.find_section_containing(addr_a)
    if section_a is None:
        # addr_a is in the object but outside every section; same rule for addr_b
        return obj_a.find_section_containing(addr_b) is None

    return section_a.contains_addr(addr_b)
def _addr_hooked_or_syscall(self, addr):
    """
    Check whether the address belongs to a hook or a syscall.

    :param int addr: The address to check.
    :return:         True if the address is hooked or belongs to a syscall. False otherwise.
    :rtype:          bool
    """
    project = self.project
    if project.is_hooked(addr):
        return True
    return project.simos.is_syscall_addr(addr)
def _fast_memory_load_bytes(self, addr, length):
    """
    Perform a fast memory loading of some data.

    :param int addr:   Address to read from.
    :param int length: Size of the string to load.
    :return:           A string or None if the address does not exist.
    :rtype:            bytes or None
    """
    try:
        data = self.project.loader.memory.load(addr, length)
    except KeyError:
        # the address is not mapped
        return None
    return data
def _fast_memory_load_pointer(self, addr, size=None):
    """
    Perform a fast memory loading of a pointer.

    :param int addr: Address to read from.
    :param int size: Size of the pointer. Default to machine-word size.
    :return:         A pointer or None if the address does not exist.
    :rtype:          int
    """
    try:
        word = self.project.loader.memory.unpack_word(addr, size=size)
    except KeyError:
        # the address is not mapped
        return None
    return word
def _determine_function_returning(self, func, all_funcs_completed=False):
    """
    Determine if a function returns or not.

    A function does not return if
    a) it is a SimProcedure that has NO_RET being True,
    or
    b) it is completely recovered (i.e. every block of this function has been recovered, and no future block will
       be added to it), and it does not have a ret or any equivalent instruction.

    A function returns if any of its block contains a ret instruction or any equivalence.

    :param Function func:            The function to work on.
    :param bool all_funcs_completed: Whether we treat all functions as completed functions or not.
    :return: True if the function returns, False if the function does not return, or None if it is not yet
             determinable with the information available at the moment.
    :rtype: bool or None
    """
    # If there is at least one return site, then this function is definitely returning
    if func.has_return:
        return True

    # Let's first see if it's a known SimProcedure that does not return
    if self.project.is_hooked(func.addr):
        procedure = self.project.hooked_by(func.addr)
    else:
        try:
            procedure = self.project.simos.syscall_from_addr(func.addr, allow_unsupported=False)
        except AngrUnsupportedSyscallError:
            procedure = None

    if procedure is not None and hasattr(procedure, 'NO_RET'):
        # the SimProcedure tells us directly whether it returns
        return not procedure.NO_RET

    # did we finish analyzing this function?
    if not all_funcs_completed and func.addr not in self._completed_functions:
        return None

    if not func.block_addrs_set:
        # there is no block inside this function
        # it might happen if the function has been incorrectly identified as part of another function
        # the error will be corrected during post-processing. In fact at this moment we cannot say anything
        # about whether this function returns or not. We always assume it returns.
        return True

    bail_out = False

    # if this function has jump-out sites or ret-out sites, it returns as long as any of the target function
    # returns
    for goout_site, type_ in [(site, 'jumpout') for site in func.jumpout_sites] + \
            [(site, 'retout') for site in func.retout_sites]:

        # determine where it jumps/returns to
        goout_site_successors = goout_site.successors()
        if not goout_site_successors:
            # not sure where it jumps to. bail out
            bail_out = True
            continue

        # for ret-out sites, determine what function it calls
        if type_ == 'retout':
            # see whether the function being called returns or not
            func_successors = [succ for succ in goout_site_successors if isinstance(succ, Function)]
            if func_successors and all(func_successor.returning in (None, False)
                                       for func_successor in func_successors):
                # the returning of all possible function calls are undetermined, or they do not return
                # ignore this site
                continue

        if type_ == 'retout':
            # the non-Function successor is the block control flow continues at after the callee returns
            goout_target = next((succ for succ in goout_site_successors if not isinstance(succ, Function)), None)
        else:
            goout_target = next((succ for succ in goout_site_successors), None)
        if goout_target is None:
            # there is no jumpout site, which is weird, but what can we do...
            continue
        if not self.kb.functions.contains_addr(goout_target.addr):
            # wait it does not jump to a function?
            bail_out = True
            continue

        target_func = self.kb.functions[goout_target.addr]
        if target_func.returning is True:
            # at least one target returns, so this function returns as well
            return True
        elif target_func.returning is None:
            # the returning status of at least one of the target functions is not decided yet.
            bail_out = True

    if bail_out:
        # We cannot determine at this point. bail out
        return None

    # well this function does not return then
    return False
def _analyze_function_features(self, all_funcs_completed=False):
    """
    For each function in the function_manager, try to determine if it returns or not. A function does not return
    if it calls another function that is known to be not returning, and this function does not have other exits.

    We might as well analyze other features of functions in the future.

    :param bool all_funcs_completed: Ignore _completed_functions set and treat all functions as completed. This
                                     can be set to True after the entire CFG is built and _post_analysis() is
                                     called (at which point analysis on all functions must be completed).
    :return: a dict with keys 'functions_return' and 'functions_do_not_return', each mapping to the list of
             Function objects whose returning status was decided in this pass.
    :rtype: dict
    """
    changes = {
        'functions_return': [],
        'functions_do_not_return': []
    }

    if self._updated_nonreturning_functions is not None:
        # only re-examine functions whose returning status may have changed
        all_func_addrs = self._updated_nonreturning_functions
        # Convert addresses to objects
        all_functions = [ self.kb.functions.get_by_addr(f) for f in all_func_addrs
                          if self.kb.functions.contains_addr(f) ]
    else:
        all_functions = list(self.kb.functions.values())

    analyzed_functions = set()
    # short-hand
    functions = self.kb.functions  # type: angr.knowledge.FunctionManager

    # worklist algorithm: deciding one function may make its callers decidable, so push callers back in
    while all_functions:
        func = all_functions.pop(-1)  # type: angr.knowledge.Function
        analyzed_functions.add(func.addr)

        if func.returning is not None:
            # It has been determined before. Skip it
            continue

        returning = self._determine_function_returning(func, all_funcs_completed=all_funcs_completed)
        if returning:
            func.returning = True
            changes['functions_return'].append(func)
        elif returning is False:
            func.returning = False
            changes['functions_do_not_return'].append(func)

        if returning is not None:
            # Add all callers of this function to all_functions list
            if func.addr in functions.callgraph:
                callers = functions.callgraph.predecessors(func.addr)
                for caller in callers:
                    if caller in analyzed_functions:
                        continue
                    if functions.contains_addr(caller):
                        all_functions.append(functions.get_by_addr(caller))

    return changes
def _iteratively_analyze_function_features(self, all_funcs_completed=False):
    """
    Iteratively analyze function features until a fixed point is reached.

    :return: the "changes" dict
    :rtype:  dict
    """
    changes = {
        'functions_do_not_return': set(),
        'functions_return': set(),
    }

    while True:
        delta = self._analyze_function_features(all_funcs_completed=all_funcs_completed)

        changes['functions_do_not_return'].update(delta['functions_do_not_return'])
        changes['functions_return'].update(delta['functions_return'])

        if not (delta['functions_do_not_return'] or delta['functions_return']):
            # a fixed point is reached
            break

    return changes
def normalize(self):
    """
    Normalize the CFG, making sure that there are no overlapping basic blocks.

    Note that this method will not alter transition graphs of each function in self.kb.functions. You may call
    normalize() on each Function object to normalize their transition graphs.

    :return: None
    """
    graph = self.graph

    smallest_nodes = { }  # indexed by end address of the node
    end_addresses_to_nodes = defaultdict(set)

    # group all non-SimProcedure nodes by (end address, callstack key); nodes sharing a key overlap
    for n in graph.nodes():
        if n.is_simprocedure:
            continue
        end_addr = n.addr + n.size
        key = (end_addr, n.callstack_key)
        # add the new item
        end_addresses_to_nodes[key].add(n)

    # keys with a single node are already normalized; move them to smallest_nodes
    for key in list(end_addresses_to_nodes.keys()):
        if len(end_addresses_to_nodes[key]) == 1:
            smallest_nodes[key] = next(iter(end_addresses_to_nodes[key]))
            del end_addresses_to_nodes[key]

    while end_addresses_to_nodes:
        # pick any key that still maps to more than one (i.e. overlapping) node
        key_to_find = (None, None)
        for tpl, x in end_addresses_to_nodes.items():
            if len(x) > 1:
                key_to_find = tpl
                break

        end_addr, callstack_key = key_to_find
        all_nodes = end_addresses_to_nodes[key_to_find]

        all_nodes = sorted(all_nodes, key=lambda node: node.addr, reverse=True)
        smallest_node = all_nodes[0]  # take the one that has the highest address
        other_nodes = all_nodes[1:]

        self._normalize_core(graph, callstack_key, smallest_node, other_nodes, smallest_nodes,
                             end_addresses_to_nodes
                             )

        del end_addresses_to_nodes[key_to_find]
        # make sure the smallest node is stored in end_addresses
        smallest_nodes[key_to_find] = smallest_node

        # corner case
        # sometimes two overlapping blocks may not be ending at the instruction. this might happen when one of the
        # blocks (the bigger one) hits the instruction count limit or bytes limit before reaching the end address
        # of the smaller block. in this case we manually pick up those blocks.
        if not end_addresses_to_nodes:
            # find if there are still overlapping blocks
            sorted_smallest_nodes = defaultdict(list)  # callstack_key is the key of this dict
            for k, node in smallest_nodes.items():
                _, callstack_key = k
                sorted_smallest_nodes[callstack_key].append(node)
            for k in sorted_smallest_nodes.keys():
                sorted_smallest_nodes[k] = sorted(sorted_smallest_nodes[k], key=lambda node: node.addr)

            for callstack_key, lst in sorted_smallest_nodes.items():
                lst_len = len(lst)
                for i, node in enumerate(lst):
                    if i == lst_len - 1:
                        break
                    next_node = lst[i + 1]
                    if node.addr <= next_node.addr < node.addr + node.size:
                        # umm, those nodes are overlapping, but they must have different end addresses
                        nodekey_a = node.addr + node.size, callstack_key
                        nodekey_b = next_node.addr + next_node.size, callstack_key

                        if nodekey_a == nodekey_b:
                            # error handling: this will only happen if we have completely overlapping nodes
                            # caused by different jumps (one of the jumps is probably incorrect), which usually
                            # indicates an error in CFG recovery. we print a warning and skip this node
                            l.warning("Found completely overlapping nodes %s. It usually indicates an error in CFG "
                                      "recovery. Skip.", node)
                            continue

                        if nodekey_a in smallest_nodes and nodekey_b in smallest_nodes:
                            # misuse end_addresses_to_nodes so the outer while loop processes this pair
                            end_addresses_to_nodes[(node.addr + node.size, callstack_key)].add(node)
                            end_addresses_to_nodes[(node.addr + node.size, callstack_key)].add(next_node)

                        smallest_nodes.pop(nodekey_a, None)
                        smallest_nodes.pop(nodekey_b, None)

    self._normalized = True
def _cleanup_analysis_jobs(self, finished_func_addrs=None):
    """
    From job manager, remove all functions of which we have finished analysis.

    :param list or None finished_func_addrs: A list of addresses of functions of which we have finished analysis.
                                             A new list of function addresses will be obtained by calling
                                             _get_finished_functions() if this parameter is None.
    :return: None
    """
    addrs = self._get_finished_functions() if finished_func_addrs is None else finished_func_addrs
    for addr in addrs:
        # pop() with a default avoids a KeyError for functions that never had pending jobs
        self._jobs_to_analyze_per_function.pop(addr, None)
def _make_completed_functions(self):
    """
    Fill in self._completed_functions list and clean up job manager.

    :return: None
    """
    finished_funcs = self._get_finished_functions()
    self._completed_functions.update(finished_funcs)
    self._cleanup_analysis_jobs(finished_func_addrs=finished_funcs)
def _addr_to_function(self, addr, blockaddr_to_function, known_functions):
    """
    Convert an address to a Function object, and store the mapping in a dict. If the block is known to be part of
    a function, just return that function.

    :param int addr: Address to convert
    :param dict blockaddr_to_function: A mapping between block addresses to Function instances.
    :param angr.knowledge_plugins.FunctionManager known_functions: Recovered functions.
    :return: a Function object
    :rtype: angr.knowledge.Function
    """
    if addr in blockaddr_to_function:
        # cache hit - the block is already assigned to a function
        f = blockaddr_to_function[addr]
    else:
        is_syscall = self.project.simos.is_syscall_addr(addr)

        n = self.model.get_any_node(addr, is_syscall=is_syscall)
        if n is None:
            # no CFG node exists at this address yet; fall back to the raw address
            node = addr
        else:
            node = self._to_snippet(n)

        if isinstance(addr, SootAddressDescriptor):
            # Soot (Java) addresses are keyed by method, not by instruction
            addr = addr.method

        self.kb.functions._add_node(addr, node, syscall=is_syscall)
        f = self.kb.functions.function(addr=addr)

        blockaddr_to_function[addr] = f

        function_is_returning = False
        if addr in known_functions:
            if known_functions.function(addr).returning:
                # inherit the returning status from the previously recovered function
                f.returning = True
                function_is_returning = True

        if not function_is_returning:
            # We will rerun function feature analysis on this function later. Add it to
            # self._updated_nonreturning_functions so it can be picked up by function feature analysis later.
            if self._updated_nonreturning_functions is not None:
                self._updated_nonreturning_functions.add(addr)

    return f
def _is_noop_block(arch, block):
    """
    Check if the block is a no-op block by checking VEX statements.

    :param arch:  The architecture of the block.
    :param block: The VEX block instance.
    :return:      True if the entire block is a single-byte or multi-byte nop instruction, False otherwise.
    :rtype:       bool
    """
    if arch.name == "MIPS32" and arch.memory_endness == "Iend_BE":
        MIPS32_BE_NOOPS = {
            b"\x00\x20\x08\x25",  # move $at, $at
        }
        # split the block into 4-byte (one MIPS instruction) chunks
        chunks = {block.bytes[i:i + 4] for i in range(0, block.size, 4)}
        if chunks <= MIPS32_BE_NOOPS:
            return True

    # Fallback
    # the block is a noop block if it only has IMark statements
    return all(type(stmt) is pyvex.IRStmt.IMark for stmt in block.vex.statements)
def _is_noop_insn(insn):
    """
    Check if the instruction does nothing.

    :param insn: The capstone insn object.
    :return:     True if the instruction does no-op, False otherwise.
    """
    name = insn.insn_name()

    if name == 'nop':
        # nops
        return True

    if name == 'lea':
        # lea reg, [reg + 0]
        dst, src = insn.operands
        # operand type 1 is a register, type 3 is a memory reference
        if dst.type == 1 and src.type == 3 \
                and dst.reg == src.mem.base and src.mem.index == 0 and src.mem.disp == 0:
            return True

    # add more types of no-op instructions here :-)
    return False
def _get_nop_length(cls, insns):
    """
    Calculate the total size of leading nop instructions.

    :param insns: A list of capstone insn objects.
    :return:      Number of bytes of leading nop instructions.
    :rtype:       int
    """
    total = 0

    if insns and cls._is_noop_insn(insns[0]):
        # see where those nop instructions terminate
        for insn in insns:
            if not cls._is_noop_insn(insn):
                break
            total += insn.size

    return total
def _lift(self, *args, **kwargs):
    """
    Lift a basic block of code. Will use the base state as a source of bytes if possible.
    """
    # fall back to the base state for byte backing unless the caller supplied one
    kwargs.setdefault('backup_state', self._base_state)
    return self.project.factory.block(*args, **kwargs)
def _resolve_indirect_jump_timelessly(self, addr, block, func_addr, jumpkind):
    """
    Attempt to resolve an indirect jump without simulation, using the timeless resolvers.

    :param int addr:        irsb address
    :param pyvex.IRSB block: irsb
    :param int func_addr:   Function address
    :return:                If it was resolved and targets alongside it
    :rtype:                 tuple
    """
    if block.statements is None:
        # the IRSB was lifted without statements - re-lift it so resolvers can inspect them
        block = self.project.factory.block(block.addr, size=block.size).vex

    for resolver in self.timeless_indirect_jump_resolvers:
        if not resolver.filter(self, addr, func_addr, block, jumpkind):
            continue
        resolved, resolved_targets = resolver.resolve(self, addr, func_addr, block, jumpkind)
        if resolved:
            return True, resolved_targets

    return False, [ ]
def _process_unresolved_indirect_jumps(self):
    """
    Resolve all unresolved indirect jumps found in previous scanning.

    Currently we support resolving the following types of indirect jumps:
    - Ijk_Call: indirect calls where the function address is passed in from a proceeding basic block
    - Ijk_Boring: jump tables
    - For an up-to-date list, see analyses/cfg/indirect_jump_resolvers

    :return: A set of concrete indirect jump targets (ints).
    :rtype:  set
    """
    l.info("%d indirect jumps to resolve.", len(self._indirect_jumps_to_resolve))

    all_targets = set()
    for idx, jump in enumerate(self._indirect_jumps_to_resolve):  # type:int,IndirectJump
        if self._low_priority:
            # periodically yield the GIL so other threads make progress
            self._release_gil(idx, 20, 0.0001)
        all_targets.update(self._process_one_indirect_jump(jump))

    self._indirect_jumps_to_resolve.clear()

    return all_targets
def _process_one_indirect_jump(self, jump):
    """
    Resolve a given indirect jump.

    :param IndirectJump jump: The IndirectJump instance.
    :return:                  A set of resolved indirect jump targets (ints).
    """
    resolved_by = None
    targets = None

    block = self._lift(jump.addr, opt_level=1)

    # try each resolver in order; the first one that both accepts and resolves the jump wins
    for resolver in self.indirect_jump_resolvers:
        resolver.base_state = self._base_state
        if not resolver.filter(self, jump.addr, jump.func_addr, block, jump.jumpkind):
            continue
        ok, targets = resolver.resolve(self, jump.addr, jump.func_addr, block, jump.jumpkind)
        if ok:
            resolved_by = resolver
            break

    if resolved_by is not None:
        self._indirect_jump_resolved(jump, jump.addr, resolved_by, targets)
    else:
        self._indirect_jump_unresolved(jump)

    return set(targets) if targets is not None else set()
def _parse_load_statement(load_stmt, state):
    """
    Parse a memory load VEX statement and get the jump target addresses.

    :param load_stmt: The VEX statement for loading the jump target addresses.
    :param state:     The SimState instance (in static mode).
    :return:          An abstract value (or a concrete value) representing the jump target addresses. Return None
                      if we fail to parse the statement.
    """
    # The jump table address is stored in a tmp. In this case, we find the jump-target loading tmp.
    load_addr_tmp = None

    if isinstance(load_stmt, pyvex.IRStmt.WrTmp):
        if type(load_stmt.data.addr) is pyvex.IRExpr.RdTmp:
            load_addr_tmp = load_stmt.data.addr.tmp
        elif type(load_stmt.data.addr) is pyvex.IRExpr.Const:
            # It's directly loading from a constant address
            # e.g.,
            #  ldr r0, =main+1
            #  blx r0
            # It's not a jump table, but we resolve it anyway
            jump_target_addr = load_stmt.data.addr.con.value
            return state.solver.BVV(jump_target_addr, state.arch.bits)
    elif isinstance(load_stmt, pyvex.IRStmt.LoadG):
        if type(load_stmt.addr) is pyvex.IRExpr.RdTmp:
            load_addr_tmp = load_stmt.addr.tmp
        elif type(load_stmt.addr) is pyvex.IRExpr.Const:
            # It's directly loading from a constant address
            # e.g.,
            #  4352c SUB     R1, R11, #0x1000
            #  43530 LDRHI   R3, =loc_45450
            #  ...
            #  43540 MOV     PC, R3
            #
            # It's not a jump table, but we resolve it anyway
            # Note that this block has two branches: One goes to 45450, the other one goes to whatever the original
            # value of R3 is. Some intensive data-flow analysis is required in this case.
            jump_target_addr = load_stmt.addr.con.value
            return state.solver.BVV(jump_target_addr, state.arch.bits)
    else:
        raise TypeError("Unsupported address loading statement type %s." % type(load_stmt))

    if state.scratch.temps[load_addr_tmp] is None:
        # the tmp variable is not there... umm...
        return None

    jump_addr = state.scratch.temps[load_addr_tmp]

    if isinstance(load_stmt, pyvex.IRStmt.LoadG):
        # LoadG comes with a guard. We should apply this guard to the load expression
        guard_tmp = load_stmt.guard.tmp
        guard = state.scratch.temps[guard_tmp] != 0
        try:
            jump_addr = state.memory._apply_condition_to_symbolic_addr(jump_addr, guard)
        except Exception:  # pylint: disable=broad-except
            l.exception("Error computing jump table address!")
            return None

    return jump_addr
def _search(self, addr):
    """
    Checks which segment that the address `addr` should belong to, and, returns the offset of that segment.
    Note that the address may not actually belong to the block.

    :param addr: The address to search
    :return:     The offset of the segment.
    """
    # standard binary search over the sorted, non-overlapping segment list
    lo, hi = 0, len(self._list)

    while lo != hi:
        mid = (lo + hi) // 2
        segment = self._list[mid]
        if addr < segment.start:
            hi = mid
        elif addr >= segment.end:
            lo = mid + 1
        else:
            # Overlapped :( - addr falls inside this segment
            return mid

    return lo
def _dbg_output(self):
    """
    Returns a string representation of the segments that form this SegmentList

    :return: String representation of contents
    :rtype:  str
    """
    return "[" + ", ".join(repr(segment) for segment in self._list) + "]"
def _debug_check(self):
    """
    Iterates over list checking segments with same sort do not overlap

    :raise: Exception: if segments overlap space with same sort
    """
    prev_end = 0
    prev_sort = ""
    for segment in self._list:
        # adjacent or overlapping segments of the same sort should have been merged
        if segment.start <= prev_end and segment.sort == prev_sort:
            raise AngrCFGError("Error in SegmentList: blocks are not merged")
        prev_end = segment.end
        prev_sort = segment.sort
def next_free_pos(self, address):
    """
    Returns the next free position with respect to an address, including that address itself

    :param address: The address to begin the search with (including itself)
    :return:        The next free position
    """
    idx = self._search(address)
    if idx < len(self._list) and self._list[idx].start <= address < self._list[idx].end:
        # `address` is inside an occupied segment. Walk forward over every segment that
        # starts exactly where the previous one ends, and return the end of that
        # contiguous run of segments.
        i = idx
        while i + 1 < len(self._list) and self._list[i].end == self._list[i + 1].start:
            i += 1
        # i is always a valid index here (the loop keeps i <= len(self._list) - 1), so the
        # former `if i == len(self._list)` branch was unreachable and has been removed
        return self._list[i].end

    # `address` itself is not inside any segment - it is already free
    return address
def next_pos_with_sort_not_in(self, address, sorts, max_distance=None):
    """
    Returns the address of the next occupied block whose sort is not one of the specified ones.

    :param int address:  The address to begin the search with (including itself).
    :param sorts:        A collection of sort strings.
    :param max_distance: The maximum distance between `address` and the next position. Search will stop after
                         we come across an occupied position that is beyond `address` + max_distance. This check
                         will be disabled if `max_distance` is set to None.
    :return:             The next occupied position whose sort is not one of the specified ones, or None if no
                         such position exists.
    :rtype:              int or None
    """
    n = len(self._list)
    idx = self._search(address)

    if idx < n:
        segment = self._list[idx]
        if max_distance is not None and address + max_distance < segment.start:
            # the first candidate already lies beyond the search horizon
            return None
        if segment.start <= address < segment.end:
            # the address is inside the current block
            if segment.sort not in sorts:
                return address
            # this block's sort is excluded; continue with the next one
            idx += 1

    for i in range(idx, n):
        segment = self._list[i]
        if max_distance is not None and address + max_distance < segment.start:
            return None
        if segment.sort not in sorts:
            return segment.start

    return None
<SYSTEM_TASK:> Check if an address belongs to any segment <END_TASK> <USER_TASK:> Description: def is_occupied(self, address): """ Check if an address belongs to any segment :param address: The address to check :return: True if this address belongs to a segment, False otherwise """
# Membership test: True iff `address` falls inside some stored segment.
idx = self._search(address)
if len(self._list) <= idx:
    return False
if self._list[idx].start <= address < self._list[idx].end:
    return True
if idx > 0 and address < self._list[idx - 1].end:
    # TODO: It seems that this branch is never reached. Should it be removed?
    return True
return False
<SYSTEM_TASK:> Check if an address belongs to any segment, and if yes, returns the sort of the segment <END_TASK> <USER_TASK:> Description: def occupied_by_sort(self, address): """ Check if an address belongs to any segment, and if yes, returns the sort of the segment :param int address: The address to check :return: Sort of the segment that occupies this address :rtype: str """
# Same lookup as is_occupied(), but returns the containing segment's sort.
idx = self._search(address)
if len(self._list) <= idx:
    return None
if self._list[idx].start <= address < self._list[idx].end:
    return self._list[idx].sort
if idx > 0 and address < self._list[idx - 1].end:
    # TODO: It seems that this branch is never reached. Should it be removed?
    return self._list[idx - 1].sort
return None
<SYSTEM_TASK:> Make a copy of this SimLibrary, allowing it to be mutated without affecting the global version. <END_TASK> <USER_TASK:> Description: def copy(self): """ Make a copy of this SimLibrary, allowing it to be mutated without affecting the global version. :return: A new SimLibrary object with the same library references but different dict/list references """
# Shallow-copy each container so the new SimLibrary can be mutated
# independently; the procedure/prototype objects themselves stay shared.
o = SimLibrary()
o.procedures = dict(self.procedures)
o.non_returning = set(self.non_returning)
o.prototypes = dict(self.prototypes)
o.default_ccs = dict(self.default_ccs)
o.names = list(self.names)
return o
<SYSTEM_TASK:> Set some common names of this library by which it may be referred during linking <END_TASK> <USER_TASK:> Description: def set_library_names(self, *names): """ Set some common names of this library by which it may be referred during linking :param names: Any number of string library names may be passed as varargs. """
# Record each alias and register this library in the global lookup table.
for name in names:
    self.names.append(name)
    SIM_LIBRARIES[name] = self
<SYSTEM_TASK:> Set the default calling convention used for this library under a given architecture <END_TASK> <USER_TASK:> Description: def set_default_cc(self, arch_name, cc_cls): """ Set the default calling convention used for this library under a given architecture :param arch_name: The string name of the architecture, i.e. the ``.name`` field from archinfo. :parm cc_cls: The SimCC class (not an instance!) to use """
# Canonicalize the arch name through archinfo before keying the cc table.
arch_name = archinfo.arch_from_id(arch_name).name
self.default_ccs[arch_name] = cc_cls
<SYSTEM_TASK:> Set the prototype of a function in the form of a C-style function declaration. <END_TASK> <USER_TASK:> Description: def set_c_prototype(self, c_decl): """ Set the prototype of a function in the form of a C-style function declaration. :param str c_decl: The C-style declaration of the function. :return: A tuple of (function name, function prototype) :rtype: tuple """
# Parse the C declaration; parse_file returns a tuple whose first element
# maps declared names to their SimType prototypes.
parsed = parse_file(c_decl)
parsed_decl = parsed[0]
if not parsed_decl:
    raise ValueError('Cannot parse the function prototype.')
# exactly one declaration is expected; take its (name, prototype) pair
func_name, func_proto = next(iter(parsed_decl.items()))
self.set_prototype(func_name, func_proto)
return func_name, func_proto
<SYSTEM_TASK:> Add a function implementation to the library. <END_TASK> <USER_TASK:> Description: def add(self, name, proc_cls, **kwargs): """ Add a function implementation to the library. :param name: The name of the function as a string :param proc_cls: The implementation of the function as a SimProcedure _class_, not instance :param kwargs: Any additional parameters to the procedure class constructor may be passed as kwargs """
# Instantiate the SimProcedure class and register it under `name`.
self.procedures[name] = proc_cls(display_name=name, **kwargs)
<SYSTEM_TASK:> Batch-add function implementations to the library. <END_TASK> <USER_TASK:> Description: def add_all_from_dict(self, dictionary, **kwargs): """ Batch-add function implementations to the library. :param dictionary: A mapping from name to procedure class, i.e. the first two arguments to add() :param kwargs: Any additional kwargs will be passed to the constructors of _each_ procedure class """
# Delegate to add() so every entry gets the same kwargs and display_name.
for name, procedure in dictionary.items():
    self.add(name, procedure, **kwargs)
<SYSTEM_TASK:> Add some duplicate names for a given function. The original function's implementation must already be <END_TASK> <USER_TASK:> Description: def add_alias(self, name, *alt_names): """ Add some duplicate names for a given function. The original function's implementation must already be registered. :param name: The name of the function for which an implementation is already present :param alt_names: Any number of alternate names may be passed as varargs """
# Deep-copy the existing procedure per alias so each copy can carry its
# own display_name without affecting the original.
old_procedure = self.procedures[name]
for alt in alt_names:
    new_procedure = copy.deepcopy(old_procedure)
    new_procedure.display_name = alt
    self.procedures[alt] = new_procedure
<SYSTEM_TASK:> Get an implementation of the given function specialized for the given arch, or a stub procedure if none exists. <END_TASK> <USER_TASK:> Description: def get(self, name, arch): """ Get an implementation of the given function specialized for the given arch, or a stub procedure if none exists. :param name: The name of the function as a string :param arch: The architecure to use, as either a string or an archinfo.Arch instance :return: A SimProcedure instance representing the function as found in the library """
# Normalize a string arch to an archinfo.Arch, then return a deep copy of
# the registered implementation (so callers can't mutate the shared one),
# or fall back to a stub.
if type(arch) is str:
    arch = archinfo.arch_from_id(arch)
if name in self.procedures:
    proc = copy.deepcopy(self.procedures[name])
    self._apply_metadata(proc, arch)
    return proc
else:
    return self.get_stub(name, arch)
<SYSTEM_TASK:> Get a stub procedure for the given function, regardless of if a real implementation is available. This will <END_TASK> <USER_TASK:> Description: def get_stub(self, name, arch): """ Get a stub procedure for the given function, regardless of if a real implementation is available. This will apply any metadata, such as a default calling convention or a function prototype. By stub, we pretty much always mean a ``ReturnUnconstrained`` SimProcedure with the appropriate display name and metadata set. This will appear in ``state.history.descriptions`` as ``<SimProcedure display_name (stub)>`` :param name: The name of the function as a string :param arch: The architecture to use, as either a string or an archinfo.Arch instance :return: A SimProcedure instance representing a plausible stub as could be found in the library. """
# Build the fallback procedure marked as a stub, then attach cc/prototype
# metadata for the given arch.
proc = self.fallback_proc(display_name=name, is_stub=True)
self._apply_metadata(proc, arch)
return proc
<SYSTEM_TASK:> Check if a function has either an implementation or any metadata associated with it <END_TASK> <USER_TASK:> Description: def has_metadata(self, name): """ Check if a function has either an implementation or any metadata associated with it :param name: The name of the function as a string :return: A bool indicating if anything is known about the function """
# Anything known at all: an implementation, a non-returning flag, or a prototype.
return self.has_implementation(name) or \
    name in self.non_returning or \
    name in self.prototypes
<SYSTEM_TASK:> Associate a syscall number with the name of a function present in the underlying SimLibrary <END_TASK> <USER_TASK:> Description: def add_number_mapping(self, abi, number, name): """ Associate a syscall number with the name of a function present in the underlying SimLibrary :param abi: The abi for which this mapping applies :param number: The syscall number :param name: The name of the function """
# Keep both directions of the mapping in sync: number -> name and name -> number.
self.syscall_number_mapping[abi][number] = name
self.syscall_name_mapping[abi][name] = number
<SYSTEM_TASK:> Batch-associate syscall numbers with names of functions present in the underlying SimLibrary <END_TASK> <USER_TASK:> Description: def add_number_mapping_from_dict(self, abi, mapping): """ Batch-associate syscall numbers with names of functions present in the underlying SimLibrary :param abi: The abi for which this mapping applies :param mapping: A dict mapping syscall numbers to function names """
# Bulk-update both directions; reversed(i) flips each (number, name) pair
# into (name, number) for the inverse table.
self.syscall_number_mapping[abi].update(mapping)
self.syscall_name_mapping[abi].update(dict(reversed(i) for i in mapping.items()))
<SYSTEM_TASK:> Returns the Claripy expression of a VEX temp value. <END_TASK> <USER_TASK:> Description: def tmp_expr(self, tmp): """ Returns the Claripy expression of a VEX temp value. :param tmp: the number of the tmp :param simplify: simplify the tmp before returning it :returns: a Claripy expression of the tmp """
# Fire the tmp_read breakpoints around the lookup; a None slot means the
# tmp was never written (typically a bad slice), an out-of-range index
# means the tmp is not in this tyenv at all.
self.state._inspect('tmp_read', BP_BEFORE, tmp_read_num=tmp)
try:
    v = self.temps[tmp]
    if v is None:
        raise SimValueError('VEX temp variable %d does not exist. This is usually the result of an incorrect '
                            'slicing.' % tmp)
except IndexError:
    raise SimValueError("Accessing a temp that is illegal in this tyenv")
self.state._inspect('tmp_read', BP_AFTER, tmp_read_expr=v)
return v
<SYSTEM_TASK:> Stores a Claripy expression in a VEX temp value. <END_TASK> <USER_TASK:> Description: def store_tmp(self, tmp, content, reg_deps=None, tmp_deps=None, deps=None): """ Stores a Claripy expression in a VEX temp value. If in symbolic mode, this involves adding a constraint for the tmp's symbolic variable. :param tmp: the number of the tmp :param content: a Claripy expression of the content :param reg_deps: the register dependencies of the content :param tmp_deps: the temporary value dependencies of the content """
# Fire the tmp_write breakpoint first; the breakpoint may rewrite both the
# tmp number and the content.
self.state._inspect('tmp_write', BP_BEFORE, tmp_write_num=tmp, tmp_write_expr=content)
tmp = self.state._inspect_getattr('tmp_write_num', tmp)
content = self.state._inspect_getattr('tmp_write_expr', content)

if o.SYMBOLIC_TEMPS not in self.state.options:
    # Non-symbolic
    self.temps[tmp] = content
else:
    # Symbolic: the slot already holds a symbolic variable; constrain it
    # to equal the content instead of overwriting it
    self.state.add_constraints(self.temps[tmp] == content)

# get the size, and record the write
if o.TRACK_TMP_ACTIONS in self.state.options:
    data_ao = SimActionObject(content, reg_deps=reg_deps, tmp_deps=tmp_deps, deps=deps, state=self.state)
    r = SimActionData(self.state, SimActionData.TMP, SimActionData.WRITE, tmp=tmp, data=data_ao,
                      size=content.length)
    self.state.history.add_action(r)

self.state._inspect('tmp_write', BP_AFTER)
<SYSTEM_TASK:> Takes a path and returns a simple absolute path as a list of directories from the root <END_TASK> <USER_TASK:> Description: def _normalize_path(self, path): """ Takes a path and returns a simple absolute path as a list of directories from the root """
# Work in bytes, truncate at the first NUL, make relative paths absolute
# against cwd, then resolve '', '.' and '..' components in place.
if type(path) is str:
    path = path.encode()
path = path.split(b'\0')[0]
if path[0:1] != self.pathsep:
    path = self.cwd + self.pathsep + path
keys = path.split(self.pathsep)
i = 0
while i < len(keys):
    if keys[i] == b'':
        keys.pop(i)
    elif keys[i] == b'.':
        keys.pop(i)
    elif keys[i] == b'..':
        keys.pop(i)
        # '..' also removes the previous component (unless already at root)
        if i != 0:
            keys.pop(i-1)
            i -= 1
    else:
        i += 1
return keys
<SYSTEM_TASK:> Changes the current directory to the given path <END_TASK> <USER_TASK:> Description: def chdir(self, path): """ Changes the current directory to the given path """
# Store the normalized absolute form of the requested directory.
self.cwd = self._join_chunks(self._normalize_path(path))
<SYSTEM_TASK:> Get a file from the filesystem. Returns a SimFile or None. <END_TASK> <USER_TASK:> Description: def get(self, path): """ Get a file from the filesystem. Returns a SimFile or None. """
# Route through a mountpoint if one covers the path; otherwise look up the
# flat file table by the joined absolute path.
mountpoint, chunks = self.get_mountpoint(path)

if mountpoint is None:
    return self._files.get(self._join_chunks(chunks))
else:
    return mountpoint.get(chunks)
<SYSTEM_TASK:> Insert a file into the filesystem. Returns whether the operation was successful. <END_TASK> <USER_TASK:> Description: def insert(self, path, simfile): """ Insert a file into the filesystem. Returns whether the operation was successful. """
# Bind the file to our state first, then either store it in the flat file
# table or hand it to the covering mountpoint.
if self.state is not None:
    simfile.set_state(self.state)

mountpoint, chunks = self.get_mountpoint(path)

if mountpoint is None:
    self._files[self._join_chunks(chunks)] = simfile
    return True
else:
    return mountpoint.insert(chunks, simfile)
<SYSTEM_TASK:> Remove a file from the filesystem. Returns whether the operation was successful. <END_TASK> <USER_TASK:> Description: def delete(self, path): """ Remove a file from the filesystem. Returns whether the operation was successful. This will add a ``fs_unlink`` event with the path of the file and also the index into the `unlinks` list. """
# Unlink from the flat table (recording an fs_unlink history event and the
# removed file for later inspection) or delegate to the mountpoint.
mountpoint, chunks = self.get_mountpoint(path)
apath = self._join_chunks(chunks)

if mountpoint is None:
    try:
        simfile = self._files.pop(apath)
    except KeyError:
        return False
    else:
        self.state.history.add_event('fs_unlink', path=apath, unlink_idx=len(self.unlinks))
        self.unlinks.append((apath, simfile))
        return True
else:
    return mountpoint.delete(chunks)
<SYSTEM_TASK:> Add a mountpoint to the filesystem. <END_TASK> <USER_TASK:> Description: def mount(self, path, mount): """ Add a mountpoint to the filesystem. """
# Key the mount table by the normalized absolute path.
self._mountpoints[self._join_chunks(self._normalize_path(path))] = mount
<SYSTEM_TASK:> Remove a mountpoint from the filesystem. <END_TASK> <USER_TASK:> Description: def unmount(self, path): """ Remove a mountpoint from the filesystem. """
# Remove by normalized absolute path; raises KeyError if not mounted.
del self._mountpoints[self._join_chunks(self._normalize_path(path))]
<SYSTEM_TASK:> Look up the mountpoint servicing the given path. <END_TASK> <USER_TASK:> Description: def get_mountpoint(self, path): """ Look up the mountpoint servicing the given path. :return: A tuple of the mount and a list of path elements traversing from the mountpoint to the specified file. """
# Try successively longer prefixes of the path against the mount table;
# on a hit, return (mount, remaining path components under the mount).
path_chunks = self._normalize_path(path)

for i in range(len(path_chunks) - 1, -1, -1):
    # NOTE(review): when i == 0, path_chunks[:-0] is the EMPTY list (and
    # path_chunks[-0:] is the whole list), so the full-path prefix is never
    # tested and the final iteration queries the root prefix instead —
    # confirm this is the intended mount-resolution order.
    partial_path = self._join_chunks(path_chunks[:-i])
    if partial_path in self._mountpoints:
        mountpoint = self._mountpoints[partial_path]
        if mountpoint is None:
            # an explicit None entry masks any shorter mount
            break
        return mountpoint, path_chunks[-i:]

return None, path_chunks
<SYSTEM_TASK:> Store in native memory. <END_TASK> <USER_TASK:> Description: def _store_in_native_memory(self, data, data_type, addr=None): """ Store in native memory. :param data: Either a single value or a list. Lists get interpreted as an array. :param data_type: Java type of the element(s). :param addr: Native store address. If not set, native memory is allocated. :return: Native addr of the stored data. """
# check if addr is symbolic if addr is not None and self.state.solver.symbolic(addr): raise NotImplementedError('Symbolic addresses are not supported.') # lookup native size of the type type_size = ArchSoot.sizeof[data_type] native_memory_endness = self.state.arch.memory_endness # store single value if isinstance(data, int): if addr is None: addr = self._allocate_native_memory(size=type_size//8) value = self.state.solver.BVV(data, type_size) self.state.memory.store(addr, value, endness=native_memory_endness) # store array elif isinstance(data, list): if addr is None: addr = self._allocate_native_memory(size=type_size*len(data)//8) for idx, value in enumerate(data): memory_addr = addr+idx*type_size//8 self.state.memory.store(memory_addr, value, endness=native_memory_endness) # return native addr return addr
<SYSTEM_TASK:> Load from native memory. <END_TASK> <USER_TASK:> Description: def _load_from_native_memory(self, addr, data_type=None, data_size=None, no_of_elements=1, return_as_list=False): """ Load from native memory. :param addr: Native load address. :param data_type: Java type of elements. If set, all loaded elements are casted to this type. :param data_size: Size of each element. If not set, size is determined based on the given type. :param no_of_elements: Number of elements to load. :param return_as_list: Whether to wrap a single element in a list. :return: The value or a list of loaded element(s). """
# check if addr is symbolic if addr is not None and self.state.solver.symbolic(addr): raise NotImplementedError('Symbolic addresses are not supported.') # if data size is not set, derive it from the type if not data_size: if data_type: data_size = ArchSoot.sizeof[data_type]//8 else: raise ValueError("Cannot determine the data size w/o a type.") native_memory_endness = self.state.arch.memory_endness # load elements values = [] for i in range(no_of_elements): value = self.state.memory.load(addr + i*data_size, size=data_size, endness=native_memory_endness) if data_type: value = self.state.project.simos.cast_primitive(self.state, value=value, to_type=data_type) values.append(value) # return element(s) if no_of_elements == 1 and not return_as_list: return values[0] else: return values
<SYSTEM_TASK:> Load zero terminated UTF-8 string from native memory. <END_TASK> <USER_TASK:> Description: def _load_string_from_native_memory(self, addr_): """ Load zero terminated UTF-8 string from native memory. :param addr_: Native load address. :return: Loaded string. """
# check if addr is symbolic if self.state.solver.symbolic(addr_): l.error("Loading strings from symbolic addresses is not implemented. " "Continue execution with an empty string.") return "" addr = self.state.solver.eval(addr_) # load chars one by one chars = [] for i in itertools.count(): str_byte = self.state.memory.load(addr+i, size=1) if self.state.solver.symbolic(str_byte): l.error("Loading of strings with symbolic chars is not supported. " "Character %d is concretized.", i) str_byte = self.state.solver.eval(str_byte) if str_byte == 0: break chars.append(chr(str_byte)) return "".join(chars)
<SYSTEM_TASK:> Store given string UTF-8 encoded and zero terminated in native memory. <END_TASK> <USER_TASK:> Description: def _store_string_in_native_memory(self, string, addr=None): """ Store given string UTF-8 encoded and zero terminated in native memory. :param str string: String :param addr: Native store address. If not set, native memory is allocated. :return: Native address of the string. """
if addr is None: addr = self._allocate_native_memory(size=len(string)+1) else: # check if addr is symbolic if self.state.solver.symbolic(addr): l.error("Storing strings at symbolic addresses is not implemented. " "Continue execution with concretized address.") addr = self.state.solver.eval(addr) # warn if string is symbolic if self.state.solver.symbolic(string): l.warning('Support for symbolic strings, passed to native code, is limited. ' 'String will get concretized after `ReleaseStringUTFChars` is called.') # store chars one by one str_len = len(string) // 8 for idx in range(str_len): str_byte = StrSubstr(idx, 1, string) self.state.memory.store(addr+idx, str_byte) # store terminating zero self.state.memory.store(len(string), BVV(0, 8)) return addr
<SYSTEM_TASK:> In Java, all array indices are represented by a 32 bit integer and <END_TASK> <USER_TASK:> Description: def _normalize_array_idx(self, idx): """ In Java, all array indices are represented by a 32 bit integer and consequently we are using in the Soot engine a 32bit bitvector for this. This function normalize the given index to follow this "convention". :return: Index as a 32bit bitvector. """
# Unwrap SimActionObject to the raw claripy AST, then truncate to the low
# 32 bits — taking endianness into account when extracting the bytes.
if isinstance(idx, SimActionObject):
    idx = idx.to_claripy()
if self.arch.memory_endness == "Iend_LE":
    # reverse, grab the 4 low-order bytes, reverse back
    return idx.reversed.get_bytes(index=0, size=4).reversed
else:
    return idx.get_bytes(index=0, size=4)
<SYSTEM_TASK:> Given the target `target`, apply the hooks given as keyword arguments to it. <END_TASK> <USER_TASK:> Description: def install_hooks(target, **hooks): """ Given the target `target`, apply the hooks given as keyword arguments to it. If any targeted method has already been hooked, the hooks will not be overridden but will instead be pushed into a list of pending hooks. The final behavior should be that all hooks call each other in a nested stack. :param target: Any object. Its methods named as keys in `hooks` will be replaced by `HookedMethod` objects. :param hooks: Any keywords will be interpreted as hooks to apply. Each method named will be hooked with the corresponding function value. """
# Wrap each targeted method in a HookedMethod (once) and push the new hook
# onto its pending stack; already-wrapped methods just accumulate hooks.
for name, hook in hooks.items():
    func = getattr(target, name)
    if not isinstance(func, HookedMethod):
        func = HookedMethod(func)
        setattr(target, name, func)
    func.pending.append(hook)
<SYSTEM_TASK:> Remove the given hooks from the given target. <END_TASK> <USER_TASK:> Description: def remove_hooks(target, **hooks): """ Remove the given hooks from the given target. :param target: The object from which to remove hooks. If all hooks are removed from a given method, the HookedMethod object will be removed and replaced with the original function. :param hooks: Any keywords will be interpreted as hooks to remove. You must provide the exact hook that was applied so that it can it can be identified for removal among any other hooks. """
# Pop each named hook from the method's pending list; once no hooks remain,
# restore the original unwrapped function.
for name, hook in hooks.items():
    hooked = getattr(target, name)
    if hook in hooked.pending:
        # NOTE(review): the membership guard makes the except below
        # unreachable; a hook not present is silently ignored — confirm
        # whether a missing hook should raise instead.
        try:
            hooked.pending.remove(hook)
        except ValueError as e:
            raise ValueError("%s is not hooked by %s" % (target, hook)) from e
    if not hooked.pending:
        setattr(target, name, hooked.func)
<SYSTEM_TASK:> Reset the internal node traversal state. Must be called prior to visiting future nodes. <END_TASK> <USER_TASK:> Description: def reset(self): """ Reset the internal node traversal state. Must be called prior to visiting future nodes. :return: None """
# Clear all traversal bookkeeping, then rebuild the node ordering from
# sort_nodes() so indices reflect the fresh topological order.
self._sorted_nodes.clear()
self._node_to_index.clear()
self._reached_fixedpoint.clear()

for i, n in enumerate(self.sort_nodes()):
    self._node_to_index[n] = i
    self._sorted_nodes.add(n)
<SYSTEM_TASK:> Returns all successors to the specific node. <END_TASK> <USER_TASK:> Description: def all_successors(self, node, skip_reached_fixedpoint=False): """ Returns all successors to the specific node. :param node: A node in the graph. :return: A set of nodes that are all successors to the given node. :rtype: set """
# Iterative DFS over successors; optionally prune nodes already at
# fixedpoint. Note: the returned set includes `node` itself.
successors = set()

stack = [ node ]
while stack:
    n = stack.pop()
    successors.add(n)
    stack.extend(succ for succ in self.successors(n) if
                 succ not in successors and
                 (not skip_reached_fixedpoint or succ not in self._reached_fixedpoint)
                 )

return successors
<SYSTEM_TASK:> Revisit a node in the future. As a result, the successors to this node will be revisited as well. <END_TASK> <USER_TASK:> Description: def revisit(self, node, include_self=True): """ Revisit a node in the future. As a result, the successors to this node will be revisited as well. :param node: The node to revisit in the future. :return: None """
successors = self.successors(node) #, skip_reached_fixedpoint=True) if include_self: self._sorted_nodes.add(node) for succ in successors: self._sorted_nodes.add(succ) # reorder it self._sorted_nodes = OrderedSet(sorted(self._sorted_nodes, key=lambda n: self._node_to_index[n]))
<SYSTEM_TASK:> Add the input state to all successors of the given node. <END_TASK> <USER_TASK:> Description: def _add_input_state(self, node, input_state): """ Add the input state to all successors of the given node. :param node: The node whose successors' input states will be touched. :param input_state: The state that will be added to successors of the node. :return: None """
# Propagate the state to every successor: merge with an existing input
# state if present, otherwise install it directly.
successors = self._graph_visitor.successors(node)

for succ in successors:
    if succ in self._state_map:
        self._state_map[succ] = self._merge_states(succ, *([ self._state_map[succ], input_state ]))
    else:
        self._state_map[succ] = input_state
<SYSTEM_TASK:> Get the input abstract state for this node, and remove it from the state map. <END_TASK> <USER_TASK:> Description: def _pop_input_state(self, node): """ Get the input abstract state for this node, and remove it from the state map. :param node: The node in graph. :return: A merged state, or None if there is no input state for this node available. """
# Remove and return the node's pending input state, if any.
if node in self._state_map:
    return self._state_map.pop(node)
return None
<SYSTEM_TASK:> Get abstract states for all predecessors of the node, merge them, and return the merged state. <END_TASK> <USER_TASK:> Description: def _merge_state_from_predecessors(self, node): """ Get abstract states for all predecessors of the node, merge them, and return the merged state. :param node: The node in graph. :return: A merged state, or None if no predecessor is available. """
# Collect available predecessor states and left-fold them pairwise through
# _merge_states; None when no predecessor has a state yet.
preds = self._graph_visitor.predecessors(node)

states = [ self._state_map[n] for n in preds if n in self._state_map ]

if not states:
    return None

return reduce(lambda s0, s1: self._merge_states(node, s0, s1), states[1:], states[0])
<SYSTEM_TASK:> Insert a new job into the job queue. If the job queue is ordered, this job will be inserted at the correct <END_TASK> <USER_TASK:> Description: def _insert_job(self, job): """ Insert a new job into the job queue. If the job queue is ordered, this job will be inserted at the correct position. :param job: The job to insert :return: None """
# Insert a job, optionally widening/merging with an existing job under the
# same key, and keep the queue sorted when job ordering is enabled.
key = self._job_key(job)

if self._allow_merging:
    if key in self._job_map:
        job_info = self._job_map[key]

        # decide if we want to trigger a widening
        # if not, we'll simply do the merge
        # TODO: save all previous jobs for the sake of widening

        job_added = False
        if self._allow_widening and self._should_widen_jobs(job_info.job, job):
            try:
                widened_job = self._widen_jobs(job_info.job, job)
                # remove the old job since now we have a widened one
                if job_info in self._job_info_queue:
                    self._job_info_queue.remove(job_info)
                job_info.add_job(widened_job, widened=True)
                job_added = True
            except AngrJobWideningFailureNotice:
                # widening failed
                # fall back to merging...
                pass

        if not job_added:
            try:
                merged_job = self._merge_jobs(job_info.job, job)
                # remove the old job since now we have a merged one
                if job_info in self._job_info_queue:
                    self._job_info_queue.remove(job_info)
                job_info.add_job(merged_job, merged=True)
            except AngrJobMergingFailureNotice:
                # merging failed: track the new job on its own
                job_info = JobInfo(key, job)
                # update the job map
                self._job_map[key] = job_info
    else:
        job_info = JobInfo(key, job)
        self._job_map[key] = job_info
else:
    job_info = JobInfo(key, job)
    self._job_map[key] = job_info

if self._order_jobs:
    self._binary_insert(self._job_info_queue, job_info, lambda elem: self._job_sorting_key(elem.job))
else:
    self._job_info_queue.append(job_info)
<SYSTEM_TASK:> Return the job currently at position `pos`, but still keep it in the job queue. An IndexError will be raised <END_TASK> <USER_TASK:> Description: def _peek_job(self, pos): """ Return the job currently at position `pos`, but still keep it in the job queue. An IndexError will be raised if that position does not currently exist in the job list. :param int pos: Position of the job to get. :return: The job """
# Non-destructive indexed access into the job queue.
if pos < len(self._job_info_queue):
    return self._job_info_queue[pos].job

raise IndexError()
<SYSTEM_TASK:> Insert an element into a sorted list, and keep the list sorted. <END_TASK> <USER_TASK:> Description: def _binary_insert(lst, elem, key, lo=0, hi=None): """ Insert an element into a sorted list, and keep the list sorted. The major difference from bisect.bisect_left is that this function supports a key method, so user doesn't have to create the key array for each insertion. :param list lst: The list. Must be pre-ordered. :param object element: An element to insert into the list. :param func key: A method to get the key for each element in the list. :param int lo: Lower bound of the search. :param int hi: Upper bound of the search. :return: None """
# bisect_left-style binary search using a key function, then insert at the
# found position to keep `lst` sorted.
if lo < 0:
    raise ValueError("lo must be a non-negative number")

if hi is None:
    hi = len(lst)

while lo < hi:
    mid = (lo + hi) // 2
    if key(lst[mid]) < key(elem):
        lo = mid + 1
    else:
        hi = mid

lst.insert(lo, elem)
<SYSTEM_TASK:> Merge this SimMemory with the other SimMemory <END_TASK> <USER_TASK:> Description: def merge(self, others, merge_conditions, common_ancestor=None): # pylint: disable=unused-argument """ Merge this SimMemory with the other SimMemory """
# Merge the differing bytes and the read/write concretization strategies;
# report whether anything actually changed.
changed_bytes = self._changes_to_merge(others)

l.info("Merging %d bytes", len(changed_bytes))
l.info("... %s has changed bytes %s", self.id, changed_bytes)

self.read_strategies = self._merge_strategies(self.read_strategies, *[
    o.read_strategies for o in others
])
self.write_strategies = self._merge_strategies(self.write_strategies, *[
    o.write_strategies for o in others
])
merged_bytes = self._merge(others, changed_bytes, merge_conditions=merge_conditions)

return len(merged_bytes) > 0
<SYSTEM_TASK:> Replaces `length` bytes starting at `addr` with a symbolic variable named name. Adds a constraint equaling that <END_TASK> <USER_TASK:> Description: def make_symbolic(self, name, addr, length=None): """ Replaces `length` bytes starting at `addr` with a symbolic variable named name. Adds a constraint equaling that symbolic variable to the value previously at `addr`, and returns the variable. """
# Replace memory (or a named register) with a fresh unconstrained variable
# constrained to equal the old contents, and return that variable.
l.debug("making %s bytes symbolic", length)

if isinstance(addr, str):
    # a register name was given: resolve to (offset, size)
    addr, length = self.state.arch.registers[addr]
else:
    if length is None:
        raise Exception("Unspecified length!")

r = self.load(addr, length)

v = self.get_unconstrained_bytes(name, r.size())
self.store(addr, v)
self.state.add_constraints(r == v)
l.debug("... eq constraints: %s", r == v)
return v
<SYSTEM_TASK:> Applies concretization strategies on the address until one of them succeeds. <END_TASK> <USER_TASK:> Description: def _apply_concretization_strategies(self, addr, strategies, action): """ Applies concretization strategies on the address until one of them succeeds. """
# we try all the strategies in order for s in strategies: # first, we trigger the SimInspect breakpoint and give it a chance to intervene e = addr self.state._inspect( 'address_concretization', BP_BEFORE, address_concretization_strategy=s, address_concretization_action=action, address_concretization_memory=self, address_concretization_expr=e, address_concretization_add_constraints=True ) s = self.state._inspect_getattr('address_concretization_strategy', s) e = self.state._inspect_getattr('address_concretization_expr', addr) # if the breakpoint None'd out the strategy, we skip it if s is None: continue # let's try to apply it! try: a = s.concretize(self, e) except SimUnsatError: a = None # trigger the AFTER breakpoint and give it a chance to intervene self.state._inspect( 'address_concretization', BP_AFTER, address_concretization_result=a ) a = self.state._inspect_getattr('address_concretization_result', a) # return the result if not None! if a is not None: return a # well, we tried raise SimMemoryAddressError( "Unable to concretize address for %s with the provided strategies." % action )
<SYSTEM_TASK:> Concretizes an address meant for writing. <END_TASK> <USER_TASK:> Description: def concretize_write_addr(self, addr, strategies=None): """ Concretizes an address meant for writing. :param addr: An expression for the address. :param strategies: A list of concretization strategies (to override the default). :returns: A list of concrete addresses. """
# Fast paths: already concrete (int or non-symbolic AST); otherwise run the
# write-concretization strategies.
if isinstance(addr, int):
    return [ addr ]
elif not self.state.solver.symbolic(addr):
    return [ self.state.solver.eval(addr) ]

strategies = self.write_strategies if strategies is None else strategies
return self._apply_concretization_strategies(addr, strategies, 'store')
<SYSTEM_TASK:> Concretizes an address meant for reading. <END_TASK> <USER_TASK:> Description: def concretize_read_addr(self, addr, strategies=None): """ Concretizes an address meant for reading. :param addr: An expression for the address. :param strategies: A list of concretization strategies (to override the default). :returns: A list of concrete addresses. """
# Fast paths: already concrete (int or non-symbolic AST); otherwise run the
# read-concretization strategies.
if isinstance(addr, int):
    return [ addr ]
elif not self.state.solver.symbolic(addr):
    return [ self.state.solver.eval(addr) ]

strategies = self.read_strategies if strategies is None else strategies
return self._apply_concretization_strategies(addr, strategies, 'load')
<SYSTEM_TASK:> Retrieve the permissions of the page at address `addr`. <END_TASK> <USER_TASK:> Description: def permissions(self, addr, permissions=None): """ Retrieve the permissions of the page at address `addr`. :param addr: address to get the page permissions :param permissions: Integer or BVV to optionally set page permissions to :return: AST representing the permissions on the page """
out = self.mem.permissions(addr, permissions) # if unicorn is in play and we've marked a page writable, it must be uncached if permissions is not None and self.state.solver.is_true(permissions & 2 == 2): if self.state.has_plugin('unicorn'): self.state.unicorn.uncache_page(addr) return out
<SYSTEM_TASK:> Perform execution using any applicable engine. Enumerate the current engines and use the <END_TASK> <USER_TASK:> Description: def successors(self, *args, **kwargs): """ Perform execution using any applicable engine. Enumerate the current engines and use the first one that works. Return a SimSuccessors object classifying the results of the run. :param state: The state to analyze :param addr: optional, an address to execute at instead of the state's ip :param jumpkind: optional, the jumpkind of the previous exit :param inline: This is an inline execution. Do not bother copying the state. Additional keyword arguments will be passed directly into each engine's process method. """
return self.project.engines.successors(*args, **kwargs)
<SYSTEM_TASK:> Returns a state object initialized to the start of a given function, as if it were called with given parameters. <END_TASK> <USER_TASK:> Description: def call_state(self, addr, *args, **kwargs): """ Returns a state object initialized to the start of a given function, as if it were called with given parameters. :param addr: The address the state should start at instead of the entry point. :param args: Any additional positional arguments will be used as arguments to the function call. The following parametrs are optional. :param base_state: Use this SimState as the base for the new state instead of a blank state. :param cc: Optionally provide a SimCC object to use a specific calling convention. :param ret_addr: Use this address as the function's return target. :param stack_base: An optional pointer to use as the top of the stack, circa the function entry point :param alloc_base: An optional pointer to use as the place to put excess argument data :param grow_like_stack: When allocating data at alloc_base, whether to allocate at decreasing addresses :param toc: The address of the table of contents for ppc64 :param initial_prefix: If this is provided, all symbolic registers will hold symbolic values with names prefixed by this string. :param fs: A dictionary of file names with associated preset SimFile objects. :param concrete_fs: bool describing whether the host filesystem should be consulted when opening files. :param chroot: A path to use as a fake root directory, Behaves similarly to a real chroot. Used only when concrete_fs is set to True. :param kwargs: Any additional keyword args will be passed to the SimState constructor. :return: The state at the beginning of the function. :rtype: SimState The idea here is that you can provide almost any kind of python type in `args` and it'll be translated to a binary format to be placed into simulated memory. 
Lists (representing arrays) must be entirely elements of the same type and size, while tuples (representing structs) can be elements of any type and size. If you'd like there to be a pointer to a given value, wrap the value in a `SimCC.PointerWrapper`. Any value that can't fit in a register will be automatically put in a PointerWrapper. If stack_base is not provided, the current stack pointer will be used, and it will be updated. If alloc_base is not provided, the current stack pointer will be used, and it will be updated. You might not like the results if you provide stack_base but not alloc_base. grow_like_stack controls the behavior of allocating data at alloc_base. When data from args needs to be wrapped in a pointer, the pointer needs to point somewhere, so that data is dumped into memory at alloc_base. If you set alloc_base to point to somewhere other than the stack, set grow_like_stack to False so that sequential allocations happen at increasing addresses. """
return self.project.simos.state_call(addr, *args, **kwargs)
<SYSTEM_TASK:> Constructs a new simulation manager. <END_TASK> <USER_TASK:> Description: def simulation_manager(self, thing=None, **kwargs): """ Constructs a new simulation manager. :param thing: Optional - What to put in the new SimulationManager's active stash (either a SimState or a list of SimStates). :param kwargs: Any additional keyword arguments will be passed to the SimulationManager constructor :returns: The new SimulationManager :rtype: angr.sim_manager.SimulationManager Many different types can be passed to this method: * If nothing is passed in, the SimulationManager is seeded with a state initialized for the program entry point, i.e. :meth:`entry_state()`. * If a :class:`SimState` is passed in, the SimulationManager is seeded with that state. * If a list is passed in, the list must contain only SimStates and the whole list will be used to seed the SimulationManager. """
if thing is None: thing = [ self.entry_state() ] elif isinstance(thing, (list, tuple)): if any(not isinstance(val, SimState) for val in thing): raise AngrError("Bad type to initialize SimulationManager") elif isinstance(thing, SimState): thing = [ thing ] else: raise AngrError("BadType to initialze SimulationManager: %s" % repr(thing)) return SimulationManager(self.project, active_states=thing, **kwargs)
<SYSTEM_TASK:> A Callable is a representation of a function in the binary that can be interacted with like a native python <END_TASK> <USER_TASK:> Description: def callable(self, addr, concrete_only=False, perform_merge=True, base_state=None, toc=None, cc=None): """ A Callable is a representation of a function in the binary that can be interacted with like a native python function. :param addr: The address of the function to use :param concrete_only: Throw an exception if the execution splits into multiple states :param perform_merge: Merge all result states into one at the end (only relevant if concrete_only=False) :param base_state: The state from which to do these runs :param toc: The address of the table of contents for ppc64 :param cc: The SimCC to use for a calling convention :returns: A Callable object that can be used as a interface for executing guest code like a python function. :rtype: angr.callable.Callable """
return Callable(self.project, addr=addr, concrete_only=concrete_only, perform_merge=perform_merge, base_state=base_state, toc=toc, cc=cc)
<SYSTEM_TASK:> An iterator of all local blocks in the current function. <END_TASK> <USER_TASK:> Description: def blocks(self): """ An iterator of all local blocks in the current function. :return: angr.lifter.Block instances. """
for block_addr, block in self._local_blocks.items(): try: yield self._get_block(block_addr, size=block.size, byte_string=block.bytestr if isinstance(block, BlockNode) else None) except (SimEngineError, SimMemoryError): pass
<SYSTEM_TASK:> All of the operations that are done by this functions. <END_TASK> <USER_TASK:> Description: def operations(self): """ All of the operations that are done by this functions. """
return [op for block in self.blocks for op in block.vex.operations]
<SYSTEM_TASK:> All of the constants that are used by this functions's code. <END_TASK> <USER_TASK:> Description: def code_constants(self): """ All of the constants that are used by this functions's code. """
# TODO: remove link register values return [const.value for block in self.blocks for const in block.vex.constants]
<SYSTEM_TASK:> All of the constant string references used by this function. <END_TASK> <USER_TASK:> Description: def string_references(self, minimum_length=2, vex_only=False): """ All of the constant string references used by this function. :param minimum_length: The minimum length of strings to find (default is 2) :param vex_only: Only analyze VEX IR, don't interpret the entry state to detect additional constants. :return: A list of tuples of (address, string) where the address is the location of the string in memory. """
strings = []
memory = self._project.loader.memory

# get known instruction addresses and call targets
# these addresses cannot be string references, but show up frequently in the runtime values
known_executable_addresses = set()
for block in self.blocks:
    known_executable_addresses.update(block.instruction_addrs)
for function in self._function_manager.values():
    known_executable_addresses.update(set(x.addr for x in function.graph.nodes()))

# loop over all local runtime values and check if the value points to a printable string
for addr in self.local_runtime_values if not vex_only else self.code_constants:
    # floats can never be addresses; the address must also be mapped
    if not isinstance(addr, claripy.fp.FPV) and addr in memory:
        # check that the address isn't pointing to known executable code
        # and that it isn't an indirect pointer to known executable code
        try:
            possible_pointer = memory.unpack_word(addr)
            if addr not in known_executable_addresses and possible_pointer not in known_executable_addresses:
                # build string byte-by-byte until a non-printable character is hit
                stn = ""
                offset = 0
                current_char = chr(memory[addr + offset])
                while current_char in string.printable:
                    stn += current_char
                    offset += 1
                    current_char = chr(memory[addr + offset])

                # check that the string was a null terminated string with minimum length
                if current_char == "\x00" and len(stn) >= minimum_length:
                    strings.append((addr, stn))
        except KeyError:
            # reading ran off the end of mapped memory - not a valid string
            pass
return strings
<SYSTEM_TASK:> Tries to find all runtime values of this function which do not come from inputs. <END_TASK> <USER_TASK:> Description: def local_runtime_values(self): """ Tries to find all runtime values of this function which do not come from inputs. These values are generated by starting from a blank state and reanalyzing the basic blocks once each. Function calls are skipped, and back edges are never taken so these values are often unreliable, This function is good at finding simple constant addresses which the function will use or calculate. :return: a set of constants """
constants = set()

# only analyze functions inside the main binary
if not self._project.loader.main_object.contains_addr(self.addr):
    return constants

# FIXME the old way was better for architectures like mips, but we need the initial irsb
# reanalyze function with a new initial state (use persistent registers)
# initial_state = self._function_manager._cfg.get_any_irsb(self.addr).initial_state
# fresh_state = self._project.factory.blank_state(mode="fastpath")
# for reg in initial_state.arch.persistent_regs + ['ip']:
#     fresh_state.registers.store(reg, initial_state.registers.load(reg))

# reanalyze function with a new initial state
fresh_state = self._project.factory.blank_state(mode="fastpath")
fresh_state.regs.ip = self.addr

# addresses of the block nodes that belong to this function's graph
graph_addrs = set(x.addr for x in self.graph.nodes() if isinstance(x, BlockNode))

# process the nodes in a breadth-first order keeping track of which nodes have already been analyzed
analyzed = set()
q = [fresh_state]
analyzed.add(fresh_state.solver.eval(fresh_state.ip))

while len(q) > 0:
    state = q.pop()

    # make sure its in this function
    if state.solver.eval(state.ip) not in graph_addrs:
        continue

    # don't trace into simprocedures
    if self._project.is_hooked(state.solver.eval(state.ip)):
        continue

    # don't trace outside of the binary
    if not self._project.loader.main_object.contains_addr(state.solver.eval(state.ip)):
        continue

    # don't trace unreachable blocks
    if state.history.jumpkind in {'Ijk_EmWarn', 'Ijk_NoDecode', 'Ijk_MapFail', 'Ijk_NoRedir',
                                  'Ijk_SigTRAP', 'Ijk_SigSEGV', 'Ijk_ClientReq'}:
        continue

    curr_ip = state.solver.eval(state.ip)

    # get runtime values from logs of successors
    successors = self._project.factory.successors(state)
    for succ in successors.flat_successors + successors.unsat_successors:
        for a in succ.history.recent_actions:
            for ao in a.all_objects:
                if not isinstance(ao.ast, claripy.ast.Base):
                    # already a plain Python value
                    constants.add(ao.ast)
                elif not ao.ast.symbolic:
                    constants.add(succ.solver.eval(ao.ast))

        # add successors to the queue to analyze
        if not succ.solver.symbolic(succ.ip):
            succ_ip = succ.solver.eval(succ.ip)
            if succ_ip in self and succ_ip not in analyzed:
                analyzed.add(succ_ip)
                q.insert(0, succ)

    # force jumps to missing successors
    # (this is a slightly hacky way to force it to explore all the nodes in the function)
    node = self.get_node(curr_ip)
    if node is None:
        # the node does not exist. maybe it's not a block node.
        continue
    missing = set(x.addr for x in list(self.graph.successors(node))) - analyzed
    for succ_addr in missing:
        l.info("Forcing jump to missing successor: %#x", succ_addr)
        if succ_addr not in analyzed:
            all_successors = successors.unconstrained_successors + \
                             successors.flat_successors + \
                             successors.unsat_successors
            if len(all_successors) > 0:
                # set the ip of a copied successor to the successor address
                succ = all_successors[0].copy()
                succ.ip = succ_addr
                analyzed.add(succ_addr)
                q.insert(0, succ)
            else:
                l.warning("Could not reach successor: %#x", succ_addr)

return constants
<SYSTEM_TASK:> Add a custom jumpout site. <END_TASK> <USER_TASK:> Description: def add_jumpout_site(self, node): """ Add a custom jumpout site. :param node: The address of the basic block that control flow leaves during this transition. :return: None """
# Make sure the node is tracked as part of this function's local graph,
self._register_nodes(True, node)
# remember it as a jump-out site,
self._jumpout_sites.add(node)
# and mark it as a 'transition'-type endpoint of the function.
self._add_endpoint(node, 'transition')
<SYSTEM_TASK:> Add a custom retout site. <END_TASK> <USER_TASK:> Description: def add_retout_site(self, node): """ Add a custom retout site. Retout (returning to outside of the function) sites are very rare. It mostly occurs during CFG recovery when we incorrectly identify the beginning of a function in the first iteration, and then correctly identify that function later in the same iteration (function alignments can lead to this bizarre case). We will mark all edges going out of the header of that function as an outside edge, because all successors now belong to the incorrectly-identified function. This identification error will be fixed in the second iteration of CFG recovery. However, we still want to keep track of jumpouts/retouts during the first iteration so other logic in CFG recovery still works. :param node: The address of the basic block that control flow leaves the current function after a call. :return: None """
# Make sure the node is tracked as part of this function's local graph,
self._register_nodes(True, node)
# remember it as a ret-out site,
self._retout_sites.add(node)
# and mark it as a 'return'-type endpoint of the function.
self._add_endpoint(node, 'return')
<SYSTEM_TASK:> Determine the most suitable name of the function. <END_TASK> <USER_TASK:> Description: def _get_initial_name(self): """ Determine the most suitable name of the function. :return: The initial function name. :rtype: string """
name = None
addr = self.addr

# Try to get a name from existing labels (highest priority)
if self._function_manager is not None:
    if addr in self._function_manager._kb.labels:
        name = self._function_manager._kb.labels[addr]

# try to get the name from a hook or syscall implementation
if name is None and self.project is not None:
    project = self.project
    if project.is_hooked(addr):
        hooker = project.hooked_by(addr)
        name = hooker.display_name
    elif project.simos.is_syscall_addr(addr):
        syscall_inst = project.simos.syscall_from_addr(addr)
        name = syscall_inst.display_name

# generate an IDA-style sub_X name as the last resort
if name is None:
    name = 'sub_%x' % addr

return name
<SYSTEM_TASK:> Determine the name of the binary where this function is. <END_TASK> <USER_TASK:> Description: def _get_initial_binary_name(self): """ Determine the name of the binary where this function is. :return: None """
binary_name = None # if this function is a simprocedure but not a syscall, use its library name as # its binary name # if it is a syscall, fall back to use self.binary.binary which explicitly says cle##kernel if self.project and self.is_simprocedure and not self.is_syscall: hooker = self.project.hooked_by(self.addr) if hooker is not None: binary_name = hooker.library_name if binary_name is None and self.binary is not None: binary_name = os.path.basename(self.binary.binary) return binary_name
<SYSTEM_TASK:> Registers an edge between basic blocks in this function's transition graph. <END_TASK> <USER_TASK:> Description: def _transit_to(self, from_node, to_node, outside=False, ins_addr=None, stmt_idx=None): """ Registers an edge between basic blocks in this function's transition graph. Arguments are CodeNode objects. :param from_node The address of the basic block that control flow leaves during this transition. :param to_node The address of the basic block that control flow enters during this transition. :param bool outside: If this is a transition to another function, e.g. tail call optimization :return: None """
if outside:
    # A jump out of this function (e.g. a tail call): only from_node is a
    # local node; the destination is registered as a non-local node.
    self._register_nodes(True, from_node)
    if to_node is not None:
        self._register_nodes(False, to_node)

    self._jumpout_sites.add(from_node)
else:
    # An intra-function transition: both endpoints (when known) are local.
    if to_node is not None:
        self._register_nodes(True, from_node, to_node)
    else:
        self._register_nodes(True, from_node)

if to_node is not None:
    self.transition_graph.add_edge(from_node, to_node, type='transition', outside=outside, ins_addr=ins_addr,
                                   stmt_idx=stmt_idx
                                   )

if outside:
    # this node is an endpoint of the current function
    self._add_endpoint(from_node, 'transition')

# clear the cache of the local transition graph
self._local_transition_graph = None
<SYSTEM_TASK:> Registers an edge between the caller basic block and callee function. <END_TASK> <USER_TASK:> Description: def _call_to(self, from_node, to_func, ret_node, stmt_idx=None, ins_addr=None, return_to_outside=False): """ Registers an edge between the caller basic block and callee function. :param from_node: The basic block that control flow leaves during the transition. :type from_node: angr.knowledge.CodeNode :param to_func: The function that we are calling :type to_func: Function :param ret_node: The basic block that control flow should return to after the function call. :type ret_node: angr.knowledge.CodeNode or None :param stmt_idx: Statement ID of this call. :type stmt_idx: int, str or None :param ins_addr: Instruction address of this call. :type ins_addr: int or None """
# The call site always belongs to this function's local graph.
self._register_nodes(True, from_node)

# Syscalls and ordinary calls are recorded with differently-typed edges.
edge_type = 'syscall' if to_func.is_syscall else 'call'
self.transition_graph.add_edge(from_node, to_func, type=edge_type, stmt_idx=stmt_idx, ins_addr=ins_addr)

# When a return target is known, add the matching fake-return edge.
if ret_node is not None:
    self._fakeret_to(from_node, ret_node, to_outside=return_to_outside)

# Invalidate the cached local transition graph.
self._local_transition_graph = None
<SYSTEM_TASK:> Registers a basic block as a site for control flow to return from this function. <END_TASK> <USER_TASK:> Description: def _add_return_site(self, return_site): """ Registers a basic block as a site for control flow to return from this function. :param CodeNode return_site: The block node that ends with a return. """
# Track the node as part of this function and remember it as a return site.
self._register_nodes(True, return_site)
self._ret_sites.add(return_site)
# A return site must be an endpoint of the function - you cannot continue execution of the current function
# after returning
self._add_endpoint(return_site, 'return')
<SYSTEM_TASK:> Registers a basic block as calling a function and returning somewhere. <END_TASK> <USER_TASK:> Description: def _add_call_site(self, call_site_addr, call_target_addr, retn_addr): """ Registers a basic block as calling a function and returning somewhere. :param call_site_addr: The address of a basic block that ends in a call. :param call_target_addr: The address of the target of said call. :param retn_addr: The address that said call will return to. """
# Map the call-site address to its (call target, return address) pair.
self._call_sites[call_site_addr] = (call_target_addr, retn_addr)
<SYSTEM_TASK:> Iterate through all call edges in transition graph. For each call a non-returning function, mark the source <END_TASK> <USER_TASK:> Description: def mark_nonreturning_calls_endpoints(self): """ Iterate through all call edges in transition graph. For each call a non-returning function, mark the source basic block as an endpoint. This method should only be executed once all functions are recovered and analyzed by CFG recovery, so we know whether each function returns or not. :return: None """
for src, dst, data in self.transition_graph.edges(data=True):
    # Only call edges are of interest here.
    if data.get('type') != 'call':
        continue
    callee_addr = dst.addr
    if callee_addr not in self._function_manager:
        continue
    callee = self._function_manager[callee_addr]
    if callee.returning is False:
        # The callee never returns, so the calling block terminates this
        # function: record it as a call-out site and a 'call' endpoint.
        the_node = self.get_node(src.addr)
        self._callout_sites.add(the_node)
        self._add_endpoint(the_node, 'call')
<SYSTEM_TASK:> Return a local transition graph that only contain nodes in current function. <END_TASK> <USER_TASK:> Description: def graph(self): """ Return a local transition graph that only contain nodes in current function. """
# Return the cached local graph if it has already been built.
if self._local_transition_graph is not None:
    return self._local_transition_graph

g = networkx.DiGraph()
if self.startpoint is not None:
    g.add_node(self.startpoint)
for block in self._local_blocks.values():
    g.add_node(block)
for src, dst, data in self.transition_graph.edges(data=True):
    if 'type' in data:
        # keep intra-function transitions only ('outside' edges leave the function)
        if data['type'] == 'transition' and ('outside' not in data or data['outside'] is False):
            g.add_edge(src, dst, **data)
        # keep confirmed fake-return (post-call fall-through) edges as well
        elif data['type'] == 'fake_return' and 'confirmed' in data and \
                ('outside' not in data or data['outside'] is False):
            g.add_edge(src, dst, **data)

self._local_transition_graph = g

return g
<SYSTEM_TASK:> Generate a sub control flow graph of instruction addresses based on self.graph <END_TASK> <USER_TASK:> Description: def subgraph(self, ins_addrs): """ Generate a sub control flow graph of instruction addresses based on self.graph :param iterable ins_addrs: A collection of instruction addresses that should be included in the subgraph. :return: A subgraph. :rtype: networkx.DiGraph """
# find all basic blocks that include those instructions blocks = [] block_addr_to_insns = {} for b in self._local_blocks.values(): # TODO: should I call get_blocks? block = self._get_block(b.addr, size=b.size, byte_string=b.bytestr) common_insns = set(block.instruction_addrs).intersection(ins_addrs) if common_insns: blocks.append(b) block_addr_to_insns[b.addr] = sorted(common_insns) #subgraph = networkx.subgraph(self.graph, blocks) subgraph = self.graph.subgraph(blocks).copy() g = networkx.DiGraph() for n in subgraph.nodes(): insns = block_addr_to_insns[n.addr] in_edges = subgraph.in_edges(n) # out_edges = subgraph.out_edges(n) if len(in_edges) > 1: # the first instruction address should be included if n.addr not in insns: insns = [n.addr] + insns for src, _ in in_edges: last_instr = block_addr_to_insns[src.addr][-1] g.add_edge(last_instr, insns[0]) for i in range(0, len(insns) - 1): g.add_edge(insns[i], insns[i + 1]) return g
<SYSTEM_TASK:> Get the size of the instruction specified by `insn_addr`. <END_TASK> <USER_TASK:> Description: def instruction_size(self, insn_addr): """ Get the size of the instruction specified by `insn_addr`. :param int insn_addr: Address of the instruction :return: Size of the instruction in bytes, or None if the instruction is not found. :rtype: int """
for b in self.blocks: block = self._get_block(b.addr, size=b.size, byte_string=b.bytestr) if insn_addr in block.instruction_addrs: index = block.instruction_addrs.index(insn_addr) if index == len(block.instruction_addrs) - 1: # the very last instruction size = block.addr + block.size - insn_addr else: size = block.instruction_addrs[index + 1] - insn_addr return size return None
<SYSTEM_TASK:> Draw the graph and save it to a PNG file. <END_TASK> <USER_TASK:> Description: def dbg_draw(self, filename): """ Draw the graph and save it to a PNG file. """
import matplotlib.pyplot as pyplot  # pylint: disable=import-error
from networkx.drawing.nx_agraph import graphviz_layout  # pylint: disable=import-error

tmp_graph = networkx.DiGraph()
for from_block, to_block in self.transition_graph.edges():
    node_a = "%#08x" % from_block.addr
    node_b = "%#08x" % to_block.addr
    # Annotate endpoints. _ret_sites holds node objects and _call_sites is
    # keyed by addresses, so test those - the original code compared the
    # formatted label strings against these collections, which never
    # matched, and the [Ret]/[Call] tags were never applied.
    if to_block in self._ret_sites:
        node_b += "[Ret]"
    if from_block.addr in self._call_sites:
        node_a += "[Call]"
    tmp_graph.add_edge(node_a, node_b)
pos = graphviz_layout(tmp_graph, prog='fdp')   # pylint: disable=no-member
networkx.draw(tmp_graph, pos, node_size=1200)
pyplot.savefig(filename)
<SYSTEM_TASK:> Registers a register offset as being used as an argument to the function. <END_TASK> <USER_TASK:> Description: def _add_argument_register(self, reg_offset): """ Registers a register offset as being used as an argument to the function. :param reg_offset: The offset of the register to register. """
# Only record offsets that the architecture considers argument registers,
# and avoid recording the same register twice.
if reg_offset in self._function_manager._arg_registers and \
        reg_offset not in self._argument_registers:
    self._argument_registers.append(reg_offset)
<SYSTEM_TASK:> Find the most likely function declaration from the embedded collection of prototypes, set it to self.prototype, <END_TASK> <USER_TASK:> Description: def find_declaration(self): """ Find the most likely function declaration from the embedded collection of prototypes, set it to self.prototype, and update self.calling_convention with the declaration. :return: None """
# determine the library name if not self.is_plt: binary_name = self.binary_name if binary_name not in SIM_LIBRARIES: return else: binary_name = None # PLT entries must have the same declaration as their jump targets # Try to determine which library this PLT entry will jump to edges = self.transition_graph.edges() node = next(iter(edges))[1] if len(edges) == 1 and (type(node) is HookNode or type(node) is SyscallNode): target = node.addr if target in self._function_manager: target_func = self._function_manager[target] binary_name = target_func.binary_name if binary_name is None: return library = SIM_LIBRARIES.get(binary_name, None) if library is None: return if not library.has_prototype(self.name): return proto = library.prototypes[self.name] self.prototype = proto if self.calling_convention is not None: self.calling_convention.args = None self.calling_convention.func_ty = proto
<SYSTEM_TASK:> Reverse look-up. <END_TASK> <USER_TASK:> Description: def _rfind(lst, item): """ Reverse look-up. :param list lst: The list to look up in. :param item: The item to look for. :return: Offset of the item if found. A ValueError is raised if the item is not in the list. :rtype: int """
try: return dropwhile(lambda x: lst[x] != item, next(reversed(range(len(lst))))) except Exception: raise ValueError("%s not in the list" % item)