code
stringlengths
52
7.75k
docs
stringlengths
1
5.85k
def filenames(self, sources):
    """Return the basename (last path component) of every source URL."""
    return [src.split("/")[-1] for src in sources]
Return filenames from source links
def not_downgrade(self, prgnam):
    """Refuse to downgrade: return True when the SBo version of *prgnam*
    is lower than the installed version, printing a notice."""
    parts = prgnam.split("-")
    name = "-".join(parts[:-1])
    sbo_ver = parts[-1]
    # Installed version string starts with a separator char; strip it.
    ins_ver = GetFromInstalled(name).version()[1:] or "0"
    if LooseVersion(sbo_ver) < LooseVersion(ins_ver):
        self.msg.template(78)
        print("| Package {0} don't downgrade, "
              "setting by user".format(name))
        self.msg.template(78)
        return True
Don't downgrade packages if sbo version is lower than installed
def sbosrcarsh(self, prgnam, sbo_link, src_link):
    """Build alternative download URLs for *prgnam* from the sbosrcarch
    mirror, preserving the package's category/name path."""
    name = "-".join(prgnam.split("-")[:-1])
    category = "{0}/{1}/".format(sbo_link.split("/")[-2], name)
    return ["{0}{1}{2}".format(self.meta.sbosrcarch_link, category,
                               link.split("/")[-1])
            for link in src_link]
Alternative repository for sbo sources
def prog_version():
    """Print version, license, email and maintainer information."""
    print("Version : {0}\n"
          "Licence : {1}\n"
          "Email : {2}\n"
          "Maintainer: {3}".format(_meta_.__version__,
                                   _meta_.__license__,
                                   _meta_.__email__,
                                   _meta_.__maintainer__))
Print version, license and email
def binary(self, name, flag):
    """Recursively resolve the binary dependencies of *name* and return
    them as a list of dependency lists (empty if resolution is off)."""
    if self.meta.rsl_deps not in ["on", "ON"] or "--resolve-off" in flag:
        return []
    sys.setrecursionlimit(10000)
    dependencies = []
    requires = Requires(name, self.repo).get_deps()
    if requires:
        for req in requires:
            status(0)  # spinner/progress tick per dependency examined
            if req and req not in self.black:
                dependencies.append(req)
    if dependencies:
        self.dep_results.append(dependencies)
        for dep in dependencies:
            self.binary(dep, flag)
    return self.dep_results
Build all dependencies of a package
def pkg_not_found(self, bol, pkg, message, eol):
    """Print a 'no such package' message framed by *bol*/*eol* strings."""
    text = "{0}No such package {1}: {2}{3}".format(bol, pkg, message, eol)
    print(text)
Print message when package not found
def build_FAILED(self, prgnam):
    """Report a failed build of *prgnam* and point the user at the logs."""
    self.template(78)
    print("| Some error on the package {0} [ {1}FAILED{2} ]".format(
        prgnam, self.meta.color["RED"], self.meta.color["ENDC"]))
    self.template(78)
    print("| See the log file in '{0}/var/log/slpkg/sbo/build_logs{1}' "
          "directory or read the README file".format(
              self.meta.color["CYAN"], self.meta.color["ENDC"]))
    self.template(78)
    print("")
Print error message if build failed
def reading(self):
    """Emit the 'Reading package lists...' status line (no newline)."""
    sys.stdout.write("{0}Reading package lists...{1} ".format(
        self.meta.color["GREY"], self.meta.color["ENDC"]))
    sys.stdout.flush()
Message reading
def done(self):
    """Backspace over the status line's trailing space and print 'Done'."""
    sys.stdout.write("\b{0}Done{1}\n".format(self.meta.color["GREY"],
                                             self.meta.color["ENDC"]))
Message done
def answer(self):
    """Return the configured default answer, or prompt the user for one.

    EOF on the prompt (e.g. piped stdin) exits cleanly.
    """
    if self.meta.default_answer in ["y", "Y"]:
        return self.meta.default_answer
    try:
        return raw_input("Would you like to continue [y/N]? ")
    except EOFError:
        print("")
        raise SystemExit()
Message answer
def security_pkg(self, pkg):
    """Print a warning telling the user to read the README of *pkg*.

    Fix: the original called ``.format()`` on the result of ``print(...)``
    (``print("...").format(...)``), which only works as a Python 2 print
    statement and raises AttributeError under Python 3; the format call
    now happens inside the print call, consistent with the rest of the
    file.
    """
    print("")
    self.template(78)
    print("| {0}{1}*** WARNING ***{2}".format(
        " " * 27, self.meta.color["RED"], self.meta.color["ENDC"]))
    self.template(78)
    print("| Before proceed with the package '{0}' will you must read\n"
          "| the README file. You can use the command "
          "'slpkg -n {1}'".format(pkg, pkg))
    self.template(78)
    print("")
Warning message for some special reasons
def reference(self, install, upgrade):
    """Print a summary listing every installed and upgraded package."""
    self.template(78)
    print("| Total {0} {1} installed and {2} {3} upgraded".format(
        len(install), self.pkg(len(install)),
        len(upgrade), self.pkg(len(upgrade))))
    self.template(78)
    # izip_longest pads the shorter list with None so both can be walked
    # in one pass (Python 2 itertools).
    for installed, upgraded in itertools.izip_longest(install, upgrade):
        if upgraded:
            print("| Package {0} upgraded successfully".format(upgraded))
        if installed:
            print("| Package {0} installed successfully".format(installed))
    self.template(78)
    print("")
Reference list with packages installed and upgraded
def matching(self, packages):
    """Print the header shown before a list of name-matched packages."""
    print("\nNot found package with the name [ {0}{1}{2} ]. "
          "Matching packages:\nNOTE: Not dependenc"
          "ies are resolved\n".format(self.meta.color["CYAN"],
                                      "".join(packages),
                                      self.meta.color["ENDC"]))
Message for matching packages
def mirrors(name, location):
    """Build the URL of *name* under *location* on the official Slackware
    mirror for the current architecture and release."""
    rel = _meta_.slack_rel
    ver = slack_ver()
    repo = Repo().slack()
    # Pick the tree name from the architecture.
    if _meta_.arch == "x86_64":
        tree = "slackware64"
    elif _meta_.arch.startswith("arm"):
        tree = "slackwarearm"
    else:
        tree = "slackware"
    # Stable releases use the version number; others use the release tag.
    tag = ver if rel == "stable" else rel
    return repo + "{0}-{1}/{2}{3}".format(tree, tag, location, name)
Select Slackware official mirror packages based on architecture and version.
def select(self):
    """Show detected packages, present the command menu, read the user's
    choice and dispatch it."""
    print("\nDetected Slackware binary package for installation:\n")
    for pkg in self.packages:
        print(" " + pkg.split("/")[-1])
    print("")
    self.msg.template(78)
    print("| Choose a Slackware command:")
    self.msg.template(78)
    for key in sorted(self.commands):
        print("| {0}{1}{2}) {3}{4}{5}".format(
            self.meta.color["RED"], key, self.meta.color["ENDC"],
            self.meta.color["GREEN"], self.commands[key],
            self.meta.color["ENDC"]))
    self.msg.template(78)
    try:
        self.choice = raw_input(" > ")
    except EOFError:
        print("")
        raise SystemExit()
    if self.choice in self.commands.keys():
        # Move the cursor up one line and echo the chosen command in color.
        sys.stdout.write(" \x1b[1A{0}{1}{2}\n\n".format(
            self.meta.color["CYAN"], self.commands[self.choice],
            self.meta.color["ENDC"]))
        sys.stdout.flush()
        self.execute()
Select Slackware command
def execute(self):
    """Run the Slackware command previously stored in self.choice."""
    if self.choice not in self.commands.keys():
        return
    if self.choice == "i":
        PackageManager(self.packages).install("")
    elif self.choice in ["u", "r"]:
        # Strip the "upgrade --..." prefix to pass only the option text.
        PackageManager(self.packages).upgrade(
            self.commands[self.choice][11:])
Execute Slackware command
def choose(self):
    """Let the user enable/disable repositories through a dialog
    buildlist, then rewrite the config and show a summary."""
    keys = """ Choose repositories at the right side for enable or to the left side for disable. Keys: SPACE select or deselect the highlighted repositories, move it between the left and right lists ^ move the focus to the left list $ move the focus to the right list TAB move focus ENTER press the focused button Disabled <-------- REPOSITORIES -------> Enabled"""
    self.read_enabled()
    self.read_disabled()
    text, title, backtitle, status = keys, " Repositories ", "", False
    self.selected = DialogUtil(self.disabled, text, title, backtitle,
                               status).buildlist(self.enabled)
    if self.selected is None:
        # Dialog cancelled: keep the currently enabled set unchanged.
        self.selected = self.enabled
    else:
        self.update_repos()
    self.clear_screen()
    self.reference()
Choose repositories
def read_enabled(self):
    """Collect uncommented repository names that follow self.tag in the
    configuration text into self.enabled."""
    for raw in self.conf.splitlines():
        line = raw.lstrip()
        if self.tag in line:
            self.tag_line = True
        if (line and self.tag_line and not line.startswith("#")
                and self.tag not in line):
            self.enabled.append(line)
    self.tag_line = False
Read enabled repositories
def read_disabled(self):
    """Collect commented-out repository names that follow self.tag in the
    configuration text into self.disabled (with '#' markers stripped)."""
    for raw in self.conf.splitlines():
        line = raw.lstrip()
        if self.tag in line:
            self.tag_line = True
        if self.tag_line and line.startswith("#"):
            self.disabled.append("".join(line.split("#")).strip())
    self.tag_line = False
Read disabled repositories
def update_repos(self):
    """Rewrite the repositories config file: un-comment repositories the
    user selected and comment out those deselected."""
    conf_file = "{0}{1}".format(self.meta.conf_path,
                                self.repositories_conf)
    with open(conf_file, "w") as new_conf:
        for raw in self.conf.splitlines():
            line = raw.lstrip()
            if self.tag in line:
                self.tag_line = True
            if self.tag_line and line.startswith("#"):
                repo = "".join(line.split("#")).strip()
                if repo in self.selected:
                    # Enable: write the bare repository name.
                    new_conf.write(repo + "\n")
                    continue
            if (self.tag_line and not line.startswith("#")
                    and line != self.tag):
                repo = line.strip()
                if repo not in self.selected:
                    # Disable: comment the line out.
                    new_conf.write("# " + line + "\n")
                    continue
            new_conf.write(line + "\n")
Update repositories.conf file with enabled or disabled repositories
def reference(self):
    """Print the list of enabled repositories and an enabled/total count."""
    total_enabled = ", ".join(self.selected)
    if len(total_enabled) < 1:
        total_enabled = ("{0}Are you crazy? This is a package "
                         "manager for packages :p{1}".format(
                             self.meta.color["RED"],
                             self.meta.color["ENDC"]))
    self.msg.template(78)
    print("| Enabled repositories:")
    self.msg.template(78)
    print("| {0}".format(total_enabled))
    self.msg.template(78)
    print("{0}Total {1}/{2} repositories enabled.{3}\n".format(
        self.meta.color["GREY"], len(self.selected),
        len(self.enabled + self.disabled), self.meta.color["ENDC"]))
Report enabled repositories
def sbo_search_pkg(name):
    """Search SLACKBUILDS.TXT for package *name* and return its full
    repository URL, or "" when not found."""
    repo = Repo().default_repository()["sbo"]
    sbo_url = "{0}{1}/".format(repo, slack_ver())
    sbo_txt = Utils().read_file(
        _meta_.lib_path + "sbo_repo/SLACKBUILDS.TXT")
    for line in sbo_txt.splitlines():
        if not line.startswith("SLACKBUILD LOCATION"):
            continue
        location = line[23:].strip()
        if name == location.split("/")[-1]:
            return sbo_url + location + "/"
    return ""
Search for package path from SLACKBUILDS.TXT file and return url
def router_main(self):
    '''
    Main method for router; we stay in a loop in this method, receiving
    packets until the end of time.

    Fix: the source text was truncated and duplicated mid-function
    (``...str(pkt))f router_main(self):``); this is the reconstructed
    single clean definition.
    '''
    while True:
        gotpkt = True
        try:
            timestamp, dev, pkt = self.net.recv_packet(timeout=1.0)
        except NoPackets:
            log_debug("No packets available in recv_packet")
            gotpkt = False
        except Shutdown:
            log_debug("Got shutdown signal")
            break
        if gotpkt:
            log_debug("Got a packet: {}".format(str(pkt)))
Main method for router; we stay in a loop in this method, receiving packets until the end of time.
def find_source_files(input_path, excludes):
    """Return the paths of all .java files under *input_path*, pruning
    any directory for which is_excluded(...) is true."""
    input_path = os.path.normpath(os.path.abspath(input_path))
    java_files = []
    for dirpath, dirnames, names in os.walk(input_path):
        if is_excluded(dirpath, excludes):
            # Clearing dirnames in place stops os.walk from descending.
            del dirnames[:]
            continue
        java_files.extend(os.path.join(dirpath, fn)
                          for fn in names if fn.endswith(".java"))
    return java_files
Get a list of filenames for all Java source files within the given directory.
def from_bytes(rawbytes):
    '''
    Takes a byte string as a parameter and returns a list of IPOption
    objects.

    Fix: reconstructed from truncated-and-duplicated source text
    (``return ipoptf from_bytes(...)``).
    '''
    ipopts = IPOptionList()
    i = 0
    while i < len(rawbytes):
        opttype = rawbytes[i]
        optcopied = opttype >> 7          # high order 1 bit
        optclass = (opttype >> 5) & 0x03  # next 2 bits
        optnum = opttype & 0x1f           # low-order 5 bits are optnum
        optnum = IPOptionNumber(optnum)
        obj = IPOptionClasses[optnum]()
        eaten = obj.from_bytes(rawbytes[i:])
        i += eaten
        ipopts.append(obj)
    return ipopts
Takes a byte string as a parameter and returns a list of IPOption objects.
def to_bytes(self):
    '''
    Takes a list of IPOption objects and returns a packed byte string
    of options, appropriately padded if necessary.

    Fix: reconstructed from truncated-and-duplicated source text
    (``return raf to_bytes(self):``).
    '''
    raw = b''
    if not self._options:
        return raw
    for ipopt in self._options:
        raw += ipopt.to_bytes()
    # Pad to a 4-byte boundary (original behavior: always adds at least
    # one pad byte, a full pad word when already aligned).
    padbytes = 4 - (len(raw) % 4)
    raw += b'\x00' * padbytes
    return raw
Takes a list of IPOption objects and returns a packed byte string of options, appropriately padded if necessary.
def intf_down(self, interface):
    '''
    Can be called when an interface goes down.  Invokes the registered
    up/down callback (if any) with the interface and 'down'.
    FIXME: doesn't really do anything at this point.

    Fix: reconstructed from truncated-and-duplicated source text.
    '''
    intf = self._devinfo.get(interface, None)
    if intf and self._devupdown_callback:
        self._devupdown_callback(intf, 'down')
Can be called when an interface goes down. FIXME: doesn't really do anything at this point.
def intf_up(self, interface):
    '''
    Can be called when an interface is put in service.  Registers the
    interface and notifies the up/down callback; raises ValueError if
    the interface name is already registered.
    FIXME: not currently used; more needs to be done to correctly put
    a new intf into service.

    Fix: reconstructed from truncated-and-duplicated source text.
    '''
    if interface.name not in self._devinfo:
        self._devinfo[interface.name] = interface
        if self._devupdown_callback:
            self._devupdown_callback(interface, 'up')
    else:
        raise ValueError("Interface already registered")
Can be called when an interface is put in service. FIXME: not currently used; more needs to be done to correctly put a new intf into service.
def interface_by_name(self, name):
    '''
    Given a device name, return the corresponding interface object;
    raises KeyError when no such device exists.

    Fix: reconstructed from truncated-and-duplicated source text.
    '''
    if name in self._devinfo:
        return self._devinfo[name]
    raise KeyError("No device named {}".format(name))
Given a device name, return the corresponding interface object
def interface_by_ipaddr(self, ipaddr):
    '''
    Given an IP address, return the interface that 'owns' this address;
    raises KeyError when no interface matches.

    Fix: reconstructed from truncated-and-duplicated source text.
    '''
    ipaddr = IPAddr(ipaddr)
    for devname, iface in self._devinfo.items():
        if iface.ipaddr == ipaddr:
            return iface
    raise KeyError("No device has IP address {}".format(ipaddr))
Given an IP address, return the interface that 'owns' this address
def interface_by_macaddr(self, macaddr):
    '''
    Given a MAC address, return the interface that 'owns' this address;
    raises KeyError when no interface matches.

    Fix: reconstructed from truncated-and-duplicated source text.
    '''
    macaddr = EthAddr(macaddr)
    for devname, iface in self._devinfo.items():
        if iface.ethaddr == macaddr:
            return iface
    raise KeyError("No device has MAC address {}".format(macaddr))
Given a MAC address, return the interface that 'owns' this address
def from_bytes(rawbytes):
    '''
    Takes a byte string as a parameter and returns a list of
    ICMPv6Option objects.

    Fix: reconstructed from truncated-and-duplicated source text
    (``return icmpv6poptf from_bytes(...)``).
    '''
    icmpv6popts = ICMPv6OptionList()
    i = 0
    while i < len(rawbytes):
        opttype = rawbytes[i]
        optnum = ICMPv6OptionNumber(opttype)
        obj = ICMPv6OptionClasses[optnum]()
        eaten = obj.from_bytes(rawbytes[i:])
        i += eaten
        icmpv6popts.append(obj)
    return icmpv6popts
Takes a byte string as a parameter and returns a list of ICMPv6Option objects.
def to_bytes(self):
    '''
    Takes a list of ICMPv6Option objects and returns a packed byte
    string of options.  No padding is applied (see comment below).

    Fix: reconstructed from truncated-and-duplicated source text
    (``return raf to_bytes(self):``).
    '''
    raw = b''
    if not self._options:
        return raw
    for icmpv6popt in self._options:
        raw += icmpv6popt.to_bytes()
    # Padding doesn't seem necessary?
    # RFC states it should be padded to 'natural 64bit boundaries'
    # However, wireshark interprets \x00 as a malformed option field
    # So for now, ignore padding
    # padbytes = 4 - (len(raw) % 4)
    # raw += b'\x00'*padbytes
    return raw
Takes a list of ICMPv6Option objects and returns a packed byte string of options, appropriately padded if necessary.
def _unpack_bitmap(bitmap, xenum):
    '''
    Given an integer bitmap and an enumerated type, build a set that
    includes zero or more enumerated type values corresponding to the
    bitmap.

    Fix: reconstructed from truncated-and-duplicated source text
    (``return unpackef _unpack_bitmap(...)``).
    '''
    unpacked = set()
    for enval in xenum:
        # Member is included when all of its bits are set in the bitmap.
        if enval.value & bitmap == enval.value:
            unpacked.add(enval)
    return unpacked
Given an integer bitmap and an enumerated type, build a set that includes zero or more enumerated type values corresponding to the bitmap.
def _make_wildcard_attr_map():
    '''
    Create a dictionary that maps an attribute name in OpenflowMatch
    with a non-prefix-related wildcard bit from the OpenflowWildcard
    enumeration (CamelCase member names become snake_case keys).

    Fix: reconstructed from truncated-and-duplicated source text
    (``return _xmaf _make_wildcard_attr_map():``).
    '''
    _xmap = {}
    for wc in OpenflowWildcard:
        if not wc.name.endswith('All') and \
           not wc.name.endswith('Mask'):
            # CamelCase -> snake_case, e.g. 'DlSrc' -> '_dl_src'.
            translated = ''
            for ch in wc.name:
                if ch.isupper():
                    translated += '_'
                    translated += ch.lower()
                else:
                    translated += ch
            _xmap[translated] = wc
    return _xmap
Create a dictionary that maps an attribute name in OpenflowMatch with a non-prefix-related wildcard bit from the above OpenflowWildcard enumeration.
def _unpack_actions(raw):
    '''
    Deserialize 1 or more actions; return a list of Action* objects.

    Fix: reconstructed from truncated-and-duplicated source text
    (``return actionf _unpack_actions(raw):``).
    '''
    actions = []
    while len(raw) > 0:
        # Each action starts with a 2-byte type and 2-byte length.
        atype, alen = struct.unpack('!HH', raw[:4])
        atype = OpenflowActionType(atype)
        action = _ActionClassMap.get(atype)()
        action.from_bytes(raw[:alen])
        raw = raw[alen:]
        actions.append(action)
    return actions
deserialize 1 or more actions; return a list of Action* objects
def overlaps_with(self, othermatch, strict=False):
    '''
    Two match objects overlap if the same packet can be matched by both
    *and* they have the same priority.  In strict mode only the forward
    comparison is checked.

    Fix: reconstructed from truncated-and-duplicated source text
    (``strictf overlaps_with(...)``).
    '''
    one = self.matches_entry(othermatch, strict)
    if strict:
        return one
    return one and othermatch.matches_entry(self, strict)
Two match objects overlap if the same packet can be matched by both *and* they have the same priority.
def build_from_packet(pkt):
    '''
    Build and return a new OpenflowMatch object based on the packet
    object passed as a parameter.

    Fix: reconstructed from truncated-and-duplicated source text
    (``return f build_from_packet(pkt):``).
    '''
    m = OpenflowMatch()
    for mf, pkttuple in OpenflowMatch._match_field_to_packet.items():
        for pktcls, field in pkttuple:
            if pkt.has_header(pktcls):
                setattr(m, mf, getattr(pkt[pktcls], field))
                # NOTE(review): 'continue' here keeps scanning remaining
                # header classes for the same field; 'break' may have
                # been intended — preserved as-is.
                continue
    return m
Build and return a new OpenflowMatch object based on the packet object passed as a parameter.
def pre_serialize(self, raw, pkt, i):
    '''
    Set length of the header based on the serialized payload length
    plus the minimum OpenFlow header size.

    Fix: reconstructed from truncated-and-duplicated source text
    (``OpenflowHeader._MINLEf pre_serialize(...)``).
    '''
    self.length = len(raw) + OpenflowHeader._MINLEN
Set length of the header based on the serialized payload length.
def set_bpf_filter_on_all_devices(filterstr):
    '''
    Long method name, but self-explanatory.  Set the bpf filter on all
    devices that have been opened.

    Fix: reconstructed from truncated-and-duplicated source text
    (``filterstrf set_bpf_filter_on_all_devices(...)``).
    '''
    with PcapLiveDevice._lock:
        for dev in PcapLiveDevice._OpenDevices.values():
            _PcapFfi.instance()._set_filter(dev, filterstr)
Long method name, but self-explanatory. Set the bpf filter on all devices that have been opened.
def create_ip_arp_reply(srchw, dsthw, srcip, targetip):
    '''
    Create an ARP reply (just change what needs to be changed from a
    request).

    Fix: reconstructed from truncated-and-duplicated source text
    (``return pkf create_ip_arp_reply(...)``).
    '''
    pkt = create_ip_arp_request(srchw, srcip, targetip)
    pkt[0].dst = dsthw
    pkt[1].operation = ArpOperation.Reply
    pkt[1].targethwaddr = dsthw
    return pkt
Create an ARP reply (just change what needs to be changed from a request)
def create_ip_arp_request(srchw, srcip, targetip):
    '''
    Create and return a packet containing an Ethernet header and ARP
    header (broadcast ARP request for *targetip*).

    Fix: reconstructed from truncated-and-duplicated source text
    (``return ether + arf create_ip_arp_request(...)``).
    '''
    ether = Ethernet()
    ether.src = srchw
    ether.dst = SpecialEthAddr.ETHER_BROADCAST.value
    ether.ethertype = EtherType.ARP
    arp = Arp()
    arp.operation = ArpOperation.Request
    arp.senderhwaddr = srchw
    arp.senderprotoaddr = srcip
    arp.targethwaddr = SpecialEthAddr.ETHER_BROADCAST.value
    arp.targetprotoaddr = targetip
    return ether + arp
Create and return a packet containing an Ethernet header and ARP header.
def setup_logging(debug, logfile=None):
    '''
    Setup logging format and log level; log to *logfile* when given,
    otherwise to the console.

    Fix: reconstructed from truncated-and-duplicated source text
    (``level=levelf setup_logging(...)``).
    '''
    level = logging.DEBUG if debug else logging.INFO
    if logfile is not None:
        logging.basicConfig(format="%(asctime)s %(levelname)8s %(message)s",
                            datefmt="%H:%M:%S %Y/%m/%d", level=level,
                            filename=logfile)
    else:
        logging.basicConfig(format="%(asctime)s %(levelname)8s %(message)s",
                            datefmt="%H:%M:%S %Y/%m/%d", level=level)
Setup logging format and log level.
def shutdown(self):
    '''
    Should be called by Switchyard user code when a network object is
    being shut down.  (This method cleans up internal threads and
    network interaction objects.)

    Fix: reconstructed from truncated-and-duplicated source text
    (``"Done cleaning up"f shutdown(self):``).
    '''
    if not LLNetReal.running:
        return
    LLNetReal.running = False
    log_debug("Joining threads for shutdown")
    for t in self._threads:
        t.join()
    log_debug("Closing pcap devices")
    for devname, pdev in self._pcaps.items():
        pdev.close()
    for rdev in self._localsend.values():
        rdev.close()
    log_debug("Done cleaning up")
Should be called by Switchyard user code when a network object is being shut down. (This method cleans up internal threads and network interaction objects.)
def _spawn_threads(self):
    '''
    Internal method.  Creates threads to handle low-level network
    receive, one per pcap device.

    Fix: reconstructed from truncated-and-duplicated source text
    (``self._threads.append(tf _spawn_threads(self):``).
    '''
    for devname, pdev in self._pcaps.items():
        t = threading.Thread(target=LLNetReal._low_level_dispatch,
                             args=(pdev, devname, self._pktqueue))
        t.start()
        self._threads.append(t)
Internal method. Creates threads to handle low-level network receive.
def _make_pcaps(self):
    '''
    Internal method.  Create libpcap devices for every network
    interface we care about and set them in non-blocking mode; loopback
    interfaces additionally get a raw UDP send socket.

    Fix: reconstructed from truncated-and-duplicated source text
    (``self._pcaps[devname] = pdef _make_pcaps(self):``).
    '''
    self._pcaps = {}
    for devname, intf in self._devinfo.items():
        if intf.iftype == InterfaceType.Loopback:
            senddev = _RawSocket(devname, protocol=IPProtocol.UDP)
            self._localsend[devname] = senddev
        pdev = PcapLiveDevice(devname)
        self._pcaps[devname] = pdev
Internal method. Create libpcap devices for every network interface we care about and set them in non-blocking mode.
def _sig_handler(self, signum, stack):
    '''
    Handle process INT signal: stop the run loop and unblock any thread
    waiting on the packet queue.

    Fix: reconstructed from truncated-and-duplicated source text
    (``(None,None,None) f _sig_handler(...)``).
    '''
    log_debug("Got SIGINT.")
    if signum == signal.SIGINT:
        LLNetReal.running = False
        if self._pktqueue.qsize() == 0:
            # put dummy pkt in queue to unblock a
            # possibly stuck user thread
            self._pktqueue.put((None, None, None))
Handle process INT signal.
def _low_level_dispatch(pcapdev, devname, pktqueue):
    '''
    Thread entrypoint for doing low-level receive and dispatch for a
    single pcap device.

    Fix: reconstructed from truncated-and-duplicated source text
    (``stats.ps_ifdrop)f _low_level_dispatch(...)``).
    '''
    while LLNetReal.running:
        # a non-zero timeout value is ok here; this is an
        # independent thread that handles input for this
        # one pcap device. it throws any packets received
        # into the shared queue (which is read by the actual
        # user code)
        pktinfo = pcapdev.recv_packet(timeout=0.2)
        if pktinfo is None:
            continue
        pktqueue.put((devname, pcapdev.dlt, pktinfo))
    log_debug("Receiver thread for {} exiting".format(devname))
    stats = pcapdev.stats()
    log_debug("Final device statistics {}: {} received, {} dropped, "
              "{} dropped/if".format(devname, stats.ps_recv,
                                     stats.ps_drop, stats.ps_ifdrop))
Thread entrypoint for doing low-level receive and dispatch for a single pcap device.
def __default_filter(self, node):
    """Exclude non-declarations, private members, and members whose
    docblock carries an @hide or @exclude tag."""
    if not isinstance(node, javalang.tree.Declaration):
        return False
    if 'private' in node.modifiers:
        return False
    if isinstance(node, javalang.tree.Documented) and node.documentation:
        doc = javalang.javadoc.parse(node.documentation)
        if 'hide' in doc.tags or 'exclude' in doc.tags:
            return False
    return True
Excludes private members and those tagged "@hide" / "@exclude" in their docblocks.
def __output_see(self, see):
    """Convert the argument of a Javadoc @see tag to reST."""
    if see.startswith('<a href'):
        # HTML link -- <a href="...">...</a>
        return self.__html_to_rst(see)
    if '"' in see:
        # Plain text
        return see
    # Type reference (default)
    return ':java:ref:`%s`' % (see.replace('#', '.').replace(' ', ''),)
Convert the argument to a @see tag to rest
def compile(self, ast):
    """Compile autodocs for the given Java syntax tree.

    Returns a dict mapping fully-qualified type names to
    (package, name, document_text) tuples, one per documented type.
    """
    documents = {}
    imports = util.StringBuilder()
    for imp in ast.imports:
        if imp.static or imp.wildcard:
            continue
        package_parts = []
        cls_parts = []
        # Split the dotted path into package components (lowercase) and
        # class components (first capitalized part onward).
        for part in imp.path.split('.'):
            if cls_parts or part[0].isupper():
                cls_parts.append(part)
            else:
                package_parts.append(part)
        # If the import's final part wasn't capitalized, append it to
        # the class parts anyway so sphinx doesn't complain.
        if not cls_parts:
            cls_parts.append(package_parts.pop())
        imports.append(util.Directive(
            'java:import',
            '.'.join(package_parts) + ' ' + '.'.join(cls_parts)).build())
    import_block = imports.build()
    if not ast.package:
        raise ValueError('File must have package declaration')
    package = ast.package.name
    type_declarations = []
    for path, node in ast.filter(javalang.tree.TypeDeclaration):
        if not self.filter(node):
            continue
        # Qualify nested types with every enclosing type name.
        classes = [n.name for n in path
                   if isinstance(n, javalang.tree.TypeDeclaration)]
        classes.append(node.name)
        type_declarations.append((package, '.'.join(classes), node))
    for package, name, declaration in type_declarations:
        full_name = package + '.' + name
        document = self.compile_type_document(import_block, package,
                                              name, declaration)
        documents[full_name] = (package, name, document.build())
    return documents
Compile autodocs for the given Java syntax tree. Documents will be returned documenting each separate type.
def to_bytes(self):
    '''
    Return packed byte representation of the RIPv2 header followed by
    its route entries.

    Fix: reconstructed from truncated-and-duplicated source text
    (``return hdr + routef to_bytes(self):``).
    NOTE(review): docstring said "UDP header" — this packs a RIPv2
    message (command byte + version 2), not UDP.
    '''
    hdr = struct.pack(RIPv2._PACKFMT, self.command.value, 2)
    routes = b''.join([r.to_bytes() for r in self._routes])
    return hdr + routes
Return packed byte representation of the RIPv2 header.
def checksum(data, start=0, skip_word=None):
    """Calculate the standard Internet checksum over data.

    start: initial accumulator value (e.g. a pseudo-header sum).
    skip_word: if given, the word offset of a word in data to treat as
    zero -- useful when verifying received data that already embeds the
    computed checksum.
    """
    if len(data) % 2 != 0:
        arr = array.array('H', data[:-1])
    else:
        arr = array.array('H', data)
    if skip_word is not None:
        for i in range(0, len(arr)):
            if i == skip_word:
                continue
            start += arr[i]
    else:
        for i in range(0, len(arr)):
            start += arr[i]
    if len(data) % 2 != 0:
        # Pad the trailing odd byte with a zero byte (native word order)
        start += struct.unpack('H', data[-1:] + b'\x00')[0]
    # Fold carries back into 16 bits
    start = (start >> 16) + (start & 0xffff)
    start += (start >> 16)
    return ntohs(~start & 0xffff)
Calculate standard internet checksum over data starting at start'th byte skip_word: If specified, it's the word offset of a word in data to "skip" (as if it were zero). The purpose is when data is received data which contains a computed checksum that you are trying to verify -- you want to skip that word since it was zero when the checksum was initially calculated.
def javadoc_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
    """Sphinx role for linking to external Javadoc."""
    has_explicit_title, title, target = split_explicit_title(text)
    title = utils.unescape(title)
    target = utils.unescape(target)
    if not has_explicit_title:
        target = target.lstrip('~')
        if title[0] == '~':
            # Leading ~ means: display only the last dotted component
            title = title[1:].rpartition('.')[2]
    app = inliner.document.settings.env.app
    ref = get_javadoc_ref(app, rawtext, target)
    if not ref:
        raise ValueError("no Javadoc source found for %s in javadoc_url_map"
                         % (target,))
    ref.append(nodes.Text(title, title))
    return [ref], []
Role for linking to external Javadoc
def add(self, port, pkt):
    '''Add a new (input port, packet) pair to the buffer and return its
    buffer id.  Raises FullBuffer if the buffer is at capacity.'''
    bufid = len(self._buffer) + 1  # renamed from 'id' (shadowed builtin)
    if bufid > self._buffsize:
        raise FullBuffer()
    # Deep-copy so later mutation of pkt doesn't change the buffered copy
    self._buffer[bufid] = (port, deepcopy(pkt))
    return bufid
Add new input port + packet to buffer.
def _process_actions(self, actions, inport, packet):
    '''Process actions in order, in two stages.  Each action implements
    __call__, which applies any packet-level changes or other non-output
    changes; it can optionally return another function to be applied in
    the second stage.'''
    second_stage = []
    for a in actions:
        fn = a(packet=packet, net=self._switchyard_net,
               controllers=self._controller_connections, inport=inport)
        if fn:
            second_stage.append(fn)
    for fn in second_stage:
        fn()
Process actions in order, in two stages. Each action implements a __call__, which applies any packet-level changes or other non-output changes. The functors can optionally return another function to be applied at the second stage.
def _handle_datapath(self, inport, packet):
    '''Handle a single packet on the data plane.'''
    inport = self._switchyard_net.port_by_name(inport)
    portnum = inport.ifnum
    log_info("Processing packet: {}->{}".format(portnum, packet))
    actions = None
    # FIXME: this is all wrong/incomplete
    # if match: update counters; execute instructions:
    #   update action set, packet/match set fields, metadata
    # if no match and a table-miss entry exists, do the above;
    # otherwise, drop the packet.
    for tnum, t in enumerate(self._tables):
        actions = t.match_packet(portnum, packet)
    if actions is None:
        self._send_packet_in(portnum, packet)
    else:
        self._datapath_action(portnum, packet, actions=actions)
Handle single packet on the data plane.
def to_bytes(self):
    '''Return packed byte representation of the UDP header.'''
    return struct.pack(UDP._PACKFMT, self._src, self._dst,
                       self._len, self._checksum)
Return packed byte representation of the UDP header.
def from_bytes(self, raw):
    '''Reconstruct this UDP header from raw bytes and return the
    remaining (payload) bytes.  Raises NotEnoughDataError if raw is
    too short.'''
    if len(raw) < UDP._MINLEN:
        raise NotEnoughDataError("Not enough bytes ({}) to reconstruct "
                                 "an UDP object".format(len(raw)))
    fields = struct.unpack(UDP._PACKFMT, raw[:UDP._MINLEN])
    self._src = fields[0]
    self._dst = fields[1]
    self._len = fields[2]
    self._checksum = fields[3]
    return raw[UDP._MINLEN:]
Return a UDP object reconstructed from raw bytes, raising an exception if we can't resurrect the packet.
def to_bytes(self):
    '''Return packed byte representation of the TCP header
    (fixed header plus options).'''
    header = self._make_header(self._checksum)
    return header + self._options.to_bytes()
Return packed byte representation of the TCP header.
def from_bytes(self, raw):
    '''Reconstruct this TCP header from raw bytes and return the
    remaining (payload) bytes.  Raises NotEnoughDataError if raw is
    too short.'''
    if len(raw) < TCP._MINLEN:
        raise NotEnoughDataError("Not enough bytes ({}) to reconstruct "
                                 "an TCP object".format(len(raw)))
    fields = struct.unpack(TCP._PACKFMT, raw[:TCP._MINLEN])
    self._src = fields[0]
    self._dst = fields[1]
    self._seq = fields[2]
    self._ack = fields[3]
    offset = fields[4] >> 12        # data offset, in 32-bit words
    self._flags = fields[4] & 0x01ff
    self._window = fields[5]
    csum = fields[6]                # NOTE(review): checksum parsed but never stored
    self._urg = fields[7]
    headerlen = offset * 4
    self._options.from_bytes(raw[TCP._MINLEN:headerlen])
    return raw[headerlen:]
Return a TCP object reconstructed from raw bytes, raising an exception if we can't resurrect the packet.
def to_bytes(self, dochecksum=True):
    '''Return packed byte representation of the ICMP header plus
    its data.  If dochecksum is False the checksum field is zero.'''
    csum = 0
    if dochecksum:
        csum = self.checksum()
    return b''.join((struct.pack(ICMP._PACKFMT, self._type.value,
                                 self._code.value, csum),
                     self._icmpdata.to_bytes()))
Return packed byte representation of the ICMP header plus its data.
def _parse_codeargs(argstr):
    '''Parse and clean up an argument string for user code; separate
    positional *args from keyword **kwargs.  Returns a dict with
    'args' (list) and 'kwargs' (dict) keys.  Non-string input yields
    empty args/kwargs.'''
    args = []
    kwargs = {}
    if isinstance(argstr, str):
        for a in argstr.split():
            if '=' in a:
                k, attr = a.split('=')
                kwargs[k] = attr
            else:
                args.append(a)
    rd = {'args': args, 'kwargs': kwargs}
    return rd
Parse and clean up argument to user code; separate *args from **kwargs.
def netmask_to_cidr(dq):
    """Take a netmask (IPv4Address or dotted-quad string) and return the
    number of network bits, e.g. 255.255.255.0 -> 24.  Raises
    RuntimeError if the mask is not CIDR-compatible."""
    if isinstance(dq, str):
        dq = IPv4Address(dq)
    v = int(dq)
    c = 0
    # Count leading one-bits from the top of the 32-bit value
    while v & 0x80000000:
        c += 1
        v <<= 1
        v = v & 0xffFFffFF
    if v != 0:
        # A one-bit remained after the leading run -- not a prefix mask
        raise RuntimeError("Netmask %s is not CIDR-compatible" % (dq,))
    return c
Takes a netmask as either an IPAddr or a string, and returns the number of network bits. e.g., 255.255.255.0 -> 24 Raise exception if subnet mask is not CIDR-compatible.
def parse_cidr(addr, infer=True, allow_host=False):
    """Take a CIDR address or plain dotted-quad and return a tuple of
    (address, count-of-network-bits).  Can infer the network bits from
    classful addressing if infer=True.  Also accepts 'address/netmask'
    as long as the netmask is representable in CIDR.
    """
    def check(r0, r1):
        a = int(r0)
        b = r1
        if (not allow_host) and (a & ((1 << b) - 1)):
            raise RuntimeError("Host part of CIDR address is not zero (%s)"
                               % (addr,))
        return (r0, 32 - r1)
    addr = addr.split('/', 2)
    if len(addr) == 1:
        if infer is False:
            return check(IPAddr(addr[0]), 0)
        addr = IPAddr(addr[0])
        b = 32 - infer_netmask(addr)
        m = (1 << b) - 1
        if (int(addr) & m) == 0:
            # All bits in wildcarded part are 0, so we'll use the wildcard
            return check(addr, b)
        else:
            # Some bits in the wildcarded part are set; assume it's a host
            return check(addr, 0)
    try:
        wild = 32 - int(addr[1])
    except Exception:
        # Maybe they passed a dotted-quad netmask instead of a bit count
        m = int(IPAddr(addr[1]))
        b = 0
        while m & (1 << 31):
            b += 1
            m <<= 1
        if m & 0x7fffffff != 0:
            raise RuntimeError("Netmask " + str(addr[1])
                               + " is not CIDR-compatible")
        wild = 32 - b
    if not (wild >= 0 and wild <= 32):
        raise RuntimeError("Invalid mask length")
    return check(IPAddr(addr[0]), wild)
Takes a CIDR address or plain dotted-quad, and returns a tuple of address and count-of-network-bits. Can infer the network bits based on network classes if infer=True. Can also take a string in the form 'address/netmask', as long as the netmask is representable in CIDR. FIXME: This function is badly named.
def infer_netmask(addr):
    """Use classful addressing rules to guess the number of network bits."""
    addr = int(addr)
    if addr == 0:
        # Special case -- default network: all bits wildcarded
        return 32 - 32
    if (addr & (1 << 31)) == 0:
        # Class A
        return 32 - 24
    if (addr & (3 << 30)) == 2 << 30:
        # Class B
        return 32 - 16
    if (addr & (7 << 29)) == 6 << 29:
        # Class C
        return 32 - 8
    if (addr & (15 << 28)) == 14 << 28:
        # Class D (multicast): exact match
        return 32 - 0
    # Must be Class E (experimental)
    return 32 - 0
Uses network classes to guess the number of network bits
def isBridgeFiltered(self):
    """True if this is an IEEE 802.1D MAC Bridge Filtered group address
    (01-80-C2-00-00-00 through 01-80-C2-00-00-0F).  Frames addressed to
    this range are not relayed by 802.1D-conformant bridges."""
    return ((self.__value[0] == 0x01) and
            (self.__value[1] == 0x80) and
            (self.__value[2] == 0xC2) and
            (self.__value[3] == 0x00) and
            (self.__value[4] == 0x00) and
            (self.__value[5] <= 0x0F))
Checks if address is an IEEE 802.1D MAC Bridge Filtered MAC Group Address This range is 01-80-C2-00-00-00 to 01-80-C2-00-00-0F. MAC frames that have a destination MAC address within this range are not relayed by bridges conforming to IEEE 802.1D
def toStr(self, separator=':'):
    """Return the address as a string of 12 hex chars in 6 two-char
    groups joined by separator."""
    return separator.join(('{:02x}'.format(x) for x in self.__value))
Returns the address as string consisting of 12 hex chars separated by separator.
def to_bytes(self):
    '''Return packed byte representation of the Ethernet header.'''
    return struct.pack(Ethernet._PACKFMT, self._dst.packed,
                       self._src.packed, self._ethertype.value)
Return packed byte representation of the Ethernet header.
def from_bytes(self, raw):
    '''Reconstruct this Ethernet header from raw bytes and return the
    remaining (payload) bytes.  Raises NotEnoughDataError if raw is
    too short.'''
    if len(raw) < Ethernet._MINLEN:
        raise NotEnoughDataError("Not enough bytes ({}) to reconstruct an "
                                 "Ethernet object".format(len(raw)))
    dst, src, ethertype = struct.unpack(Ethernet._PACKFMT,
                                        raw[:Ethernet._MINLEN])
    self.src = src
    self.dst = dst
    # Values <= 1500 are 802.3 length fields, not EtherType codes
    if ethertype <= 1500:
        self.ethertype = EtherType.NoType
    else:
        self.ethertype = ethertype
    return raw[Ethernet._MINLEN:]
Return an Ethernet object reconstructed from raw bytes, or an Exception if we can't resurrect the packet.
def run_simulation(topo, **kwargs):
    '''Get the simulation substrate started.  The key things are to set up
    a series of queues that connect nodes together and get the link
    emulation objects started (all inside the NodeExecutor class).  The
    NodePlumbing named tuples hold together threads for each node, the
    emulation substrate (NodeExecutors), and the ingress queue that each
    node receives packets from.'''
    log_debug("Threads at startup:")
    for t in threading.enumerate():
        log_debug("\tthread at startup {}".format(t.name))
    with yellow():
        log_info("Starting up switchyard simulation substrate.")
    glue = SyssGlue(topo, **kwargs)
    cli = Cli(glue, topo)
    try:
        cli.cmdloop()
    except KeyboardInterrupt:
        print("Received SIGINT --- shutting down.")
        cli.stop()
Get the simulation substrate started. The key things are to set up a series of queues that connect nodes together and get the link emulation objects started (all inside the NodeExecutor class). The NodePlumbing named tuples hold together threads for each node, the emulation substrate (NodeExecutors), and the ingress queue that each node receives packets from.
def _init():
    '''Internal switchyard static initialization method; idempotent.'''
    if ApplicationLayer._isinit:
        return
    ApplicationLayer._isinit = True
    ApplicationLayer._to_app = {}
    ApplicationLayer._from_app = Queue()
Internal switchyard static initialization method.
def recv_from_app(timeout=_default_timeout):
    '''Called by a network stack implementer to receive application-layer
    data for sending on to a remote location.  Can optionally take a
    timeout value.  If no data are available, raises NoPackets.
    Returns a 2-tuple: flowaddr and data.  The flowaddr consists of
    5 items: protocol, localaddr, localport, remoteaddr, remoteport.'''
    try:
        return ApplicationLayer._from_app.get(timeout=timeout)
    except Empty:
        pass
    raise NoPackets()
Called by a network stack implementer to receive application-layer data for sending on to a remote location. Can optionally take a timeout value. If no data are available, raises NoPackets exception. Returns a 2-tuple: flowaddr and data. The flowaddr consists of 5 items: protocol, localaddr, localport, remoteaddr, remoteport.
def _register_socket(s):
    '''Internal method used by the socket emulation layer to create a new
    "upward" queue for an app-layer socket and to register the socket
    object.  Returns two queues: "downward" (fromapp) and "upward"
    (toapp).'''
    queue_to_app = Queue()
    with _lock:
        ApplicationLayer._to_app[s._sockid()] = queue_to_app
    return ApplicationLayer._from_app, queue_to_app
Internal method used by socket emulation layer to create a new "upward" queue for an app-layer socket and to register the socket object. Returns two queues: "downward" (fromapp) and "upward" (toapp).
def _registry_update(s, oldid):
    '''Internal method used to update an existing socket registry entry
    when the socket is re-bound to a different local port number.
    Requires the socket object and the old sockid.  Returns None.'''
    with _lock:
        sock_queue = ApplicationLayer._to_app.pop(oldid)
        ApplicationLayer._to_app[s._sockid()] = sock_queue
Internal method used to update an existing socket registry when the socket is re-bound to a different local port number. Requires the socket object and old sockid. Returns None.
def _unregister_socket(s):
    '''Internal method used to remove the socket from the AppLayer
    registry.  Warns if the "upward" socket queue has any left-over
    data.'''
    with _lock:
        sock_queue = ApplicationLayer._to_app.pop(s._sockid())
    if not sock_queue.empty():
        log_warn("Socket being destroyed still has data enqueued "
                 "for application layer.")
Internal method used to remove the socket from AppLayer registry. Warns if the "upward" socket queue has any left-over data.
def bind(self, address):
    '''Alter the local address with which this socket is associated.
    The address parameter is a 2-tuple of (IP address, port number).
    Returns -1 if the requested port is already in use, but does *not*
    check that the address is valid; returns 0 on success.'''
    portset = _gather_ports().union(ApplicationLayer._emuports())
    if address[1] in portset:
        log_warn("Port is already in use.")
        return -1
    oldid = self._sockid()
    # block firewall port
    # set stack to only allow packets through for addr/port
    self._local_addr = _normalize_addrs(address)
    # update firewall and pcap filters
    self.__set_fw_rules()
    ApplicationLayer._registry_update(self, oldid)
    return 0
Alter the local address with which this socket is associated. The address parameter is a 2-tuple consisting of an IP address and port number. NB: this method fails and returns -1 if the requested port to bind to is already in use but does *not* check that the address is valid.
def recv(self, buffersize, flags=0):
    '''Receive data on the socket.  The buffersize and flags arguments
    are currently ignored.  Only returns the data.'''
    _, _, data = self._recv(buffersize)
    return data
Receive data on the socket. The buffersize and flags arguments are currently ignored. Only returns the data.
def recvfrom(self, buffersize, flags=0):
    '''Receive data on the socket.  The buffersize and flags arguments
    are currently ignored.  Returns the data and an address tuple
    (IP address and port) of the remote host.'''
    _, remoteaddr, data = self._recv(buffersize)
    return data, remoteaddr
Receive data on the socket. The buffersize and flags arguments are currently ignored. Returns the data and an address tuple (IP address and port) of the remote host.
def send(self, data, flags=0):
    '''Send data on the socket.  A call to connect() must have been
    previously made for this call to succeed.  flags is currently
    ignored.  Raises sockerr if the socket is not connected.'''
    if self._remote_addr == (None, None):
        raise sockerr("ENOTCONN: socket not connected")
    return self._send(data, self._flowaddr())
Send data on the socket. A call to connect() must have been previously made for this call to succeed. Flags is currently ignored.
def sendto(self, data, *args):
    '''Send data on the socket.  Accepts the same parameters as the
    built-in socket sendto: data[, flags], address, where address is a
    2-tuple of IP address and port.  Any flags are currently ignored.'''
    remoteaddr = args[-1]  # address is always the last positional arg
    remoteaddr = _normalize_addrs(remoteaddr)
    return self._send(data, (self._proto, self._local_addr[0],
                             self._local_addr[1], remoteaddr[0],
                             remoteaddr[1]))
Send data on the socket. Accepts the same parameters as the built-in socket sendto: data[, flags], address where address is a 2-tuple of IP address and port. Any flags are currently ignored.
def settimeout(self, timeout):
    '''Set the timeout value for this socket: None means block forever,
    0 means non-blocking, any other value is a blocking timeout in
    seconds.'''
    if timeout is None:
        self._block = True
    elif float(timeout) == 0.0:
        self._block = False
    else:
        self._timeout = float(timeout)
        self._block = True
Set the timeout value for this socket.
def to_bytes(self):
    '''Return packed byte representation of the ARP header.'''
    return struct.pack(Arp._PACKFMT, self._hwtype.value,
                       self._prototype.value, self._hwaddrlen,
                       self._protoaddrlen, self._operation.value,
                       self._senderhwaddr.packed,
                       self._senderprotoaddr.packed,
                       self._targethwaddr.packed,
                       self._targetprotoaddr.packed)
Return packed byte representation of the ARP header.
def _process_table_cells(self, table):
    """Compile all the table cells.  Returns a list of rows; the rows
    may have different lengths because of column spans."""
    rows = []
    for i, tr in enumerate(table.find_all('tr')):
        row = []
        for c in tr.contents:
            cell_type = getattr(c, 'name', None)
            if cell_type not in ('td', 'th'):
                continue
            rowspan = int(c.attrs.get('rowspan', 1))
            colspan = int(c.attrs.get('colspan', 1))
            contents = self._process_children(c).strip()
            if cell_type == 'th' and i > 0:
                # Bold header cells that appear after the first row
                contents = self._inline('**', contents)
            row.append(Cell(cell_type, rowspan, colspan, contents))
        rows.append(row)
    return rows
Compile all the table cells. Returns a list of rows. The rows may have different lengths because of column spans.
def block(self):
    '''Install the switchyard pf rules.  Equivalent shell commands:
         pfctl -a switchyard -f- < rules.txt
         pfctl -a switchyard -F rules
         pfctl -t switchyard -F r
    '''
    st, output = _runcmd("/sbin/pfctl -aswitchyard -f -", self._rules)
    log_debug("Installing rules: {}".format(output))
pfctl -a switchyard -f- < rules.txt pfctl -a switchyard -F rules pfctl -t switchyard -F r
def show_graph(cn_topo, showintfs=False, showaddrs=False):
    '''Display the topology in a matplotlib window.'''
    __do_draw(cn_topo, showintfs=showintfs, showaddrs=showaddrs)
    pyp.show()
Display the topology
def save_graph(cn_topo, filename, showintfs=False, showaddrs=False):
    '''Save the topology to an image file.'''
    __do_draw(cn_topo, showintfs=showintfs, showaddrs=showaddrs)
    pyp.savefig(filename)
Save the topology to an image file
def load_from_file(filename):
    '''Load a topology from filename and return it.'''
    # 'r' instead of the deprecated 'rU' (universal newlines is the
    # default in Python 3; 'rU' was removed in Python 3.11).
    with open(filename, 'r') as infile:
        tdata = infile.read()
    return Topology.unserialize(tdata)
Load a topology from filename and return it.
def save_to_file(cn_topo, filename):
    '''Save a topology to a file.'''
    jstr = cn_topo.serialize()
    with open(filename, 'w') as outfile:
        outfile.write(jstr)
Save a topology to a file.
def __addNode(self, name, cls):
    '''Add a node of the given class to the topology; raises if the
    name is already taken.'''
    if name in self.nodes:
        raise Exception("A node by the name {} already exists. "
                        "Can't add a duplicate.".format(name))
    self.__nxgraph.add_node(name)
    self.__nxgraph.node[name]['label'] = name
    self.__nxgraph.node[name]['nodeobj'] = cls()
    self.__nxgraph.node[name]['type'] = cls.__name__
Add a node to the topology
def addHost(self, name=None):
    '''Add a new host node to the topology, auto-generating a name
    (h0, h1, ...) if none is given.  Returns the node name.'''
    if name is None:
        while True:
            name = 'h' + str(self.__hnum)
            self.__hnum += 1
            if name not in self.__nxgraph:
                break
    self.__addNode(name, Host)
    return name
Add a new host node to the topology.
def addSwitch(self, name=None):
    '''Add a new switch to the topology, auto-generating a name
    (s0, s1, ...) if none is given.  Returns the node name.'''
    if name is None:
        while True:
            name = 's' + str(self.__snum)
            self.__snum += 1
            if name not in self.__nxgraph:
                break
    self.__addNode(name, Switch)
    return name
Add a new switch to the topology.
def addRouter(self, name=None):
    '''Add a new router to the topology, auto-generating a name
    (r0, r1, ...) if none is given.  Returns the node name.'''
    if name is None:
        while True:
            name = 'r' + str(self.__rnum)
            self.__rnum += 1
            if name not in self.__nxgraph:
                break
    self.__addNode(name, Router)
    return name
Add a new router to the topology.
def serialize(self):
    '''Return a JSON string of the serialized topology.'''
    return json.dumps(json_graph.node_link_data(self.__nxgraph), cls=Encoder)
Return a JSON string of the serialized topology
def unserialize(jsonstr):
    '''Unserialize a JSON string representation of a topology.'''
    topod = json.loads(jsonstr)
    G = json_graph.node_link_graph(topod)
    for n, ndict in G.nodes(data=True):
        if 'nodeobj' not in ndict or 'type' not in ndict:
            raise Exception("Required type information is not present "
                            "in serialized node {} :{}".format(n, ndict))
        nobj = ndict['nodeobj']
        # NOTE(review): eval of a type name taken from serialized input is
        # unsafe on untrusted data; consider a whitelist/dict lookup.
        cls = eval(ndict['type'])
        ndict['nodeobj'] = cls(**dict(nobj))
    t = Topology(nxgraph=G)
    return t
Unserialize a JSON string representation of a topology
def getLinkInterfaces(self, node1, node2):
    '''
    Given two node names that identify a link, return the pair of
    interface names assigned at each endpoint (as a tuple in the same
    order as the nodes given).
    '''
    linkdata = self.getLink(node1, node2)
    # Bug fix: the second element was linkdata[node1], which returned
    # node1's interface twice instead of node2's.
    return linkdata[node1], linkdata[node2]
Given two node names that identify a link, return the pair of interface names assigned at each endpoint (as a tuple in the same order as the nodes given).
def setInterfaceAddresses(self, node, interface, mac=None, ip=None, netmask=None):
    '''
    Set any one of Ethernet (MAC) address, IP address or IP netmask
    for a given interface on a node.

    Arguments left as None (or otherwise falsy) are not modified.
    Raises Exception for an unknown node or interface.
    '''
    if not self.hasNode(node):
        raise Exception("No such node {}".format(node))
    nobj = self.getNode(node)['nodeobj']
    if interface not in nobj:
        raise Exception("No such interface {}".format(interface))
    intf = nobj.getInterface(interface)
    # Apply only the addresses the caller actually supplied.
    for attrname, value in (('ethaddr', mac), ('ipaddr', ip), ('netmask', netmask)):
        if value:
            setattr(intf, attrname, value)
Set any one of Ethernet (MAC) address, IP address or IP netmask for a given interface on a node.
def getInterfaceAddresses(self, node, interface):
    '''
    Return the Ethernet and IP+mask addresses assigned to a given
    interface on a node, as an (ethaddr, ipaddr, netmask) tuple.
    '''
    nodeobj = self.getNode(node)['nodeobj']
    intf = nodeobj.getInterface(interface)
    return intf.ethaddr, intf.ipaddr, intf.netmask
Return the Ethernet and IP+mask addresses assigned to a given interface on a node.
def addNodeLabelPrefix(self, prefix=None, copy=False):
    '''
    Rename all nodes in the network from x to prefix_x.  If no prefix
    is given, use the name of the graph as the prefix.  The purpose of
    this method is to make node names unique so that composing two
    graphs is well-defined.

    If copy is True, return a new relabeled topology and leave this
    one untouched; otherwise relabel in place (returns None).
    '''
    # Bug fix: the 'copy' parameter shadowed the stdlib copy module, so
    # copy.deepcopy(self) raised AttributeError whenever copy=True.
    # Import deepcopy locally; the parameter name is kept for callers
    # that pass copy= as a keyword.
    from copy import deepcopy
    nxgraph = Topology.__relabel_graph(self.__nxgraph, prefix)
    if copy:
        newtopo = deepcopy(self)
        # NOTE(review): this assigns the public 'nxgraph' attribute; it only
        # replaces the private graph if 'nxgraph' is a property setter on
        # Topology — confirm against the class definition.
        newtopo.nxgraph = nxgraph
        return newtopo
    else:
        # Relabel in place.
        self.__nxgraph = nxgraph
Rename all nodes in the network from x to prefix_x. If no prefix is given, use the name of the graph as the prefix. The purpose of this method is to make node names unique so that composing two graphs is well-defined.
def from_bytes(self, raw):
    '''Return a Null header object reconstructed from raw bytes, or an
    Exception if we can't resurrect the packet.

    Consumes the first 4 bytes as a native-order unsigned int (the
    address family) and returns the remaining bytes.
    '''
    header_len = 4
    if len(raw) < header_len:
        raise NotEnoughDataError("Not enough bytes ({}) to reconstruct a Null object".format(len(raw)))
    # '=I': native byte order, standard 4-byte unsigned int.
    (self._af,) = struct.unpack('=I', raw[:header_len])
    return raw[header_len:]
Return a Null header object reconstructed from raw bytes, or an Exception if we can't resurrect the packet.