text_prompt
stringlengths 100
17.7k
⌀ | code_prompt
stringlengths 7
9.86k
⌀ |
---|---|
<SYSTEM_TASK:>
Return a list of non-primitive types used by this object.
<END_TASK>
<USER_TASK:>
Description:
def _get_type_list(self, props):
"""Return a list of non-primitive types used by this object.""" |
type_list = []
for k, v in list(props.items()):
t = self._get_property_type(v)
if t is not None:
type_list.append(t)
return sorted(type_list) |
<SYSTEM_TASK:>
Output common validator types based on usage.
<END_TASK>
<USER_TASK:>
Description:
def _output_validators(self):
"""Output common validator types based on usage.""" |
if self._walk_for_type('Boolean'):
print("from .validators import boolean")
if self._walk_for_type('Integer'):
print("from .validators import integer")
vlist = self.override.get_validator_list()
for override in vlist:
if override.startswith('common/'):
override = override.lstrip('common/')
filename = "validators"
else:
filename = "%s_validators" % self.filename
print("from .%s import %s" % (filename, override)) |
<SYSTEM_TASK:>
Build a tree of non-primitive typed dependency order.
<END_TASK>
<USER_TASK:>
Description:
def build_tree(self, name, props, resource_name=None):
    """Build a tree of non-primitive typed dependency order.

    :param name: name of the node being built.
    :param props: property mapping for this node.
    :param resource_name: optional owning resource name for the root node.
    :returns: a ``Node`` whose children are the (recursively built)
        non-primitive types referenced by ``props``.
    """
    node = Node(name, props, resource_name)
    child_types = self._get_type_list(props)
    if not child_types:
        return node
    for child_name in sorted(child_types):
        # 'Tag' is a shared builtin type and never gets its own subtree.
        if child_name == 'Tag':
            continue
        subtree = self.build_tree(child_name, self.properties[child_name])
        if subtree is not None:
            node.add_child(subtree)
    return node
<SYSTEM_TASK:>
Returns the list of all troposphere members we are able to
<END_TASK>
<USER_TASK:>
Description:
def inspect_members(self):
    """
    Returns the list of all troposphere members we are able to
    construct
    """
    # The member set is cached on the class so that the (expensive)
    # module walk runs at most once per process.
    if self._inspect_members:
        return self._inspect_members
    TemplateGenerator._inspect_members = \
        self._import_all_troposphere_modules()
    return self._inspect_members
<SYSTEM_TASK:>
Attempts to return troposphere class that represents Type of
<END_TASK>
<USER_TASK:>
Description:
def _get_resource_type_cls(self, name, resource):
"""Attempts to return troposphere class that represents Type of
provided resource. Attempts to find the troposphere class who's
`resource_type` field is the same as the provided resources `Type`
field.
:param resource: Resource to find troposphere class for
:return: None: If no class found for provided resource
type: Type of provided resource
:raise ResourceTypeNotDefined:
Provided resource does not have a `Type` field
""" |
# If provided resource does not have `Type` field
if 'Type' not in resource:
raise ResourceTypeNotDefined(name)
# Attempt to find troposphere resource with:
# `resource_type` == resource['Type']
try:
return self.inspect_resources[resource['Type']]
except KeyError:
# is there a custom mapping?
for custom_member in self._custom_members:
if custom_member.resource_type == resource['Type']:
return custom_member
# If no resource with `resource_type` == resource['Type'] found
return None |
<SYSTEM_TASK:>
Converts any object to its troposphere equivalent, if applicable.
<END_TASK>
<USER_TASK:>
Description:
def _convert_definition(self, definition, ref=None, cls=None):
    """
    Converts any object to its troposphere equivalent, if applicable.
    This function will recurse into lists and mappings to create
    additional objects as necessary.
    :param {*} definition: Object to convert
    :param str ref: Name of key in parent dict that the provided definition
        is from, can be None
    :param type cls: Troposphere class which represents provided definition
    """
    if isinstance(definition, Mapping):
        if 'Type' in definition:  # this is an AWS Resource
            expected_type = None
            if cls is not None:
                expected_type = cls
            else:
                # if the user uses the custom way to name custom resources,
                # we'll dynamically create a new subclass for this use and
                # pass that instead of the typical CustomObject resource
                try:
                    expected_type = self._generate_custom_type(
                        definition['Type'])
                except TypeError:
                    # If definition['Type'] turns out not to be a custom
                    # type (aka doesn't start with "Custom::")
                    if ref is not None:
                        raise ResourceTypeNotFound(ref, definition['Type'])
                    else:
                        # Make sure expected_type is nothing (as
                        # it always should be)
                        assert not expected_type
            if expected_type:
                # Normalize Condition/UpdatePolicy/etc. alongside the
                # Properties, then instantiate the troposphere class.
                args = self._normalize_properties(definition)
                return self._create_instance(expected_type, args, ref)
        if len(definition) == 1:  # This might be a function?
            # NOTE(review): dict.keys()[0] / dict.values()[0] only work on
            # Python 2, where keys()/values() return lists. On Python 3
            # this raises TypeError -- confirm the target interpreter.
            function_type = self._get_function_type(
                definition.keys()[0])
            if function_type:
                return self._create_instance(
                    function_type, definition.values()[0])
        # nothing special here - return as dict, converting each value
        # recursively (Python 2 iteritems()).
        d = {}
        for k, v in definition.iteritems():
            d[k] = self._convert_definition(v)
        return d
    elif (isinstance(definition, Sequence) and
            not isinstance(definition, basestring)):
        # Lists are converted element by element; strings are excluded
        # because they are also Sequences.
        return [self._convert_definition(v) for v in definition]
    # anything else is returned as-is
    return definition
<SYSTEM_TASK:>
Returns an instance of `cls` with `args` passed as arguments.
<END_TASK>
<USER_TASK:>
Description:
def _create_instance(self, cls, args, ref=None):
    """
    Returns an instance of `cls` with `args` passed as arguments.
    Recursively inspects `args` to create nested objects and functions as
    necessary.
    `cls` will only be considered only if it's an object we track
    (i.e.: troposphere objects).
    If `cls` has a `props` attribute, nested properties will be
    instanciated as troposphere Property objects as necessary.
    If `cls` is a list and contains a single troposphere type, the
    returned value will be a list of instances of that type.

    :param cls: expected type (troposphere class, list-of-one-type, or
        any other value -- e.g. a validator callable -- which is ignored)
    :param args: raw definition to instantiate from
    :param ref: optional title/key name used for the created object
    """
    if isinstance(cls, Sequence):
        if len(cls) == 1:
            # a list of 1 type means we must provide a list of such objects
            if (isinstance(args, basestring) or
                    not isinstance(args, Sequence)):
                # a scalar was given where a list is expected: wrap it
                args = [args]
            return [self._create_instance(cls[0], v) for v in args]
    if isinstance(cls, Sequence)\
            or cls not in self.inspect_members.union(self._custom_members):
        # this object doesn't map to any known object. could be a string
        # or int, or a Ref... or a list of types such as
        # [basestring, FindInMap, Ref] or maybe a
        # validator such as `integer` or `port_range`
        return self._convert_definition(args)
    elif issubclass(cls, AWSHelperFn):
        # special handling for functions, we want to handle it before
        # entering the other conditions.
        try:
            if issubclass(cls, Tags):
                # Tags are a list of {'Key': ..., 'Value': ...} dicts on
                # the wire; the Tags helper wants a plain mapping.
                arg_dict = {}
                for d in args:
                    arg_dict[d['Key']] = d['Value']
                return cls(arg_dict)
            if (isinstance(args, Sequence) and
                    not isinstance(args, basestring)):
                # list arguments are splatted into the function call
                return cls(*self._convert_definition(args))
            if issubclass(cls, autoscaling.Metadata):
                return self._generate_autoscaling_metadata(cls, args)
            if issubclass(cls, Export):
                # Export only carries a Name
                return cls(args['Name'])
            args = self._convert_definition(args)
            if isinstance(args, Ref) and issubclass(cls, Ref):
                # watch out for double-refs...
                # this can happen if an object's .props has 'Ref'
                # as the expected type (which is wrong and should be
                # changed to basestring!)
                return args
            return cls(args)
        except TypeError as ex:
            # NOTE(review): ex.message is Python 2 only; on Python 3 this
            # would raise AttributeError -- confirm target interpreter.
            if '__init__() takes exactly' not in ex.message:
                raise
            # special AWSHelperFn typically take lowercased parameters,
            # but templates use uppercase. for this reason we cannot
            # map to most of them, so we fallback with a generic one.
            # this might not work for all types if they do extra
            # processing in their init routine
            return GenericHelperFn(args)
    elif isinstance(args, Mapping):
        # we try to build as many troposphere objects as we can by
        # inspecting its type validation metadata
        kwargs = {}
        kwargs.update(args)
        for prop_name in getattr(cls, 'props', []):
            if prop_name not in kwargs:
                continue  # the user did not specify this value; skip it
            expected_type = cls.props[prop_name][0]
            if (isinstance(expected_type, Sequence) or
                    expected_type in self.inspect_members):
                # the property is itself a (list of) tracked type(s)
                kwargs[prop_name] = self._create_instance(
                    expected_type, kwargs[prop_name], prop_name)
            else:
                kwargs[prop_name] = self._convert_definition(
                    kwargs[prop_name], prop_name)
        args = self._convert_definition(kwargs)
        if isinstance(args, Ref):
            # use the returned ref instead of creating a new object
            return args
        if isinstance(args, AWSHelperFn):
            return self._convert_definition(kwargs)
        assert isinstance(args, Mapping)
        return cls(title=ref, **args)
    # scalar argument: convert it and pass it straight to the class
    return cls(self._convert_definition(args))
<SYSTEM_TASK:>
Inspects the definition and returns a copy of it that is updated
<END_TASK>
<USER_TASK:>
Description:
def _normalize_properties(self, definition):
"""
Inspects the definition and returns a copy of it that is updated
with any special property such as Condition, UpdatePolicy and the
like.
""" |
args = definition.get('Properties', {}).copy()
if 'Condition' in definition:
args.update({'Condition': definition['Condition']})
if 'UpdatePolicy' in definition:
# there's only 1 kind of UpdatePolicy; use it
args.update({'UpdatePolicy': self._create_instance(
UpdatePolicy, definition['UpdatePolicy'])})
if 'CreationPolicy' in definition:
# there's only 1 kind of CreationPolicy; use it
args.update({'CreationPolicy': self._create_instance(
CreationPolicy, definition['CreationPolicy'])})
if 'DeletionPolicy' in definition:
# DeletionPolicity is very basic
args.update(
{'DeletionPolicy': self._convert_definition(
definition['DeletionPolicy'])})
if 'Metadata' in definition:
# there are various kind of metadata; pass it as-is
args.update(
{'Metadata': self._convert_definition(
definition['Metadata'])})
if 'DependsOn' in definition:
args.update(
{'DependsOn': self._convert_definition(
definition['DependsOn'])})
return args |
<SYSTEM_TASK:>
Provides special handling for the autoscaling.Metadata object
<END_TASK>
<USER_TASK:>
Description:
def _generate_autoscaling_metadata(self, cls, args):
    """ Provides special handling for the autoscaling.Metadata object """
    assert isinstance(args, Mapping)
    # Build the cloud-init config wrapper from the CFN Init section.
    config = self._create_instance(
        cloudformation.InitConfig,
        args['AWS::CloudFormation::Init']['config'])
    init = self._create_instance(
        cloudformation.Init, {'config': config})
    # Authentication is optional; build one block per named entry.
    auth = None
    auth_key = 'AWS::CloudFormation::Authentication'
    if auth_key in args:
        blocks = {
            block_name: self._create_instance(
                cloudformation.AuthenticationBlock,
                args[auth_key][block_name],
                block_name)
            for block_name in args[auth_key]
        }
        auth = self._create_instance(cloudformation.Authentication, blocks)
    return cls(init, auth)
<SYSTEM_TASK:>
Imports all troposphere modules and returns them
<END_TASK>
<USER_TASK:>
Description:
def _import_all_troposphere_modules(self):
    """ Imports all troposphere modules and returns them """
    # Walk every non-package module living next to this file, skipping
    # the configured exclusions, then add the top-level package itself.
    pkg_dir = os.path.dirname(__file__)
    module_names = [
        mod_name
        for _, mod_name, is_pkg in pkgutil.walk_packages(
            [pkg_dir], prefix="troposphere.")
        if not is_pkg and mod_name not in self.EXCLUDE_MODULES
    ]
    module_names.append('troposphere')

    def members_predicate(m):
        # keep real (non-builtin) classes only
        return inspect.isclass(m) and not inspect.isbuiltin(m)

    members = set()
    for mod_name in module_names:
        module = importlib.import_module(mod_name)
        for _, member in inspect.getmembers(module, members_predicate):
            members.add(member)
    return members
<SYSTEM_TASK:>
Returns windows interfaces through GetAdaptersAddresses.
<END_TASK>
<USER_TASK:>
Description:
def get_windows_if_list(extended=False):
    """Returns windows interfaces through GetAdaptersAddresses.
    params:
    - extended: include anycast and multicast IPv6 (default False)"""
    # Should work on Windows XP+
    def _get_mac(x):
        # Only standard 6-byte (EUI-48) hardware addresses are rendered;
        # anything else yields an empty string.
        size = x["physical_address_length"]
        if size != 6:
            return ""
        data = bytearray(x["physical_address"])
        return str2mac(bytes(data)[:size])

    def _get_ips(x):
        # Linked lists of addresses as returned by GetAdaptersAddresses.
        unicast = x['first_unicast_address']
        anycast = x['first_anycast_address']
        multicast = x['first_multicast_address']

        def _resolve_ips(y):
            # y is expected to be a list of parsed address entries;
            # anything else (e.g. a NULL list head) resolves to no IPs.
            if not isinstance(y, list):
                return []
            ips = []
            for ip in y:
                # Dereference the SOCKADDR_INET structure and pick the
                # IPv4/IPv6 member according to the address family.
                addr = ip['address']['address'].contents
                if addr.si_family == socket.AF_INET6:
                    ip_key = "Ipv6"
                    si_key = "sin6_addr"
                else:
                    ip_key = "Ipv4"
                    si_key = "sin_addr"
                data = getattr(addr, ip_key)
                data = getattr(data, si_key)
                data = bytes(bytearray(data.byte))
                # Build IP
                if data:
                    ips.append(inet_ntop(addr.si_family, data))
            return ips

        ips = []
        ips.extend(_resolve_ips(unicast))
        if extended:
            # anycast/multicast IPv6 addresses are opt-in
            ips.extend(_resolve_ips(anycast))
            ips.extend(_resolve_ips(multicast))
        return ips

    if six.PY2:
        # Python 2: force names to a utf8 byte string, dropping anything
        # that cannot be encoded.
        _str_decode = lambda x: x.encode('utf8', errors='ignore')
    else:
        _str_decode = plain_str
    # One flat dict per adapter; metrics are unavailable on XP and
    # reported as 0 there.
    return [
        {
            "name": _str_decode(x["friendly_name"]),
            "win_index": x["interface_index"],
            "description": _str_decode(x["description"]),
            "guid": _str_decode(x["adapter_name"]),
            "mac": _get_mac(x),
            "ipv4_metric": 0 if WINDOWS_XP else x["ipv4_metric"],
            "ipv6_metric": 0 if WINDOWS_XP else x["ipv6_metric"],
            "ips": _get_ips(x)
        } for x in GetAdaptersAddresses()
    ]
<SYSTEM_TASK:>
Returns all available IPs matching to interfaces, using the windows system.
<END_TASK>
<USER_TASK:>
Description:
def get_ips(v6=False):
    """Returns all available IPs matching to interfaces, using the windows system.
    Should only be used as a WinPcapy fallback."""
    res = {}
    for iface in six.itervalues(IFACES):
        # Keep only addresses of the requested family: IPv6 addresses
        # contain a ':', IPv4 addresses never do.
        res[iface] = [ip for ip in iface.ips if (":" in ip) == bool(v6)]
    return res
<SYSTEM_TASK:>
Internal util to run pcap control command
<END_TASK>
<USER_TASK:>
Description:
def _pcap_service_control(action, askadmin=True):
    """Internal util to run pcap control command"""
    command = action + ' ' + pcap_service_name()
    if askadmin:
        # Elevate the command so service control is permitted.
        command = _encapsulate_admin(command)
    res, code = _exec_cmd(command)
    if code != 0:
        warning(res.decode("utf8", errors="ignore"))
    return code == 0
<SYSTEM_TASK:>
Get the device pcap name by device name or Scapy NetworkInterface
<END_TASK>
<USER_TASK:>
Description:
def pcapname(dev):
    """Get the device pcap name by device name or Scapy NetworkInterface
    """
    if isinstance(dev, NetworkInterface):
        # Invalid interfaces have no usable pcap name.
        return None if dev.is_invalid() else dev.pcap_name
    # Plain string: resolve by Windows device name first, then assume
    # the caller already passed a pcap name.
    try:
        iface = IFACES.dev_from_name(dev)
    except ValueError:
        iface = IFACES.dev_from_pcapname(dev)
    return iface.pcap_name
<SYSTEM_TASK:>
Retrieve Windows routes through a GetIpForwardTable call.
<END_TASK>
<USER_TASK:>
Description:
def _read_routes_c_v1():
    """Retrieve Windows routes through a GetIpForwardTable call.
    This is compatible with XP but won't get IPv6 routes."""
    def _extract_ip(obj):
        # GetIpForwardTable returns addresses as little-endian integers.
        return inet_ntop(socket.AF_INET, struct.pack("<I", obj))

    routes = []
    for entry in GetIpForwardTable():
        try:
            iface = dev_from_index(entry['ForwardIfIndex'])
            if iface.ip == "0.0.0.0":
                # interface without a usable IPv4 address
                continue
        except ValueError:
            # unknown interface index
            continue
        # Route metric is the sum of the route's own metric and the
        # interface metric.
        total_metric = entry['ForwardMetric1'] + iface.ipv4_metric
        routes.append((
            entry['ForwardDest'],
            entry['ForwardMask'],
            _extract_ip(entry['ForwardNextHop']),
            iface,
            iface.ip,
            total_metric,
        ))
    return routes
<SYSTEM_TASK:>
Returns all IPv6 addresses found on the computer
<END_TASK>
<USER_TASK:>
Description:
def in6_getifaddr():
    """
    Returns all IPv6 addresses found on the computer
    """
    # (address, scope, interface) triples for every IPv6 address known
    # to the system.
    ifaddrs = [
        (ip, in6_getscope(ip), iface)
        for iface, ips in get_ips(v6=True).items()
        for ip in ips
    ]
    # Appends Npcap loopback if available
    if conf.use_npcap and scapy.consts.LOOPBACK_INTERFACE:
        ifaddrs.append(("::1", 0, scapy.consts.LOOPBACK_INTERFACE))
    return ifaddrs
<SYSTEM_TASK:>
Update info about a network interface according
<END_TASK>
<USER_TASK:>
Description:
def update(self, data):
    """Update info about a network interface according
    to a given dictionary. Such data is provided by get_windows_if_list
    """
    self.data = data
    self.name = data['name']
    self.description = data['description']
    self.win_index = data['win_index']
    self.guid = data['guid']
    self.mac = data['mac']
    self.ipv4_metric = data['ipv4_metric']
    self.ipv6_metric = data['ipv6_metric']
    self.ips = data['ips']
    if 'invalid' in data:
        self.invalid = data['invalid']
    # Other attributes are optional
    self._update_pcapdata()
    try:
        # Npcap loopback interface
        if conf.use_npcap:
            # Npcap exposes the loopback adapter under a dedicated pcap
            # name; if this interface is it, force the loopback identity
            # and stop here (no further IP resolution needed).
            pcap_name_loopback = _get_npcap_config("LoopbackAdapter")
            if pcap_name_loopback:  # May not be defined
                guid = _pcapname_to_guid(pcap_name_loopback)
                if self.guid == guid:
                    # https://nmap.org/npcap/guide/npcap-devguide.html
                    self.mac = "00:00:00:00:00:00"
                    self.ip = "127.0.0.1"
                    return
    except KeyError:
        pass
    try:
        # Pick the first IPv4 address (no ':') as the primary IP.
        self.ip = next(x for x in self.ips if ":" not in x)
    except StopIteration:
        pass
    try:
        # Windows native loopback interface
        # NOTE(review): if no IPv4 address was found above, self.ip may
        # not exist at all here; the AttributeError is swallowed (and
        # printed) by the except below -- confirm this is intentional.
        if not self.ip and self.name == scapy.consts.LOOPBACK_NAME:
            self.ip = "127.0.0.1"
    except (KeyError, AttributeError, NameError) as e:
        print(e)
<SYSTEM_TASK:>
Returns True if the interface is in monitor mode.
<END_TASK>
<USER_TASK:>
Description:
def ismonitor(self):
    """Returns True if the interface is in monitor mode.
    Only available with Npcap."""
    # Serve from the cache when the mode was already queried once.
    cached = self.cache_mode
    if cached is not None:
        return cached
    try:
        is_mon = self.mode() == "monitor"
    except Scapy_Exception:
        # Querying the mode failed (e.g. not supported): report False
        # without caching so a later query may still succeed.
        return False
    self.cache_mode = is_mon
    return is_mon
<SYSTEM_TASK:>
Return the first pcap device name for a given Windows
<END_TASK>
<USER_TASK:>
Description:
def dev_from_name(self, name):
    """Return the first pcap device name for a given Windows
    device name.
    """
    # Match on either the interface name or its description.
    for iface in six.itervalues(self):
        if name in (iface.name, iface.description):
            return iface
    raise ValueError("Unknown network interface %r" % name)
<SYSTEM_TASK:>
Returns the right class for a given NTP packet.
<END_TASK>
<USER_TASK:>
Description:
def _ntp_dispatcher(payload):
    """
    Returns the right class for a given NTP packet.
    """
    # By default, calling NTP() will build a NTP packet as defined in RFC 5905
    # (see the code of NTPHeader). Use NTPHeader for extension fields and MAC.
    if payload is None:
        return NTPHeader
    if len(payload) < _NTP_PACKET_MIN_SIZE:
        # Too short to be a valid NTP packet.
        return conf.raw_layer
    # The three low-order bits of the first byte carry the NTP mode:
    # 6 = control message, 7 = private/mode-7 message.
    mode = orb(payload[0]) & 7
    if mode == 6:
        return NTPControl
    if mode == 7:
        return NTPPrivate
    return NTPHeader
<SYSTEM_TASK:>
Check that the payload is long enough to build a NTP packet.
<END_TASK>
<USER_TASK:>
Description:
def pre_dissect(self, s):
    """
    Check that the payload is long enough to build a NTP packet.
    """
    if len(s) < _NTP_PACKET_MIN_SIZE:
        # Same message text as before, built in a single format call.
        err = " ({} is < _NTP_PACKET_MIN_SIZE ({})).".format(
            len(s), _NTP_PACKET_MIN_SIZE)
        raise _NTPInvalidDataException(err)
    return s
<SYSTEM_TASK:>
There is actually only one key, the CLIENT-READ-KEY or -WRITE-KEY.
<END_TASK>
<USER_TASK:>
Description:
def sslv2_derive_keys(self, key_material):
    """
    There is actually only one key, the CLIENT-READ-KEY or -WRITE-KEY.
    Note that skip_first is opposite from the one with SSLv3 derivation.
    Also, if needed, the IV should be set elsewhere.
    """
    # The first key_len bytes belong to the client-read/server-write
    # direction; the other direction skips over them.
    uses_first_half = (
        (self.connection_end == "client" and self.row == "read") or
        (self.connection_end == "server" and self.row == "write"))
    alg = self.ciphersuite.cipher_alg
    offset = 0 if uses_first_half else alg.key_len
    cipher_secret = key_material[offset:offset + alg.key_len]
    self.cipher = alg(cipher_secret)
    self.debug_repr("cipher_secret", cipher_secret)
<SYSTEM_TASK:>
This is used mostly as a way to keep the cipher state and the seq_num.
<END_TASK>
<USER_TASK:>
Description:
def snapshot(self):
    """
    This is used mostly as a way to keep the cipher state and the seq_num.
    """
    # Rebuild an equivalent connState, then graft the live cipher state
    # (and HMAC key, when present) onto it.
    params = dict(connection_end=self.connection_end,
                  read_or_write=self.row,
                  seq_num=self.seq_num,
                  compression_alg=type(self.compression),
                  ciphersuite=type(self.ciphersuite),
                  tls_version=self.tls_version)
    clone = connState(**params)
    clone.cipher = self.cipher.snapshot()
    if self.hmac:
        clone.hmac.key = self.hmac.key
    return clone
<SYSTEM_TASK:>
Ciphers key and IV are updated accordingly for 0-RTT data.
<END_TASK>
<USER_TASK:>
Description:
def compute_tls13_early_secrets(self):
    """
    Ciphers key and IV are updated accordingly for 0-RTT data.
    self.handshake_messages should be ClientHello only.
    """
    # we use the prcs rather than the pwcs in a totally arbitrary way
    if self.prcs is None:
        # too soon
        return
    hkdf = self.prcs.hkdf
    # TLS 1.3 key schedule: Early Secret = HKDF-Extract(salt=0, PSK)
    self.tls13_early_secret = hkdf.extract(None,
                                           self.tls13_psk_secret)
    # NOTE(review): these long-form labels look like pre-RFC draft labels
    # (RFC 8446 uses "ext binder"/"res binder") -- confirm they match the
    # hkdf implementation in use.
    bk = hkdf.derive_secret(self.tls13_early_secret,
                            b"external psk binder key",
                            # "resumption psk binder key",
                            b"")
    self.tls13_derived_secrets["binder_key"] = bk
    if len(self.handshake_messages) > 1:
        # these secrets are not defined in case of HRR
        return
    # Early traffic secrets are bound to the ClientHello transcript.
    cets = hkdf.derive_secret(self.tls13_early_secret,
                              b"client early traffic secret",
                              b"".join(self.handshake_messages))
    self.tls13_derived_secrets["client_early_traffic_secret"] = cets
    ees = hkdf.derive_secret(self.tls13_early_secret,
                             b"early exporter master secret",
                             b"".join(self.handshake_messages))
    self.tls13_derived_secrets["early_exporter_secret"] = ees
    # 0-RTT data is written by the client and read by the server, hence
    # the asymmetric prcs/pwcs update.
    if self.connection_end == "server":
        self.prcs.tls13_derive_keys(cets)
    elif self.connection_end == "client":
        self.pwcs.tls13_derive_keys(cets)
<SYSTEM_TASK:>
Ciphers key and IV are updated accordingly for Handshake data.
<END_TASK>
<USER_TASK:>
Description:
def compute_tls13_handshake_secrets(self):
    """
    Ciphers key and IV are updated accordingly for Handshake data.
    self.handshake_messages should be ClientHello...ServerHello.
    """
    if self.tls13_early_secret is None:
        warning("No early secret. This is abnormal.")
    hkdf = self.prcs.hkdf
    # Handshake Secret = HKDF-Extract(Early Secret, (EC)DHE)
    self.tls13_handshake_secret = hkdf.extract(self.tls13_early_secret,
                                               self.tls13_dhe_secret)
    transcript = b"".join(self.handshake_messages)
    chts = hkdf.derive_secret(self.tls13_handshake_secret,
                              b"client handshake traffic secret",
                              transcript)
    shts = hkdf.derive_secret(self.tls13_handshake_secret,
                              b"server handshake traffic secret",
                              transcript)
    self.tls13_derived_secrets["client_handshake_traffic_secret"] = chts
    self.tls13_derived_secrets["server_handshake_traffic_secret"] = shts
    # The client secret protects the read side on a server (and the
    # write side on a client); the server secret protects the opposite.
    if self.connection_end == "server":
        self.prcs.tls13_derive_keys(chts)
        self.pwcs.tls13_derive_keys(shts)
    elif self.connection_end == "client":
        self.pwcs.tls13_derive_keys(chts)
        self.prcs.tls13_derive_keys(shts)
<SYSTEM_TASK:>
Ciphers key and IV are updated accordingly for Application data.
<END_TASK>
<USER_TASK:>
Description:
def compute_tls13_traffic_secrets(self):
    """
    Ciphers key and IV are updated accordingly for Application data.
    self.handshake_messages should be ClientHello...ServerFinished.
    """
    hkdf = self.prcs.hkdf
    # Master Secret = HKDF-Extract(Handshake Secret, 0)
    self.tls13_master_secret = hkdf.extract(self.tls13_handshake_secret,
                                            None)
    # Generation-0 application traffic secrets; later generations are
    # appended to these lists by compute_tls13_next_traffic_secrets().
    cts0 = hkdf.derive_secret(self.tls13_master_secret,
                              b"client application traffic secret",
                              b"".join(self.handshake_messages))
    self.tls13_derived_secrets["client_traffic_secrets"] = [cts0]
    sts0 = hkdf.derive_secret(self.tls13_master_secret,
                              b"server application traffic secret",
                              b"".join(self.handshake_messages))
    self.tls13_derived_secrets["server_traffic_secrets"] = [sts0]
    es = hkdf.derive_secret(self.tls13_master_secret,
                            b"exporter master secret",
                            b"".join(self.handshake_messages))
    self.tls13_derived_secrets["exporter_secret"] = es
    # Only the server-to-client direction keys are installed here; the
    # client-direction calls are deliberately commented out (presumably
    # installed elsewhere once the client Finished is processed --
    # confirm against the session state machine).
    if self.connection_end == "server":
        # self.prcs.tls13_derive_keys(cts0)
        self.pwcs.tls13_derive_keys(sts0)
    elif self.connection_end == "client":
        # self.pwcs.tls13_derive_keys(cts0)
        self.prcs.tls13_derive_keys(sts0)
<SYSTEM_TASK:>
self.handshake_messages should be ClientHello...ClientFinished.
<END_TASK>
<USER_TASK:>
Description:
def compute_tls13_resumption_secret(self):
    """
    self.handshake_messages should be ClientHello...ClientFinished.
    """
    # Either connState exposes the same hkdf; pick it by our role.
    if self.connection_end == "server":
        hkdf = self.prcs.hkdf
    elif self.connection_end == "client":
        hkdf = self.pwcs.hkdf
    transcript = b"".join(self.handshake_messages)
    resumption = hkdf.derive_secret(self.tls13_master_secret,
                                    b"resumption master secret",
                                    transcript)
    self.tls13_derived_secrets["resumption_secret"] = resumption
<SYSTEM_TASK:>
Ciphers key and IV are updated accordingly.
<END_TASK>
<USER_TASK:>
Description:
def compute_tls13_next_traffic_secrets(self):
    """
    Ciphers key and IV are updated accordingly.

    Derives the next generation of both application traffic secrets
    (traffic_secret_N+1 = HKDF-Expand-Label(traffic_secret_N, ...)) and
    installs the resulting keys on the matching read/write states.
    """
    hkdf = self.prcs.hkdf
    hl = hkdf.hash.digest_size
    cts = self.tls13_derived_secrets["client_traffic_secrets"]
    ctsN = cts[-1]
    ctsN_1 = hkdf.expand_label(ctsN, "application traffic secret", "", hl)
    cts.append(ctsN_1)
    # BUGFIX: the server secret must be derived from the latest *server*
    # traffic secret and appended to the server list (the previous code
    # re-derived from the client secret and appended to the client list,
    # so "server_traffic_secrets" was never updated).
    sts = self.tls13_derived_secrets["server_traffic_secrets"]
    stsN = sts[-1]
    stsN_1 = hkdf.expand_label(stsN, "application traffic secret", "", hl)
    sts.append(stsN_1)
    if self.connection_end == "server":
        self.prcs.tls13_derive_keys(ctsN_1)
        self.pwcs.tls13_derive_keys(stsN_1)
    elif self.connection_end == "client":
        self.pwcs.tls13_derive_keys(ctsN_1)
        self.prcs.tls13_derive_keys(stsN_1)
<SYSTEM_TASK:>
Guess the correct LLS class for a given payload
<END_TASK>
<USER_TASK:>
Description:
def _LLSGuessPayloadClass(p, **kargs):
    """ Guess the correct LLS class for a given payload """
    if len(p) < 3:
        # Too short to carry a TLV type field: fall back to a raw layer.
        return conf.raw_layer(p, **kargs)
    typ = struct.unpack("!H", p[0:2])[0]
    clsname = _OSPF_LLSclasses.get(typ, "LLS_Generic_TLV")
    return globals()[clsname](p, **kargs)
<SYSTEM_TASK:>
Guess the correct OSPFv3 LSA class for a given payload
<END_TASK>
<USER_TASK:>
Description:
def _OSPFv3_LSAGuessPayloadClass(p, **kargs):
    """ Guess the correct OSPFv3 LSA class for a given payload """
    if len(p) < 6:
        # Too short to carry the LS type field: fall back to a raw layer.
        return conf.raw_layer(p, **kargs)
    typ = struct.unpack("!H", p[2:4])[0]
    clsname = _OSPFv3_LSclasses.get(typ, "Raw")
    return globals()[clsname](p, **kargs)
<SYSTEM_TASK:>
Return MAC address corresponding to a given IP address
<END_TASK>
<USER_TASK:>
Description:
def getmacbyip(ip, chainCC=0):
    """Return MAC address corresponding to a given IP address"""
    if isinstance(ip, Net):
        # take the first address of the network
        ip = next(iter(ip))
    # normalize the textual form of the address
    ip = inet_ntoa(inet_aton(ip or "0.0.0.0"))
    tmp = [orb(e) for e in inet_aton(ip)]
    if (tmp[0] & 0xf0) == 0xe0:  # mcast @
        # IPv4 multicast maps algorithmically onto 01:00:5e + the low
        # 23 bits of the address; no ARP needed.
        return "01:00:5e:%.2x:%.2x:%.2x" % (tmp[1] & 0x7f, tmp[2], tmp[3])
    iff, _, gw = conf.route.route(ip)
    # loopback and broadcast destinations resolve to the broadcast MAC
    if ((iff == consts.LOOPBACK_INTERFACE) or (ip == conf.route.get_if_bcast(iff))):  # noqa: E501
        return "ff:ff:ff:ff:ff:ff"
    if gw != "0.0.0.0":
        # off-link destination: resolve the gateway's MAC instead
        ip = gw
    mac = conf.netcache.arp_cache.get(ip)
    if mac:
        return mac
    try:
        # Broadcast a who-has ARP request on the outgoing interface.
        res = srp1(Ether(dst=ETHER_BROADCAST) / ARP(op="who-has", pdst=ip),
                   type=ETH_P_ARP,
                   iface=iff,
                   timeout=2,
                   verbose=0,
                   chainCC=chainCC,
                   nofilter=1)
    except Exception:
        # any send/receive failure is reported as "unresolved"
        return None
    if res is not None:
        mac = res.payload.hwsrc
        # remember the answer for subsequent lookups
        conf.netcache.arp_cache[ip] = mac
        return mac
    return None
<SYSTEM_TASK:>
Try to guess if target is in Promisc mode. The target is provided by its ip.
<END_TASK>
<USER_TASK:>
Description:
def is_promisc(ip, fake_bcast="ff:ff:00:00:00:00", **kargs):
    """Try to guess if target is in Promisc mode. The target is provided by its ip."""  # noqa: E501
    # A host in promiscuous mode answers ARP requests addressed to a
    # bogus (non-broadcast) destination MAC; a normal NIC drops them.
    probe = Ether(dst=fake_bcast) / ARP(op="who-has", pdst=ip)
    reply = srp1(probe, type=ETH_P_ARP, iface_hint=ip, timeout=1,
                 verbose=0, **kargs)
    return reply is not None
<SYSTEM_TASK:>
This function decompresses a string s, starting
<END_TASK>
<USER_TASK:>
Description:
def dns_get_str(s, pointer=0, pkt=None, _fullpacket=False):
    """This function decompresses a string s, starting
    from the given pointer.
    :param s: the string to decompress
    :param pointer: first pointer on the string (default: 0)
    :param pkt: (optional) an InheritOriginDNSStrPacket packet
    :returns: (decoded_string, end_index, left_string)
    """
    # The _fullpacket parameter is reserved for scapy. It indicates
    # that the string provided is the full dns packet, and thus
    # will be the same than pkt._orig_str. The "Cannot decompress"
    # error will not be prompted if True.
    max_length = len(s)
    # The result = the extracted name
    name = b""
    # Will contain the index after the pointer, to be returned
    after_pointer = None
    processed_pointers = []  # Used to check for decompression loops
    # Analyse given pkt
    if pkt and hasattr(pkt, "_orig_s") and pkt._orig_s:
        s_full = pkt._orig_s
    else:
        s_full = None
    bytes_left = None
    while True:
        if abs(pointer) >= max_length:
            warning("DNS RR prematured end (ofs=%i, len=%i)" % (pointer,
                                                                len(s)))
            break
        cur = orb(s[pointer])  # get pointer value
        pointer += 1  # make pointer go forward
        if cur & 0xc0:  # Label pointer
            # RFC 1035 4.1.4: two high bits set => 14-bit compression
            # pointer into the message.
            if after_pointer is None:
                # after_pointer points to where the remaining bytes start,
                # as pointer will follow the jump token
                after_pointer = pointer + 1
            if pointer >= max_length:
                warning("DNS incomplete jump token at (ofs=%i)" % pointer)
                break
            # Follow the pointer
            # (the -12 rebases the message offset past the DNS header,
            # since s usually starts after it)
            pointer = ((cur & ~0xc0) << 8) + orb(s[pointer]) - 12
            if pointer in processed_pointers:
                warning("DNS decompression loop detected")
                break
            if not _fullpacket:
                # Do we have access to the whole packet ?
                if s_full:
                    # Yes -> use it to continue
                    bytes_left = s[after_pointer:]
                    s = s_full
                    max_length = len(s)
                    _fullpacket = True
                else:
                    # No -> abort
                    raise Scapy_Exception("DNS message can't be compressed" +
                                          "at this point!")
            processed_pointers.append(pointer)
            continue
        elif cur > 0:  # Label
            # cur = length of the string
            name += s[pointer:pointer + cur] + b"."
            pointer += cur
        else:
            # zero-length label: end of the name
            break
    if after_pointer is not None:
        # Return the real end index (not the one we followed)
        pointer = after_pointer
    if bytes_left is None:
        bytes_left = s[pointer:]
    # name, end_index, remaining
    return name, pointer, bytes_left
<SYSTEM_TASK:>
Encodes a bytes string into the DNS format
<END_TASK>
<USER_TASK:>
Description:
def dns_encode(x, check_built=False):
    """Encodes a bytes string into the DNS format
    :param x: the string
    :param check_built: detect already-built strings and ignore them
    :returns: the encoded bytes string
    """
    # The empty name / root name encodes to a single NUL byte.
    if not x or x == b".":
        return b"\x00"
    if check_built and b"." not in x and (
            orb(x[-1]) == 0 or (orb(x[-2]) & 0xc0) == 0xc0):
        # Already in wire format (NUL-terminated, or ending with a
        # compression pointer): return it untouched.
        return x
    encoded = b""
    for label in x.split(b"."):
        # Labels longer than 63 bytes cannot be encoded; truncate them.
        label = label[:63]
        encoded += chb(len(label)) + label
    if not encoded.endswith(b"\x00"):
        encoded += b"\x00"
    return encoded
<SYSTEM_TASK:>
This function compresses a DNS packet according to compression rules.
<END_TASK>
<USER_TASK:>
Description:
def dns_compress(pkt):
    """This function compresses a DNS packet according to compression rules.
    """
    if DNS not in pkt:
        raise Scapy_Exception("Can only compress DNS layers")
    # Work on a copy so the caller's packet is left untouched.
    pkt = pkt.copy()
    dns_pkt = pkt.getlayer(DNS)
    # Uncompressed build used to locate each encoded name's byte offset.
    build_pkt = raw(dns_pkt)

    def field_gen(dns_pkt):
        """Iterates through all DNS strings that can be compressed"""
        # Walk the four record sections; types 2/5/12 (NS, CNAME, PTR)
        # carry a domain name in their rdata and may also be compressed.
        for lay in [dns_pkt.qd, dns_pkt.an, dns_pkt.ns, dns_pkt.ar]:
            if lay is None:
                continue
            current = lay
            while not isinstance(current, NoPayload):
                if isinstance(current, InheritOriginDNSStrPacket):
                    for field in current.fields_desc:
                        if isinstance(field, DNSStrField) or \
                            (isinstance(field, MultipleTypeField) and
                             current.type in [2, 5, 12]):
                            # Get the associated data and store it accordingly  # noqa: E501
                            dat = current.getfieldval(field.name)
                            yield current, field.name, dat
                current = current.payload

    def possible_shortens(dat):
        """Iterates through all possible compression parts in a DNS string"""
        # Longest suffix first: the whole name, then each shorter
        # dot-suffix of it.
        yield dat
        for x in range(1, dat.count(b".")):
            yield dat.split(b".", x)[x]

    # data maps each candidate (sub)name to its first occurrence (with
    # the packed pointer to it) followed by the fields to rewrite.
    data = {}
    # Bytes saved so far; offsets of later occurrences shift by this.
    burned_data = 0
    for current, name, dat in field_gen(dns_pkt):
        for part in possible_shortens(dat):
            # Encode the data
            encoded = dns_encode(part, check_built=True)
            if part not in data:
                # We have no occurrence of such data, let's store it as a
                # possible pointer for future strings.
                # We get the index of the encoded data
                index = build_pkt.index(encoded)
                index -= burned_data
                # The following is used to build correctly the pointer
                fb_index = ((index >> 8) | 0xc0)
                sb_index = index - (256 * (fb_index - 0xc0))
                pointer = chb(fb_index) + chb(sb_index)
                data[part] = [(current, name, pointer)]
            else:
                # This string already exists, let's mark the current field
                # with it, so that it gets compressed
                data[part].append((current, name))
                # calculate spared space
                burned_data += len(encoded) - 2
                break
    # Apply compression rules
    for ck in data:
        # compression_key is a DNS string
        replacements = data[ck]
        # replacements is the list of all tuples (layer, field name)
        # where this string was found
        replace_pointer = replacements.pop(0)[2]
        # replace_pointer is the packed pointer that should replace
        # those strings. Note that pop remove it from the list
        for rep in replacements:
            # setfieldval edits the value of the field in the layer
            val = rep[0].getfieldval(rep[1])
            assert val.endswith(ck)
            # keep the leading labels, drop the trailing NUL, then
            # append the 2-byte pointer
            kept_string = dns_encode(val[:-len(ck)], check_built=True)[:-1]
            new_val = kept_string + replace_pointer
            rep[0].setfieldval(rep[1], new_val)
            try:
                # force rdlen to be recomputed on next build
                del(rep[0].rdlen)
            except AttributeError:
                pass
    # End of the compression algorithm
    # Destroy the previous DNS layer if needed
    if not isinstance(pkt, DNS) and pkt.getlayer(DNS).underlayer:
        pkt.getlayer(DNS).underlayer.remove_payload()
        return pkt / dns_pkt
    return dns_pkt
<SYSTEM_TASK:>
Unpack the internal representation.
<END_TASK>
<USER_TASK:>
Description:
def _convert_seconds(self, packed_seconds):
"""Unpack the internal representation.""" |
seconds = struct.unpack("!H", packed_seconds[:2])[0]
seconds += struct.unpack("!I", packed_seconds[2:])[0]
return seconds |
<SYSTEM_TASK:>
Convert the number of seconds since 1-Jan-70 UTC to the packed
<END_TASK>
<USER_TASK:>
Description:
def h2i(self, pkt, seconds):
    """Convert seconds since 1-Jan-70 UTC into the packed 48-bit wire form
    (16-bit high part, 32-bit low part, network byte order)."""
    if seconds is None:
        seconds = 0
    high, low = divmod(seconds, 1 << 32)
    return struct.pack("!HI", high & 0xFFFF, low)
<SYSTEM_TASK:>
Convert the internal representation to a nice one using the RFC
<END_TASK>
<USER_TASK:>
Description:
def i2repr(self, pkt, packed_seconds):
    """Render the packed timestamp in the human-readable RFC format."""
    secs = self._convert_seconds(packed_seconds)
    return time.strftime("%a %b %d %H:%M:%S %Y", time.gmtime(secs))
<SYSTEM_TASK:>
Sends and receive an ICMPv6 Neighbor Solicitation message
<END_TASK>
<USER_TASK:>
Description:
def neighsol(addr, src, iface, timeout=1, chainCC=0):
    """Sends and receive an ICMPv6 Neighbor Solicitation message

    An NS for `addr` is emitted on `iface`, with `src` as source address,
    to the solicited-node multicast address derived from `addr`, in order
    to get the MAC address of that neighbor.

    Returns the answer (ethernet frame) or None if no answer is received
    within `timeout` seconds.
    """
    nsma = in6_getnsma(inet_pton(socket.AF_INET6, addr))
    d = inet_ntop(socket.AF_INET6, nsma)
    dm = in6_getnsmac(nsma)
    p = Ether(dst=dm) / IPv6(dst=d, src=src, hlim=255)
    p /= ICMPv6ND_NS(tgt=addr)
    p /= ICMPv6NDOptSrcLLAddr(lladdr=get_if_hwaddr(iface))
    # Bug fix: honor the caller-supplied timeout (it used to be hard-coded
    # to 1 second, silently ignoring the parameter).
    res = srp1(p, type=ETH_P_IPV6, iface=iface, timeout=timeout, verbose=0,
               chainCC=chainCC)
    return res
<SYSTEM_TASK:>
Returns the MAC address corresponding to an IPv6 address
<END_TASK>
<USER_TASK:>
Description:
def getmacbyip6(ip6, chainCC=0):
    """Return the MAC address corresponding to an IPv6 address.

    Multicast addresses are mapped arithmetically; otherwise the route is
    looked up, the neighbor cache consulted, and as a last resort a
    Neighbor Solicitation is sent (chainCC is forwarded to the sending
    function). Returns None when resolution fails.
    """
    if isinstance(ip6, Net6):
        ip6 = str(ip6)
    # Multicast: the MAC is derived directly from the address
    if in6_ismaddr(ip6):
        return in6_getnsmac(inet_pton(socket.AF_INET6, ip6))
    iff, a, nh = conf.route6.route(ip6)
    if iff == scapy.consts.LOOPBACK_INTERFACE:
        return "ff:ff:ff:ff:ff:ff"
    if nh != '::':
        ip6 = nh  # resolve the next hop instead of the final destination
    cached = conf.netcache.in6_neighbor.get(ip6)
    if cached:
        return cached
    res = neighsol(ip6, a, iff, chainCC=chainCC)
    if res is None:
        return None
    if ICMPv6NDOptDstLLAddr in res:
        mac = res[ICMPv6NDOptDstLLAddr].lladdr
    else:
        mac = res.src
    conf.netcache.in6_neighbor[ip6] = mac
    return mac
<SYSTEM_TASK:>
Internal generic helper accepting a specific callback as first argument,
<END_TASK>
<USER_TASK:>
Description:
def _NDP_Attack_DAD_DoS(reply_callback, iface=None, mac_src_filter=None,
                        tgt_filter=None, reply_mac=None):
    """
    Internal generic helper accepting a specific callback as first argument,
    for NS or NA reply. See the two specific functions below.

    :param reply_callback: invoked as reply_callback(req, reply_mac, iface)
        for every sniffed DAD Neighbor Solicitation that passes the filters.
    :param iface: interface to sniff and reply on (default: conf.iface).
    :param mac_src_filter: if set, only NS from this source MAC trigger replies.
    :param tgt_filter: if set, only NS for this (packed) target IPv6 address
        trigger replies.
    :param reply_mac: source MAC for the replies (default: MAC of iface);
        it is also excluded from the sniff filter so we do not react to our
        own traffic.
    """
    def is_request(req, mac_src_filter, tgt_filter):
        """
        Return 1 if packet req is a DAD NS matching the filters, else 0.
        """
        # Those simple checks are based on Section 5.4.2 of RFC 4862
        if not (Ether in req and IPv6 in req and ICMPv6ND_NS in req):
            return 0
        # Get and compare the MAC address
        mac_src = req[Ether].src
        if mac_src_filter and mac_src != mac_src_filter:
            return 0
        # Source must be the unspecified address (a DAD probe)
        if req[IPv6].src != "::":
            return 0
        # Check destination is the link-local solicited-node multicast
        # address associated with target address in received NS
        tgt = inet_pton(socket.AF_INET6, req[ICMPv6ND_NS].tgt)
        if tgt_filter and tgt != tgt_filter:
            return 0
        received_snma = inet_pton(socket.AF_INET6, req[IPv6].dst)
        expected_snma = in6_getnsma(tgt)
        if received_snma != expected_snma:
            return 0
        return 1
    if not iface:
        iface = conf.iface
    # To prevent sniffing our own traffic
    if not reply_mac:
        reply_mac = get_if_hwaddr(iface)
    sniff_filter = "icmp6 and not ether src %s" % reply_mac
    # Blocking sniff loop: every matching NS is answered via reply_callback
    sniff(store=0,
          filter=sniff_filter,
          lfilter=lambda x: is_request(x, mac_src_filter, tgt_filter),
          prn=lambda x: reply_callback(x, reply_mac, iface),
          iface=iface)
<SYSTEM_TASK:>
Perform the DAD DoS attack using NS described in section 4.1.3 of RFC
<END_TASK>
<USER_TASK:>
Description:
def NDP_Attack_DAD_DoS_via_NS(iface=None, mac_src_filter=None, tgt_filter=None,
                              reply_mac=None):
    """
    Perform the DAD DoS attack using NS described in section 4.1.3 of RFC
    3756. This is done by listening incoming NS messages sent from the
    unspecified address and sending a NS reply for the target address,
    leading the peer to believe that another node is also performing DAD
    for that address.

    By default, the fake NS sent to create the DoS uses:
     - as target address the target address found in received NS.
     - as IPv6 source address: the unspecified address (::).
     - as IPv6 destination address: the link-local solicited-node multicast
       address derived from the target address in received NS.
     - the mac address of the interface as source (or reply_mac, see below).
     - the multicast mac address derived from the solicited node multicast
       address used as IPv6 destination address.

    Following arguments can be used to change the behavior:

    iface: a specific interface (e.g. "eth0") of the system on which the
         DoS should be launched. If None is provided conf.iface is used.

    mac_src_filter: a mac address (e.g "00:13:72:8c:b5:69") to filter on.
         Only NS messages received from this source will trigger replies.
         This allows limiting the effects of the DoS to a single target by
         filtering on its mac address. The default value is None: the DoS
         is not limited to a specific mac address.

    tgt_filter: Same as previous but for a specific target IPv6 address for
         received NS. If the target address in the NS message (not the IPv6
         destination address) matches that address, then a fake reply will
         be sent, i.e. the emitter will be a target of the DoS.

    reply_mac: allow specifying a specific source mac address for the reply,
         i.e. to prevent the use of the mac address of the interface.
    """
    def ns_reply_callback(req, reply_mac, iface):
        """
        Callback that replies to a DAD NS by sending a similar NS,
        pretending we are doing DAD for the same target address.
        """
        # Let's build a reply and send it
        mac = req[Ether].src
        dst = req[IPv6].dst
        tgt = req[ICMPv6ND_NS].tgt
        rep = Ether(src=reply_mac) / IPv6(src="::", dst=dst) / ICMPv6ND_NS(tgt=tgt)  # noqa: E501
        sendp(rep, iface=iface, verbose=0)
        print("Reply NS for target address %s (received from %s)" % (tgt, mac))
    # Delegate sniffing/filtering to the generic DAD DoS helper above
    _NDP_Attack_DAD_DoS(ns_reply_callback, iface, mac_src_filter,
                        tgt_filter, reply_mac)
<SYSTEM_TASK:>
Used to select the L2 address
<END_TASK>
<USER_TASK:>
Description:
def route(self):
    """Resolve the routing triplet used to select the L2 destination."""
    target = self.dst
    if isinstance(target, Gen):
        # A generator of destinations: route according to its first element
        target = next(iter(target))
    return conf.route6.route(target)
<SYSTEM_TASK:>
Compute the 'sources_number' field when needed
<END_TASK>
<USER_TASK:>
Description:
def post_build(self, packet, payload):
    """Patch 'sources_number' (bytes 26-27) with len(sources) when unset."""
    if self.sources_number is None:
        count = struct.pack("!H", len(self.sources))
        packet = packet[:26] + count + packet[28:]
    return _ICMPv6.post_build(self, packet, payload)
<SYSTEM_TASK:>
Compute the 'records_number' field when needed
<END_TASK>
<USER_TASK:>
Description:
def post_build(self, packet, payload):
    """Patch 'records_number' (bytes 6-7) with len(records) when unset."""
    if self.records_number is None:
        count = struct.pack("!H", len(self.records))
        packet = packet[:6] + count + packet[8:]
    return _ICMPv6.post_build(self, packet, payload)
<SYSTEM_TASK:>
Add the endianness to the format
<END_TASK>
<USER_TASK:>
Description:
def set_endianess(self, pkt):
    """Propagate the packet's endianness into the wrapped field's format."""
    end = self.endianess_from(pkt)
    if not (isinstance(end, str) and end):
        return
    if isinstance(self.fld, UUIDField):
        # UUIDs carry their own byte-order flag instead of a struct format
        if end == '<':
            self.fld.uuid_fmt = UUIDField.FORMAT_LE
        else:
            self.fld.uuid_fmt = UUIDField.FORMAT_BE
    else:
        # fld.fmt always starts with an order specifier (cf. field init)
        self.fld.fmt = end[0] + self.fld.fmt[1:]
<SYSTEM_TASK:>
add the field with endianness to the buffer
<END_TASK>
<USER_TASK:>
Description:
def addfield(self, pkt, buf, val):
    """Append val to buf after syncing the wrapped field's endianness."""
    inner = self.fld
    self.set_endianess(pkt)
    return inner.addfield(pkt, buf, val)
<SYSTEM_TASK:>
dispatch_hook to choose among different registered payloads
<END_TASK>
<USER_TASK:>
Description:
def dispatch_hook(cls, _pkt, _underlayer=None, *args, **kargs):
    """Pick the first registered payload class able to handle this packet;
    fall back to Raw when none claims it."""
    klass = next(
        (k for k in cls._payload_class
         if hasattr(k, "can_handle") and k.can_handle(_pkt, _underlayer)),
        None)
    if klass is not None:
        return klass
    print("DCE/RPC payload class not found or undefined (using Raw)")
    return Raw
<SYSTEM_TASK:>
_parse_multi_byte parses x as a multibyte representation to get the
<END_TASK>
<USER_TASK:>
Description:
def _parse_multi_byte(self, s):
    # type: (str) -> int
    """ _parse_multi_byte parses x as a multibyte representation to get the
    int value of this AbstractUVarIntField.
    @param str s: the multibyte string to parse.
    @return int: The parsed int value represented by this AbstractUVarIntField.  # noqa: E501
    @raise: AssertionError
    @raise: Scapy_Exception if the input value encodes an integer larger than 1<<64  # noqa: E501
    """
    assert(len(s) >= 2)
    tmp_len = len(s)
    value = 0
    i = 1
    # s[0] is the prefix byte; continuation bytes start at index 1
    byte = orb(s[i])
    # For CPU sake, stops at an arbitrary large number!
    max_value = 1 << 64
    # As long as the MSB is set, another byte must be read (RFC 7541 par5.1)
    while byte & 0x80:
        # Strip the continuation bit and accumulate 7 payload bits per byte,
        # least-significant group first
        value += (byte ^ 0x80) << (7 * (i - 1))
        if value > max_value:
            raise error.Scapy_Exception(
                'out-of-bound value: the string encodes a value that is too large (>2^{64}): {}'.format(value)  # noqa: E501
            )
        i += 1
        assert i < tmp_len, 'EINVAL: x: out-of-bound read: the string ends before the AbstractUVarIntField!'  # noqa: E501
        byte = orb(s[i])
    # Final byte (MSB clear) terminates the varint
    value += byte << (7 * (i - 1))
    # Add back the all-ones prefix value that signalled the multibyte form
    value += self._max_value
    assert(value >= 0)
    return value
<SYSTEM_TASK:>
Computes the value of this field based on the provided packet and
<END_TASK>
<USER_TASK:>
Description:
def _compute_value(self, pkt):
# type: (packet.Packet) -> int
""" Computes the value of this field based on the provided packet and
the length_of field and the adjust callback
@param packet.Packet pkt: the packet from which is computed this field value. # noqa: E501
@return int: the computed value for this field.
@raise KeyError: the packet nor its payload do not contain an attribute
with the length_of name.
@raise AssertionError
@raise KeyError if _length_of is not one of pkt fields
""" |
fld, fval = pkt.getfield_and_val(self._length_of)
val = fld.i2len(pkt, fval)
ret = self._adjust(val)
assert(ret >= 0)
return ret |
<SYSTEM_TASK:>
huffman_encode_char assumes that the static_huffman_tree was
<END_TASK>
<USER_TASK:>
Description:
def _huffman_encode_char(cls, c):
    # type: (Union[str, EOS]) -> Tuple[int, int]
    """Look up the Huffman (codeword, bitlength) pair for one symbol.

    Assumes the static Huffman table was previously initialized; the EOS
    sentinel maps to the last table entry.
    """
    if isinstance(c, EOS):
        return cls.static_huffman_code[-1]
    assert isinstance(c, int) or len(c) == 1
    return cls.static_huffman_code[orb(c)]
<SYSTEM_TASK:>
huffman_encode returns the bitstring and the bitlength of the
<END_TASK>
<USER_TASK:>
Description:
def huffman_encode(cls, s):
    # type: (str) -> Tuple[int, int]
    """Huffman-encode the string s.

    Returns (bitstring-as-int, bitlength), padded up to the next byte
    boundary with the most significant bits of the EOS symbol.
    """
    bits, nbits = 0, 0
    for sym in s:
        code, code_len = cls._huffman_encode_char(sym)
        bits = (bits << code_len) | code
        nbits += code_len
    pad = -nbits % 8
    if pad:
        eos_code, eos_len = cls._huffman_encode_char(EOS())
        bits = (bits << pad) | (eos_code >> (eos_len - pad))
        nbits += pad
    assert bits >= 0
    assert nbits >= 0
    return bits, nbits
<SYSTEM_TASK:>
huffman_decode decodes the bitstring provided as parameters.
<END_TASK>
<USER_TASK:>
Description:
def huffman_decode(cls, i, ibl):
    # type: (int, int) -> str
    """ huffman_decode decodes the bitstring provided as parameters.
    @param int i: the bitstring to decode
    @param int ibl: the bitlength of i
    @return str: the string decoded from the bitstring
    @raise AssertionError, InvalidEncodingException
    """
    assert(i >= 0)
    assert(ibl >= 0)
    # Lazily build the decoding tree on first use
    if isinstance(cls.static_huffman_tree, type(None)):
        cls.huffman_compute_decode_tree()
    assert(not isinstance(cls.static_huffman_tree, type(None)))
    s = []
    j = 0
    interrupted = False
    cur = cls.static_huffman_tree
    cur_sym = 0      # bits of the symbol currently being decoded
    cur_sym_bl = 0   # bitlength of cur_sym
    while j < ibl:
        # Read the next bit, MSB first, and walk down the tree
        b = (i >> (ibl - j - 1)) & 1
        cur_sym = (cur_sym << 1) + b
        cur_sym_bl += 1
        elmt = cur[b]
        if isinstance(elmt, HuffmanNode):
            # Internal node: keep walking; a symbol is now in progress
            interrupted = True
            cur = elmt
            if isinstance(cur, type(None)):
                raise AssertionError()
        elif isinstance(elmt, EOS):
            raise InvalidEncodingException('Huffman decoder met the full EOS symbol')  # noqa: E501
        elif isinstance(elmt, bytes):
            # Leaf: emit the decoded byte and restart from the root
            interrupted = False
            s.append(elmt)
            cur = cls.static_huffman_tree
            cur_sym = 0
            cur_sym_bl = 0
        else:
            raise InvalidEncodingException('Should never happen, so incidentally it will')  # noqa: E501
        j += 1
    if interrupted:
        # Interrupted values true if the bitstring ends in the middle of a
        # symbol; this symbol must be, according to RFC7541 par5.2 the MSB
        # of the EOS symbol
        if cur_sym_bl > 7:
            raise InvalidEncodingException('Huffman decoder is detecting padding longer than 7 bits')  # noqa: E501
        eos_symbol = cls.static_huffman_code[-1]
        eos_msb = eos_symbol[0] >> (eos_symbol[1] - cur_sym_bl)
        if eos_msb != cur_sym:
            raise InvalidEncodingException('Huffman decoder is detecting unexpected padding format')  # noqa: E501
    return b''.join(s)
<SYSTEM_TASK:>
self_build is overridden because type and len are determined at
<END_TASK>
<USER_TASK:>
Description:
def self_build(self, field_pos_list=None):
    # type: (Any) -> str
    """Build the string; 'type' is decided at build time from the internal
    type of the data field: 1 (Huffman) for HPackZString, else 0 (literal).
    """
    if self.getfieldval('type') is None:
        data = self.getfieldval('data')
        self.type = 1 if isinstance(data, HPackZString) else 0
    return super(HPackHdrString, self).self_build(field_pos_list)
<SYSTEM_TASK:>
dispatch_hook returns the subclass of HPackHeaders that must be used
<END_TASK>
<USER_TASK:>
Description:
def dispatch_hook(cls, s=None, *_args, **_kwds):
    # type: (Optional[str], *Any, **Any) -> base_classes.Packet_metaclass
    """Select the HPackHeaders subclass matching the leading flag bits of
    the string to dissect (RFC 7541 header field representations)."""
    if s is None:
        return config.conf.raw_layer
    first = orb(s[0])
    if first & 0x80:
        return HPackIndexedHdr
    if first & 0x40:
        return HPackLitHdrFldWithIncrIndexing
    if first & 0x20:
        return HPackDynamicSizeUpdate
    return HPackLitHdrFldWithoutIndexing
<SYSTEM_TASK:>
get_data_len computes the length of the data field
<END_TASK>
<USER_TASK:>
Description:
def get_data_len(self):
    # type: () -> int
    """Length of the data part of the HTTP/2 frame: total pre-dissect
    length minus the padlen field itself and the actual padding.

    Asserts the result is non-negative.
    """
    pad_len = self.getfieldval('padlen')
    fld, fval = self.getfield_and_val('padlen')
    data_len = self.s_len - fld.i2len(self, fval) - pad_len
    assert data_len >= 0
    return data_len
<SYSTEM_TASK:>
_reduce_dynamic_table evicts entries from the dynamic table until it
<END_TASK>
<USER_TASK:>
Description:
def _reduce_dynamic_table(self, new_entry_size=0):
# type: (int) -> None
"""_reduce_dynamic_table evicts entries from the dynamic table until it
fits in less than the current size limit. The optional parameter,
new_entry_size, allows the resize to happen so that a new entry of this
size fits in.
@param int new_entry_size: if called before adding a new entry, the size of the new entry in bytes (following # noqa: E501
the RFC7541 definition of the size of an entry)
@raise AssertionError
""" |
assert(new_entry_size >= 0)
cur_sz = len(self)
dyn_tbl_sz = len(self._dynamic_table)
while dyn_tbl_sz > 0 and cur_sz + new_entry_size > self._dynamic_table_max_size: # noqa: E501
last_elmt_sz = len(self._dynamic_table[-1])
self._dynamic_table.pop()
dyn_tbl_sz -= 1
cur_sz -= last_elmt_sz |
<SYSTEM_TASK:>
register adds to this table the instances of
<END_TASK>
<USER_TASK:>
Description:
def register(self, hdrs):
    # type: (Union[HPackLitHdrFldWithIncrIndexing, H2Frame, List[HPackHeaders]]) -> None  # noqa: E501
    """register adds to this table the instances of
    HPackLitHdrFldWithIncrIndexing provided as parameters.

    A H2Frame with a H2HeadersFrame payload can be provided, as much as a
    python list of HPackHeaders or a single HPackLitHdrFldWithIncrIndexing
    instance.
    @param HPackLitHdrFldWithIncrIndexing|H2Frame|list of HPackHeaders hdrs: the header(s) to register  # noqa: E501
    @raise AssertionError
    """
    # Normalize the input into a list of incremental-indexing headers only
    if isinstance(hdrs, H2Frame):
        hdrs = [hdr for hdr in hdrs.payload.hdrs if isinstance(hdr, HPackLitHdrFldWithIncrIndexing)]  # noqa: E501
    elif isinstance(hdrs, HPackLitHdrFldWithIncrIndexing):
        hdrs = [hdrs]
    else:
        hdrs = [hdr for hdr in hdrs if isinstance(hdr, HPackLitHdrFldWithIncrIndexing)]  # noqa: E501
    for hdr in hdrs:
        # index == 0 means the header name is given literally; otherwise it
        # references an existing table entry by index
        if hdr.index == 0:
            hdr_name = hdr.hdr_name.getfieldval('data').origin()
        else:
            idx = int(hdr.index)
            hdr_name = self[idx].name()
        hdr_value = hdr.hdr_value.getfieldval('data').origin()
        # Note: we do not delete any existing hdrentry with the same names
        # and values, as dictated by RFC 7541 par2.3.2
        entry = HPackHdrEntry(hdr_name, hdr_value)
        # According to RFC7541 par4.4, "Before a new entry is added to
        # the dynamic table, entries are evicted
        # from the end of the dynamic table until the size of the dynamic
        # table is less than or equal to (maximum size - new entry size)
        # or until the table is empty"
        # Also, "It is not an error to attempt to add an entry that is
        # larger than the maximum size; an attempt to add an entry larger
        # than the maximum size causes the table to be emptied of all
        # existing entries and results in an empty table"
        # For this reason, we first call the _reduce_dynamic_table and
        # then throw an assertion error if the new entry does not fit in
        new_entry_len = len(entry)
        self._reduce_dynamic_table(new_entry_len)
        assert(new_entry_len <= self._dynamic_table_max_size)
        # Newest entries live at the front of the dynamic table
        self._dynamic_table.insert(0, entry)
<SYSTEM_TASK:>
get_idx_by_name returns the index of a matching registered header
<END_TASK>
<USER_TASK:>
Description:
def get_idx_by_name(self, name):
    # type: (str) -> Optional[int]
    """Return the table index of a registered header with the given name
    (case-insensitive), or None if absent.

    Static entries are preferred; when several static entries share the
    name, the first one iterated (lowest index) wins. Dynamic entries are
    numbered after the last static index.
    """
    name = name.lower()
    static = type(self)._static_entries
    for key in static:
        if static[key].name() == name:
            return key
    base = type(self)._static_entries_last_idx + 1
    for offset, entry in enumerate(self._dynamic_table):
        if entry.name() == name:
            return base + offset
    return None
<SYSTEM_TASK:>
gen_txt_repr returns a "textual" representation of the provided
<END_TASK>
<USER_TASK:>
Description:
def gen_txt_repr(self, hdrs, register=True):
    # type: (Union[H2Frame, List[HPackHeaders]], Optional[bool]) -> str
    """ gen_txt_repr returns a "textual" representation of the provided
    headers.

    The output of this function is compatible with the input of
    parse_txt_hdrs.
    @param H2Frame|list of HPackHeaders hdrs: the list of headers to convert to textual representation  # noqa: E501
    @param bool: whether incremental headers should be added to the dynamic table as we generate the text  # noqa: E501
      representation
    @return str: the textual representation of the provided headers
    @raise AssertionError
    """
    lst = []
    if isinstance(hdrs, H2Frame):
        hdrs = hdrs.payload.hdrs
    for hdr in hdrs:
        try:
            if isinstance(hdr, HPackIndexedHdr):
                # Fully-indexed header: the entry renders itself
                lst.append('{}'.format(self[hdr.index]))
            elif isinstance(hdr, (
                    HPackLitHdrFldWithIncrIndexing,
                    HPackLitHdrFldWithoutIndexing
            )):
                # Name is either referenced by index or given literally
                if hdr.index != 0:
                    name = self[hdr.index].name()
                else:
                    name = hdr.hdr_name.getfieldval('data').origin()
                # Pseudo-headers (":path", ...) use no colon separator
                if name.startswith(':'):
                    lst.append(
                        '{} {}'.format(
                            name,
                            hdr.hdr_value.getfieldval('data').origin()
                        )
                    )
                else:
                    lst.append(
                        '{}: {}'.format(
                            name,
                            hdr.hdr_value.getfieldval('data').origin()
                        )
                    )
                # Keep the dynamic table in sync while generating, so later
                # indexed references resolve correctly
                if register and isinstance(hdr, HPackLitHdrFldWithIncrIndexing):  # noqa: E501
                    self.register(hdr)
        except KeyError as e:  # raised when an index is out-of-bound
            print(e)
            continue
    return '\n'.join(lst)
<SYSTEM_TASK:>
Craft an AVP based on its id and optional parameter fields
<END_TASK>
<USER_TASK:>
Description:
def AVP(avpId, **fields):
    """ Craft an AVP based on its id and optional parameter fields

    avpId may be:
      - a str: matched as a prefix of the full AVP name over all vendors;
      - a list [code, vendorId];
      - an int: the AVP code (vendor 0 assumed).
    Returns an instance of the matching AVP class (AVP_Unknown when the
    definition is not found), or None when a string id cannot be resolved.
    """
    val = None
    classType = AVP_Unknown
    if isinstance(avpId, str):
        try:
            # NOTE: the bare `raise` below (no active exception) is used as
            # a two-level loop break; it lands in the except clause, which
            # is why found is set to True there.
            for vnd in AvpDefDict:
                for code in AvpDefDict[vnd]:
                    val = AvpDefDict[vnd][code]
                    if val[0][:len(
                            avpId)] == avpId:  # A prefix of the full name is considered valid  # noqa: E501
                        raise
            found = False
        except BaseException:
            found = True
    else:
        if isinstance(avpId, list):
            code = avpId[0]
            vnd = avpId[1]
        else:  # Assume this is an int
            code = avpId
            vnd = 0
        try:
            val = AvpDefDict[vnd][code]
            found = True
        except BaseException:
            found = False
    if not found:
        warning('The AVP identifier %s has not been found.' % str(avpId))
        if isinstance(avpId, str):  # The string input is not valid
            return None
    # At this point code, vnd are provisionned val may be set (if found is True)  # noqa: E501
    # Set/override AVP code
    fields['avpCode'] = code
    # Set vendor if not already defined and relevant
    if 'avpVnd' not in fields and vnd:
        fields['avpVnd'] = vnd
    # Set flags if not already defined and possible ...
    if 'avpFlags' not in fields:
        if val:
            fields['avpFlags'] = val[2]
        else:
            # 128 sets the vendor-specific flag when a vendor id is present
            fields['avpFlags'] = vnd and 128 or 0
    # Finally, set the name and class if possible
    if val:
        classType = val[1]
    _ret = classType(**fields)
    if val:
        _ret.name = 'AVP ' + val[0]
    return _ret
<SYSTEM_TASK:>
Given a Packet instance `pkt` and the value `val` to be set,
<END_TASK>
<USER_TASK:>
Description:
def _find_fld_pkt_val(self, pkt, val):
"""Given a Packet instance `pkt` and the value `val` to be set,
returns the Field subclass to be used, and the updated `val` if necessary.
""" |
fld = self._iterate_fields_cond(pkt, val, True)
# Default ? (in this case, let's make sure it's up-do-date)
dflts_pkt = pkt.default_fields
if val == dflts_pkt[self.name] and self.name not in pkt.fields:
dflts_pkt[self.name] = fld.default
val = fld.default
return fld, val |
<SYSTEM_TASK:>
Returns the Field subclass to be used, depending on the Packet
<END_TASK>
<USER_TASK:>
Description:
def _find_fld(self):
    """Returns the Field subclass to be used, depending on the Packet
    instance, or the default subclass.

    DEV: since the Packet instance is not provided, we have to use a hack
    to guess it. It should only be used if you cannot provide the current
    Packet instance (for example, because of the current Scapy API).

    If you have the current Packet instance, use ._find_fld_pkt_val() (if
    the value to set is also known) of ._find_fld_pkt() instead.
    """
    # Hack to preserve current Scapy API
    # See https://stackoverflow.com/a/7272464/3223422
    # Walk up the call stack looking for a `self` local that is a Packet
    # owning this field; start two frames up to skip this function and its
    # immediate caller. NOTE: fragile — depends on CPython frame support
    # and on the exact call depth.
    frame = inspect.currentframe().f_back.f_back
    while frame is not None:
        try:
            pkt = frame.f_locals['self']
        except KeyError:
            pass
        else:
            if isinstance(pkt, tuple(self.dflt.owners)):
                return self._find_fld_pkt(pkt)
        frame = frame.f_back
    # No suitable Packet found anywhere on the stack: use the default field
    return self.dflt
<SYSTEM_TASK:>
Checks .uuid_fmt, and raises an exception if it is not valid.
<END_TASK>
<USER_TASK:>
Description:
def _check_uuid_fmt(self):
    """Raise FieldValueRangeException if .uuid_fmt is not a supported format."""
    if self.uuid_fmt in UUIDField.FORMATS:
        return
    raise FieldValueRangeException(
        "Unsupported uuid_fmt ({})".format(self.uuid_fmt))
<SYSTEM_TASK:>
We need to parse the padding and type as soon as possible,
<END_TASK>
<USER_TASK:>
Description:
def pre_dissect(self, s):
    """
    Locate the padding and content type of a TLS 1.3 InnerPlaintext as
    soon as possible, else the message list cannot be parsed.

    Wire format: content || type (one non-zero byte) || zero padding.
    The trailing zeros are scanned to find the real content length, which
    is registered as length_from of the first field; the content type
    byte is stored in self.type.
    """
    if len(s) < 1:
        raise Exception("Invalid InnerPlaintext (too short).")
    tmp_len = len(s) - 1
    # Bug fix: indexing bytes yields an int on Python 3, so the previous
    # comparison `s[-1] != b"\x00"` was always true and padding was never
    # stripped; compare one-byte slices instead.
    if s[-1:] != b"\x00":
        # No padding: the last byte is the content type
        msg_len = tmp_len
    else:
        # Count ALL trailing zero padding bytes; the first non-zero byte
        # before them is the content type (the old loop stopped after one).
        n = 1
        while n < tmp_len and s[-n - 1:-n] == b"\x00":
            n += 1
        msg_len = tmp_len - n
    self.fields_desc[0].length_from = lambda pkt: msg_len
    self.type = struct.unpack("B", s[msg_len:msg_len + 1])[0]
    return s
<SYSTEM_TASK:>
Decrypt, verify and decompress the message.
<END_TASK>
<USER_TASK:>
Description:
def pre_dissect(self, s):
    """
    Decrypt, verify and decompress the record payload.

    With a NULL cipher the record is returned untouched; otherwise the
    encrypted fragment is authenticated/decrypted and spliced back
    between the 5-byte header and any trailing bytes.
    """
    if len(s) < 5:
        raise Exception("Invalid record: header is too short.")
    if isinstance(self.tls_session.rcs.cipher, Cipher_NULL):
        self.deciphered_len = None
        return s
    msglen = struct.unpack('!H', s[3:5])[0]
    hdr = s[:5]
    efrag = s[5:5 + msglen]
    remainder = s[msglen + 5:]
    frag, auth_tag = self._tls_auth_decrypt(efrag)
    self.deciphered_len = len(frag)
    return hdr + frag + auth_tag + remainder
<SYSTEM_TASK:>
Build a TKIP header for IV @iv and mac @mac, and encrypt @data
<END_TASK>
<USER_TASK:>
Description:
def build_TKIP_payload(data, iv, mac, tk):
    """Encrypt data with TKIP under temporal key tk, prepending the TKIP
    header (WEPSeed + extended IV) built from IV iv and transmitter MAC mac.
    """
    # TSC0..TSC5: the 48-bit IV split into bytes, least significant first
    tsc = [(iv >> (8 * k)) & 0xFF for k in range(6)]
    ext_iv = 1 << 5  # Extended IV bit
    hdr = chb(tsc[1]) + chb((tsc[1] | 0x20) & 0x7f) + chb(tsc[0]) + chb(ext_iv)  # noqa: E501
    hdr += chb(tsc[2]) + chb(tsc[3]) + chb(tsc[4]) + chb(tsc[5])
    ta = [orb(e) for e in mac2str(mac)]
    tk_bytes = [orb(x) for x in tk]
    rc4_key = gen_TKIP_RC4_key(tsc, ta, tk_bytes)
    return hdr + ARC4_encrypt(rc4_key, data)
<SYSTEM_TASK:>
Compute and return the data with its MIC and ICV
<END_TASK>
<USER_TASK:>
Description:
def build_MIC_ICV(data, mic_key, source, dest):
    """Append the Michael MIC and the CRC32 ICV to data (802.11i p.47)."""
    # MIC input: DA | SA | priority (0) | 3 reserved zero bytes | DATA
    mic_input = mac2str(dest) + mac2str(source) + b"\x00" * 4 + data
    mic = michael(mic_key, mic_input)
    icv = pack("<I", crc32(data + mic) & 0xFFFFFFFF)
    return data + mic + icv
<SYSTEM_TASK:>
Least Common Multiple between 2 integers.
<END_TASK>
<USER_TASK:>
Description:
def _lcm(a, b):
"""
Least Common Multiple between 2 integers.
""" |
if a == 0 or b == 0:
return 0
else:
return abs(a * b) // gcd(a, b) |
<SYSTEM_TASK:>
Encrypt an ESP packet
<END_TASK>
<USER_TASK:>
Description:
def encrypt(self, sa, esp, key):
    """
    Encrypt an ESP packet

    @param sa: the SecurityAssociation associated with the ESP packet.
    @param esp: an unencrypted _ESPPlain packet with valid padding
    @param key: the secret key used for encryption
    @return: a valid ESP packet encrypted with this algorithm
    """
    data = esp.data_for_encryption()
    if self.cipher:
        mode_iv = self._format_mode_iv(algo=self, sa=sa, iv=esp.iv)
        cipher = self.new_cipher(key, mode_iv)
        encryptor = cipher.encryptor()
        if self.is_aead:
            # AEAD modes authenticate the SPI/sequence number as AAD and
            # append the (possibly truncated) authentication tag
            aad = struct.pack('!LL', esp.spi, esp.seq)
            encryptor.authenticate_additional_data(aad)
            data = encryptor.update(data) + encryptor.finalize()
            data += encryptor.tag[:self.icv_size]
        else:
            data = encryptor.update(data) + encryptor.finalize()
    # NULL encryption: data is carried unmodified (IV still prepended)
    return ESP(spi=esp.spi, seq=esp.seq, data=esp.iv + data)
<SYSTEM_TASK:>
Check that the key length is valid.
<END_TASK>
<USER_TASK:>
Description:
def check_key(self, key):
    """Validate the key length against self.key_size (when defined).

    @param key: a byte string
    @raise TypeError: when key_size is set and len(key) is not allowed
    """
    if not self.key_size:
        return
    if len(key) not in self.key_size:
        raise TypeError('invalid key size %s, must be one of %s' %
                        (len(key), self.key_size))
<SYSTEM_TASK:>
Increment the explicit nonce while avoiding any overflow.
<END_TASK>
<USER_TASK:>
Description:
def _update_nonce_explicit(self):
"""
Increment the explicit nonce while avoiding any overflow.
""" |
ne = self.nonce_explicit + 1
self.nonce_explicit = ne % 2**(self.nonce_explicit_len * 8) |
<SYSTEM_TASK:>
Encrypt the data, and append the computed authentication code.
<END_TASK>
<USER_TASK:>
Description:
def auth_encrypt(self, P, A, seq_num):
    """
    Encrypt the data, and append the computed authentication code.
    TLS 1.3 does not use additional data, but we leave this option to the
    user nonetheless.

    Note that the cipher's authentication tag must be None when encrypting.

    @param P: plaintext bytes
    @param A: additional authenticated data
    @param seq_num: record sequence number, mixed into the per-record nonce
    @raise CipherError: when the cipher material is not fully initialized
    """
    if False in six.itervalues(self.ready):
        raise CipherError(P, A)
    if hasattr(self, "pc_cls"):
        # cryptography "Cipher" backend: reset tag/IV, then encrypt + tag
        self._cipher.mode._tag = None
        self._cipher.mode._initialization_vector = self._get_nonce(seq_num)
        encryptor = self._cipher.encryptor()
        encryptor.authenticate_additional_data(A)
        res = encryptor.update(P) + encryptor.finalize()
        res += encryptor.tag
    else:
        # AEAD one-shot backend (AESGCM/AESCCM/ChaCha20Poly1305 objects)
        if (conf.crypto_valid_advanced and
                isinstance(self._cipher, AESCCM)):
            # AESCCM needs the tag length passed explicitly
            res = self._cipher.encrypt(self._get_nonce(seq_num), P, A,
                                       tag_length=self.tag_len)
        else:
            res = self._cipher.encrypt(self._get_nonce(seq_num), P, A)
    return res
<SYSTEM_TASK:>
Return the 3-tuple made of the Key Exchange Algorithm class, the Cipher
<END_TASK>
<USER_TASK:>
Description:
def get_algs_from_ciphersuite_name(ciphersuite_name):
    """
    Return the 3-tuple made of the Key Exchange Algorithm class, the Cipher
    class and the HMAC class, through the parsing of the ciphersuite name.

    The actual return value is the 5-tuple
    (kx_alg, cipher_alg, hmac_alg, hash_alg, tls1_3), where tls1_3 is True
    for TLS 1.3 suites (no explicit key exchange in the name).
    """
    tls1_3 = False
    if ciphersuite_name.startswith("TLS"):
        s = ciphersuite_name[4:]
        if s.endswith("CCM") or s.endswith("CCM_8"):
            # CCM suites implicitly hash with SHA256 and carry no HMAC
            kx_name, s = s.split("_WITH_")
            kx_alg = _tls_kx_algs.get(kx_name)
            hash_alg = _tls_hash_algs.get("SHA256")
            cipher_alg = _tls_cipher_algs.get(s)
            hmac_alg = None
        else:
            if "WITH" in s:
                kx_name, s = s.split("_WITH_")
                kx_alg = _tls_kx_algs.get(kx_name)
            else:
                # TLS 1.3 names carry no key exchange part
                tls1_3 = True
                kx_alg = _tls_kx_algs.get("TLS13")
            hash_name = s.split('_')[-1]
            hash_alg = _tls_hash_algs.get(hash_name)
            cipher_name = s[:-(len(hash_name) + 1)]
            if tls1_3:
                cipher_name += "_TLS13"
            cipher_alg = _tls_cipher_algs.get(cipher_name)
            hmac_alg = None
            if cipher_alg is not None and cipher_alg.type != "aead":
                hmac_name = "HMAC-%s" % hash_name
                hmac_alg = _tls_hmac_algs.get(hmac_name)
    elif ciphersuite_name.startswith("SSL"):
        # SSLv2-style names: "SSL_CK_<CIPHER>_WITH_<HASH>"
        s = ciphersuite_name[7:]
        kx_alg = _tls_kx_algs.get("SSLv2")
        cipher_name, hash_name = s.split("_WITH_")
        # Bug fix: str.rstrip() strips a *character set*, not a suffix, and
        # could eat legitimate trailing characters of the cipher name; use
        # an explicit suffix check instead.
        export = cipher_name.endswith("_EXPORT40")
        if export:
            cipher_name = cipher_name[:-len("_EXPORT40")]
        cipher_alg = _tls_cipher_algs.get(cipher_name)
        kx_alg.export = export
        hmac_alg = _tls_hmac_algs.get("HMAC-NULL")
        hash_alg = _tls_hash_algs.get(hash_name)
    return kx_alg, cipher_alg, hmac_alg, hash_alg, tls1_3
<SYSTEM_TASK:>
From a list of proposed ciphersuites, this function returns a list of
<END_TASK>
<USER_TASK:>
Description:
def get_usable_ciphersuites(l, kx):
"""
From a list of proposed ciphersuites, this function returns a list of
usable cipher suites, i.e. for which key exchange, cipher and hash
algorithms are known to be implemented and usable in current version of the
TLS extension. The order of the cipher suites in the list returned by the
function matches the one of the proposal.
""" |
res = []
for c in l:
if c in _tls_cipher_suites_cls:
ciph = _tls_cipher_suites_cls[c]
if ciph.usable:
# XXX select among RSA and ECDSA cipher suites
# according to the key(s) the server was given
if ciph.kx_alg.anonymous or kx in ciph.kx_alg.name:
res.append(c)
return res |
<SYSTEM_TASK:>
Identify IP id values classes in a list of packets
<END_TASK>
<USER_TASK:>
Description:
def IPID_count(lst, funcID=lambda x: x[1].id, funcpres=lambda x: x[1].summary()):  # noqa: E501
    """Identify IP id values classes in a list of packets

    Sorts the extracted id values and opens a new "class" whenever two
    consecutive sorted ids differ by more than 50, then prints the detected
    classes followed by one summary line per packet.

    lst: a list of packets
    funcID: a function that returns IP id values
    funcpres: a function used to summarize packets"""
    ids = sorted(funcID(e) for e in lst)
    if not ids:
        # Empty capture: report zero classes instead of raising IndexError.
        print("Probably 0 classes:", [])
        return
    classes = [ids[0]]
    # A gap > 50 between consecutive sorted ids starts a new class.
    classes += [cur for prev, cur in zip(ids[:-1], ids[1:]) if abs(prev - cur) > 50]  # noqa: E501
    # 'ident' instead of 'id' to avoid shadowing the builtin.
    summaries = sorted((funcID(x), funcpres(x)) for x in lst)
    print("Probably %i classes:" % len(classes), classes)
    for ident, pres in summaries:
        print("%5i" % ident, pres)
<SYSTEM_TASK:>
Returns ttl or hlim, depending on the IP version
<END_TASK>
<USER_TASK:>
Description:
def _ttl(self):
    """Return the packet's hop-limit field: ``hlim`` for IPv6 packets,
    ``ttl`` for everything else (IPv4)."""
    if isinstance(self, scapy.layers.inet6.IPv6):
        return self.hlim
    return self.ttl
<SYSTEM_TASK:>
Called to explicitly fixup the packet according to the IGMP RFC
<END_TASK>
<USER_TASK:>
Description:
def igmpize(self):
    """Called to explicitly fixup the packet according to the IGMP RFC
    The rules are:
    General:
    1. the Max Response time is meaningful only in Membership Queries and should be zero
    IP:
    1. Send General Group Query to 224.0.0.1 (all systems)
    2. Send Leave Group to 224.0.0.2 (all routers)
    3a.Otherwise send the packet to the group address
    3b.Send reports/joins to the group address
    4. ttl = 1 (RFC 2236, section 2)
    5. send the packet with the router alert IP option (RFC 2236, section 2)
    Ether:
    1. Recalculate destination
    Returns:
    True The tuple ether/ip/self passed all check and represents
    a proper IGMP packet.
    False One of more validation checks failed and no fields
    were adjusted.
    The function will examine the IGMP message to assure proper format.
    Corrections will be attempted if possible. The IP header is then properly
    adjusted to ensure correct formatting and assignment. The Ethernet header
    is then adjusted to the proper IGMP packet format.
    """ |
    # Fall back to the unspecified group if the packet carries no gaddr.
    gaddr = self.gaddr if hasattr(self, "gaddr") and self.gaddr else "0.0.0.0" # noqa: E501
    underlayer = self.underlayer
    # 0x11 = Membership Query, 0x30 = IGMPv3 query; only these use mrcode.
    if self.type not in [0x11, 0x30]:  # General Rule 1 # noqa: E501
        self.mrcode = 0
    if isinstance(underlayer, IP):
        if (self.type == 0x11):
            if (gaddr == "0.0.0.0"):
                underlayer.dst = "224.0.0.1"  # IP rule 1 # noqa: E501
            elif isValidMCAddr(gaddr):
                underlayer.dst = gaddr  # IP rule 3a # noqa: E501
            else:
                warning("Invalid IGMP Group Address detected !")
                return False
        elif ((self.type == 0x17) and isValidMCAddr(gaddr)):
            underlayer.dst = "224.0.0.2"  # IP rule 2 # noqa: E501
        elif ((self.type == 0x12) or (self.type == 0x16)) and (isValidMCAddr(gaddr)): # noqa: E501
            underlayer.dst = gaddr  # IP rule 3b # noqa: E501
        else:
            warning("Invalid IGMP Type detected !")
            return False
        # IP rule 5: ensure the Router Alert option is present exactly once.
        if not any(isinstance(x, IPOption_Router_Alert) for x in underlayer.options): # noqa: E501
            underlayer.options.append(IPOption_Router_Alert())
        underlayer.ttl = 1  # IP rule 4
        _root = self.firstlayer()
        if _root.haslayer(Ether):
            # Force recalculate Ether dst
            _root[Ether].dst = getmacbyip(underlayer.dst)  # Ether rule 1 # noqa: E501
    # Imported here rather than at module level (contrib module).
    from scapy.contrib.igmpv3 import IGMPv3
    if isinstance(self, IGMPv3):
        # Let IGMPv3 apply its own Max Response Code encoding.
        self.encode_maxrespcode()
    return True
<SYSTEM_TASK:>
Returns the right class for a given BGP message.
<END_TASK>
<USER_TASK:>
Description:
def _bgp_dispatcher(payload):
    """
    Return the BGP message class matching *payload*.

    Without a payload, calling BGP() builds a plain BGPHeader. With one,
    the header marker is checked and the message-type byte decides
    between a keepalive and a generic header; anything else falls back
    to the raw layer.
    """
    if payload is None:
        # Default: calling BGP() builds a BGPHeader.
        return _get_cls("BGPHeader", conf.raw_layer)
    if len(payload) >= _BGP_HEADER_SIZE and \
            payload[:16] == _BGP_HEADER_MARKER:
        # Byte 18 carries the BGP message type; 4 == KEEPALIVE.
        if orb(payload[18]) == 4:
            return _get_cls("BGPKeepAlive")
        return _get_cls("BGPHeader")
    return conf.raw_layer
<SYSTEM_TASK:>
Returns the right class for a given BGP capability.
<END_TASK>
<USER_TASK:>
Description:
def _bgp_capability_dispatcher(payload):
    """
    Return the BGP capability class matching *payload*.

    Falls back to the "generic" capability when there is no payload or
    the payload is too short to carry a capability code.
    """
    if payload is not None and len(payload) >= _BGP_CAPABILITY_MIN_SIZE:
        # The first byte is the capability code.
        code = orb(payload[0])
        return _get_cls(_capabilities_objects.get(code, "BGPCapGeneric"))
    # Default: calling BGPCapability() builds a "generic" capability.
    return _capabilities_registry["BGPCapGeneric"]
<SYSTEM_TASK:>
This will set the ByteField 'length' to the correct value.
<END_TASK>
<USER_TASK:>
Description:
def post_build(self, pkt, pay):
    """
    Fill in the 'length' ByteField (5th header byte) with the payload
    length when the caller left it unset, then append the payload.
    """
    if self.length is None:
        pkt = b"".join((pkt[:4], chb(len(pay)), pkt[5:]))
    return pkt + pay
<SYSTEM_TASK:>
ISOTP encodes the frame type in the first nibble of a frame.
<END_TASK>
<USER_TASK:>
Description:
def guess_payload_class(self, payload):
    """
    Dispatch on the ISOTP frame type, encoded in the upper nibble of the
    first payload byte (0=SF, 1=FF, 2=CF, anything else=FC).
    """
    frame_type = (orb(payload[0]) & 0xf0) >> 4
    dispatch = {0: ISOTP_SF, 1: ISOTP_FF, 2: ISOTP_CF}
    return dispatch.get(frame_type, ISOTP_FC)
<SYSTEM_TASK:>
Attempt to feed an incoming CAN frame into the state machine
<END_TASK>
<USER_TASK:>
Description:
def feed(self, can):
    """Feed an incoming CAN frame into the defragmentation state machine.

    When extended addressing is undecided (use_ext_addr is None), the
    frame is tried both with and without an extended-address byte.
    """
    if not isinstance(can, CAN):
        raise Scapy_Exception("argument is not a CAN frame")
    payload = bytes(can.data)
    # Plain addressing: the whole payload is ISOTP data.
    if self.use_ext_addr is not True and len(payload) > 1:
        self._try_feed(can.identifier, None, payload)
    # Extended addressing: the first byte is the extended address.
    if self.use_ext_addr is not False and len(payload) > 2:
        ext_addr = six.indexbytes(payload, 0)
        self._try_feed(can.identifier, ext_addr, payload[1:])
<SYSTEM_TASK:>
Begin the transmission of message p. This method returns after
<END_TASK>
<USER_TASK:>
Description:
def begin_send(self, p):
    """Begin the transmission of message p, returning after the first
    frame is sent. If the message needs multiple frames, this socket is
    unable to send other messages until this transmission succeeds or
    fails."""
    if hasattr(p, "sent_time"):
        p.sent_time = time.time()
    frame = bytes(p)
    return self.outs.begin_send(frame)
<SYSTEM_TASK:>
Receive a complete ISOTP message, blocking until a message is
<END_TASK>
<USER_TASK:>
Description:
def recv_with_timeout(self, timeout=1):
    """Receive a complete ISOTP message, blocking until one arrives or
    *timeout* seconds elapse (raising Scapy_Exception on expiry).
    With a timeout of 0 the call does not block and the absence of a
    buffered frame is also reported as a timeout."""
    msg = self.ins.recv(timeout)
    stamp = time.time()
    if msg is None:
        raise Scapy_Exception("Timeout")
    return self.basecls, msg, stamp
<SYSTEM_TASK:>
Receive a complete ISOTP message, blocking until a message is
<END_TASK>
<USER_TASK:>
Description:
def recv_raw(self, x=0xffff):
    """Receive a complete ISOTP message as (basecls, msg, timestamp).

    Blocking behaviour follows the underlying socket's own timeout:
    with a timeout of 0 this returns immediately (msg may be None)."""
    msg = self.ins.recv()
    return self.basecls, msg, time.time()
<SYSTEM_TASK:>
Call 'callback' in 'timeout' seconds, unless cancelled.
<END_TASK>
<USER_TASK:>
Description:
def set_timeout(self, timeout, callback):
    """Call 'callback' in 'timeout' seconds, unless cancelled.""" |
    # The ready semaphore guards against double-arming: a failed
    # non-blocking acquire means a previous timeout is still pending.
    if not self._ready_sem.acquire(False):
        raise Scapy_Exception("Timer was already started")
    # Publish the work item before waking the timer thread.
    self._callback = callback
    self._timeout = timeout
    self._cancelled.clear()
    # Wake the timer thread so it starts counting down.
    self._busy_sem.release()
<SYSTEM_TASK:>
Stop the timer without executing the callback.
<END_TASK>
<USER_TASK:>
Description:
def cancel(self):
    """Stop the timer without executing the callback.""" |
    self._cancelled.set()
    if not self._dead:
        # Block until the timer thread acknowledges the cancellation
        # (presumably by releasing _ready_sem — see set_timeout), then
        # release it again so the timer can be re-armed.
        self._ready_sem.acquire()
        self._ready_sem.release()
<SYSTEM_TASK:>
Stop the thread, making this object unusable.
<END_TASK>
<USER_TASK:>
Description:
def stop(self):
    """Stop the thread, making this object unusable.""" |
    if not self._dead:
        self._killed = True
        self._cancelled.set()
        # Wake the worker so it can observe _killed and exit.
        self._busy_sem.release()
        self.join()
        # The exiting thread is expected to leave _ready_sem released;
        # failing to take it here suggests an unclean shutdown.
        if not self._ready_sem.acquire(False):
            warning("ISOTP Timer thread may not have stopped "
                    "correctly")
<SYSTEM_TASK:>
Method called every time the rx_timer times out, due to the peer not
<END_TASK>
<USER_TASK:>
Description:
def _rx_timer_handler(self):
    """Timeout handler fired when the peer fails to deliver a
    consecutive frame within the expected time window."""
    with self.rx_mutex:
        if self.rx_state != ISOTP_WAIT_DATA:
            return
        # No consecutive frame arrived in time: abandon reassembly.
        self.rx_state = ISOTP_IDLE
        warning("RX state was reset due to timeout")
<SYSTEM_TASK:>
Function that must be called every time a CAN frame is received, to
<END_TASK>
<USER_TASK:>
Description:
def on_recv(self, cf):
    """Advance the ISOTP state machine with a newly received CAN frame.

    Strips the optional extended-address byte, then dispatches on the
    N_PCI nibble: flow control goes to the TX side, single/first/
    consecutive frames to the RX side. Unknown PCIs are ignored.
    """
    data = bytes(cf.data)
    if len(data) < 2:
        return
    ae = 0
    if self.extended_rx_addr is not None:
        # With extended addressing the first byte must match our address.
        ae = 1
        if len(data) < 3:
            return
        if six.indexbytes(data, 0) != self.extended_rx_addr:
            return
    n_pci = six.indexbytes(data, ae) & 0xf0
    frame = data[ae:]
    if n_pci == N_PCI_FC:
        # Flow control belongs to the transmit state machine.
        with self.tx_mutex:
            self._recv_fc(frame)
    elif n_pci == N_PCI_SF:
        with self.rx_mutex:
            self._recv_sf(frame)
    elif n_pci == N_PCI_FF:
        with self.rx_mutex:
            self._recv_ff(frame)
    elif n_pci == N_PCI_CF:
        with self.rx_mutex:
            self._recv_cf(frame)
<SYSTEM_TASK:>
Process a received 'Flow Control' frame
<END_TASK>
<USER_TASK:>
Description:
def _recv_fc(self, data):
    """Process a received 'Flow Control' frame""" |
    # FC frames are only meaningful while we are waiting for one.
    if (self.tx_state != ISOTP_WAIT_FC and
            self.tx_state != ISOTP_WAIT_FIRST_FC):
        return 0
    self.tx_timer.cancel()
    if len(data) < 3:
        self.tx_state = ISOTP_IDLE
        # NOTE(review): this rejects a flow-control (FC) frame, yet the
        # message says "CF frame" — looks like a typo; confirm before
        # changing the user-visible text.
        self.tx_exception = "CF frame discarded because it was too short"
        self.tx_done.set()
        raise Scapy_Exception(self.tx_exception)
    # get communication parameters only from the first FC frame
    if self.tx_state == ISOTP_WAIT_FIRST_FC:
        # Byte 1 = block size (BS), byte 2 = minimum separation time (STmin).
        self.txfc_bs = six.indexbytes(data, 1)
        self.txfc_stmin = six.indexbytes(data, 2)
        # STmin values outside the defined ranges fall back to the
        # maximum of 127 ms.
        if ((self.txfc_stmin > 0x7F) and
                ((self.txfc_stmin < 0xF1) or (self.txfc_stmin > 0xF9))):
            self.txfc_stmin = 0x7F
        # 0x00-0x7F encode milliseconds; 0xF1-0xF9 encode 100-900 us.
        if six.indexbytes(data, 2) <= 127:
            tx_gap = six.indexbytes(data, 2) / 1000.0
        elif 0xf1 <= six.indexbytes(data, 2) <= 0xf9:
            tx_gap = (six.indexbytes(data, 2) & 0x0f) / 10000.0
        else:
            tx_gap = 0
        self.tx_gap = tx_gap
        self.tx_state = ISOTP_WAIT_FC
    # Low nibble of byte 0 is the flow status (CTS / WT / OVFLW).
    isotp_fc = six.indexbytes(data, 0) & 0x0f
    if isotp_fc == ISOTP_FC_CTS:
        self.tx_bs = 0
        self.tx_state = ISOTP_SENDING
        # start cyclic timer for sending CF frame
        self.tx_timer.set_timeout(self.tx_gap, self._tx_timer_handler)
    elif isotp_fc == ISOTP_FC_WT:
        # start timer to wait for next FC frame
        self.tx_state = ISOTP_WAIT_FC
        self.tx_timer.set_timeout(self.fc_timeout, self._tx_timer_handler)
    elif isotp_fc == ISOTP_FC_OVFLW:
        # overflow in receiver side
        self.tx_state = ISOTP_IDLE
        self.tx_exception = "Overflow happened at the receiver side"
        self.tx_done.set()
        raise Scapy_Exception(self.tx_exception)
    else:
        self.tx_state = ISOTP_IDLE
        self.tx_exception = "Unknown FC frame type"
        self.tx_done.set()
        raise Scapy_Exception(self.tx_exception)
    return 0
<SYSTEM_TASK:>
Process a received 'Single Frame' frame
<END_TASK>
<USER_TASK:>
Description:
def _recv_sf(self, data):
    """Handle a received 'Single Frame': publish its payload to the
    receive queue and notify the registered callbacks."""
    self.rx_timer.cancel()
    if self.rx_state != ISOTP_IDLE:
        warning("RX state was reset because single frame was received")
        self.rx_state = ISOTP_IDLE
    # SF_DL lives in the low nibble of the PCI byte.
    length = six.indexbytes(data, 0) & 0xf
    if length > len(data) - 1:
        # Frame claims more data than it carries: discard it.
        return 1
    msg = data[1:1 + length]
    self.rx_queue.put(msg)
    for cb in self.rx_callbacks:
        cb(msg)
    self.call_release()
    return 0
<SYSTEM_TASK:>
Process a received 'Consecutive Frame' frame
<END_TASK>
<USER_TASK:>
Description:
def _recv_cf(self, data):
    """Process a received 'Consecutive Frame' frame.

    Appends the CF payload to the reassembly buffer while enforcing the
    sequence number, frame length and flow-control block size. Completed
    messages are pushed to rx_queue and handed to the rx callbacks.
    Returns 0 when the frame was consumed or ignored, 1 when discarded.
    """
    if self.rx_state != ISOTP_WAIT_DATA:
        return 0
    self.rx_timer.cancel()
    # CFs are never longer than the FF
    if len(data) > self.rx_ll_dl:
        return 1
    # CFs have usually the LL_DL length
    if len(data) < self.rx_ll_dl:
        # this is only allowed for the last CF
        if self.rx_len - self.rx_idx > self.rx_ll_dl:
            # fixed typo: was "insuffifient"
            warning("Received a CF with insufficient length")
            return 1
    if six.indexbytes(data, 0) & 0x0f != self.rx_sn:
        # Wrong sequence number
        warning("RX state was reset because wrong sequence number was "
                "received")
        self.rx_state = ISOTP_IDLE
        return 1
    # Sequence numbers wrap around at 16 (4-bit field).
    self.rx_sn = (self.rx_sn + 1) % 16
    self.rx_buf += data[1:]
    self.rx_idx = len(self.rx_buf)
    if self.rx_idx >= self.rx_len:
        # we are done: trim padding, publish the message, go idle
        self.rx_buf = self.rx_buf[0:self.rx_len]
        self.rx_state = ISOTP_IDLE
        self.rx_queue.put(self.rx_buf)
        for cb in self.rx_callbacks:
            cb(self.rx_buf)
        self.call_release()
        self.rx_buf = None
        return 0
    # perform blocksize handling, if enabled
    if self.rxfc_bs != 0:
        self.rx_bs += 1
        # check if we reached the end of the block
        if self.rx_bs >= self.rxfc_bs and not self.listen_mode:
            # send our FC frame
            load = self.ea_hdr
            load += struct.pack("BBB", N_PCI_FC, self.rxfc_bs,
                                self.rxfc_stmin)
            self.can_send(load)
    # wait for another CF
    self.rx_timer.set_timeout(self.cf_timeout, self._rx_timer_handler)
    return 0
<SYSTEM_TASK:>
Begins sending an ISOTP message. This method does not block.
<END_TASK>
<USER_TASK:>
Description:
def begin_send(self, x):
    """Begins sending an ISOTP message. This method does not block.""" |
    with self.tx_mutex:
        if self.tx_state != ISOTP_IDLE:
            raise Scapy_Exception("Socket is already sending, retry later")
        self.tx_done.clear()
        self.tx_exception = None
        self.tx_state = ISOTP_SENDING
        length = len(x)
        if length > ISOTP_MAX_DLEN_2015:
            raise Scapy_Exception("Too much data for ISOTP message")
        # Short messages fit into one CAN frame together with the
        # extended-address header and the 1-byte SF PCI.
        if len(self.ea_hdr) + length <= 7:
            # send a single frame
            data = self.ea_hdr
            data += struct.pack("B", length)
            data += x
            self.tx_state = ISOTP_IDLE
            self.can_send(data)
            self.tx_done.set()
            for cb in self.tx_callbacks:
                cb()
            return
        # send the first frame
        data = self.ea_hdr
        if length > ISOTP_MAX_DLEN:
            # Length does not fit the 12-bit FF field: use the extended
            # encoding (0x1000 escape followed by a 32-bit length).
            data += struct.pack(">HI", 0x1000, length)
        else:
            # Pack the 12-bit length into the FF PCI word.
            data += struct.pack(">H", 0x1000 | length)
        load = x[0:8 - len(data)]
        data += load
        self.can_send(data)
        # Remember the remaining payload; CFs are sent from the timer
        # handler once the peer's flow-control frame arrives.
        self.tx_buf = x
        self.tx_sn = 1
        self.tx_bs = 0
        self.tx_idx = len(load)
        self.tx_state = ISOTP_WAIT_FIRST_FC
        self.tx_timer.set_timeout(self.fc_timeout, self._tx_timer_handler)
<SYSTEM_TASK:>
Send an ISOTP frame and block until the message is sent or an error
<END_TASK>
<USER_TASK:>
Description:
def send(self, p):
    """Send an ISOTP frame and block until the message is fully sent or
    an error happens (in which case Scapy_Exception is raised)."""
    with self.send_mutex:
        self.begin_send(p)
        # Block until the transmit state machine signals completion.
        self.tx_done.wait()
        error = self.tx_exception
        if error is not None:
            raise Scapy_Exception(error)
<SYSTEM_TASK:>
Receive an ISOTP frame, blocking if none is available in the buffer
<END_TASK>
<USER_TASK:>
Description:
def recv(self, timeout=None):
    """Pop one ISOTP message from the receive buffer.

    Waits up to *timeout* seconds; a timeout of 0 polls without
    blocking, None blocks indefinitely. Returns None when no message
    became available in time."""
    should_block = timeout is None or timeout > 0
    try:
        return self.rx_queue.get(should_block, timeout)
    except queue.Empty:
        return None
<SYSTEM_TASK:>
Returns the right parameter set class.
<END_TASK>
<USER_TASK:>
Description:
def dispatch_hook(cls, _pkt=None, *args, **kargs):
    """
    Select the parameter-set class matching the first payload byte,
    falling back to the raw layer for unknown types or missing payload.
    """
    if _pkt is None:
        return conf.raw_layer
    ptype = orb(_pkt[0])
    return globals().get(_param_set_cls.get(ptype), conf.raw_layer)
<SYSTEM_TASK:>
dissect the IPv6 package compressed into this IPHC packet.
<END_TASK>
<USER_TASK:>
Description:
def post_dissect(self, data):
    """dissect the IPv6 package compressed into this IPHC packet.
    The packet payload needs to be decompressed and depending on the
    arguments, several conversions should be done.
    """ |
    # uncompress payload
    packet = IPv6()
    packet.version = IPHC_DEFAULT_VERSION
    packet.tc, packet.fl = self._getTrafficClassAndFlowLabel()
    # nh == 0 means the Next Header was carried inline, not compressed.
    if not self.nh:
        packet.nh = self._nhField
    # HLIM: Hop Limit (0 = carried inline, 1/2/3 = 1, 64 and 255)
    if self.hlim == 0:
        packet.hlim = self._hopLimit
    elif self.hlim == 0x1:
        packet.hlim = 1
    elif self.hlim == 0x2:
        packet.hlim = 64
    else:
        packet.hlim = 255
    # TODO: Payload length can be inferred from lower layers from either the # noqa: E501
    # 6LoWPAN Fragmentation header or the IEEE802.15.4 header
    packet.src = self.decompressSourceAddr(packet)
    packet.dst = self.decompressDestinyAddr(packet)
    if self.nh == 1:
        # The Next Header field is compressed and the next header is
        # encoded using LOWPAN_NHC
        packet.nh = 0x11  # UDP
        udp = UDP()
        # Bit 0x4 clear means the UDP checksum was carried inline.
        if self.header_compression and \
           self.header_compression & 0x4 == 0x0:
            udp.chksum = self.udpChecksum
        # s/d are the bit widths of the compressed source/destiny ports.
        s, d = nhc_port(self)
        # NOTE(review): the 8- and 4-bit branches add the field *width*
        # (s or d) to the base value instead of the decoded port bits —
        # looks suspicious; confirm against the RFC 6282 port encoding.
        if s == 16:
            udp.sport = self.udpSourcePort
        elif s == 8:
            udp.sport = 0xF000 + s
        elif s == 4:
            udp.sport = 0xF0B0 + s
        if d == 16:
            udp.dport = self.udpDestinyPort
        elif d == 8:
            udp.dport = 0xF000 + d
        elif d == 4:
            udp.dport = 0xF0B0 + d
        packet.payload = udp / data
        data = raw(packet)
    # else self.nh == 0 not necessary
    elif self._nhField & 0xE0 == 0xE0:  # IPv6 Extension Header Decompression # noqa: E501
        warning('Unimplemented: IPv6 Extension Header decompression')  # noqa: E501
        packet.payload = conf.raw_layer(data)
        data = raw(packet)
    else:
        packet.payload = conf.raw_layer(data)
        data = raw(packet)
    return Packet.post_dissect(self, data)
<SYSTEM_TASK:>
Depending on the payload content, the frame type we should interpretate
<END_TASK>
<USER_TASK:>
Description:
def dispatch_hook(cls, _pkt=b"", *args, **kargs):
    """Guess the 6LoWPAN frame class to use from the dispatch byte
    (the first byte of the payload)."""
    if not _pkt or len(_pkt) < 1:
        return cls
    dispatch = orb(_pkt[0])
    if dispatch == 0x41:
        return LoWPANUncompressedIPv6
    if dispatch == 0x42:
        return LoWPAN_HC1
    if dispatch >> 3 == 0x18:
        return LoWPANFragmentationFirst
    if dispatch >> 3 == 0x1C:
        return LoWPANFragmentationSubsequent
    if dispatch >> 6 == 0x02:
        return LoWPANMesh
    if dispatch >> 6 == 0x01:
        return LoWPAN_IPHC
    return cls
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.