code | docs
---|---|
def nlmsg_for_each_attr(nlh, hdrlen, rem):
return nla_for_each_attr(nlmsg_attrdata(nlh, hdrlen), nlmsg_attrlen(nlh, hdrlen), rem) | Iterate over a stream of attributes in a message.
https://github.com/thom311/libnl/blob/libnl3_2_25/include/netlink/msg.h#L123
Positional arguments:
nlh -- Netlink message header (nlmsghdr class instance).
hdrlen -- length of family header (integer).
rem -- initialized to len, holds bytes currently remaining in stream (c_int).
Returns:
Generator yielding nlattr instances. |
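A minimal usage sketch (hedged: `nlh` is assumed to be an nlmsghdr instance parsed from a received message, and the family header length is taken as 0 purely for illustration):

from ctypes import c_int

rem = c_int()
for attr in nlmsg_for_each_attr(nlh, 0, rem):
    print('attr type=%d len=%d' % (attr.nla_type, nla_len(attr)))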
def nlmsg_attrdata(nlh, hdrlen):
data = nlmsg_data(nlh)
return libnl.linux_private.netlink.nlattr(bytearray_ptr(data, libnl.linux_private.netlink.NLMSG_ALIGN(hdrlen))) | Head of attributes data.
https://github.com/thom311/libnl/blob/libnl3_2_25/lib/msg.c#L143
Positional arguments:
nlh -- Netlink message header (nlmsghdr class instance).
hdrlen -- length of family specific header (integer).
Returns:
First attribute (nlattr class instance with others in its payload). |
def nlmsg_attrlen(nlh, hdrlen):
return max(nlmsg_len(nlh) - libnl.linux_private.netlink.NLMSG_ALIGN(hdrlen), 0) | Length of attributes data.
https://github.com/thom311/libnl/blob/libnl3_2_25/lib/msg.c#L154
Positional arguments:
nlh -- Netlink message header (nlmsghdr class instance).
hdrlen -- length of family specific header (integer).
Returns:
Integer. |
def nlmsg_ok(nlh, remaining):
sizeof = libnl.linux_private.netlink.nlmsghdr.SIZEOF
return remaining.value >= sizeof and sizeof <= nlh.nlmsg_len <= remaining.value | Check if the Netlink message fits into the remaining bytes.
https://github.com/thom311/libnl/blob/libnl3_2_25/lib/msg.c#L179
Positional arguments:
nlh -- Netlink message header (nlmsghdr class instance).
remaining -- number of bytes remaining in message stream (c_int).
Returns:
Boolean. |
def nlmsg_next(nlh, remaining):
totlen = libnl.linux_private.netlink.NLMSG_ALIGN(nlh.nlmsg_len)
remaining.value -= totlen
return libnl.linux_private.netlink.nlmsghdr(bytearray_ptr(nlh.bytearray, totlen)) | Next Netlink message in message stream.
https://github.com/thom311/libnl/blob/libnl3_2_25/lib/msg.c#L194
Positional arguments:
nlh -- Netlink message header (nlmsghdr class instance).
remaining -- number of bytes remaining in message stream (c_int).
Returns:
The next Netlink message in the message stream (nlmsghdr class instance); `remaining` is decremented by the size of the current message. |
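nlmsg_ok() and nlmsg_next() combine into the usual stream-walking loop. A sketch, assuming `buf` is a bytearray read from a Netlink socket (illustrative setup, not part of the source above):

from ctypes import c_int

remaining = c_int(len(buf))
nlh = libnl.linux_private.netlink.nlmsghdr(bytearray_ptr(buf, 0))
while nlmsg_ok(nlh, remaining):
    handle(nlh)  # hypothetical per-message handler
    nlh = nlmsg_next(nlh, remaining)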
def nlmsg_parse(nlh, hdrlen, tb, maxtype, policy):
if not nlmsg_valid_hdr(nlh, hdrlen):
return -NLE_MSG_TOOSHORT
return nla_parse(tb, maxtype, nlmsg_attrdata(nlh, hdrlen), nlmsg_attrlen(nlh, hdrlen), policy) | Parse attributes of a Netlink message.
https://github.com/thom311/libnl/blob/libnl3_2_25/lib/msg.c#L213
Positional arguments:
nlh -- Netlink message header (nlmsghdr class instance).
hdrlen -- length of family specific header (integer).
tb -- dictionary of nlattr instances (length of maxtype+1).
maxtype -- maximum attribute type to be expected (integer).
policy -- validation policy (nla_policy class instance).
Returns:
0 on success or a negative error code. |
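A sketch of typical use (MY_MAX is an illustrative maximum attribute type; a real caller would use the family's constant, and the tb dict follows the same shape as cmd_msg_parser() below):

MY_MAX = 4  # hypothetical
tb = dict((i, None) for i in range(MY_MAX + 1))
err = nlmsg_parse(nlh, 0, tb, MY_MAX, None)  # no validation policy
if err < 0:
    raise RuntimeError('nlmsg_parse failed: %d' % err)
if tb[1] is not None:  # an attribute of type 1 was present
    payload = nla_data(tb[1])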
def nlmsg_find_attr(nlh, hdrlen, attrtype):
return nla_find(nlmsg_attrdata(nlh, hdrlen), nlmsg_attrlen(nlh, hdrlen), attrtype) | Find a specific attribute in a Netlink message.
https://github.com/thom311/libnl/blob/libnl3_2_25/lib/msg.c#L231
Positional arguments:
nlh -- Netlink message header (nlmsghdr class instance).
hdrlen -- length of family specific header (integer).
attrtype -- type of attribute to look for (integer).
Returns:
The first attribute which matches the specified type (nlattr class instance). |
def nlmsg_alloc(len_=default_msg_size):
len_ = max(libnl.linux_private.netlink.nlmsghdr.SIZEOF, len_)
nm = nl_msg()
nm.nm_refcnt = 1
nm.nm_nlh = libnl.linux_private.netlink.nlmsghdr(bytearray(b'\0') * len_)
nm.nm_protocol = -1
nm.nm_size = len_
nm.nm_nlh.nlmsg_len = nlmsg_total_size(0)
_LOGGER.debug('msg 0x%x: Allocated new message, maxlen=%d', id(nm), len_)
return nm | Allocate a new Netlink message with maximum payload size specified.
https://github.com/thom311/libnl/blob/libnl3_2_25/lib/msg.c#L299
Allocates a new Netlink message without any further payload. The maximum payload size defaults to
resource.getpagesize() or as otherwise specified with nlmsg_set_default_size().
Returns:
Newly allocated Netlink message (nl_msg class instance). |
def nlmsg_inherit(hdr=None):
nm = nlmsg_alloc()
if hdr:
new = nm.nm_nlh
new.nlmsg_type = hdr.nlmsg_type
new.nlmsg_flags = hdr.nlmsg_flags
new.nlmsg_seq = hdr.nlmsg_seq
new.nlmsg_pid = hdr.nlmsg_pid
return nm | Allocate a new Netlink message and inherit Netlink message header.
https://github.com/thom311/libnl/blob/libnl3_2_25/lib/msg.c#L322
Allocates a new Netlink message and inherits the original message header. If `hdr` is not None it will be used as a
template for the Netlink message header, otherwise the header is left blank.
Keyword arguments:
hdr -- Netlink message header template (nlmsghdr class instance).
Returns:
Newly allocated Netlink message (nl_msg class instance). |
def nlmsg_alloc_simple(nlmsgtype, flags):
nlh = libnl.linux_private.netlink.nlmsghdr(nlmsg_type=nlmsgtype, nlmsg_flags=flags)
msg = nlmsg_inherit(nlh)
_LOGGER.debug('msg 0x%x: Allocated new simple message', id(msg))
return msg | Allocate a new Netlink message.
https://github.com/thom311/libnl/blob/libnl3_2_25/lib/msg.c#L346
Positional arguments:
nlmsgtype -- Netlink message type (integer).
flags -- message flags (integer).
Returns:
Newly allocated Netlink message (nl_msg class instance) or None. |
def nlmsg_convert(hdr):
nm = nlmsg_alloc(hdr.nlmsg_len)
if not nm:
return None
nm.nm_nlh.bytearray = hdr.bytearray.copy()[:hdr.nlmsg_len]
return nm | Convert a Netlink message received from a Netlink socket to an nl_msg.
https://github.com/thom311/libnl/blob/libnl3_2_25/lib/msg.c#L382
Allocates a new Netlink message and copies all of the data in `hdr` into the new message object.
Positional arguments:
hdr -- Netlink message received from netlink socket (nlmsghdr class instance).
Returns:
Newly allocated Netlink message (nl_msg class instance) or None. |
def nlmsg_reserve(n, len_, pad):
nlmsg_len_ = n.nm_nlh.nlmsg_len
tlen = len_ if not pad else ((len_ + (pad - 1)) & ~(pad - 1))
if tlen + nlmsg_len_ > n.nm_size:
return None
buf = bytearray_ptr(n.nm_nlh.bytearray, nlmsg_len_)
n.nm_nlh.nlmsg_len += tlen
if tlen > len_:
bytearray_ptr(buf, len_, tlen)[:] = bytearray(b'\0') * (tlen - len_)
_LOGGER.debug('msg 0x%x: Reserved %d (%d) bytes, pad=%d, nlmsg_len=%d', id(n), tlen, len_, pad, n.nm_nlh.nlmsg_len)
return buf | Reserve room for additional data in a Netlink message.
https://github.com/thom311/libnl/blob/libnl3_2_25/lib/msg.c#L407
Reserves room for additional data at the tail of an existing Netlink message. Any required padding will be
zeroed out.
Positional arguments:
n -- Netlink message (nl_msg class instance).
len_ -- length of additional data to reserve room for (integer).
pad -- number of bytes to align data to (integer).
Returns:
bytearray_ptr() at the start of additional data or None. |
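The rounding in nlmsg_reserve() pads len_ up to the next multiple of `pad` before checking it against nm_size. A self-contained sketch of that arithmetic:

def padded_len(len_, pad):
    # same rounding nlmsg_reserve() applies before the size check
    return len_ if not pad else ((len_ + (pad - 1)) & ~(pad - 1))

assert padded_len(5, 4) == 8
assert padded_len(8, 4) == 8
assert padded_len(5, 0) == 5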
def nlmsg_append(n, data, len_, pad):
tmp = nlmsg_reserve(n, len_, pad)
if tmp is None:
return -NLE_NOMEM
tmp[:len_] = data.bytearray[:len_]
_LOGGER.debug('msg 0x%x: Appended %d bytes with padding %d', id(n), len_, pad)
return 0 | Append data to tail of a Netlink message.
https://github.com/thom311/libnl/blob/libnl3_2_25/lib/msg.c#L442
Extends the Netlink message as needed and appends the data of given length to the message.
Positional arguments:
n -- Netlink message (nl_msg class instance).
data -- data to add.
len_ -- length of data (integer).
pad -- number of bytes to align data to (integer).
Returns:
0 on success or a negative error code. |
def nlmsg_put(n, pid, seq, type_, payload, flags):
if n.nm_nlh.nlmsg_len < libnl.linux_private.netlink.NLMSG_HDRLEN:
raise BUG
nlh = n.nm_nlh
nlh.nlmsg_type = type_
nlh.nlmsg_flags = flags
nlh.nlmsg_pid = pid
nlh.nlmsg_seq = seq
_LOGGER.debug('msg 0x%x: Added netlink header type=%d, flags=%d, pid=%d, seq=%d', id(n), type_, flags, pid, seq)
if payload > 0 and nlmsg_reserve(n, payload, libnl.linux_private.netlink.NLMSG_ALIGNTO) is None:
return None
return nlh | Add a Netlink message header to a Netlink message.
https://github.com/thom311/libnl/blob/libnl3_2_25/lib/msg.c#L503
Adds or overwrites the Netlink message header in an existing message object.
Positional arguments:
n -- Netlink message (nl_msg class instance).
pid -- Netlink process id or NL_AUTO_PID (c_uint32).
seq -- sequence number of message or NL_AUTO_SEQ (c_uint32).
type_ -- message type (integer).
payload -- length of message payload (integer).
flags -- message flags (integer).
Returns:
nlmsghdr class instance or None. |
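A sketch of composing a request header (NL_AUTO_PID and NL_AUTO_SEQ are the libnl placeholders for auto-filled values; message type 16 is purely illustrative):

flags = libnl.linux_private.netlink.NLM_F_REQUEST
msg = nlmsg_alloc_simple(16, flags)
hdr = nlmsg_put(msg, NL_AUTO_PID, NL_AUTO_SEQ, 16, 0, flags)
if hdr is None:
    raise MemoryError('could not reserve room for the payload')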
def nl_nlmsg_flags2str(flags, buf, _=None):
del buf[:]
all_flags = (
('REQUEST', libnl.linux_private.netlink.NLM_F_REQUEST),
('MULTI', libnl.linux_private.netlink.NLM_F_MULTI),
('ACK', libnl.linux_private.netlink.NLM_F_ACK),
('ECHO', libnl.linux_private.netlink.NLM_F_ECHO),
('ROOT', libnl.linux_private.netlink.NLM_F_ROOT),
('MATCH', libnl.linux_private.netlink.NLM_F_MATCH),
('ATOMIC', libnl.linux_private.netlink.NLM_F_ATOMIC),
('REPLACE', libnl.linux_private.netlink.NLM_F_REPLACE),
('EXCL', libnl.linux_private.netlink.NLM_F_EXCL),
('CREATE', libnl.linux_private.netlink.NLM_F_CREATE),
('APPEND', libnl.linux_private.netlink.NLM_F_APPEND),
)
print_flags = []
for k, v in all_flags:
if not flags & v:
continue
flags &= ~v
print_flags.append(k)
if flags:
print_flags.append('0x{0:x}'.format(flags))
buf.extend(','.join(print_flags).encode('ascii'))
return buf | Netlink Message Flags Translations.
https://github.com/thom311/libnl/blob/libnl3_2_25/lib/msg.c#L664
Positional arguments:
flags -- integer.
buf -- bytearray().
Keyword arguments:
_ -- unused.
Returns:
Reference to `buf`. |
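Usage sketch: known bits are printed by name in the table order above, and any leftover bits would be appended as hex.

buf = bytearray()
flags = (libnl.linux_private.netlink.NLM_F_REQUEST |
         libnl.linux_private.netlink.NLM_F_ACK)
assert nl_nlmsg_flags2str(flags, buf) == bytearray(b'REQUEST,ACK')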
def dump_hex(ofd, start, len_, prefix=0):
prefix_whitespaces = ' ' * prefix
limit = 16 - (prefix * 2)
start_ = start[:len_]
for line in (start_[i:i + limit] for i in range(0, len(start_), limit)): # stackoverflow.com/a/9475354/1198943
hex_lines, ascii_lines = list(), list()
for c in line:
hex_lines.append('{0:02x}'.format(c if hasattr(c, 'real') else ord(c)))
c2 = chr(c) if hasattr(c, 'real') else c
ascii_lines.append(c2 if c2 in string.printable[:95] else '.')
hex_line = ' '.join(hex_lines).ljust(limit * 3)
ascii_line = ''.join(ascii_lines)
ofd(' %s%s%s', prefix_whitespaces, hex_line, ascii_line) | Convert `start` to hex and log it, 16 bytes per log statement.
https://github.com/thom311/libnl/blob/libnl3_2_25/lib/msg.c#L760
Positional arguments:
ofd -- function to call with arguments similar to `logging.debug`.
start -- bytearray() or bytearray_ptr() instance.
len_ -- size of `start` (integer).
Keyword arguments:
prefix -- additional number of whitespace pairs to prefix each log statement with. |
def print_hdr(ofd, msg):
nlh = nlmsg_hdr(msg)
buf = bytearray()
ofd(' .nlmsg_len = %d', nlh.nlmsg_len)
ops = nl_cache_ops_associate_safe(msg.nm_protocol, nlh.nlmsg_type)
if ops:
mt = nl_msgtype_lookup(ops, nlh.nlmsg_type)
if not mt:
raise BUG
buf.extend('{0}::{1}'.format(ops.co_name, mt.mt_name).encode('ascii'))
else:
nl_nlmsgtype2str(nlh.nlmsg_type, buf, 128)
ofd(' .type = %d <%s>', nlh.nlmsg_type, buf.decode('ascii'))
ofd(' .flags = %d <%s>', nlh.nlmsg_flags, nl_nlmsg_flags2str(nlh.nlmsg_flags, buf, 128).decode('ascii'))
ofd(' .seq = %d', nlh.nlmsg_seq)
ofd(' .port = %d', nlh.nlmsg_pid) | https://github.com/thom311/libnl/blob/libnl3_2_25/lib/msg.c#L793.
Positional arguments:
ofd -- function to call with arguments similar to `logging.debug`.
msg -- message to print (nl_msg class instance). |
def print_genl_hdr(ofd, start):
ghdr = genlmsghdr(start)
ofd(' [GENERIC NETLINK HEADER] %d octets', GENL_HDRLEN)
ofd(' .cmd = %d', ghdr.cmd)
ofd(' .version = %d', ghdr.version)
ofd(' .unused = %#d', ghdr.reserved) | https://github.com/thom311/libnl/blob/libnl3_2_25/lib/msg.c#L821.
Positional arguments:
ofd -- function to call with arguments similar to `logging.debug`.
start -- bytearray() or bytearray_ptr() instance. |
def print_genl_msg(_, ofd, hdr, ops, payloadlen):
data = nlmsg_data(hdr)
if payloadlen.value < GENL_HDRLEN:
return data
print_genl_hdr(ofd, data)
payloadlen.value -= GENL_HDRLEN
data = bytearray_ptr(data, GENL_HDRLEN)
if ops:
hdrsize = ops.co_hdrsize - GENL_HDRLEN
if hdrsize > 0:
if payloadlen.value < hdrsize:
return data
ofd(' [HEADER] %d octets', hdrsize)
dump_hex(ofd, data, hdrsize, 0)
payloadlen.value -= hdrsize
data = bytearray_ptr(data, hdrsize)
return data | https://github.com/thom311/libnl/blob/libnl3_2_25/lib/msg.c#L831.
Positional arguments:
_ -- unused.
ofd -- function to call with arguments similar to `logging.debug`.
hdr -- Netlink message header (nlmsghdr class instance).
ops -- cache operations (nl_cache_ops class instance).
payloadlen -- length of payload in message (ctypes.c_int instance).
Returns:
data (bytearray_ptr). |
def dump_attr(ofd, attr, prefix=0):
dump_hex(ofd, nla_data(attr), nla_len(attr), prefix) | https://github.com/thom311/libnl/blob/libnl3_2_25/lib/msg.c#L862.
Positional arguments:
ofd -- function to call with arguments similar to `logging.debug`.
attr -- nlattr class instance.
Keyword arguments:
prefix -- additional number of whitespace pairs to prefix each log statement with. |
def dump_attrs(ofd, attrs, attrlen, prefix=0):
prefix_whitespaces = ' ' * prefix
rem = c_int()
for nla in nla_for_each_attr(attrs, attrlen, rem):
alen = nla_len(nla)
if nla.nla_type == 0:
ofd('%s [ATTR PADDING] %d octets', prefix_whitespaces, alen)
else:
is_nested = ' NESTED' if nla_is_nested(nla) else ''
ofd('%s [ATTR %02d%s] %d octets', prefix_whitespaces, nla.nla_type, is_nested, alen)
if nla_is_nested(nla):
dump_attrs(ofd, nla_data(nla), alen, prefix + 1)
else:
dump_attr(ofd, nla, prefix)
padlen = nla_padlen(alen)
if padlen > 0:
ofd('%s [PADDING] %d octets', prefix_whitespaces, padlen)
dump_hex(ofd, bytearray_ptr(nla_data(nla), alen), padlen, prefix)
if rem.value:
ofd('%s [LEFTOVER] %d octets', prefix_whitespaces, rem.value) | https://github.com/thom311/libnl/blob/libnl3_2_25/lib/msg.c#L869.
Positional arguments:
ofd -- function to call with arguments similar to `logging.debug`.
attrs -- nlattr class instance.
attrlen -- length of payload (integer).
Keyword arguments:
prefix -- additional number of whitespace pairs to prefix each log statement with. |
def dump_error_msg(msg, ofd=_LOGGER.debug):
hdr = nlmsg_hdr(msg)
err = libnl.linux_private.netlink.nlmsgerr(nlmsg_data(hdr))
ofd(' [ERRORMSG] %d octets', err.SIZEOF)
if nlmsg_len(hdr) >= err.SIZEOF:
ofd(' .error = %d "%s"', err.error, os.strerror(-err.error))
ofd(' [ORIGINAL MESSAGE] %d octets', hdr.SIZEOF)
errmsg = nlmsg_inherit(err.msg)
print_hdr(ofd, errmsg) | https://github.com/thom311/libnl/blob/libnl3_2_25/lib/msg.c#L908.
Positional arguments:
msg -- message to print (nl_msg class instance).
Keyword arguments:
ofd -- function to call with arguments similar to `logging.debug`. |
def print_msg(msg, ofd, hdr):
payloadlen = c_int(nlmsg_len(hdr))
attrlen = 0
data = nlmsg_data(hdr)
ops = nl_cache_ops_associate_safe(msg.nm_protocol, hdr.nlmsg_type)
if ops:
attrlen = nlmsg_attrlen(hdr, ops.co_hdrsize)
payloadlen.value -= attrlen
if msg.nm_protocol == libnl.linux_private.netlink.NETLINK_GENERIC:
data = print_genl_msg(msg, ofd, hdr, ops, payloadlen)
if payloadlen.value:
ofd(' [PAYLOAD] %d octets', payloadlen.value)
dump_hex(ofd, data, payloadlen.value, 0)
if attrlen:
attrs = nlmsg_attrdata(hdr, ops.co_hdrsize)
attrlen = nlmsg_attrlen(hdr, ops.co_hdrsize)
dump_attrs(ofd, attrs, attrlen, 0) | https://github.com/thom311/libnl/blob/libnl3_2_25/lib/msg.c#L929.
Positional arguments:
msg -- Netlink message (nl_msg class instance).
ofd -- function to call with arguments similar to `logging.debug`.
hdr -- Netlink message header (nlmsghdr class instance). |
def nl_msg_dump(msg, ofd=_LOGGER.debug):
hdr = nlmsg_hdr(msg)
ofd('-------------------------- BEGIN NETLINK MESSAGE ---------------------------')
ofd(' [NETLINK HEADER] %d octets', hdr.SIZEOF)
print_hdr(ofd, msg)
if hdr.nlmsg_type == libnl.linux_private.netlink.NLMSG_ERROR:
dump_error_msg(msg, ofd)
elif nlmsg_len(hdr) > 0:
print_msg(msg, ofd, hdr)
ofd('--------------------------- END NETLINK MESSAGE ---------------------------') | Dump message in human readable format to callable.
https://github.com/thom311/libnl/blob/libnl3_2_25/lib/msg.c#L970
Positional arguments:
msg -- message to print (nl_msg class instance).
Keyword arguments:
ofd -- function to call with arguments similar to `logging.debug`. |
def setChecked(src, ids=[], dpth = 0, key = ''):
if isinstance(src, dict):
for key, value in src.iteritems():
setChecked(value, ids, dpth + 1, key)
elif isinstance(src, list):
for litem in src:
if isinstance(litem, types.DictType):
if "id" in litem and litem["id"] in ids:
litem["checked"] = True
litem["select"] = True
setChecked(litem, ids, dpth + 2) | Recursively walk the tree and mark every item whose "id" is in `ids` as checked and selected. |
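A small illustration of the marking behavior (Python 2, matching the iteritems() usage above):

tree = {'children': [{'id': 'a', 'children': []},
                     {'id': 'b', 'children': []}]}
setChecked(tree, ids=['b'])
assert tree['children'][1]['checked'] is True
assert tree['children'][1]['select'] is True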
def setThirdStateChecked(src, ids=[], dpth = 0, key = ''):
if isinstance(src, dict):
if 'children' in src and type(src['children']) is list and len(src['children']) > 0:
children = src['children']
number_of_possible_checked = len(children)
number_of_checked = 0
for c in children:
if 'checked' in c and c['checked']:
number_of_checked += 1
if number_of_possible_checked == number_of_checked:
src['checked'] = True
elif number_of_checked > 0:
lineage = src['id'].split('.')
src['cls'] = 'x-tree-checkbox-checked-disabled'
for key, value in src.iteritems():
setThirdStateChecked(value, ids, dpth + 1, key)
elif isinstance(src, list):
for litem in src:
setThirdStateChecked(litem, ids, dpth + 2) | Recursively propagate checked state up the tree: a parent whose children are all checked becomes checked; a parent with only some children checked gets the third-state (checked-disabled) CSS class. |
def nl_object_alloc(ops):
new = nl_object()
nl_init_list_head(new.ce_list)
new.ce_ops = ops
if ops.oo_constructor:
ops.oo_constructor(new)
_LOGGER.debug('Allocated new object 0x%x', id(new))
return new | Allocate a new object of kind specified by the operations handle.
https://github.com/thom311/libnl/blob/libnl3_2_25/lib/object.c#L54
Positional arguments:
ops -- cache operations handle (nl_object_ops class instance).
Returns:
New nl_object class instance or None. |
def lookup_cmd(ops, cmd_id):
for i in range(ops.o_ncmds):
cmd = ops.o_cmds[i]
if cmd.c_id == cmd_id:
return cmd
return None | https://github.com/thom311/libnl/blob/libnl3_2_25/lib/genl/mngt.c#L33.
Positional arguments:
ops -- genl_ops class instance.
cmd_id -- integer.
Returns:
genl_cmd class instance or None. |
def cmd_msg_parser(who, nlh, ops, cache_ops, arg):
ghdr = genlmsg_hdr(nlh)
cmd = lookup_cmd(ops, ghdr.cmd)
if not cmd:
return -NLE_MSGTYPE_NOSUPPORT
if cmd.c_msg_parser is None:
return -NLE_OPNOTSUPP
tb = dict((i, None) for i in range(cmd.c_maxattr + 1))
info = genl_info(who=who, nlh=nlh, genlhdr=ghdr, userhdr=genlmsg_user_hdr(ghdr), attrs=tb)
err = nlmsg_parse(nlh, GENL_HDRSIZE(ops.o_hdrsize), tb, cmd.c_maxattr, cmd.c_attr_policy)
if err < 0:
return err
return cmd.c_msg_parser(cache_ops, cmd, info, arg) | https://github.com/thom311/libnl/blob/libnl3_2_25/lib/genl/mngt.c#L47.
Positional arguments:
who -- sockaddr_nl class instance.
nlh -- nlmsghdr class instance.
ops -- genl_ops class instance.
cache_ops -- nl_cache_ops class instance.
arg -- to be passed along to .c_msg_parser().
Returns:
Integer. |
def genl_msg_parser(ops, who, nlh, pp):
if ops.co_genl is None:
raise BUG
return int(cmd_msg_parser(who, nlh, ops.co_genl, ops, pp)) | https://github.com/thom311/libnl/blob/libnl3_2_25/lib/genl/mngt.c#L85.
Positional arguments:
ops -- nl_cache_ops class instance.
who -- sockaddr_nl class instance.
nlh -- nlmsghdr class instance.
pp -- nl_parser_param class instance.
Returns:
Integer, cmd_msg_parser() output. |
def lookup_family(family):
for ops in nl_list_for_each_entry(genl_ops(), genl_ops_list, 'o_list'):
if ops.o_id == family:
return ops
return None | https://github.com/thom311/libnl/blob/libnl3_2_25/lib/genl/mngt.c#L94.
Positional arguments:
family -- integer.
Returns:
genl_ops class instance or None. |
def lookup_family_by_name(name):
for ops in nl_list_for_each_entry(genl_ops(), genl_ops_list, 'o_list'):
if ops.o_name == name:
return ops
return None | https://github.com/thom311/libnl/blob/libnl3_2_25/lib/genl/mngt.c#L106.
Positional arguments:
name -- string.
Returns:
genl_ops class instance or None. |
def genl_register_family(ops):
if not ops.o_name or (ops.o_cmds and ops.o_ncmds <= 0):
return -NLE_INVAL
if ops.o_id and lookup_family(ops.o_id):
return -NLE_EXIST
if lookup_family_by_name(ops.o_name):
return -NLE_EXIST
nl_list_add_tail(ops.o_list, genl_ops_list)
return 0 | Register Generic Netlink family and associated commands.
https://github.com/thom311/libnl/blob/libnl3_2_25/lib/genl/mngt.c#L164
Registers the specified Generic Netlink family definition together with all associated commands. After registration,
received Generic Netlink messages can be passed to genl_handle_msg() which will validate the messages, look for a
matching command and call the respective callback function automatically.
Positional arguments:
ops -- Generic Netlink family definition (genl_ops class instance).
Returns:
0 on success or a negative error code. |
def genl_register(ops):
if ops.co_protocol != NETLINK_GENERIC:
return -NLE_PROTO_MISMATCH
if ops.co_hdrsize < GENL_HDRSIZE(0):
return -NLE_INVAL
if ops.co_genl is None:
return -NLE_INVAL
ops.co_genl.o_cache_ops = ops
ops.co_genl.o_hdrsize = ops.co_hdrsize - GENL_HDRLEN
ops.co_genl.o_name = ops.co_msgtypes[0].mt_name
ops.co_genl.o_id = ops.co_msgtypes[0].mt_id
ops.co_msg_parser = genl_msg_parser
err = genl_register_family(ops.co_genl)
if err < 0:
return err
return nl_cache_mngt_register(ops) | Register Generic Netlink family backed cache.
https://github.com/thom311/libnl/blob/libnl3_2_25/lib/genl/mngt.c#L241
Same as genl_register_family() but additionally registers the specified cache operations using
nl_cache_mngt_register() and associates it with the Generic Netlink family.
Positional arguments:
ops -- cache operations definition (nl_cache_ops class instance).
Returns:
0 on success or a negative error code. |
def __setup_connection(self):
if self.payload is not None and isinstance(self.payload, dict) and 'settings' in self.payload:
config.plugin_client_settings = self.payload['settings']
config.offline = self.args.offline
config.connection = PluginConnection(
client=self.args.client or 'SUBLIME_TEXT_3',
ui=self.args.ui_switch,
args=self.args,
params=self.payload,
operation=self.operation,
verbose=self.args.verbose)
config.project = MavensMateProject(params=self.payload,ui=self.args.ui_switch)
config.sfdc_client = config.project.sfdc_client | Each operation requested represents a session. The session holds
information about the plugin running it and establishes a project object. |
def execute(self):
try:
self.__setup_connection()
#if the arg switch argument is included, the request is to launch the out of box
#MavensMate UI, so we generate the HTML for the UI and launch the process
#example: mm -o new_project --ui
if self.args.ui_switch == True:
config.logger.debug('UI operation requested, attempting to launch MavensMate UI')
tmp_html_file = util.generate_ui(self.operation,self.payload,self.args)
if config.connection.plugin_client == 'ATOM': #returning location of html file here so we can open the page inside an atom panel
self.__printr(util.generate_success_response(tmp_html_file))
else:
util.launch_ui(tmp_html_file)
self.__printr(util.generate_success_response('UI Generated Successfully'))
#non-ui command
else:
commands = get_available_commands()
#debug(commands)
try:
command_clazz = commands[self.operation](params=self.payload,args=self.args)
except KeyError:
raise MMUnsupportedOperationException('Could not find the operation you requested. Be sure the command is located in mm.commands, inherits from Command (found in basecommand.py) and includes an execute method.')
except NotImplementedError:
raise MMException("This command is not properly implemented. Be sure it contains an 'execute' method.")
self.__printr(command_clazz.execute())
except Exception, e:
self.__printr(e, is_exception=True) | Executes the requested command. |
def nl_syserr2nlerr(error_):
error_ = abs(error_)
legend = {
errno.EBADF: libnl.errno_.NLE_BAD_SOCK,
errno.EADDRINUSE: libnl.errno_.NLE_EXIST,
errno.EEXIST: libnl.errno_.NLE_EXIST,
errno.EADDRNOTAVAIL: libnl.errno_.NLE_NOADDR,
errno.ESRCH: libnl.errno_.NLE_OBJ_NOTFOUND,
errno.ENOENT: libnl.errno_.NLE_OBJ_NOTFOUND,
errno.EINTR: libnl.errno_.NLE_INTR,
errno.EAGAIN: libnl.errno_.NLE_AGAIN,
errno.ENOTSOCK: libnl.errno_.NLE_BAD_SOCK,
errno.ENOPROTOOPT: libnl.errno_.NLE_INVAL,
errno.EFAULT: libnl.errno_.NLE_INVAL,
errno.EACCES: libnl.errno_.NLE_NOACCESS,
errno.EINVAL: libnl.errno_.NLE_INVAL,
errno.ENOBUFS: libnl.errno_.NLE_NOMEM,
errno.ENOMEM: libnl.errno_.NLE_NOMEM,
errno.EAFNOSUPPORT: libnl.errno_.NLE_AF_NOSUPPORT,
errno.EPROTONOSUPPORT: libnl.errno_.NLE_PROTO_MISMATCH,
errno.EOPNOTSUPP: libnl.errno_.NLE_OPNOTSUPP,
errno.EPERM: libnl.errno_.NLE_PERM,
errno.EBUSY: libnl.errno_.NLE_BUSY,
errno.ERANGE: libnl.errno_.NLE_RANGE,
errno.ENODEV: libnl.errno_.NLE_NODEV,
}
return int(legend.get(error_, libnl.errno_.NLE_FAILURE)) | https://github.com/thom311/libnl/blob/libnl3_2_25/lib/error.c#L84. |
def get_alert(self, alert):
if self.alerts_count() is None or alert > self.alerts_count():
return None
else:
return self.get()[alert-1] | Receives an alert index as an argument and returns the prediction for that alert
if it is available. If not, the function will return None. |
def get_forecast(self, latitude, longitude):
reply = self.http_get(self.url_builder(latitude, longitude))
self.forecast = json.loads(reply)
for item in self.forecast.keys():
setattr(self, item, self.forecast[item]) | Gets the weather data from darksky api and stores it in
the respective dictionaries if available.
This function should be used to fetch weather information. |
def get_forecast_fromstr(self, reply):
self.forecast = json.loads(reply)
for item in self.forecast.keys():
setattr(self, item, self.forecast[item]) | Gets the weather data from a darksky api response string
and stores it in the respective dictionaries if available.
This function should be used to fetch weather information. |
def url_builder(self, latitude, longitude):
try:
float(latitude)
float(longitude)
except TypeError:
raise TypeError('Latitude (%s) and Longitude (%s) must be a float number' % (latitude, longitude))
url = self._darksky_url + self.forecast_io_api_key + '/'
url += str(latitude).strip() + ',' + str(longitude).strip()
if self.time_url and not self.time_url.isspace():
url += ',' + self.time_url.strip()
url += '?units=' + self.units_url.strip()
url += '&lang=' + self.lang_url.strip()
if self.exclude_url is not None:
excludes = ''
if self.exclude_url in self._allowed_excludes_extends:
excludes += self.exclude_url + ','
else:
for item in self.exclude_url:
if item in self._allowed_excludes_extends:
excludes += item + ','
if len(excludes) > 0:
url += '&exclude=' + excludes.rstrip(',')
if self.extend_url is not None:
extends = ''
if self.extend_url in self._allowed_excludes_extends:
extends += self.extend_url + ','
else:
for item in self.extend_url:
if item in self._allowed_excludes_extends:
extends += item + ','
if len(extends) > 0:
url += '&extend=' + extends.rstrip(',')
return url | Builds the correct URL for a request to the forecast.io API.
Receives the latitude and the longitude.
Returns a string with the URL. |
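For illustration (assuming `self._darksky_url` is 'https://api.darksky.net/forecast/', an API key of 'abc123', and defaults of units 'auto' and language 'en' — all hypothetical values here), url_builder(38.7, -9.1) would produce https://api.darksky.net/forecast/abc123/38.7,-9.1?units=auto&lang=en, with optional time, exclude and extend parts appended as configured.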
def http_get(self, request_url):
try:
headers = {'Accept-Encoding': 'gzip, deflate'}
response = requests.get(request_url, headers=headers)
except requests.exceptions.Timeout as ext:
log.error('Error: Timeout', ext)
except requests.exceptions.TooManyRedirects as extmr:
log.error('Error: TooManyRedirects', extmr)
except requests.exceptions.RequestException as ex:
log.error('Error: RequestException', ex)
sys.exit(1)
try:
self.cache_control = response.headers['Cache-Control']
except KeyError as kerr:
log.warning('Warning: Could not get headers. %s' % kerr)
self.cache_control = None
try:
self.expires = response.headers['Expires']
except KeyError as kerr:
log.warning('Warning: Could not get headers. %s' % kerr)
self.expires = None
try:
self.x_forecast_api_calls = response.headers['X-Forecast-API-Calls']
except KeyError as kerr:
log.warning('Warning: Could not get headers. %s' % kerr)
self.x_forecast_api_calls = None
try:
self.x_responde_time = response.headers['X-Response-Time']
except KeyError as kerr:
log.warning('Warning: Could not get headers. %s' % kerr)
self.x_responde_time = None
if response.status_code != 200:
raise requests.exceptions.HTTPError('Bad response, status code: %d' % (response.status_code))
self.raw_response = response.text
return self.raw_response | Receives the request URL; used internally to get
the information via HTTP.
Returns the response content.
Raises Timeout, TooManyRedirects, RequestException.
Raises KeyError if headers are not present.
Raises HTTPError if response code is not 200. |
def _deprecated_kwargs(kwargs, arg_newarg):
warn_for = []
for (arg, new_kw) in arg_newarg:
if arg in kwargs.keys():
val = kwargs.pop(arg)
kwargs[new_kw] = val
warn_for.append((arg, new_kw))
if len(warn_for) > 0:
if len(warn_for) == 1:
warnings.warn("Argument '{}' is deprecated. Use {} instead".
format(warn_for[0][0], warn_for[0][1]),
DeprecationWarning, stacklevel=4)
else:
args = ", ".join([x[0] for x in warn_for])
repl = ", ".join([x[1] for x in warn_for])
warnings.warn(
"Arguments '{}' are deprecated. Use '{}' instead respectively".
format(args, repl),
DeprecationWarning, stacklevel=4)
return kwargs | arg_newarg is a list of tuples, where each tuple has a pair of strings.
('old_arg', 'new_arg')
A DeprecationWarning is raised for the arguments that need to be
replaced. |
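A sketch of the renaming behavior (illustrative values; the warning is captured so the snippet runs quietly):

import warnings

kwargs = {'parallel': True, 'chunksize': 2}
renames = (('parallel', 'pm_parallel'), ('chunksize', 'pm_chunksize'))
with warnings.catch_warnings(record=True):
    warnings.simplefilter('always')
    out = _deprecated_kwargs(kwargs, renames)
assert out == {'pm_parallel': True, 'pm_chunksize': 2}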
def _map_or_starmap(function, iterable, args, kwargs, map_or_starmap):
arg_newarg = (("parallel", "pm_parallel"), ("chunksize", "pm_chunksize"),
("pool", "pm_pool"), ("processes", "pm_processes"),
("parmap_progress", "pm_pbar"))
kwargs = _deprecated_kwargs(kwargs, arg_newarg)
chunksize = kwargs.pop("pm_chunksize", None)
progress = kwargs.pop("pm_pbar", False)
progress = progress and HAVE_TQDM
parallel, pool, close_pool = _create_pool(kwargs)
# Map:
if parallel:
func_star = _get_helper_func(map_or_starmap)
try:
if progress and close_pool:
try:
num_tasks = len(iterable)
# get a chunksize (as multiprocessing does):
chunksize = _get_default_chunksize(chunksize,
pool, num_tasks)
# use map_async to get progress information
result = pool.map_async(func_star,
izip(repeat(function),
iterable,
repeat(list(args)),
repeat(kwargs)),
chunksize)
finally:
pool.close()
# Progress bar:
try:
_do_pbar(result, num_tasks, chunksize)
finally:
output = result.get()
else:
result = pool.map_async(func_star,
izip(repeat(function),
iterable,
repeat(list(args)),
repeat(kwargs)),
chunksize)
output = result.get()
finally:
if close_pool:
if not progress:
pool.close()
pool.join()
else:
output = _serial_map_or_starmap(function, iterable, args, kwargs,
progress, map_or_starmap)
return output | Shared function between parmap.map and parmap.starmap.
Refer to those functions for details. |
def map(function, iterable, *args, **kwargs):
return _map_or_starmap(function, iterable, args, kwargs, "map") | This function is equivalent to:
>>> [function(x, args[0], args[1],...) for x in iterable]
:param pm_parallel: Force parallelization on/off
:type pm_parallel: bool
:param pm_chunksize: see :py:class:`multiprocessing.pool.Pool`
:type pm_chunksize: int
:param pm_pool: Pass an existing pool
:type pm_pool: multiprocessing.pool.Pool
:param pm_processes: Number of processes to use in the pool. See
:py:class:`multiprocessing.pool.Pool`
:type pm_processes: int
:param pm_pbar: Show progress bar
:type pm_pbar: bool |
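Usage sketch: extra positional arguments are forwarded to every call, so the line below is equivalent to [add(x, 10) for x in range(5)] run in a pool of two workers.

import parmap

def add(x, y):
    return x + y

print(parmap.map(add, range(5), 10, pm_processes=2))  # [10, 11, 12, 13, 14]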
def starmap(function, iterables, *args, **kwargs):
return _map_or_starmap(function, iterables, args, kwargs, "starmap") | Equivalent to:
>>> return ([function(x1,x2,x3,..., args[0], args[1],...) for
>>> (x1,x2,x3...) in iterable])
:param pm_parallel: Force parallelization on/off
:type pm_parallel: bool
:param pm_chunksize: see :py:class:`multiprocessing.pool.Pool`
:type pm_chunksize: int
:param pm_pool: Pass an existing pool
:type pm_pool: multiprocessing.pool.Pool
:param pm_processes: Number of processes to use in the pool. See
:py:class:`multiprocessing.pool.Pool`
:type pm_processes: int
:param pm_pbar: Show progress bar
:type pm_pbar: bool |
def _map_or_starmap_async(function, iterable, args, kwargs, map_or_starmap):
arg_newarg = (("parallel", "pm_parallel"), ("chunksize", "pm_chunksize"),
("pool", "pm_pool"), ("processes", "pm_processes"),
("callback", "pm_callback"),
("error_callback", "pm_error_callback"))
kwargs = _deprecated_kwargs(kwargs, arg_newarg)
chunksize = kwargs.pop("pm_chunksize", None)
callback = kwargs.pop("pm_callback", None)
error_callback = kwargs.pop("pm_error_callback", None)
parallel, pool, close_pool = _create_pool(kwargs)
# Map:
if parallel:
func_star = _get_helper_func(map_or_starmap)
try:
if sys.version_info[0] == 2: # does not support error_callback
result = pool.map_async(func_star,
izip(repeat(function),
iterable,
repeat(list(args)),
repeat(kwargs)),
chunksize, callback)
else:
result = pool.map_async(func_star,
izip(repeat(function),
iterable,
repeat(list(args)),
repeat(kwargs)),
chunksize, callback, error_callback)
finally:
if close_pool:
pool.close()
result = _ParallelAsyncResult(result, pool)
else:
result = _ParallelAsyncResult(result)
else:
values = _serial_map_or_starmap(function, iterable, args, kwargs,
False, map_or_starmap)
result = _DummyAsyncResult(values)
return result | Shared function between parmap.map_async and parmap.starmap_async.
Refer to those functions for details. |
def map_async(function, iterable, *args, **kwargs):
return _map_or_starmap_async(function, iterable, args, kwargs, "map") | This function is the multiprocessing.Pool.map_async version that
supports multiple arguments.
>>> [function(x, args[0], args[1],...) for x in iterable]
:param pm_parallel: Force parallelization on/off. If False, the
function won't be asynchronous.
:type pm_parallel: bool
:param pm_chunksize: see :py:class:`multiprocessing.pool.Pool`
:type pm_chunksize: int
:param pm_callback: see :py:class:`multiprocessing.pool.Pool`
:type pm_callback: function
:param pm_error_callback: (not on python 2) see
:py:class:`multiprocessing.pool.Pool`
:type pm_error_callback: function
:param pm_pool: Pass an existing pool.
:type pm_pool: multiprocessing.pool.Pool
:param pm_processes: Number of processes to use in the pool. See
:py:class:`multiprocessing.pool.Pool`
:type pm_processes: int |
def starmap_async(function, iterables, *args, **kwargs):
return _map_or_starmap_async(function, iterables, args, kwargs, "starmap") | This function is the multiprocessing.Pool.starmap_async version that
supports multiple arguments.
>>> return ([function(x1,x2,x3,..., args[0], args[1],...) for
>>> (x1,x2,x3...) in iterable])
:param pm_parallel: Force parallelization on/off. If False, the
function won't be asynchronous.
:type pm_parallel: bool
:param pm_chunksize: see :py:class:`multiprocessing.pool.Pool`
:type pm_chunksize: int
:param pm_callback: see :py:class:`multiprocessing.pool.Pool`
:type pm_callback: function
:param pm_error_callback: see :py:class:`multiprocessing.pool.Pool`
:type pm_error_callback: function
:param pm_pool: Pass an existing pool.
:type pm_pool: multiprocessing.pool.Pool
:param pm_processes: Number of processes to use in the pool. See
:py:class:`multiprocessing.pool.Pool`
:type pm_processes: int |
def lookup_domain(domain, nameservers=[], rtype="A",
exclude_nameservers=[], timeout=2):
dns_exp = DNSQuery(domains=[domain], nameservers=nameservers, rtype=rtype,
exclude_nameservers=exclude_nameservers, timeout=timeout)
return dns_exp.lookup_domain(domain) | Wrapper for DNSQuery method |
def parse_out_ips(message):
ips = []
for entry in message.answer:
for rdata in entry.items:
ips.append(rdata.to_text())
return ips | Given a message, parse out the ips in the answer |
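A sketch using dnspython, assuming `wire` holds a raw DNS response read off a socket (as in send_chaos_queries() below):

import dns.message

response = dns.message.from_wire(wire)
ips = parse_out_ips(response)  # e.g. a list of dotted quads for an A answer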
def send_chaos_queries(self):
names = ["HOSTNAME.BIND", "VERSION.BIND", "ID.SERVER"]
self.results = {'exp-name': "chaos-queries"}
for name in names:
self.results[name] = {}
for nameserver in self.nameservers:
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.settimeout(self.timeout)
query = dns.message.make_query(name,
dns.rdatatype.from_text("TXT"),
dns.rdataclass.from_text("CH"))
sock.sendto(query.to_wire(), (nameserver, 53))
reads, _, _ = select.select([sock], [], [], self.timeout)
if len(reads) == 0:
self.results[name][nameserver] = None
else:
response = reads[0].recvfrom(4096)[0]
self.results[name][nameserver] = b64encode(response)
return self.results | Send chaos queries to identify the DNS server and its manufacturer
Note: we send 2 queries for BIND stuff per RFC 4892 and 1
query per RFC 6304
Note: we are not waiting on a second response because we
shouldn't be getting injected packets here |
def lookup_domains(self):
thread_error = False
thread_wait_timeout = 200
ind = 1
total_item_count = len(self.domains)
for domain in self.domains:
for nameserver in self.nameservers:
wait_time = 0
while threading.active_count() > self.max_threads:
time.sleep(1)
wait_time += 1
if wait_time > thread_wait_timeout:
thread_error = True
break
if thread_error:
self.results["error"] = "Threads took too long to finish."
break
log_prefix = "%d/%d: " % (ind, total_item_count)
thread = threading.Thread(target=self.lookup_domain,
args=(domain, nameserver,
log_prefix))
thread.setDaemon(1)
thread_open_success = False
retries = 0
while not thread_open_success and retries < MAX_THREAD_START_RETRY:
try:
thread.start()
self.threads.append(thread)
thread_open_success = True
except:
retries += 1
time.sleep(THREAD_START_DELAY)
logging.error("%sThread start failed for %s, retrying... (%d/%d)" % (log_prefix, domain, retries, MAX_THREAD_START_RETRY))
if retries == MAX_THREAD_START_RETRY:
logging.error("%sCan't start a new thread for %s after %d retries." % (log_prefix, domain, retries))
if thread_error:
break
ind += 1
for thread in self.threads:
thread.join(self.timeout * 3)
return self.results | More complex DNS primitive that looks up domains concurrently
Note: if you want to look up multiple domains, you should use
this function |
def start(self, timeout=None):
self.thread.start()
start_time = time.time()
if not timeout:
timeout = self.timeout
# every second, check the condition of the thread and return
# control to the user if appropriate
while start_time + timeout > time.time():
self.thread.join(1)
if self.started:
return True
if self.error:
return False
return False | Start running the command |
def stop(self, timeout=None):
if not timeout:
timeout = self.timeout
self.kill_switch()
# Send the signal to all the process groups
self.process.kill()
self.thread.join(timeout)
try:
os.killpg(os.getpgid(self.process.pid), signal.SIGTERM)
except:
pass
if self.stopped:
return True
else:
return False | Stop the given command |
def _traceroute_callback(self, line, kill_switch):
line = line.lower()
if "traceroute to" in line:
self.started = True
# need to run as root but not running as root.
# usually happens when doing TCP and ICMP traceroute.
if "enough privileges" in line:
self.error = True
self.kill_switch()
self.stopped = True
# name resolution failed
if "service not known" in line:
self.error = True
self.kill_switch()
self.stopped = True | Callback function to handle traceroute.
:param self:
:param line:
:param kill_switch:
:return: |
def output_callback(self, line, kill_switch):
self.notifications += line + "\n"
if "Initialization Sequence Completed" in line:
self.started = True
if "ERROR:" in line or "Cannot resolve host address:" in line:
self.error = True
if "process exiting" in line:
self.stopped = True | Set status of openvpn according to what we process |
def start(self, timeout=None):
if not timeout:
timeout = self.timeout
self.thread.start()
start_time = time.time()
while start_time + timeout > time.time():
self.thread.join(1)
if self.error or self.started:
break
if self.started:
logging.info("OpenVPN connected")
# append instance to connected list
OpenVPN.connected_instances.append(self)
else:
logging.warn("OpenVPN not started")
for line in self.notifications.split('\n'):
logging.warn("OpenVPN output:\t\t%s" % line) | Start OpenVPN and block until the connection is opened or there is
an error
:param timeout: time in seconds to wait for process to start
:return: |
def stop(self, timeout=None):
if not timeout:
timeout = self.timeout
os.killpg(os.getpgid(self.process.pid), signal.SIGTERM)
self.thread.join(timeout)
if self.stopped:
logging.info("OpenVPN stopped")
if self in OpenVPN.connected_instances:
OpenVPN.connected_instances.remove(self)
else:
logging.error("Cannot stop OpenVPN!")
for line in self.notifications.split('\n'):
logging.warn("OpenVPN output:\t\t%s" % line) | Stop OpenVPN process group
:param timeout: time in seconds to wait for process to stop
:return: |
def load_experiments(self):
logging.debug("Loading experiments.")
# look for experiments in experiments directory
exp_dir = self.config['dirs']['experiments_dir']
for path in glob.glob(os.path.join(exp_dir, '[!_]*.py')):
# get name of file and path
name, ext = os.path.splitext(os.path.basename(path))
# load the experiment
try:
# do not load modules that have already been loaded
if name in loaded_modules:
continue
imp.load_source(name, path)
loaded_modules.add(name)
logging.debug("Loaded experiment \"%s(%s)\"." % (name, path))
except Exception as exception:
logging.exception("Failed to load experiment %s: %s" %
(name, exception))
logging.debug("Finished loading experiments.")
# return dict of experiment names and classes
return ExperimentList.experiments | Load experiment modules from the experiments directory and return the dict of experiment names and classes. |
def get_meta(self):
# get the normalized IP if we don't already have it
if self._meta is None:
external_ip = get_external_ip()
if external_ip:
self._meta = get_meta(self.config, external_ip)
else:
raise Exception("Unable to get public IP")
if 'custom_meta' in self.config:
self._meta['custom_meta'] = self.config['custom_meta']
return self._meta | we only want to get the meta information (our normalized IP) once,
so we are going to do lazy instantiation to improve performance |
def _tcpdump_callback(self, line, kill_switch):
line = line.lower()
if ("listening" in line) or ("reading" in line):
self.started = True
if ("no suitable device" in line):
self.error = True
self.kill_switch()
if "by kernel" in line:
self.stopped = True | Callback function to handle tcpdump |
def meta_redirect(content):
decoded = content.decode("utf-8", errors="replace")
try:
soup = BeautifulSoup.BeautifulSoup(decoded)
except Exception as e:
return None
result = soup.find("meta", attrs={"http-equiv": re.compile("^refresh$", re.I)})
if result:
try:
wait, text = result["content"].split(";")
text = text.strip()
if text.lower().startswith("url="):
url = text[4:]
return url
except:
# there are normal meta tag with refresh that are not
# redirect and don't have a URL in it
pass
return None | Returns redirecting URL if there is a HTML refresh meta tag,
returns None otherwise
:param content: HTML content |
def _get_http_request(netloc, path="/", headers=None, ssl=False):
if ssl:
port = 443
else:
port = 80
host = netloc
if len(netloc.split(":")) == 2:
host, port = netloc.split(":")
request = {"host": host,
"port": port,
"path": path,
"ssl": ssl,
"method": "GET"}
if headers:
request["headers"] = headers
response = {}
try:
conn = ICHTTPConnection(host=host, port=port, timeout=10)
conn.request(path, headers, ssl, timeout=10)
response["status"] = conn.status
response["reason"] = conn.reason
response["headers"] = conn.headers
body = conn.body
try:
response["body"] = body.encode('utf-8')
except UnicodeDecodeError:
# if utf-8 fails to encode, just use base64
response["body.b64"] = body.encode('base64')
except Exception as err:
response["failure"] = str(err)
result = {"response": response,
"request": request}
return result | Actually performs the HTTP GET. Moved to its own private method since
it is called several times for following redirects
:param netloc:
:param path:
:param headers:
:param ssl:
:return: |
def create_script_for_location(content, destination):
temp = tempfile.NamedTemporaryFile(mode='w', delete=False)
temp.write(content)
temp.close()
shutil.move(temp.name, destination)
cur_perms = os.stat(destination).st_mode
set_perms = cur_perms | stat.S_IXOTH | stat.S_IXGRP | stat.S_IXUSR
os.chmod(destination, set_perms) | Create a script with the given content, mv it to the
destination, and make it executable
Parameters:
content- the content to put in the script
destination- the directory to copy to
Note: due to constraints on os.rename, destination must be an
absolute path to a file, not just a directory |
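Usage sketch (the destination path is illustrative and requires write access to the target directory):

create_script_for_location("#!/bin/bash\necho hello\n",
                           "/etc/cron.hourly/centinel-example")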
def daemonize(package, bin_loc, user):
path = "/etc/cron.hourly/centinel-" + user
if user != "root":
# create a script to run centinel every hour as the current user
hourly = "".join(["#!/bin/bash\n",
"# cron job for centinel\n",
"su ", user, " -c '", bin_loc, " --sync'\n",
"su ", user, " -c '", bin_loc, "'\n",
"su ", user, " -c '", bin_loc, " --sync'\n"])
else:
# create a script to run centinel every hour as root
hourly = "".join(["#!/bin/bash\n",
"# cron job for centinel\n",
bin_loc, " --sync\n",
bin_loc, "\n",
bin_loc, " --sync\n"])
create_script_for_location(hourly, path)
# create a script to get the client to autoupdate every day
if package is None:
return
updater = "".join(["#!/bin/bash\n",
"# autoupdater for centinel\n"
"sudo pip install --upgrade ", package, "\n"])
create_script_for_location(updater, "/etc/cron.daily/centinel-autoupdate")
print "Successfully created cron jobs for user " + user | Create crontab entries to run centinel every hour and
autoupdate every day
Parameters:
package- name of the currently installed package (will be used for
autoupdate). If this parameter is None, the autoupdater will
not be used
bin_loc- location of the centinel binary/script.
Note: this works by creating temporary files, adding the content
of the cron scripts to these temporary files, moving these files
into the appropriate cron folders, and making these scripts
executable
Note: if the script already exists, this will delete it |
def create_config_files(directory):
# get the config file template
template_url = ("https://securenetconnection.com/vpnconfig/"
"openvpn-template.ovpn")
resp = requests.get(template_url)
resp.raise_for_status()
template = resp.content
# get the available servers and create a config file for each server
server_url = ("https://securenetconnection.com/vpnconfig/"
"servers-cli.php")
resp = requests.get(server_url)
resp.raise_for_status()
servers = resp.content.split("\n")
if not os.path.exists(directory):
os.makedirs(directory)
with open(os.path.join(directory, "servers.txt"), 'w') as f:
f.write(resp.content)
for server_line in servers:
if server_line.strip() == "":
continue
server_line = server_line.split("|")
try:
ip, desc, country, udp_sup, tcp_sup = server_line
except ValueError:
ip, desc, country, udp_sup, tcp_sup, no_rand = server_line
with open(os.path.join(directory, ip + ".ovpn"), 'w') as file_o:
file_o.write(template)
# create tcp if available, else udp
tcp_sup = tcp_sup.strip()
if tcp_sup:
port, proto = 443, "tcp"
else:
port, proto = 53, "udp"
file_o.write("remote {0} {1}\n".format(ip, port))
file_o.write("proto {0}\n".format(proto))
# add automatic dns server update
file_o.write("up /etc/openvpn/update-resolv-conf\n")
file_o.write("down /etc/openvpn/update-resolv-conf\n") | Create all available VPN configuration files in the given directory
Note: I am basically just following along with what their script
client does |
def informed_consent(self):
if self.typeable_handle is None:
consent_url = [self.config['server']['server_url'],
"/get_initial_consent?username="]
consent_url.append(urlsafe_b64encode(self.username))
consent_url.append("&password=")
consent_url.append(urlsafe_b64encode(self.password))
else:
consent_url = [self.config['server']['server_url'],
"/consent/"]
consent_url.append(self.typeable_handle)
consent_url = "".join(consent_url)
print "Please go to %s to give your consent." % (consent_url)
return consent_url | Create a URL for the user to give their consent through |
def create_config_files(directory):
# Some constant strings
vpn_gate_url = "http://www.vpngate.net/api/iphone/"
if not os.path.exists(directory):
os.makedirs(directory)
# get csv into memory
csv_str = ""
logging.info("Downloading info from VPN Gate API...")
r = requests.get(vpn_gate_url)
for line in r.text.split('\n'):
csv_str += line.encode('utf-8')
csv_str += "\n"
# convert csv string to string IO
f = StringIO.StringIO(csv_str)
# generate vpn dict
vpn_dict = {}
reader = csv.reader(f)
reader.next()
reader.next()
for row in reader:
if len(row) == 15:
alpha2 = row[6]
vpn_dict[alpha2] = vpn_dict.get(alpha2, [])
vpn_dict[alpha2].append({
"vpn_name": row[0],
"ip": row[1],
"country_name": row[5],
"alpha2": alpha2,
"openvpn_config": b64decode(row[-1])
})
f.close()
server_country = {}
# write config files
for country in vpn_dict:
for data in vpn_dict[country]:
config_filename = "{}.ovpn".format(data['ip'])
file_path = os.path.join(directory, config_filename)
with open(file_path, 'w') as f:
f.write(data['openvpn_config'])
f.write("up /etc/openvpn/update-resolv-conf\n")
f.write("down /etc/openvpn/update-resolv-conf\n")
server_country[data['ip']] = country
with open(os.path.join(directory, 'servers.txt'), 'w') as f:
for ip in server_country:
f.write('|'.join([ip, server_country[ip]]) + '\n') | Initialize directory ready for vpn walker
:param directory: the path where you want this to happen
:return: |
def return_abs_path(directory, path):
if directory is None or path is None:
return
directory = os.path.expanduser(directory)
return os.path.abspath(os.path.join(directory, path)) | Unfortunately, Python is not smart enough to return an absolute
path with tilde expansion, so I am writing functionality to do this
:param directory:
:param path:
:return: |
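For example, for a user whose home directory is /home/alice (illustrative):

return_abs_path('~/centinel', 'results')  # -> '/home/alice/centinel/results'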
def parse_config(self, config_file):
with open(config_file, 'r') as f:
config = json.load(f)
self.params = config
if self.params['proxy']['proxy_type']:
self.params['proxy'] = {self.params['proxy']['proxy_type']:
self.params['proxy']['proxy_url']} | Given a configuration file, read in and interpret the results
:param config_file:
:return: |
def update(self, old, backup_path=None):
for category in old.params.keys():
for parameter in old.params[category].keys():
if (category in self.params and parameter in self.params[category] and
(old.params[category][parameter] != self.params[category][parameter]) and
(category != "version")):
print ("Config value '%s.%s' "
"in old configuration is different "
"from the new version\n"
"[old value] = %s\n"
"[new value] = %s"
"" % (category, parameter,
old.params[category][parameter],
self.params[category][parameter]))
answer = raw_input("Do you want to overwrite? ([y]/n) ")
while answer.lower() not in ['y', 'yes', 'n', 'no']:
answer = raw_input("Answer not recongnized. Enter 'y' or 'n'. ")
if answer in ['n', 'no']:
old_value = old.params[category][parameter]
self.params[category][parameter] = old_value
elif not (category in self.params and
parameter in self.params[category]):
print ("Deprecated config option '%s.%s' has "
"been removed." % (category, parameter))
if backup_path is not None:
old.write_out_config(backup_path)
print "Backup saved in %s." % backup_path | Update the old configuration file with new values.
:param old: old configuration to update.
:param backup_path: path to write a backup of the old config file.
:return: |
def write_out_config(self, config_file):
with open(config_file, 'w') as f:
json.dump(self.params, f, indent=2,
separators=(',', ': ')) | Write out the configuration file
:param config_file:
:return:
Note: this will erase all comments from the config file |
def setup_profile(self, firebug=True, netexport=True):
profile = webdriver.FirefoxProfile()
profile.set_preference("app.update.enabled", False)
if firebug:
profile.add_extension(os.path.join(self.cur_path, 'extensions/firebug-2.0.8.xpi'))
profile.set_preference("extensions.firebug.currentVersion", "2.0.8")
profile.set_preference("extensions.firebug.allPagesActivation", "on")
profile.set_preference("extensions.firebug.defaultPanelName", "net")
profile.set_preference("extensions.firebug.net.enableSites", True)
profile.set_preference("extensions.firebug.delayLoad", False)
profile.set_preference("extensions.firebug.onByDefault", True)
profile.set_preference("extensions.firebug.showFirstRunPage", False)
profile.set_preference("extensions.firebug.net.defaultPersist", True) # persist all redirection responses
if netexport:
har_path = os.path.join(self.cur_path, "har")
if not os.path.exists(har_path):
os.mkdir(har_path)
profile.add_extension(os.path.join(self.cur_path, 'extensions/netExport-0.9b7.xpi'))
profile.set_preference("extensions.firebug.DBG_NETEXPORT", True)
profile.set_preference("extensions.firebug.netexport.alwaysEnableAutoExport", True)
profile.set_preference("extensions.firebug.netexport.defaultLogDir", har_path)
profile.set_preference("extensions.firebug.netexport.includeResponseBodies", True)
return profile | Set up the profile for Firefox.
:param firebug: whether to add the firebug extension
:param netexport: whether to add the netexport extension
:return: a firefox profile object |
def divide_url(self, url):
if 'https://' in url:
host = url[8:].split('/')[0]
path = url[8 + len(host):]
elif 'http://' in url:
host = url[7:].split('/')[0]
path = url[7 + len(host):]
else:
host = url.split('/')[0]
path = url[len(host):]
return host, path | Divide a URL into its host and path parts. |
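For example:

assert divide_url('https://example.com/a/b') == ('example.com', '/a/b')
assert divide_url('example.com/a') == ('example.com', '/a')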
def get(self, host, files_count, path="/", ssl=False, external=None):
theme = "https" if ssl else "http"
url = host + path
http_url = theme + "://" + url
result = {}
try:
capture_path = os.getcwd() + '/'
har_file_path = capture_path + "har/"
# fc.load_page(self.driver, http_url)
fc.switch_tab(self.driver)
self.load_page(http_url)
print "driver get: " + http_url
time.sleep(2)
# if url[-1] == "/":
# f_name = url.split('/')[-2]
# else:
# f_name = url.split('/')[-1]
# fc.save_html(self.driver, f_name, os.path.join(capture_path, "htmls/"))
# fc.save_screenshot(self.driver, f_name, os.path.join(capture_path, "screenshots/"))
result = self.wrap_results(url=http_url, files_count=files_count, fd=har_file_path)
if external is not None:
external[http_url] = result
except Exception as e:
result['error'] = e.message
print e
return result | Send a GET request to a URL and wrap the results.
:param host (str): the host name of the url
:param files_count (int): counter passed through to wrap_results when collecting har results
:param path (str): the path of the url (starts with "/")
:param ssl (bool): whether to request over https
:param external (dict): optional dict in which the result is also stored, keyed by url
:return (dict): the result of the test url |
def run(self, input_files, url=None, verbose=0):
if not url and not input_files:
logging.warning("No input file")
return {"error": "no inputs"}
results = {}
self.open_virtual_display()
if verbose > 0:
log_file = sys.stdout
else:
log_file = None
# set up firefox driver
self.binary = FirefoxBinary(os.path.join(self.cur_path, 'firefox/firefox'), log_file=log_file)
self.profile = self.setup_profile()
self.driver = webdriver.Firefox(firefox_profile=self.profile, firefox_binary=self.binary, timeout=60)
self.driver.set_page_load_timeout(60)
isfile = False
if url:
host, path = self.divide_url(url)
results[url] = self.get(host, path)
else:
isfile = True
for input_file in input_files.items():
logging.info("Testing input file %s..." % (input_file[0]))
self.run_file(input_file, results)
# foctor_core will quit the driver by itself so we only quit the driver when we don't use foctor core
if not isfile:
logging.info("Quit driver")
self.driver.quit()
self.close_virtual_display()
logging.debug("Deleting har folder")
shutil.rmtree(os.path.join(self.cur_path, 'har'))
return results | Run the headless browser with the given input.
If a url is given, only that url is tested and input_files is ignored.
:param url:
:param input_files: the name of the file in "index url" format. i.e.
1, www.facebook.com
1, www.google.com
...
:param verbose:
:return: |
def hash_folder(folder, regex='[!_]*'):
file_hashes = {}
for path in glob.glob(os.path.join(folder, regex)):
# exclude folders
if not os.path.isfile(path):
continue
with open(path, 'r') as fileP:
md5_hash = hashlib.md5(fileP.read()).digest()
file_name = os.path.basename(path)
file_hashes[file_name] = urlsafe_b64encode(md5_hash)
return file_hashes | Compute the md5 sum of each file in the folder and return them to the caller.
:param folder: the folder to compute the sums over
:param regex: a glob pattern to limit the files we match
:return: a dict mapping file names to url-safe base64-encoded md5 digests
Note: by default we will hash every file in the folder
Note: we will not match anything that starts with an underscore
def compute_files_to_download(client_hashes, server_hashes):
to_dload, to_delete = [], []
for filename in server_hashes:
if filename not in client_hashes:
to_dload.append(filename)
continue
if client_hashes[filename] != server_hashes[filename]:
to_dload.append(filename)
for filename in client_hashes:
if filename not in server_hashes:
to_delete.append(filename)
return [to_dload, to_delete] | Given a dictionary of file hashes from the client and the
server, specify which files should be downloaded from the server
:param client_hashes: a dictionary where the filenames are keys and the
values are md5 hashes as strings
:param server_hashes: a dictionary where the filenames are keys and the
values are md5 hashes as strings
:return: a list of 2 lists -> [to_dload, to_delete]
to_dload- a list of filenames to get from the server
to_delete- a list of filenames to delete from the folder
Note: we will get a file from the server if a) it is not on the
client or b) the md5 differs between the client and server
Note: we will mark a file for deletion if it is not available on
the server |
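A minimal sketch of how the two helpers combine into a sync step (paths and hashes hypothetical):

client_hashes = hash_folder('/tmp/cache')         # e.g. {'a.txt': 'xxx', 'old.txt': 'zzz'}
server_hashes = {'a.txt': 'xxx', 'b.txt': 'yyy'}
to_dload, to_delete = compute_files_to_download(client_hashes, server_hashes)
# to_dload == ['b.txt']    -> fetch from the server
# to_delete == ['old.txt'] -> remove locally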
def spinner(beep=False, disable=False, force=False):
return Spinner(beep, disable, force) | This function creates a context manager that is used to display a
spinner on stdout as long as the context has not exited.
The spinner is created only if stdout is not redirected, or if the spinner
is forced using the `force` parameter.
Parameters
----------
beep : bool
Beep when spinner finishes.
disable : bool
Hide spinner.
force : bool
Force creation of spinner even when stdout is redirected.
Example
-------
with spinner():
do_something()
do_something_else() |
def verifier(self, url):
webbrowser.open(url)
print('A browser should have opened up with a link to allow us to access')
print('your account, follow the instructions on the link and paste the verifier')
print('Code into here to give us access, if the browser didn\'t open, the link is:')
print(url)
print()
return input('Verifier: ').strip() | Ask the user to open the link in the browser, authorize the app, and enter the verifier code.
def write_config(self):
if not os.path.exists(os.path.dirname(self.config_file)):
os.makedirs(os.path.dirname(self.config_file))
with open(self.config_file, 'w') as f:
f.write(json.dumps(self.config)) | Write config to file.
def read_config(self):
try:
with open(self.config_file, 'r') as f:
self.config = json.loads(f.read())
except IOError:
return False
return True | Read config from file |
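A small round-trip sketch, assuming `cfg` is an instance with config_file pointing at a writable path:

cfg.config = {'token': 'abc123'}
cfg.write_config()        # creates parent directories and dumps JSON
if cfg.read_config():     # False only if the file cannot be opened
    print(cfg.config['token'])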
def post_note(self):
if self.args.note_title:
note_title = self.args.note_title
else:
note_title = None
note_content = self.args.note_content
mynote = self.pump.Note(display_name=note_title, content=note_content)
mynote.to = self.pump.me.followers
mynote.cc = self.pump.Public
mynote.send()
return mynote.id or None | Post a note and return its id (or None if it has none).
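The same flow outside the argument handling, assuming `pump` is an authenticated PyPump instance (names and values hypothetical):

note = pump.Note(display_name='Hello', content='First post!')
note.to = pump.me.followers   # address the note to the owner's followers
note.cc = pump.Public         # and make it publicly visible
note.send()
print(note.id)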
def get_obj_id(self, item):
if item is not None:
if isinstance(item, six.string_types):
return item
elif hasattr(item, 'id'):
return item.id | Get the id of a PumpObject.
:param item: id string or PumpObject.
:return: the id string, or None if it cannot be determined.
def get_page(self, url):
if url:
data = self.feed._request(url, offset=self._offset, since=self._since, before=self._before)
# set values to False to avoid using them for next request
self._before = False if self._before is not None else None
self._since = False if self._since is not None else None
if getattr(self.feed, 'issue65', False):
self._offset = False
if self._since is not None:
# we want oldest items first when using 'since'
return reversed(data['items'])
else:
return data['items']
else:
return [] | Get a page of items from the API.
def get_cached(self):
def id_in_list(items, obj_id):
if obj_id:
if [i for i in items if i.id == obj_id]:
return True
else:
raise PyPumpException("id %r not in feed." % obj_id)
tmp = []
if self._before is not None:
# return list based on before param
if not id_in_list(self.feed._items, self._before):
return tmp
if isinstance(self._before, six.string_types):
found = False
for i in self.feed._items:
if not found:
if i.id == self._before:
found = True
continue
else:
tmp.append(i)
self._before = False
return tmp
if self._since is not None:
# return list based on since param
if not id_in_list(self.feed._items, self._since):
return tmp
if isinstance(self._since, six.string_types):
found = False
for i in self.feed._items:
if i.id == self._since:
found = True
break
else:
tmp.append(i)
self._since = False
return reversed(tmp)
if not hasattr(self, 'usedcache'):
self.usedcache = True # invalidate cache
if isinstance(self._offset, int):
# return list based on offset
return self.feed._items[self._offset:]
return self.feed._items
else:
return tmp | Get items from the feed cache, emulating how the API handles the offset/since/before parameters.
def done(self):
if self._done:
return self._done
if self._limit is None:
self._done = False
elif self.itemcount >= self._limit:
self._done = True
return self._done | Check if we should stop returning objects |
def _build_cache(self):
self.cache = []
if self.done:
return
for i in (self.get_cached() if self._cached else self.get_page(self.url)):
if not self._cached:
# some objects don't have objectType set (inbox activities)
if not i.get("objectType"):
i["objectType"] = self.feed.object_types[0]
obj = Mapper(pypump=self.feed._pump).get_object(i)
else:
obj = i
self.cache.append(obj)
# ran out of items
if len(self.cache) <= 0:
self._done = True
# check what to do next time
if getattr(self.feed, 'issue65', False):
# work around API bug for favorites feed, see https://github.com/xray7224/PyPump/issues/65
if self._offset is None:
self._offset = 0
self._offset += 20
elif self._since is not None:
if self.feed.links.get('prev'):
self.url = self.feed.links['prev']
del self.feed.links['prev'] # avoid using it again
else:
if self.feed.links.get('next'):
self.url = self.feed.links['next']
del self.feed.links['next'] # avoid using it again
else:
self.url = None | Build a list of objects from the feed's cached items or from an API page.
def items(self, offset=None, limit=20, since=None, before=None, *args, **kwargs):
return ItemList(self, offset=offset, limit=limit, since=since, before=before, cached=self.is_cached) | Get a feed's items.
:param offset: Amount of items to skip before returning data
:param since: Return items added after this id (ordered old -> new)
:param before: Return items added before this id (ordered new -> old)
:param limit: Amount of items to return |
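For example, assuming `pump.me.inbox` is a feed object, the parameters above combine like this (the activity id is hypothetical):

for activity in pump.me.inbox.items(limit=5):
    print(activity)
# fetch items added after a known activity, oldest first
newer = pump.me.inbox.items(since='https://example.com/api/activity/abc', limit=10)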
def _subfeed(self, feedname):
url = self.url
if not url.endswith("/"):
url += "/"
return url + feedname | Used for Inbox/Outbox major/minor/direct subfeeds |
def direct(self):
url = self._subfeed("direct")
if "direct" in self.url or "major" in self.url or "minor" in self.url:
return self
if self._direct is None:
self._direct = self.__class__(url, pypump=self._pump)
return self._direct | Direct inbox feed,
contains activities addressed directly to the owner of the inbox. |
def major(self):
url = self._subfeed("major")
if "major" in self.url or "minor" in self.url:
return self
if self._major is None:
self._major = self.__class__(url, pypump=self._pump)
return self._major | Major inbox feed, contains major activities such as notes and images. |
def minor(self):
url = self._subfeed("minor")
if "minor" in self.url or "major" in self.url:
return self
if self._minor is None:
self._minor = self.__class__(url, pypump=self._pump)
return self._minor | Minor inbox feed, contains minor activities such as likes, shares and follows. |
def create(self, display_name, content=None):
activity = {
"verb": "create",
"object": {
"objectType": "collection",
"objectTypes": [self.membertype],
"displayName": display_name,
"content": content
}
}
if self._post_activity(activity, unserialize=False):
return self[display_name] | Create a new user list :class:`collection <pypump.models.collection.Collection>`.
:param display_name: List title.
:param content: (optional) List description.
Example:
>>> pump.me.lists.create(display_name='Friends', content='List of friends')
>>> myfriends = pump.me.lists['Friends']
>>> print(myfriends)
Friends |
def serialize(self):
data = super(Note, self).serialize()
data.update({
"verb": "post",
"object": {
"objectType": self.object_type,
"content": self.content,
}
})
if self.display_name:
data["object"]["displayName"] = self.display_name
return data | Converts the post to something compatible with `json.dumps` |
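For illustration, the serialized structure for a titled note looks roughly like this (content hypothetical; fields contributed by the base-class serialize() are omitted):

note = Note(display_name='Hello', content='First post!')  # hypothetical construction
note.serialize()
# -> {'verb': 'post',
#     'object': {'objectType': 'note', 'content': 'First post!', 'displayName': 'Hello'}}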
def context(self):
type = "client_associate" if self.key is None else "client_update"
data = {
"type": type,
"application_type": self.type,
}
# is this an update?
if self.key:
data["client_id"] = self.key
data["client_secret"] = self.secret
# Add optional params
if self.name:
data["application_name"] = self.name
if self.logo:
data["logo_url"] = self.logo
if self.contacts:
# space separated list
data["contacts"] = " ".join(self.contacts)
if self.redirect:
data["redirect_uri"] = " ".join(self.redirect)
# Convert to JSON and send
return json.dumps(data) | Provides request context |
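For a first-time registration (key is None) the resulting JSON looks roughly like this; note that context is accessed without parentheses in request() below, so it appears to be a property (attribute values hypothetical):

client.name = 'MyApp'                 # hypothetical
client.contacts = ['me@example.com']  # hypothetical
client.context
# -> '{"type": "client_associate", "application_type": "native",
#      "application_name": "MyApp", "contacts": "me@example.com"}'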
def request(self, server=None):
request = {
"headers": {"Content-Type": "application/json"},
"timeout": self._pump.timeout,
"data": self.context,
}
url = "{proto}://{server}/{endpoint}".format(
proto=self._pump.protocol,
server=server or self.server,
endpoint=self.ENDPOINT,
)
response = self._pump._requester(requests.post, url, **request)
try:
server_data = response.json()
except ValueError:
raise ClientException(response.content)
if "error" in server_data:
raise ClientException(server_data["error"], self.context)
_log.debug("Client registration recieved: %(id)s %(secret)s %(expire)s", {
"id": server_data["client_id"],
"secret": server_data["client_secret"],
"expire": server_data["expires_at"],
})
return server_data | Sends the request |
def register(self, server=None):
if (self.key or self.secret):
return self.update()
server_data = self.request(server)
self.key = server_data["client_id"]
self.secret = server_data["client_secret"]
self.expirey = server_data["expires_at"] | Registers the client with the Pump API, retrieving the id and secret.
def update(self):
error = ""
if self.key is None:
error = "To update a client you need to provide a key"
if self.secret is None:
error = "To update a client you need to provide the secret"
if error:
raise ClientException(error)
self.request()
return True | Updates the information the Pump server has about the client |
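A hedged end-to-end sketch tying registration and update together (server name and attribute values hypothetical):

client.name = 'MyApp'
client.register('pump.example.com')   # no key/secret yet: sends client_associate
print(client.key, client.secret)      # filled in from the server response
client.logo = 'https://example.com/logo.png'
client.update()                       # sends client_update using the stored key/secret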