<SYSTEM_TASK:>
Sends all pending requests into the connection. The default is
<END_TASK>
<USER_TASK:>
Description:
def send_pending_requests(self):
"""Sends all pending requests into the connection. The default is
to only send pending data that fits into the socket without blocking.
This returns `True` if all data was sent or `False` if pending data
is left over.
""" |
assert_open(self)
unsent_commands = self.commands
if unsent_commands:
self.commands = []
if self.auto_batch:
unsent_commands = auto_batch_commands(unsent_commands)
buf = []
for command_name, args, options, promise in unsent_commands:
buf.append((command_name,) + tuple(args))
self.pending_responses.append((command_name, options, promise))
cmds = self.connection.pack_commands(buf)
self._send_buf.extend(cmds)
if not self._send_buf:
return True
self.send_buffer()
return not self._send_buf |
<SYSTEM_TASK:>
Waits for all responses to come back and resolves the
<END_TASK>
<USER_TASK:>
Description:
def wait_for_responses(self, client):
"""Waits for all responses to come back and resolves the
eventual results.
""" |
assert_open(self)
if self.has_pending_requests:
raise RuntimeError('Cannot wait for responses if there are '
'pending requests outstanding. You need '
'to wait for pending requests to be sent '
'first.')
pending = self.pending_responses
self.pending_responses = []
for command_name, options, promise in pending:
value = client.parse_response(
self.connection, command_name, **options)
promise.resolve(value) |
<SYSTEM_TASK:>
Returns the command buffer for the given command and arguments.
<END_TASK>
<USER_TASK:>
Description:
def _get_command_buffer(self, host_id, command_name):
"""Returns the command buffer for the given command and arguments.""" |
buf = self._cb_poll.get(host_id)
if buf is not None:
return buf
if self._max_concurrency is not None:
while len(self._cb_poll) >= self._max_concurrency:
self.join(timeout=1.0)
def connect():
return self.connection_pool.get_connection(
command_name, shard_hint=host_id)
buf = CommandBuffer(host_id, connect, self.auto_batch)
self._cb_poll.register(host_id, buf)
return buf |
<SYSTEM_TASK:>
This is called by the command buffer when it closes.
<END_TASK>
<USER_TASK:>
Description:
def _release_command_buffer(self, command_buffer):
"""This is called by the command buffer when it closes.""" |
if command_buffer.closed:
return
self._cb_poll.unregister(command_buffer.host_id)
self.connection_pool.release(command_buffer.connection)
command_buffer.connection = None |
<SYSTEM_TASK:>
Waits for all outstanding responses to come back or the timeout
<END_TASK>
<USER_TASK:>
Description:
def join(self, timeout=None):
"""Waits for all outstanding responses to come back or the timeout
to be hit.
""" |
remaining = timeout
while self._cb_poll and (remaining is None or remaining > 0):
now = time.time()
rv = self._cb_poll.poll(remaining)
if remaining is not None:
remaining -= (time.time() - now)
for command_buffer, event in rv:
# This command buffer still has pending requests which
# means we have to send them out first before we can read
# all the data from it.
if command_buffer.has_pending_requests:
if event == 'close':
self._try_reconnect(command_buffer)
elif event == 'write':
self._send_or_reconnect(command_buffer)
            # The general assumption is that all responses are available
# or this might block. On reading we do not use async
# receiving. This generally works because latency in the
# network is low and redis is super quick in sending. It
# does not make a lot of sense to complicate things here.
elif event in ('read', 'close'):
try:
command_buffer.wait_for_responses(self)
finally:
self._release_command_buffer(command_buffer)
if self._cb_poll and timeout is not None:
raise TimeoutError('Did not receive all data in time.') |
<SYSTEM_TASK:>
Temporarily retarget the client for one call. This is useful
<END_TASK>
<USER_TASK:>
Description:
def target(self, hosts):
"""Temporarily retarget the client for one call. This is useful
when having to deal with a subset of hosts for one call.
""" |
if self.__is_retargeted:
raise TypeError('Cannot use target more than once.')
rv = FanoutClient(hosts, connection_pool=self.connection_pool,
max_concurrency=self._max_concurrency)
rv._cb_poll = self._cb_poll
rv.__is_retargeted = True
return rv |
<SYSTEM_TASK:>
Temporarily retarget the client for one call to route
<END_TASK>
<USER_TASK:>
Description:
def target_key(self, key):
"""Temporarily retarget the client for one call to route
specifically to the one host that the given key routes to. In
that case the result on the promise is just the one host's value
instead of a dictionary.
.. versionadded:: 1.3
""" |
router = self.connection_pool.cluster.get_router()
host_id = router.get_host_for_key(key)
rv = self.target([host_id])
rv.__resolve_singular_result = True
return rv |
<SYSTEM_TASK:>
Returns a thread unsafe fanout client.
<END_TASK>
<USER_TASK:>
Description:
def get_fanout_client(self, hosts, max_concurrency=64,
auto_batch=None):
"""Returns a thread unsafe fanout client.
Returns an instance of :class:`FanoutClient`.
""" |
if auto_batch is None:
auto_batch = self.auto_batch
return FanoutClient(hosts, connection_pool=self.connection_pool,
max_concurrency=max_concurrency,
auto_batch=auto_batch) |
<SYSTEM_TASK:>
Returns a context manager for a map operation. This runs
<END_TASK>
<USER_TASK:>
Description:
def map(self, timeout=None, max_concurrency=64, auto_batch=None):
"""Returns a context manager for a map operation. This runs
multiple queries in parallel and then joins in the end to collect
all results.
In the context manager the client available is a
:class:`MappingClient`. Example usage::
results = {}
with cluster.map() as client:
for key in keys_to_fetch:
results[key] = client.get(key)
for key, promise in results.iteritems():
print '%s => %s' % (key, promise.value)
""" |
return MapManager(self.get_mapping_client(max_concurrency, auto_batch),
timeout=timeout) |
<SYSTEM_TASK:>
Creates a promise object resolved with a certain value.
<END_TASK>
<USER_TASK:>
Description:
def resolved(value):
"""Creates a promise object resolved with a certain value.""" |
p = Promise()
p._state = 'resolved'
p.value = value
return p |
<SYSTEM_TASK:>
Creates a promise object rejected with a certain reason.
<END_TASK>
<USER_TASK:>
Description:
def rejected(reason):
"""Creates a promise object rejected with a certain value.""" |
p = Promise()
p._state = 'rejected'
p.reason = reason
return p |
<SYSTEM_TASK:>
Resolves the promise with the given value.
<END_TASK>
<USER_TASK:>
Description:
def resolve(self, value):
"""Resolves the promise with the given value.""" |
if self is value:
raise TypeError('Cannot resolve promise with itself.')
if isinstance(value, Promise):
value.done(self.resolve, self.reject)
return
if self._state != 'pending':
raise RuntimeError('Promise is no longer pending.')
self.value = value
self._state = 'resolved'
callbacks = self._callbacks
self._callbacks = None
for callback in callbacks:
callback(value) |
<SYSTEM_TASK:>
Rejects the promise with the given reason.
<END_TASK>
<USER_TASK:>
Description:
def reject(self, reason):
"""Rejects the promise with the given reason.""" |
if self._state != 'pending':
raise RuntimeError('Promise is no longer pending.')
self.reason = reason
self._state = 'rejected'
errbacks = self._errbacks
self._errbacks = None
for errback in errbacks:
errback(reason) |
<SYSTEM_TASK:>
Attaches some callbacks to the promise and returns the promise.
<END_TASK>
<USER_TASK:>
Description:
def done(self, on_success=None, on_failure=None):
"""Attaches some callbacks to the promise and returns the promise.""" |
if on_success is not None:
if self._state == 'pending':
self._callbacks.append(on_success)
elif self._state == 'resolved':
on_success(self.value)
if on_failure is not None:
if self._state == 'pending':
self._errbacks.append(on_failure)
elif self._state == 'rejected':
on_failure(self.reason)
return self |
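Taken together, the promise helpers above compose as follows; a minimal sketch, assuming `Promise()` initializes `_state` to 'pending' and `_callbacks`/`_errbacks` to empty lists:

def show(value):
    print 'resolved with %s' % value

p = Promise()
p.done(on_success=show)       # still pending: callback is queued
p.resolve(42)                 # fires show(42)
resolved('hello').done(show)  # already resolved: show runs immediately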
<SYSTEM_TASK:>
Returns the key a command operates on.
<END_TASK>
<USER_TASK:>
Description:
def get_key(self, command, args):
"""Returns the key a command operates on.""" |
spec = COMMANDS.get(command.upper())
if spec is None:
raise UnroutableCommand('The command "%r" is unknown to the '
'router and cannot be handled as a '
'result.' % command)
if 'movablekeys' in spec['flags']:
        raise UnroutableCommand('The keys for "%r" are movable and '
                                'as such cannot be routed to a single '
                                'host.' % command)
keys = extract_keys(args, spec['key_spec'])
if len(keys) == 1:
return keys[0]
elif not keys:
        raise UnroutableCommand(
            'The command "%r" does not operate on a key which means '
            'that no suitable host could be determined. Consider '
            'using a fanout instead.' % command)
raise UnroutableCommand(
'The command "%r" operates on multiple keys (%d passed) which is '
'something that is not supported.' % (command, len(keys))) |
<SYSTEM_TASK:>
Returns the host this command should be executed against.
<END_TASK>
<USER_TASK:>
Description:
def get_host_for_command(self, command, args):
"""Returns the host this command should be executed against.""" |
return self.get_host_for_key(self.get_key(command, args)) |
<SYSTEM_TASK:>
Convert a decimal degrees (dd) value to a dms tuple.
<END_TASK>
<USER_TASK:>
Description:
def ddtodms(self, dd):
"""Take in dd string and convert to dms""" |
negative = dd < 0
dd = abs(dd)
    minutes, seconds = divmod(dd * 3600, 60)
    degrees, minutes = divmod(minutes, 60)
if negative:
if degrees > 0:
degrees = -degrees
elif minutes > 0:
minutes = -minutes
else:
seconds = -seconds
    return (degrees, minutes, seconds) |
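For example, assuming the method is bound to an instance `obj`, a worked conversion:

degrees, minutes, seconds = obj.ddtodms(-30.2638)
# abs(-30.2638) * 3600 = 108949.68 total seconds; divmod twice yields
# 30 degrees, 15 minutes and ~49.68 seconds, and the sign lands on the
# largest non-zero component: (-30.0, 15.0, 49.68)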
<SYSTEM_TASK:>
Obtain a proper file object.
<END_TASK>
<USER_TASK:>
Description:
def get_fobj(fname, mode='w+'):
"""Obtain a proper file object.
Parameters
----------
fname : string, file object, file descriptor
If a string or file descriptor, then we create a file object. If
*fname* is a file object, then we do nothing and ignore the specified
*mode* parameter.
mode : str
The mode of the file to be opened.
Returns
-------
fobj : file object
The file object.
close : bool
If *fname* was a string, then *close* will be *True* to signify that
the file object should be closed after writing to it. Otherwise,
*close* will be *False* signifying that the user, in essence,
created the file object already and that subsequent operations
should not close it.
""" |
if is_string_like(fname):
fobj = open(fname, mode)
close = True
elif hasattr(fname, 'write'):
# fname is a file-like object, perhaps a StringIO (for example)
fobj = fname
close = False
else:
# assume it is a file descriptor
fobj = os.fdopen(fname, mode)
close = False
return fobj, close |
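A short sketch of the three accepted input kinds (file names here are illustrative):

import io
import os

fobj, close = get_fobj('out.txt')     # path string: opened, close=True
fobj, close = get_fobj(io.BytesIO())  # file-like: passed through, close=False
fd = os.open('out2.txt', os.O_RDWR | os.O_CREAT)
fobj, close = get_fobj(fd)            # descriptor: wrapped via os.fdopen, close=False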
<SYSTEM_TASK:>
Load graph as defined by a DOT file.
<END_TASK>
<USER_TASK:>
Description:
def graph_from_dot_file(path):
"""Load graph as defined by a DOT file.
The file is assumed to be in DOT format. It will
be loaded, parsed and a Dot class will be returned,
representing the graph.
""" |
fd = open(path, 'rb')
data = fd.read()
fd.close()
return graph_from_dot_data(data) |
<SYSTEM_TASK:>
Creates a basic graph out of an edge list.
<END_TASK>
<USER_TASK:>
Description:
def graph_from_edges(edge_list, node_prefix='', directed=False):
"""Creates a basic graph out of an edge list.
The edge list has to be a list of tuples representing
the nodes connected by the edge.
The values can be anything: bool, int, float, str.
    If the graph is undirected, each edge only needs to appear once in
    the list; its symmetric counterpart is implied.
""" |
if edge_list is None:
edge_list = []
graph_type = "digraph" if directed else "graph"
with_prefix = functools.partial("{0}{1}".format, node_prefix)
graph = Dot(graph_type=graph_type)
for src, dst in edge_list:
src = with_prefix(src)
dst = with_prefix(dst)
graph.add_edge(Edge(src, dst))
return graph |
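For instance, a small undirected graph (a sketch, assuming `Dot` and `Edge` come from the same module):

edges = [(1, 2), (1, 3), (2, 4)]
graph = graph_from_edges(edges, node_prefix='n', directed=False)
# node names become 'n1', 'n2', ... via the "{0}{1}".format partial
print graph.to_string()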
<SYSTEM_TASK:>
Creates a basic graph out of an adjacency matrix.
<END_TASK>
<USER_TASK:>
Description:
def graph_from_adjacency_matrix(matrix, node_prefix='', directed=False):
"""Creates a basic graph out of an adjacency matrix.
The matrix has to be a list of rows of values
representing an adjacency matrix.
The values can be anything: bool, int, float, as long
as they can evaluate to True or False.
""" |
node_orig = 1
if directed:
graph = Dot(graph_type='digraph')
else:
graph = Dot(graph_type='graph')
for row in matrix:
if not directed:
skip = matrix.index(row)
r = row[skip:]
else:
skip = 0
r = row
node_dest = skip + 1
for e in r:
if e:
                graph.add_edge(
                    Edge(
                        node_prefix + str(node_orig),
                        node_prefix + str(node_dest)))
node_dest += 1
node_orig += 1
return graph |
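A sketch of the numbering convention: nodes count from 1, and for undirected graphs each row is only scanned from the diagonal onward, so the upper triangle suffices:

# 3-node triangle: edges 1-2, 1-3 and 2-3
matrix = [
    [0, 1, 1],
    [0, 0, 1],
    [0, 0, 0],
]
graph = graph_from_adjacency_matrix(matrix, node_prefix='n', directed=False)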
<SYSTEM_TASK:>
Creates a basic graph out of an incidence matrix.
<END_TASK>
<USER_TASK:>
Description:
def graph_from_incidence_matrix(matrix, node_prefix='', directed=False):
"""Creates a basic graph out of an incidence matrix.
The matrix has to be a list of rows of values
representing an incidence matrix.
The values can be anything: bool, int, float, as long
as they can evaluate to True or False.
""" |
if directed:
graph = Dot(graph_type='digraph')
else:
graph = Dot(graph_type='graph')
for row in matrix:
nodes = []
c = 1
for node in row:
if node:
nodes.append(c * node)
c += 1
nodes.sort()
if len(nodes) == 2:
graph.add_edge(
Edge(
                    node_prefix + str(abs(nodes[0])),
                    node_prefix + str(nodes[1])))
if not directed:
graph.set_simplify(True)
return graph |
<SYSTEM_TASK:>
Used by find_graphviz
<END_TASK>
<USER_TASK:>
Description:
def __find_executables(path):
"""Used by find_graphviz
path - single directory as a string
If any of the executables are found, it will return a dictionary
containing the program names as keys and their paths as values.
Otherwise returns None
""" |
success = False
progs = {
"dot": "",
"twopi": "",
"neato": "",
"circo": "",
"fdp": "",
"sfdp": "",
}
was_quoted = False
path = path.strip()
if path.startswith('"') and path.endswith('"'):
path = path[1:-1]
was_quoted = True
if not os.path.isdir(path):
return None
for prg in progs:
if progs[prg]:
continue
prg_path = os.path.join(path, prg)
prg_exe_path = prg_path + ".exe"
if os.path.exists(prg_path):
if was_quoted:
prg_path = "\"{}\"".format(prg_path)
progs[prg] = prg_path
success = True
elif os.path.exists(prg_exe_path):
if was_quoted:
prg_exe_path = "\"{}\"".format(prg_exe_path)
progs[prg] = prg_exe_path
success = True
if success:
return progs
return None |
<SYSTEM_TASK:>
Locate Graphviz's executables in the system.
<END_TASK>
<USER_TASK:>
Description:
def find_graphviz():
"""Locate Graphviz's executables in the system.
Tries three methods:
First: Windows Registry (Windows only)
This requires Mark Hammond's pywin32 is installed.
Secondly: Search the path
It will look for 'dot', 'twopi' and 'neato' in all the directories
specified in the PATH environment variable.
Thirdly: Default install location (Windows only)
It will look for 'dot', 'twopi' and 'neato' in the default install
location under the "Program Files" directory.
It will return a dictionary containing the program names as keys
and their paths as values.
If this fails, it returns None.
""" |
# Method 1 (Windows only)
if os.sys.platform == 'win32':
HKEY_LOCAL_MACHINE = 0x80000002
KEY_QUERY_VALUE = 0x0001
RegOpenKeyEx = None
RegQueryValueEx = None
RegCloseKey = None
try:
import win32api
RegOpenKeyEx = win32api.RegOpenKeyEx
RegQueryValueEx = win32api.RegQueryValueEx
RegCloseKey = win32api.RegCloseKey
except ImportError:
            # Print a message suggesting they install these?
pass
try:
import ctypes
def RegOpenKeyEx(key, subkey, opt, sam):
result = ctypes.c_uint(0)
ctypes.windll.advapi32.RegOpenKeyExA(key, subkey, opt, sam,
ctypes.byref(result))
return result.value
def RegQueryValueEx(hkey, valuename):
data_type = ctypes.c_uint(0)
data_len = ctypes.c_uint(1024)
data = ctypes.create_string_buffer(1024)
# this has a return value, which we should probably check
ctypes.windll.advapi32.RegQueryValueExA(
hkey, valuename, 0, ctypes.byref(data_type),
data, ctypes.byref(data_len))
return data.value
RegCloseKey = ctypes.windll.advapi32.RegCloseKey
except ImportError:
            # Print a message suggesting they install these?
pass
if RegOpenKeyEx is not None:
# Get the GraphViz install path from the registry
hkey = None
potentialKeys = [
"SOFTWARE\\ATT\\Graphviz",
"SOFTWARE\\AT&T Research Labs\\Graphviz"]
for potentialKey in potentialKeys:
try:
hkey = RegOpenKeyEx(
HKEY_LOCAL_MACHINE,
potentialKey, 0, KEY_QUERY_VALUE)
if hkey is not None:
path = RegQueryValueEx(hkey, "InstallPath")
RegCloseKey(hkey)
                        # The registry variable might exist, left by
# old installations but with no value, in those cases
# we keep searching...
if not path:
continue
# Now append the "bin" subdirectory:
path = os.path.join(path, "bin")
progs = __find_executables(path)
if progs is not None:
return progs
except Exception:
pass
else:
break
# Method 2 (Linux, Windows etc)
if 'PATH' in os.environ:
for path in os.environ['PATH'].split(os.pathsep):
progs = __find_executables(path)
if progs is not None:
return progs
# Method 3 (Windows only)
if os.sys.platform == 'win32':
# Try and work out the equivalent of "C:\Program Files" on this
# machine (might be on drive D:, or in a different language)
if 'PROGRAMFILES' in os.environ:
# Note, we could also use the win32api to get this
# information, but win32api may not be installed.
path = os.path.join(os.environ['PROGRAMFILES'], 'ATT',
'GraphViz', 'bin')
else:
# Just in case, try the default...
path = r"C:\Program Files\att\Graphviz\bin"
progs = __find_executables(path)
if progs is not None:
return progs
for path in (
'/usr/bin', '/usr/local/bin',
'/opt/local/bin',
'/opt/bin', '/sw/bin', '/usr/share',
'/Applications/Graphviz.app/Contents/MacOS/'):
progs = __find_executables(path)
if progs is not None:
return progs
# Failed to find GraphViz
return None |
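A sketch of consuming the result; the keys mirror the `progs` dict built in `__find_executables`:

progs = find_graphviz()
if progs is None:
    raise RuntimeError('GraphViz executables not found')
print progs['dot']  # e.g. /usr/bin/dot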
<SYSTEM_TASK:>
Get the list of Node instances.
<END_TASK>
<USER_TASK:>
Description:
def get_node_list(self):
"""Get the list of Node instances.
This method returns the list of Node instances
composing the graph.
""" |
node_objs = list()
for obj_dict_list in self.obj_dict['nodes'].values():
node_objs.extend([
Node(obj_dict=obj_d)
for obj_d
in obj_dict_list])
return node_objs |
<SYSTEM_TASK:>
Get the list of Edge instances.
<END_TASK>
<USER_TASK:>
Description:
def get_edge_list(self):
"""Get the list of Edge instances.
This method returns the list of Edge instances
composing the graph.
""" |
edge_objs = list()
for obj_dict_list in self.obj_dict['edges'].values():
edge_objs.extend([
Edge(obj_dict=obj_d)
for obj_d
in obj_dict_list])
return edge_objs |
<SYSTEM_TASK:>
Returns a string representation of the graph in dot language.
<END_TASK>
<USER_TASK:>
Description:
def to_string(self):
"""Returns a string representation of the graph in dot language.
    It will return the graph and all its subelements in string form.
""" |
graph = list()
if self.obj_dict.get('strict', None) is not None:
if self == self.get_parent_graph() and self.obj_dict['strict']:
graph.append('strict ')
if self.obj_dict['name'] == '':
if ('show_keyword' in self.obj_dict and
self.obj_dict['show_keyword']):
graph.append('subgraph {\n')
else:
graph.append('{\n')
else:
graph.append('%s %s {\n' % (self.obj_dict['type'],
self.obj_dict['name']))
for attr, value in sorted(self.obj_dict['attributes'].items(),
key=itemgetter(0)):
if value is not None:
graph.append('%s=%s' % (attr, quote_if_necessary(value)))
else:
graph.append(attr)
graph.append(';\n')
edges_done = set()
edge_obj_dicts = list()
for e in self.obj_dict['edges'].values():
edge_obj_dicts.extend(e)
if edge_obj_dicts:
edge_src_set, edge_dst_set = list(
zip(*[obj['points'] for obj in edge_obj_dicts]))
edge_src_set, edge_dst_set = set(edge_src_set), set(edge_dst_set)
else:
edge_src_set, edge_dst_set = set(), set()
node_obj_dicts = list()
for e in self.obj_dict['nodes'].values():
node_obj_dicts.extend(e)
sgraph_obj_dicts = list()
for sg in self.obj_dict['subgraphs'].values():
sgraph_obj_dicts.extend(sg)
obj_list = sorted([
(obj['sequence'], obj)
for obj
in (edge_obj_dicts + node_obj_dicts + sgraph_obj_dicts)])
for _idx, obj in obj_list:
if obj['type'] == 'node':
node = Node(obj_dict=obj)
if self.obj_dict.get('suppress_disconnected', False):
if (node.get_name() not in edge_src_set and
node.get_name() not in edge_dst_set):
continue
graph.append(node.to_string() + '\n')
elif obj['type'] == 'edge':
edge = Edge(obj_dict=obj)
if self.obj_dict.get('simplify', False) and edge in edges_done:
continue
graph.append(edge.to_string() + '\n')
edges_done.add(edge)
else:
sgraph = Subgraph(obj_dict=obj)
graph.append(sgraph.to_string() + '\n')
graph.append('}\n')
return ''.join(graph) |
<SYSTEM_TASK:>
Write graph to file in selected format.
<END_TASK>
<USER_TASK:>
Description:
def write(self, path, prog=None, format='raw'):
"""Write graph to file in selected format.
Given a filename 'path' it will open/create and truncate
such file and write on it a representation of the graph
defined by the dot object and in the format specified by
'format'. 'path' can also be an open file-like object, such as
a StringIO instance.
The format 'raw' is used to dump the string representation
of the Dot object, without further processing.
    The output can be processed by any of the GraphViz tools, defined
    in 'prog', which defaults to 'dot'.
Returns True or False according to the success of the write
operation.
There's also the preferred possibility of using:
write_'format'(path, prog='program')
which are automatically defined for all the supported formats.
[write_ps(), write_gif(), write_dia(), ...]
""" |
if prog is None:
prog = self.prog
fobj, close = get_fobj(path, 'w+b')
try:
if format == 'raw':
data = self.to_string()
if isinstance(data, basestring):
if not isinstance(data, unicode):
try:
data = unicode(data, 'utf-8')
except Exception:
pass
try:
charset = self.get_charset()
if not PY3 or not charset:
charset = 'utf-8'
data = data.encode(charset)
except Exception:
if PY3:
data = data.encode('utf-8')
pass
fobj.write(data)
else:
fobj.write(self.create(prog, format))
finally:
if close:
fobj.close()
return True |
<SYSTEM_TASK:>
Get crumbs for navigation links.
<END_TASK>
<USER_TASK:>
Description:
def get_crumbs(self):
"""
Get crumbs for navigation links.
Returns:
tuple:
concatenated list of crumbs using these crumbs and the
crumbs of the parent classes through ``__mro__``.
""" |
crumbs = []
for cls in reversed(type(self).__mro__[1:]):
crumbs.extend(getattr(cls, 'crumbs', ()))
crumbs.extend(list(self.crumbs))
return tuple(crumbs) |
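A minimal sketch of the MRO walk with hypothetical view classes, assuming `get_crumbs` is available on the base (e.g. via a mixin):

class Base(object):
    crumbs = (('Home', '/'),)
    get_crumbs = get_crumbs  # reuse the function above as a method

class ReportView(Base):
    crumbs = (('Reports', '/reports/'),)

print ReportView().get_crumbs()
# (('Home', '/'), ('Reports', '/reports/'))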
<SYSTEM_TASK:>
Django view get function.
<END_TASK>
<USER_TASK:>
Description:
def get(self, request, *args, **kwargs):
"""
Django view get function.
Add items of extra_context, crumbs and grid to context.
Args:
request (): Django's request object.
*args (): request args.
**kwargs (): request kwargs.
Returns:
response: render to response with context.
""" |
context = self.get_context_data(**kwargs)
context.update(self.extra_context)
context['crumbs'] = self.get_crumbs()
context['title'] = self.title
context['suit'] = 'suit' in settings.INSTALLED_APPS
if context.get('dashboard_grid', None) is None and self.grid:
context['dashboard_grid'] = self.grid
return self.render_to_response(context) |
<SYSTEM_TASK:>
Return a widget as real-time.
<END_TASK>
<USER_TASK:>
Description:
def realtime(widget, url_name=None, url_regex=None, time_interval=None):
"""
Return a widget as real-time.
Args:
widget (Widget): the widget to register and return as real-time.
url_name (str): the URL name to call to get updated content.
url_regex (regex): the URL regex to be matched.
time_interval (int): the interval of refreshment in milliseconds.
Returns:
Widget: the "real-timed" widget.
""" |
if not hasattr(widget, 'get_updated_content'):
raise AttributeError('Widget %s must implement get_updated_content '
'method.' % widget)
elif not callable(widget.get_updated_content):
raise ValueError('get_updated_content in widget %s is not callable'
% widget)
if url_name is None:
if getattr(widget, 'url_name', None) is not None:
url_name = widget.url_name
else:
url_name = widget.__class__.__name__
if url_name in [w.url_name for w in REALTIME_WIDGETS]:
raise ValueError('URL name %s is already used by another '
'real time widget.' % url_name)
if url_regex is None:
if getattr(widget, 'url_regex', None) is not None:
url_regex = widget.url_regex
else:
url_regex = sha256(url_name.encode('utf-8'))
url_regex = url_regex.hexdigest()[:32]
url_regex = 'realtime/' + url_regex
if url_regex in [w.url_regex for w in REALTIME_WIDGETS]:
raise ValueError('URL regex %s is already used by another '
'real time widget.' % url_regex)
if time_interval is None:
if getattr(widget, 'time_interval', None) is not None:
time_interval = widget.time_interval
else:
time_interval = app_settings.default_time_interval
from django.views.generic import View
from braces.views import AjaxResponseMixin, JSONResponseMixin
# pylama:ignore=C0111,R0201
class PartialResponse(JSONResponseMixin, AjaxResponseMixin, View):
def get_data(self):
return widget.get_updated_content()
def get(self, request, *args, **kwargs):
return self.get_ajax(request, *args, **kwargs)
def get_ajax(self, request, *args, **kwargs):
return self.render_json_response(self.get_data())
PartialResponse.url_name = url_name
PartialResponse.url_regex = url_regex
PartialResponse.time_interval = time_interval
REALTIME_WIDGETS.append(PartialResponse)
if not hasattr(widget, 'url_name'):
widget.url_name = url_name
if not hasattr(widget, 'url_regex'):
widget.url_regex = url_regex
if not hasattr(widget, 'time_interval'):
widget.time_interval = time_interval
return widget |
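A sketch of registering a hypothetical widget; only `get_updated_content` is required, and the URL name, regex and interval are derived when omitted:

class CpuWidget(object):
    def get_updated_content(self):
        return {'cpu': 42}

widget = realtime(CpuWidget(), time_interval=2000)
# widget.url_name is now 'CpuWidget', widget.url_regex is
# 'realtime/' + a 32-char sha256 prefix, and a PartialResponse view
# has been appended to REALTIME_WIDGETS.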
<SYSTEM_TASK:>
Get the URL for real-time widgets.
<END_TASK>
<USER_TASK:>
Description:
def get_realtime_urls(admin_view_func=lambda x: x):
"""
Get the URL for real-time widgets.
Args:
admin_view_func (callable): an admin_view method from an AdminSite
instance. By default: identity.
Returns:
list: the list of the real-time URLs as django's ``url()``.
""" |
from .widgets import REALTIME_WIDGETS
return [url(w.url_regex, admin_view_func(w.as_view()), name=w.url_name)
for w in REALTIME_WIDGETS] |
<SYSTEM_TASK:>
Make sure our Honeypot time is consistent, and not too far off
<END_TASK>
<USER_TASK:>
Description:
def check_time(self):
""" Make sure our Honeypot time is consistent, and not too far off
from the actual time. """ |
poll = self.config['timecheck']['poll']
    ntp_pool = self.config['timecheck']['ntp_pool']
while True:
clnt = ntplib.NTPClient()
try:
            response = clnt.request(ntp_pool, version=3)
diff = response.offset
if abs(diff) >= 15:
logger.error('Timings found to be far off, shutting down drone ({0})'.format(diff))
sys.exit(1)
else:
logger.debug('Polled ntp server and found that drone has {0} seconds offset.'.format(diff))
except (ntplib.NTPException, _socket.error) as ex:
logger.warning('Error while polling ntp server: {0}'.format(ex))
gevent.sleep(poll * 60 * 60) |
<SYSTEM_TASK:>
Launches a new HTTP client session on the server taken from the `self.options` dict.
<END_TASK>
<USER_TASK:>
Description:
def start(self):
"""
Launches a new HTTP client session on the server taken from the `self.options` dict.
:param my_ip: IP of this Client itself
""" |
username = self.options['username']
password = self.options['password']
server_host = self.options['server']
server_port = self.options['port']
honeypot_id = self.options['honeypot_id']
session = self.create_session(server_host, server_port, honeypot_id)
self.sessions[session.id] = session
logger.debug(
'Sending {0} bait session to {1}:{2}. (bait id: {3})'.format('http', server_host, server_port, session.id))
try:
url = self._make_url(server_host, '/index.html', server_port)
response = self.client.get(url, auth=HTTPBasicAuth(username, password), verify=False)
session.did_connect = True
if response.status_code == 200:
session.add_auth_attempt('plaintext', True, username=username, password=password)
session.did_login = True
else:
session.add_auth_attempt('plaintext', False, username=username, password=password)
links = self._get_links(response)
        while self.sent_requests <= self.max_requests and links:
            url = random.choice(links)
            response = self.client.get(url, auth=HTTPBasicAuth(username, password), verify=False)
            links = self._get_links(response)
            self.sent_requests += 1
session.did_complete = True
except Exception as err:
logger.debug('Caught exception: {0} ({1})'.format(err, str(type(err))))
finally:
session.alldone = True
session.end_session()
self.client.close() |
<SYSTEM_TASK:>
Parses the response text and returns all the links in it.
<END_TASK>
<USER_TASK:>
Description:
def _get_links(self, response):
"""
Parses the response text and returns all the links in it.
:param response: The Response object.
""" |
html_text = response.text.encode('utf-8')
doc = document_fromstring(html_text)
links = []
    for e in doc.cssselect('a'):
        links.append(e.get('href'))
    return links |
<SYSTEM_TASK:>
Bootstraps localhost configurations for a Beeswarm server and a honeypot.
<END_TASK>
<USER_TASK:>
Description:
def bootstrap(server_workdir, drone_workdir):
"""Bootstraps localhost configurations for a Beeswarm server and a honeypot.
:param server_workdir: Output directory for the server configuration file.
:param drone_workdir: Output directory for the drone configuration file.
""" |
root_logger = logging.getLogger()
root_logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)-15s (%(name)s) %(message)s')
console_log = logging.StreamHandler()
console_log.setLevel(logging.INFO)
console_log.setFormatter(formatter)
root_logger.addHandler(console_log)
server_workdir_absolute = os.path.abspath(server_workdir)
old_cwd = os.getcwd()
os.chdir(server_workdir)
server = Server(server_workdir_absolute, None, start_webui=False, customize=False, reset_password=False,
max_sessions=0, server_hostname='127.0.0.1')
logger.info('Server config has been written to {0}'.format(os.path.join(server_workdir, 'beeswarmcfg.json')))
gevent.spawn(server.start, False)
    # waiting game to ensure actors have started.
gevent.sleep(2)
os.chdir(old_cwd)
# setting up socket to communicate with ZMQ actor.
context = beeswarm.shared.zmq_context
database_actor = context.socket(zmq.REQ)
database_actor.connect(SocketNames.DATABASE_REQUESTS.value)
db_session = database_setup.get_session()
drone = Honeypot()
protocol_config = (
('ftp', 21, {
'max_attempts': 3,
'banner': 'Microsoft FTP Server',
'syst_type': 'Windows-NT'
}),
('telnet', 23, {
'max_attempts': 3
}),
('pop3', 110, {
'max_attempts': 3
}),
('pop3s', 993, {
'max_attempts': 3
}),
('ssh', 22, {}),
('http', 80, {
'banner': 'Microsoft-IIS/5.0'
}),
('https', 443, {
'banner': 'Microsoft-IIS/5.0'
}),
('smtp', 25, {
'banner': 'Microsoft ESMTP MAIL service ready'
}),
('vnc', 5900, {})
)
for protocol, port, protocol_specific_data in protocol_config:
drone.add_capability(protocol, port, protocol_specific_data)
drone.cert_common_name = '*'
drone.cert_country = 'US'
drone.cert_state = 'None'
drone.cert_locality = 'None'
drone.cert_organization = 'None'
drone.cert_organization_unit = ''
db_session.add(drone)
db_session.commit()
drone_config = send_zmq_request_socket(database_actor, '{0} {1}'.format(Messages.DRONE_CONFIG.value, drone.id))
with open(os.path.join(drone_workdir, 'beeswarmcfg.json'), 'w') as drone_config_file:
drone_config_file.write(json.dumps(drone_config, indent=4))
    logger.info('Drone config has been written to {0}'.format(os.path.join(drone_workdir, 'beeswarmcfg.json')))
server.stop() |
<SYSTEM_TASK:>
Check if a database exists.
<END_TASK>
<USER_TASK:>
Description:
def database_exists(url):
"""Check if a database exists.
:param url: A SQLAlchemy engine URL.
Performs backend-specific testing to quickly determine if a database
exists on the server. ::
database_exists('postgres://postgres@localhost/name') #=> False
create_database('postgres://postgres@localhost/name')
database_exists('postgres://postgres@localhost/name') #=> True
Supports checking against a constructed URL as well. ::
engine = create_engine('postgres://postgres@localhost/name')
database_exists(engine.url) #=> False
create_database(engine.url)
database_exists(engine.url) #=> True
""" |
url = copy(make_url(url))
database = url.database
if url.drivername.startswith('postgresql'):
url.database = 'template1'
else:
url.database = None
engine = sa.create_engine(url)
if engine.dialect.name == 'postgresql':
text = "SELECT 1 FROM pg_database WHERE datname='%s'" % database
return bool(engine.execute(text).scalar())
elif engine.dialect.name == 'mysql':
text = ("SELECT SCHEMA_NAME FROM INFORMATION_SCHEMA.SCHEMATA "
"WHERE SCHEMA_NAME = '%s'" % database)
return bool(engine.execute(text).scalar())
elif engine.dialect.name == 'sqlite':
return database == ':memory:' or os.path.exists(database)
else:
text = 'SELECT 1'
try:
url.database = database
engine = sa.create_engine(url)
engine.execute(text)
return True
except (ProgrammingError, OperationalError):
return False |
<SYSTEM_TASK:>
drone_data_inbound is for data coming from drones
<END_TASK>
<USER_TASK:>
Description:
def message_proxy(self, work_dir):
"""
    drone_data_inbound is for data coming from drones
    drone_data_outbound is for commands to the drones; the topic must either be
    a drone ID or 'all' for sending a broadcast message to all drones
""" |
public_keys_dir = os.path.join(work_dir, 'certificates', 'public_keys')
secret_keys_dir = os.path.join(work_dir, 'certificates', 'private_keys')
# start and configure auth worker
auth = IOLoopAuthenticator()
auth.start()
auth.allow('127.0.0.1')
auth.configure_curve(domain='*', location=public_keys_dir)
# external interfaces for communicating with drones
server_secret_file = os.path.join(secret_keys_dir, 'beeswarm_server.pri')
server_public, server_secret = load_certificate(server_secret_file)
drone_data_inbound = beeswarm.shared.zmq_context.socket(zmq.PULL)
drone_data_inbound.curve_secretkey = server_secret
drone_data_inbound.curve_publickey = server_public
drone_data_inbound.curve_server = True
drone_data_inbound.bind('tcp://*:{0}'.format(self.config['network']['zmq_port']))
drone_data_outbound = beeswarm.shared.zmq_context.socket(zmq.PUB)
drone_data_outbound.curve_secretkey = server_secret
drone_data_outbound.curve_publickey = server_public
drone_data_outbound.curve_server = True
drone_data_outbound.bind('tcp://*:{0}'.format(self.config['network']['zmq_command_port']))
# internal interfaces
# all inbound session data from drones will be replayed on this socket
drone_data_socket = beeswarm.shared.zmq_context.socket(zmq.PUB)
drone_data_socket.bind(SocketNames.DRONE_DATA.value)
# all commands received on this will be published on the external interface
drone_command_socket = beeswarm.shared.zmq_context.socket(zmq.PULL)
drone_command_socket.bind(SocketNames.DRONE_COMMANDS.value)
poller = zmq.Poller()
poller.register(drone_data_inbound, zmq.POLLIN)
poller.register(drone_command_socket, zmq.POLLIN)
while True:
# .recv() gives no context switch - why not? using poller with timeout instead
socks = dict(poller.poll(100))
gevent.sleep()
if drone_command_socket in socks and socks[drone_command_socket] == zmq.POLLIN:
data = drone_command_socket.recv()
drone_id, _ = data.split(' ', 1)
logger.debug("Sending drone command to: {0}".format(drone_id))
# pub socket takes care of filtering
drone_data_outbound.send(data)
elif drone_data_inbound in socks and socks[drone_data_inbound] == zmq.POLLIN:
raw_msg = drone_data_inbound.recv()
split_data = raw_msg.split(' ', 2)
if len(split_data) == 3:
topic, drone_id, data = split_data
else:
data = None
topic, drone_id, = split_data
logger.debug("Received {0} message from {1}.".format(topic, drone_id))
# relay message on internal socket
drone_data_socket.send(raw_msg) |
<SYSTEM_TASK:>
Starts the BeeSwarm server.
<END_TASK>
<USER_TASK:>
Description:
def start(self):
"""
Starts the BeeSwarm server.
""" |
self.started = True
if self.app:
web_port = self.config['network']['web_port']
logger.info('Starting server listening on port {0}'.format(web_port))
key_file = os.path.join(self.work_dir, 'server.key')
cert_file = os.path.join(self.work_dir, 'server.crt')
http_server = WSGIServer(('', web_port), self.app, keyfile=key_file, certfile=cert_file)
http_server_greenlet = gevent.spawn(http_server.serve_forever)
self.greenlets.append(http_server_greenlet)
stop_if_not_write_workdir(self.work_dir)
logger.info('Server started.')
gevent.joinall(self.greenlets) |
<SYSTEM_TASK:>
Return true if current time is in the active range
<END_TASK>
<USER_TASK:>
Description:
def time_in_range(self):
"""Return true if current time is in the active range""" |
curr = datetime.datetime.now().time()
if self.start_time <= self.end_time:
return self.start_time <= curr <= self.end_time
else:
return self.start_time <= curr or curr <= self.end_time |
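The `else` branch covers active windows that wrap past midnight, e.g. 22:00 to 06:00; the same test, spelled out:

import datetime

start, end = datetime.time(22, 0), datetime.time(6, 0)
for probe in (datetime.time(23, 30), datetime.time(5, 0), datetime.time(12, 0)):
    print probe, (start <= probe or probe <= end)  # True, True, False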
<SYSTEM_TASK:>
Launches a new VNC client session on the server taken from the `self.options` dict.
<END_TASK>
<USER_TASK:>
Description:
def start(self):
"""
    Launches a new VNC client session on the server taken from the `self.options` dict.
This session always fails.
:param my_ip: IP of this Client itself
""" |
password = self.options['password']
server_host = self.options['server']
server_port = self.options['port']
honeypot_id = self.options['honeypot_id']
session = self.create_session(server_host, server_port, honeypot_id)
self.sessions[session.id] = session
logger.debug(
'Sending {0} bait session to {1}:{2}. (bait id: {3})'.format('vnc', server_host, server_port, session.id))
client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
client_socket.connect((server_host, int(server_port)))
session.source_port = client_socket.getsockname()[1]
except socket.error as e:
logger.debug('Caught exception: {0} ({1})'.format(e, str(type(e))))
else:
session.did_connect = True
protocol_version = client_socket.recv(1024)
client_socket.send(RFB_VERSION)
supported_auth_methods = client_socket.recv(1024)
# \x02 implies that VNC authentication method is to be used
# Refer to http://tools.ietf.org/html/rfc6143#section-7.1.2 for more info.
if '\x02' in supported_auth_methods:
client_socket.send(VNC_AUTH)
challenge = client_socket.recv(1024)
            # the password limit for vnc is 8 chars
aligned_password = (password + '\0' * 8)[:8]
des = RFBDes(aligned_password)
response = des.encrypt(challenge)
client_socket.send(response)
auth_status = client_socket.recv(1024)
if auth_status == AUTH_SUCCESSFUL:
session.add_auth_attempt('des_challenge', True, password=aligned_password)
session.did_login = True
else:
session.add_auth_attempt('des_challenge', False, password=aligned_password)
session.did_login = False
session.did_complete = True
finally:
session.alldone = True
session.end_session()
if client_socket:
client_socket.close() |
<SYSTEM_TASK:>
Creates a new session.
<END_TASK>
<USER_TASK:>
Description:
def create_session(self, server_host, server_port, honeypot_id):
"""
Creates a new session.
:param server_host: IP address of the server
:param server_port: Server port
:return: A new `BaitSession` object.
""" |
protocol = self.__class__.__name__.lower()
session = BaitSession(protocol, server_host, server_port, honeypot_id)
self.sessions[session.id] = session
return session |
<SYSTEM_TASK:>
Launches a new FTP client session on the server taken from the `self.options` dict.
<END_TASK>
<USER_TASK:>
Description:
def start(self):
"""
Launches a new FTP client session on the server taken from the `self.options` dict.
:param my_ip: IP of this Client itself
""" |
username = self.options['username']
password = self.options['password']
server_host = self.options['server']
server_port = self.options['port']
honeypot_id = self.options['honeypot_id']
command_limit = random.randint(6, 11)
session = self.create_session(server_host, server_port, honeypot_id)
self.sessions[session.id] = session
logger.debug(
'Sending {0} bait session to {1}:{2}. (bait id: {3})'.format('ftp', server_host, server_port, session.id))
self.file_list = []
try:
self.connect()
session.did_connect = True
# TODO: Catch login failure
self.login(username, password)
session.add_auth_attempt('plaintext', True, username=username, password=password)
session.did_login = True
session.timestamp = datetime.utcnow()
except ftplib.error_perm as err:
logger.debug('Caught exception: {0} ({1})'.format(err, str(type(err))))
except socket.error as err:
logger.debug('Error while communicating: {0} ({1})'.format(err, str(type(err))))
else:
command_count = 0
while command_count <= command_limit:
command_count += 1
try:
self.sense()
cmd, param = self.decide()
self.act(cmd, param)
gevent.sleep(random.uniform(0, 3))
except IndexError: # This means we hit an empty folder, or a folder with only files.
continue
session.did_complete = True
finally:
if self.client.sock is not None:
# will close socket
self.client.quit()
session.alldone = True
session.end_session() |
<SYSTEM_TASK:>
Launches a few "sensing" commands such as 'ls', or 'pwd'
<END_TASK>
<USER_TASK:>
Description:
def sense(self):
"""
Launches a few "sensing" commands such as 'ls', or 'pwd'
and updates the current bait state.
""" |
cmd_name = random.choice(self.senses)
command = getattr(self, cmd_name)
self.state['last_command'] = cmd_name
command() |
<SYSTEM_TASK:>
Decides the next command to be launched based on the current state.
<END_TASK>
<USER_TASK:>
Description:
def decide(self):
"""
Decides the next command to be launched based on the current state.
    :return: Tuple containing the next command name and its parameters.
""" |
next_command_name = random.choice(self.COMMAND_MAP[self.state['last_command']])
param = ''
if next_command_name == 'retrieve':
param = random.choice(self.state['file_list'])
elif next_command_name == 'cwd':
param = random.choice(self.state['dir_list'])
return next_command_name, param |
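`COMMAND_MAP` itself is not shown in this excerpt; a plausible shape, given the sense/act commands referenced here, would be something like:

# Hypothetical command graph: each entry lists the commands that may follow
# the last command, so a bait session walks a realistic-looking path.
COMMAND_MAP = {
    'pwd': ['list', 'cwd'],
    'list': ['retrieve', 'cwd', 'pwd'],
    'cwd': ['list', 'retrieve', 'pwd'],
    'retrieve': ['list', 'cwd'],
}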
<SYSTEM_TASK:>
Run the command with the parameters.
<END_TASK>
<USER_TASK:>
Description:
def act(self, cmd_name, param):
"""
Run the command with the parameters.
:param cmd_name: The name of command to run
:param param: Params for the command
""" |
command = getattr(self, cmd_name)
if param:
command(param)
else:
command() |
<SYSTEM_TASK:>
Run the FTP RETR command, and download the file
<END_TASK>
<USER_TASK:>
Description:
def retrieve(self, filename):
"""
Run the FTP RETR command, and download the file
:param filename: Name of the file to download
""" |
logger.debug('Sending FTP retr command. Filename: {}'.format(filename))
self.client.retrbinary('RETR {}'.format(filename), self._save_file) |
<SYSTEM_TASK:>
Send the FTP CWD command
<END_TASK>
<USER_TASK:>
Description:
def cwd(self, newdir):
"""
Send the FTP CWD command
:param newdir: Directory to change to
""" |
    logger.debug('Sending FTP cwd command. New Working Directory: {}'.format(newdir))
self.client.cwd(newdir)
self.state['current_dir'] = self.client.pwd() |
<SYSTEM_TASK:>
Processes a line of 'ls -l' output, and updates state accordingly.
<END_TASK>
<USER_TASK:>
Description:
def _process_list(self, list_line):
# -rw-r--r-- 1 ftp ftp 68 May 09 19:37 testftp.txt
"""
Processes a line of 'ls -l' output, and updates state accordingly.
:param list_line: Line to process
""" |
res = list_line.split(' ', 8)
if res[0].startswith('-'):
self.state['file_list'].append(res[-1])
if res[0].startswith('d'):
self.state['dir_list'].append(res[-1]) |
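Tracing the sample listing line from the comment above (single-space separated, nine fields):

line = '-rw-r--r-- 1 ftp ftp 68 May 09 19:37 testftp.txt'
res = line.split(' ', 8)
# res[0] == '-rw-r--r--'  (leading '-' marks a regular file)
# res[-1] == 'testftp.txt', which lands in state['file_list']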
<SYSTEM_TASK:>
Starts sending client bait to the configured Honeypot.
<END_TASK>
<USER_TASK:>
Description:
def start(self):
"""
Starts sending client bait to the configured Honeypot.
""" |
logger.info('Starting client.')
self.dispatcher_greenlets = []
for _, entry in self.config['baits'].items():
for b in clientbase.ClientBase.__subclasses__():
bait_name = b.__name__.lower()
            # if the bait has an entry in the config we consider the bait enabled
if bait_name in entry:
bait_options = entry[bait_name]
dispatcher = BaitDispatcher(b, bait_options)
dispatcher.start()
self.dispatcher_greenlets.append(dispatcher)
logger.info('Adding {0} bait'.format(bait_name))
logger.debug('Bait added with options: {0}'.format(bait_options))
gevent.joinall(self.dispatcher_greenlets) |
<SYSTEM_TASK:>
Launches a new POP3 client session on the server taken from the `self.options` dict.
<END_TASK>
<USER_TASK:>
Description:
def start(self):
"""
Launches a new POP3 client session on the server taken from the `self.options` dict.
:param my_ip: IP of this Client itself
""" |
username = self.options['username']
password = self.options['password']
server_host = self.options['server']
server_port = self.options['port']
honeypot_id = self.options['honeypot_id']
session = self.create_session(server_host, server_port, honeypot_id)
try:
logger.debug(
'Sending {0} bait session to {1}:{2}. (bait id: {3})'.format('pop3', server_host, server_port,
session.id))
conn = poplib.POP3_SSL(server_host, server_port)
session.source_port = conn.sock.getsockname()[1]
banner = conn.getwelcome()
session.protocol_data['banner'] = banner
session.did_connect = True
conn.user(username)
conn.pass_(password)
# TODO: Handle failed login
session.add_auth_attempt('plaintext', True, username=username, password=password)
session.did_login = True
session.timestamp = datetime.utcnow()
except Exception as err:
logger.debug('Caught exception: {0} ({1})'.format(err, str(type(err))))
else:
list_entries = conn.list()[1]
for entry in list_entries:
index, _ = entry.split(' ')
conn.retr(index)
conn.dele(index)
logger.debug('Found and deleted {0} messages on {1}'.format(len(list_entries), server_host))
conn.quit()
session.did_complete = True
finally:
session.alldone = True
session.end_session() |
<SYSTEM_TASK:>
Tries to match a session with its counterpart. For a bait session it will try to match it with honeypot sessions
<END_TASK>
<USER_TASK:>
Description:
def get_matching_session(self, session, db_session, timediff=5):
"""
    Tries to match a session with its counterpart. For a bait session it will try to match it with honeypot sessions
and the other way around.
:param session: session object which will be used as base for query.
:param timediff: +/- allowed time difference between a session and a potential matching session.
""" |
db_session = db_session
min_datetime = session.timestamp - timedelta(seconds=timediff)
max_datetime = session.timestamp + timedelta(seconds=timediff)
# default return value
match = None
classification = db_session.query(Classification).filter(
Classification.type == 'pending').one()
# get all sessions that match basic properties.
sessions = db_session.query(Session).options(joinedload(Session.authentication)) \
.filter(Session.protocol == session.protocol) \
.filter(Session.honeypot == session.honeypot) \
.filter(Session.timestamp >= min_datetime) \
.filter(Session.timestamp <= max_datetime) \
.filter(Session.id != session.id) \
.filter(Session.classification == classification)
# identify the correct session by comparing authentication.
# this could properly also be done using some fancy ORM/SQL construct.
for potential_match in sessions:
if potential_match.discriminator == session.discriminator:
continue
assert potential_match.id != session.id
for honey_auth in session.authentication:
for session_auth in potential_match.authentication:
if session_auth.username == honey_auth.username and \
session_auth.password == honey_auth.password and \
session_auth.successful == honey_auth.successful:
assert potential_match.id != session.id
match = potential_match
break
return match |
<SYSTEM_TASK:>
Will classify all unclassified sessions as malicious activity.
<END_TASK>
<USER_TASK:>
Description:
def _classify_malicious_sessions(self):
"""
Will classify all unclassified sessions as malicious activity.
:param delay_seconds: no sessions newer than (now - delay_seconds) will be processed.
""" |
min_datetime = datetime.utcnow() - timedelta(seconds=self.delay_seconds)
db_session = database_setup.get_session()
# find and process bait sessions that did not get classified during
# persistence.
bait_sessions = db_session.query(BaitSession).options(joinedload(BaitSession.authentication)) \
.filter(BaitSession.classification_id == 'pending') \
.filter(BaitSession.did_complete == True) \
.filter(BaitSession.received < min_datetime).all()
for bait_session in bait_sessions:
logger.debug(
'Classifying bait session with id {0} as MITM'.format(bait_session.id))
bait_session.classification = db_session.query(
Classification).filter(Classification.type == 'mitm').one()
db_session.commit()
# find and process honeypot sessions that did not get classified during
# persistence.
sessions = db_session.query(Session, Drone.name).filter(Session.discriminator == None) \
.filter(Session.timestamp <= min_datetime) \
.filter(Session.classification_id == 'pending') \
.all()
for entry in sessions:
# Check if the attack used credentials leaked by beeswarm drones
session = entry[0]
bait_match = None
for a in session.authentication:
bait_match = db_session.query(BaitSession) \
.filter(BaitSession.authentication.any(username=a.username, password=a.password)).first()
if bait_match:
break
if bait_match:
logger.debug('Classifying session with id {0} as attack which involved the reuse '
'of previously transmitted credentials.'.format(session.id))
session.classification = db_session.query(Classification).filter(
Classification.type == 'credentials_reuse').one()
elif len(session.authentication) == 0:
logger.debug(
'Classifying session with id {0} as probe.'.format(session.id))
session.classification = db_session.query(
Classification).filter(Classification.type == 'probe').one()
else:
# we have never transmitted this username/password combo
logger.debug(
'Classifying session with id {0} as bruteforce attempt.'.format(session.id))
session.classification = db_session.query(Classification).filter(
Classification.type == 'bruteforce').one()
db_session.commit()
session.name = entry[1]
self.processedSessionsPublisher.send(
'{0} {1}'.format(Messages.SESSION.value, json.dumps(session.to_dict()))) |
<SYSTEM_TASK:>
Launches a new SSH client session on the server taken from the `self.options` dict.
<END_TASK>
<USER_TASK:>
Description:
def start(self):
"""
Launches a new SSH client session on the server taken from the `self.options` dict.
:param my_ip: IP of this Client itself
""" |
username = self.options['username']
password = self.options['password']
server_host = self.options['server']
server_port = self.options['port']
honeypot_id = self.options['honeypot_id']
session = self.create_session(server_host, server_port, honeypot_id)
self.sessions[session.id] = session
logger.debug(
'Sending ssh bait session to {0}:{1}. (bait id: {2})'.format(server_host, server_port, session.id))
try:
self.connect_login()
session.did_connect = True
# TODO: Handle failed login
session.add_auth_attempt('plaintext', True, username=username, password=password)
session.did_login = True
except (SSHException, AuthenticationFailed) as err:
logger.debug('Caught exception: {0} ({1})'.format(err, str(type(err))))
else:
command_count = 0
command_limit = random.randint(6, 11)
while command_count < command_limit:
command_count += 1
self.sense()
comm, param = self.decide()
self.act(comm, param)
gevent.sleep(random.uniform(0.4, 5.6))
self.logout()
session.did_complete = True
finally:
session.alldone = True
session.end_session()
self.comm_chan.close() |
<SYSTEM_TASK:>
Send a command to the remote SSH server.
<END_TASK>
<USER_TASK:>
Description:
def send_command(self, cmd):
"""
Send a command to the remote SSH server.
:param cmd: The command to send
""" |
logger.debug('Sending {0} command.'.format(cmd))
self.comm_chan.sendall(cmd + '\n') |
<SYSTEM_TASK:>
Try to login to the Remote SSH Server.
<END_TASK>
<USER_TASK:>
Description:
def connect_login(self):
"""
Try to login to the Remote SSH Server.
:return: Response text on successful login
:raise: `AuthenticationFailed` on unsuccessful login
""" |
self.client.connect(self.options['server'], self.options['port'], self.options['username'],
self.options['password'])
self.comm_chan = self.client.invoke_shell()
time.sleep(1) # Let the server take some time to get ready.
while not self.comm_chan.recv_ready():
time.sleep(0.5)
login_response = self.comm_chan.recv(2048)
if not login_response.endswith('$ '):
raise AuthenticationFailed
return login_response |
<SYSTEM_TASK:>
Converts an absolute path to an entry resembling the output of
<END_TASK>
<USER_TASK:>
Description:
def path_to_ls(fn):
""" Converts an absolute path to an entry resembling the output of
the ls command on most UNIX systems.""" |
st = os.stat(fn)
full_mode = 'rwxrwxrwx'
mode = ''
file_time = ''
d = ''
for i in range(9):
# Incrementally builds up the 9 character string, using characters from the
# fullmode (defined above) and mode bits from the stat() system call.
mode += ((st.st_mode >> (8 - i)) & 1) and full_mode[i] or '-'
d = (os.path.isdir(fn)) and 'd' or '-'
file_time = time.strftime(' %b %d %H:%M ', time.gmtime(st.st_mtime))
list_format = '{0}{1} 1 ftp ftp {2}\t{3}{4}'.format(d, mode, str(st.st_size), file_time, os.path.basename(fn))
return list_format |
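A hypothetical call and its output (size and mtime depend on the actual file; the gap after the size is the literal tab from the format string):

print path_to_ls('/tmp/testftp.txt')
# -rw-r--r-- 1 ftp ftp 68	 May 09 19:37 testftp.txt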
<SYSTEM_TASK:>
Restarts the drone
<END_TASK>
<USER_TASK:>
Description:
def _start_drone(self):
"""
Restarts the drone
""" |
with open(self.config_file, 'r') as config_file:
self.config = json.load(config_file, object_hook=asciify)
mode = None
if self.config['general']['mode'] == '' or self.config['general']['mode'] is None:
logger.info('Drone has not been configured, awaiting configuration from Beeswarm server.')
elif self.config['general']['mode'] == 'honeypot':
mode = Honeypot
elif self.config['general']['mode'] == 'client':
mode = Client
if mode:
self.drone = mode(self.work_dir, self.config)
self.drone_greenlet = gevent.spawn(self.drone.start)
self.drone_greenlet.link_exception(self.on_exception)
logger.info('Drone configured and running. ({0})'.format(self.id)) |
<SYSTEM_TASK:>
Launches a new SMTP client session on the server taken from the `self.options` dict.
<END_TASK>
<USER_TASK:>
Description:
def start(self):
"""
Launches a new SMTP client session on the server taken from the `self.options` dict.
:param my_ip: IP of this Client itself
""" |
username = self.options['username']
password = self.options['password']
server_host = self.options['server']
server_port = self.options['port']
honeypot_id = self.options['honeypot_id']
session = self.create_session(server_host, server_port, honeypot_id)
logger.debug(
'Sending {0} bait session to {1}:{2}. (bait id: {3})'.format('smtp', server_host, server_port, session.id))
try:
self.connect()
session.did_connect = True
session.source_port = self.client.sock.getsockname()[1]
self.login(username, password)
# TODO: Handle failed login
        # TODO: password='' is a silly fix, this needs to be fixed server side...
session.add_auth_attempt('plaintext', True, username=username, password='')
session.did_login = True
except smtplib.SMTPException as error:
logger.debug('Caught exception: {0} ({1})'.format(error, str(type(error))))
else:
while self.sent_mails <= self.max_mails:
from_addr, to_addr, mail_body = self.get_one_mail()
try:
if from_addr and to_addr and isinstance(mail_body, str):
self.client.sendmail(from_addr, to_addr, mail_body)
else:
continue
except TypeError as e:
logger.debug('Malformed email in mbox archive, skipping.')
continue
else:
self.sent_mails += 1
logger.debug('Sent mail from ({0}) to ({1})'.format(from_addr, to_addr))
time.sleep(1)
self.client.quit()
session.did_complete = True
finally:
logger.debug('SMTP Session complete.')
session.alldone = True
session.end_session()
self.client.close() |
<SYSTEM_TASK:>
Choose and return a random email from the mail archive.
<END_TASK>
<USER_TASK:>
Description:
def get_one_mail(self):
"""
Choose and return a random email from the mail archive.
:return: Tuple containing From Address, To Address and the mail body.
""" |
while True:
mail_key = random.choice(self.mailbox.keys())
mail = self.mailbox[mail_key]
from_addr = mail.get_from()
to_addr = mail['To']
mail_body = mail.get_payload()
if not from_addr or not to_addr:
continue
return from_addr, to_addr, mail_body |
<SYSTEM_TASK:>
Connect to the SMTP server.
<END_TASK>
<USER_TASK:>
Description:
def connect(self):
"""
Connect to the SMTP server.
""" |
# TODO: local_hostname should be configurable
self.client = smtplib.SMTP(self.options['server'], self.options['port'],
local_hostname='local.domain', timeout=15) |
<SYSTEM_TASK:>
Ascii-fies dict keys and values
<END_TASK>
<USER_TASK:>
Description:
def _asciify_dict(data):
""" Ascii-fies dict keys and values """ |
ret = {}
for key, value in data.iteritems():
if isinstance(key, unicode):
key = _remove_accents(key)
key = key.encode('utf-8')
if isinstance(value, unicode):
value = _remove_accents(value)
value = value.encode('utf-8')
elif isinstance(value, list):
value = _asciify_list(value)
elif isinstance(value, dict):
value = _asciify_dict(value)
ret[key] = value
return ret |
<SYSTEM_TASK:>
Emulates human typing speed
<END_TASK>
<USER_TASK:>
Description:
def write_human(self, buffer_):
""" Emulates human typing speed """ |
if self.IAC in buffer_:
buffer_ = buffer_.replace(self.IAC, self.IAC + self.IAC)
self.msg("send %r", buffer_)
for char in buffer_:
delta = max(random.gauss(80, 20), 0)  # ms per keystroke; clamp to avoid a negative sleep
self.sock.sendall(char)
time.sleep(delta / 1000.0)
<SYSTEM_TASK:>
Launches a new Telnet client session on the server taken from the `self.options` dict.
<END_TASK>
<USER_TASK:>
Description:
def start(self):
"""
Launches a new Telnet client session on the server taken from the `self.options` dict.
""" |
login = self.options['username']
password = self.options['password']
server_host = self.options['server']
server_port = self.options['port']
honeypot_id = self.options['honeypot_id']
command_limit = random.randint(6, 11)
session = self.create_session(server_host, server_port, honeypot_id)
self.sessions[session.id] = session
logger.debug(
'Sending telnet bait session to {0}:{1}. (bait id: {2})'.format(server_host, server_port, session.id))
try:
self.connect()
session.did_connect = True
session.source_port = self.client.sock.getsockname()[1]
self.login(login, password)
session.add_auth_attempt('plaintext', True, username=login, password=password)
session.did_login = True
except InvalidLogin:
logger.debug('Telnet session could not login. ({0})'.format(session.id))
session.did_login = False
except Exception as err:
logger.debug('Caught exception: {0} ({1})'.format(err, str(type(err))), exc_info=True)
else:
command_count = 0
while command_count < command_limit:
command_count += 1
self.sense()
comm, param = self.decide()
self.act(comm, param)
gevent.sleep(random.uniform(0.4, 5.6))
self.act('logout')
session.did_complete = True
finally:
session.alldone = True
session.end_session()
if self.client:
self.client.close() |
<SYSTEM_TASK:>
Open a new telnet session on the remote server.
<END_TASK>
<USER_TASK:>
Description:
def connect(self):
"""
Open a new telnet session on the remote server.
""" |
self.client = BaitTelnetClient(self.options['server'], self.options['port'])
self.client.set_option_negotiation_callback(self.process_options) |
<SYSTEM_TASK:>
Login to the remote telnet server.
<END_TASK>
<USER_TASK:>
Description:
def login(self, login, password):
"""
Login to the remote telnet server.
:param login: Username to use for logging in
:param password: Password to use for logging in
:raise: `InvalidLogin` on failed login
""" |
self.client.read_until('Username: ')
self.client.write(login + '\r\n')
self.client.read_until('Password: ')
self.client.write(password + '\r\n')
current_data = self.client.read_until('$ ', 10)
if not current_data.endswith('$ '):
raise InvalidLogin |
<SYSTEM_TASK:>
Logout from the remote server.
<END_TASK>
<USER_TASK:>
Description:
def logout(self):
"""
Logout from the remote server.
""" |
self.client.write('exit\r\n')
self.client.read_all()
self.client.close() |
<SYSTEM_TASK:>
Launch a command from the 'senses' list, and update the current state.
<END_TASK>
<USER_TASK:>
Description:
def sense(self):
""" Launch a command in the 'senses' List, and update the current state.""" |
cmd_name = random.choice(self.senses)
param = ''
if cmd_name == 'ls':
if random.randint(0, 1):
param = '-l'
elif cmd_name == 'uname':
# Choose options from predefined ones
opts = 'asnrvmpio'
start = random.randint(0, len(opts) - 2)
end = random.randint(start + 1, len(opts) - 1)
param = '-{}'.format(opts[start:end])
command = getattr(self, cmd_name)
command(param) |
<SYSTEM_TASK:>
Choose the next command to execute, and its parameters, based on the current
<END_TASK>
<USER_TASK:>
Description:
def decide(self):
""" Choose the next command to execute, and its parameters, based on the current
state.
""" |
next_command_name = random.choice(self.COMMAND_MAP[self.state['last_command']])
param = ''
if next_command_name == 'cd':
try:
param = random.choice(self.state['dir_list'])
except IndexError:
next_command_name = 'ls'
elif next_command_name == 'uname':
opts = 'asnrvmpio'
start = random.randint(0, len(opts) - 2)
end = random.randint(start + 1, len(opts) - 1)
param = '-{}'.format(opts[start:end])
elif next_command_name == 'ls':
if random.randint(0, 1):
param = '-l'
elif next_command_name == 'cat':
try:
param = random.choice(self.state['file_list'])
except IndexError:
param = ''.join(random.choice(string.lowercase) for x in range(3))
elif next_command_name == 'echo':
param = random.choice([
'$http_proxy',
'$https_proxy',
'$ftp_proxy',
'$BROWSER',
'$EDITOR',
'$SHELL',
'$PAGER'
])
elif next_command_name == 'sudo':
param = random.choice([
'pm-hibernate',
'shutdown -h',
'vim /etc/httpd.conf',
'vim /etc/resolve.conf',
'service network restart',
'/etc/init.d/network-manager restart',
])
return next_command_name, param |
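For context, decide() walks a Markov-style table: COMMAND_MAP maps the last executed command to plausible follow-ups. A minimal sketch of such a table, using only command names that appear in the code above; the real map is defined on the bait class and may differ:

# Illustrative only -- the actual COMMAND_MAP lives on the bait class.
COMMAND_MAP = {
    'pwd': ['ls', 'uname'],
    'cd': ['ls', 'pwd'],
    'ls': ['cd', 'cat', 'echo'],
    'cat': ['ls', 'echo', 'sudo'],
    'uname': ['ls', 'pwd'],
    'echo': ['ls', 'sudo'],
    'sudo': ['logout'],
}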
<SYSTEM_TASK:>
Run the specified command with its parameters.
<END_TASK>
<USER_TASK:>
Description:
def act(self, cmd_name, params=None):
""" Run the specified command with its parameters.""" |
command = getattr(self, cmd_name)
if params:
command(params)
else:
command() |
<SYSTEM_TASK:>
Override it to convert fields of `json_record` to needed types.
<END_TASK>
<USER_TASK:>
Description:
def mutate_json_record(self, json_record):
"""Override it to convert fields of `json_record` to needed types.
Default implementation converts `datetime` to string in ISO8601 format.
""" |
for attr_name in json_record:
attr = json_record[attr_name]
if isinstance(attr, datetime):
json_record[attr_name] = attr.isoformat()
return json_record |
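A subclass can extend this hook for other non-serializable types. A sketch that also stringifies Decimal values; the `JSONFormatter` base-class name is an assumption, not taken from the snippet:

from decimal import Decimal

class MyJsonFormatter(JSONFormatter):  # base-class name assumed
    def mutate_json_record(self, json_record):
        # Let the base class convert datetime to ISO8601 first
        json_record = super(MyJsonFormatter, self).mutate_json_record(json_record)
        for attr_name, attr in json_record.items():
            if isinstance(attr, Decimal):
                json_record[attr_name] = str(attr)
        return json_record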
<SYSTEM_TASK:>
Listen to the stream and send events to the client.
<END_TASK>
<USER_TASK:>
Description:
def run(self):
""" Listen to the stream and send events to the client. """ |
channel = self._ssh_client.get_transport().open_session()
self._channel = channel
channel.exec_command("gerrit stream-events")
stdout = channel.makefile()
stderr = channel.makefile_stderr()
while not self._stop.is_set():
try:
if channel.exit_status_ready():
if channel.recv_stderr_ready():
error = stderr.readline().strip()
else:
error = "Remote server connection closed"
self._error_event(error)
self._stop.set()
else:
data = stdout.readline()
self._gerrit.put_event(data)
except Exception as e: # pylint: disable=W0703
self._error_event(repr(e))
self._stop.set() |
<SYSTEM_TASK:>
Run a query.
<END_TASK>
<USER_TASK:>
Description:
def query(self, term):
""" Run a query.
:arg str term: The query term to run.
:Returns: A list of results as :class:`pygerrit.models.Change` objects.
:Raises: `ValueError` if `term` is not a string.
""" |
results = []
command = ["query", "--current-patch-set", "--all-approvals",
"--format JSON", "--commit-message"]
if not isinstance(term, basestring):
raise ValueError("term must be a string")
command.append(escape_string(term))
result = self._ssh_client.run_gerrit_command(" ".join(command))
decoder = JSONDecoder()
for line in result.stdout.read().splitlines():
# Gerrit's response to the query command contains one or more
# lines of JSON-encoded strings. The last one is a status
# dictionary containing the key "type" whose value indicates
# whether or not the operation was successful.
# According to http://goo.gl/h13HD it should be safe to use the
# presence of the "type" key to determine whether the dictionary
# represents a change or if it's the query status indicator.
try:
data = decoder.decode(line)
except ValueError as err:
raise GerritError("Query returned invalid data: %s", err)
if "type" in data and data["type"] == "error":
raise GerritError("Query error: %s" % data["message"])
elif "project" in data:
results.append(Change(data))
return results |
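A minimal usage sketch, assuming a `GerritClient` wired to an SSH client as in the surrounding code; the hostname is illustrative:

client = GerritClient("review.example.org")  # hostname is made up
for change in client.query("status:open limit:5"):
    print(change)  # each result is a pygerrit.models.Change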
<SYSTEM_TASK:>
Start streaming events from `gerrit stream-events`.
<END_TASK>
<USER_TASK:>
Description:
def start_event_stream(self):
""" Start streaming events from `gerrit stream-events`. """ |
if not self._stream:
self._stream = GerritStream(self, ssh_client=self._ssh_client)
self._stream.start() |
<SYSTEM_TASK:>
Stop streaming events from `gerrit stream-events`.
<END_TASK>
<USER_TASK:>
Description:
def stop_event_stream(self):
""" Stop streaming events from `gerrit stream-events`.""" |
if self._stream:
self._stream.stop()
self._stream.join()
self._stream = None
with self._events.mutex:
self._events.queue.clear() |
<SYSTEM_TASK:>
Get the next event from the queue.
<END_TASK>
<USER_TASK:>
Description:
def get_event(self, block=True, timeout=None):
""" Get the next event from the queue.
:arg boolean block: Set to True to block if no event is available.
:arg seconds timeout: Timeout to wait if no event is available.
:Returns: The next event as a :class:`pygerrit.events.GerritEvent`
instance, or `None` if:
- `block` is False and there is no event available in the queue, or
- `block` is True and no event is available within the time
specified by `timeout`.
""" |
try:
return self._events.get(block, timeout)
except Empty:
return None |
<SYSTEM_TASK:>
Create event from `data` and add it to the queue.
<END_TASK>
<USER_TASK:>
Description:
def put_event(self, data):
""" Create event from `data` and add it to the queue.
:arg json data: The JSON data from which to create the event.
:Raises: :class:`pygerrit.error.GerritError` if the queue is full, or
the factory could not create the event.
""" |
try:
event = self._factory.create(data)
self._events.put(event)
except Full:
raise GerritError("Unable to add event: queue is full") |
<SYSTEM_TASK:>
Extract the version from `version_string` using `pattern`.
<END_TASK>
<USER_TASK:>
Description:
def _extract_version(version_string, pattern):
""" Extract the version from `version_string` using `pattern`.
Return the version as a string, with leading/trailing whitespace
stripped.
""" |
if version_string:
match = pattern.match(version_string.strip())
if match:
return match.group(1)
return "" |
<SYSTEM_TASK:>
Configure the ssh parameters from the config file.
<END_TASK>
<USER_TASK:>
Description:
def _configure(self):
""" Configure the ssh parameters from the config file. """ |
configfile = expanduser("~/.ssh/config")
if not isfile(configfile):
raise GerritError("ssh config file '%s' does not exist" %
configfile)
config = SSHConfig()
config.parse(open(configfile))
data = config.lookup(self.hostname)
if not data:
raise GerritError("No ssh config for host %s" % self.hostname)
if 'hostname' not in data or 'port' not in data or 'user' not in data:
raise GerritError("Missing configuration data in %s" % configfile)
self.hostname = data['hostname']
self.username = data['user']
if 'identityfile' in data:
key_filename = abspath(expanduser(data['identityfile'][0]))
if not isfile(key_filename):
raise GerritError("Identity file '%s' does not exist" %
key_filename)
self.key_filename = key_filename
try:
self.port = int(data['port'])
except ValueError:
raise GerritError("Invalid port: %s" % data['port'])
if 'proxycommand' in data:
self.proxy = ProxyCommand(data['proxycommand']) |
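For reference, a ~/.ssh/config entry satisfying the keys _configure looks up (hostname, port, user, identityfile) could look like this; all values are illustrative:

# ~/.ssh/config (illustrative values)
Host review
    Hostname review.example.org
    Port 29418
    User jdoe
    IdentityFile ~/.ssh/id_rsa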
<SYSTEM_TASK:>
Connect to the remote.
<END_TASK>
<USER_TASK:>
Description:
def _do_connect(self):
""" Connect to the remote. """ |
self.load_system_host_keys()
if self.username is None or self.port is None:
self._configure()
try:
self.connect(hostname=self.hostname,
port=self.port,
username=self.username,
key_filename=self.key_filename,
sock=self.proxy)
except socket.error as e:
raise GerritError("Failed to connect to server: %s" % e)
try:
version_string = self._transport.remote_version
pattern = re.compile(r'^.*GerritCodeReview_([a-z0-9-\.]*) .*$')
self.remote_version = _extract_version(version_string, pattern)
except AttributeError:
self.remote_version = None |
<SYSTEM_TASK:>
Connect to the remote if not already connected.
<END_TASK>
<USER_TASK:>
Description:
def _connect(self):
""" Connect to the remote if not already connected. """ |
if not self.connected.is_set():
try:
self.lock.acquire()
# Another thread may have connected while we were
# waiting to acquire the lock
if not self.connected.is_set():
self._do_connect()
if self.keepalive:
self._transport.set_keepalive(self.keepalive)
self.connected.set()
except GerritError:
raise
finally:
self.lock.release() |
<SYSTEM_TASK:>
Return the version of the remote Gerrit server.
<END_TASK>
<USER_TASK:>
Description:
def get_remote_version(self):
""" Return the version of the remote Gerrit server. """ |
if self.remote_version is None:
result = self.run_gerrit_command("version")
version_string = result.stdout.read()
pattern = re.compile(r'^gerrit version (.*)$')
self.remote_version = _extract_version(version_string, pattern)
return self.remote_version |
<SYSTEM_TASK:>
Decorator to register the event identified by `name`.
<END_TASK>
<USER_TASK:>
Description:
def register(cls, name):
""" Decorator to register the event identified by `name`.
Return the decorated class.
Raise GerritError if the event is already registered.
""" |
def decorate(klazz):
""" Decorator. """
if name in cls._events:
raise GerritError("Duplicate event: %s" % name)
cls._events[name] = [klazz.__module__, klazz.__name__]
klazz.name = name
return klazz
return decorate |
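Typical use is as a class decorator. A sketch assuming the factory class is called `GerritEventFactory` and events derive from a `GerritEvent` base class whose constructor takes the parsed JSON:

@GerritEventFactory.register("patchset-created")
class PatchsetCreatedEvent(GerritEvent):
    """ Sketch; a real implementation would parse fields from json_data. """
    def __init__(self, json_data):
        super(PatchsetCreatedEvent, self).__init__(json_data)
        self.change = json_data.get("change")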
<SYSTEM_TASK:>
Create a new event instance.
<END_TASK>
<USER_TASK:>
Description:
def create(cls, data):
""" Create a new event instance.
Return an instance of the `GerritEvent` subclass after converting
`data` to json.
Raise GerritError if json parsed from `data` does not contain a `type`
key.
""" |
try:
json_data = json.loads(data)
except ValueError as err:
logging.debug("Failed to load json data: %s: [%s]", str(err), data)
json_data = json.loads(ErrorEvent.error_json(err))
if "type" not in json_data:
raise GerritError("`type` not in json_data")
name = json_data["type"]
if name not in cls._events:
name = 'unhandled-event'
event = cls._events[name]
module_name = event[0]
class_name = event[1]
module = __import__(module_name, fromlist=[module_name])
klazz = getattr(module, class_name)
return klazz(json_data) |
<SYSTEM_TASK:>
Send HTTP PUT to the endpoint.
<END_TASK>
<USER_TASK:>
Description:
def put(self, endpoint, **kwargs):
""" Send HTTP PUT to the endpoint.
:arg str endpoint: The endpoint to send to.
:returns:
JSON decoded result.
:raises:
requests.RequestException on timeout or connection error.
""" |
kwargs.update(self.kwargs.copy())
if "data" in kwargs:
kwargs["headers"].update(
{"Content-Type": "application/json;charset=UTF-8"})
response = requests.put(self.make_url(endpoint), **kwargs)
return _decode_response(response) |
<SYSTEM_TASK:>
Send HTTP DELETE to the endpoint.
<END_TASK>
<USER_TASK:>
Description:
def delete(self, endpoint, **kwargs):
""" Send HTTP DELETE to the endpoint.
:arg str endpoint: The endpoint to send to.
:returns:
JSON decoded result.
:raises:
requests.RequestException on timeout or connection error.
""" |
kwargs.update(self.kwargs.copy())
response = requests.delete(self.make_url(endpoint), **kwargs)
return _decode_response(response) |
<SYSTEM_TASK:>
Add inline comments.
<END_TASK>
<USER_TASK:>
Description:
def add_comments(self, comments):
""" Add inline comments.
:arg dict comments: Comments to add.
Usage::
add_comments([{'filename': 'Makefile',
'line': 10,
'message': 'inline message'}])
add_comments([{'filename': 'Makefile',
'range': {'start_line': 0,
'start_character': 1,
'end_line': 0,
'end_character': 5},
'message': 'inline message'}])
""" |
for comment in comments:
if 'filename' in comment and 'message' in comment:
msg = {}
if 'range' in comment.keys():
msg = {"range": comment['range'],
"message": comment['message']}
elif 'line' in comment.keys():
msg = {"line": comment['line'],
"message": comment['message']}
else:
continue
file_comment = {comment['filename']: [msg]}
if self.comments:
if comment['filename'] in self.comments.keys():
self.comments[comment['filename']].append(msg)
else:
self.comments.update(file_comment)
else:
self.comments.update(file_comment) |
<SYSTEM_TASK:>
Convert a float to a list of GeoHash bits.
<END_TASK>
<USER_TASK:>
Description:
def _float_to_bits(value, lower=-90.0, middle=0.0, upper=90.0, length=15):
"""Convert a float to a list of GeoHash bits.""" |
ret = []
for i in range(length):
if value >= middle:
lower = middle
ret.append(1)
else:
upper = middle
ret.append(0)
middle = (upper + lower) / 2
return ret |
<SYSTEM_TASK:>
Convert a list of GeoHash bits to a GeoHash.
<END_TASK>
<USER_TASK:>
Description:
def _bits_to_geohash(value):
"""Convert a list of GeoHash bits to a GeoHash.""" |
ret = []
# Get 5 bits at a time
for chunk in (value[i:i + 5] for i in xrange(0, len(value), 5)):
# Convert the 5-bit chunk to an integer; reverse so the first bit is the most significant
total = sum(bit * 2 ** count for count, bit in enumerate(chunk[::-1]))
ret.append(BASE32MAPR[total])
# Join the string and return
return "".join(ret) |
<SYSTEM_TASK:>
Return the adjacent geohash for a given direction.
<END_TASK>
<USER_TASK:>
Description:
def adjacent(geohash, direction):
"""Return the adjacent geohash for a given direction.""" |
# Based on an MIT licensed implementation by Chris Veness from:
# http://www.movable-type.co.uk/scripts/geohash.html
assert direction in 'nsew', "Invalid direction: %s"%direction
assert geohash, "Invalid geohash: %s"%geohash
neighbor = {
'n': [ 'p0r21436x8zb9dcf5h7kjnmqesgutwvy', 'bc01fg45238967deuvhjyznpkmstqrwx' ],
's': [ '14365h7k9dcfesgujnmqp0r2twvyx8zb', '238967debc01fg45kmstqrwxuvhjyznp' ],
'e': [ 'bc01fg45238967deuvhjyznpkmstqrwx', 'p0r21436x8zb9dcf5h7kjnmqesgutwvy' ],
'w': [ '238967debc01fg45kmstqrwxuvhjyznp', '14365h7k9dcfesgujnmqp0r2twvyx8zb' ]
}
border = {
'n': [ 'prxz', 'bcfguvyz' ],
's': [ '028b', '0145hjnp' ],
'e': [ 'bcfguvyz', 'prxz' ],
'w': [ '0145hjnp', '028b' ]
}
last = geohash[-1]
parent = geohash[0:-1]
t = len(geohash) % 2
# Check for edge cases
if (last in border[direction][t]) and (parent):
parent = adjacent(parent, direction)
return parent + BASESEQUENCE[neighbor[direction][t].index(last)] |
<SYSTEM_TASK:>
Return all neighboring geohashes.
<END_TASK>
<USER_TASK:>
Description:
def neighbors(geohash):
"""Return all neighboring geohashes.""" |
return {
'n': adjacent(geohash, 'n'),
'ne': adjacent(adjacent(geohash, 'n'), 'e'),
'e': adjacent(geohash, 'e'),
'se': adjacent(adjacent(geohash, 's'), 'e'),
's': adjacent(geohash, 's'),
'sw': adjacent(adjacent(geohash, 's'), 'w'),
'w': adjacent(geohash, 'w'),
'nw': adjacent(adjacent(geohash, 'n'), 'w'),
'c': geohash
} |
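Usage is straightforward; note that the centre cell itself is returned under the 'c' key:

cells = neighbors('u4pru')  # 'u4pru' is a real geohash covering part of Denmark
for direction in ('n', 'e', 's', 'w'):
    print("%s: %s" % (direction, cells[direction]))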
<SYSTEM_TASK:>
Does the same as np.in1d but uses the fact that ar1 and ar2 are sorted, and is therefore much faster.
<END_TASK>
<USER_TASK:>
Description:
def in1d_sorted(ar1, ar2):
"""
Does the same as np.in1d but uses the fact that ar1 and ar2 are sorted, and is therefore much faster.
""" |
if ar1.shape[0] == 0 or ar2.shape[0] == 0: # check for empty arrays to avoid crash
return []
inds = ar2.searchsorted(ar1)
inds[inds == len(ar2)] = 0
return ar2[inds] == ar1 |
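A small worked example; for sorted inputs the output mirrors np.in1d:

import numpy as np

ar1 = np.array([1, 3, 5, 7])
ar2 = np.array([3, 4, 5, 6])
print(in1d_sorted(ar1, ar2))  # -> [False  True  True False]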
<SYSTEM_TASK:>
Takes a list of files, searches for the parameter name in the file name and returns an ordered dict with the file name
<END_TASK>
<USER_TASK:>
Description:
def get_parameter_value_from_file_names(files, parameters=None, unique=False, sort=True):
"""
Takes a list of files, searches for the parameter names in the file names and returns an ordered dict with the file name
in the first dimension and the corresponding parameter value in the second.
The file names can be sorted by the parameter value; otherwise the original order is kept. If unique is True, every parameter value is
unique and mapped to the file name that occurred last in the files list.
Parameters
----------
files : list of strings
parameters : string or list of strings
unique : bool
sort : bool
Returns
-------
collections.OrderedDict
""" |
logging.debug('Get the parameter: ' + str(parameters) + ' values from the file names of ' + str(len(files)) + ' files')
files_dict = collections.OrderedDict()
if parameters is None: # special case, no parameter defined
return files_dict
if isinstance(parameters, basestring):
parameters = (parameters, )
search_string = '_'.join(parameters)
for _ in parameters:
search_string += r'_(-?\d+)'
result = {}
for one_file in files:
parameter_values = re.findall(search_string, one_file)
if parameter_values:
if isinstance(parameter_values[0], tuple):
parameter_values = list(reduce(lambda t1, t2: t1 + t2, parameter_values))
parameter_values = [[int(i), ] for i in parameter_values] # convert string value to list with int
files_dict[one_file] = dict(zip(parameters, parameter_values))
if unique: # reduce to the files with different scan parameters
for key, value in files_dict.items():
if value not in result.values():
result[key] = value
else:
result[one_file] = files_dict[one_file]
return collections.OrderedDict(sorted(result.iteritems(), key=itemgetter(1)) if sort else files_dict) |
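For illustration, with file names following the `<base>_<parameter>_<value>.h5` convention assumed by the regex above; the expected output holds under Python 2, which this module targets:

files = ['scan_PlsrDAC_100.h5', 'scan_PlsrDAC_80.h5']
print(get_parameter_value_from_file_names(files, parameters='PlsrDAC'))
# -> OrderedDict([('scan_PlsrDAC_80.h5', {'PlsrDAC': [80]}), ('scan_PlsrDAC_100.h5', {'PlsrDAC': [100]})])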
<SYSTEM_TASK:>
Generate a list of .h5 files which have a similar file name.
<END_TASK>
<USER_TASK:>
Description:
def get_data_file_names_from_scan_base(scan_base, filter_str=['_analyzed.h5', '_interpreted.h5', '_cut.h5', '_result.h5', '_hists.h5'], sort_by_time=True, meta_data_v2=True):
"""
Generate a list of .h5 files which have a similar file name.
Parameters
----------
scan_base : list, string
List of string or string of the scan base names. The scan_base will be used to search for files containing the string. The .h5 file extension will be added automatically.
filter_str : list, string
List of string or string which are used to filter the returned filenames. File names containing filter_str in the file name will not be returned. Use None to disable filter.
sort_by_time : bool
If True, return file name list sorted from oldest to newest. The time from meta table will be used to sort the files.
meta_data_v2 : bool
True for new (v2) meta data format, False for the old (v1) format.
Returns
-------
data_files : list
List of file names matching the above conditions.
""" |
data_files = []
if scan_base is None:
return data_files
if isinstance(scan_base, basestring):
scan_base = [scan_base]
for scan_base_str in scan_base:
if '.h5' == os.path.splitext(scan_base_str)[1]:
data_files.append(scan_base_str)
else:
data_files.extend(glob.glob(scan_base_str + '*.h5'))
if filter_str:
if isinstance(filter_str, basestring):
filter_str = [filter_str]
data_files = filter(lambda data_file: not any(x in data_file for x in filter_str), data_files)
if sort_by_time and len(data_files) > 1:
f_list = {}
for data_file in data_files:
with tb.open_file(data_file, mode="r") as h5_file:
try:
meta_data = h5_file.root.meta_data
except tb.NoSuchNodeError:
logging.warning("File %s is missing meta_data" % h5_file.filename)
else:
try:
if meta_data_v2:
timestamp = meta_data[0]["timestamp_start"]
else:
timestamp = meta_data[0]["timestamp"]
except IndexError:
logging.info("File %s has empty meta_data" % h5_file.filename)
else:
f_list[data_file] = timestamp
data_files = list(sorted(f_list, key=f_list.__getitem__, reverse=False))
return data_files |
<SYSTEM_TASK:>
Checks if the parameter names of all files are similar. Takes the dictionary from get_parameter_from_files output as input.
<END_TASK>
<USER_TASK:>
Description:
def check_parameter_similarity(files_dict):
"""
Checks if the parameter names of all files are similar. Takes the dictionary from get_parameter_from_files output as input.
""" |
try:
parameter_names = files_dict.itervalues().next().keys() # get the parameter names of the first file, to check if these are the same in the other files
except AttributeError: # if there is no parameter at all
if any(i is not None for i in files_dict.itervalues()): # check if there is also no parameter for the other files
return False
else:
return True
if any(parameter_names != i.keys() for i in files_dict.itervalues()):
return False
return True |
<SYSTEM_TASK:>
Takes the dict of hdf5 files and combines their meta data tables into one new numpy record array.
<END_TASK>
<USER_TASK:>
Description:
def combine_meta_data(files_dict, meta_data_v2=True):
"""
Takes the dict of hdf5 files and combines their meta data tables into one new numpy record array.
Parameters
----------
files_dict : dict
Dictionary whose keys are hdf5 file names; their meta data tables are combined.
meta_data_v2 : bool
True for new (v2) meta data format, False for the old (v1) format.
""" |
if len(files_dict) > 10:
logging.info("Combine the meta data from %d files", len(files_dict))
# determine total length needed for the new combined array; that's the fastest way to combine arrays
total_length = 0 # the total length of the new table
for file_name in files_dict.iterkeys():
with tb.open_file(file_name, mode="r") as in_file_h5: # open the actual file
total_length += in_file_h5.root.meta_data.shape[0]
if meta_data_v2:
meta_data_combined = np.empty((total_length, ), dtype=[
('index_start', np.uint32),
('index_stop', np.uint32),
('data_length', np.uint32),
('timestamp_start', np.float64),
('timestamp_stop', np.float64),
('error', np.uint32)])
else:
meta_data_combined = np.empty((total_length, ), dtype=[
('start_index', np.uint32),
('stop_index', np.uint32),
('length', np.uint32),
('timestamp', np.float64),
('error', np.uint32)])
if len(files_dict) > 10:
progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=total_length, term_width=80)
progress_bar.start()
index = 0
# fill actual result array
for file_name in files_dict.iterkeys():
with tb.open_file(file_name, mode="r") as in_file_h5: # open the actual file
array_length = in_file_h5.root.meta_data.shape[0]
meta_data_combined[index:index + array_length] = in_file_h5.root.meta_data[:]
index += array_length
if len(files_dict) > 10:
progress_bar.update(index)
if len(files_dict) > 10:
progress_bar.finish()
return meta_data_combined |
<SYSTEM_TASK:>
Takes two sorted arrays and returns the intersection: ar1 in ar2, ar2 in ar1.
<END_TASK>
<USER_TASK:>
Description:
def reduce_sorted_to_intersect(ar1, ar2):
"""
Takes two sorted arrays and returns the intersection: ar1 in ar2, ar2 in ar1.
Parameters
----------
ar1 : (M,) array_like
Input array.
ar2 : array_like
Input array.
Returns
-------
ar1, ar2 : ndarray, ndarray
The intersection values.
""" |
# Ravel both arrays, behavior for the first array could be different
ar1 = np.asarray(ar1).ravel()
ar2 = np.asarray(ar2).ravel()
# get min max values of the arrays
ar1_biggest_value = ar1[-1]
ar1_smallest_value = ar1[0]
ar2_biggest_value = ar2[-1]
ar2_smallest_value = ar2[0]
if ar1_biggest_value < ar2_smallest_value or ar1_smallest_value > ar2_biggest_value: # special case, no intersection at all
return ar1[0:0], ar2[0:0]
# get min/max indices with values that are also in the other array
min_index_ar1 = np.argmin(ar1 < ar2_smallest_value)
max_index_ar1 = np.argmax(ar1 > ar2_biggest_value)
min_index_ar2 = np.argmin(ar2 < ar1_smallest_value)
max_index_ar2 = np.argmax(ar2 > ar1_biggest_value)
if min_index_ar1 < 0:
min_index_ar1 = 0
if min_index_ar2 < 0:
min_index_ar2 = 0
if max_index_ar1 == 0 or max_index_ar1 > ar1.shape[0]:
max_index_ar1 = ar1.shape[0]
if max_index_ar2 == 0 or max_index_ar2 > ar2.shape[0]:
max_index_ar2 = ar2.shape[0]
# reduce the data
return ar1[min_index_ar1:max_index_ar1], ar2[min_index_ar2:max_index_ar2] |
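A small worked example of the reduction:

import numpy as np

ar1 = np.array([1, 2, 3, 7, 8])
ar2 = np.array([3, 7, 9])
print(reduce_sorted_to_intersect(ar1, ar2))
# -> (array([3, 7, 8]), array([3, 7]))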
<SYSTEM_TASK:>
Checking FEI4 raw data array for corrupted data.
<END_TASK>
<USER_TASK:>
Description:
def check_bad_data(raw_data, prepend_data_headers=None, trig_count=None):
"""Checking FEI4 raw data array for corrupted data.
""" |
consecutive_triggers = 16 if trig_count == 0 else trig_count
is_fe_data_header = logical_and(is_fe_word, is_data_header)
trigger_idx = np.where(is_trigger_word(raw_data) >= 1)[0]
fe_dh_idx = np.where(is_fe_data_header(raw_data) >= 1)[0]
n_triggers = trigger_idx.shape[0]
n_dh = fe_dh_idx.shape[0]
# get index of the last trigger
if n_triggers:
last_event_data_headers_cnt = np.where(fe_dh_idx > trigger_idx[-1])[0].shape[0]
if consecutive_triggers and last_event_data_headers_cnt == consecutive_triggers:
if not np.all(trigger_idx[-1] > fe_dh_idx):
trigger_idx = np.r_[trigger_idx, raw_data.shape]
last_event_data_headers_cnt = None
elif last_event_data_headers_cnt != 0:
fe_dh_idx = fe_dh_idx[:-last_event_data_headers_cnt]
elif not np.all(trigger_idx[-1] > fe_dh_idx):
trigger_idx = np.r_[trigger_idx, raw_data.shape]
# if any data header, add trigger for histogramming, next readout has to have trigger word
elif n_dh:
trigger_idx = np.r_[trigger_idx, raw_data.shape]
last_event_data_headers_cnt = None
# no trigger, no data header
# assuming correct data, return input values
else:
return False, prepend_data_headers, n_triggers, n_dh
# # no triggers, check for the right amount of data headers
# if consecutive_triggers and prepend_data_headers and prepend_data_headers + n_dh != consecutive_triggers:
# return True, n_dh, n_triggers, n_dh
n_triggers_cleaned = trigger_idx.shape[0]
n_dh_cleaned = fe_dh_idx.shape[0]
# check that trigger comes before data header
if prepend_data_headers is None and n_triggers_cleaned and n_dh_cleaned and not trigger_idx[0] < fe_dh_idx[0]:
return True, last_event_data_headers_cnt, n_triggers, n_dh # FIXME: 0?
# check that no trigger comes before the first data header
elif consecutive_triggers and prepend_data_headers is not None and n_triggers_cleaned and n_dh_cleaned and trigger_idx[0] < fe_dh_idx[0]:
return True, last_event_data_headers_cnt, n_triggers, n_dh # FIXME: 0?
# check for two consecutive triggers
elif consecutive_triggers is None and prepend_data_headers == 0 and n_triggers_cleaned and n_dh_cleaned and trigger_idx[0] < fe_dh_idx[0]:
return True, last_event_data_headers_cnt, n_triggers, n_dh # FIXME: 0?
elif prepend_data_headers is not None:
trigger_idx += (prepend_data_headers + 1)
fe_dh_idx += (prepend_data_headers + 1)
# for histogramming add trigger at index 0
trigger_idx = np.r_[0, trigger_idx]
fe_dh_idx = np.r_[range(1, prepend_data_headers + 1), fe_dh_idx]
event_hist, bins = np.histogram(fe_dh_idx, trigger_idx)
if consecutive_triggers is None and np.any(event_hist == 0):
return True, last_event_data_headers_cnt, n_triggers, n_dh
elif consecutive_triggers and np.any(event_hist != consecutive_triggers):
return True, last_event_data_headers_cnt, n_triggers, n_dh
return False, last_event_data_headers_cnt, n_triggers, n_dh |
<SYSTEM_TASK:>
Printing FEI4 data from raw data file for debugging.
<END_TASK>
<USER_TASK:>
Description:
def print_raw_data_file(input_file, start_index=0, limit=200, flavor='fei4b', select=None, tdc_trig_dist=False, trigger_data_mode=0, meta_data_v2=True):
"""Printing FEI4 data from raw data file for debugging.
""" |
with tb.open_file(input_file + '.h5', mode="r") as file_h5:
if meta_data_v2:
index_start = file_h5.root.meta_data.read(field='index_start')
index_stop = file_h5.root.meta_data.read(field='index_stop')
else:
index_start = file_h5.root.meta_data.read(field='start_index')
index_stop = file_h5.root.meta_data.read(field='stop_index')
total_words = 0
for read_out_index, (index_start, index_stop) in enumerate(np.column_stack((index_start, index_stop))):
if start_index < index_stop:
print "\nchunk %d with length %d (from index %d to %d)\n" % (read_out_index, (index_stop - index_start), index_start, index_stop)
raw_data = file_h5.root.raw_data.read(index_start, index_stop)
total_words += print_raw_data(raw_data=raw_data, start_index=max(start_index - index_start, 0), limit=limit - total_words, flavor=flavor, index_offset=index_start, select=select, tdc_trig_dist=tdc_trig_dist, trigger_data_mode=trigger_data_mode)
if limit and total_words >= limit:
break |
<SYSTEM_TASK:>
Printing FEI4 raw data array for debugging.
<END_TASK>
<USER_TASK:>
Description:
def print_raw_data(raw_data, start_index=0, limit=200, flavor='fei4b', index_offset=0, select=None, tdc_trig_dist=False, trigger_data_mode=0):
"""Printing FEI4 raw data array for debugging.
""" |
if not select:
select = ['DH', 'TW', "AR", "VR", "SR", "DR", 'TDC', 'UNKNOWN FE WORD', 'UNKNOWN WORD']
total_words = 0
for index in range(start_index, raw_data.shape[0]):
dw = FEI4Record(raw_data[index], chip_flavor=flavor, tdc_trig_dist=tdc_trig_dist, trigger_data_mode=trigger_data_mode)
if dw in select:
print index + index_offset, '{0:12d} {1:08b} {2:08b} {3:08b} {4:08b}'.format(raw_data[index], (raw_data[index] & 0xFF000000) >> 24, (raw_data[index] & 0x00FF0000) >> 16, (raw_data[index] & 0x0000FF00) >> 8, (raw_data[index] & 0x000000FF) >> 0), dw
total_words += 1
if limit and total_words >= limit:
break
return total_words |