[Dataset column summary: text_prompt — string, lengths 100 to 17.7k characters; code_prompt — string, lengths 7 to 9.86k characters.]
<SYSTEM_TASK:> Interpolate a value from start to end at a given progress. <END_TASK> <USER_TASK:> Description: def _interpolate(start, end, step, total_steps): """ Interpolate a value from start to end at a given progress. :param start: The start value. :param end: The end value. :param step: The current step. :param total_steps: The total number of steps. :return: The interpolated value at the given progress. """
diff = end - start
progress = step / (total_steps - 1)
return start + progress * diff
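A quick usage sketch for the helper above (the values are an illustrative calculation, not taken from the source): stepping from 0.0 to 1.0 over five steps yields evenly spaced values.

values = [_interpolate(0.0, 1.0, step, total_steps=5) for step in range(5)]
# values == [0.0, 0.25, 0.5, 0.75, 1.0]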
<SYSTEM_TASK:> Interpolate a value from start to end at a given progress. <END_TASK> <USER_TASK:> Description: def instance(cls, hostname=None, auth=None, protocol=None, port=None, database=None): """ This method is called from everywhere in the code which accesses the database. If any of the parameters is set, the corresponding attribute is overwritten on the shared client instance, affecting all other callers. """
if cls.class_instance is None:
    if hostname is None and auth is None and protocol is None and port is None and database is None:
        cls.class_instance = Client(hostname='localhost')
    else:
        cls.class_instance = Client(hostname=hostname, auth=auth, protocol=protocol,
                                    port=port, database=database)
else:
    if hostname is not None:
        cls.class_instance.hostname = hostname
    if protocol is not None:
        cls.class_instance.protocol = protocol
    if port is not None:
        cls.class_instance.port = port
    if database is not None:
        cls.class_instance.database = database
    if auth is not None:
        cls.class_instance.auth = auth
    cls.class_instance._create_api()

return cls.class_instance
<SYSTEM_TASK:> Returns a collection with the given name <END_TASK> <USER_TASK:> Description: def collection(self, name): """ Returns a collection with the given name :param name Collection name :returns Collection """
return Collection(
    name=name,
    api_resource=self.api.collection(name),
    api=self.api,
)
<SYSTEM_TASK:> Creates database and sets itself as the active database. <END_TASK> <USER_TASK:> Description: def create(cls, name, users=None): """ Creates database and sets itself as the active database. :param name Database name :returns Database """
api = Client.instance().api

database_data = {
    'name': name,
    'active': True,
}

if isinstance(users, list) or isinstance(users, tuple):
    database_data['users'] = users

data = api.database.post(data=database_data)

db = Database(
    name=name,
    api=api,
    kwargs=data
)

Client.instance().set_database(name=name)

return db
<SYSTEM_TASK:> Destroys the database. <END_TASK> <USER_TASK:> Description: def remove(cls, name): """ Destroys the database. """
client = Client.instance()

new_current_database = None
if client.database != name:
    new_current_database = client.database

# Deletions are only possible from the system database
client.set_database(name=SYSTEM_DATABASE)

api = client.api
api.database(name).delete()

if new_current_database:
    client.set_database(name=new_current_database)
<SYSTEM_TASK:> Shortcut to create a collection <END_TASK> <USER_TASK:> Description: def create_collection(self, name, type=2): """ Shortcut to create a collection :param name Collection name :param type Collection type (2 = document / 3 = edge) :returns Collection """
return Collection.create(name=name, database=self.name, type=type)
<SYSTEM_TASK:> Retrieves all properties again for the collection and <END_TASK> <USER_TASK:> Description: def get(self): """ Retrieves all properties again for the collection and sets the attributes. """
data = self.resource(self.name).properties.get()
self.set_data(**data)
return data
<SYSTEM_TASK:> Returns figures about the collection. <END_TASK> <USER_TASK:> Description: def get_figures(self): """ Returns figures about the collection. """
data = self.resource(self.name).figures.get()
return data['figures']
<SYSTEM_TASK:> Creates document object without really creating it in the collection. <END_TASK> <USER_TASK:> Description: def create(cls, collection): """ Creates document object without really creating it in the collection. :param collection Collection instance :returns Document """
api = Client.instance().api

doc = Document(
    id='',
    key='',
    collection=collection.name,
    api=api,
)

return doc
<SYSTEM_TASK:> If its internal state is loaded then it will only update the <END_TASK> <USER_TASK:> Description: def save(self): """ If its internal state is loaded then it will only update the set properties, but otherwise it will create a new document. """
# TODO: Add option force_insert
if not self.is_loaded and (self.id is None or self.id == ''):
    data = self.resource.post(data=self.data, collection=self.collection)
    self.id = data['_id']
    self.key = data['_key']
    self.revision = data['_rev']
    self.is_loaded = True
else:
    data = self.resource(self.id).patch(data=self.data)
    self.revision = data['_rev']
<SYSTEM_TASK:> Get the maximum number of steps the driver needs for a transition. <END_TASK> <USER_TASK:> Description: def steps(self, start, end): """ Get the maximum number of steps the driver needs for a transition. :param start: The start value as uniform pwm value (0.0-1.0). :param end: The end value as uniform pwm value (0.0-1.0). :return: The maximum number of steps. """
if not 0 <= start <= 1:
    raise ValueError('Values must be between 0 and 1.')
if not 0 <= end <= 1:
    raise ValueError('Values must be between 0 and 1.')

raw_start = self._to_single_raw_pwm(start)
raw_end = self._to_single_raw_pwm(end)
return abs(raw_start - raw_end)
<SYSTEM_TASK:> Get interleave mask for current thread. <END_TASK> <USER_TASK:> Description: def get_interleave_mask(): """ Get interleave mask for current thread. @return: node mask @rtype: C{set} """
nodemask = nodemask_t()
bitmask = libnuma.numa_get_interleave_mask()
libnuma.copy_bitmask_to_nodemask(bitmask, byref(nodemask))
libnuma.numa_bitmask_free(bitmask)
return numa_nodemask_to_set(nodemask)
<SYSTEM_TASK:> Binds the current thread and its children to the nodes specified in nodemask. <END_TASK> <USER_TASK:> Description: def bind(nodemask): """ Binds the current thread and its children to the nodes specified in nodemask. They will only run on the CPUs of the specified nodes and only be able to allocate memory from them. @param nodemask: node mask @type nodemask: C{set} """
mask = set_to_numa_nodemask(nodemask)
bitmask = libnuma.numa_allocate_nodemask()
libnuma.copy_nodemask_to_bitmask(byref(mask), bitmask)
libnuma.numa_bind(bitmask)
libnuma.numa_bitmask_free(bitmask)
<SYSTEM_TASK:> Sets the preferred node for the current thread to node. <END_TASK> <USER_TASK:> Description: def set_preferred(node): """ Sets the preferred node for the current thread to node. The preferred node is the node on which memory is preferably allocated before falling back to other nodes. The default is to use the node on which the process is currently running (local policy). @param node: node idx @type node: C{int} """
if node < 0 or node > get_max_node():
    raise ValueError(node)
libnuma.numa_set_preferred(node)
<SYSTEM_TASK:> Sets the memory allocation mask. <END_TASK> <USER_TASK:> Description: def set_membind(nodemask): """ Sets the memory allocation mask. The thread will only allocate memory from the nodes set in nodemask. @param nodemask: node mask @type nodemask: C{set} """
mask = set_to_numa_nodemask(nodemask)
tmp = bitmask_t()
tmp.maskp = cast(byref(mask), POINTER(c_ulong))
tmp.size = sizeof(nodemask_t) * 8
libnuma.numa_set_membind(byref(tmp))
<SYSTEM_TASK:> Returns the mask of nodes from which memory can currently be allocated. <END_TASK> <USER_TASK:> Description: def get_membind(): """ Returns the mask of nodes from which memory can currently be allocated. @return: node mask @rtype: C{set} """
bitmask = libnuma.numa_get_membind()
nodemask = nodemask_t()
libnuma.copy_bitmask_to_nodemask(bitmask, byref(nodemask))
libnuma.numa_bitmask_free(bitmask)
return numa_nodemask_to_set(nodemask)
<SYSTEM_TASK:> Runs the current thread and its children only on nodes specified in nodemask. <END_TASK> <USER_TASK:> Description: def set_run_on_node_mask(nodemask): """ Runs the current thread and its children only on nodes specified in nodemask. They will not migrate to CPUs of other nodes until the node affinity is reset with a new call to L{set_run_on_node_mask}. @param nodemask: node mask @type nodemask: C{set} """
mask = set_to_numa_nodemask(nodemask)
tmp = bitmask_t()
tmp.maskp = cast(byref(mask), POINTER(c_ulong))
tmp.size = sizeof(nodemask_t) * 8
if libnuma.numa_run_on_node_mask(byref(tmp)) < 0:
    raise RuntimeError()
<SYSTEM_TASK:> Returns the mask of nodes that the current thread is allowed to run on. <END_TASK> <USER_TASK:> Description: def get_run_on_node_mask(): """ Returns the mask of nodes that the current thread is allowed to run on. @return: node mask @rtype: C{set} """
bitmask = libnuma.numa_get_run_node_mask()
nodemask = nodemask_t()
libnuma.copy_bitmask_to_nodemask(bitmask, byref(nodemask))
libnuma.numa_bitmask_free(bitmask)
return numa_nodemask_to_set(nodemask)
<SYSTEM_TASK:> Reports the distance in the machine topology between two nodes. <END_TASK> <USER_TASK:> Description: def get_distance(node1, node2): """ Reports the distance in the machine topology between two nodes. The factors are a multiple of 10. It returns 0 when the distance cannot be determined. A node has distance 10 to itself. Reporting the distance requires a Linux kernel version of 2.6.10 or newer. @param node1: node idx @type node1: C{int} @param node2: node idx @type node2: C{int} @rtype: C{int} """
if node1 < 0 or node1 > get_max_node():
    raise ValueError(node1)
if node2 < 0 or node2 > get_max_node():
    raise ValueError(node2)
return libnuma.numa_distance(node1, node2)
<SYSTEM_TASK:> Returns the affinity mask of the process whose ID is pid. <END_TASK> <USER_TASK:> Description: def get_affinity(pid): """ Returns the affinity mask of the process whose ID is pid. @param pid: process PID (0 == current process) @type pid: C{int} @return: set of CPU ids @rtype: C{set} """
cpuset = cpu_set_t()
result = set()
libnuma.sched_getaffinity(pid, sizeof(cpu_set_t), byref(cpuset))
for i in range(0, sizeof(cpu_set_t) * 8):
    if __CPU_ISSET(i, cpuset):
        result.add(i)
return result
<SYSTEM_TASK:> Sets the CPU affinity mask of the process whose ID is pid to the value specified by mask. <END_TASK> <USER_TASK:> Description: def set_affinity(pid, cpuset): """ Sets the CPU affinity mask of the process whose ID is pid to the value specified by mask. If pid is zero, then the calling process is used. @param pid: process PID (0 == current process) @type pid: C{int} @param cpuset: set of CPU ids @type cpuset: C{set} """
_cpuset = cpu_set_t()
__CPU_ZERO(_cpuset)
for i in cpuset:
    if i in range(0, sizeof(cpu_set_t) * 8):
        __CPU_SET(i, _cpuset)
if libnuma.sched_setaffinity(pid, sizeof(cpu_set_t), byref(_cpuset)) < 0:
    raise RuntimeError()
<SYSTEM_TASK:> Assert that the given value is a valid color. <END_TASK> <USER_TASK:> Description: def _assert_is_color(value): """ Assert that the given value is a valid RGB color. :param value: The value to check. """
if not isinstance(value, tuple) or len(value) != 3:
    raise ValueError("Color must be a RGB tuple.")

if not all(0 <= x <= 255 for x in value):
    raise ValueError("RGB values of color must be between 0 and 255.")
<SYSTEM_TASK:> Deletes an index with id <END_TASK> <USER_TASK:> Description: def remove(cls, id): """ Deletes an index with id :param id string/document-handle """
api = Client.instance().api
api.index(id).delete()
<SYSTEM_TASK:> Creates this index in the collection if it hasn't been already created <END_TASK> <USER_TASK:> Description: def save(self): """ Creates this index in the collection if it hasn't been already created """
api = Client.instance().api

index_details = {
    'type': self.index_type_obj.type_name
}

extra_index_attributes = self.index_type_obj.get_extra_attributes()

for extra_attribute_key in extra_index_attributes:
    extra_attribute_value = extra_index_attributes[extra_attribute_key]
    index_details[extra_attribute_key] = extra_attribute_value

query_parameters = {
    'collection': self.collection.name,
}

result = api.index.post(data=index_details, **query_parameters)

self.index_type_obj.is_new = result['isNewlyCreated']
self.index_type_obj.id = result['id']
<SYSTEM_TASK:> Streamlined display of images using matplotlib. <END_TASK> <USER_TASK:> Description: def image(img, cmap='gray', bar=False, nans=True, clim=None, size=7, ax=None): """ Streamlined display of images using matplotlib. Parameters ---------- img : ndarray, 2D or 3D The image to display cmap : str or Colormap, optional, default = 'gray' A colormap to use, for non-RGB images bar : boolean, optional, default = False Whether to append a colorbar nans : boolean, optional, default = True Whether to replace NaNs; if True, will replace with 0s clim : tuple, optional, default = None Limits for scaling image size : scalar, optional, default = 7 Size of the figure ax : matplotlib axis, optional, default = None An existing axis to plot into """
from matplotlib.pyplot import axis, colorbar, figure, gca

img = asarray(img)

if (nans is True) and (img.dtype != bool):
    img = nan_to_num(img)

if ax is None:
    f = figure(figsize=(size, size))
    ax = gca()

if img.ndim == 3:
    if bar:
        raise ValueError("Cannot show meaningful colorbar for RGB images")
    if img.shape[2] != 3:
        raise ValueError("Size of third dimension must be 3 for RGB images, got %g" % img.shape[2])
    mn = img.min()
    mx = img.max()
    if mn < 0.0 or mx > 1.0:
        raise ValueError("Values must be between 0.0 and 1.0 for RGB images, got range (%g, %g)" % (mn, mx))
    im = ax.imshow(img, interpolation='nearest', clim=clim)
else:
    im = ax.imshow(img, cmap=cmap, interpolation='nearest', clim=clim)

if bar is True:
    cb = colorbar(im, fraction=0.046, pad=0.04)
    rng = abs(cb.vmax - cb.vmin) * 0.05
    cb.set_ticks([around(cb.vmin + rng, 1), around(cb.vmax - rng, 1)])
    cb.outline.set_visible(False)

axis('off')

return im
<SYSTEM_TASK:> Returns a list of all configured endpoints the server is listening on. For each endpoint, <END_TASK> <USER_TASK:> Description: def all(cls): """ Returns a list of all configured endpoints the server is listening on. For each endpoint, the list of allowed databases is returned too if set. The result is a JSON hash which has the endpoints as keys, and the list of mapped database names as values for each endpoint. If a list of mapped databases is empty, it means that all databases can be accessed via the endpoint. If a list of mapped databases contains more than one database name, this means that any of the databases might be accessed via the endpoint, and the first database in the list will be treated as the default database for the endpoint. The default database will be used when an incoming request does not specify a database name in the request explicitly. *Note*: retrieving the list of all endpoints is allowed in the system database only. Calling this action in any other database will make the server return an error. """
api = Client.instance().api
endpoint_list = api.endpoint.get()
return endpoint_list
<SYSTEM_TASK:> If databases is an empty list, all databases present in the server will become accessible via the endpoint, <END_TASK> <USER_TASK:> Description: def create(cls, url, databases): """ If databases is an empty list, all databases present in the server will become accessible via the endpoint, with the _system database being the default database. If databases is non-empty, only the specified databases will become available via the endpoint. The first database name in the databases list will also become the default database for the endpoint. The default database will always be used if a request coming in on the endpoint does not specify the database name explicitly. *Note*: adding or reconfiguring endpoints is allowed in the system database only. Calling this action in any other database will make the server return an error. Adding SSL endpoints at runtime is only supported if the server was started with SSL properly configured (e.g. --server.keyfile must have been set). :param url the endpoint specification, e.g. tcp://127.0.0.1:8530 :param databases a list of database names the endpoint is responsible for. """
api = Client.instance().api
result = api.endpoint.post(data={
    'endpoint': url,
    'databases': databases,
})
return result
<SYSTEM_TASK:> This operation deletes an existing endpoint from the list of all endpoints, <END_TASK> <USER_TASK:> Description: def destroy(cls, url): """ This operation deletes an existing endpoint from the list of all endpoints, and makes the server stop listening on the endpoint. *Note*: deleting and disconnecting an endpoint is allowed in the system database only. Calling this action in any other database will make the server return an error. Furthermore, the last remaining endpoint cannot be deleted as this would make the server kaput. :param url The endpoint to delete, e.g. tcp://127.0.0.1:8529. """
api = Client.instance().api
api.endpoint(url).delete()
<SYSTEM_TASK:> Return a deferred that will be fired when the event is fired. <END_TASK> <USER_TASK:> Description: def wait(self): """ Return a deferred that will be fired when the event is fired. """
d = defer.Deferred()
if self._result is None:
    self._waiters.append(d)
else:
    self._fire_deferred(d)
return d
<SYSTEM_TASK:> Explicitly close a channel <END_TASK> <USER_TASK:> Description: def close(self, reason): """Explicitly close a channel"""
self._closing = True
self.do_close(reason)
self._closing = False
<SYSTEM_TASK:> Raise the appropriate Closed-based error for the given reason. <END_TASK> <USER_TASK:> Description: def _raise_closed(reason): """Raise the appropriate Closed-based error for the given reason."""
if isinstance(reason, Message):
    if reason.method.klass.name == "channel":
        raise ChannelClosed(reason)
    elif reason.method.klass.name == "connection":
        raise ConnectionClosed(reason)
raise Closed(reason)
<SYSTEM_TASK:> Explicitly close the connection. <END_TASK> <USER_TASK:> Description: def close(self, reason=None, within=0): """Explicitly close the connection. @param reason: Optional closing reason. If not given, ConnectionDone will be used. @param within: Shut down the client within this number of seconds. If zero (the default), all channels and queues will be closed immediately. If greater than 0, try to close the AMQP connection cleanly, by sending a "close" method and waiting for "close-ok". If no reply is received within the given amount of seconds, the transport will be forcibly shut down. """
if self.closed:
    return
if reason is None:
    reason = ConnectionDone()
if within > 0:
    channel0 = yield self.channel(0)
    deferred = channel0.connection_close()
    call = self.clock.callLater(within, deferred.cancel)
    try:
        yield deferred
    except defer.CancelledError:
        pass
    else:
        call.cancel()
self.do_close(reason)
<SYSTEM_TASK:> Attempt to establish a TCP connection. <END_TASK> <USER_TASK:> Description: def _do_tcp_check(self, ip, results): """ Attempt to establish a TCP connection. If not successful, record the IP in the results dict. Always closes the connection at the end. """
try:
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.settimeout(1)
    sock.connect((ip, self.conf['tcp_check_port']))
except:
    # Any problem during the connection attempt? We won't diagnose it,
    # we just indicate failure by adding the IP to the list
    results.append(ip)
finally:
    sock.close()
<SYSTEM_TASK:> Perform a health check on a list of IP addresses. <END_TASK> <USER_TASK:> Description: def do_health_checks(self, list_of_ips): """ Perform a health check on a list of IP addresses. Each check (we use a TCP connection attempt) is run in its own thread. Gather up the results and return the list of those addresses that failed the test and the list of questionable IPs. TODO: Currently, this starts a thread for every single address we want to check. That's probably not a good idea if we have thousands of addresses. Therefore, we should implement some batching for large sets. """
threads = []
results = []

# Start the thread for each IP we wish to check.
for count, ip in enumerate(list_of_ips):
    thread = threading.Thread(
        target = self._do_tcp_check,
        name   = "%s:%s" % (self.thread_name, ip),
        args   = (ip, results))
    thread.start()
    threads.append(thread)

# ... make sure all threads are done...
for thread in threads:
    thread.join()

# ... and send back all the failed IPs.
return results, []
<SYSTEM_TASK:> Start the monitoring thread of the plugin. <END_TASK> <USER_TASK:> Description: def start(self): """ Start the monitoring thread of the plugin. """
logging.info("TCP health monitor plugin: Starting to watch "
             "instances.")

self.monitor_thread = threading.Thread(target = self.start_monitoring,
                                       name   = self.thread_name)
self.monitor_thread.daemon = True
self.monitor_thread.start()
<SYSTEM_TASK:> Arguments for the TCP health monitor plugin. <END_TASK> <USER_TASK:> Description: def add_arguments(cls, parser, sys_arg_list=None): """ Arguments for the TCP health monitor plugin. """
parser.add_argument('--tcp_check_interval',
                    dest='tcp_check_interval',
                    required=False, default=2, type=float,
                    help="TCP health-test interval in seconds, "
                         "default 2 "
                         "(only for 'tcp' health monitor plugin)")
parser.add_argument('--tcp_check_port',
                    dest='tcp_check_port',
                    required=False, default=22, type=int,
                    help="Port for TCP health-test, default 22 "
                         "(only for 'tcp' health monitor plugin)")
return ["tcp_check_interval", "tcp_check_port"]
<SYSTEM_TASK:> Return a list of 'sparkline' strings for a given list of input numbers. <END_TASK> <USER_TASK:> Description: def sparklines(numbers=[], num_lines=1, emph=None, verbose=False, minimum=None, maximum=None, wrap=None): """ Return a list of 'sparkline' strings for a given list of input numbers. The list of input numbers may contain None values, too, for which the resulting sparkline will contain a blank character (a space). Examples: sparklines([3, 1, 4, 1, 5, 9, 2, 6]) -> ['β–ƒβ–β–„β–β–„β–ˆβ–‚β–…'] sparklines([3, 1, 4, 1, 5, 9, 2, 6], num_lines=2) -> [ ' β–ˆ β–‚', 'β–…β–β–†β–β–ˆβ–ˆβ–ƒβ–ˆ' ] """
assert num_lines > 0

if len(numbers) == 0:
    return ['']

# raise warning for negative numbers
_check_negatives(numbers)

values = scale_values(numbers, num_lines=num_lines, minimum=minimum, maximum=maximum)

# find values to be highlighted
emphasized = _check_emphasis(numbers, emph) if emph else {}

point_index = 0
subgraphs = []
for subgraph_values in batch(wrap, values):
    multi_values = []
    for i in range(num_lines):
        multi_values.append([
            min(v, 8) if not v is None else None
            for v in subgraph_values
        ])
        subgraph_values = [max(0, v-8) if not v is None else None for v in subgraph_values]
    multi_values.reverse()

    lines = []
    for subgraph_values in multi_values:
        if HAVE_TERMCOLOR and emphasized:
            tc = termcolor.colored
            res = [tc(blocks[int(v)], emphasized.get(point_index + i, 'white'))
                   if not v is None else ' '
                   for (i, v) in enumerate(subgraph_values)]
        else:
            res = [blocks[int(v)] if not v is None else ' ' for v in subgraph_values]
        lines.append(''.join(res))

    subgraphs.append(lines)
    point_index += len(subgraph_values)

return list_join('', subgraphs)
<SYSTEM_TASK:> Remove all expired entries. <END_TASK> <USER_TASK:> Description: def _expire_data(self): """ Remove all expired entries. """
expire_time_stamp = time.time() - self.expire_time
self.timed_data = {d: t for d, t in self.timed_data.items()
                   if t > expire_time_stamp}
<SYSTEM_TASK:> Refresh the time of all specified elements in the supplied data set. <END_TASK> <USER_TASK:> Description: def update(self, data_set): """ Refresh the time of all specified elements in the supplied data set. """
now = time.time()
for d in data_set:
    self.timed_data[d] = now

self._expire_data()
<SYSTEM_TASK:> Retrieve all IPs of a given type from all sub-plugins. <END_TASK> <USER_TASK:> Description: def _accumulate_ips_from_plugins(self, ip_type_name, plugin_queue_lookup, ip_accumulator): """ Retrieve all IPs of a given type from all sub-plugins. ip_type_name: A name of the type of IP we are working with. Used for nice log messages. Example 'failed', 'questionable'. plugin_queue_lookup: Dictionary to lookup the queues (of a given type) for a plugins, by plugin name. ip_accumulator: An expiring data set for this type of IP address. Returns either a set of addresses to send out on our own reporting queues, or None. """
all_reported_ips = set()
for pname, q in plugin_queue_lookup.items():
    # Get all the IPs of the specified type from all the plugins.
    ips = utils.read_last_msg_from_queue(q)
    if ips:
        logging.debug("Sub-plugin '%s' reported %d "
                      "%s IPs: %s" %
                      (pname, len(ips), ip_type_name, ",".join(ips)))
        all_reported_ips.update(ips)   # merge all the lists
    else:
        logging.debug("Sub-plugin '%s' reported no "
                      "%s IPs." % (pname, ip_type_name))

# Send out the combined list of reported IPs. The receiver of this
# message expects this list to always be the full list of IPs. So, IF
# they get a message, it needs to be complete, since otherwise any IP
# not mentioned in this update is considered healthy.
#
# Since different sub-plugins may report different IPs at different
# times (and not always at the same time), we need to accumulate those
# IPs that are recorded by different sub-plugins over time.
#
# We use an 'expiring data set' to store those: If any plugin refreshes
# an IP as failed then the entry remains, otherwise, it will expire
# after some time. The expiring data set therefore, is an accumulation
# of recently reported IPs. We always report this set, whenever we send
# out an update of IPs.
#
# Each type of IP (for example, 'failed' or 'questionable') has its own
# accumulator, which was passed in to this function.
if all_reported_ips:
    ip_accumulator.update(all_reported_ips)
    current_ips = ip_accumulator.get()
    logging.info("Multi-plugin health monitor: "
                 "Reporting combined list of %s "
                 "IPs: %s" % (ip_type_name, ",".join(current_ips)))
    return current_ips
else:
    logging.debug("No %s IPs to report." % ip_type_name)
    return None
<SYSTEM_TASK:> Pass IP lists to monitor sub-plugins and get results from them. <END_TASK> <USER_TASK:> Description: def start_monitoring(self): """ Pass IP lists to monitor sub-plugins and get results from them. Override the common definition of this function, since in the multi plugin it's a little different: Instead of monitoring ourselves, we just use a number of other plugins to gather results. The multi plugin just serves as a proxy and (de)multiplexer for those other plugins. Note that we don't have to push any updates about failed IPs if nothing new was detected. Therefore, our own updates can be entirely driven by updates from the sub-plugin, which keeps our architecture simple. """
logging.info("Multi-plugin health monitor: Started in thread.") try: while True: # Get new IP addresses and pass them on to the sub-plugins new_ips = self.get_new_working_set() if new_ips: logging.debug("Sending list of %d IPs to %d plugins." % (len(new_ips), len(self.plugins))) for q in self.monitor_ip_queues.values(): q.put(new_ips) # Get any notifications about failed or questionable IPs from # the plugins. all_failed_ips = self._accumulate_ips_from_plugins( "failed", self.failed_queue_lookup, self.report_failed_acc) if all_failed_ips: self.q_failed_ips.put(all_failed_ips) all_questionable_ips = self._accumulate_ips_from_plugins( "questionable", self.questionable_queue_lookup, self.report_questionable_acc) if all_questionable_ips: self.q_questionable_ips.put(all_questionable_ips) time.sleep(self.get_monitor_interval()) except common.StopReceived: # Received the stop signal, just exiting the thread function return
<SYSTEM_TASK:> Load plugin classes based on colon-separated list of plugin names. <END_TASK> <USER_TASK:> Description: def load_sub_plugins_from_str(cls, plugins_str): """ Load plugin classes based on colon-separated list of plugin names. Returns dict with plugin name as key and class as value. """
plugin_classes = {}
if plugins_str:
    for plugin_name in plugins_str.split(":"):
        pc = load_plugin(plugin_name, MONITOR_DEFAULT_PLUGIN_MODULE)
        plugin_classes[plugin_name] = pc
return plugin_classes
<SYSTEM_TASK:> Arguments for the Multi health monitor plugin. <END_TASK> <USER_TASK:> Description: def add_arguments(cls, parser, sys_arg_list=None): """ Arguments for the Multi health monitor plugin. """
parser.add_argument('--multi_plugins', dest='multi_plugins',
                    required=True,
                    help="Colon-separated list of health monitor "
                         "plugins (only for 'multi' health monitor "
                         "plugin)")
arglist = ["multi_plugins"]

# Read the list of the specified sub-plugins ahead of time, so we can
# get their classes and add their parameters.
sub_plugin_names_str = \
    utils.param_extract(sys_arg_list, None, "--multi_plugins")
sub_plugin_classes = \
    cls.load_sub_plugins_from_str(sub_plugin_names_str).values()

# Store the list of the sub-plugins in the class, so we can iterate
# over those during parameter evaluation later on.
cls.multi_plugin_classes = sub_plugin_classes

# Now also add the parameters for the sub-plugins
for pc in sub_plugin_classes:
    arglist.extend(pc.add_arguments(parser, sys_arg_list))

return arglist
<SYSTEM_TASK:> Read, parse and sanity check the route spec config file. <END_TASK> <USER_TASK:> Description: def read_route_spec_config(fname): """ Read, parse and sanity check the route spec config file. The config file needs to be in this format: { "<CIDR-1>" : [ "host-1-ip", "host-2-ip", "host-3-ip" ], "<CIDR-2>" : [ "host-4-ip", "host-5-ip" ], "<CIDR-3>" : [ "host-6-ip", "host-7-ip", "host-8-ip", "host-9-ip" ] } Returns the validated route config. """
try:
    try:
        f = open(fname, "r")
    except IOError as e:
        # Cannot open file? Doesn't exist?
        raise ValueError("Cannot open file: " + str(e))
    data = json.loads(f.read())
    f.close()
    # Sanity checking on the data object
    data = common.parse_route_spec_config(data)
except ValueError as e:
    logging.error("Config ignored: %s" % str(e))
    data = None

return data
<SYSTEM_TASK:> Start the configfile change monitoring thread. <END_TASK> <USER_TASK:> Description: def start(self): """ Start the configfile change monitoring thread. """
fname = self.conf['file']
logging.info("Configfile watcher plugin: Starting to watch route spec "
             "file '%s' for changes..." % fname)

# Initial content of file needs to be processed at least once, before
# we start watching for any changes to it. Therefore, we will write it
# out on the queue right away.
route_spec = {}
try:
    route_spec = read_route_spec_config(fname)
    if route_spec:
        self.last_route_spec_update = datetime.datetime.now()
        self.q_route_spec.put(route_spec)
except ValueError as e:
    logging.warning("Cannot parse route spec: %s" % str(e))

# Now prepare to watch for any changes in that file. Find the parent
# directory of the config file, since this is where we will attach a
# watcher to.
abspath    = os.path.abspath(fname)
parent_dir = os.path.dirname(abspath)

# Create the file watcher and run in endless loop
handler = RouteSpecChangeEventHandler(
    route_spec_fname   = fname,
    route_spec_abspath = abspath,
    q_route_spec       = self.q_route_spec,
    plugin             = self)

self.observer_thread = watchdog.observers.Observer()
self.observer_thread.name = "ConfMon"
self.observer_thread.schedule(handler, parent_dir)
self.observer_thread.start()
<SYSTEM_TASK:> Stop the config change monitoring thread. <END_TASK> <USER_TASK:> Description: def stop(self): """ Stop the config change monitoring thread. """
self.observer_thread.stop()
self.observer_thread.join()
logging.info("Configfile watcher plugin: Stopped")
<SYSTEM_TASK:> Arguments for the configfile mode. <END_TASK> <USER_TASK:> Description: def add_arguments(cls, parser, sys_arg_list=None): """ Arguments for the configfile mode. """
parser.add_argument('-f', '--file', dest='file', required=True,
                    help="config file for routing groups "
                         "(only in configfile mode)")
return ["file"]
<SYSTEM_TASK:> Sanity checks for options needed for configfile mode. <END_TASK> <USER_TASK:> Description: def check_arguments(cls, conf): """ Sanity checks for options needed for configfile mode. """
try:
    # Check we have access to the config file
    f = open(conf['file'], "r")
    f.close()
except IOError as e:
    raise ArgsError("Cannot open config file '%s': %s" %
                    (conf['file'], e))
<SYSTEM_TASK:> Load a plugin. <END_TASK> <USER_TASK:> Description: def load_plugin(plugin_name, default_plugin_module): """ Load a plugin. Supports loading of plugins that are part of the vpcrouter, as well as external plugins: If the plugin name has a dotted notation then it assumes it's an external plugin and the dotted notation is the complete import path. If it's just a single word then it looks for the plugin in the specified default module. Return the plugin class. """
try:
    if "." in plugin_name:
        # Assume external plugin, full path
        plugin_mod_name   = plugin_name
        plugin_class_name = plugin_name.split(".")[-1].capitalize()
    else:
        # One of the built-in plugins
        plugin_mod_name   = "%s.%s" % (default_plugin_module, plugin_name)
        plugin_class_name = plugin_name.capitalize()

    plugin_mod   = importlib.import_module(plugin_mod_name)
    plugin_class = getattr(plugin_mod, plugin_class_name)
    return plugin_class
except ImportError as e:
    raise PluginError("Cannot load '%s'" % plugin_mod_name)
except AttributeError:
    raise PluginError("Cannot find plugin class '%s' in "
                      "plugin '%s'" % (plugin_class_name, plugin_mod_name))
except Exception as e:
    raise PluginError("Error while loading plugin '%s': %s" %
                      (plugin_mod_name, str(e)))
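A hedged usage sketch for the loader above; the module path and plugin names below are illustrative assumptions, not taken from the snippet.

# Built-in plugin: resolved inside the given default module; the class name
# is the capitalized plugin name (e.g. 'tcp' -> 'Tcp').
tcp_cls = load_plugin("tcp", "vpcrouter.monitor.plugins")

# External plugin: the dotted name is used as the full import path; the class
# name is the capitalized last component ('Mymonitor' in this made-up example).
ext_cls = load_plugin("mycompany.plugins.mymonitor", "vpcrouter.monitor.plugins")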
<SYSTEM_TASK:> Remove an installed signal receiver by signal name. <END_TASK> <USER_TASK:> Description: def remove_signal_receiver(self, signal): """ Remove an installed signal receiver by signal name. See also :py:meth:`add_signal_receiver` :py:exc:`exceptions.ConnSignalNameNotRecognisedException` :param str signal: Signal name to uninstall e.g., :py:attr:`SIGNAL_PROPERTY_CHANGED` :return: :raises ConnSignalNameNotRecognisedException: if the signal name is not registered """
if (signal in self._signal_names):
    s = self._signals.get(signal)
    if (s):
        self._bus.remove_signal_receiver(s.signal_handler,
                                         signal,
                                         dbus_interface=self._dbus_addr)  # noqa
        self._signals.pop(signal)
else:
    raise ConnSignalNameNotRecognisedException
<SYSTEM_TASK:> file_to_be_downloaded is a file-like object that has already <END_TASK> <USER_TASK:> Description: def download(self, file_to_be_downloaded, perform_download=True, download_to_path=None): """ file_to_be_downloaded is a file-like object that has already been uploaded, you cannot download folders """
response = self.get(
    '/path/data/', file_to_be_downloaded, raw=False)
if not perform_download:
    # The caller can decide how to process the download of the data
    return response

if not download_to_path:
    download_to_path = file_to_be_downloaded.split("/")[-1]

# download uses shutil.copyfileobj to download, which copies
# the data in chunks
o = open(download_to_path, 'wb')
return shutil.copyfileobj(response.raw, o)
<SYSTEM_TASK:> Create notebook under notebook directory. <END_TASK> <USER_TASK:> Description: def create_notebook(self, data): """Create notebook under notebook directory."""
r = requests.post('http://{0}/api/notebook'.format(self.zeppelin_url),
                  json=data)
self.notebook_id = r.json()['body']
<SYSTEM_TASK:> Wait for notebook to finish executing before continuing. <END_TASK> <USER_TASK:> Description: def wait_for_notebook_to_execute(self): """Wait for notebook to finish executing before continuing."""
while True:
    r = requests.get('http://{0}/api/notebook/job/{1}'.format(
        self.zeppelin_url, self.notebook_id))

    if r.status_code == 200:
        try:
            data = r.json()['body']
            if all(paragraph['status'] in ['FINISHED', 'ERROR']
                   for paragraph in data):
                break
            time.sleep(5)
            continue
        except KeyError as e:
            print(e)
            print(r.json())

    elif r.status_code == 500:
        print('Notebook is still busy executing. Checking again in 60 seconds...')
        time.sleep(60)
        continue

    else:
        print('ERROR: Unexpected return code: {}'.format(r.status_code))
        sys.exit(1)
<SYSTEM_TASK:> Save notebook depending on user provided output path. <END_TASK> <USER_TASK:> Description: def save_notebook(self, body): """Save notebook depending on user provided output path."""
directory = os.path.dirname(self.output_path)
full_path = os.path.join(directory, self.notebook_name)
try:
    with open(full_path, 'w') as fh:
        fh.write(json.dumps(body, indent=2))
except ValueError:
    print('ERROR: Could not save executed notebook to path: ' +
          self.output_path +
          ' -- Please provide a valid absolute path.')
<SYSTEM_TASK:> Execute input notebook and save it to file. <END_TASK> <USER_TASK:> Description: def execute_notebook(self, data): """Execute input notebook and save it to file. If no output path given, the output will be printed to stdout. If any errors occur from executing the notebook's paragraphs, they will be displayed in stderr. """
self.create_notebook(data)
self.run_notebook()
self.wait_for_notebook_to_execute()
body = self.get_executed_notebook()

err = False
output = []
for paragraph in body['paragraphs']:
    if 'results' in paragraph and paragraph['results']['code'] == 'ERROR':
        output.append(paragraph['results']['msg'][0]['data'])
        err = True
    elif 'result' in paragraph and paragraph['result']['code'] == 'ERROR':
        output.append(paragraph['result']['msg'])
        err = True

[print(e.strip() + '\n', file=sys.stderr) for e in output if e]

if err:
    sys.exit(1)

if not self.output_path:
    print(json.dumps(body, indent=2))
else:
    self.save_notebook(body)
<SYSTEM_TASK:> Configure and return the argument parser for the command line options. <END_TASK> <USER_TASK:> Description: def _setup_arg_parser(args_list, watcher_plugin_class, health_plugin_class): """ Configure and return the argument parser for the command line options. If a watcher and/or health-monitor plugin_class is provided then call the add_arguments() callback of the plugin class(es), in order to add plugin specific options. Some parameters are required (vpc and region, for example), but we may be able to discover them automatically, later on. Therefore, we allow them to remain unset on the command line. We will have to complain about those parameters missing later on, if the auto discovery fails. The args_list (from sys.argv) is passed in, since some plugins have to do their own ad-hoc extraction of certain parameters in order to add things to the official parameter list. Return parser and the conf-name of all the arguments that have been added. """
parser = argparse.ArgumentParser( description="VPC router: Manage routes in VPC route table") # General arguments parser.add_argument('--verbose', dest="verbose", action='store_true', help="produces more output") parser.add_argument('-l', '--logfile', dest='logfile', default='-', help="full path name for the logfile, " "or '-' for logging to stdout, " "default: '-' (logging to stdout)"), parser.add_argument('-r', '--region', dest="region_name", required=False, default=None, help="the AWS region of the VPC") parser.add_argument('-v', '--vpc', dest="vpc_id", required=False, default=None, help="the ID of the VPC in which to operate") parser.add_argument('--ignore_routes', dest="ignore_routes", required=False, default=None, help="Comma separated list of CIDRs or IPs for " "routes which vpc-router should ignore.") parser.add_argument('--route_recheck_interval', dest="route_recheck_interval", required=False, default="30", type=int, help="time between regular checks of VPC route " "tables, default: 30") parser.add_argument('-a', '--address', dest="addr", default="localhost", help="address to listen on for HTTP requests, " "default: localhost") parser.add_argument('-p', '--port', dest="port", default="33289", type=int, help="port to listen on for HTTP requests, " "default: 33289") parser.add_argument('-m', '--mode', dest='mode', required=True, help="name of the watcher plugin") parser.add_argument('-H', '--health', dest='health', required=False, default=monitor.MONITOR_DEFAULT_PLUGIN, help="name of the health-check plugin, " "default: %s" % monitor.MONITOR_DEFAULT_PLUGIN) arglist = ["logfile", "region_name", "vpc_id", "route_recheck_interval", "verbose", "addr", "port", "mode", "health", "ignore_routes"] # Inform the CurrentState object of the main config parameter names, which # should be rendered in an overview. CURRENT_STATE.main_param_names = list(arglist) # Let each watcher and health-monitor plugin add its own arguments. for plugin_class in [watcher_plugin_class, health_plugin_class]: if plugin_class: arglist.extend(plugin_class.add_arguments(parser, args_list)) return parser, arglist
<SYSTEM_TASK:> Parse command line arguments and return relevant values in a dict. <END_TASK> <USER_TASK:> Description: def _parse_args(args_list, watcher_plugin_class, health_plugin_class): """ Parse command line arguments and return relevant values in a dict. Also perform basic sanity checking on some arguments. If plugin classes have been provided then a callback into those classes is used to extend the arguments with plugin-specific options. Likewise, the sanity checking will then also invoke a callback into the plugins, in order to perform a sanity check on the plugin options. """
conf = {} # Setting up the command line argument parser. Note that we pass the # complete list of all plugins, so that their parameter can be added to the # official parameter handling, the help screen, etc. Some plugins may even # add further plugins themselves, but will handle this themselves. parser, arglist = _setup_arg_parser(args_list, watcher_plugin_class, health_plugin_class) args = parser.parse_args(args_list) # Transcribe argument values into our own dict for argname in arglist: conf[argname] = getattr(args, argname) # Sanity checking of arguments. Let the watcher and health-monitor plugin # class check their own arguments. for plugin_class in [watcher_plugin_class, health_plugin_class]: if plugin_class: try: plugin_class.check_arguments(conf) except ArgsError as e: parser.print_help() raise e # Sanity checking of other args if conf['route_recheck_interval'] < 5 and \ conf['route_recheck_interval'] != 0: raise ArgsError("route_recheck_interval argument must be either 0 " "or at least 5") if not 0 < conf['port'] < 65535: raise ArgsError("Invalid listen port '%d' for built-in http server." % conf['port']) if not conf['addr'] == "localhost": # Check if a proper address was specified (already raises a suitable # ArgsError if not) utils.ip_check(conf['addr']) if conf['ignore_routes']: # Parse the list of addresses and CIDRs for a in conf['ignore_routes'].split(","): a = a.strip() a = utils.check_valid_ip_or_cidr(a, return_as_cidr=True) CURRENT_STATE.ignore_routes.append(a) # Store a reference to the config dict in the current state CURRENT_STATE.conf = conf return conf
<SYSTEM_TASK:> Starting point of the executable. <END_TASK> <USER_TASK:> Description: def main(): """ Starting point of the executable. """
try: # A bit of a hack: We want to load the plugins (specified via the mode # and health parameter) in order to add their arguments to the argument # parser. But this means we first need to look into the CLI arguments # to find them ... before looking at the arguments. So we first perform # a manual search through the argument list for this purpose only. args = sys.argv[1:] # Loading the watcher plugin mode_name = utils.param_extract(args, "-m", "--mode", default=None) if mode_name: watcher_plugin_class = \ load_plugin(mode_name, watcher.WATCHER_DEFAULT_PLUGIN_MODULE) else: watcher_plugin_class = None # Loading the health monitor plugin health_check_name = \ utils.param_extract(args, "-H", "--health", default=monitor.MONITOR_DEFAULT_PLUGIN) if health_check_name: health_plugin_class = \ load_plugin(health_check_name, monitor.MONITOR_DEFAULT_PLUGIN_MODULE) else: health_plugin_class = None # Provide complete arg parsing for vpcrouter and all plugin classes. conf = _parse_args(sys.argv[1:], watcher_plugin_class, health_plugin_class) if not health_plugin_class or not watcher_plugin_class: logging.error("Watcher plugin or health monitor plugin class " "are missing.") sys.exit(1) _setup_logging(conf) # If we are on an EC2 instance then some data is already available to # us. The return data items in the meta data match some of the command # line arguments, so we can pass this through to the parser function to # provide defaults for those parameters. Specifically: VPC-ID and # region name. if not conf['vpc_id'] or not conf['region_name']: meta_data = get_ec2_meta_data() if 'vpc_id' not in meta_data or 'region_name' not in meta_data: logging.error("VPC and region were not explicitly specified " "and can't be auto-discovered.") sys.exit(1) else: conf.update(meta_data) try: info_str = "vpc-router (%s): mode: %s (%s), " \ "health-check: %s (%s)" % \ (vpcrouter.__version__, conf['mode'], watcher_plugin_class.get_version(), health_check_name, health_plugin_class.get_version()) logging.info("*** Starting %s ***" % info_str) CURRENT_STATE.versions = info_str http_srv = http_server.VpcRouterHttpServer(conf) CURRENT_STATE._vpc_router_http = http_srv watcher.start_watcher(conf, watcher_plugin_class, health_plugin_class) http_srv.stop() logging.info("*** Stopping vpc-router ***") except Exception as e: import traceback traceback.print_exc() logging.error(e.message) logging.error("*** Exiting") except Exception as e: print "\n*** Error: %s\n" % e.message sys.exit(1)
<SYSTEM_TASK:> Collect the current live info from all the registered plugins. <END_TASK> <USER_TASK:> Description: def get_plugins_info(self): """ Collect the current live info from all the registered plugins. Return a dictionary, keyed on the plugin name. """
d = {}
for p in self.plugins:
    d.update(p.get_info())
return d
<SYSTEM_TASK:> Returns the current state, or sub-state, depending on the path. <END_TASK> <USER_TASK:> Description: def get_state_repr(self, path): """ Returns the current state, or sub-state, depending on the path. """
if path == "ips":
    return {
        "failed_ips"       : self.failed_ips,
        "questionable_ips" : self.questionable_ips,
        "working_set"      : self.working_set,
    }

if path == "route_info":
    return {
        "route_spec"    : self.route_spec,
        "routes"        : self.routes,
        "ignore_routes" : self.ignore_routes
    }

if path == "plugins":
    return self.get_plugins_info()

if path == "vpc":
    return self.vpc_state

if path == "":
    return {
        "SERVER" : {
            "version"      : self.versions,
            "start_time"   : self.starttime.isoformat(),
            "current_time" : datetime.datetime.now().isoformat()
        },
        "params"     : self.render_main_params(),
        "plugins"    : {"_href" : "/plugins"},
        "ips"        : {"_href" : "/ips"},
        "route_info" : {"_href" : "/route_info"},
        "vpc"        : {"_href" : "/vpc"}
    }
<SYSTEM_TASK:> Return a rendering of the current state in JSON. <END_TASK> <USER_TASK:> Description: def as_json(self, path="", with_indent=False): """ Return a rendering of the current state in JSON. """
if path not in self.top_level_links:
    raise StateError("Unknown path")

return json.dumps(self.get_state_repr(path),
                  indent=4 if with_indent else None)
<SYSTEM_TASK:> Return a rendering of the current state in HTML. <END_TASK> <USER_TASK:> Description: def as_html(self, path=""): """ Return a rendering of the current state in HTML. """
if path not in self.top_level_links: raise StateError("Unknown path") header = """ <html> <head> <title>VPC-router state</title> </head> <body> <h3>VPC-router state</h3> <hr> <font face="courier"> """ footer = """ </font> </body> </html> """ rep = self.get_state_repr(path) def make_links(rep): # Recursively create clickable links for _href elements for e, v in rep.items(): if e == "_href": v = '<a href=%s>%s</a>' % (v, v) rep[e] = v else: if type(v) == dict: make_links(v) make_links(rep) rep_str_lines = json.dumps(rep, indent=4).split("\n") buf = [] for l in rep_str_lines: # Replace leading spaces with '&nbsp;' num_spaces = len(l) - len(l.lstrip()) l = "&nbsp;" * num_spaces + l[num_spaces:] buf.append(l) return "%s%s%s" % (header, "<br>\n".join(buf), footer)
<SYSTEM_TASK:> Start the config watch thread or process. <END_TASK> <USER_TASK:> Description: def start(self): """ Start the config watch thread or process. """
# Normally, we should start a thread or process here, pass the message
# queue self.q_route_spec to that thread and let it send route
# configurations through that queue. But since we're just sending a
# single, fixed configuration, we can just do that right here.
# Note that the q_route_spec queue was created by the __init__()
# function of the WatcherPlugin base class.
logging.info("Fixedconf watcher plugin: Started")

# The configuration provided on the command line is available to every
# plugin. Here we are reading our own parameters.
cidr  = self.conf['fixed_cidr']
hosts = self.conf['fixed_hosts'].split(":")
route_spec = {cidr : hosts}
try:
    # Probably don't really have to parse the route spec (sanity check)
    # one more time, since we already sanity checked the command line
    # options.
    common.parse_route_spec_config(route_spec)
    self.q_route_spec.put(route_spec)
except Exception as e:
    logging.warning("Fixedconf watcher plugin: "
                    "Invalid route spec: %s" % str(e))
<SYSTEM_TASK:> Callback to add command line options for this plugin to the argparse <END_TASK> <USER_TASK:> Description: def add_arguments(cls, parser, sys_arg_list=None): """ Callback to add command line options for this plugin to the argparse parser. """
parser.add_argument('--fixed_cidr', dest="fixed_cidr", required=True,
                    help="specify the route CIDR "
                         "(only in fixedconf mode)")
parser.add_argument('--fixed_hosts', dest="fixed_hosts", required=True,
                    help="list of host IPs, separated by ':' "
                         "(only in fixedconf mode)")
return ["fixed_cidr", "fixed_hosts"]
<SYSTEM_TASK:> Callback to perform sanity checking for the plugin's specific <END_TASK> <USER_TASK:> Description: def check_arguments(cls, conf): """ Callback to perform sanity checking for the plugin's specific parameters. """
# Perform sanity checking on CIDR
utils.ip_check(conf['fixed_cidr'], netmask_expected=True)

# Perform sanity checking on host list
for host in conf['fixed_hosts'].split(":"):
    utils.ip_check(host)
<SYSTEM_TASK:> Get a new list of IPs to work with from the queue. <END_TASK> <USER_TASK:> Description: def get_new_working_set(self): """ Get a new list of IPs to work with from the queue. This returns None if there is no update. Read all the messages from the queue on which we get the IP addresses that we have to monitor. We will ignore all of them, except the last one, since maybe we received two updates in a row, but each update is a full state, so only the last one matters. Raises the StopReceived exception if the stop signal ("None") was received on the notification queue. """
new_list_of_ips = None
while True:
    try:
        new_list_of_ips = self.q_monitor_ips.get_nowait()
        self.q_monitor_ips.task_done()
        if type(new_list_of_ips) is MonitorPluginStopSignal:
            raise StopReceived()
    except Queue.Empty:
        # No more messages, all done reading monitor list for now
        break

if new_list_of_ips is not None:
    CURRENT_STATE.working_set = new_list_of_ips

return new_list_of_ips
<SYSTEM_TASK:> Monitor IP addresses and send notifications if one of them has failed. <END_TASK> <USER_TASK:> Description: def start_monitoring(self): """ Monitor IP addresses and send notifications if one of them has failed. This function will continuously monitor q_monitor_ips for new lists of IP addresses to monitor. Each message received there is the full state (the complete lists of addresses to monitor). Push out (return) any failed IPs on q_failed_ips. This is also a list of IPs, which may be empty if all instances work correctly. If q_monitor_ips receives a 'None' instead of a list then this is interpreted as a stop signal and the function exits. """
time.sleep(1) # This is our working set. This list may be updated occasionally when # we receive messages on the q_monitor_ips queue. But irrespective of # any received updates, the list of IPs in here is regularly checked. list_of_ips = [] currently_failed_ips = set() currently_questionable_ips = set() # Accumulating failed IPs for 10 intervals before rechecking them to # see if they are alive again recheck_failed_interval = 10 try: interval_count = 0 while not CURRENT_STATE._stop_all: start_time = time.time() # See if we should update our working set new_ips = self.get_new_working_set() if new_ips: list_of_ips = new_ips # Update the currently-failed-IP list to only include IPs # that are still in the spec. The list update may have # removed some of the historical, failed IPs altogether. currently_failed_ips = \ set([ip for ip in currently_failed_ips if ip in list_of_ips]) # Same for the questionable IPs currently_questionable_ips = \ set([ip for ip in currently_questionable_ips if ip in list_of_ips]) # Don't check failed IPs for liveness on every interval. We # keep a list of currently-failed IPs for that purpose. # But we will check questionable IPs, so we don't exclude # those. live_ips_to_check = [ip for ip in list_of_ips if ip not in currently_failed_ips] logging.debug("Checking live IPs: %s" % (",".join(live_ips_to_check) if live_ips_to_check else "(none alive)")) # Independent of any updates: Perform health check on all IPs # in the working set and send messages out about any failed # ones as necessary. if live_ips_to_check: failed_ips, questionable_ips = \ self.do_health_checks(live_ips_to_check) if failed_ips: # Update list of currently failed IPs with any new ones currently_failed_ips.update(failed_ips) logging.info('Currently failed IPs: %s' % ",".join(currently_failed_ips)) # Let the main loop know the full set of failed IPs self.q_failed_ips.put(list(currently_failed_ips)) if questionable_ips: # Update list of currently questionable IPs with any # new ones currently_questionable_ips.update(failed_ips) logging.info('Currently questionable IPs: %s' % ",".join(currently_questionable_ips)) # Let the main loop know the full set of questionable # IPs self.q_questionable_ips.put( list(currently_questionable_ips)) if interval_count == recheck_failed_interval: # Ever now and then clean out our currently failed IP cache # so that we can recheck them to see if they are still # failed. We also clear out the questionable IPs, so that # they don't forever accumulate. interval_count = 0 currently_failed_ips = set() currently_questionable_ips = set() # Wait until next monitoring interval: We deduct the time we # spent in this loop. end_time = time.time() time.sleep(self.get_monitor_interval() - (end_time - start_time)) interval_count += 1 logging.debug("Monitoring loop ended: Global stop") except StopReceived: # Received the stop signal, just exiting the thread function return
<SYSTEM_TASK:> Parse and sanity check the route spec config. <END_TASK> <USER_TASK:> Description: def parse_route_spec_config(data): """ Parse and sanity check the route spec config. The config data is a blob of JSON that needs to be in this format: { "<CIDR-1>" : [ "host-1-ip", "host-2-ip", "host-3-ip" ], "<CIDR-2>" : [ "host-4-ip", "host-5-ip" ], "<CIDR-3>" : [ "host-6-ip", "host-7-ip", "host-8-ip", "host-9-ip" ] } Returns the validated route config. This validation is performed on any route-spec pushed out by the config watcher plugin. Duplicate hosts in the host lists are removed. Raises ValueError exception in case of problems. """
# Sanity checking on the data object
if type(data) is not dict:
    raise ValueError("Expected dictionary at top level")
try:
    for k, v in data.items():
        utils.ip_check(k, netmask_expected=True)
        if type(v) is not list:
            raise ValueError("Expect list of IPs as values in dict")
        hosts = set(v)   # remove duplicates
        for ip in hosts:
            utils.ip_check(ip)
        clean_host_list = sorted(list(hosts))
        data[k] = clean_host_list
except ArgsError as e:
    raise ValueError(e.message)

return data
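A small illustrative call of the parser above, showing how a duplicate host is dropped and the host list comes back sorted (the addresses are made up):

spec = parse_route_spec_config({
    "10.55.0.0/16": ["10.0.0.2", "10.0.0.1", "10.0.0.2"],
})
# spec == {"10.55.0.0/16": ["10.0.0.1", "10.0.0.2"]}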
<SYSTEM_TASK:> Take the current route spec and compare to the current list of known IP <END_TASK> <USER_TASK:> Description: def _update_health_monitor_with_new_ips(route_spec, all_ips, q_monitor_ips): """ Take the current route spec and compare to the current list of known IP addresses. If the route spec mentions a different set of IPs, update the monitoring thread with that new list. Return the current set of IPs mentioned in the route spec. """
# Extract all the IP addresses from the route spec, unique and sorted.
new_all_ips = \
    sorted(set(itertools.chain.from_iterable(route_spec.values())))
if new_all_ips != all_ips:
    logging.debug("New route spec detected. Updating "
                  "health-monitor with: %s" % ",".join(new_all_ips))
    # Looks like we have a new list of IPs
    all_ips = new_all_ips
    q_monitor_ips.put(all_ips)
else:
    logging.debug("New route spec detected. No changes in "
                  "IP address list, not sending update to "
                  "health-monitor.")

return all_ips
<SYSTEM_TASK:> Monitor queues to receive updates about new route specs or any detected <END_TASK> <USER_TASK:> Description: def _event_monitor_loop(region_name, vpc_id, watcher_plugin, health_plugin, iterations, sleep_time, route_check_time_interval=30): """ Monitor queues to receive updates about new route specs or any detected failed IPs. If any of those have updates, notify the health-monitor thread with a message on a special queue and also re-process the entire routing table. The 'iterations' argument allows us to limit the running time of the watch loop for test purposes. Not used during normal operation. Also, for faster tests, sleep_time can be set to values less than 1. The 'route_check_time_interval' argument specifies the number of seconds we allow to elapse before forcing a re-check of the VPC routes. This is so that accidentally deleted routes or manually broken route tables can be fixed back up again on their own. """
q_route_spec = watcher_plugin.get_route_spec_queue()
    q_monitor_ips, q_failed_ips, q_questionable_ips = \
        health_plugin.get_queues()
    time.sleep(sleep_time)   # Wait to allow monitor to report results

    current_route_spec = {}  # The last route spec we have seen
    all_ips            = []  # Cache of IP addresses we currently know about

    # Occasionally we want to recheck VPC routes even without other updates.
    # That way, if a route is manually deleted by someone, it will be
    # re-created on its own.
    last_route_check_time = time.time()
    while not CURRENT_STATE._stop_all:
        try:
            # Get the latest messages from the route-spec monitor and the
            # health-check monitor. At system start the route-spec queue
            # should immediately have been initialized with a first message.
            failed_ips     = utils.read_last_msg_from_queue(q_failed_ips)
            questnbl_ips   = utils.read_last_msg_from_queue(q_questionable_ips)
            new_route_spec = utils.read_last_msg_from_queue(q_route_spec)

            if failed_ips:
                # Store the failed IPs in the shared state
                CURRENT_STATE.failed_ips = failed_ips

            if questnbl_ips:
                # Store the questionable IPs in the shared state
                CURRENT_STATE.questionable_ips = questnbl_ips

            if new_route_spec:
                # Store the new route spec in the shared state
                CURRENT_STATE.route_spec = new_route_spec
                current_route_spec = new_route_spec
                # Need to communicate a new set of IPs to the health
                # monitoring thread, in case the list changed. The list of
                # addresses is extracted from the route spec. Pass in the old
                # version of the address list, so that this function can
                # compare to see if there are any changes to the host list.
                all_ips = _update_health_monitor_with_new_ips(new_route_spec,
                                                              all_ips,
                                                              q_monitor_ips)

            # Spec or list of failed or questionable IPs changed? Update
            # routes...
            # We pass in the last route spec we have seen, since we are also
            # here in case we only have failed/questionable IPs, but no new
            # route spec. This is also called occasionally on its own, so
            # that we can repair any damaged route tables in VPC.
            now = time.time()
            time_for_regular_recheck = \
                (now - last_route_check_time) > route_check_time_interval

            if new_route_spec or failed_ips or questnbl_ips or \
                    time_for_regular_recheck:
                if not new_route_spec and not (failed_ips or questnbl_ips):
                    # Only reason we are here is due to expired timer.
                    logging.debug("Time for regular route check")

                last_route_check_time = now

                vpc.handle_spec(region_name, vpc_id, current_route_spec,
                                failed_ips if failed_ips else [],
                                questnbl_ips if questnbl_ips else [])

            # If iterations are provided, count down and exit
            if iterations is not None:
                iterations -= 1
                if iterations == 0:
                    break

            time.sleep(sleep_time)

        except KeyboardInterrupt:
            # Allow exit via keyboard interrupt, useful during development
            return

        except Exception as e:
            # Of course we should never get here, but if we do, better to log
            # it and keep operating best we can...
            import traceback
            traceback.print_exc()
            logging.error("*** Uncaught exception 1: %s" % str(e))
            return

    logging.debug("event_monitor_loop ended: Global stop")
<SYSTEM_TASK:> Start watcher loop, listening for config changes or failed hosts. <END_TASK> <USER_TASK:> Description: def start_watcher(conf, watcher_plugin_class, health_plugin_class, iterations=None, sleep_time=1): """ Start watcher loop, listening for config changes or failed hosts. Also starts the various service threads. VPC router watches for any changes in the config and updates/adds/deletes routes as necessary. If failed hosts are reported, routes are also updated as needed. This function starts a few working threads: - The watcher plugin to monitor for updated route specs. - A health monitor plugin for instances mentioned in the route spec. It then drops into a loop to receive messages from the health monitoring thread and watcher plugin and re-process the config if any failed IPs are reported. The loop itself is in its own function to facilitate easier testing. """
if CURRENT_STATE._stop_all: logging.debug("Not starting plugins: Global stop") return # Start the working threads (health monitor, config event monitor, etc.) # and return the thread handles and message queues in a thread-info dict. watcher_plugin, health_plugin = \ start_plugins(conf, watcher_plugin_class, health_plugin_class, sleep_time) CURRENT_STATE.add_plugin(watcher_plugin) CURRENT_STATE.add_plugin(health_plugin) # Start the loop to process messages from the monitoring # threads about any failed IP addresses or updated route specs. _event_monitor_loop(conf['region_name'], conf['vpc_id'], watcher_plugin, health_plugin, iterations, sleep_time, conf['route_recheck_interval']) # Stopping plugins and collecting all worker threads when we are done stop_plugins(watcher_plugin, health_plugin)
<SYSTEM_TASK:> Generate the header for the Markdown file. <END_TASK> <USER_TASK:> Description: def build_header(self, title): """Generate the header for the Markdown file."""
header = ['---', 'title: ' + title, 'author(s): ' + self.user, 'tags: ', 'created_at: ' + str(self.date_created), 'updated_at: ' + str(self.date_updated), 'tldr: ', 'thumbnail: ', '---'] self.out = header + self.out
<SYSTEM_TASK:> Parse paragraph for the language of the code and the code itself. <END_TASK> <USER_TASK:> Description: def process_input(self, paragraph): """Parse paragraph for the language of the code and the code itself."""
try: lang, body = paragraph.split(None, 1) except ValueError: lang, body = paragraph, None if not lang.strip().startswith('%'): lang = 'scala' body = paragraph.strip() else: lang = lang.strip()[1:] if lang == 'md': self.build_markdown(lang, body) else: self.build_code(lang, body)
<SYSTEM_TASK:> Translate row into markdown format. <END_TASK> <USER_TASK:> Description: def create_md_row(self, row, header=False): """Translate row into markdown format."""
if not row: return cols = row.split('\t') if len(cols) == 1: self.out.append(cols[0]) else: col_md = '|' underline_md = '|' if cols: for col in cols: col_md += col + '|' underline_md += '-|' if header: self.out.append(col_md + '\n' + underline_md) else: self.out.append(col_md)
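A short, hypothetical walk-through, assuming conv is an instance of the converter class this method belongs to:

conv.out = []
conv.create_md_row("name\tcity", header=True)   # header row plus divider
conv.create_md_row("alice\tberlin")             # regular table row
conv.create_md_row("plain text, no tabs")       # passed through unchanged
# conv.out == ['|name|city|\n|-|-|', '|alice|berlin|', 'plain text, no tabs']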
<SYSTEM_TASK:> Squash self.out into string. <END_TASK> <USER_TASK:> Description: def build_output(self, fout): """Squash self.out into string. Join every line in self.out with a new line and write the result to the output file. """
fout.write('\n'.join(self.out))
<SYSTEM_TASK:> Convert json to markdown. <END_TASK> <USER_TASK:> Description: def convert(self, json, fout): """Convert json to markdown. Takes in a .json file as input and convert it to Markdown format, saving the generated .png images into ./images. """
self.build_markdown_body(json) # create the body self.build_header(json['name']) # create the md header self.build_output(fout)
<SYSTEM_TASK:> Generate the body for the Markdown file. <END_TASK> <USER_TASK:> Description: def build_markdown_body(self, text): """Generate the body for the Markdown file. - processes each json block one by one - for each block, process: - the creator of the notebook (user) - the date the notebook was created - the date the notebook was last updated - the input by detecting the editor language - the output by detecting the output format """
key_options = { 'dateCreated': self.process_date_created, 'dateUpdated': self.process_date_updated, 'title': self.process_title, 'text': self.process_input } for paragraph in text['paragraphs']: if 'user' in paragraph: self.user = paragraph['user'] for key, handler in key_options.items(): if key in paragraph: handler(paragraph[key]) if self._RESULT_KEY in paragraph: self.process_results(paragraph)
<SYSTEM_TASK:> Convert base64 encoding to png. <END_TASK> <USER_TASK:> Description: def build_image(self, msg): """Convert base64 encoding to png. Strips msg of the base64 image encoding and outputs the images to the specified directory. """
result = self.find_message(msg) if result is None: return self.index += 1 images_path = 'images' if self.directory: images_path = os.path.join(self.directory, images_path) if not os.path.isdir(images_path): os.makedirs(images_path) with open('{0}/output_{1}.png'.format(images_path, self.index), 'wb') as fh: self.write_image_to_disk(msg, result, fh) self.out.append( '\n![png]({0}/output_{1}.png)\n'.format(images_path, self.index))
<SYSTEM_TASK:> Route Zeppelin output types to corresponding handlers. <END_TASK> <USER_TASK:> Description: def process_results(self, paragraph): """Route Zeppelin output types to corresponding handlers."""
if 'result' in paragraph and paragraph['result']['msg']: msg = paragraph['result']['msg'] self.output_options[paragraph['result']['type']](msg)
<SYSTEM_TASK:> Routes Zeppelin output types to corresponding handlers. <END_TASK> <USER_TASK:> Description: def process_results(self, paragraph): """Routes Zeppelin output types to corresponding handlers."""
if 'editorMode' in paragraph['config']: mode = paragraph['config']['editorMode'].split('/')[-1] if 'results' in paragraph and paragraph['results']['msg']: msg = paragraph['results']['msg'][0] if mode not in ('text', 'markdown'): self.output_options[msg['type']](msg['data'])
<SYSTEM_TASK:> Process request for route spec. <END_TASK> <USER_TASK:> Description: def handle_route_spec_request(): """ Process request for route spec. Either a new one is posted or the current one is to be retrieved. """
try:
        if bottle.request.method == 'GET':
            # Just return what we currently have cached as the route spec
            data = CURRENT_STATE.route_spec
            if not data:
                bottle.response.status = 404
                msg = "Route spec not found!"
            else:
                bottle.response.status = 200
                msg = json.dumps(data)
        else:
            # A new route spec is posted
            raw_data = bottle.request.body.read()
            new_route_spec = json.loads(raw_data)
            logging.info("New route spec posted")
            common.parse_route_spec_config(new_route_spec)
            _Q_ROUTE_SPEC.put(new_route_spec)
            bottle.response.status = 200
            msg = "Ok"

    except ValueError as e:
        logging.error("Config ignored: %s" % str(e))
        bottle.response.status = 400
        msg = "Config ignored: %s" % str(e)

    except Exception as e:
        logging.error("Exception while processing HTTP request: %s" % str(e))
        bottle.response.status = 500
        msg = "Internal server error"

    bottle.response.content_type = 'application/json'
    return msg
<SYSTEM_TASK:> Return decorated function that caches the results. <END_TASK> <USER_TASK:> Description: def cache_results(function): """Return decorated function that caches the results."""
def save_to_permacache():
        """Save the in-memory cache data to the permacache.

        There is a race condition here between two processes updating at the
        same time. It's perfectly acceptable to lose and/or corrupt the
        permacache information as each process's in-memory cache will remain
        intact.

        """
        update_from_permacache()
        try:
            with open(filename, 'wb') as fp:
                pickle.dump(cache, fp, pickle.HIGHEST_PROTOCOL)
        except IOError:
            pass  # Ignore permacache saving exceptions

    def update_from_permacache():
        """Attempt to update newer items from the permacache."""
        try:
            with open(filename, 'rb') as fp:
                permacache = pickle.load(fp)
        except Exception:  # TODO: Handle specific exceptions
            return  # It's okay if it cannot load
        for key, value in permacache.items():
            if key not in cache or value[0] > cache[key][0]:
                cache[key] = value

    cache = {}
    cache_expire_time = 3600
    try:
        filename = os.path.join(gettempdir(), 'update_checker_cache.pkl')
        update_from_permacache()
    except NotImplementedError:
        filename = None

    @wraps(function)
    def wrapped(obj, package_name, package_version, **extra_data):
        """Return cached results if available."""
        now = time.time()
        key = (package_name, package_version)
        if not obj.bypass_cache and key in cache:  # Check the in-memory cache
            cache_time, retval = cache[key]
            if now - cache_time < cache_expire_time:
                return retval
        retval = function(obj, package_name, package_version, **extra_data)
        cache[key] = now, retval
        if filename:
            save_to_permacache()
        return retval
    return wrapped
<SYSTEM_TASK:> Attempt to return a human-readable time delta string. <END_TASK> <USER_TASK:> Description: def pretty_date(the_datetime): """Attempt to return a human-readable time delta string."""
# Source modified from # http://stackoverflow.com/a/5164027/176978 diff = datetime.utcnow() - the_datetime if diff.days > 7 or diff.days < 0: return the_datetime.strftime('%A %B %d, %Y') elif diff.days == 1: return '1 day ago' elif diff.days > 1: return '{0} days ago'.format(diff.days) elif diff.seconds <= 1: return 'just now' elif diff.seconds < 60: return '{0} seconds ago'.format(diff.seconds) elif diff.seconds < 120: return '1 minute ago' elif diff.seconds < 3600: return '{0} minutes ago'.format(int(round(diff.seconds / 60))) elif diff.seconds < 7200: return '1 hour ago' else: return '{0} hours ago'.format(int(round(diff.seconds / 3600)))
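A few illustrative calls; the exact strings depend on the moment of execution, so the outputs shown are approximate.

from datetime import datetime, timedelta

pretty_date(datetime.utcnow() - timedelta(seconds=30))   # '30 seconds ago'
pretty_date(datetime.utcnow() - timedelta(minutes=5))    # '5 minutes ago'
pretty_date(datetime.utcnow() - timedelta(days=3))       # '3 days ago'
pretty_date(datetime.utcnow() - timedelta(days=30))      # falls back to '%A %B %d, %Y'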
<SYSTEM_TASK:> Convenience method that outputs to stdout if an update is available. <END_TASK> <USER_TASK:> Description: def update_check(package_name, package_version, bypass_cache=False, url=None, **extra_data): """Convenience method that outputs to stdout if an update is available."""
checker = UpdateChecker(url) checker.bypass_cache = bypass_cache result = checker.check(package_name, package_version, **extra_data) if result: print(result)
<SYSTEM_TASK:>
Returns True if Monitor was added, otherwise False.
<END_TASK>
<USER_TASK:>
Description:
def addMonitor(self, monitorFriendlyName, monitorURL,
                   monitorAlertContacts=None):
        """
        Returns True if Monitor was added, otherwise False.
        """
url = self.baseUrl
        url += "newMonitor?apiKey=%s" % self.apiKey
        url += "&monitorFriendlyName=%s" % monitorFriendlyName
        url += "&monitorURL=%s&monitorType=1" % monitorURL
        if monitorAlertContacts:
            url += "&monitorAlertContacts=%s" % monitorAlertContacts
        url += "&noJsonCallback=1&format=json"
        success, response = self.requestApi(url)
        if success:
            return True
        else:
            return False
<SYSTEM_TASK:> Returns status and response payload for all known monitors. <END_TASK> <USER_TASK:> Description: def getMonitors(self, response_times=0, logs=0, uptime_ratio=''): """ Returns status and response payload for all known monitors. """
url = self.baseUrl url += "getMonitors?apiKey=%s" % (self.apiKey) url += "&noJsonCallback=1&format=json" # responseTimes - optional (defines if the response time data of each # monitor will be returned. Should be set to 1 for getting them. Default # is 0) if response_times: url += "&responseTimes=1" # logs - optional (defines if the logs of each monitor will be returned. # Should be set to 1 for getting the logs. Default is 0) if logs: url += '&logs=1' # customUptimeRatio - optional (defines the number of days to calculate # the uptime ratio(s) for. Ex: customUptimeRatio=7-30-45 to get the # uptime ratios for those periods) if uptime_ratio: url += '&customUptimeRatio=%s' % uptime_ratio return self.requestApi(url)
<SYSTEM_TASK:> monitorID is the only required object. All others are optional and must be quoted. <END_TASK> <USER_TASK:> Description: def editMonitor(self, monitorID, monitorStatus=None, monitorFriendlyName=None, monitorURL=None, monitorType=None, monitorSubType=None, monitorPort=None, monitorKeywordType=None, monitorKeywordValue=None, monitorHTTPUsername=None, monitorHTTPPassword=None, monitorAlertContacts=None): """ monitorID is the only required object. All others are optional and must be quoted. Returns Response object from api. """
url = self.baseUrl
        url += "editMonitor?apiKey=%s" % self.apiKey
        url += "&monitorID=%s" % monitorID
        if monitorStatus:
            # Pause or start the monitor
            url += "&monitorStatus=%s" % monitorStatus
        if monitorFriendlyName:
            # Update their FriendlyName
            url += "&monitorFriendlyName=%s" % monitorFriendlyName
        if monitorURL:
            # Edit the MonitorURL
            url += "&monitorURL=%s" % monitorURL
        if monitorType:
            # Edit the type of monitor
            url += "&monitorType=%s" % monitorType
        if monitorSubType:
            # Edit the SubType
            url += "&monitorSubType=%s" % monitorSubType
        if monitorPort:
            # Edit the Port
            url += "&monitorPort=%s" % monitorPort
        if monitorKeywordType:
            # Edit the Keyword Type
            url += "&monitorKeywordType=%s" % monitorKeywordType
        if monitorKeywordValue:
            # Edit the Keyword Match
            url += "&monitorKeywordValue=%s" % monitorKeywordValue
        if monitorHTTPUsername:
            # Edit the HTTP Username
            url += "&monitorHTTPUsername=%s" % monitorHTTPUsername
        if monitorHTTPPassword:
            # Edit the HTTP Password
            url += "&monitorHTTPPassword=%s" % monitorHTTPPassword
        if monitorAlertContacts:
            # Edit the contacts
            url += "&monitorAlertContacts=%s" % monitorAlertContacts
        url += "&noJsonCallback=1&format=json"
        success = self.requestApi(url)
        return success
<SYSTEM_TASK:> Returns True or False if monitor is deleted <END_TASK> <USER_TASK:> Description: def deleteMonitorById(self, monitorID): """ Returns True or False if monitor is deleted """
url = self.baseUrl url += "deleteMonitor?apiKey=%s" % self.apiKey url += "&monitorID=%s" % monitorID url += "&noJsonCallback=1&format=json" success, response = self.requestApi(url) if success: return True else: return False
<SYSTEM_TASK:> Instantiate BlockStorageRAM device from a file saved in block <END_TASK> <USER_TASK:> Description: def fromfile(file_, threadpool_size=None, ignore_lock=False): """ Instantiate BlockStorageRAM device from a file saved in block storage format. The file_ argument can be a file object or a string that represents a filename. If called with a file object, it should be opened in binary mode, and the caller is responsible for closing the file. This method returns a BlockStorageRAM instance. """
close_file = False if not hasattr(file_, 'read'): file_ = open(file_, 'rb') close_file = True try: header_data = file_.read(BlockStorageRAM._index_offset) block_size, block_count, user_header_size, locked = \ struct.unpack( BlockStorageRAM._index_struct_string, header_data) if locked and (not ignore_lock): raise IOError( "Can not open block storage device because it is " "locked by another process. To ignore this check, " "call this method with the keyword 'ignore_lock' " "set to True.") header_offset = len(header_data) + \ user_header_size f = bytearray(header_offset + \ (block_size * block_count)) f[:header_offset] = header_data + file_.read(user_header_size) f[header_offset:] = file_.read(block_size * block_count) finally: if close_file: file_.close() return BlockStorageRAM(f, threadpool_size=threadpool_size, ignore_lock=ignore_lock)
<SYSTEM_TASK:> Dump all storage data to a file. The file_ argument can be a <END_TASK> <USER_TASK:> Description: def tofile(self, file_): """ Dump all storage data to a file. The file_ argument can be a file object or a string that represents a filename. If called with a file object, it should be opened in binary mode, and the caller is responsible for closing the file. The method should only be called after the storage device has been closed to ensure that the locked flag has been set to False. """
close_file = False if not hasattr(file_, 'write'): file_ = open(file_, 'wb') close_file = True file_.write(self._f) if close_file: file_.close()
<SYSTEM_TASK:> Wrap a Bottle request so that a log line is emitted after it's handled. <END_TASK> <USER_TASK:> Description: def log_to_logger(fn): """ Wrap a Bottle request so that a log line is emitted after it's handled. """
@wraps(fn) def _log_to_logger(*args, **kwargs): actual_response = fn(*args, **kwargs) # modify this to log exactly what you need: logger.info('%s %s %s %s' % (bottle.request.remote_addr, bottle.request.method, bottle.request.url, bottle.response.status)) return actual_response return _log_to_logger
<SYSTEM_TASK:> Maintain some stats about our requests. <END_TASK> <USER_TASK:> Description: def update_stats(self, responses, no_responses): """ Maintain some stats about our requests. """
slowest_rtt = 0.0
        slowest_ip  = None
        fastest_rtt = 9999999.9
        fastest_ip  = None
        rtt_total   = 0.0
        for ip, rtt in responses.items():
            rtt_total += rtt
            # Track both extremes independently, so that even a single
            # response updates the fastest as well as the slowest entry.
            if rtt > slowest_rtt:
                slowest_rtt = rtt
                slowest_ip  = ip
            if rtt < fastest_rtt:
                fastest_rtt = rtt
                fastest_ip  = ip

        sorted_rtts = sorted(responses.values())
        l = len(sorted_rtts)
        if l == 0:
            median_rtt = 0.0
        elif l % 2 == 1:
            # Odd number: Median is the middle element
            median_rtt = sorted_rtts[int(l / 2)]
        else:
            # Even number (average between two middle elements)
            median_rtt = (sorted_rtts[int(l / 2) - 1] +
                          sorted_rtts[int(l / 2)]) / 2.0

        now = datetime.datetime.now().isoformat()
        m = {
            "time"             : now,
            "num_responses"    : len(responses),
            "num_no_responses" : len(no_responses),
            "slowest" : {
                "ip"  : slowest_ip,
                "rtt" : slowest_rtt
            },
            "fastest" : {
                "ip"  : fastest_ip,
                "rtt" : fastest_rtt
            },
            # Guard against an empty response set to avoid a division by zero
            "average_rtt" : rtt_total / len(responses) if responses else 0.0,
            "median_rtt"  : median_rtt
        }

        self.measurements.insert(0, m)
        self.measurements = self.measurements[:self.max_num_measurements]
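A small worked example with invented RTTs, assuming mon is an initialized instance of this health-monitor plugin:

mon.update_stats({"10.9.9.1": 0.010, "10.9.9.2": 0.030, "10.9.9.3": 0.020},
                 ["10.9.9.4"])
latest = mon.measurements[0]
# latest["num_responses"] == 3, latest["num_no_responses"] == 1
# latest["fastest"]["ip"] == "10.9.9.1", latest["slowest"]["ip"] == "10.9.9.2"
# latest["average_rtt"] and latest["median_rtt"] both come out as 0.02
# (up to float rounding)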
<SYSTEM_TASK:> Perform a health check on a list of IP addresses, using ICMPecho. <END_TASK> <USER_TASK:> Description: def do_health_checks(self, list_of_ips): """ Perform a health check on a list of IP addresses, using ICMPecho. Return tuple with list of failed IPs and questionable IPs. """
# Calculate a decent overall timeout time for a ping attempt: 3/4th of # the monitoring interval. That way, we know we're done with this ping # attempt before the next monitoring attempt is started. ping_timeout = self.get_monitor_interval() * 0.75 # Calculate a decent number of retries. For very short intervals we # shouldn't have any retries, for very long ones, we should have # several ones. Converting the timeout to an integer gives us what we # want: For timeouts less than 1 we have no retry at all. num_retries = int(ping_timeout) try: self.ping_count += len(list_of_ips) responses, no_responses = multiping.multi_ping( list_of_ips, ping_timeout, num_retries) self.update_stats(responses, no_responses) except Exception as e: logging.error("Exception while trying to monitor servers: %s" % str(e)) # Need to assume all IPs failed no_responses = list_of_ips return no_responses, []
<SYSTEM_TASK:> Sanity check that the specified string is indeed an IP address or mask. <END_TASK> <USER_TASK:> Description: def ip_check(ip, netmask_expected=False): """ Sanity check that the specified string is indeed an IP address or mask. """
try: if netmask_expected: if "/" not in ip: raise netaddr.core.AddrFormatError() netaddr.IPNetwork(ip) else: netaddr.IPAddress(ip) except netaddr.core.AddrFormatError: if netmask_expected: raise ArgsError("Not a valid CIDR (%s)" % ip) else: raise ArgsError("Not a valid IP address (%s)" % ip) except Exception as e: raise ArgsError("Invalid format: %s" % str(e))
<SYSTEM_TASK:> Checks that the value is a valid IP address or a valid CIDR. <END_TASK> <USER_TASK:> Description: def check_valid_ip_or_cidr(val, return_as_cidr=False): """ Checks that the value is a valid IP address or a valid CIDR. Returns the specified value. If 'return_as_cidr' is set then the return value will always be in the form of a CIDR, even if a plain IP address was specified. """
is_ip = True if "/" in val: ip_check(val, netmask_expected=True) is_ip = False else: ip_check(val, netmask_expected=False) if return_as_cidr and is_ip: # Convert a plain IP to a CIDR if val == "0.0.0.0": # Special case for the default route val = "0.0.0.0/0" else: val = "%s/32" % val try: ipaddress.IPv4Network(unicode(val)) except Exception as e: raise ArgsError("Not a valid network: %s" % str(e)) return val
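A few illustrative calls (the addresses are arbitrary):

check_valid_ip_or_cidr("10.9.9.3")                       # '10.9.9.3'
check_valid_ip_or_cidr("10.9.9.3", return_as_cidr=True)  # '10.9.9.3/32'
check_valid_ip_or_cidr("0.0.0.0", return_as_cidr=True)   # '0.0.0.0/0'
check_valid_ip_or_cidr("10.9.0.0/16")                    # '10.9.0.0/16'
check_valid_ip_or_cidr("10.9.9.3/8")                     # raises ArgsError (host bits set)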
<SYSTEM_TASK:> Return True if the small CIDR is contained in the big CIDR. <END_TASK> <USER_TASK:> Description: def is_cidr_in_cidr(small_cidr, big_cidr): """ Return True if the small CIDR is contained in the big CIDR. """
# The default route (0.0.0.0/0) is handled differently, since every route # would always be contained in there. Instead, only a small CIDR of # "0.0.0.0/0" can match against it. Other small CIDRs will always result in # 'False' (not contained). if small_cidr == "0.0.0.0/0": return big_cidr == "0.0.0.0/0" else: if big_cidr == "0.0.0.0/0": return False s = ipaddress.IPv4Network(unicode(small_cidr)) b = ipaddress.IPv4Network(unicode(big_cidr)) return s.subnet_of(b)
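For example:

is_cidr_in_cidr("10.9.1.0/24", "10.9.0.0/16")   # True
is_cidr_in_cidr("10.8.0.0/16", "10.9.0.0/16")   # False
is_cidr_in_cidr("10.9.1.0/24", "0.0.0.0/0")     # False (special-cased, see comment above)
is_cidr_in_cidr("0.0.0.0/0", "0.0.0.0/0")       # True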
<SYSTEM_TASK:> Read all messages from a queue and return the last one. <END_TASK> <USER_TASK:> Description: def read_last_msg_from_queue(q): """ Read all messages from a queue and return the last one. This is useful in many cases where all messages are always the complete state of things. Therefore, intermittent messages can be ignored. Doesn't block, returns None if there is no message waiting in the queue. """
msg = None while True: try: # The list of IPs is always a full list. msg = q.get_nowait() q.task_done() except Queue.Empty: # No more messages, all done for now return msg
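A quick sketch, using the Python 2 Queue module that the function itself expects:

import Queue

q = Queue.Queue()
for msg in (["10.0.0.1"], ["10.0.0.1", "10.0.0.2"], ["10.0.0.2"]):
    q.put(msg)

read_last_msg_from_queue(q)   # ['10.0.0.2'] -- only the newest state matters
read_last_msg_from_queue(q)   # None -- the queue is empty now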
<SYSTEM_TASK:> Quick extraction of a parameter from the command line argument list. <END_TASK> <USER_TASK:> Description: def param_extract(args, short_form, long_form, default=None): """ Quick extraction of a parameter from the command line argument list. In some cases we need to parse a few arguments before the official arg-parser starts. Returns parameter value, or None if not present. """
val = default
    for i, a in enumerate(args):
        # Long form may use "--xyz=foo", so need to split on '=', but it
        # doesn't necessarily do that, can also be "--xyz foo".
        elems = a.split("=", 1)
        if elems[0] in [short_form, long_form]:
            # At least make sure that an actual value was specified
            if len(elems) == 1:
                if i + 1 < len(args) and not args[i + 1].startswith("-"):
                    val = args[i + 1]
                else:
                    val = ""  # Invalid value was specified
            else:
                val = elems[1]
            break

    return val
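For instance (the flag names below are made up for illustration):

args = ["--mode", "http", "-r", "us-east-1", "--vpc_id=vpc-123456"]

param_extract(args, "-r", "--region")                 # 'us-east-1'
param_extract(args, "-v", "--vpc_id")                 # 'vpc-123456'
param_extract(args, "-p", "--port", default="33289")  # '33289' (flag absent)
param_extract(["--region"], "-r", "--region")         # '' (flag given without a value)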
<SYSTEM_TASK:> Convert an integer into a base k string. <END_TASK> <USER_TASK:> Description: def base10_integer_to_basek_string(k, x): """Convert an integer into a base k string."""
if not (2 <= k <= max_k_labeled): raise ValueError("k must be in range [2, %d]: %s" % (max_k_labeled, k)) return ((x == 0) and numerals[0]) or \ (base10_integer_to_basek_string(k, x // k).\ lstrip(numerals[0]) + numerals[x % k])
<SYSTEM_TASK:> Convert a base k string into an integer. <END_TASK> <USER_TASK:> Description: def basek_string_to_base10_integer(k, x): """Convert a base k string into an integer."""
assert 1 < k <= max_k_labeled return sum(numeral_index[c]*(k**i) for i, c in enumerate(reversed(x)))
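The two helpers are inverses of each other, provided numeral_index is the reverse lookup for the module's numerals table (which the pair of functions implies); a round trip in base 2 stays within the guaranteed range of k:

x = 123456
s = base10_integer_to_basek_string(2, x)    # binary digits drawn from `numerals`
assert basek_string_to_base10_integer(2, s) == x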