text_prompt
stringlengths
100
17.7k
code_prompt
stringlengths
7
9.86k
def pop(self):
    """
    Pop the top frame from the stack. Return the new stack.
    """
    if self.next is None:
        # An empty stack has no frame to pop
        raise SimEmptyCallStackError("Cannot pop a frame from an empty call stack.")
    new_list = self.next.copy({})
    if self.state is not None:
        # Keep the state's callstack plugin and its action history in sync
        # with the new (shorter) stack
        self.state.register_plugin('callstack', new_list)
        self.state.history.recent_stack_actions.append(CallStackAction(
            hash(new_list), len(new_list), 'pop', ret_site_addr=self.ret_addr
        ))
    return new_list
def call(self, callsite_addr, addr, retn_target=None, stack_pointer=None):
    """
    Push a stack frame into the call stack. This method is called when calling a function in CFG recovery.

    :param int callsite_addr: Address of the call site
    :param int addr: Address of the call target
    :param int or None retn_target: Address of the return target
    :param int stack_pointer: Value of the stack pointer
    :return: None
    """
    # Build the new frame and push it in one go
    return self.push(CallStack(call_site_addr=callsite_addr,
                               func_addr=addr,
                               ret_addr=retn_target,
                               stack_ptr=stack_pointer))
def ret(self, retn_target=None):
    """
    Pop one or many call frames from the stack. This method is called when returning from a function in CFG
    recovery.

    :param int retn_target: The target to return to.
    :return: None
    """
    if retn_target is None:
        # No target given: simply pop the top frame
        return self.pop()
    # We may want to return to several levels up there, not only a
    # single stack frame
    return_target_index = self._find_return_target(retn_target)
    if return_target_index is not None:
        # Pop all frames up to and including the one owning the target
        o = self
        while return_target_index >= 0:
            o = o.pop()
            return_target_index -= 1
        return o
    # Target not found anywhere in the stack: keep the stack unchanged
    l.warning("Returning to an unexpected address %#x", retn_target)
    return self
def dbg_repr(self):
    """
    Debugging representation of this CallStack object.

    :return: Details of this CallStack, one line per frame.
    :rtype: str
    """
    stack = [ ]
    for i, frame in enumerate(self):
        # Frames are created with a 'ret_addr' attribute (see call()); use it
        # consistently with _find_return_target() and pop() instead of the
        # nonexistent 'return_target' attribute.
        s = "%d | %s -> %s, returning to %s" % (
            i,
            "None" if frame.call_site_addr is None else "%#x" % frame.call_site_addr,
            "None" if frame.func_addr is None else "%#x" % frame.func_addr,
            "None" if frame.ret_addr is None else "%#x" % frame.ret_addr,
        )
        stack.append(s)
    return "\n".join(stack)
def stack_suffix(self, context_sensitivity_level):
    """
    Generate the stack suffix. A stack suffix can be used as the key to a SimRun in CFG recovery.

    :param int context_sensitivity_level: Level of context sensitivity.
    :return: A tuple of (call_site_addr, func_addr) pairs, most recent frame last,
             left-padded with None up to context_sensitivity_level pairs.
    :rtype: tuple
    """
    wanted = context_sensitivity_level * 2
    suffix = ()
    for frame in self:
        if len(suffix) >= wanted:
            # Enough context collected
            break
        suffix = (frame.call_site_addr, frame.func_addr) + suffix
    # Left-pad with (None, None) pairs so the suffix always has the
    # requested length
    if len(suffix) < wanted:
        suffix = (None, None) * ((wanted - len(suffix)) // 2) + suffix
    return suffix
def _find_return_target(self, target):
    """
    Check if the return target exists in the stack, and return the index if exists. We always search from the most
    recent call stack frame since the most recent frame has a higher chance to be hit in normal CFG recovery.

    :param int target: Target of the return.
    :return: The index of the matching frame, or None when no frame matches.
    :rtype: int
    """
    # next() on a generator returns the first match, or the None default
    return next(
        (i for i, frame in enumerate(self) if frame.ret_addr == target),
        None,
    )
def load(self, config):
    """Load the web list from the configuration file.

    :param config: Glances configuration object (or None).
    :return: list of dicts describing each monitored URL.
    """
    web_list = []

    if config is None:
        logger.debug("No configuration file available. Cannot load ports list.")
    elif not config.has_section(self._section):
        logger.debug("No [%s] section in the configuration file. Cannot load ports list." % self._section)
    else:
        logger.debug("Start reading the [%s] section in the configuration file" % self._section)

        # Section-wide defaults for refresh rate and timeout
        refresh = int(config.get_value(self._section, 'refresh', default=self._default_refresh))
        timeout = int(config.get_value(self._section, 'timeout', default=self._default_timeout))

        # Read the web/url list (keys web_1_*, web_2_*, ... up to web_255_*)
        for i in range(1, 256):
            new_web = {}
            postfix = 'web_%s_' % str(i)

            # Read mandatories configuration key: host
            new_web['url'] = config.get_value(self._section, '%s%s' % (postfix, 'url'))
            if new_web['url'] is None:
                # Hole in the numbering: skip this index
                continue
            url_parse = urlparse(new_web['url'])
            if not bool(url_parse.scheme) or not bool(url_parse.netloc):
                # URL must have both a scheme and a network location
                logger.error('Bad URL (%s) in the [%s] section of configuration file.' % (new_web['url'],
                                                                                          self._section))
                continue

            # Read optionals configuration keys
            # Default description is the URL without the http://
            new_web['description'] = config.get_value(self._section,
                                                      '%sdescription' % postfix,
                                                      default="%s" % url_parse.netloc)

            # Default status
            new_web['status'] = None
            new_web['elapsed'] = 0

            # Refresh rate in second
            new_web['refresh'] = refresh

            # Timeout in second
            new_web['timeout'] = int(config.get_value(self._section, '%stimeout' % postfix, default=timeout))

            # RTT warning
            new_web['rtt_warning'] = config.get_value(self._section, '%srtt_warning' % postfix, default=None)
            if new_web['rtt_warning'] is not None:
                # Convert to second
                new_web['rtt_warning'] = int(new_web['rtt_warning']) / 1000.0

            # Indice
            new_web['indice'] = 'web_' + str(i)

            # ssl_verify
            new_web['ssl_verify'] = config.get_value(self._section, '%sssl_verify' % postfix, default=True)

            # Proxy
            http_proxy = config.get_value(self._section, '%shttp_proxy' % postfix, default=None)
            https_proxy = config.get_value(self._section, '%shttps_proxy' % postfix, default=None)
            if https_proxy is None and http_proxy is None:
                new_web['proxies'] = None
            else:
                new_web['proxies'] = {'http': http_proxy,
                                      'https': https_proxy}

            # Add the server to the list
            logger.debug("Add Web URL %s to the static list" % new_web['url'])
            web_list.append(new_web)

    # Web list loaded
    logger.debug("Web list loaded: %s" % web_list)
    return web_list
def build_sensors_list(self, type):
    """Build the sensors list depending of the type.

    type: SENSOR_TEMP_UNIT or SENSOR_FAN_UNIT
    output: a list of dicts with 'label', 'value' and 'unit' keys

    NOTE: 'type' shadows the builtin of the same name; it is kept for
    API compatibility with existing callers.
    """
    ret = []
    if type == SENSOR_TEMP_UNIT and self.init_temp:
        # Use the list grabbed on the previous call, then refresh it for
        # the next one
        input_list = self.stemps
        self.stemps = psutil.sensors_temperatures()
    elif type == SENSOR_FAN_UNIT and self.init_fan:
        input_list = self.sfans
        self.sfans = psutil.sensors_fans()
    else:
        # Unknown type or sensor family not initialized: empty list
        return ret
    for chipname, chip in iteritems(input_list):
        i = 1
        for feature in chip:
            sensors_current = {}
            # Sensor name: fall back to "<chip name> <index>" when the
            # feature has no label
            if feature.label == '':
                sensors_current['label'] = chipname + ' ' + str(i)
            else:
                sensors_current['label'] = feature.label
            # Sensor value and unit
            sensors_current['value'] = int(feature.current)
            sensors_current['unit'] = type
            # Add sensor to the list
            ret.append(sensors_current)
            i += 1
    return ret
def add_user(self, username, password):
    """Add an user to the dictionary."""
    # Registering at least one user turns authentication on
    user_db = self.server.user_dict
    user_db[username] = password
    self.server.isAuth = True
def init(self):
    """Init the connection to the rabbitmq server.

    :return: an open pika channel, or None when export is disabled or the
             connection fails.
    """
    if not self.export_enable:
        # Export disabled in the configuration: nothing to connect to
        return None
    try:
        # Build an AMQP URL from the configured credentials and host
        parameters = pika.URLParameters(
            'amqp://' + self.user + ':' +
            self.password + '@' +
            self.host + ':' +
            self.port + '/')
        connection = pika.BlockingConnection(parameters)
        channel = connection.channel()
        return channel
    except Exception as e:
        logger.critical("Connection to rabbitMQ failed : %s " % e)
        return None
def export(self, name, columns, points):
    """Write the points in RabbitMQ.

    :param name: name of the exported stat (plugin name).
    :param columns: list of stat field names.
    :param points: list of stat values, same order as columns.
    """
    data = ('hostname=' + self.hostname + ', name=' + name +
            ', dateinfo=' + datetime.datetime.utcnow().isoformat())
    # Iterate pairs with zip instead of indexing, and keep only numerical
    # values (positive condition instead of the continue/else inversion)
    for column, point in zip(columns, points):
        if isinstance(point, Number):
            data += ", " + column + "=" + str(point)
    logger.debug(data)
    try:
        self.client.basic_publish(exchange='', routing_key=self.queue, body=data)
    except Exception as e:
        logger.error("Can not export stats to RabbitMQ (%s)" % e)
def normalize(name):
    """Normalize name for the Statsd convention"""
    # Statsd cannot cope with some special chars (issue #1068):
    # drop ':' and '%', turn spaces into underscores.
    return name.replace(':', '').replace('%', '').replace(' ', '_')
def init(self):
    """Init the connection to the Statsd server.

    :return: a StatsClient instance, or None when export is disabled.
    """
    if not self.export_enable:
        # Export disabled in the configuration
        return None
    logger.info(
        "Stats will be exported to StatsD server: {}:{}".format(self.host,
                                                                self.port))
    return StatsClient(self.host,
                       int(self.port),
                       prefix=self.prefix)
def load(self, config):
    """Load the ports list from the configuration file.

    :param config: Glances configuration object (or None).
    :return: list of dicts describing each monitored host/port.
    """
    ports_list = []

    if config is None:
        logger.debug("No configuration file available. Cannot load ports list.")
    elif not config.has_section(self._section):
        logger.debug("No [%s] section in the configuration file. Cannot load ports list." % self._section)
    else:
        logger.debug("Start reading the [%s] section in the configuration file" % self._section)

        # Section-wide defaults for refresh rate and timeout
        refresh = int(config.get_value(self._section, 'refresh', default=self._default_refresh))
        timeout = int(config.get_value(self._section, 'timeout', default=self._default_timeout))

        # Add the default gateway on top of the ports list
        default_gateway = config.get_value(self._section, 'port_default_gateway', default='False')
        if default_gateway.lower().startswith('true') and netifaces_tag:
            new_port = {}
            try:
                new_port['host'] = netifaces.gateways()['default'][netifaces.AF_INET][0]
            except KeyError:
                # No default IPv4 gateway on this host
                new_port['host'] = None
            # Port 0 means ICMP (ping) check instead of TCP check
            new_port['port'] = 0
            new_port['description'] = 'DefaultGateway'
            new_port['refresh'] = refresh
            new_port['timeout'] = timeout
            new_port['status'] = None
            new_port['rtt_warning'] = None
            new_port['indice'] = str('port_0')
            logger.debug("Add default gateway %s to the static list" % (new_port['host']))
            ports_list.append(new_port)

        # Read the scan list (keys port_1_*, port_2_*, ... up to port_255_*)
        for i in range(1, 256):
            new_port = {}
            postfix = 'port_%s_' % str(i)

            # Read mandatories configuration key: host
            new_port['host'] = config.get_value(self._section, '%s%s' % (postfix, 'host'))
            if new_port['host'] is None:
                # Hole in the numbering: skip this index
                continue

            # Read optionals configuration keys
            # Port is set to 0 by default. 0 mean ICMP check instead of TCP check
            new_port['port'] = config.get_value(self._section, '%s%s' % (postfix, 'port'), 0)
            new_port['description'] = config.get_value(self._section,
                                                       '%sdescription' % postfix,
                                                       default="%s:%s" % (new_port['host'], new_port['port']))

            # Default status
            new_port['status'] = None

            # Refresh rate in second
            new_port['refresh'] = refresh

            # Timeout in second
            new_port['timeout'] = int(config.get_value(self._section, '%stimeout' % postfix, default=timeout))

            # RTT warning
            new_port['rtt_warning'] = config.get_value(self._section, '%srtt_warning' % postfix, default=None)
            if new_port['rtt_warning'] is not None:
                # Convert to second
                new_port['rtt_warning'] = int(new_port['rtt_warning']) / 1000.0

            # Indice
            new_port['indice'] = 'port_' + str(i)

            # Add the server to the list
            logger.debug("Add port %s:%s to the static list" % (new_port['host'], new_port['port']))
            ports_list.append(new_port)

    # Ports list loaded
    logger.debug("Ports list loaded: %s" % ports_list)
    return ports_list
def init(self):
    """Init the connection to the OpenTSDB server.

    :return: a potsdb client, or None when export is disabled.
             Exits the process when the connection fails.
    """
    if not self.export_enable:
        return None
    try:
        db = potsdb.Client(self.host,
                           port=int(self.port),
                           check_host=True)
    except Exception as e:
        # A connection failure at startup is fatal for the exporter
        logger.critical("Cannot connect to OpenTSDB server %s:%s (%s)" % (self.host, self.port, e))
        sys.exit(2)
    return db
def init(self):
    """Init the connection to the MQTT server.

    :return: a connected paho client (with its network loop started),
             or None when export is disabled or the connection fails.
    """
    if not self.export_enable:
        return None
    try:
        client = paho.Client(client_id='glances_' + self.hostname,
                             clean_session=False)
        client.username_pw_set(username=self.user, password=self.password)
        if self.tls:
            # Use the certifi CA bundle for the TLS handshake
            client.tls_set(certs.where())
        client.connect(host=self.host, port=self.port)
        # Run the network loop in a background thread
        client.loop_start()
        return client
    except Exception as e:
        logger.critical("Connection to MQTT server failed : %s " % e)
        return None
def export(self, name, columns, points):
    """Write the points in MQTT.

    Each column is published on its own topic:
    <topic>/<hostname>/<plugin name>/<sanitized column parts>.
    """
    WHITELIST = '_-' + string.ascii_letters + string.digits
    SUBSTITUTE = '_'

    def whitelisted(s, whitelist=WHITELIST, substitute=SUBSTITUTE):
        # Replace every char outside the whitelist by the substitute char
        return ''.join(c if c in whitelist else substitute for c in s)

    for sensor, value in zip(columns, points):
        try:
            # Do NOT reuse 'name' as the comprehension variable here: under
            # Python 2 (still supported by this codebase) the list
            # comprehension variable leaks and would overwrite the plugin
            # name used in the topic below.
            parts = [whitelisted(part) for part in sensor.split('.')]
            tobeexport = [self.topic, self.hostname, name]
            tobeexport.extend(parts)
            topic = '/'.join(tobeexport)
            self.client.publish(topic, value)
        except Exception as e:
            logger.error("Can not export stats to MQTT server (%s)" % e)
def load(self):
    """Load server and password list from the configuration file."""
    # Init the static server list (if defined)
    self.static_server = GlancesStaticServer(config=self.config)
    # Init the password list (if defined)
    self.password = GlancesPassword(config=self.config)
def __get_uri(self, server):
    """Return the URI for the given server dict.

    NOTE: when the server is PROTECTED and a preconfigured password is
    found, this mutates server['password'] with its sha256 hash.
    """
    # Select the connection mode (with or without password)
    if server['password'] != "":
        if server['status'] == 'PROTECTED':
            # Try with the preconfigure password (only if status is PROTECTED)
            clear_password = self.password.get_password(server['name'])
            if clear_password is not None:
                server['password'] = self.password.sha256_hash(clear_password)
        return 'http://{}:{}@{}:{}'.format(server['username'], server['password'],
                                           server['ip'], server['port'])
    else:
        # No password: plain URI
        return 'http://{}:{}'.format(server['ip'], server['port'])
def __display_server(self, server):
    """Connect and display the given server.

    :param server: dict describing the server (name, ip, port, ...).
    """
    # Display the Glances client for the selected server
    logger.debug("Selected server {}".format(server))

    # Connection can take time
    # Display a popup
    self.screen.display_popup(
        'Connect to {}:{}'.format(server['name'], server['port']), duration=1)

    # A password is needed to access to the server's stats
    if server['password'] is None:
        # First of all, check if a password is available in the [passwords] section
        clear_password = self.password.get_password(server['name'])
        if (clear_password is None or self.get_servers_list()
                [self.screen.active_server]['status'] == 'PROTECTED'):
            # Else, the password should be enter by the user
            # Display a popup to enter password
            clear_password = self.screen.display_popup(
                'Password needed for {}: '.format(server['name']), is_input=True)
        # Store the password for the selected server
        if clear_password is not None:
            self.set_in_selected('password', self.password.sha256_hash(clear_password))

    # Display the Glance client on the selected server
    logger.info("Connect Glances client to the {} server".format(server['key']))

    # Init the client
    args_server = self.args

    # Overwrite connection setting
    args_server.client = server['ip']
    args_server.port = server['port']
    args_server.username = server['username']
    args_server.password = server['password']
    client = GlancesClient(config=self.config, args=args_server, return_to_browser=True)

    # Test if client and server are in the same major version
    if not client.login():
        self.screen.display_popup(
            "Sorry, cannot connect to '{}'\n"
            "See '{}' for more details".format(server['name'], LOG_FILENAME))

        # Set the OFFLINE status for the selected server
        self.set_in_selected('status', 'OFFLINE')
    else:
        # Start the client loop
        # Return connection type: 'glances' or 'snmp'
        connection_type = client.serve_forever()

        try:
            logger.debug("Disconnect Glances client from the {} server".format(server['key']))
        except IndexError:
            # Server did not exist anymore
            pass
        else:
            # Set the connection status for the selected server
            if connection_type == 'snmp':
                self.set_in_selected('status', 'SNMP')
            else:
                self.set_in_selected('status', 'ONLINE')

    # Return to the browser (no server selected)
    self.screen.active_server = None
def export(self, title, data):
    """Generate graph from the data.

    Example for the mem plugin:
    {'percent': [
        (datetime.datetime(2018, 3, 24, 16, 27, 47, 282070), 51.8),
        (datetime.datetime(2018, 3, 24, 16, 27, 47, 540999), 51.9),
        ...
        ]
    }

    Return:
    * True if the graph have been generated
    * False if the graph have not been generated
    """
    if data == {}:
        # Nothing to draw
        return False

    chart = DateTimeLine(title=title.capitalize(),
                         width=self.width,
                         height=self.height,
                         style=self.style,
                         show_dots=False,
                         legend_at_bottom=True,
                         x_label_rotation=20,
                         x_value_formatter=lambda dt: dt.strftime('%Y/%m/%d %H:%M:%S'))
    # Down-sample each serie to at most self.width points before plotting
    for k, v in iteritems(time_serie_subsample(data, self.width)):
        chart.add(k, v)
    chart.render_to_file(os.path.join(self.path,
                                      title + '.svg'))
    return True
def is_standalone(self):
    """Return True if Glances is running in standalone mode."""
    # Standalone means: no client, no browser, no server, no web server
    modes = (self.args.client,
             self.args.browser,
             self.args.server,
             self.args.webserver)
    return not any(modes)
def is_client(self):
    """Return True if Glances is running in client mode."""
    # Client mode: direct client or browser, but never server
    client_or_browser = self.args.client or self.args.browser
    return client_or_browser and not self.args.server
def __get_password(self, description='', confirm=False, clear=False, username='glances'):
    """Read a password from the command line.

    - if confirm = True, with confirmation
    - if clear = True, plain (clear password)
    """
    # Local import: only needed when a password is actually requested
    # (presumably also avoids an import cycle — TODO confirm)
    from glances.password import GlancesPassword
    password = GlancesPassword(username=username)
    return password.get_password(description, confirm, clear)
def load_config(self, config):
    """Load outdated parameter in the global section of the configuration file.

    :param config: Glances configuration object (may lack the section/option).
    :return: True if the [global] section was found and read, else False.
    """
    global_section = 'global'
    if (hasattr(config, 'has_section') and
            config.has_section(global_section)):
        # Guard against a missing 'check_update' option: default to 'true'
        # (update check enabled) instead of crashing on None.lower().
        check_update = config.get_value(global_section, 'check_update', default='true')
        self.args.disable_check_update = check_update.lower() == 'false'
    else:
        logger.debug("Cannot find section {} in the configuration file".format(global_section))
        return False
    return True
def is_outdated(self):
    """Return True if a new version is available"""
    if self.args.disable_check_update:
        # Check is disabled by configuration
        return False
    logger.debug("Check Glances version (installed: {} / latest: {})".format(self.installed_version(), self.latest_version()))
    # Compare the two version strings with LooseVersion
    return LooseVersion(self.latest_version()) > LooseVersion(self.installed_version())
def _load_cache(self):
    """Load cache file and return cached data.

    :return: the unpickled cache dict, or {} when the cache file is
             missing, unreadable, stale (older than 7 days), written by
             a different installed version, or malformed.
    """
    max_refresh_date = timedelta(days=7)
    cached_data = {}
    try:
        with open(self.cache_file, 'rb') as f:
            cached_data = pickle.load(f)
    except Exception as e:
        logger.debug("Cannot read version from cache file: {} ({})".format(self.cache_file, e))
    else:
        logger.debug("Read version from cache file")
        try:
            expired = (cached_data['installed_version'] != self.installed_version() or
                       datetime.now() - cached_data['refresh_date'] > max_refresh_date)
        except (KeyError, TypeError):
            # Malformed cache (missing keys / unexpected types): the
            # original code raised an uncaught KeyError here. Discard it.
            expired = True
        if expired:
            # Reset the cache if:
            # - the installed version is different
            # - the refresh_date is > max_refresh_date
            cached_data = {}
    return cached_data
def _save_cache(self):
    """Save data to the cache file."""
    # Create the cache directory (no error if it already exists)
    safe_makedirs(self.cache_dir)
    # Create/overwrite the cache file
    try:
        with open(self.cache_file, 'wb') as f:
            pickle.dump(self.data, f)
    except Exception as e:
        # Best effort: a write failure only loses the cached version info
        logger.error("Cannot write version to cache file {} ({})".format(self.cache_file, e))
def update(self, stats, duration=3, cs_status=None, return_to_browser=False):
    """Update the servers' list screen.

    Wait for __refresh_time sec / catch key every 100 ms.

    stats: Dict of dict with servers stats
    :return: the index of the selected server (self.active_server)
    """
    # Flush display
    logger.debug('Servers list: {}'.format(stats))
    self.flush(stats)
    # Wait
    exitkey = False
    countdown = Timer(self.__refresh_time)
    while not countdown.finished() and not exitkey:
        # Getkey
        pressedkey = self.__catch_key(stats)
        # Is it an exit or select server key ?
        # ESC (0x1b), 'q' or ENTER (10) all leave the loop
        exitkey = (
            pressedkey == ord('\x1b') or pressedkey == ord('q') or pressedkey == 10)
        if not exitkey and pressedkey > -1:
            # Redraw display
            self.flush(stats)
        # Wait 100ms...
        self.wait()
    return self.active_server
def export(self, name, columns, points):
    """Write the points to the Prometheus exporter using Gauge."""
    logger.debug("Export {} stats to Prometheus exporter".format(name))

    # Remove non number stats and convert all to float (for Boolean)
    data = {k: float(v) for (k, v) in iteritems(dict(zip(columns, points))) if isinstance(v, Number)}

    # Write metrics to the Prometheus exporter
    for k, v in iteritems(data):
        # Prometheus metric name: prefix_<glances stats name>
        metric_name = self.prefix + self.METRIC_SEPARATOR + str(name) + self.METRIC_SEPARATOR + str(k)
        # Prometheus is very sensible to the metric name
        # See: https://prometheus.io/docs/practices/naming/
        for c in ['.', '-', '/', ' ']:
            metric_name = metric_name.replace(c, self.METRIC_SEPARATOR)
        # Get the labels
        labels = self.parse_tags(self.labels)
        # Manage an internal dict between metric name and Gauge:
        # a Gauge can only be registered once per process
        if metric_name not in self._metric_dict:
            self._metric_dict[metric_name] = Gauge(metric_name, k, labelnames=listkeys(labels))
        # Write the value
        if hasattr(self._metric_dict[metric_name], 'labels'):
            # Add the labels (see issue #1255)
            self._metric_dict[metric_name].labels(**labels).set(v)
        else:
            self._metric_dict[metric_name].set(v)
def parse_tags(self, tags):
    """Parse tags into a dict.

    input tags: a comma separated list of 'key:value' pairs.
        Example: foo:bar,spam:eggs
    output: a dict of tags. Example: {'foo': 'bar', 'spam': 'eggs'}
    Return {} when tags is empty or malformed.
    """
    if not tags:
        return {}
    try:
        return dict(pair.split(':') for pair in tags.split(','))
    except ValueError:
        # One of the 'key:value' pairs was missing or malformed
        logger.info('Invalid tags passed: %s', tags)
        return {}
def update(self, stats):
    """Update stats to a server.

    The method builds two lists: names and values
    and calls the export method to export the stats.

    Note: this class can be overwrite (for example in CSV and Graph).

    :return: True when stats were exported, False when export is disabled.
    """
    if not self.export_enable:
        return False

    # Get all the stats & limits
    all_stats = stats.getAllExportsAsDict(plugin_list=self.plugins_to_export())
    all_limits = stats.getAllLimitsAsDict(plugin_list=self.plugins_to_export())

    # Loop over plugins to export
    for plugin in self.plugins_to_export():
        if isinstance(all_stats[plugin], dict):
            # Stats is a dict: merge the limits into it
            all_stats[plugin].update(all_limits[plugin])
        elif isinstance(all_stats[plugin], list):
            # Stats is a list of dicts: merge the limits into each item
            # TypeError: string indices must be integers (Network plugin) #1054
            for i in all_stats[plugin]:
                i.update(all_limits[plugin])
        else:
            # Unsupported stats shape: skip this plugin
            continue
        export_names, export_values = self.__build_export(all_stats[plugin])
        self.export(plugin, export_names, export_values)

    return True
def _set_stats(self, input_stats):
    """Set the stats to the input_stats one.

    Build the result from each enabled plugin's get_raw() output;
    *input_stats* is not used by this implementation.
    """
    stats = {}
    for plugin_name, plugin in self._plugins.items():
        if plugin.is_enable():
            stats[plugin_name] = plugin.get_raw()
    return stats
def init(self):
    """Init the connection to the InfluxDB server.

    :return: an InfluxDBClient, or None when export is disabled.
             Exits the process when the connection fails or the target
             database does not exist.
    """
    if not self.export_enable:
        return None
    try:
        db = InfluxDBClient(host=self.host,
                            port=self.port,
                            username=self.user,
                            password=self.password,
                            database=self.db)
        get_all_db = [i['name'] for i in db.get_list_database()]
    except InfluxDBClientError as e:
        logger.critical("Cannot connect to InfluxDB database '%s' (%s)" % (self.db, e))
        sys.exit(2)
    if self.db in get_all_db:
        logger.info(
            "Stats will be exported to InfluxDB server: {}".format(db._baseurl))
    else:
        # The target database must be created beforehand
        logger.critical("InfluxDB database '%s' did not exist. Please create it" % self.db)
        sys.exit(2)
    return db
def _normalize(self, name, columns, points):
    """Normalize data for the InfluxDB's data model.

    Build the fields dict, skipping None values and converting the
    remaining ones to float (preferred) or str.

    NOTE: the previous implementation deleted None entries from the
    input lists while iterating them with enumerate(), which silently
    skipped the element following each deletion (e.g. two consecutive
    None points left a None field behind). Building a new dict avoids
    in-place deletion entirely.
    """
    fields = {}
    for column, point in zip(columns, points):
        # Supported types:
        # https://docs.influxdata.com/influxdb/v1.5/write_protocols/line_protocol_reference/
        if point is None:
            # Ignore points with None value
            continue
        try:
            fields[column] = float(point)
            continue
        except (TypeError, ValueError):
            pass
        try:
            fields[column] = str(point)
        except (TypeError, ValueError):
            pass
    return [{'measurement': name,
             'tags': self.parse_tags(self.tags),
             'fields': fields}]
def export(self, name, columns, points):
    """Write the points to the InfluxDB server."""
    # Manage prefix
    if self.prefix is not None:
        name = self.prefix + '.' + name
    # Write input to the InfluxDB database
    try:
        self.client.write_points(self._normalize(name, columns, points))
    except Exception as e:
        # Best effort: log and keep running on export failure
        logger.error("Cannot export {} stats to InfluxDB ({})".format(name, e))
    else:
        logger.debug("Export {} stats to InfluxDB".format(name))
def is_filtered(self, process):
    """Return True if the process item match the current filter

    The process item is a dict.
    """
    if self.filter is None:
        # No filter => Not filtered
        return False
    if self.filter_key is not None:
        # Apply filter on the configured <key>
        return self._is_process_filtered(process)
    # No key configured: match on process name, then on command line
    return (self._is_process_filtered(process, key='name') or
            self._is_process_filtered(process, key='cmdline'))
def load(self, config):
    """Load the password from the configuration file.

    :param config: Glances configuration object (or None).
    :return: dict mapping server name to password.
    """
    password_dict = {}

    if config is None:
        logger.warning("No configuration file available. Cannot load password list.")
    elif not config.has_section(self._section):
        logger.warning("No [%s] section in the configuration file. Cannot load password list." % self._section)
    else:
        logger.info("Start reading the [%s] section in the configuration file" % self._section)

        password_dict = dict(config.items(self._section))

    # Password list loaded
    logger.info("%s password(s) loaded from the configuration file" % len(password_dict))
    # NOTE(review): logging the password dictionary, even at debug level,
    # may leak secrets to the log file — consider removing.
    logger.debug("Password dictionary: %s" % password_dict)

    return password_dict
def get_mem(device_handle):
    """Get GPU device memory consumption in percent.

    Return None when the memory info cannot be queried (NVML error).
    """
    try:
        memory_info = pynvml.nvmlDeviceGetMemoryInfo(device_handle)
        # used/total are byte counts; express usage as a percentage
        return memory_info.used * 100.0 / memory_info.total
    except pynvml.NVMLError:
        return None
def exit(self):
    """Overwrite the exit method to close the GPU API."""
    if self.nvml_ready:
        try:
            pynvml.nvmlShutdown()
        except Exception as e:
            # Shutdown failure is not fatal: just log it
            logger.debug("pynvml failed to shutdown correctly ({})".format(e))

    # Call the father exit method
    super(Plugin, self).exit()
def update_glances(self):
    """Get stats from Glances server.

    Return the client/server connection status:
    - Connected: Connection OK
    - Disconnected: Connection NOK
    """
    # Update the stats
    try:
        server_stats = json.loads(self.client.getAll())
    except socket.error:
        # Client cannot get server stats
        return "Disconnected"
    except Fault:
        # Client cannot get server stats (issue #375)
        return "Disconnected"
    else:
        # Put it in the internal dict
        self.stats.update(server_stats)
        return "Connected"
def get_alert(self, stat, header=""):
    """Manage limits of the folder list.

    :param stat: dict with 'size' and the 'careful'/'warning'/'critical'
                 thresholds (thresholds are in MB).
    :return: one of 'DEFAULT', 'OK', 'CAREFUL', 'WARNING', 'CRITICAL'.
    """
    if not isinstance(stat['size'], numbers.Number):
        # Size is not a number: cannot compare against thresholds
        ret = 'DEFAULT'
    else:
        ret = 'OK'
        # Compare against thresholds, most severe first (thresholds are MB)
        if stat['critical'] is not None and \
                stat['size'] > int(stat['critical']) * 1000000:
            ret = 'CRITICAL'
        elif stat['warning'] is not None and \
                stat['size'] > int(stat['warning']) * 1000000:
            ret = 'WARNING'
        elif stat['careful'] is not None and \
                stat['size'] > int(stat['careful']) * 1000000:
            ret = 'CAREFUL'

    # Get stat name
    stat_name = self.get_stat_name(header=header)

    # Manage threshold
    self.manage_threshold(stat_name, ret)

    # Manage action
    self.manage_action(stat_name, ret.lower(), header, stat[self.get_key()])

    return ret
def safe_makedirs(path):
    """A safe function for creating a directory tree."""
    try:
        os.makedirs(path)
    except OSError as err:
        # Swallow the error only when the directory already exists;
        # re-raise anything else (including EEXIST on a non-directory)
        if err.errno != errno.EEXIST or not os.path.isdir(path):
            raise
def set_plugins(self, input_plugins):
    """Set the plugin list according to the Glances server.

    :param input_plugins: iterable of plugin names exposed by the server.
    """
    header = "glances_"
    for item in input_plugins:
        # Import the plugin
        try:
            plugin = __import__(header + item)
        except ImportError:
            # Server plugin can not be imported from the client side
            logger.error("Can not import {} plugin. Please upgrade your Glances client/server version.".format(item))
        else:
            # Add the plugin to the dictionary
            # The key is the plugin name
            # for example, the file glances_xxx.py
            # generate self._plugins_list["xxx"] = ...
            logger.debug("Server uses {} plugin".format(item))
            self._plugins[item] = plugin.Plugin(args=self.args)
    # Restoring system path
    # NOTE(review): sys_path appears to be a module-level saved copy of
    # sys.path — confirm it is set before this method runs.
    sys.path = sys_path
def init(self):
    """Init the connection to the RESTful server.

    :return: the endpoint URL string, or None when export is disabled.
    """
    if not self.export_enable:
        return None
    # Build the RESTful URL where the stats will be posted
    url = '{}://{}:{}{}'.format(self.protocol,
                                self.host,
                                self.port,
                                self.path)
    logger.info(
        "Stats will be exported to the RESTful endpoint {}".format(url))
    return url
def __set_folder_list(self, section):
    """Init the monitored folder list.

    The list is defined in the Glances configuration file
    (keys folder_1_*, folder_2_*, ... up to the max size).
    """
    for l in range(1, self.__folder_list_max_size + 1):
        value = {}
        key = 'folder_' + str(l) + '_'

        # Path is mandatory
        value['indice'] = str(l)
        value['path'] = self.config.get_value(section, key + 'path')
        if value['path'] is None:
            # Hole in the numbering: skip this index
            continue
        else:
            value['path'] = nativestr(value['path'])

        # Optional conf keys: one threshold and one action per severity
        for i in ['careful', 'warning', 'critical']:
            # Read threshold
            value[i] = self.config.get_value(section, key + i)
            if value[i] is not None:
                logger.debug("{} threshold for folder {} is {}".format(i, value["path"], value[i]))
            # Read action
            action = self.config.get_value(section, key + i + '_action')
            if action is not None:
                value[i + '_action'] = action
                logger.debug("{} action for folder {} is {}".format(i, value["path"], value[i + '_action']))

        # Add the item to the list
        self.__folder_list.append(value)
def __folder_size(self, path):
    """Return the size (in bytes) of the directory given by path.

    path: <string>

    Sums regular-file sizes recursively; unreadable entries are skipped.
    """
    ret = 0
    for f in scandir(path):
        # Fix: the original guard `f.name != '.' or f.name != '..'` was a
        # tautology (always True). Use a proper membership test. scandir
        # never yields '.'/'..' anyway, so practical behavior is unchanged.
        # NOTE(review): is_dir() follows symlinks — a dir symlink cycle
        # would recurse forever; confirm whether that matters for callers.
        if f.is_dir() and f.name not in ('.', '..'):
            ret += self.__folder_size(os.path.join(path, f.name))
        else:
            try:
                ret += f.stat().st_size
            except OSError:
                pass
    return ret
def exit(self):
    """Close the socket and context"""
    # Tear down in acquisition order: socket first, then its context.
    client, context = self.client, self.context
    if client is not None:
        client.close()
    if context is not None:
        context.destroy()
def export(self, name, columns, points):
    """Write the points to the ZeroMQ server.

    Returns False when the stats are empty (nothing published),
    True otherwise (even when the send fails — the error is logged).
    """
    logger.debug("Export {} stats to ZeroMQ".format(name))

    payload = dict(zip(columns, points))
    # Do not publish empty stats
    if not payload:
        return False

    # Glances envelopes the stats in a publish message with two frames:
    # - First frame containing the following prefix (STRING)
    # - Second frame with the Glances plugin name (STRING)
    # - Third frame with the Glances plugin stats (JSON)
    message = [b(self.prefix), b(name), asbytes(json.dumps(payload))]

    # Write data to the ZeroMQ bus
    # Result can be view: tcp://host:port
    try:
        self.client.send_multipart(message)
    except Exception as e:
        logger.error("Cannot export {} stats to ZeroMQ ({})".format(name, e))

    return True
def init(self):
    """Init the connection to the Cassandra server.

    Returns a (cluster, session) tuple, or None when export is disabled.
    Exits the process (exit code 2) when the cluster is unreachable.
    """
    if not self.export_enable:
        return None

    # if username and/or password are not set the connection will try to connect with no auth
    auth_provider = PlainTextAuthProvider(
        username=self.username, password=self.password)

    # Cluster
    try:
        cluster = Cluster([self.host],
                          port=int(self.port),
                          protocol_version=int(self.protocol_version),
                          auth_provider=auth_provider)
        session = cluster.connect()
    except Exception as e:
        logger.critical("Cannot connect to Cassandra cluster '%s:%s' (%s)" % (self.host, self.port, e))
        sys.exit(2)

    # Keyspace: created on demand when it does not exist yet
    try:
        session.set_keyspace(self.keyspace)
    except InvalidRequest as e:
        logger.info("Create keyspace {} on the Cassandra cluster".format(self.keyspace))
        c = "CREATE KEYSPACE %s WITH replication = { 'class': 'SimpleStrategy', 'replication_factor': '%s' }" % (self.keyspace, self.replication_factor)
        session.execute(c)
        session.set_keyspace(self.keyspace)

    logger.info(
        "Stats will be exported to Cassandra cluster {} ({}) in keyspace {}".format(
            cluster.metadata.cluster_name, cluster.metadata.all_hosts(), self.keyspace))

    # Table: one row per (plugin, timeuuid), stats stored as a text->float map
    try:
        session.execute("CREATE TABLE %s (plugin text, time timeuuid, stat map<text,float>, PRIMARY KEY (plugin, time)) WITH CLUSTERING ORDER BY (time DESC)" % self.table)
    except Exception:
        logger.debug("Cassandra table %s already exist" % self.table)

    return cluster, session
def export(self, name, columns, points):
    """Write the points to the Cassandra cluster.

    Non-numeric values are dropped; booleans/ints are stored as floats to
    match the table's map<text,float> column.
    """
    logger.debug("Export {} stats to Cassandra".format(name))

    # Remove non number stats and convert all to float (for Boolean)
    # Fix: dict.iteritems() is Python 2 only and raised AttributeError on
    # Python 3; items() works on both interpreters.
    data = {k: float(v) for (k, v) in dict(zip(columns, points)).items()
            if isinstance(v, Number)}

    # Write input to the Cassandra table
    try:
        stmt = "INSERT INTO {} (plugin, time, stat) VALUES (?, ?, ?)".format(self.table)
        query = self.session.prepare(stmt)
        self.session.execute(
            query,
            (name, uuid_from_time(datetime.now()), data)
        )
    except Exception as e:
        logger.error("Cannot export {} stats to Cassandra ({})".format(name, e))
def _linux_os_release():
    """Try to determine the name of a Linux distribution.

    This function checks for the /etc/os-release file.
    It takes the name from the 'NAME' field and the version from 'VERSION_ID'.
    An empty string is returned if the above values cannot be determined.
    """
    wanted_keys = ('NAME', 'VERSION_ID')
    found = {}
    try:
        with open(os.path.join('/etc', 'os-release')) as release_file:
            for raw_line in release_file:
                for field in wanted_keys:
                    if raw_line.startswith(field):
                        # Take the value after '=' and strip surrounding quotes
                        found[field] = re.sub(r'^"|"$', '',
                                              raw_line.strip().split('=')[1])
    except (OSError, IOError):
        return ''

    pretty_name = ''
    if found:
        if 'NAME' in found:
            pretty_name = found['NAME']
        if 'VERSION_ID' in found:
            pretty_name += ' {}'.format(found['VERSION_ID'])
    return pretty_name
def get_alert(self, nbprocess=0, countmin=None, countmax=None, header="", log=False):
    """Return the alert status relative to the process number."""
    if nbprocess is None:
        return 'OK'
    # When a bound is not configured, default it to the observed count.
    low = nbprocess if countmin is None else countmin
    high = nbprocess if countmax is None else countmax
    if nbprocess > 0:
        # Running: OK while inside the [low, high] window
        return 'OK' if int(low) <= int(nbprocess) <= int(high) else 'WARNING'
    # Not running: only OK when zero processes are expected
    return 'OK' if int(low) == 0 else 'CRITICAL'
def get_ports_alert(self, port, header="", log=False):
    """Return the alert status relative to the port scan return value.

    status None -> CAREFUL (scan not done), 0 -> CRITICAL (closed),
    rtt above rtt_warning -> WARNING, otherwise OK.
    """
    if port['status'] is None:
        ret = 'CAREFUL'
    elif port['status'] == 0:
        ret = 'CRITICAL'
    elif (isinstance(port['status'], (float, int)) and
          port['rtt_warning'] is not None and
          port['status'] > port['rtt_warning']):
        ret = 'WARNING'
    else:
        ret = 'OK'

    # Get stat name
    stat_name = self.get_stat_name(header=header)

    # Manage threshold
    self.manage_threshold(stat_name, ret)

    # Manage action
    self.manage_action(stat_name, ret.lower(), header, port[self.get_key()])

    return ret
def getPluginsList(self, enable=True):
    """Return the plugins list.

    if enable is True, only return the active plugins (default)
    if enable is False, return all the plugins

    Return: list of plugin name
    """
    if not enable:
        return list(self._plugins)
    return [name for name, plugin in self._plugins.items() if plugin.is_enable()]
def getExportsList(self, enable=True):
    """Return the exports list.

    if enable is True, only return the active exporters (default)
    if enable is False, return all the exporters

    Return: list of export module name
    """
    # _exports already holds only the active exporters.
    return list(self._exports) if enable else list(self._exports_all)
def export(self, input_stats=None):
    """Export all the stats.

    Each export module is ran in a dedicated thread.
    """
    stats = input_stats or {}
    for module_name, exporter in self._exports.items():
        logger.debug("Export stats using the %s module" % module_name)
        # Fire-and-forget: threads are not joined.
        worker = threading.Thread(target=exporter.update, args=(stats,))
        worker.start()
def init(self):
    """Init the connection to the ES server.

    Returns an Elasticsearch client, or None when export is disabled.
    Exits the process (exit code 2) when the server is unreachable.
    """
    if not self.export_enable:
        return None

    # NOTE(review): this appends today's date to self.index on every call —
    # calling init() twice would double the suffix; confirm it runs once
    # per exporter instance.
    self.index='{}-{}'.format(self.index, datetime.utcnow().strftime("%Y.%m.%d"))

    # Index template: map longs to integers, and strings to analyzed text
    # with a non-analyzed 'raw' keyword sub-field.
    template_body = {
        "mappings": {
            "glances": {
                "dynamic_templates": [
                    {
                        "integers": {
                            "match_mapping_type": "long",
                            "mapping": {
                                "type": "integer"
                            }
                        }
                    },
                    {
                        "strings": {
                            "match_mapping_type": "string",
                            "mapping": {
                                "type": "text",
                                "fields": {
                                    "raw": {
                                        "type": "keyword",
                                        "ignore_above": 256
                                    }
                                }
                            }
                        }
                    }
                ]
            }
        }
    }

    try:
        es = Elasticsearch(hosts=['{}:{}'.format(self.host, self.port)])
    except Exception as e:
        logger.critical("Cannot connect to ElasticSearch server %s:%s (%s)" % (self.host, self.port, e))
        sys.exit(2)
    else:
        logger.info("Connected to the ElasticSearch server %s:%s" % (self.host, self.port))

    try:
        index_count = es.count(index=self.index)['count']
    except Exception as e:
        # Index did not exist, it will be created at the first write
        # Create it...
        es.indices.create(index=self.index,body=template_body)
    else:
        logger.info("The index %s exists and holds %s entries." % (self.index, index_count))

    return es
def export(self, name, columns, points):
    """Write the points to the ES server.

    One document is created per (plugin, metric) pair; the document id is
    '<plugin>.<metric>', so each export overwrites the previous value.
    """
    logger.debug("Export {} stats to ElasticSearch".format(name))

    # Create DB input
    # https://elasticsearch-py.readthedocs.io/en/master/helpers.html
    actions = []
    for c, p in zip(columns, points):
        dtnow = datetime.utcnow()
        action = {
            "_index": self.index,
            "_id": '{}.{}'.format(name,c),
            "_type": "glances",
            "_source": {
                "plugin": name,
                "metric": c,
                # Values are stored as strings; the index template handles typing
                "value": str(p),
                "timestamp": dtnow.isoformat('T')
            }
        }
        logger.debug("Exporting the following object to elasticsearch: {}".format(action))
        actions.append(action)

    # Write input to the ES index
    try:
        helpers.bulk(self.client, actions)
    except Exception as e:
        logger.error("Cannot export {} stats to ElasticSearch ({})".format(name, e))
def get(self):
    """Get the first public IP address returned by one of the online services.

    Returns None when every service fails or the timeout expires
    (the original crashed with AttributeError on ``None.split``).
    """
    q = queue.Queue()

    for u, j, k in urls:
        t = threading.Thread(target=self._get_ip_public, args=(q, u, j, k))
        t.daemon = True
        t.start()

    timer = Timer(self.timeout)
    ip = None
    while not timer.finished() and ip is None:
        if q.qsize() > 0:
            ip = q.get()

    # Fix: all services may fail (workers put None) or time out, leaving
    # ip None — guard before splitting.
    if ip is None:
        return None
    return ', '.join(set([x.strip() for x in ip.split(',')]))
def _get_ip_public(self, queue_target, url, json=False, key=None):
    """Request the url service and put the result in the queue_target.

    Puts None on any fetch or parse failure.
    """
    try:
        payload = urlopen(url, timeout=self.timeout).read().decode('utf-8')
    except Exception as e:
        logger.debug("IP plugin - Cannot open URL {} ({})".format(url, e))
        queue_target.put(None)
        return
    if not json:
        # Plain-text service: the body is the address itself.
        queue_target.put(payload)
        return
    # JSON service: extract the configured key.
    try:
        queue_target.put(loads(payload)[key])
    except ValueError:
        queue_target.put(None)
def init(self):
    """Init the connection to the Kafka server.

    Returns a KafkaProducer, or None when export is disabled.
    Exits the process (exit code 2) when the server is unreachable.
    """
    if not self.export_enable:
        return None

    # Build the server URI with host and port
    server_uri = '{}:{}'.format(self.host, self.port)

    try:
        producer = KafkaProducer(
            bootstrap_servers=server_uri,
            value_serializer=lambda v: json.dumps(v).encode('utf-8'),
            compression_type=self.compression)
    except Exception as e:
        logger.critical("Cannot connect to Kafka server %s (%s)" % (server_uri, e))
        sys.exit(2)
    logger.info("Connected to the Kafka server %s" % server_uri)
    return producer
def export(self, name, columns, points):
    """Write the points to the kafka server."""
    logger.debug("Export {} stats to Kafka".format(name))

    payload = dict(zip(columns, points))

    # Send stats to the kafka topic
    # key=<plugin name>
    # value=JSON dict
    try:
        self.client.send(self.topic, key=name, value=payload)
    except Exception as e:
        logger.error("Cannot export {} stats to Kafka ({})".format(name, e))
def _sort_io_counters(process, sortedby='io_counters', sortedby_secondary='memory_percent'):
    """Specific case for io_counters

    Sum of io_r + io_w"""
    counters = process[sortedby]
    # (read_new - read_old) + (write_new - write_old)
    return (counters[0] - counters[2]) + (counters[1] - counters[3])
def _sort_lambda(sortedby='cpu_percent', sortedby_secondary='memory_percent'):
    """Return a sort lambda function for the sortedbykey

    Only io_counters and cpu_times need a dedicated comparator;
    every other key returns None (plain attribute sort).
    """
    special_sorters = {
        'io_counters': _sort_io_counters,
        'cpu_times': _sort_cpu_times,
    }
    return special_sorters.get(sortedby)
def update_processcount(self, plist):
    """Update the global process count from the current processes list.

    :param plist: list of process dicts, each with at least 'status' and
                  'num_threads' keys
    """
    # Update the maximum process ID (pid) number
    self.processcount['pid_max'] = self.pid_max
    # Count the processes per status.
    # Fixes vs original:
    # - compare status with == instead of `is` (string identity only worked
    #   by interning accident)
    # - skip the aggregate keys: the status loop used to overwrite
    #   'pid_max' with 0 ('thread'/'total' are recomputed below anyway)
    for k in self.processcount:
        if k in ('total', 'thread', 'pid_max'):
            continue
        self.processcount[k] = len([p for p in plist if p['status'] == k])
    # Compute thread
    self.processcount['thread'] = sum(p['num_threads'] for p in plist
                                      if p['num_threads'] is not None)
    # Compute total
    self.processcount['total'] = len(plist)
def pid_max(self):
    """
    Get the maximum PID value.

    On Linux the value is read from /proc/sys/kernel/pid_max
    (default 32768; up to 2^22 on 64-bit kernels — see `man 5 proc`).
    Returns None on other platforms or when the file is unreadable.
    """
    if not LINUX:
        return None
    # XXX: waiting for https://github.com/giampaolo/psutil/issues/720
    try:
        with open('/proc/sys/kernel/pid_max', 'rb') as f:
            return int(f.read())
    except (OSError, IOError):
        return None
def seconds_to_hms(input_seconds):
    """Convert seconds to human-readable time.

    Returns (hours:int, minutes:int, seconds:str) with seconds
    zero-padded to two characters.
    """
    total_minutes, secs = divmod(input_seconds, 60)
    hrs, mins = divmod(total_minutes, 60)
    return int(hrs), int(mins), str(int(secs)).zfill(2)
def split_cmdline(cmdline):
    """Return path, cmd and arguments for a process cmdline.

    cmdline: argv-style list; arguments are re-joined with spaces.
    """
    path, cmd = os.path.split(cmdline[0])
    return path, cmd, ' '.join(cmdline[1:])
def get_nice_alert(self, value):
    """Return the alert relative to the Nice configuration list

    The configured limits are lists of nice values as strings; a missing
    limit (KeyError) is simply skipped.
    """
    value = str(value)
    for limit_name, level in (('nice_critical', 'CRITICAL'),
                              ('nice_warning', 'WARNING'),
                              ('nice_careful', 'CAREFUL')):
        try:
            if value in self.get_limit(limit_name):
                return level
        except KeyError:
            pass
    return 'DEFAULT'
def __msg_curse_header(self, ret, process_sort_key, args=None):
    """Build the header and add it to the ret dict.

    :param ret: list of curses messages, appended to in place
    :param process_sort_key: current sort key — the matching column header
                             is decorated with the 'SORT' style
    :param args: command-line arguments namespace
    """
    sort_style = 'SORT'

    # CPU column label depends on IRIX mode and the logical core count
    if args.disable_irix and 0 < self.nb_log_core < 10:
        msg = self.layout_header['cpu'].format('CPU%/' + str(self.nb_log_core))
    elif args.disable_irix and self.nb_log_core != 0:
        msg = self.layout_header['cpu'].format('CPU%/C')
    else:
        msg = self.layout_header['cpu'].format('CPU%')
    ret.append(self.curse_add_line(msg, sort_style if process_sort_key == 'cpu_percent' else 'DEFAULT'))
    msg = self.layout_header['mem'].format('MEM%')
    ret.append(self.curse_add_line(msg, sort_style if process_sort_key == 'memory_percent' else 'DEFAULT'))
    # VIRT/RES are optional (hidden on narrow screens)
    msg = self.layout_header['virt'].format('VIRT')
    ret.append(self.curse_add_line(msg, optional=True))
    msg = self.layout_header['res'].format('RES')
    ret.append(self.curse_add_line(msg, optional=True))
    # PID column width tracks the widest pid currently displayed
    msg = self.layout_header['pid'].format('PID', width=self.__max_pid_size())
    ret.append(self.curse_add_line(msg))
    msg = self.layout_header['user'].format('USER')
    ret.append(self.curse_add_line(msg, sort_style if process_sort_key == 'username' else 'DEFAULT'))
    msg = self.layout_header['time'].format('TIME+')
    ret.append(self.curse_add_line(msg, sort_style if process_sort_key == 'cpu_times' else 'DEFAULT', optional=True))
    msg = self.layout_header['thread'].format('THR')
    ret.append(self.curse_add_line(msg))
    msg = self.layout_header['nice'].format('NI')
    ret.append(self.curse_add_line(msg))
    msg = self.layout_header['status'].format('S')
    ret.append(self.curse_add_line(msg))
    # I/O rate columns are optional AND additional
    msg = self.layout_header['ior'].format('R/s')
    ret.append(self.curse_add_line(msg, sort_style if process_sort_key == 'io_counters' else 'DEFAULT', optional=True, additional=True))
    msg = self.layout_header['iow'].format('W/s')
    ret.append(self.curse_add_line(msg, sort_style if process_sort_key == 'io_counters' else 'DEFAULT', optional=True, additional=True))
    msg = self.layout_header['command'].format('Command')
    ret.append(self.curse_add_line(msg, sort_style if process_sort_key == 'name' else 'DEFAULT'))
def __sum_stats(self, key, indice=None, mmm=None):
    """Return the sum of the stats value for the given key.

    * indice: If indice is set, get the p[key][indice]
    * mmm: display min, max, mean or current (if mmm=None)
    """
    # Compute stats summary
    ret = 0
    for p in self.stats:
        if key not in p:
            # Correct issue #1188
            continue
        if p[key] is None:
            # Correct https://github.com/nicolargo/glances/issues/1105#issuecomment-363553788
            continue
        if indice is None:
            ret += p[key]
        else:
            ret += p[key][indice]

    # Manage Min/Max/Mean
    mmm_key = self.__mmm_key(key, indice)
    if mmm == 'min':
        try:
            if self.mmm_min[mmm_key] > ret:
                self.mmm_min[mmm_key] = ret
        except AttributeError:
            # self.mmm_min does not exist yet: create the cache lazily and
            # report 0 for this very first call
            self.mmm_min = {}
            return 0
        except KeyError:
            # First value seen for this key: it is the current minimum
            self.mmm_min[mmm_key] = ret
        ret = self.mmm_min[mmm_key]
    elif mmm == 'max':
        try:
            if self.mmm_max[mmm_key] < ret:
                self.mmm_max[mmm_key] = ret
        except AttributeError:
            # Lazy creation of the max cache, mirroring the min case
            self.mmm_max = {}
            return 0
        except KeyError:
            self.mmm_max[mmm_key] = ret
        ret = self.mmm_max[mmm_key]

    return ret
def build_header(self, plugin, attribute, stat):
    """Build and return the header line

    One '<plugin>.<field><separator>' cell per exported value.
    """
    sep = self.separator
    if attribute is not None:
        return '{}.{}{}'.format(plugin, attribute, sep)
    line = ''
    if isinstance(stat, dict):
        for field in stat.keys():
            line += '{}.{}{}'.format(plugin, str(field), sep)
    elif isinstance(stat, list):
        for item in stat:
            # Only keyed sub-dicts contribute columns
            if isinstance(item, dict) and 'key' in item:
                for field in item.keys():
                    line += '{}.{}.{}{}'.format(plugin, str(item[item['key']]),
                                                str(field), sep)
    else:
        # Scalar stat: a single column named after the plugin
        line += '{}{}'.format(plugin, sep)
    return line
def build_data(self, plugin, attribute, stat):
    """Build and return the data line

    One '<value><separator>' cell per exported value, mirroring
    build_header; missing attributes render as self.na.
    """
    sep = self.separator
    if attribute is not None:
        return '{}{}'.format(str(stat.get(attribute, self.na)), sep)
    line = ''
    if isinstance(stat, dict):
        for value in stat.values():
            line += '{}{}'.format(str(value), sep)
    elif isinstance(stat, list):
        for item in stat:
            # Only keyed sub-dicts contribute cells
            if isinstance(item, dict) and 'key' in item:
                for value in item.values():
                    line += '{}{}'.format(str(value), sep)
    else:
        # Scalar stat: one cell
        line += '{}{}'.format(str(stat), sep)
    return line
def init(self):
    """Init the connection to the Riemann server.

    Returns a bernhard.Client, or None when export is disabled or the
    connection fails (the error is logged).
    """
    if not self.export_enable:
        return None
    try:
        return bernhard.Client(host=self.host, port=self.port)
    except Exception as e:
        logger.critical("Connection to Riemann failed : %s " % e)
        return None
def export(self, name, columns, points):
    """Write the points in Riemann.

    Non-numeric values are skipped; one event is sent per numeric column.
    """
    for idx in range(len(columns)):
        value = points[idx]
        if not isinstance(value, Number):
            continue
        event = {'host': self.hostname,
                 'service': name + " " + columns[idx],
                 'metric': value}
        logger.debug(event)
        try:
            self.client.send(event)
        except Exception as e:
            logger.error("Cannot export stats to Riemann (%s)" % e)
def export(self, name, columns, points):
    """Write the points to the CouchDB server."""
    logger.debug("Export {} stats to CouchDB".format(name))

    # Build the document and tag it with the stat type and a timestamp
    document = dict(zip(columns, points))
    document['type'] = name
    document['time'] = couchdb.mapping.DateTimeField()._to_json(datetime.now())

    # Write input to the CouchDB database
    # Result can be view: http://127.0.0.1:5984/_utils
    try:
        self.client[self.db].save(document)
    except Exception as e:
        logger.error("Cannot export {} stats to CouchDB ({})".format(name, e))
def update(self):
    """Update core stats.

    Stats is a dict (with both physical and log cpu number) instead of a
    integer.
    """
    # Init new stats
    stats = self.get_init_value()

    if self.input_method == 'local':
        # psutil >= 2.0 exposes cpu_count()/cpu_count(logical=False):
        # - phys: physical cores only (hyper thread CPUs are excluded)
        # - log: logical CPUs in the system
        # Either value may be None when undetermined.
        try:
            stats["phys"] = psutil.cpu_count(logical=False)
            stats["log"] = psutil.cpu_count()
        except NameError:
            self.reset()
    elif self.input_method == 'snmp':
        # Update stats using SNMP
        # http://stackoverflow.com/questions/5662467/how-to-find-out-the-number-of-cpus-using-snmp
        pass

    # Update the stats
    self.stats = stats

    return self.stats
def loop_position(self):
    """Return the current sort in the loop

    Index of the active sort key inside self._sort_loop, or 0 when the
    key is not part of the loop.
    """
    current_key = glances_processes.sort_key
    for index, candidate in enumerate(self._sort_loop):
        if candidate == current_key:
            return index
    return 0
def enable_fullquicklook(self):
    """Enable the full quicklook mode.

    Show only the quicklook bar on the top line by hiding the dedicated
    CPU/GPU/MEM/MEMSWAP plugins. (The original docstring said "Disable",
    which contradicted what the code does.)
    """
    self.args.disable_quicklook = False
    for p in ['cpu', 'gpu', 'mem', 'memswap']:
        setattr(self.args, 'disable_' + p, True)
def end(self):
    """Shutdown the curses window.

    Restores the terminal modes changed at init time. Every call is
    guarded because some curses builds (e.g. on Windows) lack these
    attributes.
    """
    if hasattr(curses, 'echo'):
        curses.echo()
    if hasattr(curses, 'nocbreak'):
        curses.nocbreak()
    if hasattr(curses, 'curs_set'):
        try:
            # Restoring the cursor can fail on terminals without cursor support
            curses.curs_set(1)
        except Exception:
            pass
    curses.endwin()
def display(self, stats, cs_status=None):
    """Display stats on the screen.

    stats: Stats database to display
    cs_status:
        "None": standalone or server mode
        "Connected": Client is connected to a Glances server
        "SNMP": Client is connected to a SNMP server
        "Disconnected": Client is disconnected from the server

    Return:
        True if the stats have been displayed
        False if the help have been displayed
    """
    # Init the internal line/column for Glances Curses
    self.init_line_column()

    # Update the stats messages
    ###########################

    # Get all the plugins but quicklook and proceslist
    self.args.cs_status = cs_status
    __stat_display = self.__get_stat_display(stats, layer=cs_status)

    # Adapt number of processes to the available space
    # NOTE(review): the constant 11 presumably accounts for the fixed
    # header/summary/spacing lines — confirm if the layout changes.
    max_processes_displayed = (
        self.screen.getmaxyx()[0] - 11 -
        (0 if 'docker' not in __stat_display else
         self.get_stats_display_height(__stat_display["docker"])) -
        (0 if 'processcount' not in __stat_display else
         self.get_stats_display_height(__stat_display["processcount"])) -
        (0 if 'amps' not in __stat_display else
         self.get_stats_display_height(__stat_display["amps"])) -
        (0 if 'alert' not in __stat_display else
         self.get_stats_display_height(__stat_display["alert"])))
    try:
        if self.args.enable_process_extended:
            # The extended process panel takes 4 extra lines
            max_processes_displayed -= 4
    except AttributeError:
        pass
    if max_processes_displayed < 0:
        max_processes_displayed = 0
    if (glances_processes.max_processes is None or
            glances_processes.max_processes != max_processes_displayed):
        logger.debug("Set number of displayed processes to {}".format(max_processes_displayed))
        glances_processes.max_processes = max_processes_displayed

    # Get the processlist
    __stat_display["processlist"] = stats.get_plugin(
        'processlist').get_stats_display(args=self.args)

    # Display the stats on the curses interface
    ###########################################

    # Help screen (on top of the other stats)
    if self.args.help_tag:
        # Display the stats...
        self.display_plugin(
            stats.get_plugin('help').get_stats_display(args=self.args))
        # ... and exit
        return False

    # =====================================
    # Display first line (system+ip+uptime)
    # Optionnaly: Cloud on second line
    # =====================================
    self.__display_header(__stat_display)

    # ==============================================================
    # Display second line (<SUMMARY>+CPU|PERCPU+<GPU>+LOAD+MEM+SWAP)
    # ==============================================================
    self.__display_top(__stat_display, stats)

    # ==================================================================
    # Display left sidebar (NETWORK+PORTS+DISKIO+FS+SENSORS+Current time)
    # ==================================================================
    self.__display_left(__stat_display)

    # ====================================
    # Display right stats (process and co)
    # ====================================
    self.__display_right(__stat_display)

    # =====================
    # Others popup messages
    # =====================

    # Display edit filter popup
    # Only in standalone mode (cs_status is None)
    if self.edit_filter and cs_status is None:
        new_filter = self.display_popup(
            'Process filter pattern: \n\n' +
            'Examples:\n' +
            '- python\n' +
            '- .*python.*\n' +
            '- /usr/lib.*\n' +
            '- name:.*nautilus.*\n' +
            '- cmdline:.*glances.*\n' +
            '- username:nicolargo\n' +
            '- username:^root ',
            is_input=True,
            input_value=glances_processes.process_filter_input)
        glances_processes.process_filter = new_filter
    elif self.edit_filter and cs_status is not None:
        self.display_popup('Process filter only available in standalone mode')
    self.edit_filter = False

    # Display graph generation popup
    if self.args.generate_graph:
        self.display_popup('Generate graph in {}'.format(self.args.export_graph_path))

    return True
def __display_left(self, stat_display):
    """Display the left sidebar in the Curses interface."""
    self.init_column()

    if self.args.disable_left_sidebar:
        return

    for plugin_name in self._left_sidebar:
        # Only show plugins that are togglable via the args namespace
        # and that actually have stats ready.
        togglable = (hasattr(self.args, 'enable_' + plugin_name) or
                     hasattr(self.args, 'disable_' + plugin_name))
        if togglable and plugin_name in stat_display:
            self.new_line()
            self.display_plugin(stat_display[plugin_name])
def __display_right(self, stat_display):
    """Display the right sidebar in the Curses interface.

    docker + processcount + amps + processlist + alert
    """
    # Do not display anything if space is not available...
    if self.screen.getmaxyx()[1] < self._left_sidebar_min_width:
        return

    # Restore line position
    self.next_line = self.saved_line

    # Display right sidebar
    self.new_column()
    for p in self._right_sidebar:
        # Fix for issue #1470: the original guard read `if p not in p`
        # (a string always contains itself, so it never skipped anything);
        # the membership test must be against stat_display.
        if p not in stat_display:
            continue
        self.new_line()
        if p == 'processlist':
            self.display_plugin(stat_display['processlist'],
                                display_optional=(self.screen.getmaxyx()[1] > 102),
                                display_additional=(not MACOS),
                                max_y=(self.screen.getmaxyx()[0] -
                                       self.get_stats_display_height(stat_display['alert']) - 2))
        else:
            self.display_plugin(stat_display[p])
def display_popup(self, message,
                  size_x=None, size_y=None,
                  duration=3,
                  is_input=False,
                  input_size=30,
                  input_value=None):
    """Display a centered popup.

    If is_input is False:
        Display a centered popup with the given message during duration seconds
        If size_x and size_y: set the popup size
        else set it automatically
        Return True if the popup could be displayed

    If is_input is True:
        Display a centered popup with the given message and a input field
        If size_x and size_y: set the popup size
        else set it automatically
        Return the input string or None if the field is empty
    """
    # Center the popup: auto-size from the longest message line
    sentence_list = message.split('\n')
    if size_x is None:
        size_x = len(max(sentence_list, key=len)) + 4
        # Add space for the input field
        if is_input:
            size_x += input_size
    if size_y is None:
        size_y = len(sentence_list) + 4
    screen_x = self.screen.getmaxyx()[1]
    screen_y = self.screen.getmaxyx()[0]
    if size_x > screen_x or size_y > screen_y:
        # No size to display the popup => abord
        return False
    pos_x = int((screen_x - size_x) / 2)
    pos_y = int((screen_y - size_y) / 2)

    # Create the popup
    popup = curses.newwin(size_y, size_x, pos_y, pos_x)

    # Fill the popup
    popup.border()

    # Add the message
    for y, m in enumerate(message.split('\n')):
        popup.addnstr(2 + y, 2, m, len(m))

    if is_input and not WINDOWS:
        # Create a subwindow for the text field
        # NOTE: 'm' is intentionally the last message line here
        # (loop-variable reuse): the field starts right after it.
        subpop = popup.derwin(1, input_size, 2, 2 + len(m))
        subpop.attron(self.colors_list['FILTER'])
        # Init the field with the current value
        if input_value is not None:
            subpop.addnstr(0, 0, input_value, len(input_value))
        # Display the popup
        popup.refresh()
        subpop.refresh()
        # Create the textbox inside the subwindows
        self.set_cursor(2)
        self.term_window.keypad(1)
        textbox = GlancesTextbox(subpop, insert_mode=False)
        textbox.edit()
        self.set_cursor(0)
        self.term_window.keypad(0)
        if textbox.gather() != '':
            logger.debug(
                "User enters the following string: %s" % textbox.gather())
            # Strip the trailing character appended by Textbox.gather()
            return textbox.gather()[:-1]
        else:
            logger.debug("User centers an empty string")
            return None
    else:
        # Display the popup
        popup.refresh()
        self.wait(duration * 1000)
        return True
def display_plugin(self, plugin_stats,
                   display_optional=True,
                   display_additional=True,
                   max_y=65535,
                   add_space=0):
    """Display the plugin_stats on the screen.

    If display_optional=True display the optional stats
    If display_additional=True display additionnal stats
    max_y: do not display line > max_y
    add_space: add x space (line) after the plugin
    """
    # Exit if:
    # - the plugin_stats message is empty
    # - the display tag = False
    if plugin_stats is None or not plugin_stats['msgdict'] or not plugin_stats['display']:
        # Exit
        return 0

    # Get the screen size
    screen_x = self.screen.getmaxyx()[1]
    screen_y = self.screen.getmaxyx()[0]

    # Set the upper/left position of the message
    if plugin_stats['align'] == 'right':
        # Right align (last column)
        display_x = screen_x - self.get_stats_display_width(plugin_stats)
    else:
        display_x = self.column
    if plugin_stats['align'] == 'bottom':
        # Bottom (last line)
        display_y = screen_y - self.get_stats_display_height(plugin_stats)
    else:
        display_y = self.line

    # Display
    x = display_x
    x_max = x
    y = display_y
    for m in plugin_stats['msgdict']:
        # New line
        if m['msg'].startswith('\n'):
            # Go to the next line
            y += 1
            # Return to the first column
            x = display_x
            continue

        # Do not display outside the screen
        if x < 0:
            continue
        if not m['splittable'] and (x + len(m['msg']) > screen_x):
            continue
        if y < 0 or (y + 1 > screen_y) or (y > max_y):
            break

        # If display_optional = False do not display optional stats
        if not display_optional and m['optional']:
            continue
        # If display_additional = False do not display additional stats
        if not display_additional and m['additional']:
            continue

        # Is it possible to display the stat with the current screen size
        # !!! Crach if not try/except... Why ???
        try:
            self.term_window.addnstr(y, x, m['msg'],
                                     # Do not disply outside the screen
                                     screen_x - x,
                                     self.colors_list[m['decoration']])
        except Exception:
            pass
        else:
            # New column
            # Python 2: we need to decode to get real screen size because
            # UTF-8 special tree chars occupy several bytes.
            # Python 3: strings are strings and bytes are bytes, all is
            # good.
            try:
                x += len(u(m['msg']))
            except UnicodeDecodeError:
                # Quick and dirty hack for issue #745
                pass
            if x > x_max:
                x_max = x

    # Compute the next Glances column/line position
    self.next_column = max(
        self.next_column, x_max + self.space_between_column)
    self.next_line = max(self.next_line, y + self.space_between_line)

    # Have empty lines after the plugins
    self.next_line += add_space
def flush(self, stats, cs_status=None):
    """Clear the window, then redraw everything from *stats*.

    stats: Stats database to display
    cs_status:
        "None": standalone or server mode
        "Connected": Client is connected to the server
        "Disconnected": Client is disconnected from the server
    """
    # Wipe first so stale content never survives a redraw
    self.erase()
    # ... then paint the fresh stats
    self.display(stats, cs_status=cs_status)
def update(self, stats, duration=3, cs_status=None, return_to_browser=False):
    """Update the screen.

    INPUT
    stats: Stats database to display
    duration: duration of the loop
    cs_status:
        "None": standalone or server mode
        "Connected": Client is connected to the server
        "Disconnected": Client is disconnected from the server
    return_to_browser:
        True: Do not exist, return to the browser list
        False: Exit and return to the shell

    OUTPUT
    True: Exit key has been pressed
    False: Others cases...
    """
    # Flush display
    self.flush(stats, cs_status=cs_status)

    # If the duration is < 0 (update + export time > refresh_time)
    # Then display the interface and log a message
    if duration <= 0:
        logger.warning('Update and export time higher than refresh_time.')
        duration = 0.1

    # Wait duration (in s) time
    exitkey = False
    countdown = Timer(duration)
    # Set the default timeout (in ms) for the getch method
    # (makes __catch_key non-blocking past the deadline)
    self.term_window.timeout(int(duration * 1000))
    while not countdown.finished() and not exitkey:
        # Getkey
        pressedkey = self.__catch_key(return_to_browser=return_to_browser)
        # Is it an exit key ? (ESC == '\x1b', or 'q')
        exitkey = (pressedkey == ord('\x1b') or pressedkey == ord('q'))
        if not exitkey and pressedkey > -1:
            # Redraw display
            self.flush(stats, cs_status=cs_status)
            # Overwrite the timeout with the countdown so the total wait
            # still honours the original deadline
            self.term_window.timeout(int(countdown.get() * 1000))

    return exitkey
def get_stats_display_width(self, curse_msg, without_option=False):
    """Return the width of the formatted curses message.

    The width is the length of the longest rendered line. Messages are
    ASCII-transcoded with 'replace' so that multi-byte characters count
    as a single cell — presumably to approximate on-screen width; TODO
    confirm for wide (CJK) glyphs.
    Returns 0 if the width cannot be computed.
    """
    try:
        if without_option:
            # Size without options (optional messages count as empty)
            c = len(max(''.join([(u(u(nativestr(i['msg'])).encode('ascii', 'replace')) if not i['optional'] else "")
                                 for i in curse_msg['msgdict']]).split('\n'),
                        key=len))
        else:
            # Size with all options
            c = len(max(''.join([u(u(nativestr(i['msg'])).encode('ascii', 'replace'))
                                 for i in curse_msg['msgdict']]).split('\n'),
                        key=len))
    except Exception as e:
        logger.debug('ERROR: Can not compute plugin width ({})'.format(e))
        return 0
    else:
        return c
def get_stats_display_height(self, curse_msg):
    r"""Return the height of the formatted curses message.

    The height is defined by the number of '\n' (new line).

    :param curse_msg: dict with a 'msgdict' list of {'msg': str, ...}
    :return: number of rendered lines (>= 1), or 0 on error
    """
    try:
        # Count every newline character across all messages.
        # The previous code counted only messages *exactly equal* to
        # '\n'; display_plugin() breaks a line for any message that
        # merely starts with '\n', so embedded newlines must count too.
        # For the standard case (standalone '\n' markers) both forms
        # are identical.
        c = ''.join([i['msg'] for i in curse_msg['msgdict']]).count('\n')
    except Exception as e:
        logger.debug('ERROR: Can not compute plugin height ({})'.format(e))
        return 0
    else:
        # +1: N separators delimit N+1 lines
        return c + 1
def getbulk_by_oid(self, non_repeaters, max_repetitions, *oid):
    """SNMP getbulk request.

    In contrast to snmpwalk, this information will typically be gathered in
    a single transaction with the agent, rather than one transaction per
    variable found.

    * non_repeaters: This specifies the number of supplied variables that
      should not be iterated over.
    * max_repetitions: This specifies the maximum number of iterations over
      the repeating variables.
    * oid: oid list
    > Return a list of dicts (empty list for SNMPv1, which has no bulk)
    """
    if self.version.startswith('3'):
        # SNMPv3: authenticated bulk request.
        # Bug fix: the original code issued a getCmd() here, unpacked the
        # result into an unused name, and then fell through to the
        # version-2 test below — whose 'else' returned [], silently
        # discarding every v3 result.
        errorIndication, errorStatus, errorIndex, varBindTable = self.cmdGen.bulkCmd(
            cmdgen.UsmUserData(self.user, self.auth),
            cmdgen.UdpTransportTarget((self.host, self.port)),
            non_repeaters,
            max_repetitions,
            *oid
        )
    elif self.version.startswith('2'):
        # SNMPv2c: community-based bulk request
        errorIndication, errorStatus, errorIndex, varBindTable = self.cmdGen.bulkCmd(
            cmdgen.CommunityData(self.community),
            cmdgen.UdpTransportTarget((self.host, self.port)),
            non_repeaters,
            max_repetitions,
            *oid
        )
    else:
        # Bulk request are not available with SNMP version 1
        return []
    return self.__bulk_result__(errorIndication, errorStatus, errorIndex, varBindTable)
def load(self, config):
    """Load the static server list from the configuration file.

    Each server is described by numbered option groups
    (server_1_name, server_1_port, ...). Returns the (possibly empty)
    list of server dicts.
    """
    server_list = []

    if config is None:
        logger.debug("No configuration file available. Cannot load server list.")
    elif not config.has_section(self._section):
        logger.warning("No [%s] section in the configuration file. Cannot load server list." % self._section)
    else:
        logger.info("Start reading the [%s] section in the configuration file" % self._section)
        for index in range(1, 256):
            prefix = 'server_%s_' % str(index)
            # Read the per-server options; 'name' is mandatory
            server = {opt: config.get_value(self._section, '%s%s' % (prefix, opt))
                      for opt in ['name', 'port', 'alias']}
            if server['name'] is None:
                continue
            # Fill in the optional information with defaults
            if server['port'] is None:
                server['port'] = '61209'
            server['username'] = 'glances'
            # By default, try empty (aka no) password
            server['password'] = ''
            try:
                server['ip'] = gethostbyname(server['name'])
            except gaierror as e:
                logger.error("Cannot get IP address for server %s (%s)" % (server['name'], e))
                continue
            server['key'] = server['name'] + ':' + server['port']
            # Default status is 'UNKNOWN'
            server['status'] = 'UNKNOWN'
            # Server type is 'STATIC'
            server['type'] = 'STATIC'
            # Add the server to the list
            logger.debug("Add server %s to the static list" % server['name'])
            server_list.append(server)

    # Server list loaded
    logger.info("%s server(s) loaded from the configuration file" % len(server_list))
    logger.debug("Static server list: %s" % server_list)

    return server_list
def add_server(self, name, ip, port):
    """Add a new server to the list."""
    entry = {}
    # Zeroconf name carries both hostname and port
    entry['key'] = name
    # Short name (hostname only)
    entry['name'] = name.split(':')[0]
    # IP address seen by the client
    entry['ip'] = ip
    # TCP port
    entry['port'] = port
    # Default credentials: username 'glances', empty password
    entry['username'] = 'glances'
    entry['password'] = ''
    # Server status: 'UNKNOWN', 'OFFLINE', 'ONLINE', 'PROTECTED'
    entry['status'] = 'UNKNOWN'
    # Server type: 'STATIC' or 'DYNAMIC'
    entry['type'] = 'DYNAMIC'
    self._server_list.append(entry)
    logger.debug("Updated servers list (%s servers): %s" % (len(self._server_list), self._server_list))
def remove_server(self, name):
    """Remove a server from the dict.

    :param name: the 'key' of the server to remove
    """
    # Bug fix: iterate over a shallow copy of the list. Removing an
    # element from a list while iterating the list itself makes the
    # iterator skip the element that follows each removal.
    for server in self._server_list[:]:
        if server['key'] == name:
            try:
                self._server_list.remove(server)
                logger.debug("Remove server %s from the list" % name)
                logger.debug("Updated servers list (%s servers): %s" % (
                    len(self._server_list), self._server_list))
            except ValueError:
                logger.error(
                    "Cannot remove server %s from the list" % name)
def add_service(self, zeroconf, srv_type, srv_name):
    """Method called when a new Zeroconf client is detected.

    Return True if the zeroconf client is a Glances server
    Note: the return code will never be used
    """
    # Ignore anything that is not a Glances service announcement
    if srv_type != zeroconf_type:
        return False

    logger.debug("Check new Zeroconf server: %s / %s" % (srv_type, srv_name))
    info = zeroconf.get_service_info(srv_type, srv_name)
    if not info:
        logger.warning(
            "New Glances server detected, but Zeroconf info failed to be grabbed")
        return True

    # Resolve address/port and register the server in the global dict
    new_server_ip = socket.inet_ntoa(info.address)
    new_server_port = info.port
    self.servers.add_server(srv_name, new_server_ip, new_server_port)
    logger.info("New Glances server detected (%s from %s:%s)" %
                (srv_name, new_server_ip, new_server_port))
    return True
def remove_service(self, zeroconf, srv_type, srv_name):
    """Remove the server from the list."""
    # Drop the server from the shared list, then trace the removal
    self.servers.remove_server(srv_name)
    logger.info(
        "Glances server %s removed from the autodetect list" % srv_name)
def find_active_ip_address():
    """Try to find the active IP addresses."""
    import netifaces

    # The interface that carries the default IPv4 gateway is the active one
    default_gateway = netifaces.gateways()['default'][netifaces.AF_INET]
    active_itf = default_gateway[1]
    # First IPv4 address bound to that interface
    return netifaces.ifaddresses(active_itf)[netifaces.AF_INET][0]['addr']
def compress(func):
    """Compress result with deflate algorithm if the client ask for it."""
    def _deflate(payload, level=6):
        """Compress given data using the DEFLATE algorithm"""
        compressor = zlib.compressobj(level,
                                      zlib.DEFLATED,
                                      zlib.MAX_WBITS,
                                      zlib.DEF_MEM_LEVEL,
                                      zlib.Z_DEFAULT_STRATEGY)
        return compressor.compress(b(payload)) + compressor.flush()

    def wrapper(*args, **kwargs):
        """Wrapper that take one function and return the compressed result."""
        ret = func(*args, **kwargs)
        logger.debug('Receive {} {} request with header: {}'.format(
            request.method, request.url,
            ['{}: {}'.format(h, request.headers.get(h)) for h in request.headers.keys()]
        ))
        # Honour the client's Accept-Encoding preference
        if 'deflate' in request.headers.get('Accept-Encoding', ''):
            response.headers['Content-Encoding'] = 'deflate'
            return _deflate(ret)
        response.headers['Content-Encoding'] = 'identity'
        return ret

    return wrapper
def load_config(self, config):
    """Load the outputs section of the configuration file.

    Reads 'max_processes_display' from the [outputs] section (if any).
    """
    # Limit the number of processes to display in the WebUI
    if config is not None and config.has_section('outputs'):
        logger.debug('Read number of processes to display in the WebUI')
        n = config.get_value('outputs', 'max_processes_display', default=None)
        logger.debug('Number of processes to display in the WebUI: {}'.format(n))
        # NOTE(review): 'n' is read and logged but never stored on the
        # instance — confirm whether it should be saved (e.g. as an
        # attribute) for later use by the WebUI.
def main():
    """Main entry point for Glances.

    Select the mode (standalone, client or server)
    Run it...
    """
    # Trap CTRL-C so Glances can shut down cleanly
    signal.signal(signal.SIGINT, __signal_handler)

    # Trace Glances and runtime versions
    logger.info('Start Glances {}'.format(__version__))
    logger.info('{} {} and psutil {} detected'.format(
        platform.python_implementation(),
        platform.python_version(),
        psutil_version))

    # The main instance is shared with the signal handler via a global
    global core
    core = GlancesMain()

    # Run with the user configuration and command-line arguments
    start(config=core.get_config(), args=core.get_args())