<SYSTEM_TASK:> r"""Get a list of config file paths. <END_TASK> <USER_TASK:> Description: def config_file_paths(self): r"""Get a list of config file paths. The list is built taking into account of the OS, priority and location. * custom path: /path/to/glances * Linux, SunOS: ~/.config/glances, /etc/glances * *BSD: ~/.config/glances, /usr/local/etc/glances * macOS: ~/Library/Application Support/glances, /usr/local/etc/glances * Windows: %APPDATA%\glances The config file will be searched in the following order of priority: * /path/to/file (via -C flag) * user's home directory (per-user settings) * system-wide directory (system-wide settings) """
paths = [] if self.config_dir: paths.append(self.config_dir) paths.append(os.path.join(user_config_dir(), self.config_filename)) paths.append(os.path.join(system_config_dir(), self.config_filename)) return paths
<SYSTEM_TASK:> Return the configuration as a dict. <END_TASK>
<USER_TASK:> Description:
def as_dict(self):
    """Return the configuration as a dict."""
    dictionary = {}

    for section in self.parser.sections():
        dictionary[section] = {}
        for option in self.parser.options(section):
            dictionary[section][option] = self.parser.get(section, option)

    return dictionary
<SYSTEM_TASK:> If the option did not exist, create a default value. <END_TASK>
<USER_TASK:> Description:
def set_default(self, section, option, default):
    """If the option did not exist, create a default value."""
    if not self.parser.has_option(section, option):
        self.parser.set(section, option, default)
<SYSTEM_TASK:> Get the value of an option, if it exists. <END_TASK>
<USER_TASK:> Description:
def get_value(self, section, option, default=None):
    """Get the value of an option, if it exists.

    If it does not exist, return the default value.

    It allows the user to define dynamic configuration keys (see issue #1204).
    A dynamic value should start and end with the ` char.
    Example: prefix=`hostname`
    """
    ret = default
    try:
        ret = self.parser.get(section, option)
    except NoOptionError:
        pass

    # Search a substring `foo` and replace it by the result of its exec
    if ret is not None:
        try:
            match = self.re_pattern.findall(ret)
            for m in match:
                ret = ret.replace(m, system_exec(m[1:-1]))
        except TypeError:
            pass

    return ret
<SYSTEM_TASK:> Get the int value of an option, if it exists. <END_TASK>
<USER_TASK:> Description:
def get_int_value(self, section, option, default=0):
    """Get the int value of an option, if it exists."""
    try:
        return self.parser.getint(section, option)
    except NoOptionError:
        return int(default)
<SYSTEM_TASK:> Get the float value of an option, if it exists. <END_TASK>
<USER_TASK:> Description:
def get_float_value(self, section, option, default=0.0):
    """Get the float value of an option, if it exists."""
    try:
        return self.parser.getfloat(section, option)
    except NoOptionError:
        return float(default)
<SYSTEM_TASK:> Get the bool value of an option, if it exists. <END_TASK>
<USER_TASK:> Description:
def get_bool_value(self, section, option, default=True):
    """Get the bool value of an option, if it exists."""
    try:
        return self.parser.getboolean(section, option)
    except NoOptionError:
        return bool(default)
<SYSTEM_TASK:> Return the event position, if it exists. <END_TASK>
<USER_TASK:> Description:
def __event_exist(self, event_type):
    """Return the event position, if it exists.

    An event exists if:
    * its end date is < 0
    * its event_type matches

    Return -1 if the item is not found.
    """
    for i in range(self.len()):
        if self.events_list[i][1] < 0 and self.events_list[i][3] == event_type:
            return i
    return -1
<SYSTEM_TASK:> Return the process sort key. <END_TASK>
<USER_TASK:> Description:
def get_event_sort_key(self, event_type):
    """Return the process sort key."""
    # Process sort depending on alert type
    if event_type.startswith("MEM"):
        # Sort TOP process by memory_percent
        ret = 'memory_percent'
    elif event_type.startswith("CPU_IOWAIT"):
        # Sort TOP process by io_counters (only for Linux OS)
        ret = 'io_counters'
    else:
        # Default sort is...
        ret = 'cpu_percent'
    return ret
<SYSTEM_TASK:> Define the process auto sort key from the alert type. <END_TASK>
<USER_TASK:> Description:
def set_process_sort(self, event_type):
    """Define the process auto sort key from the alert type."""
    if glances_processes.auto_sort:
        glances_processes.sort_key = self.get_event_sort_key(event_type)
<SYSTEM_TASK:> Add a new item to the logs list. <END_TASK>
<USER_TASK:> Description:
def add(self, event_state, event_type, event_value, proc_list=None, proc_desc="", peak_time=6):
    """Add a new item to the logs list.

    If the event is a new one, add it at the beginning of the list.
    If the event is not a new one, update the list.
    If the event duration is < peak_time, then the alert is not set.
    """
    proc_list = proc_list or glances_processes.getlist()

    # Add or update the log
    event_index = self.__event_exist(event_type)
    if event_index < 0:
        # Event did not exist, add it
        self._create_event(event_state, event_type, event_value,
                           proc_list, proc_desc, peak_time)
    else:
        # Event exists, update it
        self._update_event(event_index, event_state, event_type, event_value,
                           proc_list, proc_desc, peak_time)

    return self.len()
<SYSTEM_TASK:> Add a new item in the log list. <END_TASK>
<USER_TASK:> Description:
def _create_event(self, event_state, event_type, event_value, proc_list, proc_desc, peak_time):
    """Add a new item in the log list.

    The item is added only if the criticality (event_state) is WARNING or CRITICAL.
    """
    if event_state == "WARNING" or event_state == "CRITICAL":
        # Define the automatic process sort key
        self.set_process_sort(event_type)

        # Create the new log item
        # Time is stored in Epoch format
        # Epoch -> DMYHMS = datetime.fromtimestamp(epoch)
        item = [
            time.mktime(datetime.now().timetuple()),  # START DATE
            -1,  # END DATE
            event_state,  # STATE: WARNING|CRITICAL
            event_type,  # TYPE: CPU, LOAD, MEM...
            event_value,  # MAX
            event_value,  # AVG
            event_value,  # MIN
            event_value,  # SUM
            1,  # COUNT
            [],  # TOP 3 PROCESS LIST
            proc_desc,  # MONITORED PROCESSES DESC
            glances_processes.sort_key]  # TOP PROCESS SORT KEY

        # Add the item to the list
        self.events_list.insert(0, item)

        # Limit the list to 'events_max' items
        if self.len() > self.events_max:
            self.events_list.pop()

        return True
    else:
        return False
<SYSTEM_TASK:> Update an event in the list. <END_TASK>
<USER_TASK:> Description:
def _update_event(self, event_index, event_state, event_type, event_value, proc_list, proc_desc, peak_time):
    """Update an event in the list."""
    if event_state == "OK" or event_state == "CAREFUL":
        # Reset the automatic process sort key
        self.reset_process_sort()

        # Set the end of the event
        endtime = time.mktime(datetime.now().timetuple())
        if endtime - self.events_list[event_index][0] > peak_time:
            # If the event is > peak_time seconds, close it
            self.events_list[event_index][1] = endtime
        else:
            # If the event is <= peak_time seconds, ignore it
            self.events_list.remove(self.events_list[event_index])
    else:
        # Update the item
        self.set_process_sort(event_type)

        # State
        if event_state == "CRITICAL":
            self.events_list[event_index][2] = event_state

        # Min value
        self.events_list[event_index][6] = min(self.events_list[event_index][6],
                                               event_value)
        # Max value
        self.events_list[event_index][4] = max(self.events_list[event_index][4],
                                               event_value)
        # Average value
        self.events_list[event_index][7] += event_value
        self.events_list[event_index][8] += 1
        self.events_list[event_index][5] = (self.events_list[event_index][7] /
                                            self.events_list[event_index][8])

        # TOP PROCESS LIST (only for CRITICAL ALERT)
        if event_state == "CRITICAL":
            events_sort_key = self.get_event_sort_key(event_type)

            # Sort the current process list to retrieve the TOP 3 processes
            self.events_list[event_index][9] = sort_stats(proc_list, events_sort_key)[0:3]
            self.events_list[event_index][11] = events_sort_key

        # MONITORED PROCESSES DESC
        self.events_list[event_index][10] = proc_desc

    return True
<SYSTEM_TASK:> Clean the logs list by deleting finished items. <END_TASK>
<USER_TASK:> Description:
def clean(self, critical=False):
    """Clean the logs list by deleting finished items.

    By default, only delete WARNING messages.
    If critical is True, also delete CRITICAL messages.
    """
    # Create a new clean list
    clean_events_list = []
    while self.len() > 0:
        item = self.events_list.pop()
        if item[1] < 0 or (not critical and item[2].startswith("CRITICAL")):
            clean_events_list.insert(0, item)
    # The list is now the clean one
    self.events_list = clean_events_list
    return self.len()
<SYSTEM_TASK:> Check if SNMP is available on the server. <END_TASK>
<USER_TASK:> Description:
def check_snmp(self):
    """Check if SNMP is available on the server."""
    # Import the SNMP client class
    from glances.snmp import GlancesSNMPClient

    # Create an instance of the SNMP client
    clientsnmp = GlancesSNMPClient(host=self.args.client,
                                   port=self.args.snmp_port,
                                   version=self.args.snmp_version,
                                   community=self.args.snmp_community,
                                   user=self.args.snmp_user,
                                   auth=self.args.snmp_auth)

    # If we cannot grab the hostname, then exit...
    ret = clientsnmp.get_by_oid("1.3.6.1.2.1.1.5.0") != {}
    if ret:
        # Get the OS name (need to grab the good OID...)
        oid_os_name = clientsnmp.get_by_oid("1.3.6.1.2.1.1.1.0")
        try:
            self.system_name = self.get_system_name(oid_os_name['1.3.6.1.2.1.1.1.0'])
            logger.info("SNMP system name detected: {}".format(self.system_name))
        except KeyError:
            self.system_name = None
            logger.warning("Cannot detect SNMP system name")

    return ret
<SYSTEM_TASK:> Get the short OS name from the OS name OID string. <END_TASK>
<USER_TASK:> Description:
def get_system_name(self, oid_system_name):
    """Get the short OS name from the OS name OID string."""
    short_system_name = None

    if oid_system_name == '':
        return short_system_name

    # Find the short name in the oid_to_short_system_name dict
    for r, v in iteritems(oid_to_short_system_name):
        if re.search(r, oid_system_name):
            short_system_name = v
            break

    return short_system_name
<SYSTEM_TASK:> RAID alert messages. <END_TASK>
<USER_TASK:> Description:
def raid_alert(self, status, used, available, type):
    """RAID alert messages.

    [available/used] means that ideally the array may have _available_
    devices, whereas _used_ devices are currently in use.
    When used >= available, the array is in good shape.
    """
    if type == 'raid0':
        return 'OK'
    if status == 'inactive':
        return 'CRITICAL'
    if used is None or available is None:
        return 'DEFAULT'
    elif used < available:
        return 'WARNING'
    return 'OK'
<SYSTEM_TASK:> Grab plugin's stats. <END_TASK>
<USER_TASK:> Description:
def run(self):
    """Grab plugin's stats.

    Infinite loop; should be stopped by calling the stop() method.
    """
    if import_error_tag:
        self.stop()
        return False

    for k, v in iteritems(self.OPENSTACK_API_METADATA):
        r_url = '{}/{}'.format(self.OPENSTACK_API_URL, v)
        try:
            # Local request, a timeout of 3 seconds is OK
            r = requests.get(r_url, timeout=3)
        except Exception as e:
            logger.debug('cloud plugin - Cannot connect to the OpenStack metadata API {}: {}'.format(r_url, e))
            break
        else:
            if r.ok:
                self._stats[k] = to_ascii(r.content)

    return True
<SYSTEM_TASK:> Return the AMPs process list according to the amp_value. <END_TASK>
<USER_TASK:> Description:
def _build_amps_list(self, amp_value, processlist):
    """Return the AMPs process list according to the amp_value.

    Search application monitored processes by a regular expression.
    """
    ret = []
    try:
        # Search in both cmdline and name (for kernel threads, see #1261)
        for p in processlist:
            add_it = False
            if re.search(amp_value.regex(), p['name']) is not None:
                add_it = True
            else:
                for c in p['cmdline']:
                    if re.search(amp_value.regex(), c) is not None:
                        add_it = True
                        break
            if add_it:
                ret.append({'pid': p['pid'],
                            'cpu_percent': p['cpu_percent'],
                            'memory_percent': p['memory_percent']})
    except (TypeError, KeyError) as e:
        logger.debug("Cannot build AMPs list ({})".format(e))
    return ret
<SYSTEM_TASK:> Main loop for the CLI. <END_TASK>
<USER_TASK:> Description:
def __serve_forever(self):
    """Main loop for the CLI.

    Return True if we should continue (no exit key has been pressed).
    """
    # Start a counter used to compute the time needed
    # to update and export the stats
    counter = Counter()

    # Update stats
    self.stats.update()
    logger.debug('Stats updated in {} seconds'.format(counter.get()))

    # Export stats
    counter_export = Counter()
    self.stats.export(self.stats)
    logger.debug('Stats exported in {} seconds'.format(counter_export.get()))

    # Patch for issue1326 to avoid a refresh < 0
    adapted_refresh = self.refresh_time - counter.get()
    adapted_refresh = adapted_refresh if adapted_refresh > 0 else 0

    # Display stats and wait refresh_time - counter
    if not self.quiet:
        # The update function returns True if an exit key ('q' or 'ESC')
        # has been pressed.
        ret = not self.screen.update(self.stats, duration=adapted_refresh)
    else:
        # Nothing is displayed;
        # break should be done via a signal (CTRL-C)
        time.sleep(adapted_refresh)
        ret = True

    return ret
<SYSTEM_TASK:> Wrapper to the serve_forever function. <END_TASK>
<USER_TASK:> Description:
def serve_forever(self):
    """Wrapper to the serve_forever function."""
    loop = True
    while loop:
        loop = self.__serve_forever()
    self.end()
<SYSTEM_TASK:> End of the standalone CLI. <END_TASK>
<USER_TASK:> Description:
def end(self):
    """End of the standalone CLI."""
    if not self.quiet:
        self.screen.end()

    # Exit from export modules
    self.stats.end()

    # Check Glances version versus the PyPI one
    if self.outdated.is_outdated():
        print("You are using Glances version {}, however version {} is available.".format(
            self.outdated.installed_version(), self.outdated.latest_version()))
        print("You should consider upgrading using: pip install --upgrade glances")
<SYSTEM_TASK:> Encode the plain_password with the salt of the hashed_password. <END_TASK>
<USER_TASK:> Description:
def check_password(self, hashed_password, plain_password):
    """Encode the plain_password with the salt of the hashed_password.

    Return the comparison with the encrypted_password.
    """
    salt, encrypted_password = hashed_password.split('$')
    re_encrypted_password = self.get_hash(salt, plain_password)
    return encrypted_password == re_encrypted_password
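For illustration, here is a minimal standalone sketch of the same 'salt$hash' storage scheme, assuming a SHA-256-based get_hash helper (the actual get_hash/hash_password implementations may differ):

import hashlib
import uuid

def get_hash(salt, plain_password):
    # Illustrative only: hash the salt concatenated with the password
    return hashlib.sha256(salt.encode() + plain_password.encode()).hexdigest()

def hash_password(plain_password):
    # Store as 'salt$hash' so the salt can be recovered at check time
    salt = uuid.uuid4().hex
    return salt + '$' + get_hash(salt, plain_password)

stored = hash_password('secret')
salt, digest = stored.split('$')
assert get_hash(salt, 'secret') == digest  # correct password matches
assert get_hash(salt, 'wrong') != digest   # wrong password is rejected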
<SYSTEM_TASK:> Get the password from a Glances client or server. <END_TASK>
<USER_TASK:> Description:
def get_password(self, description='', confirm=False, clear=False):
    """Get the password from a Glances client or server.

    For a Glances server, get the password (confirm=True, clear=False):
    1) from the password file (if it exists)
    2) from the CLI
    Optionally: save the password to a file (hashed with salt + SHA-256)

    For a Glances client, get the password (confirm=False, clear=True):
    1) from the CLI
    2) the password is hashed with SHA-256 (only the SHA string transits
       through the network)
    """
    if os.path.exists(self.password_file) and not clear:
        # If the password file exists then use it
        logger.info("Read password from file {}".format(self.password_file))
        password = self.load_password()
    else:
        # password_sha256 is the plain SHA-256 password
        # password_hashed is the salt + SHA-256 password
        password_sha256 = self.sha256_hash(getpass.getpass(description))
        password_hashed = self.hash_password(password_sha256)
        if confirm:
            # password_confirm is the clear password (only used to compare)
            password_confirm = self.sha256_hash(getpass.getpass('Password (confirm): '))

            if not self.check_password(password_hashed, password_confirm):
                logger.critical("Sorry, passwords do not match. Exit.")
                sys.exit(1)

        # Return the plain SHA-256 or the salted password
        if clear:
            password = password_sha256
        else:
            password = password_hashed

        # Save the hashed password to the password file
        if not clear:
            save_input = input('Do you want to save the password? [Yes/No]: ')
            if len(save_input) > 0 and save_input[0].upper() == 'Y':
                self.save_password(password_hashed)

    return password
<SYSTEM_TASK:> Save the hashed password to the Glances folder. <END_TASK>
<USER_TASK:> Description:
def save_password(self, hashed_password):
    """Save the hashed password to the Glances folder."""
    # Create the glances directory
    safe_makedirs(self.password_dir)

    # Create/overwrite the password file
    with open(self.password_file, 'wb') as file_pwd:
        file_pwd.write(b(hashed_password))
<SYSTEM_TASK:> Load the hashed password from the Glances folder. <END_TASK>
<USER_TASK:> Description:
def load_password(self):
    """Load the hashed password from the Glances folder."""
    # Read the password file, if it exists
    with open(self.password_file, 'r') as file_pwd:
        hashed_password = file_pwd.read()

    return hashed_password
<SYSTEM_TASK:> Update Wifi stats using the input method. <END_TASK>
<USER_TASK:> Description:
def update(self):
    """Update Wifi stats using the input method.

    Stats is a list of dict (one dict per hotspot).

    :returns: list -- Stats is a list of dict (hotspot)
    """
    # Init new stats
    stats = self.get_init_value()

    # Exit if we cannot grab the stats
    if import_error_tag:
        return stats

    if self.input_method == 'local':
        # Update stats using the standard system lib

        # Grab network interface stats using the psutil net_io_counters method
        try:
            netiocounters = psutil.net_io_counters(pernic=True)
        except UnicodeDecodeError:
            return stats

        for net in netiocounters:
            # Do not take hidden interfaces into account
            if self.is_hide(net):
                continue

            # Grab the stats using the Wifi Python lib
            try:
                wifi_cells = Cell.all(net)
            except InterfaceError as e:
                # Not a Wifi interface
                logger.debug("WIFI plugin: Scan InterfaceError ({})".format(e))
            except Exception as e:
                # Other error
                logger.debug("WIFI plugin: Cannot grab cell stats ({})".format(e))
            else:
                for wifi_cell in wifi_cells:
                    hotspot = {
                        'key': self.get_key(),
                        'ssid': wifi_cell.ssid,
                        'signal': wifi_cell.signal,
                        'quality': wifi_cell.quality,
                        'encrypted': wifi_cell.encrypted,
                        'encryption_type': wifi_cell.encryption_type if wifi_cell.encrypted else None
                    }
                    # Add the hotspot to the list
                    stats.append(hotspot)

    elif self.input_method == 'snmp':
        # Update stats using SNMP
        # Not implemented yet
        pass

    # Update the stats
    self.stats = stats

    return self.stats
<SYSTEM_TASK:> Overwrite the default get_alert method. <END_TASK>
<USER_TASK:> Description:
def get_alert(self, value):
    """Overwrite the default get_alert method.

    Alert is on signal quality where lower is better...

    :returns: string -- Signal alert
    """
    ret = 'OK'
    try:
        if value <= self.get_limit('critical', stat_name=self.plugin_name):
            ret = 'CRITICAL'
        elif value <= self.get_limit('warning', stat_name=self.plugin_name):
            ret = 'WARNING'
        elif value <= self.get_limit('careful', stat_name=self.plugin_name):
            ret = 'CAREFUL'
    except (TypeError, KeyError):
        # Catch TypeError for issue1373
        ret = 'DEFAULT'
    return ret
<SYSTEM_TASK:> Get the history as a dict of list. <END_TASK>
<USER_TASK:> Description:
def get(self, nb=0):
    """Get the history as a dict of list."""
    return {i: self.stats_history[i].history_raw(nb=nb) for i in self.stats_history}
<SYSTEM_TASK:> Load AMP parameters from the configuration file. <END_TASK>
<USER_TASK:> Description:
def load_config(self, config):
    """Load AMP parameters from the configuration file."""
    # Read the AMP configuration.
    # For example, the AMP foo should have the following section:
    #
    # [foo]
    # enable=true
    # regex=\/usr\/bin\/nginx
    # refresh=60
    #
    # and optionally:
    #
    # one_line=false
    # option1=opt1
    # ...
    amp_section = 'amp_' + self.amp_name
    if (hasattr(config, 'has_section') and
            config.has_section(amp_section)):
        logger.debug("AMP - {}: Load configuration".format(self.NAME))
        for param, _ in config.items(amp_section):
            try:
                self.configs[param] = config.get_float_value(amp_section, param)
            except ValueError:
                self.configs[param] = config.get_value(amp_section, param).split(',')
                if len(self.configs[param]) == 1:
                    self.configs[param] = self.configs[param][0]
            logger.debug("AMP - {}: Load parameter: {} = {}".format(self.NAME, param, self.configs[param]))
    else:
        logger.debug("AMP - {}: Cannot find section {} in the configuration file".format(self.NAME, self.amp_name))
        return False

    # enable, regex and refresh are mandatory;
    # if not configured then the AMP is disabled
    if self.enable():
        for k in ['regex', 'refresh']:
            if k not in self.configs:
                logger.warning("AMP - {}: Cannot find configuration key {} in section {}".format(self.NAME, k, self.amp_name))
                self.configs['enable'] = 'false'
    else:
        logger.debug("AMP - {} is disabled".format(self.NAME))

    # Init the count to 0
    self.configs['count'] = 0

    return self.enable()
<SYSTEM_TASK:> Parse the decision tree and return the message. <END_TASK>
<USER_TASK:> Description:
def global_message():
    """Parse the decision tree and return the message.

    Note: the message corresponds to the current thresholds values.
    """
    # Compute the weight for each item in the tree
    current_thresholds = glances_thresholds.get()
    for i in tree:
        i['weight'] = sum([current_thresholds[t].value() for t in i['thresholds']
                           if t in current_thresholds])
    themax = max(tree, key=lambda d: d['weight'])
    if themax['weight'] >= themax['thresholds_min']:
        # Check if the weight is >= to the minimal threshold value
        return themax['msg']
    else:
        return tree[0]['msg']
<SYSTEM_TASK:> Overwrite the default export method. <END_TASK>
<USER_TASK:> Description:
def get_export(self):
    """Overwrite the default export method.

    - Only exports containers
    - The key is the first container name
    """
    ret = []
    try:
        ret = self.stats['containers']
    except KeyError as e:
        logger.debug("docker plugin - Docker export error {}".format(e))
    return ret
<SYSTEM_TASK:> Connect to the Docker server. <END_TASK>
<USER_TASK:> Description:
def connect(self):
    """Connect to the Docker server."""
    try:
        ret = docker.from_env()
    except Exception as e:
        logger.error("docker plugin - Cannot connect to Docker ({})".format(e))
        ret = None

    return ret
<SYSTEM_TASK:> Return the container CPU usage. <END_TASK>
<USER_TASK:> Description:
def get_docker_cpu(self, container_id, all_stats):
    """Return the container CPU usage.

    Input: id is the full container id
           all_stats is the output of the stats method of the Docker API
    Output: a dict {'total': 1.49}
    """
    cpu_new = {}
    ret = {'total': 0.0}

    # Read the stats
    # For each container, you will find a pseudo-file cpuacct.stat,
    # containing the CPU usage accumulated by the processes of the container.
    # Those times are expressed in ticks of 1/USER_HZ of a second.
    # On x86 systems, USER_HZ is 100.
    try:
        cpu_new['total'] = all_stats['cpu_stats']['cpu_usage']['total_usage']
        cpu_new['system'] = all_stats['cpu_stats']['system_cpu_usage']
        cpu_new['nb_core'] = len(all_stats['cpu_stats']['cpu_usage']['percpu_usage'] or [])
    except KeyError as e:
        # all_stats do not have CPU information
        logger.debug("docker plugin - Cannot grab CPU usage for container {} ({})".format(container_id, e))
        logger.debug(all_stats)
    else:
        # Previous CPU stats are stored in the cpu_old variable
        if not hasattr(self, 'cpu_old'):
            # First call, init the cpu_old variable
            self.cpu_old = {}
            try:
                self.cpu_old[container_id] = cpu_new
            except (IOError, UnboundLocalError):
                pass

        if container_id not in self.cpu_old:
            try:
                self.cpu_old[container_id] = cpu_new
            except (IOError, UnboundLocalError):
                pass
        else:
            cpu_delta = float(cpu_new['total'] - self.cpu_old[container_id]['total'])
            system_delta = float(cpu_new['system'] - self.cpu_old[container_id]['system'])
            if cpu_delta > 0.0 and system_delta > 0.0:
                ret['total'] = (cpu_delta / system_delta) * float(cpu_new['nb_core']) * 100

            # Save stats to compute the next stats
            self.cpu_old[container_id] = cpu_new

    # Return the stats
    return ret
<SYSTEM_TASK:> Return the container MEMORY. <END_TASK>
<USER_TASK:> Description:
def get_docker_memory(self, container_id, all_stats):
    """Return the container MEMORY.

    Input: id is the full container id
           all_stats is the output of the stats method of the Docker API
    Output: a dict {'rss': 1015808, 'cache': 356352, 'usage': ..., 'max_usage': ...}
    """
    ret = {}
    # Read the stats
    try:
        # Do not exist anymore with Docker 1.11 (issue #848)
        # ret['rss'] = all_stats['memory_stats']['stats']['rss']
        # ret['cache'] = all_stats['memory_stats']['stats']['cache']
        ret['usage'] = all_stats['memory_stats']['usage']
        ret['limit'] = all_stats['memory_stats']['limit']
        ret['max_usage'] = all_stats['memory_stats']['max_usage']
    except (KeyError, TypeError) as e:
        # all_stats do not have MEM information
        logger.debug("docker plugin - Cannot grab MEM usage for container {} ({})".format(container_id, e))
        logger.debug(all_stats)
    # Return the stats
    return ret
<SYSTEM_TASK:> Build the container name. <END_TASK>
<USER_TASK:> Description:
def _msg_name(self, container, max_width):
    """Build the container name."""
    name = container['name']
    if len(name) > max_width:
        name = '_' + name[-max_width + 1:]
    else:
        name = name[:max_width]
    return ' {:{width}}'.format(name, width=max_width)
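A quick standalone check of the truncation rule (same logic as above, lifted out of the class):

def msg_name(name, max_width):
    # Keep the tail of over-long names and mark the cut with a leading '_'
    if len(name) > max_width:
        name = '_' + name[-max_width + 1:]
    else:
        name = name[:max_width]
    return ' {:{width}}'.format(name, width=max_width)

print(repr(msg_name('nginx', 10)))                       # ' nginx     '
print(repr(msg_name('a-very-long-container-name', 10)))  # ' _iner-name'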
<SYSTEM_TASK:> Return True if the plugin is enabled. <END_TASK>
<USER_TASK:> Description:
def is_enable(self, plugin_name=None):
    """Return True if the plugin is enabled."""
    if not plugin_name:
        plugin_name = self.plugin_name
    try:
        d = getattr(self.args, 'disable_' + plugin_name)
    except AttributeError:
        return True
    else:
        return d is False
<SYSTEM_TASK:> Get the trend relative to the last nb values. <END_TASK>
<USER_TASK:> Description:
def get_trend(self, item, nb=6):
    """Get the trend relative to the last nb values.

    The trend is the diff between the mean of the last nb values
    and the current one.
    """
    raw_history = self.get_raw_history(item=item, nb=nb)
    if raw_history is None or len(raw_history) < nb:
        return None
    last_nb = [v[1] for v in raw_history]
    return last_nb[-1] - mean(last_nb[:-1])
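The list comprehension above implies that each history entry is a (timestamp, value) pair; under that assumption, the arithmetic can be checked standalone:

from statistics import mean  # stand-in for the project's own mean helper

raw_history = [(0, 10), (1, 12), (2, 11), (3, 13), (4, 12), (5, 18)]
last_nb = [v[1] for v in raw_history]
trend = last_nb[-1] - mean(last_nb[:-1])
print(trend)  # 18 - mean(10, 12, 11, 13, 12) = 18 - 11.6 = 6.4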
<SYSTEM_TASK:> Return the stats object for a specific item in JSON format. <END_TASK>
<USER_TASK:> Description:
def get_stats_item(self, item):
    """Return the stats object for a specific item in JSON format.

    Stats should be a list of dict (processlist, network...).
    """
    if isinstance(self.stats, dict):
        try:
            return self._json_dumps({item: self.stats[item]})
        except KeyError as e:
            logger.error("Cannot get item {} ({})".format(item, e))
            return None
    elif isinstance(self.stats, list):
        try:
            # Source:
            # http://stackoverflow.com/questions/4573875/python-get-index-of-dictionary-item-in-list
            # But https://github.com/nicolargo/glances/issues/1401
            return self._json_dumps({item: list(map(itemgetter(item), self.stats))})
        except (KeyError, ValueError) as e:
            logger.error("Cannot get item {} ({})".format(item, e))
            return None
    else:
        return None
<SYSTEM_TASK:> Return the stats object for a specific item=value in JSON format. <END_TASK>
<USER_TASK:> Description:
def get_stats_value(self, item, value):
    """Return the stats object for a specific item=value in JSON format.

    Stats should be a list of dict (processlist, network...).
    """
    if not isinstance(self.stats, list):
        return None
    else:
        if value.isdigit():
            value = int(value)
        try:
            return self._json_dumps({value: [i for i in self.stats if i[item] == value]})
        except (KeyError, ValueError) as e:
            logger.error("Cannot get item({})=value({}) ({})".format(item, value, e))
            return None
<SYSTEM_TASK:> Update the stats views. <END_TASK>
<USER_TASK:> Description:
def update_views(self):
    """Update the stats views.

    The V of MVC: a dict of dict with the needed information to display the stats.

    Example for the stat xxx:
    'xxx': {'decoration': 'DEFAULT',
            'optional': False,
            'additional': False,
            'splittable': False}
    """
    ret = {}

    if (isinstance(self.get_raw(), list) and
            self.get_raw() is not None and
            self.get_key() is not None):
        # Stats are stored in a list of dict (ex: NETWORK, FS...)
        for i in self.get_raw():
            ret[i[self.get_key()]] = {}
            for key in listkeys(i):
                value = {'decoration': 'DEFAULT',
                         'optional': False,
                         'additional': False,
                         'splittable': False}
                ret[i[self.get_key()]][key] = value
    elif isinstance(self.get_raw(), dict) and self.get_raw() is not None:
        # Stats are stored in a dict (ex: CPU, LOAD...)
        for key in listkeys(self.get_raw()):
            value = {'decoration': 'DEFAULT',
                     'optional': False,
                     'additional': False,
                     'splittable': False}
            ret[key] = value

    self.views = ret

    return self.views
<SYSTEM_TASK:> Return the views object. <END_TASK>
<USER_TASK:> Description:
def get_views(self, item=None, key=None, option=None):
    """Return the views object.

    If key is None, return all the views for the current plugin;
    else if option is None, return the views for the specific key (all options);
    else return the views for the specific key/option.

    Specify item if the stats are stored in a dict of dict (ex: NETWORK, FS...).
    """
    if item is None:
        item_views = self.views
    else:
        item_views = self.views[item]

    if key is None:
        return item_views
    else:
        if option is None:
            return item_views[key]
        else:
            if option in item_views[key]:
                return item_views[key][option]
            else:
                return 'DEFAULT'
<SYSTEM_TASK:> Load limits from the configuration file, if it exists. <END_TASK>
<USER_TASK:> Description:
def load_limits(self, config):
    """Load limits from the configuration file, if it exists."""
    # By default, set the history length to 3 points per second during one day
    self._limits['history_size'] = 28800

    if not hasattr(config, 'has_section'):
        return False

    # Read the global section
    if config.has_section('global'):
        self._limits['history_size'] = config.get_float_value('global', 'history_size', default=28800)
        logger.debug("Load configuration key: {} = {}".format('history_size', self._limits['history_size']))

    # Read the plugin specific section
    if config.has_section(self.plugin_name):
        for level, _ in config.items(self.plugin_name):
            # Read limits
            limit = '_'.join([self.plugin_name, level])
            try:
                self._limits[limit] = config.get_float_value(self.plugin_name, level)
            except ValueError:
                self._limits[limit] = config.get_value(self.plugin_name, level).split(",")
            logger.debug("Load limit: {} = {}".format(limit, self._limits[limit]))

    return True
<SYSTEM_TASK:> Return the stat name with an optional header. <END_TASK>
<USER_TASK:> Description:
def get_stat_name(self, header=""):
    """Return the stat name with an optional header."""
    ret = self.plugin_name
    if header != "":
        ret += '_' + header
    return ret
<SYSTEM_TASK:> Return the alert status relative to a current value. <END_TASK>
<USER_TASK:> Description:
def get_alert(self, current=0, minimum=0, maximum=100, highlight_zero=True,
              is_max=False, header="", action_key=None, log=False):
    """Return the alert status relative to a current value.

    Use this function for minor stats.

    If current < CAREFUL of max then alert = OK
    If current > CAREFUL of max then alert = CAREFUL
    If current > WARNING of max then alert = WARNING
    If current > CRITICAL of max then alert = CRITICAL

    If highlight_zero is True then 0.0 is highlighted.

    If defined, 'header' is added between the plugin name and the status.
    Only useful for stats with several alert statuses.

    If defined, 'action_key' defines the key for the actions.
    By default, the action_key is equal to the header.

    If log=True then add a log if necessary;
    if log=False then do not log;
    if log=None then apply the config given in the conf file.
    """
    # Manage 0 (0.0) value if highlight_zero is not True
    if not highlight_zero and current == 0:
        return 'DEFAULT'

    # Compute the %
    try:
        value = (current * 100) / maximum
    except ZeroDivisionError:
        return 'DEFAULT'
    except TypeError:
        return 'DEFAULT'

    # Build the stat_name
    stat_name = self.get_stat_name(header=header)

    # Manage limits
    # If is_max is set then display the value in MAX
    ret = 'MAX' if is_max else 'OK'
    try:
        if value >= self.get_limit('critical', stat_name=stat_name):
            ret = 'CRITICAL'
        elif value >= self.get_limit('warning', stat_name=stat_name):
            ret = 'WARNING'
        elif value >= self.get_limit('careful', stat_name=stat_name):
            ret = 'CAREFUL'
        elif current < minimum:
            ret = 'CAREFUL'
    except KeyError:
        return 'DEFAULT'

    # Manage log
    log_str = ""
    if self.get_limit_log(stat_name=stat_name, default_action=log):
        # Add _LOG to the return string
        # so stats will be highlighted with a specific color
        log_str = "_LOG"
        # Add the log to the list
        glances_events.add(ret, stat_name.upper(), value)

    # Manage threshold
    self.manage_threshold(stat_name, ret)

    # Manage action
    self.manage_action(stat_name, ret.lower(), header, action_key)

    # Default is 'OK'
    return ret + log_str
<SYSTEM_TASK:> Manage the action for the current stat. <END_TASK>
<USER_TASK:> Description:
def manage_action(self, stat_name, trigger, header, action_key):
    """Manage the action for the current stat."""
    # Is there a command line for the current trigger?
    try:
        command, repeat = self.get_limit_action(trigger, stat_name=stat_name)
    except KeyError:
        # Reset the trigger
        self.actions.set(stat_name, trigger)
    else:
        # Define the action key for the stats dict;
        # if not defined, it is set to the header
        if action_key is None:
            action_key = header

        # A command line is available for the current alert
        # 1) Build the {{mustache}} dictionary
        if isinstance(self.get_stats_action(), list):
            # If the stats are stored in a list of dict (fs plugin for example),
            # return the dict for the current header
            mustache_dict = {}
            for item in self.get_stats_action():
                if item[self.get_key()] == action_key:
                    mustache_dict = item
                    break
        else:
            # Use the stats dict
            mustache_dict = self.get_stats_action()

        # 2) Run the action
        self.actions.run(stat_name, trigger, command, repeat,
                         mustache_dict=mustache_dict)
<SYSTEM_TASK:> Return True if the value is in the hide configuration list. <END_TASK>
<USER_TASK:> Description:
def is_hide(self, value, header=""):
    """Return True if the value is in the hide configuration list.

    The hide configuration list is defined in the glances.conf file.
    It is a comma-separated list of regexps.
    Example for diskio: hide=sda2,sda5,loop.*
    """
    # TODO: possible optimisation: create a list of compiled regexps
    return not all(j is None
                   for j in [re.match(i, value.lower())
                             for i in self.get_conf_value('hide', header=header)])
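The matching rule is easy to exercise on its own; a sketch with a hand-written hide list (in the real code the list comes from get_conf_value):

import re

hide = ['sda2', 'sda5', 'loop.*']  # as it would come from glances.conf

def is_hide(value):
    # Hidden if at least one pattern matches the lower-cased value
    return not all(re.match(i, value.lower()) is None for i in hide)

print(is_hide('loop0'))  # True: matched by 'loop.*'
print(is_hide('sda1'))   # False: no pattern matches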
<SYSTEM_TASK:> Return the alias name for the relative header, or None if it does not exist. <END_TASK>
<USER_TASK:> Description:
def has_alias(self, header):
    """Return the alias name for the relative header, or None if it does not exist."""
    try:
        # Force to lower case (issue #1126)
        return self._limits[self.plugin_name + '_' + header.lower() + '_' + 'alias'][0]
    except (KeyError, IndexError):
        # logger.debug("No alias found for {}".format(header))
        return None
<SYSTEM_TASK:> Return a dict with all the information needed to display the stat. <END_TASK>
<USER_TASK:> Description:
def get_stats_display(self, args=None, max_width=None):
    """Return a dict with all the information needed to display the stat.

    key     | description
    --------|----------------------------------------------------------
    display | Display the stat (True or False)
    msgdict | Message to display (list of dict [{'msg': msg, 'decoration': decoration}...])
    align   | Message position
    """
    display_curse = False

    if hasattr(self, 'display_curse'):
        display_curse = self.display_curse
    if hasattr(self, 'align'):
        align_curse = self._align

    if max_width is not None:
        ret = {'display': display_curse,
               'msgdict': self.msg_curse(args, max_width=max_width),
               'align': align_curse}
    else:
        ret = {'display': display_curse,
               'msgdict': self.msg_curse(args),
               'align': align_curse}

    return ret
<SYSTEM_TASK:> Return a dict describing a display line. <END_TASK>
<USER_TASK:> Description:
def curse_add_line(self, msg, decoration="DEFAULT", optional=False, additional=False, splittable=False):
    """Return a dict describing a display line.

    Where:
        msg: string
        decoration:
            DEFAULT: no decoration
            UNDERLINE: underline
            BOLD: bold
            TITLE: for stat title
            PROCESS: for process name
            STATUS: for process status
            NICE: for process niceness
            CPU_TIME: for process cpu time
            OK: Value is OK and not logged
            OK_LOG: Value is OK and logged
            CAREFUL: Value is CAREFUL and not logged
            CAREFUL_LOG: Value is CAREFUL and logged
            WARNING: Value is WARNING and not logged
            WARNING_LOG: Value is WARNING and logged
            CRITICAL: Value is CRITICAL and not logged
            CRITICAL_LOG: Value is CRITICAL and logged
        optional: True if the stat is optional (displayed only if space is available)
        additional: True if the stat is additional (displayed only if space is available after optional)
        splittable: the line can be split to fit on the screen (default is not)
    """
    return {'msg': msg,
            'decoration': decoration,
            'optional': optional,
            'additional': additional,
            'splittable': splittable}
<SYSTEM_TASK:> Make a nice human-readable string out of number. <END_TASK>
<USER_TASK:> Description:
def auto_unit(self, number, low_precision=False, min_symbol='K'):
    """Make a nice human-readable string out of number.

    The number of decimal places increases as the quantity approaches 1.

    CASE: 613421788        RESULT: 585M   low_precision: 585M
    CASE: 5307033647       RESULT: 4.94G  low_precision: 4.9G
    CASE: 44968414685      RESULT: 41.9G  low_precision: 41.9G
    CASE: 838471403472     RESULT: 781G   low_precision: 781G
    CASE: 9683209690677    RESULT: 8.81T  low_precision: 8.8T
    CASE: 1073741824       RESULT: 1024M  low_precision: 1024M
    CASE: 1181116006       RESULT: 1.10G  low_precision: 1.1G

    :low_precision: potentially returns fewer decimal places,
                    sacrificing precision for more readability.
    :min_symbol: do not abbreviate if number < min_symbol (default is K).
    """
    symbols = ('K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
    if min_symbol in symbols:
        symbols = symbols[symbols.index(min_symbol):]
    prefix = {
        'Y': 1208925819614629174706176,
        'Z': 1180591620717411303424,
        'E': 1152921504606846976,
        'P': 1125899906842624,
        'T': 1099511627776,
        'G': 1073741824,
        'M': 1048576,
        'K': 1024
    }

    for symbol in reversed(symbols):
        value = float(number) / prefix[symbol]
        if value > 1:
            decimal_precision = 0
            if value < 10:
                decimal_precision = 2
            elif value < 100:
                decimal_precision = 1
            if low_precision:
                if symbol in 'MK':
                    decimal_precision = 0
                else:
                    decimal_precision = min(1, decimal_precision)
            elif symbol in 'K':
                decimal_precision = 0
            return '{:.{decimal}f}{symbol}'.format(
                value, decimal=decimal_precision, symbol=symbol)

    return '{!s}'.format(number)
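The docstring cases can be verified with a condensed standalone copy of the method (the powers of 1024 are generated instead of hard-coded):

def auto_unit(number, low_precision=False, min_symbol='K'):
    all_symbols = ('K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
    symbols = all_symbols[all_symbols.index(min_symbol):]
    # 'K' -> 2**10, 'M' -> 2**20, ... same table as above
    prefix = {s: 1 << (10 * (i + 1)) for i, s in enumerate(all_symbols)}
    for symbol in reversed(symbols):
        value = float(number) / prefix[symbol]
        if value > 1:
            decimal_precision = 0
            if value < 10:
                decimal_precision = 2
            elif value < 100:
                decimal_precision = 1
            if low_precision:
                decimal_precision = 0 if symbol in 'MK' else min(1, decimal_precision)
            elif symbol in 'K':
                decimal_precision = 0
            return '{:.{d}f}{s}'.format(value, d=decimal_precision, s=symbol)
    return str(number)

print(auto_unit(613421788))   # 585M
print(auto_unit(5307033647))  # 4.94G
print(auto_unit(1073741824))  # 1024M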
<SYSTEM_TASK:> Return the trend message. <END_TASK>
<USER_TASK:> Description:
def trend_msg(self, trend, significant=1):
    """Return the trend message.

    Trends with an absolute value below 'significant' are shown as flat.
    """
    ret = '-'
    if trend is None:
        ret = ' '
    elif trend > significant:
        ret = '/'
    elif trend < -significant:
        ret = '\\'
    return ret
<SYSTEM_TASK:> Check if the plugin is enabled. <END_TASK>
<USER_TASK:> Description:
def _check_decorator(fct):
    """Check if the plugin is enabled."""
    def wrapper(self, *args, **kw):
        if self.is_enable():
            ret = fct(self, *args, **kw)
        else:
            ret = self.stats
        return ret
    return wrapper
<SYSTEM_TASK:> Write 'text' word-wrapped at self.width characters. <END_TASK>
<USER_TASK:> Description:
def _line(self, text, indent=0):
    """Write 'text' word-wrapped at self.width characters."""
    leading_space = ' ' * indent

    while len(leading_space) + len(text) > self.width:
        # The text is too wide; wrap if possible.

        # Find the rightmost space that would obey our width constraint and
        # that's not an escaped space.
        available_space = self.width - len(leading_space) - len(' $')
        space = available_space
        while True:
            space = text.rfind(' ', 0, space)
            if (space < 0 or
                    self._count_dollars_before_index(text, space) % 2 == 0):
                break

        if space < 0:
            # No such space; just use the first unescaped space we can find.
            space = available_space - 1
            while True:
                space = text.find(' ', space + 1)
                if (space < 0 or
                        self._count_dollars_before_index(text, space) % 2 == 0):
                    break
        if space < 0:
            # Give up on breaking.
            break

        self.output.write(leading_space + text[0:space] + ' $\n')
        text = text[space + 1:]

        # Subsequent lines are continuations, so indent them.
        leading_space = ' ' * (indent + 2)

    self.output.write(leading_space + text + '\n')
<SYSTEM_TASK:> Run a subcommand, quietly. Prints the full command on error. <END_TASK>
<USER_TASK:> Description:
def _run_command(self, cmdline):
    """Run a subcommand, quietly. Prints the full command on error."""
    try:
        if self.verbose:
            print(cmdline)
        subprocess.check_call(cmdline, shell=True)
    except subprocess.CalledProcessError:
        print('when running: ', cmdline)
        raise
<SYSTEM_TASK:> Returns a random integer that's avg on average, following a power law. <END_TASK>
<USER_TASK:> Description:
def paretoint(avg, alpha):
    """Returns a random integer that's avg on average, following a power law.

    alpha determines the shape of the power curve. alpha has to be larger
    than 1. The closer alpha is to 1, the higher the variation of the
    returned numbers.
    """
    return int(random.paretovariate(alpha) * avg / (alpha / (alpha - 1)))
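Because the mean of random.paretovariate(alpha) is alpha / (alpha - 1), dividing by that factor recenters the distribution on avg; a quick empirical check:

import random

def paretoint(avg, alpha):
    return int(random.paretovariate(alpha) * avg / (alpha / (alpha - 1)))

random.seed(42)
samples = [paretoint(100, alpha=3) for _ in range(100000)]
print(sum(samples) / len(samples))  # close to 100 (slightly low due to int() truncation)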
<SYSTEM_TASK:> Writes master build.ninja file, referencing all given subninjas. <END_TASK>
<USER_TASK:> Description:
def write_master_ninja(master_ninja, targets):
    """Writes master build.ninja file, referencing all given subninjas."""
    master_ninja.variable('cxx', 'c++')
    master_ninja.variable('ld', '$cxx')
    if sys.platform == 'darwin':
        master_ninja.variable('alink', 'libtool -static')
    else:
        master_ninja.variable('alink', 'ar rcs')
    master_ninja.newline()

    master_ninja.pool('link_pool', depth=4)
    master_ninja.newline()

    master_ninja.rule('cxx', description='CXX $out',
                      command='$cxx -MMD -MF $out.d $defines $includes $cflags -c $in -o $out',
                      depfile='$out.d', deps='gcc')
    master_ninja.rule('alink', description='ARCHIVE $out',
                      command='rm -f $out && $alink -o $out $in')
    master_ninja.rule('link', description='LINK $out', pool='link_pool',
                      command='$ld $ldflags -o $out $in $libs')
    master_ninja.rule('stamp', description='STAMP $out', command='touch $out')
    master_ninja.newline()

    for target in targets:
        master_ninja.subninja(target.ninja_file_path)
    master_ninja.newline()

    master_ninja.comment('Short names for targets.')
    for target in targets:
        if target.name != target.output:
            master_ninja.build(target.name, 'phony', target.output)
    master_ninja.newline()

    master_ninja.build('all', 'phony', [target.output for target in targets])
    master_ninja.default('all')
<SYSTEM_TASK:> Add the appropriate handlers to the web app. <END_TASK>
<USER_TASK:> Description:
def add_handlers(web_app, config):
    """Add the appropriate handlers to the web app."""
    base_url = web_app.settings['base_url']
    url = ujoin(base_url, config.page_url)
    assets_dir = config.assets_dir

    package_file = os.path.join(assets_dir, 'package.json')
    with open(package_file) as fid:
        data = json.load(fid)

    config.version = config.version or data['version']
    config.name = config.name or data['name']

    handlers = [
        # TODO Redirect to /tree
        (url + r'/?', NAppHandler, {'config': config, 'page': 'tree'}),
        (url + r"/tree%s" % path_regex, NAppHandler, {'config': config, 'page': 'tree'}),
        (url + r"/edit%s" % path_regex, NAppHandler, {'config': config, 'page': 'edit'}),
        (url + r"/view%s" % path_regex, NAppHandler, {'config': config, 'page': 'view'}),
        (url + r"/static/(.*)", FileFindHandler, {'path': assets_dir}),
    ]

    web_app.add_handlers(".*$", handlers)
<SYSTEM_TASK:> Read the lines from a file into a list. <END_TASK>
<USER_TASK:> Description:
def get_list_from_file(file_name):
    """Read the lines from a file into a list."""
    with open(file_name, mode='r', encoding='utf-8') as f1:
        lst = f1.readlines()
    return lst
<SYSTEM_TASK:> Append a line of text to a file. <END_TASK>
<USER_TASK:> Description:
def append_to_file(file_name, line_data):
    """Append a line of text to a file."""
    with open(file_name, mode='a', encoding='utf-8') as f1:
        f1.write(line_data)
        f1.write("\n")
<SYSTEM_TASK:> Determine if input contains negation words. <END_TASK>
<USER_TASK:> Description:
def negated(input_words, include_nt=True):
    """Determine if input contains negation words."""
    input_words = [str(w).lower() for w in input_words]
    neg_words = []
    neg_words.extend(NEGATE)
    for word in neg_words:
        if word in input_words:
            return True
    if include_nt:
        for word in input_words:
            if "n't" in word:
                return True
    if "least" in input_words:
        i = input_words.index("least")
        if i > 0 and input_words[i - 1] != "at":
            return True
    return False
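A trimmed standalone version for experimentation; the real NEGATE list shipped with vaderSentiment is much longer than this stand-in:

NEGATE = ("not", "never", "no")  # tiny stand-in for the real list

def negated(input_words, include_nt=True):
    input_words = [str(w).lower() for w in input_words]
    if any(word in input_words for word in NEGATE):
        return True
    if include_nt and any("n't" in word for word in input_words):
        return True
    if "least" in input_words:
        i = input_words.index("least")
        if i > 0 and input_words[i - 1] != "at":
            return True
    return False

print(negated(["this", "isn't", "bad"]))        # True ("n't" contraction)
print(negated(["at", "least", "it", "works"]))  # False ("at least" is not negation)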
<SYSTEM_TASK:> Normalize the score to be between -1 and 1. <END_TASK>
<USER_TASK:> Description:
def normalize(score, alpha=15):
    """Normalize the score to be between -1 and 1 using an alpha that
    approximates the max expected value.
    """
    norm_score = score / math.sqrt((score * score) + alpha)
    if norm_score < -1.0:
        return -1.0
    elif norm_score > 1.0:
        return 1.0
    else:
        return norm_score
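The normalization maps any raw score into (-1, 1); two worked values using a condensed copy of the function:

import math

def normalize(score, alpha=15):
    # Same formula as above, with the clamping folded into min/max
    return max(-1.0, min(1.0, score / math.sqrt((score * score) + alpha)))

print(normalize(4))   # 4 / sqrt(16 + 15)  =  4 / sqrt(31) ~  0.7184
print(normalize(-2))  # -2 / sqrt(4 + 15)  = -2 / sqrt(19) ~ -0.4588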
<SYSTEM_TASK:> Return a float for sentiment strength based on the input text. <END_TASK>
<USER_TASK:> Description:
def polarity_scores(self, text):
    """Return a float for sentiment strength based on the input text.

    Positive values are positive valence, negative values are negative
    valence.
    """
    # Convert emojis to their textual descriptions
    text_token_list = text.split()
    text_no_emoji_lst = []
    for token in text_token_list:
        if token in self.emojis:
            # Get the textual description
            description = self.emojis[token]
            text_no_emoji_lst.append(description)
        else:
            text_no_emoji_lst.append(token)
    text = " ".join(x for x in text_no_emoji_lst)

    sentitext = SentiText(text)

    sentiments = []
    words_and_emoticons = sentitext.words_and_emoticons
    for item in words_and_emoticons:
        valence = 0
        i = words_and_emoticons.index(item)
        # Check for vader_lexicon words that may be used as modifiers or negations
        if item.lower() in BOOSTER_DICT:
            sentiments.append(valence)
            continue
        if (i < len(words_and_emoticons) - 1 and item.lower() == "kind" and
                words_and_emoticons[i + 1].lower() == "of"):
            sentiments.append(valence)
            continue

        sentiments = self.sentiment_valence(valence, sentitext, item, i, sentiments)

    sentiments = self._but_check(words_and_emoticons, sentiments)

    valence_dict = self.score_valence(sentiments, text)

    return valence_dict
<SYSTEM_TASK:> Parse messages sent by the svn 'commit-email.pl' trigger. <END_TASK>
<USER_TASK:> Description:
def parse(self, m, prefix=None):
    """Parse messages sent by the svn 'commit-email.pl' trigger."""
    # The mail is sent from the person doing the checkin. Assume that the
    # local username is enough to identify them (this assumes a one-server
    # cvs-over-rsh environment rather than the server-dirs-shared-over-NFS
    # model)
    name, addr = parseaddr(m["from"])
    if not addr:
        return None  # no From means this message isn't from svn
    at = addr.find("@")
    if at == -1:
        author = addr  # might still be useful
    else:
        author = addr[:at]

    # we take the time of receipt as the time of checkin. Not correct (it
    # depends upon the email latency), but it avoids the
    # out-of-order-changes issue. Also syncmail doesn't give us anything
    # better to work with, unless you count pulling the v1-vs-v2
    # timestamp out of the diffs, which would be ugly. TODO: Pulling the
    # 'Date:' header from the mail is a possibility, and
    # email.utils.parsedate_tz may be useful. It should be configurable,
    # however, because there are a lot of broken clocks out there.
    when = util.now()

    files = []
    comments = ""
    lines = list(body_line_iterator(m))
    rev = None
    while lines:
        line = lines.pop(0)

        # "Author: jmason"
        match = re.search(r"^Author: (\S+)", line)
        if match:
            author = match.group(1)

        # "New Revision: 105955"
        match = re.search(r"^New Revision: (\d+)", line)
        if match:
            rev = match.group(1)

        # possible TODO: use "Date: ..." data here instead of time of
        # commit message receipt, above. however, this timestamp is
        # specified *without* a timezone, in the server's local TZ, so to
        # be accurate buildbot would need a config setting to specify the
        # source server's expected TZ setting! messy.

        # this stanza ends with the "Log:"
        if line == "Log:\n":
            break

    # commit message is terminated by the file-listing section
    while lines:
        line = lines.pop(0)
        if line in ("Modified:\n", "Added:\n", "Removed:\n"):
            break
        comments += line
    comments = comments.rstrip() + "\n"

    while lines:
        line = lines.pop(0)
        if line == "\n":
            break
        if line.find("Modified:\n") == 0:
            continue  # ignore this line
        if line.find("Added:\n") == 0:
            continue  # ignore this line
        if line.find("Removed:\n") == 0:
            continue  # ignore this line
        line = line.strip()

        thesefiles = line.split(" ")
        for f in thesefiles:
            if prefix:
                # insist that the file start with the prefix: we may get
                # changes we don't care about too
                if f.startswith(prefix):
                    f = f[len(prefix):]
                else:
                    log.msg("ignored file from svn commit: prefix '%s' "
                            "does not match filename '%s'" % (prefix, f))
                    continue

            # TODO: figure out how new directories are described, set
            # .isdir
            files.append(f)

    if not files:
        log.msg("no matching files found, ignoring commit")
        return None

    return ('svn', dict(author=author, files=files, comments=comments,
                        when=when, revision=rev))
<SYSTEM_TASK:> Returns a list of all members in the attribute group. <END_TASK>
<USER_TASK:> Description:
def _listAttrGroupMembers(self, attrGroup):
    """Returns a list of all members in the attribute group."""
    from inspect import getmembers, ismethod
    methods = getmembers(self, ismethod)
    group_prefix = attrGroup + '_'
    group_len = len(group_prefix)
    group_members = [method[0][group_len:]
                     for method in methods
                     if method[0].startswith(group_prefix)]
    return group_members
<SYSTEM_TASK:> Update a property, indexing the property by codebase if codebase is not ''. <END_TASK>
<USER_TASK:> Description:
def updateSourceProperty(self, name, value, source=''):
    """Update a property, indexing the property by codebase if codebase is
    not ''. Source steps should generally use this instead of setProperty.
    """
    # pick a decent source name
    if source == '':
        source = self.__class__.__name__

    if self.codebase != '':
        assert not isinstance(self.getProperty(name, None), str), \
            "Sourcestep %s has a codebase, other sourcesteps don't" \
            % self.name
        property_dict = self.getProperty(name, {})
        property_dict[self.codebase] = value
        super().setProperty(name, property_dict, source)
    else:
        assert not isinstance(self.getProperty(name, None), dict), \
            "Sourcestep %s does not have a codebase, other sourcesteps do" \
            % self.name
        super().setProperty(name, value, source)
<SYSTEM_TASK:> Convert Gerrit account properties to Buildbot format. <END_TASK>
<USER_TASK:> Description:
def _gerrit_user_to_author(props, username="unknown"):
    """Convert Gerrit account properties to Buildbot format.

    Take into account missing values.
    """
    username = props.get("username", username)
    username = props.get("name", username)
    if "email" in props:
        username += " <%(email)s>" % props
    return username
<SYSTEM_TASK:> Find the first parent class which is within the buildbot namespace. <END_TASK>
<USER_TASK:> Description:
def getName(obj):
    """Find the first parent class which is within the buildbot namespace;
    prepend the name with as many ">" as the class is subclassed.
    """
    # elasticsearch does not like '.' in dict keys, so we replace them by '/'
    def sanitize(name):
        return name.replace(".", "/")

    if isinstance(obj, _BuildStepFactory):
        klass = obj.factory
    else:
        klass = type(obj)
    name = ""
    klasses = (klass,) + inspect.getmro(klass)
    for klass in klasses:
        if hasattr(klass, "__module__") and klass.__module__.startswith("buildbot."):
            return sanitize(name + klass.__module__ + "." + klass.__name__)
        else:
            name += ">"
    return sanitize(type(obj).__name__)
<SYSTEM_TASK:> Send the actual configuration of the builders and how the steps are arranged. <END_TASK>
<USER_TASK:> Description:
def fullData(master):
    """Send the actual configuration of the builders and how the steps are
    arranged.

    Note that full data will never send actual details of what command is
    run, names of servers, etc.
    """
    builders = []
    for b in master.config.builders:
        steps = []
        for step in b.factory.steps:
            steps.append(getName(step))
        builders.append(steps)
    return {'builders': builders}
<SYSTEM_TASK:> Helper to produce lines suitable for setup.py's entry_points. <END_TASK>
<USER_TASK:> Description:
def define_plugin_entry(name, module_name):
    """Helper to produce lines suitable for setup.py's entry_points."""
    if isinstance(name, tuple):
        entry, name = name
    else:
        entry = name
    return '%s = %s:%s' % (entry, module_name, name)
<SYSTEM_TASK:> Helper to produce the entry_points dict for all plugin groups. <END_TASK>
<USER_TASK:> Description:
def define_plugin_entries(groups):
    """Helper to produce the entry_points dict for all plugin groups."""
    result = dict()
    for group, modules in groups:
        tempo = []
        for module_name, names in modules:
            tempo.extend([define_plugin_entry(name, module_name)
                          for name in names])
        result[group] = tempo
    return result
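Feeding the two helpers above a hypothetical group (the module and class names below are made up for illustration) shows the expected entry_points shape:

entries = define_plugin_entries([
    ('buildbot.worker', [
        ('buildbot_worker.mymodule', ['MyWorker', ('alias', 'RealName')]),
    ]),
])
print(entries)
# {'buildbot.worker': ['MyWorker = buildbot_worker.mymodule:MyWorker',
#                      'alias = buildbot_worker.mymodule:RealName']}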
<SYSTEM_TASK:> Read changes since last change. <END_TASK>
<USER_TASK:> Description:
def _process_changes(self, newRev, branch):
    """Read changes since last change.

    - Read list of commit hashes.
    - Extract details from each commit.
    - Add changes to database.
    """
    # initial run, don't parse all history
    if not self.lastRev:
        return

    rebuild = False
    if newRev in self.lastRev.values():
        if self.buildPushesWithNoCommits:
            existingRev = self.lastRev.get(branch)
            if existingRev is None:
                # This branch was completely unknown, rebuild
                log.msg('gitpoller: rebuilding {} for new branch "{}"'.format(
                    newRev, branch))
                rebuild = True
            elif existingRev != newRev:
                # This branch is known, but it now points to a different
                # commit than last time we saw it, rebuild.
                log.msg('gitpoller: rebuilding {} for updated branch "{}"'.format(
                    newRev, branch))
                rebuild = True

    # get the change list
    revListArgs = (['--format=%H', '{}'.format(newRev)] +
                   ['^' + rev for rev in sorted(self.lastRev.values())] +
                   ['--'])
    self.changeCount = 0
    results = yield self._dovccmd('log', revListArgs, path=self.workdir)

    # process oldest change first
    revList = results.split()
    revList.reverse()

    if rebuild and not revList:
        revList = [newRev]

    self.changeCount = len(revList)
    self.lastRev[branch] = newRev

    if self.changeCount:
        log.msg('gitpoller: processing {} changes: {} from "{}" branch "{}"'.format(
            self.changeCount, revList, self.repourl, branch))

    for rev in revList:
        dl = defer.DeferredList([
            self._get_commit_timestamp(rev),
            self._get_commit_author(rev),
            self._get_commit_files(rev),
            self._get_commit_comments(rev),
        ], consumeErrors=True)
        results = yield dl

        # check for failures
        failures = [r[1] for r in results if not r[0]]
        if failures:
            for failure in failures:
                log.err(failure,
                        "while processing changes for {} {}".format(newRev, branch))
            # just fail on the first error; they're probably all related!
            failures[0].raiseException()

        timestamp, author, files, comments = [r[1] for r in results]

        yield self.master.data.updates.addChange(
            author=author,
            revision=bytes2unicode(rev, encoding=self.encoding),
            files=files,
            comments=comments,
            when_timestamp=timestamp,
            branch=bytes2unicode(self._removeHeads(branch)),
            project=self.project,
            repository=bytes2unicode(self.repourl, encoding=self.encoding),
            category=self.category,
            src='git')
<SYSTEM_TASK:> Convert a Lock identifier into an actual Lock instance. <END_TASK>
<USER_TASK:> Description:
def getLockByID(self, lockid):
    """Convert a Lock identifier into an actual Lock instance.

    @param lockid: a locks.MasterLock or locks.WorkerLock instance
    @return: a locks.RealMasterLock or locks.RealWorkerLock instance
    """
    assert isinstance(lockid, (locks.MasterLock, locks.WorkerLock))
    if lockid not in self.locks:
        self.locks[lockid] = lockid.lockClass(lockid)
    # if the master.cfg file has changed maxCount= on the lock, the next
    # time a build is started, they'll get a new RealLock instance. Note
    # that this requires that MasterLock and WorkerLock (marker) instances
    # be hashable and that they should compare properly.
    return self.locks[lockid]
<SYSTEM_TASK:> Call this when something suggests that a particular worker may now be available. <END_TASK>
<USER_TASK:> Description:
def maybeStartBuildsForWorker(self, worker_name):
    """Call this when something suggests that a particular worker may now
    be available to start a build.

    @param worker_name: the name of the worker
    """
    builders = self.getBuildersForWorker(worker_name)
    self.brd.maybeStartBuildsOn([b.name for b in builders])
<SYSTEM_TASK:> Rewrap text for output to the console. <END_TASK>
<USER_TASK:> Description:
def rewrap(text, width=None):
    """Rewrap text for output to the console.

    Removes common indentation and rewraps paragraphs according to the
    console width.

    Line feeds between paragraphs are preserved. Formatting of paragraphs
    that start with additional indentation is preserved.
    """
    if width is None:
        width = 80

    # Remove common indentation.
    text = textwrap.dedent(text)

    def needs_wrapping(line):
        # Line is always non-empty.
        return not line[0].isspace()

    # Split text by lines and group lines that comprise paragraphs.
    wrapped_text = ""
    for do_wrap, lines in itertools.groupby(text.splitlines(True),
                                            key=needs_wrapping):
        paragraph = ''.join(lines)

        if do_wrap:
            paragraph = textwrap.fill(paragraph, width)

        wrapped_text += paragraph

    return wrapped_text
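Reusing rewrap() from above (it needs `import textwrap` and `import itertools`), the indentation rule is easy to see:

print(rewrap("""
    This paragraph starts at the left margin after dedenting, so it is
    rewrapped to the requested width.

        This indented block keeps its original formatting.
    """, width=40))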
<SYSTEM_TASK:> Resolve a reference to a directive of this class. <END_TASK>
<USER_TASK:> Description:
def resolve_ref(cls, domain, env, fromdocname, builder, typ, target, node, contnode):
    """Resolve a reference to a directive of this class."""
    targets = domain.data['targets'].get(cls.ref_type, {})
    try:
        todocname, targetname = targets[target]
    except KeyError:
        env.warn(fromdocname,
                 "Missing BB reference: bb:%s:%s" % (cls.ref_type, target),
                 node.line)
        return None

    return make_refnode(builder, fromdocname, todocname, targetname,
                        contnode, target)
<SYSTEM_TASK:> Call this function after the file exists to populate properties. <END_TASK>
<USER_TASK:> Description:
def load(self):
    """Call this function after the file exists to populate properties."""
    # If we are given a string, open it; otherwise assume it's something we
    # can call read on.
    if isinstance(self.specfile, str):
        f = open(self.specfile, 'r')
    else:
        f = self.specfile

    for line in f:
        if self.v_regex.match(line):
            self._pkg_version = self.v_regex.match(line).group(1)
        if self.n_regex.match(line):
            self._pkg_name = self.n_regex.match(line).group(1)
    f.close()
    self._loaded = True
<SYSTEM_TASK:> Consumer for CaptureBuildStartTime. Gets the build start time. <END_TASK> <USER_TASK:> Description: def consume(self, routingKey, msg): """ Consumer for CaptureBuildStartTime. Gets the build start time. """
builder_info = yield self.master.data.get(("builders", msg['builderid'])) if self._builder_name_matches(builder_info): try: ret_val = self._callback(*self._retValParams(msg)) except Exception as e: # catching generic exceptions is okay here since we propagate # it raise CaptureCallbackError("%s Exception raised: %s with message: %s" % (self._err_msg(msg, builder_info['name']), type(e).__name__, str(e))) context = self._defaultContext(msg, builder_info['name']) post_data = { self._time_type: ret_val } series_name = "%s-build-times" % builder_info['name'] yield self._store(post_data, series_name, context) else: yield defer.succeed(None)
<SYSTEM_TASK:> Stop worker process by sending it a signal. <END_TASK> <USER_TASK:> Description: def stopWorker(basedir, quiet, signame="TERM"): """ Stop worker process by sending it a signal. Using the specified basedir path, read worker process's pid file and try to terminate that process with specified signal. @param basedir: worker's basedir path @param quiet: if True, don't print any messages to stdout @param signame: signal to send to the worker process @raise WorkerNotRunning: if worker pid file is not found """
import errno
import signal

os.chdir(basedir)
try:
    f = open("twistd.pid", "rt")
except IOError:
    raise WorkerNotRunning()
pid = int(f.read().strip())
f.close()
signum = getattr(signal, "SIG" + signame)

timer = 0
try:
    os.kill(pid, signum)
except OSError as e:
    if e.errno != errno.ESRCH:  # ESRCH: no such process (already gone)
        raise

time.sleep(0.1)
while timer < 10:
    # poll once per second until the process goes away, up to 10 seconds
    try:
        os.kill(pid, 0)
    except OSError:
        if not quiet:
            print("worker process {0} is dead".format(pid))
        return 0
    timer += 1
    time.sleep(1)
if not quiet:
    print("never saw process go away")
return 1
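A hedged usage sketch (the basedir path is illustrative): try SIGTERM first and escalate to SIGKILL only if the process survives the ten-second poll.

try:
    rc = stopWorker("/var/lib/buildbot/worker", quiet=False, signame="TERM")
    if rc != 0:
        # the process ignored TERM for 10 seconds; escalate
        stopWorker("/var/lib/buildbot/worker", quiet=False, signame="KILL")
except WorkerNotRunning:
    print("no twistd.pid found; worker is not running")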
<SYSTEM_TASK:> Create a new Build instance. <END_TASK> <USER_TASK:> Description: def newBuild(self, requests): """Create a new Build instance. @param requests: a list of buildrequest dictionaries describing what is to be built """
b = self.buildClass(requests) b.useProgress = self.useProgress b.workdir = self.workdir b.setStepFactories(self.steps) return b
<SYSTEM_TASK:> This method can be used to add patterns of warnings that should <END_TASK> <USER_TASK:> Description: def addSuppression(self, suppressionList): """ This method can be used to add patterns of warnings that should not be counted. It takes a single argument, a list of patterns. Each pattern is a 4-tuple (FILE-RE, WARN-RE, START, END). FILE-RE is a regular expression (string or compiled regexp), or None. If None, the pattern matches all files, else only files matching the regexp. If directoryEnterPattern is specified in the class constructor, matching is against the full path name, eg. src/main.c. WARN-RE is similarly a regular expression matched against the text of the warning, or None to match all warnings. START and END form an inclusive line number range to match against. If START is None, there is no lower bound, similarly if END is None there is no upper bound."""
for fileRe, warnRe, start, end in suppressionList: if fileRe is not None and isinstance(fileRe, str): fileRe = re.compile(fileRe) if warnRe is not None and isinstance(warnRe, str): warnRe = re.compile(warnRe) self.suppressions.append((fileRe, warnRe, start, end))
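A usage sketch with made-up patterns (step stands in for a WarningCountingShellCommand instance): the first tuple silences one warning text in every file, the second silences everything in a vendored file on lines 1-100. Plain strings would also work, since the method compiles them itself.

import re

step.addSuppression([
    (None, re.compile(r'unused variable'), None, None),
    (re.compile(r'vendored/.*\.c'), None, 1, 100),
])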
<SYSTEM_TASK:> Match log lines against warningPattern. <END_TASK> <USER_TASK:> Description: def createSummary(self, log): """ Match log lines against warningPattern. Warnings are collected into another log for this step, and the build-wide 'warnings-count' is updated."""
# If there were any warnings, make a log of the warning lines available
if self.warnCount:
    self.addCompleteLog("warnings (%d)" % self.warnCount,
                        "\n".join(self.loggedWarnings) + "\n")

warnings_stat = self.getStatistic('warnings', 0)
self.setStatistic('warnings', warnings_stat + self.warnCount)

old_count = self.getProperty("warnings-count", 0)
self.setProperty(
    "warnings-count", old_count + self.warnCount, "WarningCountingShellCommand")
<SYSTEM_TASK:> cppcheck always returns 0, unless a special parameter is given <END_TASK> <USER_TASK:> Description: def evaluateCommand(self, cmd): """ cppcheck always returns 0, unless a special parameter is given """
for msg in self.flunkingIssues: if self.counts[msg] != 0: return FAILURE if self.getProperty('cppcheck-total') != 0: return WARNINGS return SUCCESS
<SYSTEM_TASK:> Get the value from pass identified by 'entry'. <END_TASK> <USER_TASK:> Description: def get(self, entry): """ Get the value from pass identified by 'entry'. """
try: output = yield utils.getProcessOutput( "pass", args=[entry], env=self._env ) return output.decode("utf-8", "ignore").splitlines()[0] except IOError: return None
<SYSTEM_TASK:> Patch startService and stopService so that they check the previous state <END_TASK> <USER_TASK:> Description: def patch(): """ Patch startService and stopService so that they check the previous state first. (used for debugging only) """
from twisted.application.service import Service old_startService = Service.startService old_stopService = Service.stopService def startService(self): assert not self.running, "%r already running" % (self,) return old_startService(self) def stopService(self): assert self.running, "%r already stopped" % (self,) return old_stopService(self) Service.startService = startService Service.stopService = stopService
<SYSTEM_TASK:> Perform any periodic database cleanup tasks. <END_TASK> <USER_TASK:> Description: def _doCleanup(self): """ Perform any periodic database cleanup tasks. @returns: Deferred """
# pass on this if we're not configured yet if not self.configured_url: return d = self.changes.pruneChanges(self.master.config.changeHorizon) d.addErrback(log.err, 'while pruning changes') return d
<SYSTEM_TASK:> Special setup for sqlite engines <END_TASK> <USER_TASK:> Description: def set_up(self, u, engine): """Special setup for sqlite engines"""
def connect_listener_enable_fk(connection, record): # fk must be enabled for all connections if not getattr(engine, "fk_disabled", False): return # http://trac.buildbot.net/ticket/3490#ticket # connection.execute('pragma foreign_keys=ON') sa.event.listen(engine.pool, 'connect', connect_listener_enable_fk) # try to enable WAL logging if u.database: def connect_listener(connection, record): connection.execute("pragma checkpoint_fullfsync = off") sa.event.listen(engine.pool, 'connect', connect_listener) log.msg("setting database journal mode to 'wal'") try: engine.execute("pragma journal_mode = wal") except Exception: log.msg("failed to set journal mode - database may fail")
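For reference, the journal-mode pragma can be exercised directly with the standard-library sqlite3 module (a standalone illustration, not Buildbot code); the statement returns the mode actually in effect:

import sqlite3

conn = sqlite3.connect("state.sqlite")  # illustrative file name
mode = conn.execute("pragma journal_mode = wal").fetchone()[0]
print(mode)  # 'wal' where the filesystem supports it
conn.close()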
<SYSTEM_TASK:> Special setup for mysql engines <END_TASK> <USER_TASK:> Description: def set_up(self, u, engine): """Special setup for mysql engines"""
# add the reconnecting PoolListener that will detect a # disconnected connection and automatically start a new # one. This provides a measure of additional safety over # the pool_recycle parameter, and is useful when e.g., the # mysql server goes away def checkout_listener(dbapi_con, con_record, con_proxy): try: cursor = dbapi_con.cursor() cursor.execute("SELECT 1") except dbapi_con.OperationalError as ex: if self.is_disconnect(ex.args): # sqlalchemy will re-create the connection log.msg('connection will be removed') raise sa.exc.DisconnectionError() log.msg('exception happened {}'.format(ex)) raise # older versions of sqlalchemy require the listener to be specified # in the kwargs, in a class instance if sautils.sa_version() < (0, 7, 0): class ReconnectingListener: pass rcl = ReconnectingListener() rcl.checkout = checkout_listener engine.pool.add_listener(rcl) else: sa.event.listen(engine.pool, 'checkout', checkout_listener)
<SYSTEM_TASK:> For mysql, take max_idle out of the query arguments, and <END_TASK> <USER_TASK:> Description: def special_case_mysql(self, u, kwargs): """For mysql, take max_idle out of the query arguments, and use its value for pool_recycle. Also, force use_unicode and charset to be True and 'utf8', failing if they were set to anything else."""
kwargs['pool_recycle'] = int(u.query.pop('max_idle', 3600)) # default to the MyISAM storage engine storage_engine = u.query.pop('storage_engine', 'MyISAM') kwargs['connect_args'] = { 'init_command': 'SET default_storage_engine=%s' % storage_engine, } if 'use_unicode' in u.query: if u.query['use_unicode'] != "True": raise TypeError("Buildbot requires use_unicode=True " + "(and adds it automatically)") else: u.query['use_unicode'] = True if 'charset' in u.query: if u.query['charset'] != "utf8": raise TypeError("Buildbot requires charset=utf8 " + "(and adds it automatically)") else: u.query['charset'] = 'utf8' return u, kwargs, None
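To see what the method does to the query arguments, here is a sketch with a stand-in URL object (FakeURL, strategy, and the values are all hypothetical):

class FakeURL:
    query = {'max_idle': '300', 'storage_engine': 'InnoDB'}

u, kwargs, _ = strategy.special_case_mysql(FakeURL(), {})
print(kwargs['pool_recycle'])                      # 300
print(kwargs['connect_args']['init_command'])      # SET default_storage_engine=InnoDB
print(u.query['use_unicode'], u.query['charset'])  # True utf8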
<SYSTEM_TASK:> Return a novaclient from the given args. <END_TASK> <USER_TASK:> Description: def _constructClient(client_version, username, user_domain, password, project_name, project_domain, auth_url): """Return a novaclient from the given args."""
loader = loading.get_plugin_loader('password') # These only work with v3 if user_domain is not None or project_domain is not None: auth = loader.load_from_options(auth_url=auth_url, username=username, user_domain_name=user_domain, password=password, project_name=project_name, project_domain_name=project_domain) else: auth = loader.load_from_options(auth_url=auth_url, username=username, password=password, project_name=project_name) sess = session.Session(auth=auth) return client.Client(client_version, session=sess)
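A hedged call sketch (every credential and URL below is a placeholder); passing the two domain names selects the Keystone v3 branch:

nova = _constructClient(
    client_version='2',
    username='buildbot',
    user_domain='Default',
    password='secret',
    project_name='ci',
    project_domain='Default',
    auth_url='https://keystone.example.com:5000/v3',
)
print(nova.servers.list())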
<SYSTEM_TASK:> Render all of the block device's values. <END_TASK> <USER_TASK:> Description: def _renderBlockDevice(self, block_device, build): """Render all of the block device's values."""
rendered_block_device = yield build.render(block_device) if rendered_block_device['volume_size'] is None: source_type = rendered_block_device['source_type'] source_uuid = rendered_block_device['uuid'] volume_size = self._determineVolumeSize(source_type, source_uuid) rendered_block_device['volume_size'] = volume_size return rendered_block_device
<SYSTEM_TASK:> Determine the minimum size the volume needs to be for the source. <END_TASK> <USER_TASK:> Description: def _determineVolumeSize(self, source_type, source_uuid): """ Determine the minimum size the volume needs to be for the source. Returns the size in GiB. """
nova = self.novaclient if source_type == 'image': # The size returned for an image is in bytes. Round up to the next # integer GiB. image = nova.images.get(source_uuid) if hasattr(image, 'OS-EXT-IMG-SIZE:size'): size = getattr(image, 'OS-EXT-IMG-SIZE:size') size_gb = int(math.ceil(size / 1024.0**3)) return size_gb elif source_type == 'volume': # Volumes are easy because they are already in GiB. volume = nova.volumes.get(source_uuid) return volume.size elif source_type == 'snapshot': snap = nova.volume_snapshots.get(source_uuid) return snap.size else: unknown_source = ("The source type '%s' for UUID '%s' is" " unknown" % (source_type, source_uuid)) raise ValueError(unknown_source)
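The rounding step is why image sizes work out: image sizes come back in bytes, but volumes are requested in whole GiB, so anything over an exact boundary must round up.

import math

size_bytes = 2_684_354_561  # one byte over 2.5 GiB
print(int(math.ceil(size_bytes / 1024.0**3)))  # -> 3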
<SYSTEM_TASK:> Decorate a do function to log before, after, and elapsed time, <END_TASK> <USER_TASK:> Description: def timed_do_fn(f): """Decorate a do function to log before, after, and elapsed time, with the name of the calling function. This is not speedy!"""
def wrap(callable, *args, **kwargs):
    global _debug_id

    # get a description of the function that called us
    st = traceback.extract_stack(limit=2)
    file, line, name, _ = st[0]

    # and its locals
    frame = inspect.currentframe()
    locals = frame.f_locals

    # invent a unique ID for the description
    id, _debug_id = _debug_id, _debug_id + 1
    descr = "%s-%08x" % (name, id)

    start_time = time.time()
    log.msg("%s - before ('%s' line %d)" % (descr, file, line))
    for name in locals:
        if name in ('self', 'thd'):
            continue
        log.msg("%s - %s = %r" % (descr, name, locals[name]))

    # wrap the callable to log the begin and end of the actual thread
    # function
    def callable_wrap(*args, **kwargs):
        # forward exactly the arguments the pool passed us
        log.msg("%s - thd start" % (descr,))
        try:
            return callable(*args, **kwargs)
        finally:
            log.msg("%s - thd end" % (descr,))
    d = f(callable_wrap, *args, **kwargs)

    @d.addBoth
    def after(x):
        end_time = time.time()
        elapsed = (end_time - start_time) * 1000
        log.msg("%s - after (%0.2f ms elapsed)" % (descr, elapsed))
        return x
    return d
wrap.__name__ = f.__name__
wrap.__doc__ = f.__doc__
return wrap
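In Buildbot this decorates the pool's bound do-style methods, so the first argument wrap sees is the callable being shipped to a thread; a hedged sketch of the same pattern (pool is an assumed DBThreadPool instance):

def fetch_one(conn):
    # runs in the database thread; conn is a SQLAlchemy connection
    return conn.execute("SELECT 1").scalar()

pool.do = timed_do_fn(pool.do)  # wrap the *bound* method
d = pool.do(fetch_one)          # logs before/after plus elapsed ms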
<SYSTEM_TASK:> Manually stop the pool. This is only necessary from tests, as the <END_TASK> <USER_TASK:> Description: def shutdown(self): """Manually stop the pool. This is only necessary from tests, as the pool will stop itself when the reactor stops under normal circumstances."""
if not self._stop_evt: return # pool is already stopped self.reactor.removeSystemEventTrigger(self._stop_evt) self._stop()
<SYSTEM_TASK:> Display the data attributes of an object in a readable format <END_TASK> <USER_TASK:> Description: def show(x): """Display the data attributes of an object in a readable format"""
print("data attributes of %r" % (x,)) names = dir(x) maxlen = max([0] + [len(n) for n in names]) for k in names: v = getattr(x, k) if isinstance(v, types.MethodType): continue if k[:2] == '__' and k[-2:] == '__': continue if isinstance(v, str): if len(v) > 80 - maxlen - 5: v = repr(v[:80 - maxlen - 5]) + "..." elif isinstance(v, (int, type(None))): v = str(v) elif isinstance(v, (list, tuple, dict)): v = "%s (%d elements)" % (v, len(v)) else: v = str(type(v)) print("%*s : %s" % (maxlen, k, v)) return x
<SYSTEM_TASK:> I look up an existing predefined domain <END_TASK> <USER_TASK:> Description: def lookupByName(self, name): """ I look up an existing predefined domain """
res = yield queue.executeInThread(self.connection.lookupByName, name) return self.DomainClass(self, res)
<SYSTEM_TASK:> I find existing VMs that are already running that might be orphaned instances of this worker. <END_TASK> <USER_TASK:> Description: def _find_existing_instance(self): """ I find existing VMs that are already running that might be orphaned instances of this worker. """
if not self.connection: return None domains = yield self.connection.all() for d in domains: name = yield d.name() if name.startswith(self.workername): self.domain = d break self.ready = True
<SYSTEM_TASK:> I start a new instance of a VM. <END_TASK> <USER_TASK:> Description: def start_instance(self, build): """ I start a new instance of a VM. If a base_image is specified, I will make a clone of it; otherwise I will use the image directly. If I'm not given a libvirt domain definition XML, I will look for my name in the list of defined virtual machines and start that. """
if self.domain is not None:
    log.msg("Cannot start_instance '%s' as already active" %
            self.workername)
    return False

yield self._prepare_base_image()

try:
    if self.xml:
        self.domain = yield self.connection.create(self.xml)
    else:
        self.domain = yield self.connection.lookupByName(self.workername)
        yield self.domain.create()
except Exception:
    log.err(failure.Failure(),
            "Cannot start a VM (%s), failing gracefully and triggering "
            "a new build check" % self.workername)
    self.domain = None
    return False

return True
<SYSTEM_TASK:> I attempt to stop a running VM. <END_TASK> <USER_TASK:> Description: def stop_instance(self, fast=False): """ I attempt to stop a running VM. I make sure any connection to the worker is removed. If the VM was using a cloned image, I remove the clone. When everything is tidied up, I ask that bbot look for work to do. """
log.msg("Attempting to stop '%s'" % self.workername) if self.domain is None: log.msg("I don't think that domain is even running, aborting") return defer.succeed(None) domain = self.domain self.domain = None if self.graceful_shutdown and not fast: log.msg("Graceful shutdown chosen for %s" % self.workername) d = domain.shutdown() else: d = domain.destroy() @d.addCallback def _disconnect(res): log.msg("VM destroyed (%s): Forcing its connection closed." % self.workername) return super().disconnect() @d.addBoth def _disconnected(res): log.msg( "We forced disconnection (%s), cleaning up and triggering new build" % self.workername) if self.base_image: os.remove(self.image) self.botmaster.maybeStartBuildsForWorker(self.workername) return res return d
<SYSTEM_TASK:> Splits the branchfile argument and assuming branch is <END_TASK> <USER_TASK:> Description: def get_simple_split(branchfile): """Splits the branchfile argument and, assuming branch is the first path component in branchfile, returns (branch, file); otherwise returns (None, None)."""
index = branchfile.find('/') if index == -1: return None, None branch, file = branchfile.split('/', 1) return branch, file
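For example:

print(get_simple_split('trunk/src/main.c'))  # ('trunk', 'src/main.c')
print(get_simple_split('README'))            # (None, None)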