<SYSTEM_TASK:> A simple method that runs a ManagementUtility. <END_TASK> <USER_TASK:> Description: def execute_from_command_line(argv=None): """ A simple method that runs a ManagementUtility. """
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "colab.settings") from django.conf import settings if not hasattr(settings, 'SECRET_KEY') and 'initconfig' in sys.argv: command = initconfig.Command() command.handle() else: utility = ManagementUtility(argv) utility.execute()
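A minimal entry-point sketch showing how the function above is typically invoked; the manage.py-style wrapper below is an assumption for illustration, not part of the original module.

import sys

if __name__ == "__main__":
    # delegate straight to the helper defined above
    execute_from_command_line(sys.argv)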
<SYSTEM_TASK:> Get all the documents that match the query <END_TASK> <USER_TASK:> Description: def raw_query(self, query, query_parameters=None): """ Get all the documents that match the query @param str query: The RQL query @param dict query_parameters: Query parameters to add to the query {key: value} """
self.assert_no_raw_query() if len(self._where_tokens) != 0 or len(self._select_tokens) != 0 or len( self._order_by_tokens) != 0 or len(self._group_by_tokens) != 0: raise InvalidOperationException( "You can only use raw_query on a new query, without applying any operations " "(such as where, select, order_by, group_by, etc)") if query_parameters: self.query_parameters = query_parameters self._query = query return self
<SYSTEM_TASK:> Get all the documents where the given field_name equals the value <END_TASK> <USER_TASK:> Description: def where_equals(self, field_name, value, exact=False): """ Get all the documents where the given field_name equals the value @param str field_name: The field name in the index you want to query. @param value: The field value you want to query for @param bool exact: If True, require an exact match """
if field_name is None: raise ValueError("None field_name is invalid") field_name = Query.escape_if_needed(field_name) self._add_operator_if_needed() token = "equals" if self.negate: self.negate = False token = "not_equals" self.last_equality = {field_name: value} token = _Token(field_name=field_name, value=self.add_query_parameter(value), token=token, exact=exact) token.write = self.rql_where_write(token) self._where_tokens.append(token) return self
<SYSTEM_TASK:> Get all the documents where each kwargs key equals its value <END_TASK> <USER_TASK:> Description: def where(self, exact=False, **kwargs): """ Get all the documents where each kwargs key equals its value @param bool exact: If True, require an exact match @param kwargs: the keys of the kwargs are the field names in the index you want to query. The value is the field value you want to query for (if kwargs[field_name] is a list it behaves like the where_in method) """
for field_name in kwargs: if isinstance(kwargs[field_name], list): self.where_in(field_name, kwargs[field_name], exact) else: self.where_equals(field_name, kwargs[field_name], exact) return self
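A usage sketch of this query builder, assuming a pyravendb-style DocumentStore; the module path, store options, field names and document IDs are illustrative and may differ between client versions.

from pyravendb.store import document_store

store = document_store.DocumentStore(urls=["http://localhost:8080"], database="Demo")
store.initialize()

with store.open_session() as session:
    # chain the builder methods shown above; exact=True forces a case-sensitive match
    results = list(session.query()
                          .where_equals("company", "companies/1", exact=True)
                          .where_in("status", ["open", "pending"]))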
<SYSTEM_TASK:> For more complex text searching <END_TASK> <USER_TASK:> Description: def search(self, field_name, search_terms, operator=QueryOperator.OR): """ For more complex text searching @param str field_name: The field name in the index you want to query. @param str search_terms: The terms you want to query @param QueryOperator operator: OR or AND """
if field_name is None: raise ValueError("None field_name is invalid") field_name = Query.escape_if_needed(field_name) self._add_operator_if_needed() self.negate_if_needed(field_name) self.last_equality = {field_name: "(" + search_terms + ")" if ' ' in search_terms else search_terms} token = _Token(field_name=field_name, token="search", value=self.add_query_parameter(search_terms), search_operator=operator) token.write = self.rql_where_write(token) self._where_tokens.append(token) return self
<SYSTEM_TASK:> Get all the documents where the given field_name ends with the value <END_TASK> <USER_TASK:> Description: def where_ends_with(self, field_name, value): """ Get all the documents where the given field_name ends with the value @param str field_name: The field name in the index you want to query. @param str value: The field value you want to query for """
if field_name is None: raise ValueError("None field_name is invalid") field_name = Query.escape_if_needed(field_name) self._add_operator_if_needed() self.negate_if_needed(field_name) self.last_equality = {field_name: value} token = _Token(field_name=field_name, token="endsWith", value=self.add_query_parameter(value)) token.write = self.rql_where_write(token) self._where_tokens.append(token) return self
<SYSTEM_TASK:> Check that the field has one of the specified values <END_TASK> <USER_TASK:> Description: def where_in(self, field_name, values, exact=False): """ Check that the field has one of the specified values @param str field_name: Name of the field @param values: The values we wish to query @param bool exact: If True, require an exact (e.g. case-sensitive) match """
field_name = Query.escape_if_needed(field_name) self._add_operator_if_needed() self.negate_if_needed(field_name) token = _Token(field_name=field_name, value=self.add_query_parameter(list(Utils.unpack_iterable(values))), token="in", exact=exact) token.write = self.rql_where_write(token) self._where_tokens.append(token) return self
<SYSTEM_TASK:> Query the facet results for this query using the specified list of facets with the given start and page_size <END_TASK> <USER_TASK:> Description: def to_facets(self, facets, start=0, page_size=None): """ Query the facet results for this query using the specified list of facets with the given start and page_size @param List[Facet] facets: List of facets @param int start: Start index for paging @param page_size: Paging page size. If set, overrides Facet.max_result """
if len(facets) == 0: raise ValueError("Facets must contain at least one entry", "facets") str_query = self.__str__() facet_query = FacetQuery(str_query, None, facets, start, page_size, query_parameters=self.query_parameters, wait_for_non_stale_results=self.wait_for_non_stale_results, wait_for_non_stale_results_timeout=self.timeout, cutoff_etag=self.cutoff_etag) command = GetFacetsCommand(query=facet_query) return self.session.requests_executor.execute(command)
<SYSTEM_TASK:> Use the HIGHLIGHT_NUM_CHARS_BEFORE_MATCH setting to position the highlight window <END_TASK> <USER_TASK:> Description: def find_window(self, highlight_locations): """Use the HIGHLIGHT_NUM_CHARS_BEFORE_MATCH setting to determine how many characters before the first matched word should be included in the window """
if len(self.text_block) <= self.max_length: return (0, self.max_length) num_chars_before = getattr( settings, 'HIGHLIGHT_NUM_CHARS_BEFORE_MATCH', 0 ) best_start, best_end = super(ColabHighlighter, self).find_window( highlight_locations ) if best_start <= num_chars_before: best_end -= best_start best_start = 0 else: best_start -= num_chars_before best_end -= num_chars_before return (best_start, best_end)
<SYSTEM_TASK:> Try to log in and set the internal session ID. <END_TASK> <USER_TASK:> Description: def login(self): """ Try to log in and set the internal session ID. Please note: - Any failed login resets all existing session IDs, even of other users. - SIDs expire after some time """
response = self.session.get(self.base_url + '/login_sid.lua', timeout=10) xml = ET.fromstring(response.text) if xml.find('SID').text == "0000000000000000": challenge = xml.find('Challenge').text url = self.base_url + "/login_sid.lua" response = self.session.get(url, params={ "username": self.username, "response": self.calculate_response(challenge, self.password), }, timeout=10) xml = ET.fromstring(response.text) sid = xml.find('SID').text if xml.find('SID').text == "0000000000000000": blocktime = int(xml.find('BlockTime').text) exc = Exception("Login failed, please wait {} seconds".format( blocktime )) exc.blocktime = blocktime raise exc self.sid = sid return sid
<SYSTEM_TASK:> Calculate response for the challenge-response authentication <END_TASK> <USER_TASK:> Description: def calculate_response(self, challenge, password): """Calculate response for the challenge-response authentication"""
to_hash = (challenge + "-" + password).encode("UTF-16LE") hashed = hashlib.md5(to_hash).hexdigest() return "{0}-{1}".format(challenge, hashed)
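A self-contained sketch of the same challenge-response scheme (MD5 over the UTF-16LE encoding of "<challenge>-<password>"), so the calculation can be tried without a FRITZ!Box; the sample inputs are made up.

import hashlib

def fritz_response(challenge, password):
    # the box expects "<challenge>-<md5hex>" where the hash covers
    # the UTF-16LE bytes of "<challenge>-<password>"
    to_hash = (challenge + "-" + password).encode("UTF-16LE")
    return "{0}-{1}".format(challenge, hashlib.md5(to_hash).hexdigest())

print(fritz_response("1234567z", "mypassword"))  # prints "1234567z-<32 hex chars>"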
<SYSTEM_TASK:> Returns a list of Actor objects for querying SmartHome devices. <END_TASK> <USER_TASK:> Description: def get_actors(self): """ Returns a list of Actor objects for querying SmartHome devices. This is currently the only working method for getting temperature data. """
devices = self.homeautoswitch("getdevicelistinfos") xml = ET.fromstring(devices) actors = [] for device in xml.findall('device'): actors.append(Actor(fritzbox=self, device=device)) return actors
<SYSTEM_TASK:> Return an actor identified by its AIN, or None if not found <END_TASK> <USER_TASK:> Description: def get_actor_by_ain(self, ain): """ Return an actor identified by its AIN, or None if not found """
for actor in self.get_actors(): if actor.actor_id == ain: return actor
<SYSTEM_TASK:> Call a switch method. <END_TASK> <USER_TASK:> Description: def homeautoswitch(self, cmd, ain=None, param=None): """ Call a switch method. Should only be used by internal library functions. """
assert self.sid, "Not logged in" params = { 'switchcmd': cmd, 'sid': self.sid, } if param is not None: params['param'] = param if ain: params['ain'] = ain url = self.base_url + '/webservices/homeautoswitch.lua' response = self.session.get(url, params=params, timeout=10) response.raise_for_status() return response.text.strip().encode('utf-8')
<SYSTEM_TASK:> Get information about all actors <END_TASK> <USER_TASK:> Description: def get_switch_actors(self): """ Get information about all actors This needs 1+(6n) requests where n = number of actors registered (one getswitchlist call plus six queries per actor) Deprecated, use get_actors instead. Returns a dict: [ain] = { 'name': Name of actor, 'state': Powerstate (boolean) 'present': Connected to server? (boolean) 'power': Current power consumption in mW 'energy': Used energy in Wh since last energy reset 'temperature': Current environment temperature in Celsius } """
actors = {} for ain in self.homeautoswitch("getswitchlist").split(','): actors[ain] = { 'name': self.homeautoswitch("getswitchname", ain), 'state': bool(self.homeautoswitch("getswitchstate", ain)), 'present': bool(self.homeautoswitch("getswitchpresent", ain)), 'power': self.homeautoswitch("getswitchpower", ain), 'energy': self.homeautoswitch("getswitchenergy", ain), 'temperature': self.homeautoswitch("getswitchtemperature", ain), } return actors
<SYSTEM_TASK:> Return a list of devices. <END_TASK> <USER_TASK:> Description: def get_devices(self): """ Return a list of devices. Deprecated, use get_actors instead. """
url = self.base_url + '/net/home_auto_query.lua' response = self.session.get(url, params={ 'sid': self.sid, 'command': 'AllOutletStates', 'xhr': 0, }, timeout=15) response.raise_for_status() data = response.json() count = int(data["Outlet_count"]) devices = [] for i in range(1, count + 1): device = Device( int(data["DeviceID_{0}".format(i)]), int(data["DeviceConnectState_{0}".format(i)]), int(data["DeviceSwitchState_{0}".format(i)]) ) devices.append(device) return devices
<SYSTEM_TASK:> Return all available energy consumption data for the device. <END_TASK> <USER_TASK:> Description: def get_consumption(self, deviceid, timerange="10"): """ Return all available energy consumption data for the device. You need to divide watt_values by 100 and volt_values by 1000 to get the "real" values. :return: dict """
tranges = ("10", "24h", "month", "year") if timerange not in tranges: raise ValueError( "Unknown timerange. Possible values are: {0}".format(tranges) ) url = self.base_url + "/net/home_auto_query.lua" response = self.session.get(url, params={ 'sid': self.sid, 'command': 'EnergyStats_{0}'.format(timerange), 'id': deviceid, 'xhr': 0, }, timeout=15) response.raise_for_status() data = response.json() result = {} # Single result values values_map = { 'MM_Value_Amp': 'mm_value_amp', 'MM_Value_Power': 'mm_value_power', 'MM_Value_Volt': 'mm_value_volt', 'EnStats_average_value': 'enstats_average_value', 'EnStats_max_value': 'enstats_max_value', 'EnStats_min_value': 'enstats_min_value', 'EnStats_timer_type': 'enstats_timer_type', 'sum_Day': 'sum_day', 'sum_Month': 'sum_month', 'sum_Year': 'sum_year', } for avm_key, py_key in values_map.items(): result[py_key] = int(data[avm_key]) # Stats counts count = int(data["EnStats_count"]) watt_values = [None for i in range(count)] volt_values = [None for i in range(count)] for i in range(1, count + 1): watt_values[i - 1] = int(data["EnStats_watt_value_{}".format(i)]) volt_values[i - 1] = int(data["EnStats_volt_value_{}".format(i)]) result['watt_values'] = watt_values result['volt_values'] = volt_values return result
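A small sketch of the normalization the docstring describes; `data` stands in for the dict returned by get_consumption() and the sample numbers are made up.

data = {"watt_values": [512, 498, 0], "volt_values": [230150, 229987, 0]}

# per the docstring: watt values must be divided by 100, volt values by 1000
watts = [w / 100.0 for w in data["watt_values"]]
volts = [v / 1000.0 for v in data["volt_values"]]
print(watts)  # [5.12, 4.98, 0.0]
print(volts)  # [230.15, 229.987, 0.0]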
<SYSTEM_TASK:> Return the system logs since the last reboot. <END_TASK> <USER_TASK:> Description: def get_logs(self): """ Return the system logs since the last reboot. """
assert BeautifulSoup, "Please install bs4 to use this method" url = self.base_url + "/system/syslog.lua" response = self.session.get(url, params={ 'sid': self.sid, 'stylemode': 'print', }, timeout=15) response.raise_for_status() entries = [] tree = BeautifulSoup(response.text) rows = tree.find('table').find_all('tr') for row in rows: columns = row.find_all("td") date = columns[0].string time = columns[1].string message = columns[2].find("a").string merged = "{} {} {}".format(date, time, message.encode("UTF-8")) msg_hash = hashlib.md5(merged).hexdigest() entries.append(LogEntry(date, time, message, msg_hash)) return entries
<SYSTEM_TASK:> Returns True if the Hawk nonce has been seen already. <END_TASK> <USER_TASK:> Description: def seen_nonce(id, nonce, timestamp): """ Returns True if the Hawk nonce has been seen already. """
key = '{id}:{n}:{ts}'.format(id=id, n=nonce, ts=timestamp) if cache.get(key): log.warning('replay attack? already processed nonce {k}' .format(k=key)) return True else: log.debug('caching nonce {k}'.format(k=key)) cache.set(key, True, # We only need the nonce until the message itself expires. # This also adds a little bit of padding. timeout=getattr(settings, 'HAWK_MESSAGE_EXPIRATION', default_message_expiration) + 5) return False
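A minimal in-memory sketch of the same replay check without Django's cache; the dict-of-deadlines bookkeeping and the 360-second expiration are simplifications made for this example, not the original's settings.

import time

_seen = {}

def seen_nonce(id, nonce, timestamp, expiration=360):
    key = '{id}:{n}:{ts}'.format(id=id, n=nonce, ts=timestamp)
    now = time.time()
    # forget nonces whose messages have expired anyway
    for stale in [k for k, deadline in _seen.items() if deadline < now]:
        del _seen[stale]
    if key in _seen:
        return True   # replay: this exact nonce was already processed
    _seen[key] = now + expiration + 5
    return False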
<SYSTEM_TASK:> FritzBox SmartHome Tool <END_TASK> <USER_TASK:> Description: def cli(context, host, username, password): """ FritzBox SmartHome Tool \b Provides the following functions: - An easy-to-use library for querying SmartHome actors - This CLI tool for testing - A carbon client for piping data into Graphite """
context.obj = FritzBox(host, username, password)
<SYSTEM_TASK:> Switch an actor's power to ON <END_TASK> <USER_TASK:> Description: def switch_on(context, ain): """Switch an actor's power to ON"""
context.obj.login() actor = context.obj.get_actor_by_ain(ain) if actor: click.echo("Switching {} on".format(actor.name)) actor.switch_on() else: click.echo("Actor not found: {}".format(ain))
<SYSTEM_TASK:> Show system logs since last reboot <END_TASK> <USER_TASK:> Description: def logs(context, format): """Show system logs since last reboot"""
fritz = context.obj fritz.login() messages = fritz.get_logs() if format == "plain": for msg in messages: merged = "{} {} {}".format(msg.date, msg.time, msg.message.encode("UTF-8")) click.echo(merged) if format == "json": entries = [msg._asdict() for msg in messages] click.echo(json.dumps({ "entries": entries, }))
<SYSTEM_TASK:> URL of OpenShift that the builder will connect to <END_TASK> <USER_TASK:> Description: def get_builder_openshift_url(self): """ URL of OpenShift that the builder will connect to """
key = "builder_openshift_url" url = self._get_deprecated(key, self.conf_section, key) if url is None: logger.warning("%r not found, falling back to get_openshift_base_uri()", key) url = self.get_openshift_base_uri() return url
<SYSTEM_TASK:> set parameters according to specification <END_TASK> <USER_TASK:> Description: def set_params(self, **kwargs): """ set parameters according to specification these parameters are accepted: :param pulp_secret: str, resource name of pulp secret :param koji_target: str, koji tag with packages used to build the image :param kojiroot: str, URL from which koji packages are fetched :param kojihub: str, URL of the koji hub :param koji_certs_secret: str, resource name of secret that holds the koji certificates :param koji_task_id: int, Koji Task that created this build config :param flatpak: if we should build a Flatpak OCI Image :param filesystem_koji_task_id: int, Koji Task that created the base filesystem :param pulp_registry: str, name of pulp registry in dockpulp.conf :param sources_command: str, command used to fetch dist-git sources :param architecture: str, architecture we are building for :param vendor: str, vendor name :param build_host: str, host the build will run on or None for auto :param authoritative_registry: str, the docker registry authoritative for this image :param distribution_scope: str, distribution scope for this image (private, authoritative-source-only, restricted, public) :param use_auth: bool, use auth from atomic-reactor? :param platform_node_selector: dict, a nodeselector for a specific platform :param platform_descriptors: dict, platforms and their architectures and enable_v1 settings :param scratch_build_node_selector: dict, a nodeselector for scratch builds :param explicit_build_node_selector: dict, a nodeselector for explicit builds :param auto_build_node_selector: dict, a nodeselector for auto builds :param isolated_build_node_selector: dict, a nodeselector for isolated builds :param is_auto: bool, indicates if build is auto build :param parent_images_digests: dict, mapping image names with tags to platform specific digests, example: {'registry.fedorahosted.org/fedora:29': {'x86_64': 'registry.fedorahosted.org/fedora@sha256:....'} } """
# Here we cater to the koji "scratch" build type, this will disable # all plugins that might cause importing of data to koji self.scratch = kwargs.pop('scratch', False) # When true, it indicates build was automatically started by # OpenShift via a trigger, for instance ImageChangeTrigger self.is_auto = kwargs.pop('is_auto', False) # An isolated build is meant to patch a certain release and not # update transient tags in container registry self.isolated = kwargs.pop('isolated', False) self.validate_build_variation() self.base_image = kwargs.get('base_image') self.platform_node_selector = kwargs.get('platform_node_selector', {}) self.platform_descriptors = kwargs.get('platform_descriptors', {}) self.scratch_build_node_selector = kwargs.get('scratch_build_node_selector', {}) self.explicit_build_node_selector = kwargs.get('explicit_build_node_selector', {}) self.auto_build_node_selector = kwargs.get('auto_build_node_selector', {}) self.isolated_build_node_selector = kwargs.get('isolated_build_node_selector', {}) logger.debug("setting params '%s' for %s", kwargs, self.spec) self.spec.set_params(**kwargs) self.osbs_api = kwargs.pop('osbs_api')
<SYSTEM_TASK:> Sets secret for plugin; if no plugin is specified, also sets the general secret <END_TASK> <USER_TASK:> Description: def set_secret_for_plugin(self, secret, plugin=None, mount_path=None): """ Sets secret for plugin; if no plugin is specified, it will also set the general secret :param secret: str, secret name :param plugin: tuple, (plugin type, plugin name, argument name) :param mount_path: str, mount path of secret """
has_plugin_conf = False if plugin is not None: has_plugin_conf = self.dj.dock_json_has_plugin_conf(plugin[0], plugin[1]) if 'secrets' in self.template['spec']['strategy']['customStrategy']: if not plugin or has_plugin_conf: custom = self.template['spec']['strategy']['customStrategy'] if mount_path: secret_path = mount_path else: secret_path = os.path.join(SECRETS_PATH, secret) logger.info("Configuring %s secret at %s", secret, secret_path) existing = [secret_mount for secret_mount in custom['secrets'] if secret_mount['secretSource']['name'] == secret] if existing: logger.debug("secret %s already set", secret) else: custom['secrets'].append({ 'secretSource': { 'name': secret, }, 'mountPath': secret_path, }) # there's no need to set args if no plugin secret specified # this is used in tag_and_push plugin, as it sets secret path # for each registry separately if plugin and plugin[2] is not None: self.dj.dock_json_set_arg(*(plugin + (secret_path,))) else: logger.debug("not setting secret for unused plugin %s", plugin[1])
<SYSTEM_TASK:> Remove trigger-related plugins when needed <END_TASK> <USER_TASK:> Description: def adjust_for_triggers(self): """Remove trigger-related plugins when needed If there are no triggers defined, it's assumed the feature is disabled and all trigger-related plugins are removed. If there are triggers defined, and this is a custom base image, some trigger-related plugins do not apply. Additionally, this method ensures that custom base images never have triggers since triggering a base image rebuild is not a valid scenario. """
triggers = self.template['spec'].get('triggers', []) remove_plugins = [ ("prebuild_plugins", "check_and_set_rebuild"), ("prebuild_plugins", "stop_autorebuild_if_disabled"), ] should_remove = False if triggers and (self.is_custom_base_image() or self.is_from_scratch_image()): if self.is_custom_base_image(): msg = "removing %s from request because custom base image" elif self.is_from_scratch_image(): msg = 'removing %s from request because FROM scratch image' del self.template['spec']['triggers'] should_remove = True elif not triggers: msg = "removing %s from request because there are no triggers" should_remove = True if should_remove: for when, which in remove_plugins: logger.info(msg, which) self.dj.remove_plugin(when, which)
<SYSTEM_TASK:> Disable plugins to handle builds depending on whether <END_TASK> <USER_TASK:> Description: def adjust_for_custom_base_image(self): """ Disable plugins to handle builds depending on whether or not this is a build from a custom base image. """
plugins = [] if self.is_custom_base_image(): # Plugins irrelevant to building base images. plugins.append(("prebuild_plugins", "pull_base_image")) plugins.append(("prebuild_plugins", "koji_parent")) plugins.append(("prebuild_plugins", "inject_parent_image")) msg = "removing %s from custom image build request" else: # Plugins not needed for building non base images. plugins.append(("prebuild_plugins", "add_filesystem")) msg = "removing %s from non custom image build request" for when, which in plugins: logger.info(msg, which) self.dj.remove_plugin(when, which)
<SYSTEM_TASK:> if there is a yum repo specified, don't pick stuff from koji <END_TASK> <USER_TASK:> Description: def render_koji(self): """ if there is a yum repo specified, don't pick stuff from koji """
phase = 'prebuild_plugins' plugin = 'koji' if not self.dj.dock_json_has_plugin_conf(phase, plugin): return if self.spec.yum_repourls.value: logger.info("removing koji from request " "because there is yum repo specified") self.dj.remove_plugin(phase, plugin) elif not (self.spec.koji_target.value and self.spec.kojiroot.value and self.spec.kojihub.value): logger.info("removing koji from request as not specified") self.dj.remove_plugin(phase, plugin) else: self.dj.dock_json_set_arg(phase, plugin, "target", self.spec.koji_target.value) self.dj.dock_json_set_arg(phase, plugin, "root", self.spec.kojiroot.value) self.dj.dock_json_set_arg(phase, plugin, "hub", self.spec.kojihub.value) if self.spec.proxy.value: self.dj.dock_json_set_arg(phase, plugin, "proxy", self.spec.proxy.value)
<SYSTEM_TASK:> if we have smtp_host and smtp_from, configure sendmail plugin, <END_TASK> <USER_TASK:> Description: def render_sendmail(self): """ if we have smtp_host and smtp_from, configure sendmail plugin, else remove it """
phase = 'exit_plugins' plugin = 'sendmail' if not self.dj.dock_json_has_plugin_conf(phase, plugin): return if self.spec.smtp_host.value and self.spec.smtp_from.value: self.dj.dock_json_set_arg(phase, plugin, 'url', self.spec.builder_openshift_url.value) self.dj.dock_json_set_arg(phase, plugin, 'smtp_host', self.spec.smtp_host.value) self.dj.dock_json_set_arg(phase, plugin, 'from_address', self.spec.smtp_from.value) else: logger.info("removing sendmail from request, " "requires smtp_host and smtp_from") self.dj.remove_plugin(phase, plugin) return if self.spec.kojihub.value and self.spec.kojiroot.value: self.dj.dock_json_set_arg(phase, plugin, 'koji_hub', self.spec.kojihub.value) self.dj.dock_json_set_arg(phase, plugin, "koji_root", self.spec.kojiroot.value) if self.spec.smtp_to_submitter.value: self.dj.dock_json_set_arg(phase, plugin, 'to_koji_submitter', self.spec.smtp_to_submitter.value) if self.spec.smtp_to_pkgowner.value: self.dj.dock_json_set_arg(phase, plugin, 'to_koji_pkgowner', self.spec.smtp_to_pkgowner.value) if self.spec.smtp_additional_addresses.value: self.dj.dock_json_set_arg(phase, plugin, 'additional_addresses', self.spec.smtp_additional_addresses.value) if self.spec.smtp_error_addresses.value: self.dj.dock_json_set_arg(phase, plugin, 'error_addresses', self.spec.smtp_error_addresses.value) if self.spec.smtp_email_domain.value: self.dj.dock_json_set_arg(phase, plugin, 'email_domain', self.spec.smtp_email_domain.value)
<SYSTEM_TASK:> If a pulp registry is specified, use pulp_pull plugin <END_TASK> <USER_TASK:> Description: def render_pulp_pull(self): """ If a pulp registry is specified, use pulp_pull plugin """
# pulp_pull is a multi-phase plugin phases = ('postbuild_plugins', 'exit_plugins') plugin = 'pulp_pull' for phase in phases: if not self.dj.dock_json_has_plugin_conf(phase, plugin): continue pulp_registry = self.spec.pulp_registry.value if not pulp_registry: logger.info("removing %s from request, requires pulp_registry", plugin) self.dj.remove_plugin(phase, plugin) continue if not self.spec.kojihub.value: logger.info('Removing %s because no kojihub was specified', plugin) self.dj.remove_plugin(phase, plugin) continue if self.spec.prefer_schema1_digest.value is not None: self.dj.dock_json_set_arg(phase, 'pulp_pull', 'expect_v2schema2', not self.spec.prefer_schema1_digest.value)
<SYSTEM_TASK:> If a pulp registry is specified, use the pulp plugin as well as the <END_TASK> <USER_TASK:> Description: def render_pulp_sync(self): """ If a pulp registry is specified, use the pulp plugin as well as the delete_from_registry to delete the image after sync """
if not self.dj.dock_json_has_plugin_conf('postbuild_plugins', 'pulp_sync'): return pulp_registry = self.spec.pulp_registry.value # Find which registry to use docker_registry = None registry_secret = None registries = zip_longest(self.spec.registry_uris.value, self.spec.registry_secrets.value) for registry, secret in registries: if registry.version == 'v2': # First specified v2 registry is the one we'll tell pulp # to sync from. Keep the http prefix -- pulp wants it. docker_registry = registry.uri registry_secret = secret logger.info("using docker v2 registry %s for pulp_sync", docker_registry) break if pulp_registry and docker_registry: self.dj.dock_json_set_arg('postbuild_plugins', 'pulp_sync', 'pulp_registry_name', pulp_registry) self.dj.dock_json_set_arg('postbuild_plugins', 'pulp_sync', 'docker_registry', docker_registry) if registry_secret: self.set_secret_for_plugin(registry_secret, plugin=('postbuild_plugins', 'pulp_sync', 'registry_secret_path')) # Verify we have a pulp secret if self.spec.pulp_secret.value is None: raise OsbsValidationException("Pulp registry specified " "but no auth config") source_registry = self.spec.source_registry_uri.value perform_delete = (source_registry is None or source_registry.docker_uri != registry.docker_uri) if perform_delete: push_conf = self.dj.dock_json_get_plugin_conf('exit_plugins', 'delete_from_registry') args = push_conf.setdefault('args', {}) delete_registries = args.setdefault('registries', {}) placeholder = '{{REGISTRY_URI}}' # use passed in params like 'insecure' if available if placeholder in delete_registries: regdict = delete_registries[placeholder].copy() del delete_registries[placeholder] else: regdict = {} if registry_secret: regdict['secret'] = \ os.path.join(SECRETS_PATH, registry_secret) # tag_and_push configured the registry secret, no neet to set it again delete_registries[docker_registry] = regdict self.dj.dock_json_set_arg('exit_plugins', 'delete_from_registry', 'registries', delete_registries) else: logger.info("removing delete_from_registry from request, " "source and target registry are identical") self.dj.remove_plugin("exit_plugins", "delete_from_registry") else: # If no pulp registry is specified, don't run the pulp plugin logger.info("removing pulp_sync+delete_from_registry from request, " "requires pulp_registry and a v2 registry") self.dj.remove_plugin("postbuild_plugins", "pulp_sync") self.dj.remove_plugin("exit_plugins", "delete_from_registry")
<SYSTEM_TASK:> Configure the group_manifests plugin. Group is always set to false for now. <END_TASK> <USER_TASK:> Description: def render_group_manifests(self): """ Configure the group_manifests plugin. Group is always set to false for now. """
if not self.dj.dock_json_has_plugin_conf('postbuild_plugins', 'group_manifests'): return push_conf = self.dj.dock_json_get_plugin_conf('postbuild_plugins', 'group_manifests') args = push_conf.setdefault('args', {}) # modify registries in place registries = args.setdefault('registries', {}) placeholder = '{{REGISTRY_URI}}' if placeholder in registries: for registry, secret in zip_longest(self.spec.registry_uris.value, self.spec.registry_secrets.value): if not registry.uri: continue regdict = registries[placeholder].copy() regdict['version'] = registry.version if secret: regdict['secret'] = os.path.join(SECRETS_PATH, secret) registries[registry.docker_uri] = regdict del registries[placeholder] self.dj.dock_json_set_arg('postbuild_plugins', 'group_manifests', 'group', self.spec.group_manifests.value) goarch = {} for platform, architecture in self.platform_descriptors.items(): goarch[platform] = architecture['architecture'] self.dj.dock_json_set_arg('postbuild_plugins', 'group_manifests', 'goarch', goarch)
<SYSTEM_TASK:> Customize prod_inner for site-specific customizations <END_TASK> <USER_TASK:> Description: def render_customizations(self): """ Customize prod_inner for site-specific customizations """
disable_plugins = self.customize_conf.get('disable_plugins', []) if not disable_plugins: logger.debug("No site-specific plugins to disable") else: for plugin_dict in disable_plugins: try: self.dj.remove_plugin( plugin_dict['plugin_type'], plugin_dict['plugin_name'] ) logger.debug( "site-specific plugin disabled -> Type:{} Name:{}".format( plugin_dict['plugin_type'], plugin_dict['plugin_name'] ) ) except KeyError: # Malformed config logger.debug("Invalid custom configuration found for disable_plugins") enable_plugins = self.customize_conf.get('enable_plugins', []) if not enable_plugins: logger.debug("No site-specific plugins to enable") else: for plugin_dict in enable_plugins: try: self.dj.add_plugin( plugin_dict['plugin_type'], plugin_dict['plugin_name'], plugin_dict['plugin_args'] ) logger.debug( "site-specific plugin enabled -> Type:{} Name:{} Args: {}".format( plugin_dict['plugin_type'], plugin_dict['plugin_name'], plugin_dict['plugin_args'] ) ) except KeyError: # Malformed config logger.debug("Invalid custom configuration found for enable_plugins")
<SYSTEM_TASK:> Only used for setting up the testing framework. <END_TASK> <USER_TASK:> Description: def setup_json_capture(osbs, os_conf, capture_dir): """ Only used for setting up the testing framework. """
try: os.mkdir(capture_dir) except OSError: pass finally: osbs.os._con.request = ResponseSaver(capture_dir, os_conf.get_openshift_api_uri(), os_conf.get_k8s_api_uri(), osbs.os._con.request).request
<SYSTEM_TASK:> get the length of the longest value in a specific column <END_TASK> <USER_TASK:> Description: def _longest_val_in_column(self, col): """ get the length of the longest value in a specific column :param col: str, column name :return: int """
try: # +2 is for implicit separator return max([len(x[col]) for x in self.table if x[col]]) + 2 except KeyError: logger.error("there is no column %r", col) raise
<SYSTEM_TASK:> initialize all values based on provided input <END_TASK> <USER_TASK:> Description: def _init(self): """ initialize all values based on provided input :return: None """
self.col_count = len(self.col_list) # list of lengths of longest entries in columns self.col_longest = self.get_all_longest_col_lengths() self.data_length = sum(self.col_longest.values()) if self.terminal_width > 0: # free space is space which should be equally distributed across all columns # self.terminal_width -- terminal is our canvas # - self.data_length -- subtract length of content (the actual data) # - self.col_count + 1 -- table lines are not part of free space, their width is # (number of columns - 1) self.total_free_space = (self.terminal_width - self.data_length) - self.col_count + 1 if self.total_free_space <= 0: self.total_free_space = None else: self.default_column_space = self.total_free_space // self.col_count self.default_column_space_remainder = self.total_free_space % self.col_count logger.debug("total free space: %d, column space: %d, remainder: %d, columns: %d", self.total_free_space, self.default_column_space, self.default_column_space_remainder, self.col_count) else: self.total_free_space = None
<SYSTEM_TASK:> count all values needed to display whole table <END_TASK> <USER_TASK:> Description: def _count_sizes(self): """ count all values needed to display whole table <><---terminal-width-----------><> <> HEADER | HEADER2 | HEADER3 <> <>---------+----------+---------<> kudos to PostgreSQL developers :return: None """
format_list = [] header_sepa_format_list = [] # actual widths of columns self.col_widths = {} for col in self.col_list: col_length = self.col_longest[col] col_width = col_length + self._separate() # -2 is for implicit separator -- spaces around format_list.append(" {%s:%d} " % (col, col_width - 2)) header_sepa_format_list.append("{%s:%d}" % (col, col_width)) self.col_widths[col] = col_width logger.debug("column widths %s", self.col_widths) self.format_str = "|".join(format_list) self.header_format_str = "+".join(header_sepa_format_list) self.header_data = {} for k in self.col_widths: self.header_data[k] = "-" * self.col_widths[k]
<SYSTEM_TASK:> iterate over all columns and get their longest values <END_TASK> <USER_TASK:> Description: def get_all_longest_col_lengths(self): """ iterate over all columns and get their longest values :return: dict, {"column_name": 132} """
response = {} for col in self.col_list: response[col] = self._longest_val_in_column(col) return response
<SYSTEM_TASK:> get a width of separator for current column <END_TASK> <USER_TASK:> Description: def _separate(self): """ get a width of separator for current column :return: int """
if self.total_free_space is None: return 0 else: sepa = self.default_column_space # we need to distribute remainders if self.default_column_space_remainder > 0: sepa += 1 self.default_column_space_remainder -= 1 logger.debug("remainder: %d, separator: %d", self.default_column_space_remainder, sepa) return sepa
<SYSTEM_TASK:> print provided table <END_TASK> <USER_TASK:> Description: def render(self): """ print provided table :return: None """
print(self.format_str.format(**self.header), file=sys.stderr) print(self.header_format_str.format(**self.header_data), file=sys.stderr) for row in self.data: print(self.format_str.format(**row))
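A toy, standalone sketch of the width computation and format-string idea the table class above uses, with made-up rows and without the free-space distribution logic.

rows = [{"name": "osbs", "status": "complete"},
        {"name": "atomic-reactor", "status": "failed"}]
cols = ["name", "status"]
header = {c: c for c in cols}

# the widest cell (or header) per column drives the format string
widths = {c: max(len(r[c]) for r in rows + [header]) for c in cols}
fmt = " | ".join("{%s:%d}" % (c, widths[c]) for c in cols)
sepa = "-+-".join("-" * widths[c] for c in cols)

print(fmt.format(**header))
print(sepa)
for row in rows:
    print(fmt.format(**row))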
<SYSTEM_TASK:> set parameters in the user parameters <END_TASK> <USER_TASK:> Description: def set_params(self, **kwargs): """ set parameters in the user parameters these parameters are accepted: :param git_uri: str, uri of the git repository for the source :param git_ref: str, commit ID of the branch to be pulled :param git_branch: str, branch name of the branch to be pulled :param base_image: str, name of the parent image :param name_label: str, label of the parent image :param user: str, name of the user requesting the build :param component: str, name of the component :param release: str, :param build_image: str, :param build_imagestream: str, :param build_from: str, :param build_type: str, orchestrator or worker :param platforms: list of str, platforms to build on :param platform: str, platform :param koji_target: str, koji tag with packages used to build the image :param koji_task_id: str, koji ID :param koji_parent_build: str, :param koji_upload_dir: str, koji directory where the completed image will be uploaded :param flatpak: if we should build a Flatpak OCI Image :param flatpak_base_image: str, name of the Flatpak OCI Image :param reactor_config_map: str, name of the config map containing the reactor environment :param reactor_config_override: dict, data structure for reactor config to be injected as an environment variable into a worker build; when used, reactor_config_map is ignored. :param yum_repourls: list of str, uris of the yum repos to pull from :param signing_intent: bool, True to sign the resulting image :param compose_ids: list of int, ODCS composes to use instead of generating new ones :param filesystem_koji_task_id: int, Koji Task that created the base filesystem :param platform_node_selector: dict, a nodeselector for a specific platform :param scratch_build_node_selector: dict, a nodeselector for scratch builds :param explicit_build_node_selector: dict, a nodeselector for explicit builds :param auto_build_node_selector: dict, a nodeselector for auto builds :param isolated_build_node_selector: dict, a nodeselector for isolated builds :param operator_manifests_extract_platform: str, indicates which platform should upload operator manifests to koji :param parent_images_digests: dict, mapping image digests to names and platforms """
# Here we cater to the koji "scratch" build type, this will disable # all plugins that might cause importing of data to koji self.scratch = kwargs.get('scratch') # When true, it indicates build was automatically started by # OpenShift via a trigger, for instance ImageChangeTrigger self.is_auto = kwargs.pop('is_auto', False) # An isolated build is meant to patch a certain release and not # update transient tags in container registry self.isolated = kwargs.get('isolated') self.osbs_api = kwargs.pop('osbs_api', None) self.validate_build_variation() self.base_image = kwargs.get('base_image') self.platform_node_selector = kwargs.get('platform_node_selector', {}) self.scratch_build_node_selector = kwargs.get('scratch_build_node_selector', {}) self.explicit_build_node_selector = kwargs.get('explicit_build_node_selector', {}) self.auto_build_node_selector = kwargs.get('auto_build_node_selector', {}) self.isolated_build_node_selector = kwargs.get('isolated_build_node_selector', {}) logger.debug("now setting params '%s' for user_params", kwargs) self.user_params.set_params(**kwargs) self.source_registry = None self.organization = None
<SYSTEM_TASK:> Sets data from reactor config <END_TASK> <USER_TASK:> Description: def set_data_from_reactor_config(self): """ Sets data from reactor config """
reactor_config_override = self.user_params.reactor_config_override.value reactor_config_map = self.user_params.reactor_config_map.value data = None if reactor_config_override: data = reactor_config_override elif reactor_config_map: config_map = self.osbs_api.get_config_map(reactor_config_map) data = config_map.get_data_by_key('config.yaml') if not data: if self.user_params.flatpak.value: raise OsbsValidationException("flatpak_base_image must be provided") else: return source_registry_key = 'source_registry' registry_organization_key = 'registries_organization' req_secrets_key = 'required_secrets' token_secrets_key = 'worker_token_secrets' flatpak_key = 'flatpak' flatpak_base_image_key = 'base_image' if source_registry_key in data: self.source_registry = data[source_registry_key] if registry_organization_key in data: self.organization = data[registry_organization_key] if self.user_params.flatpak.value: flatpack_base_image = data.get(flatpak_key, {}).get(flatpak_base_image_key, None) if flatpack_base_image: self.base_image = flatpack_base_image self.user_params.base_image.value = flatpack_base_image else: raise OsbsValidationException("flatpak_base_image must be provided") required_secrets = data.get(req_secrets_key, []) token_secrets = data.get(token_secrets_key, []) self._set_required_secrets(required_secrets, token_secrets)
<SYSTEM_TASK:> Sets required secrets <END_TASK> <USER_TASK:> Description: def _set_required_secrets(self, required_secrets, token_secrets): """ Sets required secrets """
if self.user_params.build_type.value == BUILD_TYPE_ORCHESTRATOR: required_secrets += token_secrets if not required_secrets: return secrets = self.template['spec']['strategy']['customStrategy'].setdefault('secrets', []) existing = set(secret_mount['secretSource']['name'] for secret_mount in secrets) required_secrets = set(required_secrets) already_set = required_secrets.intersection(existing) if already_set: logger.debug("secrets %s are already set", already_set) for secret in required_secrets - existing: secret_path = os.path.join(SECRETS_PATH, secret) logger.info("Configuring %s secret at %s", secret, secret_path) secrets.append({ 'secretSource': { 'name': secret, }, 'mountPath': secret_path, })
<SYSTEM_TASK:> Remove certain plugins in order to handle the "isolated build" <END_TASK> <USER_TASK:> Description: def adjust_for_isolated(self): """ Remove certain plugins in order to handle the "isolated build" scenario. """
if self.user_params.isolated.value: remove_plugins = [ ("prebuild_plugins", "check_and_set_rebuild"), ("prebuild_plugins", "stop_autorebuild_if_disabled") ] for when, which in remove_plugins: self.pt.remove_plugin(when, which, 'removed from isolated build request')
<SYSTEM_TASK:> Remove plugins that don't work when building Flatpaks <END_TASK> <USER_TASK:> Description: def adjust_for_flatpak(self): """ Remove plugins that don't work when building Flatpaks """
if self.user_params.flatpak.value: remove_plugins = [ ("prebuild_plugins", "resolve_composes"), # We'll extract the filesystem anyways for a Flatpak instead of exporting # the docker image directly, so squash just slows things down. ("prepublish_plugins", "squash"), # Pulp can't currently handle Flatpaks, which are OCI images ("postbuild_plugins", "pulp_push"), ("postbuild_plugins", "pulp_tag"), ("postbuild_plugins", "pulp_sync"), ("exit_plugins", "pulp_publish"), ("exit_plugins", "pulp_pull"), # delete_from_registry is used for deleting builds from the temporary registry # that pulp_sync mirrors from. ("exit_plugins", "delete_from_registry"), ] for when, which in remove_plugins: self.pt.remove_plugin(when, which, 'not needed for flatpak build')
<SYSTEM_TASK:> Customize template for site user specified customizations <END_TASK> <USER_TASK:> Description: def render_customizations(self): """ Customize template for site user specified customizations """
disable_plugins = self.pt.customize_conf.get('disable_plugins', []) if not disable_plugins: logger.debug('No site-user specified plugins to disable') else: for plugin in disable_plugins: try: self.pt.remove_plugin(plugin['plugin_type'], plugin['plugin_name'], 'disabled at user request') except KeyError: # Malformed config logger.info('Invalid custom configuration found for disable_plugins') enable_plugins = self.pt.customize_conf.get('enable_plugins', []) if not enable_plugins: logger.debug('No site-user specified plugins to enable"') else: for plugin in enable_plugins: try: msg = 'enabled at user request' self.pt.add_plugin(plugin['plugin_type'], plugin['plugin_name'], plugin['plugin_args'], msg) except KeyError: # Malformed config logger.info('Invalid custom configuration found for enable_plugins')
<SYSTEM_TASK:> if there is a yum repo in the user params, don't pick stuff from koji <END_TASK> <USER_TASK:> Description: def render_koji(self): """ if there is a yum repo in the user params, don't pick stuff from koji """
phase = 'prebuild_plugins' plugin = 'koji' if not self.pt.has_plugin_conf(phase, plugin): return if self.user_params.yum_repourls.value: self.pt.remove_plugin(phase, plugin, 'there is a yum repo user parameter') elif not self.pt.set_plugin_arg_valid(phase, plugin, "target", self.user_params.koji_target.value): self.pt.remove_plugin(phase, plugin, 'no koji target supplied in user parameters')
<SYSTEM_TASK:> If the check_and_set_platforms plugin is present, configure it <END_TASK> <USER_TASK:> Description: def render_check_and_set_platforms(self): """ If the check_and_set_platforms plugin is present, configure it """
phase = 'prebuild_plugins' plugin = 'check_and_set_platforms' if not self.pt.has_plugin_conf(phase, plugin): return if self.user_params.koji_target.value: self.pt.set_plugin_arg(phase, plugin, "koji_target", self.user_params.koji_target.value)
<SYSTEM_TASK:> Returns all build configs matching a given set of label selectors. It is up to the <END_TASK> <USER_TASK:> Description: def get_all_build_configs_by_labels(self, label_selectors): """ Returns all build configs matching a given set of label selectors. It is up to the calling function to filter the results. """
labels = ['%s=%s' % (field, value) for field, value in label_selectors] labels = ','.join(labels) url = self._build_url("buildconfigs/", labelSelector=labels) return self._get(url).json()['items']
<SYSTEM_TASK:> Returns a build config matching the given label <END_TASK> <USER_TASK:> Description: def get_build_config_by_labels(self, label_selectors): """ Returns a build config matching the given label selectors. This method will raise OsbsException if not exactly one build config is found. """
items = self.get_all_build_configs_by_labels(label_selectors) if not items: raise OsbsException( "Build config not found for labels: %r" % (label_selectors, )) if len(items) > 1: raise OsbsException( "More than one build config found for labels: %r" % (label_selectors, )) return items[0]
<SYSTEM_TASK:> Returns a build config matching the given label selectors, filtering against <END_TASK> <USER_TASK:> Description: def get_build_config_by_labels_filtered(self, label_selectors, filter_key, filter_value): """ Returns a build config matching the given label selectors, filtering against another predetermined value. This method will raise OsbsException if not exactly one build config is found after filtering. """
items = self.get_all_build_configs_by_labels(label_selectors) if filter_value is not None: build_configs = [] for build_config in items: match_value = graceful_chain_get(build_config, *filter_key.split('.')) if filter_value == match_value: build_configs.append(build_config) items = build_configs if not items: raise OsbsException( "Build config not found for labels: %r" % (label_selectors, )) if len(items) > 1: raise OsbsException( "More than one build config found for labels: %r" % (label_selectors, )) return items[0]
<SYSTEM_TASK:> stream logs from build <END_TASK> <USER_TASK:> Description: def stream_logs(self, build_id): """ stream logs from build :param build_id: str :return: iterator """
kwargs = {'follow': 1} # If connection is closed within this many seconds, give up: min_idle_timeout = 60 # Stream logs, but be careful of the connection closing # due to idle timeout. In that case, try again until the # call returns more quickly than a reasonable timeout # would be set to. last_activity = time.time() while True: buildlogs_url = self._build_url("builds/%s/log/" % build_id, **kwargs) try: response = self._get(buildlogs_url, stream=1, headers={'Connection': 'close'}) check_response(response) for line in response.iter_lines(): last_activity = time.time() yield line # NOTE1: If self._get causes ChunkedEncodingError, ConnectionError, # or IncompleteRead to be raised, they'll be wrapped in # OsbsNetworkException or OsbsException # NOTE2: If iter_lines causes ChunkedEncodingError # or IncompleteRead to be raised, it'll simply be silenced. # NOTE3: An exception may be raised from # check_response(). In this case, exception will be # wrapped in OsbsException or OsbsNetworkException, # inspect cause to detect ConnectionError. except OsbsException as exc: if not isinstance(exc.cause, ConnectionError): raise idle = time.time() - last_activity logger.debug("connection closed after %ds", idle) if idle < min_idle_timeout: # Finish output return since = int(idle - 1) logger.debug("fetching logs starting from %ds ago", since) kwargs['sinceSeconds'] = since
<SYSTEM_TASK:> List builds matching criteria <END_TASK> <USER_TASK:> Description: def list_builds(self, build_config_id=None, koji_task_id=None, field_selector=None, labels=None): """ List builds matching criteria :param build_config_id: str, only list builds created from BuildConfig :param koji_task_id: str, only list builds for Koji Task ID :param field_selector: str, field selector for query :param labels: dict, only list builds with the given labels :return: HttpResponse """
query = {} selector = '{key}={value}' label = {} if labels is not None: label.update(labels) if build_config_id is not None: label['buildconfig'] = build_config_id if koji_task_id is not None: label['koji-task-id'] = str(koji_task_id) if label: query['labelSelector'] = ','.join([selector.format(key=key, value=value) for key, value in label.items()]) if field_selector is not None: query['fieldSelector'] = field_selector url = self._build_url("builds/", **query) return self._get(url)
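For illustration, the labelSelector string the method above assembles for build_config_id="bc-1" and koji_task_id=12345 looks like this (the key order follows dict iteration order).

label = {"buildconfig": "bc-1", "koji-task-id": "12345"}
selector = ",".join("{key}={value}".format(key=k, value=v) for k, v in label.items())
print(selector)  # buildconfig=bc-1,koji-task-id=12345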
<SYSTEM_TASK:> Prevent builds being scheduled and wait for running builds to finish. <END_TASK> <USER_TASK:> Description: def create_resource_quota(self, name, quota_json): """ Prevent builds being scheduled and wait for running builds to finish. :return: """
url = self._build_k8s_url("resourcequotas/") response = self._post(url, data=json.dumps(quota_json), headers={"Content-Type": "application/json"}) if response.status_code == http_client.CONFLICT: url = self._build_k8s_url("resourcequotas/%s" % name) response = self._put(url, data=json.dumps(quota_json), headers={"Content-Type": "application/json"}) check_response(response) return response
<SYSTEM_TASK:> adjust labels or annotations on object <END_TASK> <USER_TASK:> Description: def adjust_attributes_on_object(self, collection, name, things, values, how): """ adjust labels or annotations on object labels have to match RE: (([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])? and have at most 63 chars :param collection: str, object collection e.g. 'builds' :param name: str, name of object :param things: str, 'labels' or 'annotations' :param values: dict, values to set :param how: callable, how to adjust the values e.g. self._replace_metadata_things :return: """
url = self._build_url("%s/%s" % (collection, name)) response = self._get(url) logger.debug("before modification: %s", response.content) build_json = response.json() how(build_json['metadata'], things, values) response = self._put(url, data=json.dumps(build_json), use_json=True) check_response(response) return response
<SYSTEM_TASK:> set annotations on build object <END_TASK> <USER_TASK:> Description: def update_annotations_on_build(self, build_id, annotations): """ set annotations on build object :param build_id: str, id of build :param annotations: dict, annotations to set :return: """
return self.adjust_attributes_on_object('builds', build_id, 'annotations', annotations, self._update_metadata_things)
<SYSTEM_TASK:> Load table data from a Google Spreadsheet. <END_TASK> <USER_TASK:> Description: def load(self): """ Load table data from a Google Spreadsheet. This method considers :py:attr:`.source` as a path to the credential JSON file to access Google Sheets API. The method automatically searches for the header row starting from :py:attr:`.start_row`. The condition of the header row is that all of the columns have a value (except empty columns). :return: Loaded table data. Return one |TableData| for each sheet in the workbook. The table name for data will be determined by :py:meth:`~.GoogleSheetsTableLoader.make_table_name`. :rtype: iterator of |TableData| :raises pytablereader.DataError: If the header row is not found. :raises pytablereader.OpenError: If the spreadsheet is not found. """
import gspread from oauth2client.service_account import ServiceAccountCredentials self._validate_table_name() self._validate_title() scope = ["https://spreadsheets.google.com/feeds", "https://www.googleapis.com/auth/drive"] credentials = ServiceAccountCredentials.from_json_keyfile_name(self.source, scope) gc = gspread.authorize(credentials) try: for worksheet in gc.open(self.title).worksheets(): self._worksheet = worksheet self.__all_values = [row for row in worksheet.get_all_values()] if self._is_empty_sheet(): continue try: self.__strip_empty_col() except ValueError: continue value_matrix = self.__all_values[self._get_start_row_idx() :] try: headers = value_matrix[0] rows = value_matrix[1:] except IndexError: continue self.inc_table_count() yield TableData( self.make_table_name(), headers, rows, dp_extractor=self.dp_extractor, type_hints=self._extract_type_hints(headers), ) except gspread.exceptions.SpreadsheetNotFound: raise OpenError("spreadsheet '{}' not found".format(self.title)) except gspread.exceptions.APIError as e: raise APIError(e)
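A hedged usage sketch assuming this loader is pytablereader's GoogleSheetsTableLoader; the constructor argument, attribute names and file paths are illustrative and may not match the installed version exactly.

import pytablereader as ptr

loader = ptr.GoogleSheetsTableLoader("credentials.json")  # path to the service-account JSON
loader.title = "sample_sheet"                             # spreadsheet title to open

for table_data in loader.load():
    print(table_data.table_name, table_data.headers)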
<SYSTEM_TASK:> Performs update of given `orig` BuildConfig with values from `new` BuildConfig. <END_TASK> <USER_TASK:> Description: def buildconfig_update(orig, new, remove_nonexistent_keys=False): """Performs update of given `orig` BuildConfig with values from `new` BuildConfig. Both BuildConfigs have to be represented as `dict`s. This function: - adds all key/value pairs to `orig` from `new` that are missing - replaces values in `orig` for keys that are in both - removes key/value pairs from `orig` for keys that are not in `new`, but only in dicts nested inside `strategy` key (see https://github.com/projectatomic/osbs-client/pull/273#issuecomment-148038314) """
if isinstance(orig, dict) and isinstance(new, dict): clean_triggers(orig, new) if remove_nonexistent_keys: missing = set(orig.keys()) - set(new.keys()) for k in missing: orig.pop(k) for k, v in new.items(): if k == 'strategy': remove_nonexistent_keys = True if isinstance(orig.get(k), dict) and isinstance(v, dict): buildconfig_update(orig[k], v, remove_nonexistent_keys) else: orig[k] = v
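An illustration with plain dicts (clean_triggers from the same module is assumed to be a no-op for this input): keys nested under 'strategy' that are absent from `new` get dropped, while top-level keys are merged.

orig = {"metadata": {"name": "bc-1"},
        "strategy": {"customStrategy": {"env": [], "exposeDockerSocket": True}}}
new = {"metadata": {"labels": {"git-branch": "master"}},
       "strategy": {"customStrategy": {"env": [{"name": "USER_PARAMS"}]}}}

buildconfig_update(orig, new)
# orig["metadata"] now holds both "name" and "labels";
# orig["strategy"]["customStrategy"] keeps only "env" (exposeDockerSocket was removed).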
<SYSTEM_TASK:> clone provided git repo to target_dir, optionally checkout provided commit <END_TASK> <USER_TASK:> Description: def checkout_git_repo(git_url, target_dir=None, commit=None, retry_times=GIT_MAX_RETRIES, branch=None, depth=None): """ clone provided git repo to target_dir, optionally checkout provided commit yield the ClonedRepoData and delete the repo when finished :param git_url: str, git repo to clone :param target_dir: str, filesystem path where the repo should be cloned :param commit: str, commit to checkout, SHA-1 or ref :param retry_times: int, number of retries for git clone :param branch: str, optional branch of the commit, required if depth is provided :param depth: int, optional expected depth :return: ClonedRepoData with the clone directory, commit ID of HEAD and commit depth """
tmpdir = tempfile.mkdtemp() target_dir = target_dir or os.path.join(tmpdir, "repo") try: yield clone_git_repo(git_url, target_dir, commit, retry_times, branch, depth) finally: shutil.rmtree(tmpdir)
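Usage sketch, assuming the function is exposed as a context manager (it yields once and cleans up in the finally block); the URL is a placeholder.

with checkout_git_repo("https://example.com/org/repo.git", commit="master") as repo_data:
    # ClonedRepoData carries (clone directory, commit ID of HEAD, commit depth)
    repo_dir, commit_id, depth = repo_data
    print(repo_dir, commit_id, depth)
# the temporary clone is deleted once the block exits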
<SYSTEM_TASK:> clone provided git repo to target_dir, optionally checkout provided commit <END_TASK> <USER_TASK:> Description: def clone_git_repo(git_url, target_dir=None, commit=None, retry_times=GIT_MAX_RETRIES, branch=None, depth=None): """ clone provided git repo to target_dir, optionally checkout provided commit :param git_url: str, git repo to clone :param target_dir: str, filesystem path where the repo should be cloned :param commit: str, commit to checkout, SHA-1 or ref :param retry_times: int, number of retries for git clone :param branch: str, optional branch of the commit, required if depth is provided :param depth: int, optional expected depth :return: ClonedRepoData with the clone directory, commit ID of HEAD and commit depth """
retry_delay = GIT_BACKOFF_FACTOR target_dir = target_dir or os.path.join(tempfile.mkdtemp(), "repo") commit = commit or "master" logger.info("cloning git repo '%s'", git_url) logger.debug("url = '%s', dir = '%s', commit = '%s'", git_url, target_dir, commit) cmd = ["git", "clone"] if branch: cmd += ["-b", branch, "--single-branch"] if depth: cmd += ["--depth", str(depth)] elif depth: logger.warning("branch not provided for %s, depth setting ignored", git_url) depth = None cmd += [git_url, target_dir] logger.debug("cloning '%s'", cmd) repo_commit = '' repo_depth = None for counter in range(retry_times + 1): try: # we are using check_output, even though we aren't using # the return value, but we will get 'output' in exception subprocess.check_output(cmd, stderr=subprocess.STDOUT) repo_commit, repo_depth = reset_git_repo(target_dir, commit, depth) break except subprocess.CalledProcessError as exc: if counter != retry_times: logger.info("retrying command '%s':\n '%s'", cmd, exc.output) time.sleep(retry_delay * (2 ** counter)) else: raise OsbsException("Unable to clone git repo '%s' " "branch '%s'" % (git_url, branch), cause=exc, traceback=sys.exc_info()[2]) return ClonedRepoData(target_dir, repo_commit, repo_depth)
<SYSTEM_TASK:> hard reset git clone in target_dir to given git_reference <END_TASK> <USER_TASK:> Description: def reset_git_repo(target_dir, git_reference, retry_depth=None): """ hard reset git clone in target_dir to given git_reference :param target_dir: str, filesystem path where the repo is cloned :param git_reference: str, any valid git reference :param retry_depth: int, if the repo was cloned shallowly (--depth), this is the expected depth of the commit :return: str and int, commit ID of HEAD and commit depth of git_reference """
deepen = retry_depth or 0 base_commit_depth = 0 for _ in range(GIT_FETCH_RETRY): try: if not deepen: cmd = ['git', 'rev-list', '--count', git_reference] base_commit_depth = int(subprocess.check_output(cmd, cwd=target_dir)) - 1 cmd = ["git", "reset", "--hard", git_reference] logger.debug("Resetting current HEAD: '%s'", cmd) subprocess.check_call(cmd, cwd=target_dir) break except subprocess.CalledProcessError: if not deepen: raise OsbsException('cannot find commit %s in repo %s' % (git_reference, target_dir)) deepen *= 2 cmd = ["git", "fetch", "--depth", str(deepen)] subprocess.check_call(cmd, cwd=target_dir) logger.debug("Couldn't find commit %s, increasing depth with '%s'", git_reference, cmd) else: raise OsbsException('cannot find commit %s in repo %s' % (git_reference, target_dir)) cmd = ["git", "rev-parse", "HEAD"] logger.debug("getting SHA-1 of provided ref '%s'", git_reference) commit_id = subprocess.check_output(cmd, cwd=target_dir, universal_newlines=True) commit_id = commit_id.strip() logger.info("commit ID = %s", commit_id) final_commit_depth = None if not deepen: cmd = ['git', 'rev-list', '--count', 'HEAD'] final_commit_depth = int(subprocess.check_output(cmd, cwd=target_dir)) - base_commit_depth return commit_id, final_commit_depth
<SYSTEM_TASK:> return seconds since the Epoch from an RFC 3339-formatted time string <END_TASK> <USER_TASK:> Description: def get_time_from_rfc3339(rfc3339): """ return seconds since the Epoch from an RFC 3339-formatted time string :param rfc3339: str, time in RFC 3339 format :return: float, seconds since the Epoch """
try: # py 3 dt = dateutil.parser.parse(rfc3339, ignoretz=False) return dt.timestamp() except NameError: # py 2 # Decode the RFC 3339 date with no fractional seconds (the # format Origin provides). Note that this will fail to parse # valid ISO8601 timestamps not in this exact format. time_tuple = strptime(rfc3339, '%Y-%m-%dT%H:%M:%SZ') return timegm(time_tuple)
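A quick illustration of the conversion; the timestamp is arbitrary and the expected value is shown as a comment rather than asserted:

# RFC 3339 timestamp without fractional seconds, as emitted by Origin
seconds = get_time_from_rfc3339('2017-12-01T06:22:00Z')
# seconds -> 1512109320.0, a float directly comparable with time.time()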
<SYSTEM_TASK:> return name string representing the given git repo and branch <END_TASK> <USER_TASK:> Description: def make_name_from_git(repo, branch, limit=53, separator='-', hash_size=5): """ return name string representing the given git repo and branch to be used as a build name. NOTE: Build name will be used to generate pods which have a limit of 64 characters and is composed as: <buildname>-<buildnumber>-<podsuffix> rhel7-1-build Assuming '-XXXX' (5 chars) and '-build' (6 chars) as default suffixes, name should be limited to 53 chars (64 - 11). OpenShift is very peculiar in which BuildConfig names it allows. For this reason, only certain characters are allowed. Any disallowed characters will be removed from repo and branch names. :param repo: str, the git repository to be used :param branch: str, the git branch to be used :param limit: int, max name length :param separator: str, used to separate the repo and branch in name :param hash_size: int, number of characters of the sha256-derived suffix appended to the name :return: str, name representing git repo and branch. """
branch = branch or 'unknown' full = urlparse(repo).path.lstrip('/') + branch repo = git_repo_humanish_part_from_uri(repo) shaval = sha256(full.encode('utf-8')).hexdigest() hash_str = shaval[:hash_size] limit = limit - len(hash_str) - 1 sanitized = sanitize_strings_for_openshift(repo, branch, limit, separator, False) return separator.join(filter(None, (sanitized, hash_str)))
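An illustrative call; the URL is a placeholder, and since the exact 5-character suffix depends on the sha256 of the repo path plus branch, it is shown symbolically:

name = make_name_from_git('https://example.com/containers/rhel7.git', 'stable')
# roughly 'rhel7-stable-ab12c': humanish repo part, branch, and a
# sha256-derived hash suffix, all kept within the 53-character limit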
<SYSTEM_TASK:> wraps the result of make_name_from_git in a prefix and suffix <END_TASK> <USER_TASK:> Description: def wrap_name_from_git(prefix, suffix, *args, **kwargs): """ wraps the result of make_name_from_git in a prefix and suffix adding separators for each. see docstring for make_name_from_git for a full list of parameters """
# 64 is maximum length allowed by OpenShift # 2 is the number of dashes that will be added prefix = ''.join(filter(VALID_BUILD_CONFIG_NAME_CHARS.match, list(prefix))) suffix = ''.join(filter(VALID_BUILD_CONFIG_NAME_CHARS.match, list(suffix))) kwargs['limit'] = kwargs.get('limit', 64) - len(prefix) - len(suffix) - 2 name_from_git = make_name_from_git(*args, **kwargs) return '-'.join([prefix, name_from_git, suffix])
<SYSTEM_TASK:> returns the most preferred label name <END_TASK> <USER_TASK:> Description: def get_name(self, label_type): """ returns the most preferred label name for the given label type; if none of the known names is present in the parsed labels, the newest (first-listed) label name is returned """
if label_type in self._label_values: return self._label_values[label_type][0] else: return Labels.LABEL_NAMES[label_type][0]
<SYSTEM_TASK:> Return dictionary, new label name indexed by old label name. <END_TASK> <USER_TASK:> Description: def get_new_names_by_old(): """Return dictionary, new label name indexed by old label name."""
newdict = {} for label_type, label_names in Labels.LABEL_NAMES.items(): for oldname in label_names[1:]: newdict[oldname] = Labels.LABEL_NAMES[label_type][0] return newdict
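The shape of the returned mapping, sketched with a single label type; the concrete contents of Labels.LABEL_NAMES are not shown in this excerpt, so the names below are only an assumption about typical entries, and the method is assumed to be a staticmethod (it takes no self):

# if LABEL_NAMES contained {LABEL_TYPE_COMPONENT: ('com.redhat.component', 'BZComponent')},
# every older alias would map to the preferred (first) name:
mapping = Labels.get_new_names_by_old()
# mapping would then include {'BZComponent': 'com.redhat.component'}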
<SYSTEM_TASK:> Checks whether kerberos credential cache has ticket-granting ticket that is valid for at least <END_TASK> <USER_TASK:> Description: def kerberos_ccache_init(principal, keytab_file, ccache_file=None): """ Checks whether kerberos credential cache has ticket-granting ticket that is valid for at least an hour. Default ccache is used unless ccache_file is provided. In that case, KRB5CCNAME environment variable is set to the value of ccache_file if we successfully obtain the ticket. """
tgt_valid = False env = {"LC_ALL": "C"} # klist uses locales to format date on RHEL7+ if ccache_file: env["KRB5CCNAME"] = ccache_file # check if we have tgt that is valid more than one hour rc, klist, _ = run(["klist"], extraenv=env) if rc == 0: for line in klist.splitlines(): m = re.match(KLIST_TGT_RE, line) if m: year = m.group("year") if len(year) == 2: year = "20" + year expires = datetime.datetime( int(year), int(m.group("month")), int(m.group("day")), int(m.group("hour")), int(m.group("minute")), int(m.group("second")) ) if expires - datetime.datetime.now() > datetime.timedelta(hours=1): logger.debug("Valid TGT found, not renewing") tgt_valid = True break if not tgt_valid: logger.debug("Retrieving kerberos TGT") rc, out, err = run(["kinit", "-k", "-t", keytab_file, principal], extraenv=env) if rc != 0: raise OsbsException("kinit returned %s:\nstdout: %s\nstderr: %s" % (rc, out, err)) if ccache_file: os.environ["KRB5CCNAME"] = ccache_file
<SYSTEM_TASK:> Find the data stored in the config_map <END_TASK> <USER_TASK:> Description: def get_data(self): """ Find the data stored in the config_map :return: dict, the decoded data that was passed into the ConfigMap on creation """
data = graceful_chain_get(self.json, "data") if data is None: return {} data_dict = {} for key in data: if self.is_yaml(key): data_dict[key] = yaml.load(data[key]) else: data_dict[key] = json.loads(data[key]) return data_dict
<SYSTEM_TASK:> Find the object stored by a JSON string at key 'name' <END_TASK> <USER_TASK:> Description: def get_data_by_key(self, name): """ Find the object stored by a JSON string at key 'name' :return: str or dict, the decoded str or dict stored in the ConfigMap under that key """
data = graceful_chain_get(self.json, "data") if data is None or name not in data: return {} if self.is_yaml(name): return yaml.load(data[name]) or {} return json.loads(data[name])
<SYSTEM_TASK:> List builds with matching fields <END_TASK> <USER_TASK:> Description: def list_builds(self, field_selector=None, koji_task_id=None, running=None, labels=None): """ List builds with matching fields :param field_selector: str, field selector for Builds :param koji_task_id: str, only list builds for Koji Task ID :param running: bool, if True, only list builds that have not finished yet :param labels: only list builds with matching labels :return: BuildResponse list """
if running: running_fs = ",".join(["status!={status}".format(status=status.capitalize()) for status in BUILD_FINISHED_STATES]) if not field_selector: field_selector = running_fs else: field_selector = ','.join([field_selector, running_fs]) response = self.os.list_builds(field_selector=field_selector, koji_task_id=koji_task_id, labels=labels) serialized_response = response.json() build_list = [] for build in serialized_response["items"]: build_list.append(BuildResponse(build, self)) return build_list
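A usage sketch; `osbs` stands for an already-configured client instance and the Koji task ID is made up:

# running=True appends "status!=Complete,status!=Failed"-style selectors so
# that finished builds are filtered out server-side
builds = osbs.list_builds(koji_task_id='123456', running=True)
# each element is a BuildResponse wrapping the raw build JSON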
<SYSTEM_TASK:> return instance of BuildRequest or BuildRequestV2 <END_TASK> <USER_TASK:> Description: def get_build_request(self, build_type=None, inner_template=None, outer_template=None, customize_conf=None, arrangement_version=DEFAULT_ARRANGEMENT_VERSION): """ return instance of BuildRequest or BuildRequestV2 :param build_type: str, unused :param inner_template: str, name of inner template for BuildRequest :param outer_template: str, name of outer template for BuildRequest :param customize_conf: str, name of customization config for BuildRequest :param arrangement_version: int, value of the arrangement version :return: instance of BuildRequest or BuildRequestV2 """
if build_type is not None: warnings.warn("build types are deprecated, do not use the build_type argument") validate_arrangement_version(arrangement_version) if not arrangement_version or arrangement_version < REACTOR_CONFIG_ARRANGEMENT_VERSION: build_request = BuildRequest( build_json_store=self.os_conf.get_build_json_store(), inner_template=inner_template, outer_template=outer_template, customize_conf=customize_conf) else: build_request = BuildRequestV2( build_json_store=self.os_conf.get_build_json_store(), outer_template=outer_template, customize_conf=customize_conf) # Apply configured resource limits. cpu_limit = self.build_conf.get_cpu_limit() memory_limit = self.build_conf.get_memory_limit() storage_limit = self.build_conf.get_storage_limit() if (cpu_limit is not None or memory_limit is not None or storage_limit is not None): build_request.set_resource_limits(cpu=cpu_limit, memory=memory_limit, storage=storage_limit) return build_request
<SYSTEM_TASK:> render provided build_request and submit build from it <END_TASK> <USER_TASK:> Description: def create_build_from_buildrequest(self, build_request): """ render provided build_request and submit build from it :param build_request: instance of build.build_request.BuildRequest :return: instance of build.build_response.BuildResponse """
build_request.set_openshift_required_version(self.os_conf.get_openshift_required_version()) build = build_request.render() response = self.os.create_build(json.dumps(build)) build_response = BuildResponse(response.json(), self) return build_response
<SYSTEM_TASK:> Return ImageStream, and ImageStreamTag name for base_image of build_request <END_TASK> <USER_TASK:> Description: def _get_image_stream_info_for_build_request(self, build_request): """Return ImageStream, and ImageStreamTag name for base_image of build_request If build_request is not auto instantiated, objects are not fetched and None, None is returned. """
image_stream = None image_stream_tag_name = None if build_request.has_ist_trigger(): image_stream_tag_id = build_request.trigger_imagestreamtag image_stream_id, image_stream_tag_name = image_stream_tag_id.split(':') try: image_stream = self.get_image_stream(image_stream_id).json() except OsbsResponseException as x: if x.status_code != 404: raise if image_stream: try: self.get_image_stream_tag(image_stream_tag_id).json() except OsbsResponseException as x: if x.status_code != 404: raise return image_stream, image_stream_tag_name
<SYSTEM_TASK:> Create a production build <END_TASK> <USER_TASK:> Description: def create_prod_build(self, *args, **kwargs): """ Create a production build :param git_uri: str, URI of git repository :param git_ref: str, reference to commit :param git_branch: str, branch name :param user: str, user name :param component: str, not used anymore :param target: str, koji target :param architecture: str, build architecture :param yum_repourls: list, URLs for yum repos :param koji_task_id: int, koji task ID requesting build :param scratch: bool, this is a scratch build :param platform: str, the platform name :param platforms: list<str>, the name of each platform :param release: str, the release value to use :param inner_template: str, name of inner template for BuildRequest :param outer_template: str, name of outer template for BuildRequest :param customize_conf: str, name of customization config for BuildRequest :param arrangement_version: int, numbered arrangement of plugins for orchestration workflow :param signing_intent: str, signing intent of the ODCS composes :param compose_ids: list<int>, ODCS composes used :return: BuildResponse instance """
logger.warning("prod (all-in-one) builds are deprecated, " "please use create_orchestrator_build " "(support will be removed in version 0.54)") return self._do_create_prod_build(*args, **kwargs)
<SYSTEM_TASK:> Create a worker build <END_TASK> <USER_TASK:> Description: def create_worker_build(self, **kwargs): """ Create a worker build Pass through method to create_prod_build with the following modifications: - platform param is required - release param is required - arrangement_version param is required, which is used to select which worker_inner:n.json template to use - inner template set to worker_inner:n.json if not set - outer template set to worker.json if not set - customize configuration set to worker_customize.json if not set :return: BuildResponse instance """
missing = set() for required in ('platform', 'release', 'arrangement_version'): if not kwargs.get(required): missing.add(required) if missing: raise ValueError("Worker build missing required parameters: %s" % missing) if kwargs.get('platforms'): raise ValueError("Worker build called with unwanted platforms param") arrangement_version = kwargs['arrangement_version'] kwargs.setdefault('inner_template', WORKER_INNER_TEMPLATE.format( arrangement_version=arrangement_version)) kwargs.setdefault('outer_template', WORKER_OUTER_TEMPLATE) kwargs.setdefault('customize_conf', WORKER_CUSTOMIZE_CONF) kwargs['build_type'] = BUILD_TYPE_WORKER try: return self._do_create_prod_build(**kwargs) except IOError as ex: if os.path.basename(ex.filename) == kwargs['inner_template']: raise OsbsValidationException("worker invalid arrangement_version %s" % arrangement_version) raise
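A hedged call sketch; the git parameters and the arrangement version are placeholders, and `osbs` is an already-configured client:

build = osbs.create_worker_build(
    git_uri='https://example.com/containers/app.git',
    git_ref='master',
    git_branch='master',
    user='builder',
    platform='x86_64',        # required
    release='1',              # required
    arrangement_version=6,    # required, selects worker_inner:6.json
)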
<SYSTEM_TASK:> Create an orchestrator build <END_TASK> <USER_TASK:> Description: def create_orchestrator_build(self, **kwargs): """ Create an orchestrator build Pass through method to create_prod_build with the following modifications: - platforms param is required - arrangement_version param may be used to select which orchestrator_inner:n.json template to use - inner template set to orchestrator_inner:n.json if not set - outer template set to orchestrator.json if not set - customize configuration set to orchestrator_customize.json if not set :return: BuildResponse instance """
if not self.can_orchestrate(): raise OsbsOrchestratorNotEnabled("can't create orchestrate build " "when can_orchestrate isn't enabled") extra = [x for x in ('platform',) if kwargs.get(x)] if extra: raise ValueError("Orchestrator build called with unwanted parameters: %s" % extra) arrangement_version = kwargs.setdefault('arrangement_version', self.build_conf.get_arrangement_version()) if arrangement_version < REACTOR_CONFIG_ARRANGEMENT_VERSION and not kwargs.get('platforms'): raise ValueError('Orchestrator build requires platforms param') kwargs.setdefault('inner_template', ORCHESTRATOR_INNER_TEMPLATE.format( arrangement_version=arrangement_version)) kwargs.setdefault('outer_template', ORCHESTRATOR_OUTER_TEMPLATE) kwargs.setdefault('customize_conf', ORCHESTRATOR_CUSTOMIZE_CONF) kwargs['build_type'] = BUILD_TYPE_ORCHESTRATOR try: return self._do_create_prod_build(**kwargs) except IOError as ex: if os.path.basename(ex.filename) == kwargs['inner_template']: raise OsbsValidationException("orchestrator invalid arrangement_version %s" % arrangement_version) raise
<SYSTEM_TASK:> provide logs from orchestrator build <END_TASK> <USER_TASK:> Description: def get_orchestrator_build_logs(self, build_id, follow=False, wait_if_missing=False): """ provide logs from orchestrator build :param build_id: str :param follow: bool, fetch logs as they come? :param wait_if_missing: bool, if build doesn't exist, wait :return: generator yielding objects with attributes 'platform' and 'line' """
logs = self.get_build_logs(build_id=build_id, follow=follow, wait_if_missing=wait_if_missing, decode=True) if logs is None: return if isinstance(logs, GeneratorType): for entries in logs: for entry in entries.splitlines(): yield LogEntry(*self._parse_build_log_entry(entry)) else: for entry in logs.splitlines(): yield LogEntry(*self._parse_build_log_entry(entry))
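Iterating the generator; `osbs` and `build_id` are assumed to exist, and the interpretation of a None platform as an orchestrator-level line is an assumption, not stated in this excerpt:

for entry in osbs.get_orchestrator_build_logs(build_id, follow=True):
    # entry.platform is typically None for orchestrator-level lines and the
    # worker platform name (e.g. 'x86_64') for per-platform lines
    print(entry.platform, entry.line)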
<SYSTEM_TASK:> Import image tags from specified container repository. <END_TASK> <USER_TASK:> Description: def import_image_tags(self, name, tags, repository, insecure=False): """Import image tags from specified container repository. :param name: str, name of ImageStream object :param tags: iterable, tags to be imported :param repository: str, remote location of container image in the format <registry>/<repository> :param insecure: bool, whether the registry should be treated as insecure :return: bool, whether tags were imported """
stream_import_file = os.path.join(self.os_conf.get_build_json_store(), 'image_stream_import.json') with open(stream_import_file) as f: stream_import = json.load(f) return self.os.import_image_tags(name, stream_import, tags, repository, insecure)
<SYSTEM_TASK:> Ensures the tag is monitored in ImageStream <END_TASK> <USER_TASK:> Description: def ensure_image_stream_tag(self, stream, tag_name, scheduled=False, source_registry=None, organization=None, base_image=None): """Ensures the tag is monitored in ImageStream :param stream: dict, ImageStream object :param tag_name: str, name of tag to check, without name of ImageStream as prefix :param scheduled: bool, if True, importPolicy.scheduled will be set to True in ImageStreamTag :param source_registry: dict, info about source registry :param organization: str, organization for registry :param base_image: str, base image :return: bool, whether or not modifications were performed """
img_stream_tag_file = os.path.join(self.os_conf.get_build_json_store(), 'image_stream_tag.json') with open(img_stream_tag_file) as f: tag_template = json.load(f) repository = None registry = None insecure = False if source_registry: registry = RegistryURI(source_registry['url']).docker_uri insecure = source_registry.get('insecure', False) if base_image and registry: repository = self._get_enclosed_repo_with_source_registry(base_image, registry, organization) return self.os.ensure_image_stream_tag(stream, tag_name, tag_template, scheduled, repository=repository, insecure=insecure)
<SYSTEM_TASK:> Create an ImageStream object <END_TASK> <USER_TASK:> Description: def create_image_stream(self, name, docker_image_repository, insecure_registry=False): """ Create an ImageStream object Raises exception on error :param name: str, name of ImageStream :param docker_image_repository: str, pull spec for docker image repository :param insecure_registry: bool, whether plain HTTP should be used :return: response """
img_stream_file = os.path.join(self.os_conf.get_build_json_store(), 'image_stream.json') with open(img_stream_file) as f: stream = json.load(f) stream['metadata']['name'] = name stream['metadata'].setdefault('annotations', {}) stream['metadata']['annotations'][ANNOTATION_SOURCE_REPO] = docker_image_repository if insecure_registry: stream['metadata']['annotations'][ANNOTATION_INSECURE_REPO] = 'true' return self.os.create_image_stream(json.dumps(stream))
<SYSTEM_TASK:> Find the filename extension for the 'docker save' output, which <END_TASK> <USER_TASK:> Description: def get_compression_extension(self): """ Find the filename extension for the 'docker save' output, which may or may not be compressed. Raises OsbsValidationException if the extension cannot be determined due to a configuration error. :returns: str including leading dot, or else None if no compression """
build_request = BuildRequest(build_json_store=self.os_conf.get_build_json_store()) inner = build_request.inner_template postbuild_plugins = inner.get('postbuild_plugins', []) for plugin in postbuild_plugins: if plugin.get('name') == 'compress': args = plugin.get('args', {}) method = args.get('method', 'gzip') if method == 'gzip': return '.gz' elif method == 'lzma': return '.xz' raise OsbsValidationException("unknown compression method '%s'" % method) return None
<SYSTEM_TASK:> Create a ConfigMap object on the server <END_TASK> <USER_TASK:> Description: def create_config_map(self, name, data): """ Create a ConfigMap object on the server Raises exception on error :param name: str, name of configMap :param data: dict, dictionary of data to be stored :returns: ConfigMapResponse containing the ConfigMap with name and data """
config_data_file = os.path.join(self.os_conf.get_build_json_store(), 'config_map.json') with open(config_data_file) as f: config_data = json.load(f) config_data['metadata']['name'] = name data_dict = {} for key, value in data.items(): data_dict[key] = json.dumps(value) config_data['data'] = data_dict response = self.os.create_config_map(config_data) config_map_response = ConfigMapResponse(response.json()) return config_map_response
<SYSTEM_TASK:> Get a ConfigMap object from the server <END_TASK> <USER_TASK:> Description: def get_config_map(self, name): """ Get a ConfigMap object from the server Raises exception on error :param name: str, name of configMap to get from the server :returns: ConfigMapResponse containing the ConfigMap with the requested name """
response = self.os.get_config_map(name) config_map_response = ConfigMapResponse(response.json()) return config_map_response
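A round trip through the two methods above; the names and payload are illustrative and `osbs` is an already-configured client:

osbs.create_config_map('sample-config', {'special.json': {'foo': 'bar'}})
cm = osbs.get_config_map('sample-config')
data = cm.get_data_by_key('special.json')
# data == {'foo': 'bar'}, decoded back from the JSON string stored on the server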
<SYSTEM_TASK:> Wipe the bolt database. <END_TASK> <USER_TASK:> Description: def wipe(self): """ Wipe the bolt database. Calling this after HoverPy has been instantiated is potentially dangerous. This function is mostly used internally for unit tests. """
try:
    # os.path.isfile is the correct call; os.isfile does not exist and would
    # raise AttributeError before the OSError handler could catch anything
    if os.path.isfile(self._dbpath):
        os.remove(self._dbpath)
except OSError:
    pass
<SYSTEM_TASK:> Set the required environment variables to enable the use of hoverfly as a proxy. <END_TASK> <USER_TASK:> Description: def __enableProxy(self): """ Set the required environment variables to enable the use of hoverfly as a proxy. """
os.environ["HTTP_PROXY"] = self.httpProxy()
os.environ["HTTPS_PROXY"] = self.httpsProxy()
os.environ["REQUESTS_CA_BUNDLE"] = os.path.join(
    os.path.dirname(os.path.abspath(__file__)), "cert.pem")
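Once these variables are exported, standard HTTP clients that honour proxy environment variables are routed through hoverfly without code changes; the URL below is a placeholder:

import requests

# picked up via HTTP_PROXY/HTTPS_PROXY and verified against the bundled cert.pem
response = requests.get('http://example.com/api/items')
print(response.status_code)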
<SYSTEM_TASK:> HoverFly fails to launch if it's already running on <END_TASK> <USER_TASK:> Description: def __writepid(self, pid): """ HoverFly fails to launch if it's already running on the same ports. So we have to keep track of them using temp files with the proxy port and admin port, containing the process's PID. """
import tempfile d = tempfile.gettempdir() name = os.path.join(d, "hoverpy.%i.%i"%(self._proxyPort, self._adminPort)) with open(name, 'w') as f: f.write(str(pid)) logging.debug("writing to %s"%name)
<SYSTEM_TASK:> Remove the PID file on shutdown, unfortunately <END_TASK> <USER_TASK:> Description: def __rmpid(self): """ Remove the PID file on shutdown; unfortunately this may not get called if the process is not given enough time to shut down. """
import tempfile d = tempfile.gettempdir() name = os.path.join(d, "hoverpy.%i.%i"%(self._proxyPort, self._adminPort)) if os.path.exists(name): os.unlink(name) logging.debug("deleting %s"%name)
<SYSTEM_TASK:> Start the hoverfly process. <END_TASK> <USER_TASK:> Description: def __start(self): """ Start the hoverfly process. This function waits until it can make contact with the hoverfly API before returning. """
logging.debug("starting %i" % id(self)) self.__kill_if_not_shut_properly() self.FNULL = open(os.devnull, 'w') flags = self.__flags() cmd = [hoverfly] + flags if self._showCmd: print(cmd) self._process = Popen( [hoverfly] + flags, stdin=self.FNULL, stdout=self.FNULL, stderr=subprocess.STDOUT) start = time.time() while time.time() - start < 1: try: url = "http://%s:%i/api/health" % (self._host, self._adminPort) r = self._session.get(url) j = r.json() up = "message" in j and "healthy" in j["message"] if up: logging.debug("has pid %i" % self._process.pid) self.__writepid(self._process.pid) return self._process else: time.sleep(1/100.0) except: # import traceback # traceback.print_exc() # wait 10 ms before trying again time.sleep(1/100.0) pass logging.error("Could not start hoverfly!") raise ValueError("Could not start hoverfly!")
<SYSTEM_TASK:> Stop the hoverfly process. <END_TASK> <USER_TASK:> Description: def __stop(self): """ Stop the hoverfly process. """
if logging: logging.debug("stopping") self._process.terminate() # communicate means we wait until the process # was actually terminated, this removes some # warnings in python3 self._process.communicate() self._process = None self.FNULL.close() self.FNULL = None self.__disableProxy() # del self._session # self._session = None self.__rmpid()
<SYSTEM_TASK:> Internal method. Turns arguments into flags. <END_TASK> <USER_TASK:> Description: def __flags(self): """ Internal method. Turns arguments into flags. """
flags = [] if self._capture: flags.append("-capture") if self._spy: flags.append("-spy") if self._dbpath: flags += ["-db-path", self._dbpath] flags += ["-db", "boltdb"] else: flags += ["-db", "memory"] if self._synthesize: assert(self._middleware) flags += ["-synthesize"] if self._simulation: flags += ["-import", self._simulation] if self._proxyPort: flags += ["-pp", str(self._proxyPort)] if self._adminPort: flags += ["-ap", str(self._adminPort)] if self._modify: flags += ["-modify"] if self._verbose: flags += ["-v"] if self._dev: flags += ["-dev"] if self._metrics: flags += ["-metrics"] if self._auth: flags += ["-auth"] if self._middleware: flags += ["-middleware", self._middleware] if self._cert: flags += ["-cert", self._cert] if self._certName: flags += ["-cert-name", self._certName] if self._certOrg: flags += ["-cert-org", self._certOrg] if self._destination: flags += ["-destination", self._destination] if self._key: flags += ["-key", self._key] if self._dest: for i in range(len(self._dest)): flags += ["-dest", self._dest[i]] if self._generateCACert: flags += ["-generate-ca-cert"] if not self._tlsVerification: flags += ["-tls-verification", "false"] logging.debug("flags:" + str(flags)) return flags
<SYSTEM_TASK:> Extract tabular data as |TableData| instances from a JSON file. <END_TASK> <USER_TASK:> Description: def load(self): """ Extract tabular data as |TableData| instances from a JSON file. |load_source_desc_file| This method can be loading four types of JSON formats: **(1)** Single table data in a file: .. code-block:: json :caption: Acceptable JSON Schema (1): single table { "type": "array", "items": { "type": "object", "additionalProperties": { "anyOf": [ {"type": "string"}, {"type": "number"}, {"type": "boolean"}, {"type": "null"} ] } } } .. code-block:: json :caption: Acceptable JSON example for the JSON schema (1) [ {"attr_b": 4, "attr_c": "a", "attr_a": 1}, {"attr_b": 2.1, "attr_c": "bb", "attr_a": 2}, {"attr_b": 120.9, "attr_c": "ccc", "attr_a": 3} ] The example data will be loaded as the following tabular data: .. table:: +------+------+------+ |attr_a|attr_b|attr_c| +======+======+======+ | 1| 4.0|a | +------+------+------+ | 2| 2.1|bb | +------+------+------+ | 3| 120.9|ccc | +------+------+------+ **(2)** Single table data in a file: .. code-block:: json :caption: Acceptable JSON Schema (2): single table { "type": "object", "additionalProperties": { "type": "array", "items": { "anyOf": [ {"type": "string"}, {"type": "number"}, {"type": "boolean"}, {"type": "null"} ] } } } .. code-block:: json :caption: Acceptable JSON example for the JSON schema (2) { "attr_a": [1, 2, 3], "attr_b": [4, 2.1, 120.9], "attr_c": ["a", "bb", "ccc"] } The example data will be loaded as the following tabular data: .. table:: +------+------+------+ |attr_a|attr_b|attr_c| +======+======+======+ | 1| 4.0|a | +------+------+------+ | 2| 2.1|bb | +------+------+------+ | 3| 120.9|ccc | +------+------+------+ **(3)** Single table data in a file: .. code-block:: json :caption: Acceptable JSON Schema (3): single table { "type": "object", "additionalProperties": { "anyOf": [ {"type": "string"}, {"type": "number"}, {"type": "boolean"}, {"type": "null"} ] } } .. code-block:: json :caption: Acceptable JSON example for the JSON schema (3) { "num_ratings": 27, "support_threads": 1, "downloaded": 925716, "last_updated":"2017-12-01 6:22am GMT", "added":"2010-01-20", "num": 1.1, "hoge": null } The example data will be loaded as the following tabular data: .. table:: +---------------+---------------------+ | key | value | +===============+=====================+ |num_ratings | 27| +---------------+---------------------+ |support_threads| 1| +---------------+---------------------+ |downloaded | 925716| +---------------+---------------------+ |last_updated |2017-12-01 6:22am GMT| +---------------+---------------------+ |added |2010-01-20 | +---------------+---------------------+ |num | 1.1| +---------------+---------------------+ |hoge |None | +---------------+---------------------+ **(4)** Multiple table data in a file: .. code-block:: json :caption: Acceptable JSON Schema (4): multiple tables { "type": "object", "additionalProperties": { "type": "array", "items": { "type": "object", "additionalProperties": { "anyOf": [ {"type": "string"}, {"type": "number"}, {"type": "boolean"}, {"type": "null"} ] } } } } .. code-block:: json :caption: Acceptable JSON example for the JSON schema (4) { "table_a" : [ {"attr_b": 4, "attr_c": "a", "attr_a": 1}, {"attr_b": 2.1, "attr_c": "bb", "attr_a": 2}, {"attr_b": 120.9, "attr_c": "ccc", "attr_a": 3} ], "table_b" : [ {"a": 1, "b": 4}, {"a": 2 }, {"a": 3, "b": 120.9} ] } The example data will be loaded as the following tabular data: .. 
table:: table_a +------+------+------+ |attr_a|attr_b|attr_c| +======+======+======+ | 1| 4.0|a | +------+------+------+ | 2| 2.1|bb | +------+------+------+ | 3| 120.9|ccc | +------+------+------+ .. table:: table_b +-+-----+ |a| b | +=+=====+ |1| 4.0| +-+-----+ |2| None| +-+-----+ |3|120.9| +-+-----+ **(5)** Multiple table data in a file: .. code-block:: json :caption: Acceptable JSON Schema (5): multiple tables { "type": "object", "additionalProperties": { "type": "object", "additionalProperties": { "type": "array", "items": { "anyOf": [ {"type": "string"}, {"type": "number"}, {"type": "boolean"}, {"type": "null"} ] } } } } .. code-block:: json :caption: Acceptable JSON example for the JSON schema (5) { "table_a" : { "attr_a": [1, 2, 3], "attr_b": [4, 2.1, 120.9], "attr_c": ["a", "bb", "ccc"] }, "table_b" : { "a": [1, 3], "b": [4, 120.9] } } The example data will be loaded as the following tabular data: .. table:: table_a +------+------+------+ |attr_a|attr_b|attr_c| +======+======+======+ | 1| 4.0|a | +------+------+------+ | 2| 2.1|bb | +------+------+------+ | 3| 120.9|ccc | +------+------+------+ .. table:: table_b +-+-----+ |a| b | +=+=====+ |1| 4.0| +-+-----+ |3|120.9| +-+-----+ **(6)** Multiple table data in a file: .. code-block:: json :caption: Acceptable JSON Schema (6): multiple tables { "type": "object", "additionalProperties": { "type": "object", "additionalProperties": { "anyOf": [ {"type": "string"}, {"type": "number"}, {"type": "boolean"}, {"type": "null"} ] } } } .. code-block:: json :caption: Acceptable JSON example for the JSON schema (6) { "table_a": { "num_ratings": 27, "support_threads": 1, "downloaded": 925716, "last_updated":"2017-12-01 6:22am GMT", "added":"2010-01-20", "num": 1.1, "hoge": null }, "table_b": { "a": 4, "b": 120.9 } } The example data will be loaded as the following tabular data: .. table:: table_a +---------------+---------------------+ | key | value | +===============+=====================+ |num_ratings | 27| +---------------+---------------------+ |support_threads| 1| +---------------+---------------------+ |downloaded | 925716| +---------------+---------------------+ |last_updated |2017-12-01 6:22am GMT| +---------------+---------------------+ |added |2010-01-20 | +---------------+---------------------+ |num | 1.1| +---------------+---------------------+ |hoge |None | +---------------+---------------------+ .. table:: table_b +---+-----+ |key|value| +===+=====+ |a | 4.0| +---+-----+ |b |120.9| +---+-----+ :return: Loaded table data iterator. |load_table_name_desc| =================== ============================================== Format specifier Value after the replacement =================== ============================================== ``%(filename)s`` |filename_desc| ``%(key)s`` | This replaced the different value | for each single/multiple JSON tables: | [single JSON table] | ``%(format_name)s%(format_id)s`` | [multiple JSON table] Table data key. ``%(format_name)s`` ``"json"`` ``%(format_id)s`` |format_id_desc| ``%(global_id)s`` |global_id| =================== ============================================== :rtype: |TableData| iterator :raises pytablereader.DataError: If the data is invalid JSON. :raises pytablereader.error.ValidationError: If the data is not acceptable JSON format. """
formatter = JsonTableFormatter(self.load_dict()) formatter.accept(self) return formatter.to_table_data()
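A usage sketch for the loader; the class and attribute names are assumed from the pytablereader/tabledata packages this excerpt appears to come from, and the file path is a placeholder:

import pytablereader as ptr

loader = ptr.JsonTableFileLoader('sample.json')
for table_data in loader.load():
    print(table_data.table_name)
    for row in table_data.rows:
        print(row)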
<SYSTEM_TASK:> Extract tabular data as |TableData| instances from a MediaWiki file. <END_TASK> <USER_TASK:> Description: def load(self): """ Extract tabular data as |TableData| instances from a MediaWiki file. |load_source_desc_file| :return: Loaded table data iterator. |load_table_name_desc| =================== ============================================== Format specifier Value after the replacement =================== ============================================== ``%(filename)s`` |filename_desc| ``%(key)s`` | This replaced to: | **(1)** ``caption`` mark of the table | **(2)** ``%(format_name)s%(format_id)s`` | if ``caption`` mark not included | in the table. ``%(format_name)s`` ``"mediawiki"`` ``%(format_id)s`` |format_id_desc| ``%(global_id)s`` |global_id| =================== ============================================== :rtype: |TableData| iterator :raises pytablereader.DataError: If the MediaWiki data is invalid or empty. """
self._validate() self._logger.logging_load() self.encoding = get_file_encoding(self.source, self.encoding) with io.open(self.source, "r", encoding=self.encoding) as fp: formatter = MediaWikiTableFormatter(fp.read()) formatter.accept(self) return formatter.to_table_data()
<SYSTEM_TASK:> Extract tabular data as |TableData| instances from a MediaWiki text <END_TASK> <USER_TASK:> Description: def load(self): """ Extract tabular data as |TableData| instances from a MediaWiki text object. |load_source_desc_text| :return: Loaded table data iterator. |load_table_name_desc| =================== ============================================== Format specifier Value after the replacement =================== ============================================== ``%(filename)s`` ``""`` ``%(key)s`` | This replaced to: | **(1)** ``caption`` mark of the table | **(2)** ``%(format_name)s%(format_id)s`` | if ``caption`` mark not included | in the table. ``%(format_name)s`` ``"mediawiki"`` ``%(format_id)s`` |format_id_desc| ``%(global_id)s`` |global_id| =================== ============================================== :rtype: |TableData| iterator :raises pytablereader.DataError: If the MediaWiki data is invalid or empty. """
self._validate() self._logger.logging_load() formatter = MediaWikiTableFormatter(self.source) formatter.accept(self) return formatter.to_table_data()
<SYSTEM_TASK:> return dock json from existing build json <END_TASK> <USER_TASK:> Description: def get_dock_json(self): """ return dock json from existing build json """
env_json = self.build_json['spec']['strategy']['customStrategy']['env'] try: p = [env for env in env_json if env["name"] == "ATOMIC_REACTOR_PLUGINS"] except TypeError: raise RuntimeError("\"env\" is not iterable") if len(p) <= 0: raise RuntimeError("\"env\" misses key ATOMIC_REACTOR_PLUGINS") dock_json_str = p[0]['value'] dock_json = json.loads(dock_json_str) return dock_json
<SYSTEM_TASK:> Find the image IDs the containers use. <END_TASK> <USER_TASK:> Description: def get_container_image_ids(self): """ Find the image IDs the containers use. :return: dict, image tag to docker ID """
statuses = graceful_chain_get(self.json, "status", "containerStatuses") if statuses is None: return {} def remove_prefix(image_id, prefix): if image_id.startswith(prefix): return image_id[len(prefix):] return image_id return {status['image']: remove_prefix(status['imageID'], 'docker://') for status in statuses}
<SYSTEM_TASK:> Find the reason a pod failed <END_TASK> <USER_TASK:> Description: def get_failure_reason(self): """ Find the reason a pod failed :return: dict, which will always have key 'reason': reason: brief reason for state containerID (if known): ID of container exitCode (if known): numeric exit code """
reason_key = 'reason' cid_key = 'containerID' exit_key = 'exitCode' pod_status = self.json.get('status', {}) statuses = pod_status.get('containerStatuses', []) # Find the first non-zero exit code from a container # and return its 'message' or 'reason' value for status in statuses: try: terminated = status['state']['terminated'] exit_code = terminated['exitCode'] if exit_code != 0: reason_dict = { exit_key: exit_code, } if 'containerID' in terminated: reason_dict[cid_key] = terminated['containerID'] for key in ['message', 'reason']: try: reason_dict[reason_key] = terminated[key] break except KeyError: continue else: # Both 'message' and 'reason' are missing reason_dict[reason_key] = 'Exit code {code}'.format( code=exit_code ) return reason_dict except KeyError: continue # Failing that, return the 'message' or 'reason' value for the # pod for key in ['message', 'reason']: try: return {reason_key: pod_status[key]} except KeyError: continue return {reason_key: pod_status['phase']}
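The possible return shapes, sketched with made-up values; `pod` stands for an instance of the response class this method belongs to:

reason = pod.get_failure_reason()
# non-zero container exit:      {'reason': 'Error', 'containerID': 'docker://abc...', 'exitCode': 1}
# pod-level message/reason only: {'reason': 'DeadlineExceeded'}
# fallback to the pod phase:     {'reason': 'Failed'}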
<SYSTEM_TASK:> Return an error message based on atomic-reactor's metadata <END_TASK> <USER_TASK:> Description: def get_error_message(self): """ Return an error message based on atomic-reactor's metadata """
error_reason = self.get_error_reason() if error_reason: error_message = error_reason.get('pod') or None if error_message: return "Error in pod: %s" % error_message plugin = error_reason.get('plugin')[0] or None error_message = error_reason.get('plugin')[1] or None if error_message: # Plugin has non-empty error description return "Error in plugin %s: %s" % (plugin, error_message) else: return "Error in plugin %s" % plugin
<SYSTEM_TASK:> Extract tabular data as |TableData| instances from an Excel file. <END_TASK> <USER_TASK:> Description: def load(self): """ Extract tabular data as |TableData| instances from an Excel file. |spreadsheet_load_desc| :return: Loaded |TableData| iterator. |TableData| created for each sheet in the workbook. |load_table_name_desc| =================== ==================================== Format specifier Value after the replacement =================== ==================================== ``%(filename)s`` Filename of the workbook ``%(sheet)s`` Name of the sheet ``%(format_name)s`` ``"spreadsheet"`` ``%(format_id)s`` |format_id_desc| ``%(global_id)s`` |global_id| =================== ==================================== :rtype: |TableData| iterator :raises pytablereader.DataError: If the header row is not found. :raises pytablereader.error.OpenError: If failed to open the source file. """
import xlrd self._validate() self._logger.logging_load() try: workbook = xlrd.open_workbook(self.source) except xlrd.biffh.XLRDError as e: raise OpenError(e) for worksheet in workbook.sheets(): self._worksheet = worksheet if self._is_empty_sheet(): continue self.__extract_not_empty_col_idx() try: start_row_idx = self._get_start_row_idx() except DataError: continue rows = [ self.__get_row_values(row_idx) for row_idx in range(start_row_idx + 1, self._row_count) ] self.inc_table_count() headers = self.__get_row_values(start_row_idx) yield TableData( self._make_table_name(), headers, rows, dp_extractor=self.dp_extractor, type_hints=self._extract_type_hints(headers), )
<SYSTEM_TASK:> Extract tabular data as |TableData| instances from a LTSV file. <END_TASK> <USER_TASK:> Description: def load(self): """ Extract tabular data as |TableData| instances from a LTSV file. |load_source_desc_file| :return: Loaded table data. |load_table_name_desc| =================== ======================================== Format specifier Value after the replacement =================== ======================================== ``%(filename)s`` |filename_desc| ``%(format_name)s`` ``"ltsv"`` ``%(format_id)s`` |format_id_desc| ``%(global_id)s`` |global_id| =================== ======================================== :rtype: |TableData| iterator :raises pytablereader.InvalidHeaderNameError: If an invalid label name is included in the LTSV file. :raises pytablereader.DataError: If the LTSV data is invalid. """
self._validate() self._logger.logging_load() self.encoding = get_file_encoding(self.source, self.encoding) self._ltsv_input_stream = io.open(self.source, "r", encoding=self.encoding) for data_matrix in self._to_data_matrix(): formatter = SingleJsonTableConverterA(data_matrix) formatter.accept(self) return formatter.to_table_data()
<SYSTEM_TASK:> Extract tabular data as |TableData| instances from a LTSV text object. <END_TASK> <USER_TASK:> Description: def load(self): """ Extract tabular data as |TableData| instances from a LTSV text object. |load_source_desc_text| :return: Loaded table data. |load_table_name_desc| =================== ======================================== Format specifier Value after the replacement =================== ======================================== ``%(filename)s`` ``""`` ``%(format_name)s`` ``"ltsv"`` ``%(format_id)s`` |format_id_desc| ``%(global_id)s`` |global_id| =================== ======================================== :rtype: |TableData| iterator :raises pytablereader.InvalidHeaderNameError: If an invalid label name is included in the LTSV file. :raises pytablereader.DataError: If the LTSV data is invalid. """
self._validate() self._logger.logging_load() self._ltsv_input_stream = self.source.splitlines() for data_matrix in self._to_data_matrix(): formatter = SingleJsonTableConverterA(data_matrix) formatter.accept(self) return formatter.to_table_data()